From b6475b537a6fd39568e46f3c91e6f53b79ae9703 Mon Sep 17 00:00:00 2001 From: adyoun2 Date: Mon, 15 Oct 2018 11:46:23 +0100 Subject: [PATCH] Squashed commit of the following: commit 7cb39d636c33289a1f93417c1750ef6173df348e Author: Jeff Storck Date: Fri Oct 12 16:57:15 2018 -0400 NIFI-5696 Update references to default value for nifi.cluster.node.load.balance.port This closes #3071. Signed-off-by: Koji Kawamura commit 0229a5c10953b0b003e3613e4a172a1641c6452d Author: zenfenan Date: Sun Oct 14 13:18:25 2018 +0530 NIFI-5698: Fixed DeleteAzureBlobStorage bug This closes #3073. Signed-off-by: Koji Kawamura commit e30a21cfc5e8bed187ec18e35ab0f21e6a16fb71 Author: Brad Hards Date: Sat Oct 13 19:25:43 2018 +1100 [NIFI-5697] Trivial description fix for GenerateFlowFile processor This closes #3072. Signed-off-by: Aldrin Piri commit 270ce8570df7a00a26f431d8d8ae0245b898bf69 Author: Mark Payne Date: Fri Oct 12 15:27:10 2018 -0400 NIFI-5695: Fixed bug that caused ports to not properly map to their correct child group on Flow Import if the child group is independently versioned This closes #3070. Signed-off-by: Bryan Bende commit 5eb5e96b1621d87bf8cca96d18a2c36177a1f54e Author: thenatog Date: Mon Oct 8 12:58:20 2018 -0400 NIFI-5665 - Changed netty versions to more closely match the original netty dependency version. NIFI-5665 - Fixed version for nifi-spark-bundle. NIFI-5665 - Fixing copy and paste error. This closes #3067 commit 02e0a16a681bea3a2bee6a90aa87baed2404a25e Author: Bryan Bende Date: Thu Oct 11 15:58:55 2018 -0400 NIFI-5680 Handling trailing slashes on URLs of registry clients This closes #3065. Signed-off-by: Mark Payne commit 0f8880547fad35243dae0d4d4ab6d8a5eb38d516 Author: Matt Gilman Date: Fri Oct 12 10:23:47 2018 -0400 NIFI-5691: - Overriding the version of jackson in aws java sdk. This closes #3066. Signed-off-by: Aldrin Piri commit e25b26e9cf860db038da8e23e8e64bdb1b8dba88 Author: joewitt Date: Fri Oct 12 11:27:48 2018 -0400 Revert "NIFI-5448 Added failure relationship to UpdateAttributes to handle bad expression language logic." This reverts commit 32ee552ada328ed1189ed2bd0a2af18ed213ddc8. commit 6b77e7dd895affa739c55a7db969fa6bcde44ad1 Author: joewitt Date: Fri Oct 12 11:08:22 2018 -0400 Revert "NIFI-5448 Changed from 'stop' to 'penalize' in allowablevalue field to make the popup more consistent." This reverts commit 9d2b698c1cdfb54411b9f147573767bdda6e355c. commit a6b9364ebf649679a739bfe4988899c4d9186f76 Author: Carl Gieringer Date: Thu Oct 4 12:50:08 2018 -0400 NIFI-5664 Support ArrayList in DataTypeUtils#toArray NIFI-5664 Generalize to handling List This closes #3049 Signed-off-by: Mike Thomsen commit 5aa426358802bfac91657fdb8a8a83094239ced8 Author: Endre Zoltan Kovacs Date: Mon Oct 8 13:10:37 2018 +0200 NIFI-1490: better field naming / displayname and description mix up fix This closes #2994.
Signed-off-by: Mark Payne commit c81a135161c7af106117dcc616706798c4564669 Author: Endre Zoltan Kovacs Date: Thu Sep 6 17:33:33 2018 +0200 NIFI-1490: multipart/form-data support for ListenHTTP processor - introducing an in-memory-file-size-threshold, above which the incoming file is written to local disk - using java.io.tmpdir for such file writes - enhancing documentation commit 8398ea77bc2cb15068ebba37dd8f7f270befe308 Author: Mark Payne Date: Thu Oct 11 14:57:31 2018 -0400 NIFI-5688: Ensure that when we map our flow to a VersionedProcessGroup that we include the connections' Load Balance Compression flag This closes #3064 commit 8da403ce9e8135ea7fc86fa7eb308ad5dda9dabd Author: Matt Gilman Date: Thu Oct 11 13:21:20 2018 -0400 NIFI-5661: - Allowing load balance settings to be applied during creation. - Clearing the load balance settings when the dialog is closed. commit 79c03caf4f57a1273cbe5856ec7b6fc007f00a3e Author: Matt Gilman Date: Thu Oct 11 12:23:53 2018 -0400 NIFI-5661: - Allowing the load balance configuration to be shown/edited in both clustered and standalone mode. commit 64de5c70e19ae0e608a61f2f978661585140b777 Author: thenatog Date: Fri Sep 7 12:39:18 2018 -0400 NIFI-5479 - Suppressed the AnnotationParser logs using the logback.xml. Dependency changes can be looked at in the future. NIFI-5479 - Updated comment. This closes #3034 commit 8a751e80183d46febd7abc6e66b72342ca5a5f38 Author: Koji Kawamura Date: Fri Sep 14 21:18:04 2018 +0900 NIFI-5661: Adding Load Balance config UI Incorporated review comments. Move combo options to a common place. This closes #3046 commit a6f722222a7fa31fef06a465efe506fd0a773eec Author: Koji Kawamura Date: Fri Sep 28 17:37:34 2018 +0900 NIFI-5645: Auto reconnect ConsumeWindowsEventLog This commit also contains the following refactoring: - Catch URISyntaxException inside subscribe when constructing provenance URI as it does not affect the core responsibility of this processor. Even if it fails to be a proper URI, if the query works for consuming logs, the processor should proceed forward. Upgrade JNA version. Do not update lastActivityTimestamp when subscribe failed. This closes #3037 commit 97afa4e7bab53f707c8299adaab9cb30a8777dd5 Author: Mark Payne Date: Tue Oct 9 14:54:21 2018 -0400 NIFI-5585: Addressed bug in calculating swap size of a queue partition when rebalancing This closes #3010. Signed-off-by: Mark Payne commit a1a4c997634aa7edabda42407a0a7627d33e73fd Author: Mark Payne Date: Mon Oct 8 09:53:14 2018 -0400 NIFI-5585: Adjustments to the Connection Load Balancing to ensure that node offloading works smoothly Signed-off-by: Jeff Storck commit 01e2098d242f45f519bee0572de3c86cc7837645 Author: Jeff Storck Date: Tue Sep 25 15:17:19 2018 -0400 NIFI-5585 A node that was previously offloaded can now be reconnected to the cluster and queue flowfiles again Added Spock test for NonLocalPartitionPartitioner Updated NOTICE files for FontAwesome with the updated version (4.7.0) and URL to the free license Updated package-lock.json with the updated version of FontAwesome (4.7.0) Added method to FlowFileQueue interface to reset an offloaded queue Queues now immediately have the offloaded status reset once offloading finishes SocketLoadBalancedFlowFileQueue now ignores back-pressure when offloading flowfiles Cleaned up javascript in nf-cluster-table.js when creating markup for the node operation icons Fixed incorrect handling of a heartbeat from an offloaded node.
Heartbeats from offloading or offloaded nodes will now be reported as an event, and the heartbeat will be removed and ignored. Added unit tests and integration tests to cover offloading nodes Updated Cluster integration test class with accessor for the current cluster coordinator Updated Node integration test class's custom NiFiProperties implementation to return the load balancing port and a method to assert an offloaded node Added exclusion to top-level pom for ITSpec.class commit be2c24cfaf00f74ba9dfa1b5cf04aa43ee818afb Author: Mark Payne Date: Mon Sep 24 09:17:22 2018 -0400 NIFI-5585: Fixed bug that arose when multiple nodes were decommissioning at the same time; could get into a state where the nodes queued up data for one another so the data just stayed put commit 04d8da8f46c26bd829c4411ea92692589d93278a Author: Jeff Storck Date: Tue Sep 18 17:09:13 2018 -0400 NIFI-5585 Added capability to offload a node that is disconnected from the cluster. Updated NodeClusterCoordinator to allow idempotent requests to offload a cluster Added capability to connect/delete/disconnect/offload a node from the cluster to the Toolkit CLI Added capability to get the status of nodes from the cluster to the Toolkit CLI Upgraded FontAwesome to 4.7.0 (from 4.6.1) Added icon "fa-upload" for offloading nodes in the cluster table UI commit 83ca67649263bd5451807c446e03fa02a2429410 Author: Kotaro Terada Date: Tue Oct 9 18:31:41 2018 +0900 NIFI-5681: Fix a locale-dependent test in TestVersionedFlowSnapshotMetadataResult Signed-off-by: Pierre Villard This closes #3061. commit 6c17685cbee059c479b0cdfbfaa50ec613b78575 Author: Kotaro Terada Date: Fri Oct 5 16:43:44 2018 +0900 NIFI-5675: Fix some locale-dependent tests in ConvertExcelToCSVProcessorTest Signed-off-by: Pierre Villard This closes #3058. commit fc5c8baeb3c0e2fbed2cefd11752c40aa06e5783 Author: Kotaro Terada Date: Tue Oct 9 14:12:53 2018 +0900 NIFI-5676: Fix a timezone-dependent test in PutORCTest Signed-off-by: Pierre Villard This closes #3059. commit dd50322749aa667beb0dc16b2535a7aa1da1ca73 Author: Matt Gilman Date: Tue Oct 9 12:49:31 2018 -0400 NIFI-5600: Recalculating the available columns for the queue listing and component state because they contain conditions which need to be re-evaluated. Signed-off-by: Pierre Villard This closes #3055. commit 9dfc6683eef93290f118ccf6bc2b93f6370bddc5 Author: Mark Payne Date: Tue Oct 9 12:19:24 2018 -0400 NIFI-5672: Do not compare Load Balancing address/port for logical equivalence of Node Identifiers. Added more details to logging of Node Identifiers This closes #3054 commit 77edddd98828157d7f495c5b0ea667a0c36e96cc Author: joewitt Date: Mon Oct 8 13:35:01 2018 -0400 NIFI-5666 Updated all usages of Spring, beanutils, collections to move beyond deps with CVEs This closes #3052 commit 117e60c8e338fb78db2e7014042834f541720136 Author: Mark Payne Date: Tue Oct 9 12:23:44 2018 -0400 Empty commit to force Github sync commit c425bd2880dc2c45c96e0dfcc4990f1e20e14d0a Author: Mark Payne Date: Fri Aug 17 14:08:14 2018 -0400 NIFI-5533: Be more efficient with heap utilization - Updated FlowFile Repo / Write Ahead Log so that any update that writes more than 1 MB of data is written to a file inside the FlowFile Repo rather than being buffered in memory - Update SplitText so that it does not hold FlowFiles that are not the latest version in heap.
Doing so prevents them from being garbage collected, so while the Process Session is holding the latest version of the FlowFile, SplitText is holding an older version, and this results in two copies of the same FlowFile object NIFI-5533: Checkpoint NIFI-5533: Bug Fixes Signed-off-by: Matthew Burgess This closes #2974 commit c87d791938562de04ee598ebffa296f954130ca7 Author: Mark Payne Date: Fri Oct 5 12:06:39 2018 -0400 NIFI-5663: Ensure that when sorting Node Identifiers we use both the node's API Address as well as API Port, in case 2 nodes are running on the same host. Also ensure that when Local Node ID is determined we update all Load Balancing Partitions, if necessary This closes #3048. Signed-off-by: Koji Kawamura commit 768bcfb5092fb240adbe528103c06d7796a709bc Author: Pierre Villard Date: Tue Sep 25 22:53:28 2018 +0200 NIFI-5635 - Description of PutEmail properties with multiple senders/recipients This closes #3031 Signed-off-by: Mike Moser commit 246c090526143943557b15868db6e8fe3fb30cf6 Author: thenatog Date: Thu Sep 13 21:45:00 2018 -0400 NIFI-5595 - Added the CORS filter to the templates/upload endpoint using a URL matcher. Explicitly allow methods GET, HEAD. These are the Spring defaults when the allowedMethods is empty but now it is explicit. This will require other methods like POST etc to be from the same origin (for the template/upload URL). This closes #3024. Signed-off-by: Andy LoPresto commit c6572f042bf1637f6faaa2b2ffe4a56e297c6d1a Author: Matthew Burgess Date: Fri Aug 10 16:49:25 2018 -0400 NIFI-4517: Added ExecuteSQLRecord and QueryDatabaseTableRecord processors Signed-off-by: Pierre Villard This closes #2945. commit b4810b8dd76cae6272a0e9fc2c2cff2eddb80c8b Author: Mark Payne Date: Fri Oct 5 12:08:55 2018 -0400 Empty commit to force sync with mirrors commit 619f1ffe8fbbca61bc5545f13920190a77006e08 Author: Mark Payne Date: Thu Jun 14 11:57:21 2018 -0400 NIFI-5516: Implement Load-Balanced Connections Refactoring StandardFlowFileQueue to have an AbstractFlowFileQueue Refactored more into AbstractFlowFileQueue Added documentation, cleaned up code some Refactored FlowFileQueue so that there is SwappablePriorityQueue Several unit tests written Added REST API Endpoint to allow PUT to update connection to use load balancing or not. When enabling load balancing, though, I saw the queue size go from 9 to 18. Then was only able to process 9 FlowFiles. Bug fixes Code refactoring Added integration tests, bug fixes Refactored clients to use NIO Bug fixes. Appears to finally be working with NIO Client!!!!! NIFI-5516: Refactored some code from NioAsyncLoadBalanceClient to LoadBalanceSession Bug fixes and allowed load balancing socket connections to be reused Implemented ability to compress Nothing, Attributes, or Content + Attributes when performing load-balancing Added flag to ConnectionDTO to indicate Load Balance Status Updated Diagnostics DTO for connections Store state about cluster topology in NodeClusterCoordinator so that the state is known upon restart Code cleanup Fixed checkstyle and unit tests NIFI-5516: Updating logic for Cluster Node Firewall so that the node's identity comes from its certificate, not from whatever it says it is. NIFI-5516: Fixed missing License headers NIFI-5516: Some minor code cleanup NIFI-5516: Addressed review feedback; Bug fixes; some code cleanup.
Changed dependency on nifi-registry from SNAPSHOT to official 0.3.0 release NIFI-5516: Take backpressure configuration into account NIFI-5516: Fixed ConnectionDiagnosticsSnapshot to include node identifier NIFI-5516: Addressed review feedback This closes #2947 commit 5872eb3c4a060684a88555f1c697f07bec4c26dd Author: Mark Payne Date: Wed Aug 15 10:23:49 2018 -0400 NIFI-5331: When checkpointing SequentialAccessWriteAheadLog, if the journal is not healthy, ensure that we roll it over and ensure that if an Exception is thrown when attempting to fsync() or close() the journal, we continue creating a new one. This closes #2952. Signed-off-by: Brandon Devries commit 8f4d13eeacc0bb1bd159ecb8fedf71eda5ea15a9 Author: Koji Kawamura Date: Thu Oct 4 13:48:26 2018 +0900 NIFI-5581: Fix replicate request timeout This closes #3044 - Revert 87cf474e542ef16601a86cc66c624fb8902c9fc2 to enable connection pooling - Changes the expected HTTP status code for the 1st request of a two-phase commit transaction from 150 (NiFi custom) to 202 Accepted - Corrected RevisionManager Javadoc about revision validation protocol commit f65286be83f03258964cdec9395c4148c29ec8f8 Author: Andy LoPresto Date: Fri Sep 21 19:26:10 2018 -0700 NIFI-5622 Updated test resource keystores and truststores with SubjectAlternativeNames to be compliant with RFC 6125. Refactored some test code to be clearer. Renamed some resources to be consistent across modules. Changed passwords to meet new minimum length requirements. This closes #3018 commit 8e233ca2ef55b2100174ee2b32a8668e9190a85c Author: joewitt Date: Thu Sep 20 23:24:17 2018 -0400 NIFI-4806 updated tika and a ton of other deps as found by dependency versions plugin This closes #3028 commit de685a7a741888c6ffd6468d89b536276975934c Author: pepov Date: Tue Oct 2 15:21:36 2018 +0200 NIFI-5656 Handle empty "Node Group" property in FileAccessPolicyProvider consistently, add some logs to help with debugging, add test for the invalid group name and for the empty case. This closes #3043. Signed-off-by: Kevin Doran commit b4c8e0179bc4b4e196eac15c7478fed784ee2bdc Merge: 895323f3c 76a9f98d7 Author: Brandon Devries Date: Tue Oct 2 11:08:43 2018 -0400 Merge branch 'pr2931' commit 76a9f98d7f43b0b23c7360a2d21ee81f5d19309e Author: Mike Moser Date: Wed Sep 5 15:49:44 2018 -0400 NIFI-3531 Catch and rethrow generic Exception to handle RuntimeExceptions, and allow test to pass This closes #2931. Signed-off-by: Brandon Devries commit 895323f3c2ab8541a663ba35ed98209c256f7c12 Merge: 813cc1f6a 4f538f1ec Author: Brandon Devries Date: Tue Oct 2 09:40:36 2018 -0400 Merge branch 'pr2949' commit 4f538f1ecb8e73859e8875bff10ab18971897180 Author: Mike Moser Date: Tue Aug 14 18:55:10 2018 +0000 NIFI-3672 updated PublishJMS message property docs This closes #2949 Signed-off-by: Brandon Devries commit 813cc1f6a25417c47eafd409089c7d2dec7ac12a Author: Matthew Burgess Date: Mon Oct 1 10:23:44 2018 -0400 NIFI-5650: Added Xerces to scripting bundle for Jython 2.7.1 This closes #3042 Signed-off-by: Mike Thomsen commit b1478cdb195722d7d285cad53d1f89c325868f55 Author: Mike Thomsen Date: Fri Apr 6 21:38:07 2018 -0400 NIFI-5051 Created ElasticSearch lookup service. NIFI-5051 Fixed checkstyle issue. NIFI-5051 Converted ES lookup service to use a SchemaRegistry. NIFI-5051 Cleaned up POM and added a simple unit test that uses a mock client service. NIFI-5051 Added change; waiting for feedback. NIFI-5051 Changed query setup based on code review. Changed tests to Groovy to make them easier to read with all of the inline JSON.
NIFI-5051 fixed a checkstyle issue. NIFI-5051 Rebased to clean up merge issues NIFI-5051 Added changes from a code review. NIFI-5051 Fixed a checkstyle issue. NIFI-5051 Added coverage generator for tests. Rebased. NIFI-5051 Updated service and switched it over to JsonInferenceSchemaRegistryService. NIFI-5051 Removed dead code. NIFI-5051 Fixed checkstyle errors. NIFI-5051 Refactored query builder. NIFI-5051 Added placeholder gitignore to force test compile. NIFI-5051 Added note explaining why the .gitignore file was needed. NIFI-5051 Made constructor public. NIFI-5051 Fixed path issue in client service integration tests. NIFI-5051 Added additional mapping capabilities to let users massage the result set into the fields they want. Signed-off-by: Matthew Burgess This closes #2615 commit 748cf745628dab20b7e71f12b5dcfe6ed0bbf134 Author: Andy LoPresto Date: Wed Sep 26 18:18:22 2018 -0700 NIFI-5628 Added content length check to OkHttpReplicationClient. Added unit tests. This closes #3035 commit 0dd382370bf139e0f8c1b22761e4aa306943dd77 Author: Colin Dean Date: Wed Sep 19 20:27:47 2018 -0400 NIFI-5612: Support JDBC drivers that return Long for unsigned ints Refactors tests in order to share code repeated in tests and to enable some parameterized testing. MySQL Connector/J 5.1.x in conjunction with MySQL 5.0.x will return a Long for ResultSet#getObject when the SQL type is an unsigned integer. This change prevents that error from occurring while implementing a more informational exception describing what the failing object's POJO type is in addition to its string value. Signed-off-by: Matthew Burgess This closes #3032 commit e24388aa7f23f056ec93b3205dd501514806672e Author: Jeff Storck Date: Tue Sep 25 18:30:19 2018 -0400 NIFI-5557 Added test in PutHDFSTest for IOException with a nested GSSException Resolved most of the code warnings in PutHDFSTest This closes #2971. commit 0f55cbfb9f49087492a333c59b63e146a1444d55 Author: Endre Zoltan Kovacs Date: Tue Aug 28 10:47:59 2018 +0200 NIFI-5557: handling expired ticket by rollback and penalization commit 2e1005e884cef70ea9c2eb1152d70e546ad2b5c3 Author: Mark Payne Date: Thu Sep 27 10:10:48 2018 -0400 NIFI-5640: Improved efficiency of Avro Reader and some methods of AvroTypeUtil. Also switched ServiceStateTransition to using read/write locks instead of synchronized blocks because profiling showed that significant time was spent in determining the state of a Controller Service when attempting to use it. Switching to a ReadLock should provide better performance there.
Signed-off-by: Matthew Burgess This closes #3036 commit ad4c886fbf2af2bc98ebe12200c4b119df67b90f Author: Mark Payne Date: Tue Sep 25 09:05:06 2018 -0400 NIFI-5634: When merging RPG entities, ensure that we only send back the ports that are common to all nodes - even if that means sending back no ports This closes #3030 commit 66eeb48802317cdff69fe83070d26dac7245294a Author: Mike Moser Date: Mon Aug 13 17:40:54 2018 +0000 NIFI-3672 Add support for strongly typed message properties in PublishJMS commit 8309747889c3187005a0ce4e4c0c11b1e3281e10 Author: Mike Moser Date: Wed Aug 1 20:11:35 2018 +0000 NIFI-3531 Moved session.recover in JMSConsumer to exceptional situations --- .../nifi/controller/queue/QueueSize.java | 4 + .../org/apache/nifi/flowfile/FlowFile.java | 4 + nifi-assembly/NOTICE | 2 +- .../TestCustomNotificationService.java | 2 - .../http/TestHttpNotificationServiceSSL.java | 16 +- .../src/test/resources/keystore.jks | Bin 0 -> 3088 bytes .../src/test/resources/localhost-ks.jks | Bin 3512 -> 0 bytes .../src/test/resources/localhost-ts.jks | Bin 1816 -> 0 bytes .../src/test/resources/truststore.jks | Bin 0 -> 911 bytes .../nifi-data-provenance-utils/pom.xml | 2 +- nifi-commons/nifi-expression-language/pom.xml | 6 +- .../language/StandardPreparedQuery.java | 21 +- .../CharSequenceTranslatorEvaluator.java | 4 +- nifi-commons/nifi-flowfile-packager/pom.xml | 9 +- .../apache/nifi/util/FlowFilePackagerV1.java | 2 +- nifi-commons/nifi-hl7-query-language/pom.xml | 18 +- .../org/apache/nifi/hl7/hapi/HapiField.java | 4 +- nifi-commons/nifi-json-utils/pom.xml | 2 +- .../org/apache/nifi/util/NiFiProperties.java | 31 + .../serialization/SimpleRecordSchema.java | 39 +- .../record/ResultSetRecordSet.java | 15 +- .../record/util/DataTypeUtils.java | 5 + .../record/TestDataTypeUtils.java | 13 + nifi-commons/nifi-schema-utils/pom.xml | 2 +- .../repository/schema/RecordIterator.java | 28 + .../repository/schema/SchemaRecordReader.java | 68 +- .../repository/schema/SchemaRecordWriter.java | 9 +- .../schema/SingleRecordIterator.java | 45 + nifi-commons/nifi-security-utils/pom.xml | 6 +- nifi-commons/nifi-site-to-site-client/pom.xml | 4 +- .../remote/client/http/TestHttpClient.java | 16 +- .../src/test/resources/certs/keystore.jks | Bin 0 -> 3088 bytes .../src/test/resources/certs/localhost-ks.jks | Bin 3512 -> 0 bytes .../src/test/resources/certs/localhost-ts.jks | Bin 1816 -> 0 bytes .../src/test/resources/certs/truststore.jks | Bin 0 -> 911 bytes nifi-commons/nifi-socket-utils/pom.xml | 2 +- .../apache/nifi/stream/io/StreamUtils.java | 34 +- nifi-commons/nifi-web-utils/pom.xml | 10 +- .../.SequentialAccessWriteAheadLog.java.swp | Bin 0 -> 16384 bytes .../nifi/wali/LengthDelimitedJournal.java | 139 +- .../wali/SequentialAccessWriteAheadLog.java | 33 +- .../apache/nifi/wali/WriteAheadJournal.java | 10 + .../org/wali/MinimalLockingWriteAheadLog.java | 3 + .../src/main/java/org/wali/SerDe.java | 34 + .../java/org/wali/WriteAheadRepository.java | 2 +- .../TestSequentialAccessWriteAheadLog.java | 89 +- .../test/java/org/wali/DummyRecordSerde.java | 83 + .../main/asciidoc/administration-guide.adoc | 7 + nifi-external/nifi-spark-receiver/pom.xml | 2 +- nifi-external/nifi-storm-spout/pom.xml | 2 +- .../nifi/controller/queue/FlowFileQueue.java | 97 +- .../queue/LoadBalanceCompression.java | 35 + .../controller/queue/LoadBalanceStrategy.java | 41 + .../queue/LoadBalancedFlowFileQueue.java | 69 + .../queue/LocalQueuePartitionDiagnostics.java | 32 + .../controller/queue/QueueDiagnostics.java | 28 + 
.../RemoteQueuePartitionDiagnostics.java | 30 + .../repository/FlowFileSwapManager.java | 27 +- .../nifi-amqp-processors/pom.xml | 2 +- .../nifi-atlas-reporting-task/pom.xml | 13 +- .../emulator/AtlasAPIV2ServerEmulator.java | 11 +- nifi-nar-bundles/nifi-atlas-bundle/pom.xml | 6 + .../nifi-avro-processors/pom.xml | 4 +- .../nifi-aws-abstract-processors/pom.xml | 25 +- .../nifi-aws-service-api/pom.xml | 23 + nifi-nar-bundles/nifi-aws-bundle/pom.xml | 2 +- .../azure/storage/DeleteAzureBlobStorage.java | 39 +- .../nifi-cassandra-processors/pom.xml | 5 + .../processors/cassandra/QueryCassandra.java | 2 +- .../nifi-ccda-processors/pom.xml | 2 +- .../nifi-cdc/nifi-cdc-api/pom.xml | 4 +- .../nifi-couchbase-processors/pom.xml | 7 +- .../nifi-druid-controller-service-api/pom.xml | 8 +- .../nifi-elasticsearch-client-service/pom.xml | 39 +- .../ElasticSearchLookupService.java | 315 ++++ ...g.apache.nifi.controller.ControllerService | 1 + .../additionalDetails.html | 53 + .../ElasticSearch5ClientService_IT.groovy | 148 ++ .../ElasticSearchLookupServiceTest.groovy | 69 + .../ElasticSearchLookupService_IT.groovy | 211 +++ .../TestControllerServiceProcessor.groovy} | 38 +- .../TestElasticSearchClientService.groovy | 75 + .../integration/TestSchemaRegistry.groovy | 41 + .../src/test/java/.gitignore | 1 + .../ElasticSearchClientService_IT.java | 165 -- .../src/test/resources/setup.script | 11 +- .../nifi-elasticsearch-processors/pom.xml | 4 +- .../pom.xml | 2 +- .../nifi-email-processors/pom.xml | 8 +- .../nifi/processors/email/TestListenSMTP.java | 12 +- .../src/test/resources/keystore.jks | Bin 0 -> 3088 bytes .../src/test/resources/truststore.jks | Bin 0 -> 911 bytes .../nifi-enrich-processors/pom.xml | 2 +- .../nifi-hadoop-utils/pom.xml | 2 +- .../nifi-processor-utils/pom.xml | 2 +- .../org/apache/nifi/avro/AvroTypeUtil.java | 126 +- .../WriteAvroSchemaAttributeStrategy.java | 22 +- .../nifi-standard-record-utils/pom.xml | 5 + .../java/org/apache/nifi/csv/CSVUtils.java | 2 +- .../org/apache/nifi/csv/CSVValidators.java | 2 +- .../nifi-record-utils/pom.xml | 10 + .../nifi-reporting-utils/pom.xml | 2 +- .../nifi-syslog-utils/pom.xml | 2 +- .../src/main/resources/META-INF/NOTICE | 2 +- .../nifi/web/api/dto/ConnectionDTO.java | 49 + .../diagnostics/ConnectionDiagnosticsDTO.java | 127 +- .../ConnectionDiagnosticsSnapshotDTO.java | 76 + .../diagnostics/LocalQueuePartitionDTO.java | 136 ++ .../diagnostics/ProcessorDiagnosticsDTO.java | 10 +- .../diagnostics/RemoteQueuePartitionDTO.java | 126 ++ .../FileAccessPolicyProvider.java | 21 +- .../FileAccessPolicyProviderTest.java | 39 +- .../SchemaRepositoryRecordSerde.java | 71 +- .../coordination/ClusterCoordinator.java | 51 +- .../ClusterTopologyEventListener.java | 32 + .../node/NodeConnectionState.java | 10 + .../node/NodeConnectionStatus.java | 45 +- .../coordination/node/OffloadCode.java | 40 + .../ClusterCoordinationProtocolSender.java | 9 + .../nifi/cluster/protocol/NodeIdentifier.java | 69 +- .../cluster/protocol/ProtocolHandler.java | 5 +- ...terCoordinationProtocolSenderListener.java | 6 + .../protocol/impl/SocketProtocolListener.java | 67 +- ...dardClusterCoordinationProtocolSender.java | 26 + .../message/AdaptedNodeConnectionStatus.java | 20 +- .../jaxb/message/AdaptedNodeIdentifier.java | 18 + .../message/NodeConnectionStatusAdapter.java | 6 +- .../jaxb/message/NodeIdentifierAdapter.java | 4 +- .../protocol/jaxb/message/ObjectFactory.java | 5 + .../protocol/message/OffloadMessage.java | 53 + .../protocol/message/ProtocolMessage.java | 20 +- 
.../nifi-cluster-protocol-context.xml | 14 +- .../testutils/DelayedProtocolHandler.java | 8 +- .../testutils/ReflexiveProtocolHandler.java | 8 +- .../heartbeat/AbstractHeartbeatMonitor.java | 13 +- .../ClusterProtocolHeartbeatMonitor.java | 23 +- .../http/StandardHttpResponseMapper.java | 4 +- .../http/replication/RequestReplicator.java | 8 +- .../ThreadPoolRequestReplicator.java | 82 +- .../okhttp/OkHttpReplicationClient.java | 99 +- .../node/NodeClusterCoordinator.java | 395 ++++- .../node/state/NodeIdentifierDescriptor.java | 167 ++ .../manager/ConnectionEntityMerger.java | 18 + .../ProcessorDiagnosticsEntityMerger.java | 149 +- .../RemoteProcessGroupEntityMerger.java | 14 +- .../IllegalNodeOffloadException.java | 38 + .../OffloadedNodeMutableRequestException.java | 39 + .../okhttp/OkHttpReplicationClientTest.groovy | 138 ++ .../node/NodeClusterCoordinatorSpec.groovy | 99 ++ .../integration/OffloadNodeITSpec.groovy | 50 + .../flow/TestPopularVoteFlowElection.java | 2 +- .../TestAbstractHeartbeatMonitor.java | 26 +- .../StandardHttpResponseMapperSpec.groovy | 6 +- .../CurrentUserEndpointMergerTest.java | 4 +- .../StatusHistoryEndpointMergerSpec.groovy | 3 +- .../TestThreadPoolRequestReplicator.java | 4 +- .../node/TestNodeClusterCoordinator.java | 86 +- .../nifi/cluster/integration/Cluster.java | 4 + .../integration/ClusterConnectionIT.java | 2 +- .../apache/nifi/cluster/integration/Node.java | 49 +- .../manager/ConnectionEntityMergerSpec.groovy | 2 +- .../ControllerServiceEntityMergerSpec.groovy | 2 +- .../manager/LabelEntityMergerSpec.groovy | 2 +- .../RemoteProcessGroupEntityMergerTest.java | 79 + .../{ => queue}/DropFlowFileRequest.java | 18 +- .../repository/ContentNotFoundException.java | 16 + .../registry/flow/FlowRegistryClient.java | 12 +- .../nifi/web/revision/RevisionManager.java | 15 +- .../registry/flow/TestFlowRegistryClient.java | 109 ++ .../nifi/connectable/StandardConnection.java | 103 +- .../controller/FileSystemSwapManager.java | 152 +- .../nifi/controller/FlowController.java | 142 +- .../controller/StandardFlowFileQueue.java | 1572 ----------------- .../nifi/controller/StandardFlowService.java | 75 +- .../controller/StandardFlowSynchronizer.java | 119 +- .../controller/StandardProcessorNode.java | 11 +- .../queue/AbstractFlowFileQueue.java | 460 +++++ .../queue/BlockingSwappablePriorityQueue.java | 84 + .../queue/ConnectionEventListener.java | 24 + .../controller/queue/DropFlowFileAction.java | 27 + .../queue/DropFlowFileRepositoryRecord.java | 91 + .../queue/FlowFileQueueContents.java | 46 + .../queue/FlowFileQueueFactory.java | 22 + .../controller/queue/FlowFileQueueSize.java | 94 + .../nifi/controller/queue/MaxQueueSize.java | 47 + .../queue/NopConnectionEventListener.java | 29 + .../controller/queue/QueuePrioritizer.java | 90 + .../queue/StandardFlowFileQueue.java | 221 +++ ...tandardLocalQueuePartitionDiagnostics.java | 60 + .../queue/StandardQueueDiagnostics.java | 40 + ...andardRemoteQueuePartitionDiagnostics.java | 53 + .../queue/SwappablePriorityQueue.java | 994 +++++++++++ .../nifi/controller/queue/TimePeriod.java | 41 + .../ContentRepositoryFlowFileAccess.java | 91 + .../clustered/FlowFileContentAccess.java | 29 + .../queue/clustered/SimpleLimitThreshold.java | 42 + .../SocketLoadBalancedFlowFileQueue.java | 1146 ++++++++++++ .../queue/clustered/TransactionThreshold.java | 26 + .../clustered/TransferFailureDestination.java | 51 + .../client/LoadBalanceFlowFileCodec.java | 27 + .../StandardLoadBalanceFlowFileCodec.java | 50 + 
.../client/async/AsyncLoadBalanceClient.java | 51 + .../async/AsyncLoadBalanceClientFactory.java | 24 + .../async/AsyncLoadBalanceClientRegistry.java | 32 + .../async/TransactionCompleteCallback.java | 26 + .../async/TransactionFailureCallback.java | 44 + .../client/async/nio/LoadBalanceSession.java | 641 +++++++ .../async/nio/NioAsyncLoadBalanceClient.java | 477 +++++ .../nio/NioAsyncLoadBalanceClientFactory.java | 50 + .../NioAsyncLoadBalanceClientRegistry.java | 134 ++ .../nio/NioAsyncLoadBalanceClientTask.java | 103 ++ .../client/async/nio/PeerChannel.java | 358 ++++ .../client/async/nio/RegisteredPartition.java | 75 + .../CorrelationAttributePartitioner.java | 61 + .../partition/FirstNodePartitioner.java | 43 + .../partition/FlowFilePartitioner.java | 53 + .../partition/LocalPartitionPartitioner.java | 42 + .../partition/LocalQueuePartition.java | 108 ++ .../NonLocalPartitionPartitioner.java | 58 + .../clustered/partition/QueuePartition.java | 102 ++ .../partition/RebalancingPartition.java | 45 + .../partition/RemoteQueuePartition.java | 352 ++++ .../partition/RoundRobinPartitioner.java | 44 + .../StandardRebalancingPartition.java | 222 +++ .../SwappablePriorityQueueLocalPartition.java | 175 ++ .../LoadBalanceProtocolConstants.java | 46 + .../server/ClusterLoadBalanceAuthorizer.java | 67 + .../server/ConnectionLoadBalanceServer.java | 251 +++ .../server/LoadBalanceAuthorizer.java | 24 + .../clustered/server/LoadBalanceProtocol.java | 35 + .../server/NotAuthorizedException.java | 26 + .../server/StandardLoadBalanceProtocol.java | 614 +++++++ .../server/TransactionAbortedException.java | 30 + .../repository/FileSystemRepository.java | 2 +- .../repository/RepositoryContext.java | 3 - .../repository/StandardProcessSession.java | 220 ++- .../serialization/FlowFromDOMFactory.java | 23 +- .../serialization/StandardFlowSerializer.java | 41 +- .../service/ServiceStateTransition.java | 85 +- .../manager/StandardStateManagerProvider.java | 32 +- .../nifi/fingerprint/FingerprintFactory.java | 41 +- .../nifi/groups/StandardProcessGroup.java | 77 +- .../processor/StandardProcessContext.java | 36 +- .../flow/StandardFlowRegistryClient.java | 5 +- .../flow/mapping/NiFiRegistryFlowMapper.java | 82 +- .../nifi/util/FlowDifferenceFilters.java | 44 + .../src/main/resources/FlowConfiguration.xsd | 88 +- .../controller/StandardFlowServiceSpec.groovy | 128 ++ .../NonLocalPartitionPartitionerSpec.groovy | 107 ++ .../nifi/controller/MockFlowFileRecord.java | 139 ++ .../nifi/controller/MockSwapManager.java | 178 ++ .../controller/TestStandardFlowFileQueue.java | 354 +--- .../queue/clustered/LoadBalancedQueueIT.java | 1345 ++++++++++++++ .../MockTransferFailureDestination.java | 62 + .../TestContentRepositoryFlowFileAccess.java | 130 ++ .../clustered/TestNaiveLimitThreshold.java | 60 + .../TestSocketLoadBalancedFlowFileQueue.java | 514 ++++++ .../clustered/TestSwappablePriorityQueue.java | 471 +++++ .../async/nio/TestLoadBalanceSession.java | 273 +++ .../TestStandardLoadBalanceProtocol.java | 656 +++++++ .../TestStandardProcessSession.java | 105 +- .../TestWriteAheadFlowFileRepository.java | 162 +- .../scheduling/TestProcessorLifecycle.java | 6 +- .../TestStandardProcessScheduler.java | 5 +- .../nifi/util/TestFlowDifferenceFilters.java | 50 + .../src/test/resources/localhost-ks.jks | Bin 0 -> 3076 bytes .../src/test/resources/localhost-ts.jks | Bin 0 -> 911 bytes .../src/test/resources/logback-test.xml | 6 +- .../repository/StandardRepositoryRecord.java | 45 +- .../claim/StandardResourceClaim.java | 4 +- 
.../nifi-framework/nifi-resources/pom.xml | 7 + .../src/main/resources/conf/authorizers.xml | 2 + .../src/main/resources/conf/logback.xml | 3 + .../src/main/resources/conf/nifi.properties | 7 + .../security/util/SslContextFactoryTest.java | 10 +- .../src/test/resources/keystore.jks | Bin 0 -> 3088 bytes .../src/test/resources/localhost-ks.jks | Bin 3512 -> 0 bytes .../src/test/resources/localhost-ts.jks | Bin 1816 -> 0 bytes .../src/test/resources/truststore.jks | Bin 0 -> 911 bytes .../io/socket/ssl/TestSSLSocketChannel.java | 381 ---- .../resources/dummy-certs/localhost-ks.jks | Bin 3512 -> 0 bytes .../resources/dummy-certs/localhost-ts.jks | Bin 1816 -> 0 bytes .../nifi-web/nifi-jetty/pom.xml | 9 +- .../nifi/web/server/HostHeaderHandler.java | 2 +- .../web/NiFiWebApiSecurityConfiguration.java | 16 + .../nifi/web/StandardNiFiServiceFacade.java | 15 +- .../nifi/web/api/ApplicationResource.java | 4 +- .../IllegalNodeOffloadExceptionMapper.java | 46 + .../apache/nifi/web/api/dto/DtoFactory.java | 104 +- .../web/dao/impl/StandardConnectionDAO.java | 14 + .../resources/access-control/keystore.jks | Bin 0 -> 3088 bytes .../resources/access-control/localhost-ks.jks | Bin 3052 -> 0 bytes .../resources/access-control/localhost-ts.jks | Bin 911 -> 0 bytes .../access-control/nifi-flow.properties | 8 +- .../resources/access-control/nifi.properties | 8 +- .../resources/access-control/truststore.jks | Bin 0 -> 911 bytes .../src/main/frontend/package-lock.json | 6 +- .../src/main/frontend/package.json | 2 +- .../src/main/resources/META-INF/NOTICE | 2 +- .../canvas/connection-configuration.jsp | 59 +- .../WEB-INF/partials/connection-details.jsp | 67 +- .../main/webapp/css/connection-details.css | 16 +- .../nifi-web-ui/src/main/webapp/css/graph.css | 16 +- .../webapp/js/nf/canvas/nf-component-state.js | 74 +- .../nf/canvas/nf-connection-configuration.js | 62 +- .../main/webapp/js/nf/canvas/nf-connection.js | 76 +- .../webapp/js/nf/canvas/nf-queue-listing.js | 272 +-- .../webapp/js/nf/cluster/nf-cluster-table.js | 75 +- .../src/main/webapp/js/nf/nf-common.js | 51 + .../webapp/js/nf/nf-connection-details.js | 26 +- .../nifi-framework-bundle/pom.xml | 34 +- .../nifi-gcp-services-api/pom.xml | 3 +- .../nifi-grpc-processors/pom.xml | 2 +- .../nifi/processors/grpc/ITListenGRPC.java | 10 +- .../nifi/processors/grpc/TestInvokeGRPC.java | 8 +- .../src/test/resources/keystore.jks | Bin 0 -> 3088 bytes .../src/test/resources/localhost-ks.jks | Bin 3512 -> 0 bytes .../src/test/resources/localhost-ts.jks | Bin 1816 -> 0 bytes .../src/test/resources/truststore.jks | Bin 0 -> 911 bytes .../nifi/processors/hadoop/PutHDFS.java | 33 + .../nifi/processors/hadoop/PutHDFSTest.java | 109 +- .../nifi-hadoop-libraries-bundle/pom.xml | 10 + .../nifi-hbase-processors/pom.xml | 13 +- .../nifi/hbase/io/JsonFullRowSerializer.java | 2 +- .../JsonQualifierAndValueRowSerializer.java | 2 +- .../nifi/hbase/io/JsonRowSerializer.java | 2 +- .../nifi-hive-processors/pom.xml | 5 + .../apache/nifi/util/hive/HiveJdbcCommon.java | 2 +- .../apache/nifi/util/hive/HiveJdbcCommon.java | 2 +- .../nifi/processors/orc/PutORCTest.java | 7 +- nifi-nar-bundles/nifi-hive-bundle/pom.xml | 11 + .../nifi-hl7-processors/pom.xml | 20 +- .../nifi-html-processors/pom.xml | 2 +- .../nifi-ignite-processors/pom.xml | 5 +- nifi-nar-bundles/nifi-ignite-bundle/pom.xml | 11 +- .../nifi-influxdb-processors/pom.xml | 2 +- .../nifi-jms-processors/pom.xml | 6 +- .../nifi/jms/processors/JMSConsumer.java | 10 +- .../nifi/jms/processors/JMSPublisher.java | 70 +- 
.../nifi/jms/processors/PublishJMS.java | 6 +- .../nifi/jms/processors/PublishJMSIT.java | 73 + .../nifi-jolt-record-bundle/pom.xml | 2 +- nifi-nar-bundles/nifi-kafka-bundle/pom.xml | 6 + .../nifi-kerberos-iaa-providers/pom.xml | 8 +- .../pom.xml | 10 + .../nifi-kite-bundle/nifi-kite-nar/pom.xml | 2 +- .../nifi-kite-processors/pom.xml | 7 +- .../processors/kite/ConvertCSVToAvro.java | 2 +- .../nifi/processors/kite/InferAvroSchema.java | 2 +- nifi-nar-bundles/nifi-kite-bundle/pom.xml | 6 + .../nifi-ldap-iaa-providers/pom.xml | 15 +- .../nifi-ldap-iaa-providers-bundle/pom.xml | 10 + .../nifi-media-processors/pom.xml | 8 +- .../nifi-mongodb-services/pom.xml | 2 +- .../processors/mqtt/common/MqttTestUtils.java | 8 +- .../mqtt/integration/TestConsumeMqttSSL.java | 6 +- .../mqtt/integration/TestPublishMqttSSL.java | 6 +- .../src/test/resources/keystore.jks | Bin 0 -> 3088 bytes .../src/test/resources/localhost-ks.jks | Bin 3512 -> 0 bytes .../src/test/resources/localhost-ts.jks | Bin 1816 -> 0 bytes .../src/test/resources/truststore.jks | Bin 0 -> 911 bytes .../nifi-network-processors/pom.xml | 2 +- .../nifi-network-utils/pom.xml | 52 +- .../nifi-parquet-processors/pom.xml | 1 - nifi-nar-bundles/nifi-parquet-bundle/pom.xml | 11 +- .../nifi-poi-processors/pom.xml | 2 +- .../poi/ConvertExcelToCSVProcessor.java | 2 +- .../poi/ConvertExcelToCSVProcessorTest.java | 50 +- ...dWriteAheadProvenanceRepositoryTest.groovy | 2 + ... ITestPersistentProvenanceRepository.java} | 2 +- .../nifi-provenance-repository-bundle/pom.xml | 4 +- .../nifi-redis-extensions/pom.xml | 2 +- nifi-nar-bundles/nifi-redis-bundle/pom.xml | 2 +- .../nifi-registry-service/pom.xml | 2 +- .../nifi-rethinkdb-processors/pom.xml | 2 +- .../nifi-riemann-processors/pom.xml | 2 +- nifi-nar-bundles/nifi-riemann-bundle/pom.xml | 8 +- .../src/main/resources/META-INF/NOTICE | 14 + .../nifi-scripting-processors/pom.xml | 10 +- .../nifi-scripting-bundle/pom.xml | 2 +- .../nifi-solr-processors/pom.xml | 4 +- .../nifi-livy-processors/pom.xml | 2 +- .../livy/TestExecuteSparkInteractiveSSL.java | 10 +- .../src/test/resources/keystore.jks | Bin 0 -> 3088 bytes .../src/test/resources/localhost-ks.jks | Bin 3512 -> 0 bytes .../src/test/resources/localhost-ts.jks | Bin 1816 -> 0 bytes .../src/test/resources/truststore.jks | Bin 0 -> 911 bytes nifi-nar-bundles/nifi-spark-bundle/pom.xml | 7 +- nifi-nar-bundles/nifi-splunk-bundle/pom.xml | 2 +- .../nifi-spring-processors/pom.xml | 8 +- .../additionalDetails.html | 14 +- .../src/test/resources/aggregated.xml | 2 +- .../src/test/resources/fromSpringOnly.xml | 4 +- .../src/test/resources/requestReply.xml | 2 +- .../src/test/resources/toSpringOnly.xml | 2 +- .../standard/AbstractExecuteSQL.java | 369 ++++ .../standard/AbstractQueryDatabaseTable.java | 483 +++++ .../nifi/processors/standard/ExecuteSQL.java | 371 +--- .../processors/standard/ExecuteSQLRecord.java | 147 ++ .../processors/standard/GenerateFlowFile.java | 2 +- .../nifi/processors/standard/ListenHTTP.java | 29 +- .../nifi/processors/standard/PutEmail.java | 12 +- .../standard/QueryDatabaseTable.java | 453 +---- .../standard/QueryDatabaseTableRecord.java | 148 ++ .../nifi/processors/standard/SplitText.java | 87 +- .../nifi/processors/standard/SplitXml.java | 2 +- .../standard/servlets/ListenHTTPServlet.java | 353 ++-- .../standard/sql/DefaultAvroSqlWriter.java | 67 + .../standard/sql/RecordSqlWriter.java | 158 ++ .../processors/standard/sql/SqlWriter.java | 77 + .../processors/standard/util/JdbcCommon.java | 25 +- 
.../org.apache.nifi.processor.Processor | 2 + .../standard/TestGetHTTPGroovy.groovy | 8 +- .../standard/TestPostHTTPGroovy.groovy | 8 +- .../standard/ITListenAndPutSyslog.java | 8 +- .../QueryDatabaseTableRecordTest.java | 1332 ++++++++++++++ .../standard/QueryDatabaseTableTest.java | 2 +- .../processors/standard/TestExecuteSQL.java | 44 +- .../standard/TestExecuteSQLRecord.java | 376 ++++ .../nifi/processors/standard/TestGetHTTP.java | 8 +- .../standard/TestHandleHttpRequest.java | 8 +- .../processors/standard/TestInvokeHTTP.java | 8 +- .../standard/TestInvokeHttpSSL.java | 36 +- .../processors/standard/TestListenHTTP.java | 190 +- .../processors/standard/TestListenRELP.java | 8 +- .../processors/standard/TestListenTCP.java | 24 +- .../standard/TestListenTCPRecord.java | 24 +- .../processors/standard/TestPostHTTP.java | 84 +- .../processors/standard/TestPutEmail.java | 14 +- .../processors/standard/TestPutTcpSSL.java | 8 +- .../standard/util/JdbcCommonTestUtils.java | 60 + .../standard/util/TCPTestServer.java | 4 +- .../standard/util/TestJdbcCommon.java | 84 +- .../util/TestJdbcCommonConvertToAvro.java | 152 ++ .../src/test/resources/keystore.jks | Bin 0 -> 3088 bytes .../src/test/resources/localhost-ks.jks | Bin 3512 -> 0 bytes .../src/test/resources/localhost-ts.jks | Bin 1816 -> 0 bytes .../src/test/resources/truststore.jks | Bin 0 -> 911 bytes nifi-nar-bundles/nifi-standard-bundle/pom.xml | 30 +- .../src/test/resources/keystore.jks | Bin 0 -> 3088 bytes .../src/test/resources/localhost-ks.jks | Bin 3512 -> 0 bytes .../src/test/resources/localhost-ts.jks | Bin 1816 -> 0 bytes .../src/test/resources/truststore.jks | Bin 0 -> 911 bytes .../nifi-hbase_1_1_2-client-service/pom.xml | 2 +- .../pom.xml | 6 + .../nifi-hwx-schema-registry-service/pom.xml | 4 +- .../nifi-lookup-services/pom.xml | 6 +- .../pom.xml | 14 +- .../java/org/apache/nifi/avro/AvroReader.java | 19 +- .../avro/AvroReaderWithEmbeddedSchema.java | 12 +- .../avro/AvroReaderWithExplicitSchema.java | 17 +- .../nifi/avro/NonCachingDatumReader.java | 65 + .../apache/nifi/csv/TestCSVRecordReader.java | 2 +- .../nifi/csv/TestJacksonCSVRecordReader.java | 2 +- .../ssl/StandardSSLContextServiceTest.groovy | 10 +- .../nifi/ssl/SSLContextServiceTest.java | 109 +- .../src/test/resources/diffpass-ks.jks | Bin 2246 -> 0 bytes .../resources/keystore-different-password.jks | Bin 0 -> 3088 bytes .../src/test/resources/keystore.jks | Bin 0 -> 3088 bytes .../src/test/resources/localhost-ks.jks | Bin 3512 -> 0 bytes .../src/test/resources/localhost-ts.jks | Bin 1816 -> 0 bytes .../src/test/resources/truststore.jks | Bin 0 -> 911 bytes .../nifi-update-attribute-processor/pom.xml | 2 +- .../attributes/UpdateAttribute.java | 151 +- .../attributes/TestUpdateAttribute.java | 29 - .../nifi-update-attribute-ui/pom.xml | 4 +- .../nifi-websocket-services-jetty/pom.xml | 5 - .../example/WebSocketClientExample.java | 8 +- .../example/WebSocketServerExample.java | 4 +- .../ITJettyWebSocketSecureCommunication.java | 8 +- .../src/test/resources/certs/keystore.jks | Bin 0 -> 3088 bytes .../src/test/resources/certs/localhost-ks.jks | Bin 2246 -> 0 bytes .../src/test/resources/certs/localhost-ts.jks | Bin 1816 -> 0 bytes .../src/test/resources/certs/localhost.crt | Bin 891 -> 0 bytes .../src/test/resources/certs/truststore.jks | Bin 0 -> 911 bytes .../nifi-windows-event-log-processors/pom.xml | 4 +- .../event/log/ConsumeWindowsEventLog.java | 95 +- nifi-toolkit/nifi-toolkit-admin/pom.xml | 6 +- nifi-toolkit/nifi-toolkit-cli/pom.xml | 4 +- 
.../impl/client/nifi/ControllerClient.java | 14 + .../nifi/impl/JerseyControllerClient.java | 87 + .../cli/impl/command/CommandOption.java | 3 + .../impl/command/nifi/NiFiCommandGroup.java | 12 + .../impl/command/nifi/nodes/ConnectNode.java | 67 + .../impl/command/nifi/nodes/DeleteNode.java | 58 + .../command/nifi/nodes/DisconnectNode.java | 67 + .../cli/impl/command/nifi/nodes/GetNode.java | 59 + .../cli/impl/command/nifi/nodes/GetNodes.java | 52 + .../impl/command/nifi/nodes/OffloadNode.java | 67 + .../toolkit/cli/impl/result/NodeResult.java | 48 + .../toolkit/cli/impl/result/NodesResult.java | 66 + ...stVersionedFlowSnapshotMetadataResult.java | 10 +- .../nifi-toolkit-encrypt-config/pom.xml | 4 +- nifi-toolkit/nifi-toolkit-s2s/pom.xml | 4 +- nifi-toolkit/nifi-toolkit-tls/pom.xml | 8 +- .../TlsToolkitGetStatusCommandLineTest.java | 24 +- .../test/resources/localhost/truststore.jks | Bin 927 -> 911 bytes .../nifi-toolkit-zookeeper-migrator/pom.xml | 2 +- nifi-toolkit/pom.xml | 7 + pom.xml | 5 +- 500 files changed, 24825 insertions(+), 5671 deletions(-) create mode 100644 nifi-bootstrap/src/test/resources/keystore.jks delete mode 100755 nifi-bootstrap/src/test/resources/localhost-ks.jks delete mode 100755 nifi-bootstrap/src/test/resources/localhost-ts.jks create mode 100644 nifi-bootstrap/src/test/resources/truststore.jks create mode 100644 nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/RecordIterator.java create mode 100644 nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SingleRecordIterator.java create mode 100644 nifi-commons/nifi-site-to-site-client/src/test/resources/certs/keystore.jks delete mode 100755 nifi-commons/nifi-site-to-site-client/src/test/resources/certs/localhost-ks.jks delete mode 100755 nifi-commons/nifi-site-to-site-client/src/test/resources/certs/localhost-ts.jks create mode 100644 nifi-commons/nifi-site-to-site-client/src/test/resources/certs/truststore.jks create mode 100644 nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/.SequentialAccessWriteAheadLog.java.swp create mode 100644 nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalanceCompression.java create mode 100644 nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalanceStrategy.java create mode 100644 nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalancedFlowFileQueue.java create mode 100644 nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LocalQueuePartitionDiagnostics.java create mode 100644 nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/QueueDiagnostics.java create mode 100644 nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/RemoteQueuePartitionDiagnostics.java create mode 100644 nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/java/org/apache/nifi/elasticsearch/ElasticSearchLookupService.java create mode 100644 nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/resources/docs/org.apache.nifi.elasticsearch.ElasticSearchLookupService/additionalDetails.html create mode 100644 nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearch5ClientService_IT.groovy create mode 100644 nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearchLookupServiceTest.groovy 
create mode 100644 nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearchLookupService_IT.groovy rename nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/{java/org/apache/nifi/elasticsearch/integration/TestControllerServiceProcessor.java => groovy/org/apache/nifi/elasticsearch/integration/TestControllerServiceProcessor.groovy} (51%) create mode 100644 nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/TestElasticSearchClientService.groovy create mode 100644 nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/TestSchemaRegistry.groovy create mode 100644 nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/java/.gitignore delete mode 100644 nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/java/org/apache/nifi/elasticsearch/integration/ElasticSearchClientService_IT.java create mode 100644 nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/src/test/resources/keystore.jks create mode 100644 nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/src/test/resources/truststore.jks create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ConnectionDiagnosticsSnapshotDTO.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/LocalQueuePartitionDTO.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/RemoteQueuePartitionDTO.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/ClusterTopologyEventListener.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/OffloadCode.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/message/OffloadMessage.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/node/state/NodeIdentifierDescriptor.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/exception/IllegalNodeOffloadException.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/exception/OffloadedNodeMutableRequestException.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/groovy/org/apache/nifi/cluster/coordination/http/replication/okhttp/OkHttpReplicationClientTest.groovy create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/groovy/org/apache/nifi/cluster/coordination/node/NodeClusterCoordinatorSpec.groovy create mode 100644 
nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/groovy/org/apache/nifi/cluster/integration/OffloadNodeITSpec.groovy rename nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/{ => queue}/DropFlowFileRequest.java (82%) create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/test/java/org/apache/nifi/registry/flow/TestFlowRegistryClient.java delete mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowFileQueue.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/AbstractFlowFileQueue.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/BlockingSwappablePriorityQueue.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/ConnectionEventListener.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/DropFlowFileAction.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/DropFlowFileRepositoryRecord.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueContents.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueFactory.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueSize.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/MaxQueueSize.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/NopConnectionEventListener.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/QueuePrioritizer.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardFlowFileQueue.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardLocalQueuePartitionDiagnostics.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardQueueDiagnostics.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardRemoteQueuePartitionDiagnostics.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/SwappablePriorityQueue.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/TimePeriod.java create mode 100644 
nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/ContentRepositoryFlowFileAccess.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/FlowFileContentAccess.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/SimpleLimitThreshold.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/SocketLoadBalancedFlowFileQueue.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/TransactionThreshold.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/TransferFailureDestination.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/LoadBalanceFlowFileCodec.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/StandardLoadBalanceFlowFileCodec.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClient.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClientFactory.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClientRegistry.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/TransactionCompleteCallback.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/TransactionFailureCallback.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/LoadBalanceSession.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClient.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientFactory.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientRegistry.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientTask.java create mode 100644 
nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/PeerChannel.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/RegisteredPartition.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/CorrelationAttributePartitioner.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/FirstNodePartitioner.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/FlowFilePartitioner.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/LocalPartitionPartitioner.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/LocalQueuePartition.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/NonLocalPartitionPartitioner.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/QueuePartition.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RebalancingPartition.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RemoteQueuePartition.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RoundRobinPartitioner.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/StandardRebalancingPartition.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/SwappablePriorityQueueLocalPartition.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/protocol/LoadBalanceProtocolConstants.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/ClusterLoadBalanceAuthorizer.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/ConnectionLoadBalanceServer.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/LoadBalanceAuthorizer.java create mode 100644 
nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/LoadBalanceProtocol.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/NotAuthorizedException.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/StandardLoadBalanceProtocol.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/TransactionAbortedException.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/StandardFlowServiceSpec.groovy create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/queue/clustered/partition/NonLocalPartitionPartitionerSpec.groovy create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/MockFlowFileRecord.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/MockSwapManager.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/LoadBalancedQueueIT.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/MockTransferFailureDestination.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestContentRepositoryFlowFileAccess.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestNaiveLimitThreshold.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestSocketLoadBalancedFlowFileQueue.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestSwappablePriorityQueue.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/client/async/nio/TestLoadBalanceSession.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/server/TestStandardLoadBalanceProtocol.java create mode 100755 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/resources/localhost-ks.jks create mode 100755 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/resources/localhost-ts.jks create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-security/src/test/resources/keystore.jks delete mode 100755 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-security/src/test/resources/localhost-ks.jks delete mode 100755 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-security/src/test/resources/localhost-ts.jks create mode 100644 
nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-security/src/test/resources/truststore.jks delete mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-site-to-site/src/test/java/org/apache/nifi/remote/io/socket/ssl/TestSSLSocketChannel.java delete mode 100755 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-site-to-site/src/test/resources/dummy-certs/localhost-ks.jks delete mode 100755 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-site-to-site/src/test/resources/dummy-certs/localhost-ts.jks create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-web/nifi-web-api/src/main/java/org/apache/nifi/web/api/config/IllegalNodeOffloadExceptionMapper.java create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-web/nifi-web-api/src/test/resources/access-control/keystore.jks delete mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-web/nifi-web-api/src/test/resources/access-control/localhost-ks.jks delete mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-web/nifi-web-api/src/test/resources/access-control/localhost-ts.jks create mode 100644 nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-web/nifi-web-api/src/test/resources/access-control/truststore.jks create mode 100644 nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/resources/keystore.jks delete mode 100755 nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/resources/localhost-ks.jks delete mode 100755 nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/resources/localhost-ts.jks create mode 100644 nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/resources/truststore.jks create mode 100644 nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/resources/keystore.jks delete mode 100755 nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/resources/localhost-ks.jks delete mode 100755 nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/resources/localhost-ts.jks create mode 100644 nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/resources/truststore.jks rename nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/java/org/apache/nifi/provenance/{TestPersistentProvenanceRepository.java => ITestPersistentProvenanceRepository.java} (99%) create mode 100644 nifi-nar-bundles/nifi-spark-bundle/nifi-livy-processors/src/test/resources/keystore.jks delete mode 100755 nifi-nar-bundles/nifi-spark-bundle/nifi-livy-processors/src/test/resources/localhost-ks.jks delete mode 100755 nifi-nar-bundles/nifi-spark-bundle/nifi-livy-processors/src/test/resources/localhost-ts.jks create mode 100644 nifi-nar-bundles/nifi-spark-bundle/nifi-livy-processors/src/test/resources/truststore.jks create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractExecuteSQL.java create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractQueryDatabaseTable.java create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQLRecord.java create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecord.java create mode 100644 
nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/DefaultAvroSqlWriter.java create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/RecordSqlWriter.java create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/SqlWriter.java create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecordTest.java create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQLRecord.java create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/JdbcCommonTestUtils.java create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/TestJdbcCommonConvertToAvro.java create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/resources/keystore.jks delete mode 100755 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/resources/localhost-ks.jks delete mode 100755 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/resources/localhost-ts.jks create mode 100644 nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/resources/truststore.jks create mode 100644 nifi-nar-bundles/nifi-standard-services/nifi-distributed-cache-services-bundle/nifi-distributed-cache-server/src/test/resources/keystore.jks delete mode 100755 nifi-nar-bundles/nifi-standard-services/nifi-distributed-cache-services-bundle/nifi-distributed-cache-server/src/test/resources/localhost-ks.jks delete mode 100755 nifi-nar-bundles/nifi-standard-services/nifi-distributed-cache-services-bundle/nifi-distributed-cache-server/src/test/resources/localhost-ts.jks create mode 100644 nifi-nar-bundles/nifi-standard-services/nifi-distributed-cache-services-bundle/nifi-distributed-cache-server/src/test/resources/truststore.jks create mode 100644 nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/NonCachingDatumReader.java delete mode 100644 nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/diffpass-ks.jks create mode 100644 nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/keystore-different-password.jks create mode 100644 nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/keystore.jks delete mode 100755 nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/localhost-ks.jks delete mode 100755 nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/localhost-ts.jks create mode 100644 nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/truststore.jks create mode 100644 nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/resources/certs/keystore.jks delete mode 100755 nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/resources/certs/localhost-ks.jks 
delete mode 100755 nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/resources/certs/localhost-ts.jks delete mode 100644 nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/resources/certs/localhost.crt create mode 100644 nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/resources/certs/truststore.jks create mode 100644 nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/ConnectNode.java create mode 100644 nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/DeleteNode.java create mode 100644 nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/DisconnectNode.java create mode 100644 nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/GetNode.java create mode 100644 nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/GetNodes.java create mode 100644 nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/OffloadNode.java create mode 100644 nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/result/NodeResult.java create mode 100644 nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/result/NodesResult.java diff --git a/nifi-api/src/main/java/org/apache/nifi/controller/queue/QueueSize.java b/nifi-api/src/main/java/org/apache/nifi/controller/queue/QueueSize.java index 35a860b32e34..50157e2e0719 100644 --- a/nifi-api/src/main/java/org/apache/nifi/controller/queue/QueueSize.java +++ b/nifi-api/src/main/java/org/apache/nifi/controller/queue/QueueSize.java @@ -63,6 +63,10 @@ public QueueSize add(final QueueSize other) { return new QueueSize(objectCount + other.getObjectCount(), totalSizeBytes + other.getByteCount()); } + public QueueSize add(final int count, final long bytes) { + return new QueueSize(objectCount + count, totalSizeBytes + bytes); + } + @Override public String toString() { return "QueueSize[FlowFiles=" + objectCount + ", ContentSize=" + NumberFormat.getNumberInstance().format(totalSizeBytes) + " Bytes]"; diff --git a/nifi-api/src/main/java/org/apache/nifi/flowfile/FlowFile.java b/nifi-api/src/main/java/org/apache/nifi/flowfile/FlowFile.java index 7d0e27ebb68c..c9eb49a4f210 100644 --- a/nifi-api/src/main/java/org/apache/nifi/flowfile/FlowFile.java +++ b/nifi-api/src/main/java/org/apache/nifi/flowfile/FlowFile.java @@ -121,5 +121,9 @@ public static String validateKey(final String key) { } return key; } + + public static boolean isValid(final String key) { + return key != null && !key.trim().isEmpty(); + } } } diff --git a/nifi-assembly/NOTICE b/nifi-assembly/NOTICE index 0bea06a1047d..dd2bf6d647bd 100644 --- a/nifi-assembly/NOTICE +++ b/nifi-assembly/NOTICE @@ -1873,4 +1873,4 @@ SIL OFL 1.1 ****************** The following binary components are provided under the SIL Open Font License 1.1 - (SIL OFL 1.1) FontAwesome (4.6.1 - http://fortawesome.github.io/Font-Awesome/license/) + (SIL OFL 1.1) FontAwesome (4.7.0 - https://fontawesome.com/license/free) diff --git a/nifi-bootstrap/src/test/groovy/org/apache/nifi/bootstrap/notification/TestCustomNotificationService.java b/nifi-bootstrap/src/test/groovy/org/apache/nifi/bootstrap/notification/TestCustomNotificationService.java index f1c2487b6bf4..515d2acba5ca 100644 --- 
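
As a quick illustration of the QueueSize.add(int, long) convenience method introduced in the hunk above, here is a minimal, hedged sketch. It assumes only the QueueSize(int, long) constructor and the getObjectCount()/getByteCount() getters that already appear elsewhere in this patch; the wrapper class and main method are illustrative, not part of the change.

    import org.apache.nifi.controller.queue.QueueSize;

    public class QueueSizeAddExample {
        public static void main(String[] args) {
            // Start with 10 FlowFiles totalling 1,024 bytes.
            QueueSize size = new QueueSize(10, 1024L);

            // The new overload folds a count/byte delta in directly,
            // instead of first wrapping the delta in another QueueSize.
            QueueSize updated = size.add(5, 2048L);

            System.out.println(updated.getObjectCount()); // 15
            System.out.println(updated.getByteCount());   // 3072
        }
    }
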
a/nifi-bootstrap/src/test/groovy/org/apache/nifi/bootstrap/notification/TestCustomNotificationService.java +++ b/nifi-bootstrap/src/test/groovy/org/apache/nifi/bootstrap/notification/TestCustomNotificationService.java @@ -81,6 +81,4 @@ public void notify(NotificationContext context, NotificationType type, String su logger.info(context.getProperty(CUSTOM_USERNAME).evaluateAttributeExpressions().getValue()); logger.info(context.getProperty(CUSTOM_PASSWORD).evaluateAttributeExpressions().getValue()); } - - } diff --git a/nifi-bootstrap/src/test/java/org/apache/nifi/bootstrap/http/TestHttpNotificationServiceSSL.java b/nifi-bootstrap/src/test/java/org/apache/nifi/bootstrap/http/TestHttpNotificationServiceSSL.java index 7d1d452cff34..5ab8cab496be 100644 --- a/nifi-bootstrap/src/test/java/org/apache/nifi/bootstrap/http/TestHttpNotificationServiceSSL.java +++ b/nifi-bootstrap/src/test/java/org/apache/nifi/bootstrap/http/TestHttpNotificationServiceSSL.java @@ -42,12 +42,12 @@ public class TestHttpNotificationServiceSSL extends TestHttpNotificationService " http-notification\n"+ " org.apache.nifi.bootstrap.notification.http.HttpNotificationService\n"+ " ${test.server}\n"+ - " ./src/test/resources/localhost-ts.jks\n"+ + " ./src/test/resources/truststore.jks\n"+ " JKS\n"+ - " localtest\n"+ - " ./src/test/resources/localhost-ks.jks\n"+ + " passwordpassword\n"+ + " ./src/test/resources/keystore.jks\n"+ " JKS\n"+ - " localtest\n"+ + " passwordpassword\n"+ " ${literal('testing')}\n"+ " \n"+ ""; @@ -62,12 +62,12 @@ public static void startServer() throws IOException, UnrecoverableKeyException, mockWebServer = new MockWebServer(); final SSLContext sslContext = SslContextFactory.createSslContext( - "./src/test/resources/localhost-ks.jks", - "localtest".toCharArray(), + "./src/test/resources/keystore.jks", + "passwordpassword".toCharArray(), null, "JKS", - "./src/test/resources/localhost-ts.jks", - "localtest".toCharArray(), + "./src/test/resources/truststore.jks", + "passwordpassword".toCharArray(), "JKS", SslContextFactory.ClientAuth.REQUIRED, "TLS"); diff --git a/nifi-bootstrap/src/test/resources/keystore.jks b/nifi-bootstrap/src/test/resources/keystore.jks new file mode 100644 index 0000000000000000000000000000000000000000..246fe888efbb981188c9dcca109b5d2c52f2b435 GIT binary patch literal 3088 zcmb`|c{tSF9suz9%_a@TzDvU}%JBY1_7`DFP1&+#U$ZAATQ9;`LzV{F%6MfdJ87|v zE2AhhmLf7Oyo_p0MWJw2_jzyc^W6K~`^Wpod7kq<=RDu@$M<}`XY<|WI|u{<-5$XA zvE!7#kH1Qw_qpu_?~N4xO$Y=51D%~@XpW4zpN#`sJCe8iw3r=g#nrL- zc}e<${}VaCYCr4gf{ezT(R=D z-nlpW#qF(FTF*1$2O#WiubmF;Hrxmy6*igNRlfI9l~@uN!qhs~FEEGiMBlUNgZzPX(j)?E&4g~kNMFchr_X$68d zLt}BE%T`bPS@41`N-iwN`9RK`z@zznQIT?}DVA}T$?N6wuhG{w2?-ZIV8Z4AapH~> zM=w+HO5Fp%YPU&g%=71;K{*z_W9CqOV7L|boJ98Vakn zj}6I7pIDmnvrHDGH%#ao7SjJ1`5HddZo{Zee{!RGW`+H+BrM;}xmtv)l}OxJAZh8- zwT`Bb9j|;Yw_=HDi5fgOoK;}HBaAL{SGgCDyYgC&a%im9w+!Rh4OO{I+3JX2BP;2q`9{!Jli?d3(E_2u<*?$NuwN zAx~K%jw`&zdYQkM#CBOGN@Z)7Q<2%226`=Tiu8=+T0z;V zY7d2carE2qJ_kfpxZ~T^YKhz^<)mJ)c!*1P+p8$2xI%eB(zQC(9^*_&h|gtE3?{fC_^JA?rpHl7SEY8Zz?j!0+^9w##-C~jZ9C0k?>zlY?s;}AZqkO zbydnL6y`L;WU);zfw*aVg|DuDb_E^dVtr1h)_rk&z2RQwX?`5pO;@!2(%#Elv zUe?<*C&k;IN?al00~>7(D+{R{ic4NlN$k7d`Kle9?n&9GU&+1B-px6N;$i0b^mLdH z$(AH9DJAw4%+ES<-~96R++imu41fUT@mIn4Vo+wg1TuVZQMj;q`m}DIxpU)D>FgW# zCt@$))iSz4*>BtOaB)yHPFQ#tq@;HhsC0~}gtS`Jb~GUw{o7yR_5m~iY{B6$C~Otv z{uT?tp&;Z(Y6Z9`D2&{({aqpuTrlXLGvG&Rfp4kF|9${JO@A)o_WRi`ApkLd%?d`^ zwVRHvkb+>ylrv{t<84w-ewm=TU#?Km9_vmc&9~O%6%D7U^XiNP=!ZoKGTfS%Z}Nw) zH{x9_j=9q}jQmOY)74O+sC%~#z>AO)@0IaZlNPG}wW`Bg=)g&(tXNyq_!yt+OrN=~ 
zI($%TXQXiyc7nf@_hT*hgCy0khUrx^=WzesaX3$8g63@cB7^&p{McFHy1vdY==?h9 zr$i9-K5UdfLvmB==xlIx+U1VV&1Bu2@vaB97}@x`Igcs&i$SY;of|i20_+_wEhLNk z+=yMjh0Gcl{PF_zdC}bg6b%M;P1(`c(n3>q$pDAw=cgoWJ+UFRdnX}(x-;8$N7{Br z_{kkw;BRtA_^UgDhJ(-XdZNP~%U{gf7FCwHzJh|Rf_+h9s``swy%q}DLj3H6C zm&5jubG)kdY)^pIY2viya-LOlTTWE{Q#6J~$`{9ISBVxD5%5QXmr|5=0xtV6gXHrVgG0f?qugFkf{fs5ABY4YFOs`kuIZEzZ*rUX}v9X zN><>P7`!CrHo!+=rw-Pd*t`9=BZ6as?F1UlV0w}EQMEuBMHopBcmt14|g&sQmlf^%aznq zUn5%e{qm(@k9XT@{BRcw#{5+cunF?~P=f$r+me1V`5(#sPm|wG|95zQ?aSX?xh;m} z?PMiEdK%D&u3H6GI^rhnY*xAGv<;Iqj>{WtWW@EZletfeN9l*o^*{aEh^`J6S`PPx zb!lA+afuH^Y<;QMQN8n<1{6v(4GYP(NR)PaHRS}EU0EA{9Fv&6>jAFDwB;aj&{_Ji z%^1H5<93JiC5io0Rj&h9D-N^eGlyk6Ijv?(^|B%=o%YPcg*(0Cu1 literal 0 HcmV?d00001 diff --git a/nifi-bootstrap/src/test/resources/localhost-ks.jks b/nifi-bootstrap/src/test/resources/localhost-ks.jks deleted file mode 100755 index df36197d92ab8e9870f42666d74c47646fd56f26..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3512 zcmchZXH*l&7RQqiLLfv+Xd3T`p&Iir~-L#Y=#ud?{E1Ldil!ycf^TKF)2%4dD_ z&CRl2juv-w;hX`>r;n!ME0*eJZQo{liYbqFr%&s4KWji3-S|{|Q#z3Bi!_n-HQIvn z)_yhbW3OmoReGZ$;mdsOy)j4ml{e?MpM3BXEZ&%y>@=UK++W7rcU+QDvQxbsYBP>C ziqWW_4}oMI2<5S^ml11u$vzs(Bz1QY%@RE`7dI!_J9pQZTH;ai+~*FZ-!&0FO}AsY zOxhC^^;ctKwcW!%@WtyMsu@6xd3zdv(I!8(v5$IseOUHF#yBeb=(KkBD?D*{)a_{6 zy11;ZtH1s5w8!+ewZvnrkKmE%X*#>Ul%b`b!V6_&L1)$_<6^i6k7Bh$Cbm8X7HN40 zS#G)q)jhM1yqIk|ug4$}yr>lNM^7CDi=S{rQqn53pE8J!Vk=?&Q_pATc&ICwBQ zS(^FTsqy1f=9leGJUj=gReI>!b5N4p{xQ7Yh?)gcpugwPJJKnkHLG#|+$oVkg4yV1aO1A$e7 zaQjo^Q#=uo%^bn4wLVp1-Lpy>m3Om-GmM2@#_FNth9W;Io4*MtEVVL^kgC7SFA-we z#qVjp#>O>$RucpY72eI-)`&+06CPE;lJYi4}@3m`# zJ_AU}qlHP&l8^Sxdy9$-4gOUb4UL4637oYGzAr%oZTy>dW-CT`%o3B(duSJ1(e{$Y zM<9UyvWx;+833RQMN{a4(G-wlHXR5E0)ZV>5?#@72%}__LDViB2!zoC&;$$&%?P2h z0z(iWD~mq^C<3ITh2caaj#n5E%ofhx0nUQPL~nPTGlqqB22Ex{K(u_Eac+1F2b%p@ zfFWRi2!bZ=dhQr@H0!ZShxiYx(fr(S%o#KWt$@YIDPiPok3$Sr4*fIyhqIvoh5uR( z+G9aS0kQzl6d)6b0t5omn(X@$hGj=yE`{&~S2Gtia5Gn?EL_(yG|G+K@=fp0D^(rz zxT1R64#p$fx05POs#deg9+l!c8gwhEor|BbmTA)uRlj-gz6)6_cB&4*Tc-M`bK9>c z*H4msFu-a#7iT^GkUgZvxqIcr(X*;=?XWBEh_4N)!@=`Ah5M!kt4cNNSPATwH?AXC zdENd&XqoAr2Dq}BQ6Gnc3D~XB-xhZWLe^fld)&QlbH&rFP$(?%sxBMiB_=cw?r7CH@9Dd8TnkYHTi)yt>lPMf~Qh{TVz-%zd}mpoX@Lx z7dHOF@cCta&Y}DYj>8M>y0uqvg+{1>9qQK_{DUz^17>%6baZre>Zg9-*JTh{JeEgE(Xc$3KCdGsnB0X~&288Q1yu50`xi`1$u zxw%0F{zoTzg?QpaXg#S%Pc}TD&G9sE#r*FN1sL2ia!PT<-siU_xsUiWo{_zcpd9U!Ni)~G zLi}%abS2t*$1jmQ&rh~)%FTUKeNh{2;~_;7Z1a$&S<~zN0o(9-C8gCXFPUtQaEi(Ok}L|C$~05J}GOTeZ2`>N!9w z|5?&Yv(xUn4w}Md-)+>Xm-idnwqK!l-ep)3M#!opq&#uM)v4O^f$5XSSy^-7P*&lV zi*Bv9WLRzp8QFh_Sp$75|b~$}d%! zADHN!cN?}Zq;Pfp`_&u3UsSsuum4tHmJnSKKJnFdCJT}j<9dY@Y9;CdG*Uh6JugW| zjszU%k%LnRdK;+FkhCS;r3tV3Qu-?q>U@4Gz20FckyBYJ$a2l5D|g6nnw|8he9Zuw zE>xvKu;5sW8RFB^dtl3__u=TrP;92~^c`S>V6o8(>LDq#2#WbkDhztv-Y+KRxxc_( z9-Ig8g=a}sc!GElV)j`DAZZobG^EycOweBae{tMx(CCHt3QRem*{+4B%V0XzUy$!_ zUZ;}$4v!kJ?fiOsh zU6?00F|Q<1!8boIGdazbS85=;kbaqV>qY`p(FtRc*H!<=v7&I|*F*PwV zGR!y-b78_&{p;J_RLYcZ=UKH^oM-d2R~63QK8sqv6wbQ1c%Aj-tT=16Xl@Dp3*V;e zHf*;EU2s!d?EmGAwL4$*KMm76>RxSI^Y_{r+12XOyBVZ5SkF88wdmZUBCW>1mjpsy z^o8A>D^$57@$5Uk|7*7VJjNZDDg1En^sD7BzpeZg;PKvK$44Vgqc3^M$IC50#>}kV z5b(o}W%EH!_vB=5`RI47^%}8dvO6oHmz@0=8J8WnQn7ZTq?gtGwUN*b>^*j51vd`gXZ}Tj=d=! zZ5Q2p8)B?EgtP6!|DK($dm-WAwXXk9U+SN8m>b$H+55Tn^-f3Qi)|}kFy(38X`WLz zb3tscaO}@TH^6nkgPpdLY>Z2bWWLj})^PvwNNvp0VmYkR- zC$rcPs*X#TBPg{vHL)l;!%)D052TJ;m^~!5xFj<#9cRWgF)#(@MiUbYLkm#GG%*0? 
zP$-w~?rCD&0nCOvupnUsa^#sB8yWuA2RF)=3TX!2_nM>k=E?JKg4=^^-n%dyma}iz z7O0l#8tb4G_&d_JH{#d+qhEI!d^66fYKrV z-7ByE+kQ;?hjsY#V=I=4^0WMI{&xAO++ilu5aB4XiALW_Kd;kHysq{BlM=J!+>0KJ z$C*SKrY8jSiz;)U*)(Zq)1ucc+#e!jzJi?g{o#VvYqM?do!+xL#%xFU&dMq4cmJ|_ z)$}vmhufCDDLpVUyl>Z)NdISr>;jDqTRg=Im0$R1hzV~$&uP?iV%b9*v8wKnnqG|u zi`U6%Z(de9F>i4__b)}$q>sOosu)$Q&n)@4Z$;pQ&K1q~A4WZ$&o-$$Ev}(DR5gXs z$NJxSPc7!gRtAte7ABjPohmimJL!w=83*1S57xEkb0m5`p0y|T%0y91?Xr*$k!KcN z@qQxIFmK}r4~|)iTkLXzMLu+2k#TdI871R(!(9`2~U67z}vXIJMe5+P?ELGIFyr7$_U^8gR2Qhq7?`|G6{iVN=9$4_qR+voW1o`bX1kq_?@ zm7?Yu-O|hoDsa9N@MUVyrd<=O*InmV-0wL}LeXNm*vwgmI&)i2xoutU%O^gq+-#-h zgcU!vCxtetZirTB`cPrNOwe`BjfgY1S)#*|1p91Qo|habzwWRw{qs>-^K}Z`hO4)3 zSIUzY)UVZ%QmoRgXzEd9>P=X}&%!9YU~gQOux6db!Fj1Vv2}&_t)uD|R&4O>6_I{d zsCSIx%+#5m7tCg~+3?%uOvdTe*F$Rt zebbjM$oo^i)iz_A4D$trP*WymMh3>k^#-*De832h{z<6{wH5fM3WFu8l*4Bb6fbc?q7ui3Ma?czxTd62X+i-dt#19k;q{i-Zt1|n=6 z+H8!htnAE8a26wS6amu*Fp3x%;$M|c<$Br5aA-Zh`^uo~Wl`!k!$N+rAKm}gt?NVV zYn6)fmHTq0T%UdUsC(tf>wh-zugDkvQkKrV&wNr*Y-2X-|KIB@^>$R5GBG*(lon0! zZ;^?AbUT{i$=9D(FVwb7kUOEi+Gn#X+u0bI3n5nobT4k-BQr_T{`T_6SwEI~yi@Kw zFTeN0R^RvQOEeC9N z9oJ^^ymM}2j4=Bzl|5_bntl+HlHs5ImG#RU$C=4?=Kr%5`)8e9xTbMSRd_+e)hDMV z{$6UT4g6+eRVVNyVx3Chngx@VtPkMaA?X>r^jGNWKqZspMPe?;zuNh^J${uwG4xsu E0JdaNg#Z8m literal 0 HcmV?d00001 diff --git a/nifi-commons/nifi-data-provenance-utils/pom.xml b/nifi-commons/nifi-data-provenance-utils/pom.xml index 5598b5955e87..3fd192473989 100644 --- a/nifi-commons/nifi-data-provenance-utils/pom.xml +++ b/nifi-commons/nifi-data-provenance-utils/pom.xml @@ -45,7 +45,7 @@ org.bouncycastle bcprov-jdk15on - 1.59 + 1.60 org.apache.nifi diff --git a/nifi-commons/nifi-expression-language/pom.xml b/nifi-commons/nifi-expression-language/pom.xml index 5a5a096d6709..91a9844576b2 100644 --- a/nifi-commons/nifi-expression-language/pom.xml +++ b/nifi-commons/nifi-expression-language/pom.xml @@ -81,12 +81,12 @@ com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 org.apache.commons - commons-lang3 - 3.7 + commons-text + 1.4 diff --git a/nifi-commons/nifi-expression-language/src/main/java/org/apache/nifi/attribute/expression/language/StandardPreparedQuery.java b/nifi-commons/nifi-expression-language/src/main/java/org/apache/nifi/attribute/expression/language/StandardPreparedQuery.java index cf90d8d0cc1b..fc3f9b71ca73 100644 --- a/nifi-commons/nifi-expression-language/src/main/java/org/apache/nifi/attribute/expression/language/StandardPreparedQuery.java +++ b/nifi-commons/nifi-expression-language/src/main/java/org/apache/nifi/attribute/expression/language/StandardPreparedQuery.java @@ -16,12 +16,6 @@ */ package org.apache.nifi.attribute.expression.language; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - import org.apache.nifi.attribute.expression.language.evaluation.Evaluator; import org.apache.nifi.attribute.expression.language.evaluation.literals.StringLiteralEvaluator; import org.apache.nifi.attribute.expression.language.evaluation.selection.AllAttributesEvaluator; @@ -34,7 +28,14 @@ import org.apache.nifi.expression.AttributeValueDecorator; import org.apache.nifi.processor.exception.ProcessException; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + public class StandardPreparedQuery implements PreparedQuery { + private static final String EMPTY_STRING = ""; private final List expressions; private volatile VariableImpact variableImpact; @@ -45,6 +46,14 @@ public 
StandardPreparedQuery(final List expressions) { @Override public String evaluateExpressions(final Map valMap, final AttributeValueDecorator decorator, final Map stateVariables) throws ProcessException { + if (expressions.isEmpty()) { + return EMPTY_STRING; + } + if (expressions.size() == 1) { + final String evaluated = expressions.get(0).evaluate(valMap, decorator, stateVariables); + return evaluated == null ? EMPTY_STRING : evaluated; + } + final StringBuilder sb = new StringBuilder(); for (final Expression expression : expressions) { diff --git a/nifi-commons/nifi-expression-language/src/main/java/org/apache/nifi/attribute/expression/language/evaluation/functions/CharSequenceTranslatorEvaluator.java b/nifi-commons/nifi-expression-language/src/main/java/org/apache/nifi/attribute/expression/language/evaluation/functions/CharSequenceTranslatorEvaluator.java index 9a671c1fbee5..9f86fa45da14 100644 --- a/nifi-commons/nifi-expression-language/src/main/java/org/apache/nifi/attribute/expression/language/evaluation/functions/CharSequenceTranslatorEvaluator.java +++ b/nifi-commons/nifi-expression-language/src/main/java/org/apache/nifi/attribute/expression/language/evaluation/functions/CharSequenceTranslatorEvaluator.java @@ -18,8 +18,8 @@ import java.util.Map; -import org.apache.commons.lang3.StringEscapeUtils; -import org.apache.commons.lang3.text.translate.CharSequenceTranslator; +import org.apache.commons.text.StringEscapeUtils; +import org.apache.commons.text.translate.CharSequenceTranslator; import org.apache.nifi.attribute.expression.language.evaluation.Evaluator; import org.apache.nifi.attribute.expression.language.evaluation.QueryResult; import org.apache.nifi.attribute.expression.language.evaluation.StringEvaluator; diff --git a/nifi-commons/nifi-flowfile-packager/pom.xml b/nifi-commons/nifi-flowfile-packager/pom.xml index 99af5fbe09aa..cd995dac356e 100644 --- a/nifi-commons/nifi-flowfile-packager/pom.xml +++ b/nifi-commons/nifi-flowfile-packager/pom.xml @@ -25,12 +25,17 @@ org.apache.commons commons-compress - 1.16.1 + 1.18 org.apache.commons commons-lang3 - 3.7 + 3.8.1 + + + org.apache.commons + commons-text + 1.4 diff --git a/nifi-commons/nifi-flowfile-packager/src/main/java/org/apache/nifi/util/FlowFilePackagerV1.java b/nifi-commons/nifi-flowfile-packager/src/main/java/org/apache/nifi/util/FlowFilePackagerV1.java index 479ac588a4bc..2e73e814df6a 100644 --- a/nifi-commons/nifi-flowfile-packager/src/main/java/org/apache/nifi/util/FlowFilePackagerV1.java +++ b/nifi-commons/nifi-flowfile-packager/src/main/java/org/apache/nifi/util/FlowFilePackagerV1.java @@ -24,7 +24,7 @@ import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; public class FlowFilePackagerV1 implements FlowFilePackager { diff --git a/nifi-commons/nifi-hl7-query-language/pom.xml b/nifi-commons/nifi-hl7-query-language/pom.xml index ffb22a8111bb..cbe90071c465 100644 --- a/nifi-commons/nifi-hl7-query-language/pom.xml +++ b/nifi-commons/nifi-hl7-query-language/pom.xml @@ -65,47 +65,47 @@ ca.uhn.hapi hapi-base - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v21 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v22 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v23 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v231 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v24 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v25 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v251 - 2.2 + 2.3 ca.uhn.hapi 
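
The StandardPreparedQuery change above short-circuits the empty and single-expression cases before falling back to a StringBuilder. Below is a simplified, standalone model of that fast path; the Expression interface and class names are illustrative stand-ins, not the NiFi types.

    import java.util.List;

    public class FastPathExample {
        // Illustrative stand-in for the NiFi Expression type.
        interface Expression {
            String evaluate();
        }

        static String evaluateAll(final List<Expression> expressions) {
            if (expressions.isEmpty()) {
                return "";                                    // nothing to evaluate
            }
            if (expressions.size() == 1) {
                final String evaluated = expressions.get(0).evaluate();
                return evaluated == null ? "" : evaluated;    // skip the StringBuilder entirely
            }

            final StringBuilder sb = new StringBuilder();     // general case: concatenate all results
            for (final Expression expression : expressions) {
                final String evaluated = expression.evaluate();
                if (evaluated != null) {
                    sb.append(evaluated);
                }
            }
            return sb.toString();
        }

        public static void main(String[] args) {
            System.out.println(evaluateAll(List.<Expression>of(() -> "hello")));  // prints "hello"
        }
    }
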
hapi-structures-v26 - 2.2 + 2.3 diff --git a/nifi-commons/nifi-hl7-query-language/src/main/java/org/apache/nifi/hl7/hapi/HapiField.java b/nifi-commons/nifi-hl7-query-language/src/main/java/org/apache/nifi/hl7/hapi/HapiField.java index 94cce5c1a9f0..f9624d0c440f 100644 --- a/nifi-commons/nifi-hl7-query-language/src/main/java/org/apache/nifi/hl7/hapi/HapiField.java +++ b/nifi-commons/nifi-hl7-query-language/src/main/java/org/apache/nifi/hl7/hapi/HapiField.java @@ -27,7 +27,6 @@ import ca.uhn.hl7v2.model.ExtraComponents; import ca.uhn.hl7v2.model.Primitive; import ca.uhn.hl7v2.model.Type; -import ca.uhn.hl7v2.model.Varies; import ca.uhn.hl7v2.parser.EncodingCharacters; import ca.uhn.hl7v2.parser.PipeParser; @@ -59,8 +58,7 @@ public HapiField(final Type type) { componentList.add(new SingleValueField(singleFieldValue)); for (int i = 0; i < extra.numComponents(); i++) { - final Varies varies = extra.getComponent(i); - componentList.add(new HapiField(varies)); + componentList.add(new HapiField(extra.getComponent(i))); } } diff --git a/nifi-commons/nifi-json-utils/pom.xml b/nifi-commons/nifi-json-utils/pom.xml index 655ab5195bdf..d4fdb1617eb2 100644 --- a/nifi-commons/nifi-json-utils/pom.xml +++ b/nifi-commons/nifi-json-utils/pom.xml @@ -32,7 +32,7 @@ com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 compile diff --git a/nifi-commons/nifi-properties/src/main/java/org/apache/nifi/util/NiFiProperties.java b/nifi-commons/nifi-properties/src/main/java/org/apache/nifi/util/NiFiProperties.java index f5dca6c57b1d..e346a6b91915 100644 --- a/nifi-commons/nifi-properties/src/main/java/org/apache/nifi/util/NiFiProperties.java +++ b/nifi-commons/nifi-properties/src/main/java/org/apache/nifi/util/NiFiProperties.java @@ -204,6 +204,13 @@ public abstract class NiFiProperties { public static final String FLOW_ELECTION_MAX_WAIT_TIME = "nifi.cluster.flow.election.max.wait.time"; public static final String FLOW_ELECTION_MAX_CANDIDATES = "nifi.cluster.flow.election.max.candidates"; + // cluster load balance properties + public static final String LOAD_BALANCE_ADDRESS = "nifi.cluster.load.balance.address"; + public static final String LOAD_BALANCE_PORT = "nifi.cluster.load.balance.port"; + public static final String LOAD_BALANCE_CONNECTIONS_PER_NODE = "nifi.cluster.load.balance.connections.per.node"; + public static final String LOAD_BALANCE_MAX_THREAD_COUNT = "nifi.cluster.load.balance.max.thread.count"; + public static final String LOAD_BALANCE_COMMS_TIMEOUT = "nifi.cluster.load.balance.comms.timeout"; + // zookeeper properties public static final String ZOOKEEPER_CONNECT_STRING = "nifi.zookeeper.connect.string"; public static final String ZOOKEEPER_CONNECT_TIMEOUT = "nifi.zookeeper.connect.timeout"; @@ -287,6 +294,13 @@ public abstract class NiFiProperties { public static final String DEFAULT_REQUEST_REPLICATION_CLAIM_TIMEOUT = "15 secs"; public static final String DEFAULT_FLOW_ELECTION_MAX_WAIT_TIME = "5 mins"; + // cluster load balance defaults + public static final int DEFAULT_LOAD_BALANCE_PORT = 6342; + public static final int DEFAULT_LOAD_BALANCE_CONNECTIONS_PER_NODE = 4; + public static final int DEFAULT_LOAD_BALANCE_MAX_THREAD_COUNT = 8; + public static final String DEFAULT_LOAD_BALANCE_COMMS_TIMEOUT = "30 sec"; + + // state management defaults public static final String DEFAULT_STATE_MANAGEMENT_CONFIG_FILE = "conf/state-management.xml"; @@ -736,6 +750,23 @@ public InetSocketAddress getClusterNodeProtocolAddress() { } } + public InetSocketAddress getClusterLoadBalanceAddress() { + try { + String 
address = getProperty(LOAD_BALANCE_ADDRESS); + if (StringUtils.isBlank(address)) { + address = getProperty(CLUSTER_NODE_ADDRESS); + } + if (StringUtils.isBlank(address)) { + address = "localhost"; + } + + final int port = getIntegerProperty(LOAD_BALANCE_PORT, DEFAULT_LOAD_BALANCE_PORT); + return InetSocketAddress.createUnresolved(address, port); + } catch (final Exception e) { + throw new RuntimeException("Invalid load balance address/port due to: " + e, e); + } + } + public Integer getClusterNodeProtocolPort() { try { return Integer.parseInt(getProperty(CLUSTER_NODE_PROTOCOL_PORT)); diff --git a/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/SimpleRecordSchema.java b/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/SimpleRecordSchema.java index 5b85f030a2e3..6926c939c021 100644 --- a/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/SimpleRecordSchema.java +++ b/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/SimpleRecordSchema.java @@ -17,24 +17,25 @@ package org.apache.nifi.serialization; +import org.apache.nifi.serialization.record.DataType; +import org.apache.nifi.serialization.record.RecordField; +import org.apache.nifi.serialization.record.RecordSchema; +import org.apache.nifi.serialization.record.SchemaIdentifier; + import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import org.apache.nifi.serialization.record.DataType; -import org.apache.nifi.serialization.record.RecordField; -import org.apache.nifi.serialization.record.RecordSchema; -import org.apache.nifi.serialization.record.SchemaIdentifier; - public class SimpleRecordSchema implements RecordSchema { private List fields = null; private Map fieldMap = null; private final boolean textAvailable; - private final String text; + private final AtomicReference text = new AtomicReference<>(); private final String schemaFormat; private final SchemaIdentifier schemaIdentifier; @@ -50,6 +51,10 @@ public SimpleRecordSchema(final String text, final String schemaFormat, final Sc this(text, schemaFormat, true, id); } + public SimpleRecordSchema(final SchemaIdentifier id) { + this(null, null, false, id); + } + public SimpleRecordSchema(final List fields, final String text, final String schemaFormat, final SchemaIdentifier id) { this(fields, text, schemaFormat, true, id); } @@ -60,7 +65,7 @@ private SimpleRecordSchema(final List fields, final String text, fi } private SimpleRecordSchema(final String text, final String schemaFormat, final boolean textAvailable, final SchemaIdentifier id) { - this.text = text; + this.text.set(text); this.schemaFormat = schemaFormat; this.schemaIdentifier = id; this.textAvailable = textAvailable; @@ -69,7 +74,7 @@ private SimpleRecordSchema(final String text, final String schemaFormat, final b @Override public Optional getSchemaText() { if (textAvailable) { - return Optional.ofNullable(text); + return Optional.ofNullable(text.get()); } else { return Optional.empty(); } @@ -121,13 +126,13 @@ public RecordField getField(final int index) { @Override public List getDataTypes() { - return getFields().stream().map(recordField -> recordField.getDataType()) + return getFields().stream().map(RecordField::getDataType) .collect(Collectors.toList()); } @Override public List getFieldNames() { - return getFields().stream().map(recordField -> 
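
To show the fallback order encoded in the new getClusterLoadBalanceAddress() in isolation, here is a hedged sketch. The Map stands in for NiFiProperties; the property names and the 6342 default mirror the constants added above.

    import java.net.InetSocketAddress;
    import java.util.Map;

    public class LoadBalanceAddressExample {
        // Mirrors the fallback chain: load balance address property,
        // then the node's cluster address property, then "localhost".
        static InetSocketAddress resolve(final Map<String, String> props) {
            String address = props.get("nifi.cluster.load.balance.address");
            if (address == null || address.trim().isEmpty()) {
                address = props.get("nifi.cluster.node.address");
            }
            if (address == null || address.trim().isEmpty()) {
                address = "localhost";
            }

            final String portValue = props.get("nifi.cluster.load.balance.port");
            final int port = portValue == null ? 6342 : Integer.parseInt(portValue); // 6342 is the new default
            return InetSocketAddress.createUnresolved(address, port);
        }

        public static void main(String[] args) {
            // With nothing configured, the node falls back to localhost:6342.
            System.out.println(resolve(Map.of()));
        }
    }
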
recordField.getFieldName()) + return getFields().stream().map(RecordField::getFieldName) .collect(Collectors.toList()); } @@ -189,7 +194,19 @@ private static String createText(final List fields) { @Override public String toString() { - return text; + String textValue = text.get(); + if (textValue != null) { + return textValue; + } + + textValue = createText(fields); + final boolean updated = text.compareAndSet(null, textValue); + + if (updated) { + return textValue; + } else { + return text.get(); + } } @Override diff --git a/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/record/ResultSetRecordSet.java b/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/record/ResultSetRecordSet.java index 551789cac4cc..bf7d2245eea1 100644 --- a/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/record/ResultSetRecordSet.java +++ b/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/record/ResultSetRecordSet.java @@ -63,6 +63,19 @@ public RecordSchema getSchema() { return schema; } + // Protected methods for subclasses to access private member variables + protected ResultSet getResultSet() { + return rs; + } + + protected boolean hasMoreRows() { + return moreRows; + } + + protected void setMoreRows(boolean moreRows) { + this.moreRows = moreRows; + } + @Override public Record next() throws IOException { try { @@ -87,7 +100,7 @@ public void close() { } } - private Record createRecord(final ResultSet rs) throws SQLException { + protected Record createRecord(final ResultSet rs) throws SQLException { final Map values = new HashMap<>(schema.getFieldCount()); for (final RecordField field : schema.getFields()) { diff --git a/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/record/util/DataTypeUtils.java b/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/record/util/DataTypeUtils.java index 687d9ede1a50..336a70d11fea 100644 --- a/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/record/util/DataTypeUtils.java +++ b/nifi-commons/nifi-record/src/main/java/org/apache/nifi/serialization/record/util/DataTypeUtils.java @@ -339,6 +339,11 @@ public static Object[] toArray(final Object value, final String fieldName, final return dest; } + if (value instanceof List) { + final List list = (List)value; + return list.toArray(); + } + throw new IllegalTypeConversionException("Cannot convert value [" + value + "] of type " + value.getClass() + " to Object Array for field " + fieldName); } diff --git a/nifi-commons/nifi-record/src/test/java/org/apache/nifi/serialization/record/TestDataTypeUtils.java b/nifi-commons/nifi-record/src/test/java/org/apache/nifi/serialization/record/TestDataTypeUtils.java index a8bc28d04e00..5f15fc2d046b 100644 --- a/nifi-commons/nifi-record/src/test/java/org/apache/nifi/serialization/record/TestDataTypeUtils.java +++ b/nifi-commons/nifi-record/src/test/java/org/apache/nifi/serialization/record/TestDataTypeUtils.java @@ -24,6 +24,7 @@ import java.nio.charset.StandardCharsets; import java.sql.Timestamp; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -168,6 +169,18 @@ public void testConvertRecordFieldToObject() { } + @Test + public void testToArray() { + final List list = Arrays.asList("Seven", "Eleven", "Thirteen"); + + final Object[] array = DataTypeUtils.toArray(list, "list", null); + + assertEquals(list.size(), array.length); + for (int i = 0; i < list.size(); i++) { + 
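
SimpleRecordSchema.toString() above now caches its generated text lazily in an AtomicReference, using compareAndSet so that concurrent callers settle on a single published value. A standalone model of that pattern follows; the class and method names are illustrative.

    import java.util.concurrent.atomic.AtomicReference;

    public class LazyTextExample {
        private final AtomicReference<String> text = new AtomicReference<>();

        // Stand-in for SimpleRecordSchema#createText(fields): any expensive,
        // deterministic computation works here.
        private String createText() {
            return "[\"name\", \"value\"]";
        }

        @Override
        public String toString() {
            String textValue = text.get();
            if (textValue != null) {
                return textValue;                 // already computed and published
            }

            textValue = createText();
            final boolean updated = text.compareAndSet(null, textValue);
            return updated ? textValue : text.get();   // keep whichever value was published first
        }

        public static void main(String[] args) {
            System.out.println(new LazyTextExample());
        }
    }
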
assertEquals(list.get(i), array[i]); + } + } + @Test public void testStringToBytes() { Object bytes = DataTypeUtils.convertType("Hello", RecordFieldType.ARRAY.getArrayDataType(RecordFieldType.BYTE.getDataType()),null, StandardCharsets.UTF_8); diff --git a/nifi-commons/nifi-schema-utils/pom.xml b/nifi-commons/nifi-schema-utils/pom.xml index 561b36f0d7c6..8c53b61a29a4 100644 --- a/nifi-commons/nifi-schema-utils/pom.xml +++ b/nifi-commons/nifi-schema-utils/pom.xml @@ -24,7 +24,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 test diff --git a/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/RecordIterator.java b/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/RecordIterator.java new file mode 100644 index 000000000000..de35cd538ac2 --- /dev/null +++ b/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/RecordIterator.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.repository.schema; + +import java.io.Closeable; +import java.io.IOException; + +public interface RecordIterator extends Closeable { + + Record next() throws IOException; + + boolean isNext() throws IOException; + +} diff --git a/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SchemaRecordReader.java b/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SchemaRecordReader.java index 84f353231acd..daedf376821b 100644 --- a/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SchemaRecordReader.java +++ b/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SchemaRecordReader.java @@ -17,8 +17,11 @@ package org.apache.nifi.repository.schema; +import java.io.BufferedInputStream; import java.io.DataInputStream; import java.io.EOFException; +import java.io.File; +import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; @@ -30,7 +33,6 @@ import java.util.Map; import java.util.Optional; - public class SchemaRecordReader { private final RecordSchema schema; @@ -56,15 +58,24 @@ private static void fillBuffer(final InputStream in, final byte[] destination) t } public Record readRecord(final InputStream in) throws IOException { - final int sentinelByte = in.read(); - if (sentinelByte < 0) { + final int recordIndicator = in.read(); + if (recordIndicator < 0) { return null; } - if (sentinelByte != 1) { - throw new IOException("Expected to read a Sentinel Byte of '1' but got a value of '" + sentinelByte + "' instead"); + if (recordIndicator == SchemaRecordWriter.EXTERNAL_FILE_INDICATOR) { + throw new IOException("Expected to read a Sentinel Byte of '1' indicating that the next record is inline but 
the Sentinel value was '" + SchemaRecordWriter.EXTERNAL_FILE_INDICATOR + + ", indicating that data was written to an External File. This data cannot be recovered via calls to #readRecord(InputStream) but must be recovered via #readRecords(InputStream)"); + } + + if (recordIndicator != 1) { + throw new IOException("Expected to read a Sentinel Byte of '1' but got a value of '" + recordIndicator + "' instead"); } + return readInlineRecord(in); + } + + private Record readInlineRecord(final InputStream in) throws IOException { final List schemaFields = schema.getFields(); final Map fields = new HashMap<>(schemaFields.size()); @@ -76,6 +87,53 @@ public Record readRecord(final InputStream in) throws IOException { return new FieldMapRecord(fields, schema); } + public RecordIterator readRecords(final InputStream in) throws IOException { + final int recordIndicator = in.read(); + if (recordIndicator < 0) { + return null; + } + + if (recordIndicator == SchemaRecordWriter.INLINE_RECORD_INDICATOR) { + final Record nextRecord = readInlineRecord(in); + return new SingleRecordIterator(nextRecord); + } + + if (recordIndicator != SchemaRecordWriter.EXTERNAL_FILE_INDICATOR) { + throw new IOException("Expected to read a Sentinel Byte of '" + SchemaRecordWriter.INLINE_RECORD_INDICATOR + "' or '" + SchemaRecordWriter.EXTERNAL_FILE_INDICATOR + + "' but encountered a value of '" + recordIndicator + "' instead"); + } + + final DataInputStream dis = new DataInputStream(in); + final String externalFilename = dis.readUTF(); + final File externalFile = new File(externalFilename); + final FileInputStream fis = new FileInputStream(externalFile); + final InputStream bufferedIn = new BufferedInputStream(fis); + + final RecordIterator recordIterator = new RecordIterator() { + @Override + public Record next() throws IOException { + return readRecord(bufferedIn); + } + + @Override + public boolean isNext() throws IOException { + bufferedIn.mark(1); + final int nextByte = bufferedIn.read(); + bufferedIn.reset(); + + return (nextByte > -1); + } + + @Override + public void close() throws IOException { + bufferedIn.close(); + } + }; + + return recordIterator; + } + + private Object readField(final InputStream in, final RecordField field) throws IOException { switch (field.getRepetition()) { diff --git a/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SchemaRecordWriter.java b/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SchemaRecordWriter.java index 67d558ae47ab..d65e60be9d66 100644 --- a/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SchemaRecordWriter.java +++ b/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SchemaRecordWriter.java @@ -21,6 +21,7 @@ import org.slf4j.LoggerFactory; import java.io.DataOutputStream; +import java.io.File; import java.io.IOException; import java.io.OutputStream; import java.io.UTFDataFormatException; @@ -30,6 +31,8 @@ import java.util.Map; public class SchemaRecordWriter { + static final int INLINE_RECORD_INDICATOR = 1; + static final int EXTERNAL_FILE_INDICATOR = 8; public static final int MAX_ALLOWED_UTF_LENGTH = 65_535; @@ -41,7 +44,7 @@ public void writeRecord(final Record record, final OutputStream out) throws IOEx // write sentinel value to indicate that there is a record. This allows the reader to then read one // byte and check if -1. If so, the reader knows there are no more records. If not, then the reader // knows that it should be able to continue reading. 
- out.write(1); + out.write(INLINE_RECORD_INDICATOR); final byte[] buffer = byteArrayCache.checkOut(); try { @@ -226,4 +229,8 @@ static int getCharsInUTF8Limit(final String str, final int utf8Limit) { return charsInOriginal; } + public void writeExternalFileReference(final DataOutputStream out, final File externalFile) throws IOException { + out.write(EXTERNAL_FILE_INDICATOR); + out.writeUTF(externalFile.getAbsolutePath()); + } } diff --git a/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SingleRecordIterator.java b/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SingleRecordIterator.java new file mode 100644 index 000000000000..cc007fc82773 --- /dev/null +++ b/nifi-commons/nifi-schema-utils/src/main/java/org/apache/nifi/repository/schema/SingleRecordIterator.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.repository.schema; + +public class SingleRecordIterator implements RecordIterator { + private final Record record; + private boolean consumed = false; + + public SingleRecordIterator(final Record record) { + this.record = record; + } + + @Override + public Record next() { + if (consumed) { + return null; + } + + consumed = true; + return record; + } + + @Override + public void close() { + } + + @Override + public boolean isNext() { + return !consumed; + } +} diff --git a/nifi-commons/nifi-security-utils/pom.xml b/nifi-commons/nifi-security-utils/pom.xml index bc5bc5e4a06a..da684c5735c5 100644 --- a/nifi-commons/nifi-security-utils/pom.xml +++ b/nifi-commons/nifi-security-utils/pom.xml @@ -44,7 +44,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 commons-codec @@ -54,12 +54,12 @@ org.bouncycastle bcprov-jdk15on - 1.59 + 1.60 org.bouncycastle bcpkix-jdk15on - 1.59 + 1.60 org.apache.nifi diff --git a/nifi-commons/nifi-site-to-site-client/pom.xml b/nifi-commons/nifi-site-to-site-client/pom.xml index ace8db1e1a73..cf9ab8c30c5d 100644 --- a/nifi-commons/nifi-site-to-site-client/pom.xml +++ b/nifi-commons/nifi-site-to-site-client/pom.xml @@ -48,12 +48,12 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 org.apache.nifi diff --git a/nifi-commons/nifi-site-to-site-client/src/test/java/org/apache/nifi/remote/client/http/TestHttpClient.java b/nifi-commons/nifi-site-to-site-client/src/test/java/org/apache/nifi/remote/client/http/TestHttpClient.java index ebcab319ef86..ded1db194be5 100644 --- a/nifi-commons/nifi-site-to-site-client/src/test/java/org/apache/nifi/remote/client/http/TestHttpClient.java +++ b/nifi-commons/nifi-site-to-site-client/src/test/java/org/apache/nifi/remote/client/http/TestHttpClient.java @@ -453,8 +453,8 @@ public static void setup() throws Exception { 
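
The SchemaRecordReader/SchemaRecordWriter changes above add a second sentinel: 1 still marks a record that follows inline, while 8 marks a reference to an external file whose absolute path follows as a UTF string. Here is a simplified, self-contained model of that framing; the indicator values mirror the constants above, but the payload is a plain string rather than a serialized NiFi Record.

    import java.io.*;

    public class SentinelFramingExample {
        static final int INLINE_RECORD_INDICATOR = 1;   // record body follows inline
        static final int EXTERNAL_FILE_INDICATOR = 8;   // only a file path follows

        static void writeInline(final DataOutputStream out, final String payload) throws IOException {
            out.write(INLINE_RECORD_INDICATOR);
            out.writeUTF(payload);
        }

        static void writeExternalFileReference(final DataOutputStream out, final File externalFile) throws IOException {
            out.write(EXTERNAL_FILE_INDICATOR);
            out.writeUTF(externalFile.getAbsolutePath());
        }

        static String describeNext(final DataInputStream in) throws IOException {
            final int indicator = in.read();
            if (indicator < 0) {
                return "end of stream";
            }
            if (indicator == INLINE_RECORD_INDICATOR) {
                return "inline record: " + in.readUTF();
            }
            if (indicator == EXTERNAL_FILE_INDICATOR) {
                return "records stored externally in: " + in.readUTF();
            }
            throw new IOException("Unexpected sentinel byte: " + indicator);
        }

        public static void main(String[] args) throws IOException {
            final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes)) {
                writeInline(out, "hello");
                writeExternalFileReference(out, new File("/tmp/swap-contents.bin"));
            }
            try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                System.out.println(describeNext(in));
                System.out.println(describeNext(in));
            }
        }
    }
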
wrongPathContextHandler.insertHandler(wrongPathServletHandler); final SslContextFactory sslContextFactory = new SslContextFactory(); - sslContextFactory.setKeyStorePath("src/test/resources/certs/localhost-ks.jks"); - sslContextFactory.setKeyStorePassword("localtest"); + sslContextFactory.setKeyStorePath("src/test/resources/certs/keystore.jks"); + sslContextFactory.setKeyStorePassword("passwordpassword"); sslContextFactory.setKeyStoreType("JKS"); httpConnector = new ServerConnector(server); @@ -689,11 +689,11 @@ private SiteToSiteClient.Builder getDefaultBuilderHTTPS() { return new SiteToSiteClient.Builder().transportProtocol(SiteToSiteTransportProtocol.HTTP) .url("https://localhost:" + sslConnector.getLocalPort() + "/nifi") .timeout(3, TimeUnit.MINUTES) - .keystoreFilename("src/test/resources/certs/localhost-ks.jks") - .keystorePass("localtest") + .keystoreFilename("src/test/resources/certs/keystore.jks") + .keystorePass("passwordpassword") .keystoreType(KeystoreType.JKS) - .truststoreFilename("src/test/resources/certs/localhost-ts.jks") - .truststorePass("localtest") + .truststoreFilename("src/test/resources/certs/truststore.jks") + .truststorePass("passwordpassword") .truststoreType(KeystoreType.JKS) ; } @@ -707,13 +707,13 @@ private static void consumeDataPacket(DataPacket packet) throws IOException { @Test - public void testUnkownClusterUrl() throws Exception { + public void testUnknownClusterUrl() throws Exception { final URI uri = server.getURI(); try ( SiteToSiteClient client = getDefaultBuilder() - .url("http://" + uri.getHost() + ":" + uri.getPort() + "/unkown") + .url("http://" + uri.getHost() + ":" + uri.getPort() + "/unknown") .portName("input-running") .build() ) { diff --git a/nifi-commons/nifi-site-to-site-client/src/test/resources/certs/keystore.jks b/nifi-commons/nifi-site-to-site-client/src/test/resources/certs/keystore.jks new file mode 100644 index 0000000000000000000000000000000000000000..246fe888efbb981188c9dcca109b5d2c52f2b435 GIT binary patch literal 3088
[base85-encoded binary keystore data omitted]
literal 0 HcmV?d00001 diff --git a/nifi-commons/nifi-site-to-site-client/src/test/resources/certs/localhost-ks.jks b/nifi-commons/nifi-site-to-site-client/src/test/resources/certs/localhost-ks.jks deleted file mode 100755 index df36197d92ab8e9870f42666d74c47646fd56f26..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3512
[base85-encoded binary keystore data omitted]
literal 0 HcmV?d00001 diff --git a/nifi-commons/nifi-socket-utils/pom.xml b/nifi-commons/nifi-socket-utils/pom.xml index eccf216e8d51..f4a4668ae305 100644 --- a/nifi-commons/nifi-socket-utils/pom.xml +++ b/nifi-commons/nifi-socket-utils/pom.xml @@ -45,7 +45,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 commons-io diff --git a/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/stream/io/StreamUtils.java b/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/stream/io/StreamUtils.java index 64f6eaab5769..dca3d0c1f46d 100644 --- a/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/stream/io/StreamUtils.java +++ b/nifi-commons/nifi-utils/src/main/java/org/apache/nifi/stream/io/StreamUtils.java @@ -16,6 +16,9 @@ */ package org.apache.nifi.stream.io; +import org.apache.nifi.stream.io.exception.BytePatternNotFoundException; +import org.apache.nifi.stream.io.util.NonThreadSafeCircularBuffer; + import java.io.EOFException; import java.io.IOException; import java.io.InputStream; @@ -23,9 +26,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.nifi.stream.io.exception.BytePatternNotFoundException; -import org.apache.nifi.stream.io.util.NonThreadSafeCircularBuffer; - public class StreamUtils { public static long copy(final InputStream source, final OutputStream destination) throws IOException { @@ -101,6 +101,34 @@ public static int fillBuffer(final InputStream source, final byte[] destination, return bytesRead; } + /** + * Reads byteCount bytes of data from the given InputStream, writing to the provided byte array. + * + * @param source the InputStream to read from + * @param destination the destination for the data + * @param byteCount the number of bytes to copy + * + * @throws IllegalArgumentException if the given byte array is smaller than byteCount elements.
+ * @throws EOFException if the InputStream does not have byteCount bytes in the InputStream + * @throws IOException if unable to read from the InputStream + */ + public static void read(final InputStream source, final byte[] destination, final int byteCount) throws IOException { + if (destination.length < byteCount) { + throw new IllegalArgumentException(); + } + + int bytesRead = 0; + int len; + while (bytesRead < byteCount) { + len = source.read(destination, bytesRead, byteCount - bytesRead); + if (len < 0) { + throw new EOFException("Expected to consume " + byteCount + " bytes but consumed only " + bytesRead); + } + + bytesRead += len; + } + } + /** * Copies data from in to out until either we are out of data (returns null) or we hit one of the byte patterns identified by the stoppers parameter (returns the byte pattern * matched). The bytes in the stopper will be copied. diff --git a/nifi-commons/nifi-web-utils/pom.xml b/nifi-commons/nifi-web-utils/pom.xml index 2830e58ab003..4d38aad23491 100644 --- a/nifi-commons/nifi-web-utils/pom.xml +++ b/nifi-commons/nifi-web-utils/pom.xml @@ -23,7 +23,7 @@ nifi-web-utils 2.26 - 2.9.5 + 2.9.7 @@ -39,7 +39,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.glassfish.jersey.core @@ -91,12 +91,6 @@ 4.5.6 compile - - org.apache.httpcomponents - httpclient - 4.5.6 - compile - diff --git a/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/.SequentialAccessWriteAheadLog.java.swp b/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/.SequentialAccessWriteAheadLog.java.swp new file mode 100644 index 0000000000000000000000000000000000000000..1d5f6412ea1c5fbb3db5bd409b35a723ad4d8ede GIT binary patch literal 16384
[base85-encoded binary Vim swap-file data omitted]
literal 0 HcmV?d00001 diff --git a/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/LengthDelimitedJournal.java
b/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/LengthDelimitedJournal.java index 0b2a8d3db392..d9fdc97fea55 100644 --- a/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/LengthDelimitedJournal.java +++ b/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/LengthDelimitedJournal.java @@ -17,6 +17,14 @@ package org.apache.nifi.wali; +import org.apache.nifi.stream.io.ByteCountingInputStream; +import org.apache.nifi.stream.io.LimitingInputStream; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.wali.SerDe; +import org.wali.SerDeFactory; +import org.wali.UpdateType; + import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.ByteArrayOutputStream; @@ -31,23 +39,19 @@ import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; +import java.nio.file.Files; import java.text.DecimalFormat; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; - -import org.apache.nifi.stream.io.ByteCountingInputStream; -import org.apache.nifi.stream.io.LimitingInputStream; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.wali.SerDe; -import org.wali.SerDeFactory; -import org.wali.UpdateType; +import java.util.UUID; public class LengthDelimitedJournal implements WriteAheadJournal { private static final Logger logger = LoggerFactory.getLogger(LengthDelimitedJournal.class); + private static final int DEFAULT_MAX_IN_HEAP_SERIALIZATION_BYTES = 5 * 1024 * 1024; // 5 MB + private static final JournalSummary INACTIVE_JOURNAL_SUMMARY = new StandardJournalSummary(-1L, -1L, 0); private static final int JOURNAL_ENCODING_VERSION = 1; private static final byte TRANSACTION_FOLLOWS = 64; @@ -55,9 +59,11 @@ public class LengthDelimitedJournal implements WriteAheadJournal { private static final int NUL_BYTE = 0; private final File journalFile; + private final File overflowDirectory; private final long initialTransactionId; private final SerDeFactory serdeFactory; private final ObjectPool streamPool; + private final int maxInHeapSerializationBytes; private SerDe serde; private FileOutputStream fileOut; @@ -72,13 +78,56 @@ public class LengthDelimitedJournal implements WriteAheadJournal { private final ByteBuffer transactionPreamble = ByteBuffer.allocate(12); // guarded by synchronized block public LengthDelimitedJournal(final File journalFile, final SerDeFactory serdeFactory, final ObjectPool streamPool, final long initialTransactionId) { + this(journalFile, serdeFactory, streamPool, initialTransactionId, DEFAULT_MAX_IN_HEAP_SERIALIZATION_BYTES); + } + + public LengthDelimitedJournal(final File journalFile, final SerDeFactory serdeFactory, final ObjectPool streamPool, final long initialTransactionId, + final int maxInHeapSerializationBytes) { this.journalFile = journalFile; + this.overflowDirectory = new File(journalFile.getParentFile(), "overflow-" + getBaseFilename(journalFile)); this.serdeFactory = serdeFactory; this.serde = serdeFactory.createSerDe(null); this.streamPool = streamPool; this.initialTransactionId = initialTransactionId; this.currentTransactionId = initialTransactionId; + this.maxInHeapSerializationBytes = maxInHeapSerializationBytes; + } + + public void dispose() { + logger.debug("Deleting Journal {} because it is now encapsulated in the latest Snapshot", journalFile.getName()); + if (!journalFile.delete() && journalFile.exists()) { + logger.warn("Unable to delete expired journal file " + 
journalFile + "; this file should be deleted manually."); + } + + if (overflowDirectory.exists()) { + final File[] overflowFiles = overflowDirectory.listFiles(); + if (overflowFiles == null) { + logger.warn("Unable to obtain listing of files that exist in 'overflow directory' " + overflowDirectory + + " - this directory and any files within it can now be safely removed manually"); + return; + } + + for (final File overflowFile : overflowFiles) { + if (!overflowFile.delete() && overflowFile.exists()) { + logger.warn("After expiring journal file " + journalFile + ", unable to remove 'overflow file' " + overflowFile + " - this file should be removed manually"); + } + } + + if (!overflowDirectory.delete()) { + logger.warn("After expiring journal file " + journalFile + ", unable to remove 'overflow directory' " + overflowDirectory + " - this file should be removed manually"); + } + } + } + + private static String getBaseFilename(final File file) { + final String name = file.getName(); + final int index = name.lastIndexOf("."); + if (index < 0) { + return name; + } + + return name.substring(0, index); } private synchronized OutputStream getOutputStream() throws FileNotFoundException { @@ -90,6 +139,10 @@ private synchronized OutputStream getOutputStream() throws FileNotFoundException return bufferedOut; } + @Override + public synchronized boolean isHealthy() { + return !closed && !poisoned; + } @Override public synchronized void writeHeader() throws IOException { @@ -177,12 +230,64 @@ public void update(final Collection records, final RecordLookup recordLook checkState(); + File overflowFile = null; final ByteArrayDataOutputStream bados = streamPool.borrowObject(); + try { - for (final T record : records) { - final Object recordId = serde.getRecordIdentifier(record); - final T previousRecordState = recordLookup.lookup(recordId); - serde.serializeEdit(previousRecordState, record, bados.getDataOutputStream()); + FileOutputStream overflowFileOut = null; + + try { + DataOutputStream dataOut = bados.getDataOutputStream(); + for (final T record : records) { + final Object recordId = serde.getRecordIdentifier(record); + final T previousRecordState = recordLookup.lookup(recordId); + serde.serializeEdit(previousRecordState, record, dataOut); + + final int size = bados.getByteArrayOutputStream().size(); + if (serde.isWriteExternalFileReferenceSupported() && size > maxInHeapSerializationBytes) { + if (!overflowDirectory.exists()) { + Files.createDirectory(overflowDirectory.toPath()); + } + + // If we have exceeded our threshold for how much to serialize in memory, + // flush the in-memory representation to an 'overflow file' and then update + // the Data Output Stream that is used to write to the file also. + overflowFile = new File(overflowDirectory, UUID.randomUUID().toString()); + logger.debug("Length of update with {} records exceeds in-memory max of {} bytes. Overflowing to {}", records.size(), maxInHeapSerializationBytes, overflowFile); + + overflowFileOut = new FileOutputStream(overflowFile); + bados.getByteArrayOutputStream().writeTo(overflowFileOut); + bados.getByteArrayOutputStream().reset(); + + // change dataOut to point to the File's Output Stream so that all subsequent records are written to the file. + dataOut = new DataOutputStream(new BufferedOutputStream(overflowFileOut)); + + // We now need to write to the ByteArrayOutputStream a pointer to the overflow file + // so that what is written to the actual journal is that pointer. 
+ serde.writeExternalFileReference(overflowFile, bados.getDataOutputStream()); + } + } + + dataOut.flush(); + + // If we overflowed to an external file, we need to be sure that we sync to disk before + // updating the Journal. Otherwise, we could get to a state where the Journal was flushed to disk without the + // external file being flushed. This would result in a missed update to the FlowFile Repository. + if (overflowFileOut != null) { + if (logger.isDebugEnabled()) { // avoid calling File.length() if not necessary + logger.debug("Length of update to overflow file is {} bytes", overflowFile.length()); + } + + overflowFileOut.getFD().sync(); + } + } finally { + if (overflowFileOut != null) { + try { + overflowFileOut.close(); + } catch (final Exception e) { + logger.warn("Failed to close open file handle to overflow file {}", overflowFile, e); + } + } } final ByteArrayOutputStream baos = bados.getByteArrayOutputStream(); @@ -206,12 +311,20 @@ public void update(final Collection records, final RecordLookup recordLook logger.debug("Wrote Transaction {} to journal {} with length {} and {} records", transactionId, journalFile, baos.size(), records.size()); } catch (final Throwable t) { poison(t); + + if (overflowFile != null) { + if (!overflowFile.delete() && overflowFile.exists()) { + logger.warn("Failed to cleanup temporary overflow file " + overflowFile + " - this file should be cleaned up manually."); + } + } + throw t; } finally { streamPool.returnObject(bados); } } + private void checkState() throws IOException { if (poisoned) { throw new IOException("Cannot update journal file " + journalFile + " because this journal has already encountered a failure when attempting to write to the file. " @@ -331,7 +444,7 @@ public JournalRecovery recoverRecords(final Map recordMap, final Set< final ByteCountingInputStream transactionByteCountingIn = new ByteCountingInputStream(transactionLimitingIn); final DataInputStream transactionDis = new DataInputStream(transactionByteCountingIn); - while (transactionByteCountingIn.getBytesConsumed() < transactionLength) { + while (transactionByteCountingIn.getBytesConsumed() < transactionLength || serde.isMoreInExternalFile()) { final T record = serde.deserializeEdit(transactionDis, recordMap, serdeAndVersion.getVersion()); // Update our RecordMap so that we have the most up-to-date version of the Record. diff --git a/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/SequentialAccessWriteAheadLog.java b/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/SequentialAccessWriteAheadLog.java index e4a1db7bf970..11eb31cdd38a 100644 --- a/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/SequentialAccessWriteAheadLog.java +++ b/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/SequentialAccessWriteAheadLog.java @@ -17,6 +17,12 @@ package org.apache.nifi.wali; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.wali.SerDeFactory; +import org.wali.SyncListener; +import org.wali.WriteAheadRepository; + import java.io.File; import java.io.IOException; import java.util.Arrays; @@ -32,12 +38,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Pattern; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.wali.SerDeFactory; -import org.wali.SyncListener; -import org.wali.WriteAheadRepository; - /** *

* This implementation of WriteAheadRepository provides the ability to write all updates to the @@ -251,13 +251,22 @@ public int checkpoint() throws IOException { try { if (journal != null) { final JournalSummary journalSummary = journal.getSummary(); - if (journalSummary.getTransactionCount() == 0) { + if (journalSummary.getTransactionCount() == 0 && journal.isHealthy()) { logger.debug("Will not checkpoint Write-Ahead Log because no updates have occurred since last checkpoint"); return snapshot.getRecordCount(); } - journal.fsync(); - journal.close(); + try { + journal.fsync(); + } catch (final Exception e) { + logger.error("Failed to synch Write-Ahead Log's journal to disk at {}", storageDirectory, e); + } + + try { + journal.close(); + } catch (final Exception e) { + logger.error("Failed to close Journal while attempting to checkpoint Write-Ahead Log at {}", storageDirectory); + } nextTransactionId = Math.max(nextTransactionId, journalSummary.getLastTransactionId() + 1); } @@ -291,10 +300,8 @@ public int checkpoint() throws IOException { snapshot.writeSnapshot(snapshotCapture); for (final File existingJournal : existingJournals) { - logger.debug("Deleting Journal {} because it is now encapsulated in the latest Snapshot", existingJournal.getName()); - if (!existingJournal.delete() && existingJournal.exists()) { - logger.warn("Unable to delete expired journal file " + existingJournal + "; this file should be deleted manually."); - } + final WriteAheadJournal journal = new LengthDelimitedJournal<>(existingJournal, serdeFactory, streamPool, nextTransactionId); + journal.dispose(); } final long totalNanos = System.nanoTime() - startNanos; diff --git a/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/WriteAheadJournal.java b/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/WriteAheadJournal.java index f35d47ab790f..d4fb6cbed474 100644 --- a/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/WriteAheadJournal.java +++ b/nifi-commons/nifi-write-ahead-log/src/main/java/org/apache/nifi/wali/WriteAheadJournal.java @@ -48,4 +48,14 @@ public interface WriteAheadJournal extends Closeable { * @throws IOException if unable to write to the underlying storage mechanism. 
*/ JournalSummary getSummary(); + + /** + * @return true if the journal is healthy and can be written to, false if either the journal has been closed or is poisoned + */ + boolean isHealthy(); + + /** + * Destroys any resources that the journal occupies + */ + void dispose(); } diff --git a/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/MinimalLockingWriteAheadLog.java b/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/MinimalLockingWriteAheadLog.java index eabac9dc80cc..8db2d878856d 100644 --- a/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/MinimalLockingWriteAheadLog.java +++ b/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/MinimalLockingWriteAheadLog.java @@ -1103,6 +1103,9 @@ public Set recoverNextTransaction(final Map currentRecordMap, final S record; try { record = serde.deserializeEdit(recoveryIn, currentRecordMap, recoveryVersion); + if (record == null) { + throw new EOFException(); + } } catch (final EOFException eof) { throw eof; } catch (final Exception e) { diff --git a/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/SerDe.java b/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/SerDe.java index d1919e7f4c0f..356cf8457338 100644 --- a/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/SerDe.java +++ b/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/SerDe.java @@ -18,6 +18,7 @@ import java.io.DataInputStream; import java.io.DataOutputStream; +import java.io.File; import java.io.IOException; import java.util.Map; @@ -151,4 +152,37 @@ default void readHeader(DataInputStream in) throws IOException { */ default void close() throws IOException { } + + /** + * Optional method that a SerDe can support that indicates that the contents of the next update should be found + * in the given external File. + * + * @param externalFile the file that contains the update information + * @param out the DataOutputStream to write the external file reference to + * @throws IOException if unable to write the update + * @throws UnsupportedOperationException if this SerDe does not support this operation + */ + default void writeExternalFileReference(File externalFile, DataOutputStream out) throws IOException { + throw new UnsupportedOperationException(); + } + + /** + * Indicates whether or not a call to {@link #writeExternalFileReference(File, DataOutputStream)} is valid for this implementation + * @return true if calls to {@link #writeExternalFileReference(File, DataOutputStream)} are supported, false if calling + * the method will result in an {@link UnsupportedOperationException} being thrown. + */ + default boolean isWriteExternalFileReferenceSupported() { + return false; + } + + /** + * If the last call to read data from this SerDe resulted in data being read from an External File, and there is more data in that External File, + * then this method will return true. Otherwise, it will return false. + * + * @return true if more data available in External File, false otherwise. 
+ * @throws IOException if unable to read from External File to determine data availability + */ + default boolean isMoreInExternalFile() throws IOException { + return false; + } } diff --git a/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/WriteAheadRepository.java b/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/WriteAheadRepository.java index 7f0e8281e316..05fc8a57cd1d 100644 --- a/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/WriteAheadRepository.java +++ b/nifi-commons/nifi-write-ahead-log/src/main/java/org/wali/WriteAheadRepository.java @@ -89,7 +89,7 @@ public interface WriteAheadRepository { *

* Recovers all External Swap locations that were persisted. If this method * is to be called, it must be called AFTER {@link #recoverRecords()} and - * BEFORE {@link update}. + * BEFORE {@link #update(Collection, boolean)}}. *

* * @return swap location diff --git a/nifi-commons/nifi-write-ahead-log/src/test/java/org/apache/nifi/wali/TestSequentialAccessWriteAheadLog.java b/nifi-commons/nifi-write-ahead-log/src/test/java/org/apache/nifi/wali/TestSequentialAccessWriteAheadLog.java index 4fc0fe794519..6d24445093dc 100644 --- a/nifi-commons/nifi-write-ahead-log/src/test/java/org/apache/nifi/wali/TestSequentialAccessWriteAheadLog.java +++ b/nifi-commons/nifi-write-ahead-log/src/test/java/org/apache/nifi/wali/TestSequentialAccessWriteAheadLog.java @@ -17,10 +17,17 @@ package org.apache.nifi.wali; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import org.junit.Assert; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.wali.DummyRecord; +import org.wali.DummyRecordSerde; +import org.wali.SerDeFactory; +import org.wali.SingletonSerDeFactory; +import org.wali.UpdateType; +import org.wali.WriteAheadRepository; import java.io.File; import java.io.IOException; @@ -38,22 +45,69 @@ import java.util.function.Function; import java.util.stream.Collectors; -import org.junit.Assert; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.wali.DummyRecord; -import org.wali.DummyRecordSerde; -import org.wali.SerDeFactory; -import org.wali.SingletonSerDeFactory; -import org.wali.UpdateType; -import org.wali.WriteAheadRepository; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; public class TestSequentialAccessWriteAheadLog { @Rule public TestName testName = new TestName(); + + @Test + public void testUpdateWithExternalFile() throws IOException { + final DummyRecordSerde serde = new DummyRecordSerde(); + final SequentialAccessWriteAheadLog repo = createWriteRepo(serde); + + final List records = new ArrayList<>(); + for (int i = 0; i < 350_000; i++) { + final DummyRecord record = new DummyRecord(String.valueOf(i), UpdateType.CREATE); + records.add(record); + } + + repo.update(records, false); + repo.shutdown(); + + assertEquals(1, serde.getExternalFileReferences().size()); + + final SequentialAccessWriteAheadLog recoveryRepo = createRecoveryRepo(); + final Collection recovered = recoveryRepo.recoverRecords(); + + // ensure that we get the same records back, but the order may be different, so wrap both collections + // in a HashSet so that we can compare unordered collections of the same type. 
+ assertEquals(new HashSet<>(records), new HashSet<>(recovered)); + } + + @Test + public void testUpdateWithExternalFileFollowedByInlineUpdate() throws IOException { + final DummyRecordSerde serde = new DummyRecordSerde(); + final SequentialAccessWriteAheadLog repo = createWriteRepo(serde); + + final List records = new ArrayList<>(); + for (int i = 0; i < 350_000; i++) { + final DummyRecord record = new DummyRecord(String.valueOf(i), UpdateType.CREATE); + records.add(record); + } + + repo.update(records, false); + + final DummyRecord subsequentRecord = new DummyRecord("350001", UpdateType.CREATE); + repo.update(Collections.singleton(subsequentRecord), false); + repo.shutdown(); + + assertEquals(1, serde.getExternalFileReferences().size()); + + final SequentialAccessWriteAheadLog recoveryRepo = createRecoveryRepo(); + final Collection recovered = recoveryRepo.recoverRecords(); + + // ensure that we get the same records back, but the order may be different, so wrap both collections + // in a HashSet so that we can compare unordered collections of the same type. + final Set expectedRecords = new HashSet<>(records); + expectedRecords.add(subsequentRecord); + assertEquals(expectedRecords, new HashSet<>(recovered)); + } + @Test public void testRecoverWithNoCheckpoint() throws IOException { final SequentialAccessWriteAheadLog repo = createWriteRepo(); @@ -145,12 +199,15 @@ private SequentialAccessWriteAheadLog createRecoveryRepo() throws I } private SequentialAccessWriteAheadLog createWriteRepo() throws IOException { + return createWriteRepo(new DummyRecordSerde()); + } + + private SequentialAccessWriteAheadLog createWriteRepo(final DummyRecordSerde serde) throws IOException { final File targetDir = new File("target"); final File storageDir = new File(targetDir, testName.getMethodName()); deleteRecursively(storageDir); assertTrue(storageDir.mkdirs()); - final DummyRecordSerde serde = new DummyRecordSerde(); final SerDeFactory serdeFactory = new SingletonSerDeFactory<>(serde); final SequentialAccessWriteAheadLog repo = new SequentialAccessWriteAheadLog<>(storageDir, serdeFactory); diff --git a/nifi-commons/nifi-write-ahead-log/src/test/java/org/wali/DummyRecordSerde.java b/nifi-commons/nifi-write-ahead-log/src/test/java/org/wali/DummyRecordSerde.java index 1f6aede9dbf5..920349365ba7 100644 --- a/nifi-commons/nifi-write-ahead-log/src/test/java/org/wali/DummyRecordSerde.java +++ b/nifi-commons/nifi-write-ahead-log/src/test/java/org/wali/DummyRecordSerde.java @@ -16,17 +16,31 @@ */ package org.wali; +import java.io.BufferedInputStream; import java.io.DataInputStream; import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; import java.io.IOException; +import java.io.InputStream; +import java.util.Collections; +import java.util.HashSet; import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.LinkedBlockingQueue; public class DummyRecordSerde implements SerDe { + private static final int INLINE_RECORD_INDICATOR = 1; + private static final int EXTERNAL_FILE_INDICATOR = 8; private int throwIOEAfterNserializeEdits = -1; private int throwOOMEAfterNserializeEdits = -1; private int serializeEditCount = 0; + private final Set externalFilesWritten = new HashSet<>(); + private Queue externalRecords; + @SuppressWarnings("fallthrough") @Override public void serializeEdit(final DummyRecord previousState, final DummyRecord record, final DataOutputStream out) throws IOException { @@ -37,6 +51,7 @@ public void serializeEdit(final DummyRecord 
previousState, final DummyRecord rec throw new OutOfMemoryError("Serialized " + (serializeEditCount - 1) + " records successfully, so now it's time to throw OOME"); } + out.write(INLINE_RECORD_INDICATOR); out.writeUTF(record.getUpdateType().name()); out.writeUTF(record.getId()); @@ -72,6 +87,57 @@ public void serializeRecord(final DummyRecord record, final DataOutputStream out @Override @SuppressWarnings("fallthrough") public DummyRecord deserializeRecord(final DataInputStream in, final int version) throws IOException { + if (externalRecords != null) { + final DummyRecord record = externalRecords.poll(); + if (record != null) { + return record; + } + + externalRecords = null; + } + + final int recordLocationIndicator = in.read(); + if (recordLocationIndicator == EXTERNAL_FILE_INDICATOR) { + final String externalFilename = in.readUTF(); + final File externalFile = new File(externalFilename); + + try (final InputStream fis = new FileInputStream(externalFile); + final InputStream bufferedIn = new BufferedInputStream(fis); + final DataInputStream dis = new DataInputStream(bufferedIn)) { + + externalRecords = new LinkedBlockingQueue<>(); + + DummyRecord record; + while ((record = deserializeRecordInline(dis, version, true)) != null) { + externalRecords.offer(record); + } + + return externalRecords.poll(); + } + } else if (recordLocationIndicator == INLINE_RECORD_INDICATOR) { + return deserializeRecordInline(in, version, false); + } else { + throw new IOException("Encountered invalid record location indicator: " + recordLocationIndicator); + } + } + + @Override + public boolean isMoreInExternalFile() { + return externalRecords != null && !externalRecords.isEmpty(); + } + + private DummyRecord deserializeRecordInline(final DataInputStream in, final int version, final boolean expectInlineRecordIndicator) throws IOException { + if (expectInlineRecordIndicator) { + final int locationIndicator = in.read(); + if (locationIndicator < 0) { + return null; + } + + if (locationIndicator != INLINE_RECORD_INDICATOR) { + throw new IOException("Expected inline record indicator but encountered " + locationIndicator); + } + } + final String updateTypeName = in.readUTF(); final UpdateType updateType = UpdateType.valueOf(updateTypeName); final String id = in.readUTF(); @@ -135,4 +201,21 @@ public void setThrowOOMEAfterNSerializeEdits(final int n) { public String getLocation(final DummyRecord record) { return record.getSwapLocation(); } + + @Override + public boolean isWriteExternalFileReferenceSupported() { + return true; + } + + @Override + public void writeExternalFileReference(final File externalFile, final DataOutputStream out) throws IOException { + out.write(EXTERNAL_FILE_INDICATOR); + out.writeUTF(externalFile.getAbsolutePath()); + + externalFilesWritten.add(externalFile); + } + + public Set getExternalFileReferences() { + return Collections.unmodifiableSet(externalFilesWritten); + } } diff --git a/nifi-docs/src/main/asciidoc/administration-guide.adoc b/nifi-docs/src/main/asciidoc/administration-guide.adoc index d4c4fc9f0ed6..05aeff755a4d 100644 --- a/nifi-docs/src/main/asciidoc/administration-guide.adoc +++ b/nifi-docs/src/main/asciidoc/administration-guide.adoc @@ -3934,6 +3934,13 @@ from the remote node before considering the communication with the node a failur to the cluster. It provides an additional layer of security. This value is blank by default, meaning that no firewall file is to be used. 
|`nifi.cluster.flow.election.max.wait.time`|Specifies the amount of time to wait before electing a Flow as the "correct" Flow. If the number of Nodes that have voted is equal to the number specified by the `nifi.cluster.flow.election.max.candidates` property, the cluster will not wait this long. The default value is `5 mins`. Note that the time starts as soon as the first vote is cast. |`nifi.cluster.flow.election.max.candidates`|Specifies the number of Nodes required in the cluster to cause early election of Flows. This allows the Nodes in the cluster to avoid having to wait a long time before starting processing if we reach at least this number of nodes in the cluster. +|`nifi.cluster.flow.election.max.wait.time`|Specifies the amount of time to wait before electing a Flow as the "correct" Flow. If the number of Nodes that have voted is equal to the number specified + by the `nifi.cluster.flow.election.max.candidates` property, the cluster will not wait this long. The default value is `5 mins`. Note that the time starts as soon as the first vote is cast. +|`nifi.cluster.flow.election.max.candidates`|Specifies the number of Nodes required in the cluster to cause early election of Flows. This allows the Nodes in the cluster to avoid having to wait a +long time before starting processing if we reach at least this number of nodes in the cluster. +|`nifi.cluster.load.balance.port`|Specifies the port to listen on for incoming connections for load balancing data across the cluster. The default value is `6342`. +|`nifi.cluster.load.balance.host`|Specifies the hostname to listen on for incoming connections for load balancing data across the cluster. If not specified, will default to the value used by the `nifi +.cluster.node.address` property. |==== [[claim_management]] diff --git a/nifi-external/nifi-spark-receiver/pom.xml b/nifi-external/nifi-spark-receiver/pom.xml index 9a7083bcb8d3..581ee4cb7693 100644 --- a/nifi-external/nifi-spark-receiver/pom.xml +++ b/nifi-external/nifi-spark-receiver/pom.xml @@ -43,7 +43,7 @@ com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 diff --git a/nifi-external/nifi-storm-spout/pom.xml b/nifi-external/nifi-storm-spout/pom.xml index 45fc7a33cbb6..d81249f4bbf2 100644 --- a/nifi-external/nifi-storm-spout/pom.xml +++ b/nifi-external/nifi-storm-spout/pom.xml @@ -32,7 +32,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.apache.nifi diff --git a/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/FlowFileQueue.java b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/FlowFileQueue.java index 9e637b01da42..8870f1d1ce18 100644 --- a/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/FlowFileQueue.java +++ b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/FlowFileQueue.java @@ -16,18 +16,17 @@ */ package org.apache.nifi.controller.queue; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.SwapSummary; +import org.apache.nifi.flowfile.FlowFilePrioritizer; +import org.apache.nifi.processor.FlowFileFilter; + import java.io.IOException; import java.util.Collection; import java.util.List; -import java.util.Queue; import java.util.Set; import java.util.concurrent.TimeUnit; -import org.apache.nifi.controller.repository.FlowFileRecord; -import org.apache.nifi.controller.repository.SwapSummary; -import org.apache.nifi.flowfile.FlowFilePrioritizer; -import org.apache.nifi.processor.FlowFileFilter; - public interface FlowFileQueue { /** @@ -59,8 +58,6 
@@ public interface FlowFileQueue { */ void purgeSwapFiles(); - int getSwapFileCount(); - /** * Resets the comparator used by this queue to maintain order. * @@ -108,33 +105,21 @@ public interface FlowFileQueue { */ boolean isActiveQueueEmpty(); - /** - * Returns a QueueSize that represents all FlowFiles that are 'unacknowledged'. A FlowFile - * is considered to be unacknowledged if it has been pulled from the queue by some component - * but the session that pulled the FlowFile has not yet been committed or rolled back. - * - * @return a QueueSize that represents all FlowFiles that are 'unacknowledged'. - */ - QueueSize getUnacknowledgedQueueSize(); - - QueueSize getActiveQueueSize(); - - QueueSize getSwapQueueSize(); - void acknowledge(FlowFileRecord flowFile); void acknowledge(Collection flowFiles); + /** + * @return true if at least one FlowFile is unacknowledged, false if all FlowFiles that have been dequeued have been acknowledged + */ + boolean isUnacknowledgedFlowFile(); + /** * @return true if maximum queue size has been reached or exceeded; false * otherwise */ boolean isFull(); - boolean isAnyActiveFlowFilePenalized(); - - boolean isAllActiveFlowFilesPenalized(); - /** * places the given file into the queue * @@ -163,18 +148,6 @@ public interface FlowFileQueue { */ List poll(int maxResults, Set expiredRecords); - /** - * Drains flow files from the given source queue into the given destination - * list. - * - * @param sourceQueue queue to drain from - * @param destination Collection to drain to - * @param maxResults max number to drain - * @param expiredRecords for expired records - * @return size (bytes) of flow files drained from queue - */ - long drainQueue(Queue sourceQueue, List destination, int maxResults, Set expiredRecords); - List poll(FlowFileFilter filter, Set expiredRecords); String getFlowFileExpiration(); @@ -187,7 +160,7 @@ public interface FlowFileQueue { * Initiates a request to drop all FlowFiles in this queue. This method returns * a DropFlowFileStatus that can be used to determine the current state of the request. 
* Additionally, the DropFlowFileStatus provides a request identifier that can then be - * passed to the {@link #getDropFlowFileStatus(String)} and {@link #cancelDropFlowFileStatus(String)} + * passed to the {@link #getDropFlowFileStatus(String)} and {@link #cancelDropFlowFileRequest(String)} * methods in order to obtain the status later or cancel a request * * @param requestIdentifier the identifier of the Drop FlowFile Request @@ -200,7 +173,7 @@ public interface FlowFileQueue { /** * Returns the current status of a Drop FlowFile Request that was initiated via the - * {@link #dropFlowFiles()} method that has the given identifier + * {@link #dropFlowFiles(String, String)} method that has the given identifier * * @param requestIdentifier the identifier of the Drop FlowFile Request * @return the status for the request with the given identifier, or null if no @@ -244,7 +217,7 @@ public interface FlowFileQueue { ListFlowFileStatus listFlowFiles(String requestIdentifier, int maxResults); /** - * Returns the current status of a List FlowFile Request that was initiated via the {@link #listFlowFiles(String)} + * Returns the current status of a List FlowFile Request that was initiated via the {@link #listFlowFiles(String, int)} * method that has the given identifier * * @param requestIdentifier the identifier of the Drop FlowFile Request @@ -282,4 +255,48 @@ public interface FlowFileQueue { * @throws IllegalStateException if the queue is not in a state in which a listing can be performed */ void verifyCanList() throws IllegalStateException; + + /** + * Returns diagnostic information about the queue + */ + QueueDiagnostics getQueueDiagnostics(); + + void lock(); + + void unlock(); + + void setLoadBalanceStrategy(LoadBalanceStrategy strategy, String partitioningAttribute); + + /** + * Offloads the flowfiles in the queue to other nodes. This disables the queue from partition flowfiles locally. + *

+ * This operation is a no-op if the node that contains this queue is not in a cluster. + */ + void offloadQueue(); + + /** + * Resets a queue that has previously been offloaded. This allows the queue to partition flowfiles locally, and + * has no other effect on processors or remote process groups. + *

+ * This operation is a no-op if the queue is not currently offloaded or the node that contains this queue is not + * clustered. + */ + void resetOffloadedQueue(); + + LoadBalanceStrategy getLoadBalanceStrategy(); + + void setLoadBalanceCompression(LoadBalanceCompression compression); + + LoadBalanceCompression getLoadBalanceCompression(); + + String getPartitioningAttribute(); + + void startLoadBalancing(); + + void stopLoadBalancing(); + + /** + * @return true if the queue is actively transferring data to another node, false otherwise + */ + boolean isActivelyLoadBalancing(); } diff --git a/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalanceCompression.java b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalanceCompression.java new file mode 100644 index 000000000000..95c0b6fdb925 --- /dev/null +++ b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalanceCompression.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +public enum LoadBalanceCompression { + /** + * FlowFiles will not be compressed + */ + DO_NOT_COMPRESS, + + /** + * FlowFiles' attributes will be compressed, but the FlowFiles' contents will not be + */ + COMPRESS_ATTRIBUTES_ONLY, + + /** + * FlowFiles' attributes and content will be compressed + */ + COMPRESS_ATTRIBUTES_AND_CONTENT; +} diff --git a/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalanceStrategy.java b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalanceStrategy.java new file mode 100644 index 000000000000..305354861d31 --- /dev/null +++ b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalanceStrategy.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +public enum LoadBalanceStrategy { + /** + * Do not load balance FlowFiles between nodes in the cluster. 
+ */ + DO_NOT_LOAD_BALANCE, + + /** + * Determine which node to send a given FlowFile to based on the value of a user-specified FlowFile Attribute. + * All FlowFiles that have the same value for said Attribute will be sent to the same node in the cluster. + */ + PARTITION_BY_ATTRIBUTE, + + /** + * FlowFiles will be distributed to nodes in the cluster in a Round-Robin fashion. + */ + ROUND_ROBIN, + + /** + * All FlowFiles will be sent to the same node. Which node they are sent to is not defined. + */ + SINGLE_NODE; +} diff --git a/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalancedFlowFileQueue.java b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalancedFlowFileQueue.java new file mode 100644 index 000000000000..f0eff27ef2e0 --- /dev/null +++ b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LoadBalancedFlowFileQueue.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.util.Collection; + +public interface LoadBalancedFlowFileQueue extends FlowFileQueue { + /** + * Adds the given FlowFiles to this queue, as they have been received from another node in the cluster + * @param flowFiles the FlowFiles received from the peer + */ + void receiveFromPeer(Collection flowFiles); + + /** + * Distributes the given FlowFiles to the appropriate partitions. Unlike the {@link #putAll(Collection)} method, + * this does not alter the size of the FlowFile Queue itself, as it is intended only to place the FlowFiles into + * their appropriate partitions + * + * @param flowFiles the FlowFiles to distribute + */ + void distributeToPartitions(Collection flowFiles); + + /** + * Notifies the queue that the given FlowFiles have been successfully transferred to another node + * @param flowFiles the FlowFiles that were transferred + */ + void onTransfer(Collection flowFiles); + + /** + * Notifies the queue the a transaction containing the given FlowFiles was aborted + * @param flowFiles the FlowFiles in the transaction + */ + void onAbort(Collection flowFiles); + + /** + * Handles updating the repositories for the given FlowFiles, which have been expired + * @param flowFiles the expired FlowFiles + */ + void handleExpiredRecords(Collection flowFiles); + + /** + * There are times when we want to ensure that if a node in the cluster reaches the point where backpressure is engaged, that we + * honor that backpressure and do not attempt to load balance from a different node in the cluster to that node. There are other times + * when we may want to push data to the remote node even though it has already reached its backpressure threshold. 
This method indicates + * whether or not we want to propagate that backpressure indicator across the cluster. + * + * @return true if backpressure on Node A should prevent Node B from sending to it, false if Node B should send to Node A + * even when backpressure is engaged on Node A. + */ + boolean isPropagateBackpressureAcrossNodes(); + +} diff --git a/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LocalQueuePartitionDiagnostics.java b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LocalQueuePartitionDiagnostics.java new file mode 100644 index 000000000000..cc097b1a52e9 --- /dev/null +++ b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/LocalQueuePartitionDiagnostics.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +public interface LocalQueuePartitionDiagnostics { + QueueSize getUnacknowledgedQueueSize(); + + QueueSize getActiveQueueSize(); + + QueueSize getSwapQueueSize(); + + int getSwapFileCount(); + + boolean isAnyActiveFlowFilePenalized(); + + boolean isAllActiveFlowFilesPenalized(); +} diff --git a/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/QueueDiagnostics.java b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/QueueDiagnostics.java new file mode 100644 index 000000000000..4b5d93fd300a --- /dev/null +++ b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/QueueDiagnostics.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue; + +import java.util.List; + +public interface QueueDiagnostics { + + LocalQueuePartitionDiagnostics getLocalQueuePartitionDiagnostics(); + + List getRemoteQueuePartitionDiagnostics(); + +} diff --git a/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/RemoteQueuePartitionDiagnostics.java b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/RemoteQueuePartitionDiagnostics.java new file mode 100644 index 000000000000..caa6b2daef12 --- /dev/null +++ b/nifi-framework-api/src/main/java/org/apache/nifi/controller/queue/RemoteQueuePartitionDiagnostics.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +public interface RemoteQueuePartitionDiagnostics { + String getNodeIdentifier(); + + QueueSize getUnacknowledgedQueueSize(); + + QueueSize getActiveQueueSize(); + + QueueSize getSwapQueueSize(); + + int getSwapFileCount(); +} diff --git a/nifi-framework-api/src/main/java/org/apache/nifi/controller/repository/FlowFileSwapManager.java b/nifi-framework-api/src/main/java/org/apache/nifi/controller/repository/FlowFileSwapManager.java index 7092a6fe2648..8d9b38f25e4e 100644 --- a/nifi-framework-api/src/main/java/org/apache/nifi/controller/repository/FlowFileSwapManager.java +++ b/nifi-framework-api/src/main/java/org/apache/nifi/controller/repository/FlowFileSwapManager.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; import org.apache.nifi.controller.queue.FlowFileQueue; @@ -44,11 +45,12 @@ public interface FlowFileSwapManager { * * @param flowFiles the FlowFiles to swap out to external storage * @param flowFileQueue the queue that the FlowFiles belong to + * @param partitionName the name of the partition within the queue, or null if the queue is not partitioned * @return the location of the externally stored swap file * * @throws IOException if unable to swap the FlowFiles out */ - String swapOut(List flowFiles, FlowFileQueue flowFileQueue) throws IOException; + String swapOut(List flowFiles, FlowFileQueue flowFileQueue, final String partitionName) throws IOException; /** * Recovers the FlowFiles from the swap file that lives at the given location. 
This action @@ -82,11 +84,32 @@ public interface FlowFileSwapManager { * Determines swap files that exist for the given FlowFileQueue * * @param flowFileQueue the queue for which the FlowFiles should be recovered + * @param partitionName the partition within the FlowFileQueue to recover, or null if the queue is not partitioned * * @return all swap locations that have been identified for the given queue, in the order that they should * be swapped back in */ - List recoverSwapLocations(FlowFileQueue flowFileQueue) throws IOException; + List recoverSwapLocations(FlowFileQueue flowFileQueue, String partitionName) throws IOException; + + /** + * Determines the names of each of the Partitions for which there are swap files for the given queue + * + * @param queue the queue to which the FlowFiles belong + * + * @return the Set of names of all Partitions for which there are swap files + * @throws IOException if unable to read the information from the underlying storage + */ + Set getSwappedPartitionNames(FlowFileQueue queue) throws IOException; + + /** + * Updates the name of the partition that owns a given swap file + * + * @param swapLocation the location of the swap file + * @param newPartitionName the new name of the new partition that owns the swap file + * @return the new swap location + * @throws IOException if unable to rename the swap file + */ + String changePartitionName(String swapLocation, String newPartitionName) throws IOException; /** * Parses the contents of the swap file at the given location and provides a SwapSummary that provides diff --git a/nifi-nar-bundles/nifi-amqp-bundle/nifi-amqp-processors/pom.xml b/nifi-nar-bundles/nifi-amqp-bundle/nifi-amqp-processors/pom.xml index f317e8c69179..4a61ee4021ad 100644 --- a/nifi-nar-bundles/nifi-amqp-bundle/nifi-amqp-processors/pom.xml +++ b/nifi-nar-bundles/nifi-amqp-bundle/nifi-amqp-processors/pom.xml @@ -20,7 +20,7 @@ language governing permissions and limitations under the License. --> jar - 5.2.0 + 5.4.1 diff --git a/nifi-nar-bundles/nifi-atlas-bundle/nifi-atlas-reporting-task/pom.xml b/nifi-nar-bundles/nifi-atlas-bundle/nifi-atlas-reporting-task/pom.xml index 21bbff3ace60..5267723c1411 100644 --- a/nifi-nar-bundles/nifi-atlas-bundle/nifi-atlas-reporting-task/pom.xml +++ b/nifi-nar-bundles/nifi-atlas-bundle/nifi-atlas-reporting-task/pom.xml @@ -48,6 +48,13 @@ org.apache.nifi nifi-kerberos-credentials-service-api + + + commons-beanutils + commons-beanutils + 1.9.3 + org.apache.atlas atlas-client @@ -64,6 +71,11 @@ it.unimi.dsi fastutil + + + commons-beanutils + commons-beanutils-core + @@ -112,7 +124,6 @@ junit junit - 4.11 test diff --git a/nifi-nar-bundles/nifi-atlas-bundle/nifi-atlas-reporting-task/src/test/java/org/apache/nifi/atlas/emulator/AtlasAPIV2ServerEmulator.java b/nifi-nar-bundles/nifi-atlas-bundle/nifi-atlas-reporting-task/src/test/java/org/apache/nifi/atlas/emulator/AtlasAPIV2ServerEmulator.java index 577e58b5be4f..b1478100bc5d 100644 --- a/nifi-nar-bundles/nifi-atlas-bundle/nifi-atlas-reporting-task/src/test/java/org/apache/nifi/atlas/emulator/AtlasAPIV2ServerEmulator.java +++ b/nifi-nar-bundles/nifi-atlas-bundle/nifi-atlas-reporting-task/src/test/java/org/apache/nifi/atlas/emulator/AtlasAPIV2ServerEmulator.java @@ -171,8 +171,7 @@ private void updateEntityByNotification(AtlasEntity entity) { final Object r; switch (k) { case "inputs": - case "outputs": - { + case "outputs": { // If a reference doesn't have guid, then find it. 
r = resolveIOReference(v); } @@ -211,7 +210,7 @@ private void createServer() throws Exception { httpConnector = new ServerConnector(server); httpConnector.setPort(21000); - server.setConnectors(new Connector[] {httpConnector}); + server.setConnectors(new Connector[]{httpConnector}); servletHandler.addServletWithMapping(TypeDefsServlet.class, "/types/typedefs/"); servletHandler.addServletWithMapping(EntityBulkServlet.class, "/entity/bulk/"); @@ -334,6 +333,7 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws Se public static class EntityGuidServlet extends HttpServlet { private static Pattern URL_PATTERN = Pattern.compile(".+/guid/([^/]+)"); + @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { final Matcher matcher = URL_PATTERN.matcher(req.getRequestURI()); @@ -358,6 +358,7 @@ private static AtlasEntity.AtlasEntityWithExtInfo createSearchResult(AtlasEntity public static class SearchByUniqueAttributeServlet extends HttpServlet { private static Pattern URL_PATTERN = Pattern.compile(".+/uniqueAttribute/type/([^/]+)"); + @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { // http://localhost:21000/api/atlas/v2/entity/uniqueAttribute/type/nifi_flow_path?attr:qualifiedName=2e9a2852-228f-379b-0000-000000000000@example @@ -479,7 +480,6 @@ private void traverse(Set seen, AtlasEntity s, List links, Ma } } - // Traverse entities those consume this entity as their input. final List outGoings = Stream.of(outgoingEntities.getOrDefault(toTypedQname(s), Collections.emptyList()), outgoingEntities.getOrDefault(s.getGuid(), Collections.emptyList())).flatMap(List::stream).collect(Collectors.toList()); @@ -567,7 +567,6 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws Se traverse(seen, s, links, nodeIndices, outgoingEntities); }); - } } @@ -596,7 +595,7 @@ protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws Se // Group links by its target, and configure each weight value. // E.g. 1 -> 3 and 2 -> 3, then 1 (0.5) -> 3 and 2 (0.5) -> 3. ls.stream().collect(Collectors.groupingBy(Link::getTarget)) - .forEach((t, ls2SameTgt) -> ls2SameTgt.forEach(l -> l.setValue(1.0 / (double) ls2SameTgt.size()))); + .forEach((t, ls2SameTgt) -> ls2SameTgt.forEach(l -> l.setValue(1.0 / (double) ls2SameTgt.size()))); } }); diff --git a/nifi-nar-bundles/nifi-atlas-bundle/pom.xml b/nifi-nar-bundles/nifi-atlas-bundle/pom.xml index e4db5a651fe1..d831f25a1dac 100644 --- a/nifi-nar-bundles/nifi-atlas-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-atlas-bundle/pom.xml @@ -36,6 +36,12 @@ + + + io.netty + netty + 3.7.1.Final + org.apache.nifi nifi-atlas-reporting-task diff --git a/nifi-nar-bundles/nifi-avro-bundle/nifi-avro-processors/pom.xml b/nifi-nar-bundles/nifi-avro-bundle/nifi-avro-processors/pom.xml index ae1758b0bda1..4776b2151251 100644 --- a/nifi-nar-bundles/nifi-avro-bundle/nifi-avro-processors/pom.xml +++ b/nifi-nar-bundles/nifi-avro-bundle/nifi-avro-processors/pom.xml @@ -34,7 +34,7 @@ language governing permissions and limitations under the License. --> org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.apache.avro @@ -44,7 +44,7 @@ language governing permissions and limitations under the License. 
--> com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 commons-codec diff --git a/nifi-nar-bundles/nifi-aws-bundle/nifi-aws-abstract-processors/pom.xml b/nifi-nar-bundles/nifi-aws-bundle/nifi-aws-abstract-processors/pom.xml index 5a9c35739603..22f721a091ab 100644 --- a/nifi-nar-bundles/nifi-aws-bundle/nifi-aws-abstract-processors/pom.xml +++ b/nifi-nar-bundles/nifi-aws-bundle/nifi-aws-abstract-processors/pom.xml @@ -28,6 +28,16 @@ com.amazonaws aws-java-sdk-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.dataformat + jackson-dataformat-cbor + + com.amazonaws @@ -61,7 +71,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.apache.nifi @@ -85,6 +95,19 @@ org.apache.nifi nifi-proxy-configuration-api + + + + com.fasterxml.jackson.core + jackson-databind + 2.9.7 + + + + com.fasterxml.jackson.dataformat + jackson-dataformat-cbor + 2.9.7 + diff --git a/nifi-nar-bundles/nifi-aws-bundle/nifi-aws-service-api/pom.xml b/nifi-nar-bundles/nifi-aws-bundle/nifi-aws-service-api/pom.xml index 3de49a34b2b0..b82b75a90bc0 100644 --- a/nifi-nar-bundles/nifi-aws-bundle/nifi-aws-service-api/pom.xml +++ b/nifi-nar-bundles/nifi-aws-bundle/nifi-aws-service-api/pom.xml @@ -27,11 +27,34 @@ com.amazonaws aws-java-sdk-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.dataformat + jackson-dataformat-cbor + + org.apache.nifi nifi-api + + + + com.fasterxml.jackson.core + jackson-databind + 2.9.7 + + + + com.fasterxml.jackson.dataformat + jackson-dataformat-cbor + 2.9.7 + diff --git a/nifi-nar-bundles/nifi-aws-bundle/pom.xml b/nifi-nar-bundles/nifi-aws-bundle/pom.xml index ed421175ea76..b3bb9437e48c 100644 --- a/nifi-nar-bundles/nifi-aws-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-aws-bundle/pom.xml @@ -26,7 +26,7 @@ pom - 1.11.319 + 1.11.412 diff --git a/nifi-nar-bundles/nifi-azure-bundle/nifi-azure-processors/src/main/java/org/apache/nifi/processors/azure/storage/DeleteAzureBlobStorage.java b/nifi-nar-bundles/nifi-azure-bundle/nifi-azure-processors/src/main/java/org/apache/nifi/processors/azure/storage/DeleteAzureBlobStorage.java index a3f66d80c8f4..603bc697bd62 100644 --- a/nifi-nar-bundles/nifi-azure-bundle/nifi-azure-processors/src/main/java/org/apache/nifi/processors/azure/storage/DeleteAzureBlobStorage.java +++ b/nifi-nar-bundles/nifi-azure-bundle/nifi-azure-processors/src/main/java/org/apache/nifi/processors/azure/storage/DeleteAzureBlobStorage.java @@ -21,28 +21,56 @@ import com.microsoft.azure.storage.blob.CloudBlob; import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; import org.apache.nifi.annotation.behavior.InputRequirement; import org.apache.nifi.annotation.behavior.InputRequirement.Requirement; import org.apache.nifi.annotation.documentation.CapabilityDescription; import org.apache.nifi.annotation.documentation.SeeAlso; import org.apache.nifi.annotation.documentation.Tags; +import org.apache.nifi.components.AllowableValue; +import org.apache.nifi.components.PropertyDescriptor; import org.apache.nifi.flowfile.FlowFile; import org.apache.nifi.processor.ProcessContext; import org.apache.nifi.processor.ProcessSession; import org.apache.nifi.processor.exception.ProcessException; +import org.apache.nifi.processor.util.StandardValidators; import org.apache.nifi.processors.azure.AbstractAzureBlobProcessor; import org.apache.nifi.processors.azure.storage.utils.AzureStorageUtils; import 
java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.TimeUnit; - @Tags({ "azure", "microsoft", "cloud", "storage", "blob" }) @SeeAlso({ ListAzureBlobStorage.class, FetchAzureBlobStorage.class, PutAzureBlobStorage.class}) @CapabilityDescription("Deletes the provided blob from Azure Storage") @InputRequirement(Requirement.INPUT_REQUIRED) public class DeleteAzureBlobStorage extends AbstractAzureBlobProcessor { + private static final AllowableValue DELETE_SNAPSHOTS_NONE = new AllowableValue(DeleteSnapshotsOption.NONE.name(), "None", "Delete the blob only."); + + private static final AllowableValue DELETE_SNAPSHOTS_ALSO = new AllowableValue(DeleteSnapshotsOption.INCLUDE_SNAPSHOTS.name(), "Include Snapshots", "Delete the blob and its snapshots."); + + private static final AllowableValue DELETE_SNAPSHOTS_ONLY = new AllowableValue(DeleteSnapshotsOption.DELETE_SNAPSHOTS_ONLY.name(), "Delete Snapshots Only", "Delete only the blob's snapshots."); + + private static final PropertyDescriptor DELETE_SNAPSHOTS_OPTION = new PropertyDescriptor.Builder() + .name("delete-snapshots-option") + .displayName("Delete Snapshots Option") + .description("Specifies the snapshot deletion options to be used when deleting a blob.") + .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) + .allowableValues(DELETE_SNAPSHOTS_NONE, DELETE_SNAPSHOTS_ALSO, DELETE_SNAPSHOTS_ONLY) + .defaultValue(DELETE_SNAPSHOTS_NONE.getValue()) + .required(true) + .build(); + + @Override + public List getSupportedPropertyDescriptors() { + List properties = new ArrayList<>(super.getSupportedPropertyDescriptors()); + properties.add(DELETE_SNAPSHOTS_OPTION); + return properties; + } + @Override public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException { FlowFile flowFile = session.get(); @@ -52,8 +80,9 @@ public void onTrigger(ProcessContext context, ProcessSession session) throws Pro } final long startNanos = System.nanoTime(); - String containerName = context.getProperty(AzureStorageUtils.CONTAINER).evaluateAttributeExpressions(flowFile).getValue(); - String blobPath = context.getProperty(BLOB).evaluateAttributeExpressions(flowFile).getValue(); + final String containerName = context.getProperty(AzureStorageUtils.CONTAINER).evaluateAttributeExpressions(flowFile).getValue(); + final String blobPath = context.getProperty(BLOB).evaluateAttributeExpressions(flowFile).getValue(); + final String deleteSnapshotOptions = context.getProperty(DELETE_SNAPSHOTS_OPTION).getValue(); try { CloudBlobClient blobClient = AzureStorageUtils.createCloudBlobClient(context, getLogger(), flowFile); @@ -62,12 +91,12 @@ public void onTrigger(ProcessContext context, ProcessSession session) throws Pro final OperationContext operationContext = new OperationContext(); AzureStorageUtils.setProxy(operationContext, context); - blob.deleteIfExists(null, null, null, operationContext); + blob.deleteIfExists(DeleteSnapshotsOption.valueOf(deleteSnapshotOptions), null, null, operationContext); session.transfer(flowFile, REL_SUCCESS); final long transferMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos); session.getProvenanceReporter().send(flowFile, blob.getSnapshotQualifiedUri().toString(), transferMillis); - }catch ( StorageException | URISyntaxException e) { + } catch ( StorageException | URISyntaxException e) { getLogger().error("Failed to delete the specified blob {} from Azure Storage. 
Routing to failure", new Object[]{blobPath}, e); flowFile = session.penalize(flowFile); session.transfer(flowFile, REL_FAILURE); diff --git a/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-processors/pom.xml b/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-processors/pom.xml index d5c36d45c0ff..3359b3dc81d9 100644 --- a/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-processors/pom.xml +++ b/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-processors/pom.xml @@ -79,5 +79,10 @@ nifi-mock-record-utils 1.8.0-SNAPSHOT + + org.apache.commons + commons-text + 1.4 + diff --git a/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-processors/src/main/java/org/apache/nifi/processors/cassandra/QueryCassandra.java b/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-processors/src/main/java/org/apache/nifi/processors/cassandra/QueryCassandra.java index 40b88cc9d7a6..75a66f0a5a4e 100644 --- a/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-processors/src/main/java/org/apache/nifi/processors/cassandra/QueryCassandra.java +++ b/nifi-nar-bundles/nifi-cassandra-bundle/nifi-cassandra-processors/src/main/java/org/apache/nifi/processors/cassandra/QueryCassandra.java @@ -33,7 +33,7 @@ import org.apache.avro.generic.GenericDatumWriter; import org.apache.avro.generic.GenericRecord; import org.apache.avro.io.DatumWriter; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.commons.lang3.StringUtils; import org.apache.nifi.annotation.behavior.EventDriven; import org.apache.nifi.annotation.behavior.InputRequirement; diff --git a/nifi-nar-bundles/nifi-ccda-bundle/nifi-ccda-processors/pom.xml b/nifi-nar-bundles/nifi-ccda-bundle/nifi-ccda-processors/pom.xml index 6aee1f3af34d..7d7dc821ec35 100644 --- a/nifi-nar-bundles/nifi-ccda-bundle/nifi-ccda-processors/pom.xml +++ b/nifi-nar-bundles/nifi-ccda-bundle/nifi-ccda-processors/pom.xml @@ -38,7 +38,7 @@ org.apache.commons commons-jexl3 - 3.0 + 3.1 org.openehealth.ipf.oht.mdht diff --git a/nifi-nar-bundles/nifi-cdc/nifi-cdc-api/pom.xml b/nifi-nar-bundles/nifi-cdc/nifi-cdc-api/pom.xml index 21697115acd9..4abe273a531f 100644 --- a/nifi-nar-bundles/nifi-cdc/nifi-cdc-api/pom.xml +++ b/nifi-nar-bundles/nifi-cdc/nifi-cdc-api/pom.xml @@ -34,12 +34,12 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 diff --git a/nifi-nar-bundles/nifi-couchbase-bundle/nifi-couchbase-processors/pom.xml b/nifi-nar-bundles/nifi-couchbase-bundle/nifi-couchbase-processors/pom.xml index f358a92802d0..17706941f3aa 100644 --- a/nifi-nar-bundles/nifi-couchbase-bundle/nifi-couchbase-processors/pom.xml +++ b/nifi-nar-bundles/nifi-couchbase-bundle/nifi-couchbase-processors/pom.xml @@ -48,7 +48,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.apache.nifi @@ -62,11 +62,6 @@ org.apache.nifi nifi-record-serialization-service-api - - org.apache.nifi - nifi-utils - 1.8.0-SNAPSHOT - org.apache.nifi nifi-mock diff --git a/nifi-nar-bundles/nifi-druid-bundle/nifi-druid-controller-service-api/pom.xml b/nifi-nar-bundles/nifi-druid-bundle/nifi-druid-controller-service-api/pom.xml index d922e97d56ae..972ca59be4b4 100644 --- a/nifi-nar-bundles/nifi-druid-bundle/nifi-druid-controller-service-api/pom.xml +++ b/nifi-nar-bundles/nifi-druid-bundle/nifi-druid-controller-service-api/pom.xml @@ -30,6 +30,12 @@ nifi-api provided + + + commons-collections + commons-collections + 3.2.2 + io.druid tranquility-core_2.11 @@ -130,7 +136,7 @@ org.bouncycastle 
bcprov-jdk15on - 1.59 + 1.60 provided diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/pom.xml b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/pom.xml index 8eb976d1216f..5334aa2c9d86 100644 --- a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/pom.xml +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/pom.xml @@ -57,10 +57,16 @@ provided + + org.apache.nifi + nifi-lookup-service-api + provided + + com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 @@ -77,7 +83,7 @@ org.apache.commons commons-lang3 - 3.4 + 3.8.1 org.slf4j @@ -123,6 +129,33 @@ + + org.apache.nifi + nifi-avro-record-utils + 1.8.0-SNAPSHOT + compile + + + org.apache.nifi + nifi-schema-registry-service-api + compile + + + com.jayway.jsonpath + json-path + 2.4.0 + + + org.mockito + mockito-all + test + + + org.apache.nifi + nifi-record-path + 1.8.0-SNAPSHOT + compile + @@ -140,6 +173,8 @@ 9400 5.6.2 90 + ERROR + ${project.basedir}/src/test/resources/setup.script diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/java/org/apache/nifi/elasticsearch/ElasticSearchLookupService.java b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/java/org/apache/nifi/elasticsearch/ElasticSearchLookupService.java new file mode 100644 index 000000000000..c86477109011 --- /dev/null +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/java/org/apache/nifi/elasticsearch/ElasticSearchLookupService.java @@ -0,0 +1,315 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.elasticsearch; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.jayway.jsonpath.JsonPath; +import org.apache.nifi.annotation.lifecycle.OnEnabled; +import org.apache.nifi.components.PropertyDescriptor; +import org.apache.nifi.components.ValidationResult; +import org.apache.nifi.controller.ConfigurationContext; +import org.apache.nifi.expression.ExpressionLanguageScope; +import org.apache.nifi.lookup.LookupFailureException; +import org.apache.nifi.lookup.LookupService; +import org.apache.nifi.processor.util.StandardValidators; +import org.apache.nifi.record.path.FieldValue; +import org.apache.nifi.record.path.RecordPath; +import org.apache.nifi.schema.access.SchemaNotFoundException; +import org.apache.nifi.serialization.JsonInferenceSchemaRegistryService; +import org.apache.nifi.serialization.record.MapRecord; +import org.apache.nifi.serialization.record.Record; +import org.apache.nifi.serialization.record.RecordSchema; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; + +public class ElasticSearchLookupService extends JsonInferenceSchemaRegistryService implements LookupService { + public static final PropertyDescriptor CLIENT_SERVICE = new PropertyDescriptor.Builder() + .name("el-rest-client-service") + .displayName("Client Service") + .description("An ElasticSearch client service to use for running queries.") + .identifiesControllerService(ElasticSearchClientService.class) + .required(true) + .build(); + public static final PropertyDescriptor INDEX = new PropertyDescriptor.Builder() + .name("el-lookup-index") + .displayName("Index") + .description("The name of the index to read from") + .required(true) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) + .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) + .build(); + + public static final PropertyDescriptor TYPE = new PropertyDescriptor.Builder() + .name("el-lookup-type") + .displayName("Type") + .description("The type of this document (used by Elasticsearch for indexing and searching)") + .required(false) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) + .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) + .build(); + + private ElasticSearchClientService clientService; + + private String index; + private String type; + private ObjectMapper mapper; + + private final List DESCRIPTORS; + + public ElasticSearchLookupService() { + List _desc = new ArrayList<>(); + _desc.addAll(super.getSupportedPropertyDescriptors()); + _desc.add(CLIENT_SERVICE); + _desc.add(INDEX); + _desc.add(TYPE); + DESCRIPTORS = Collections.unmodifiableList(_desc); + } + + private volatile ConcurrentHashMap mappings; + + @Override + @OnEnabled + public void onEnabled(final ConfigurationContext context) { + clientService = context.getProperty(CLIENT_SERVICE).asControllerService(ElasticSearchClientService.class); + index = context.getProperty(INDEX).evaluateAttributeExpressions().getValue(); + type = context.getProperty(TYPE).evaluateAttributeExpressions().getValue(); + mapper = new ObjectMapper(); + + List dynamic = context.getProperties().entrySet().stream() + .filter( e -> e.getKey().isDynamic()) + .map(e -> e.getKey()) + .collect(Collectors.toList()); + + Map _temp = new HashMap<>(); + for (PropertyDescriptor desc : 
dynamic) { + String value = context.getProperty(desc).getValue(); + String name = desc.getName(); + _temp.put(name, RecordPath.compile(value)); + } + + mappings = new ConcurrentHashMap<>(_temp); + + super.onEnabled(context); + } + + @Override + protected List getSupportedPropertyDescriptors() { + return DESCRIPTORS; + } + + @Override + public PropertyDescriptor getSupportedDynamicPropertyDescriptor(String name) { + return new PropertyDescriptor.Builder() + .name(name) + .addValidator((subject, input, context) -> { + ValidationResult.Builder builder = new ValidationResult.Builder(); + try { + JsonPath.parse(input); + builder.valid(true); + } catch (Exception ex) { + builder.explanation(ex.getMessage()) + .valid(false) + .subject(subject); + } + + return builder.build(); + }) + .dynamic(true) + .build(); + } + + @Override + public Optional lookup(Map coordinates) throws LookupFailureException { + Map context = coordinates.entrySet().stream() + .collect(Collectors.toMap( + e -> e.getKey(), + e -> e.getValue().toString() + )); + return lookup(coordinates, context); + } + + @Override + public Optional lookup(Map coordinates, Map context) throws LookupFailureException { + validateCoordinates(coordinates); + + try { + Record record; + if (coordinates.containsKey("_id")) { + record = getById((String)coordinates.get("_id"), context); + } else { + record = getByQuery(coordinates, context); + } + + return record == null ? Optional.empty() : Optional.of(record); + } catch (Exception ex) { + getLogger().error("Error during lookup.", ex); + throw new LookupFailureException(ex); + } + } + + private void validateCoordinates(Map coordinates) throws LookupFailureException { + List reasons = new ArrayList<>(); + + if (coordinates.containsKey("_id") && !(coordinates.get("_id") instanceof String)) { + reasons.add("_id was supplied, but it was not a String."); + } + + if (coordinates.containsKey("_id") && coordinates.size() > 1) { + reasons.add("When _id is used, it can be the only key used in the lookup."); + } + + if (reasons.size() > 0) { + String error = String.join("\n", reasons); + throw new LookupFailureException(error); + } + } + + private Record getById(final String _id, Map context) throws IOException, LookupFailureException, SchemaNotFoundException { + Map query = new HashMap(){{ + put("query", new HashMap() {{ + put("match", new HashMap(){{ + put("_id", _id); + }}); + }}); + }}; + + String json = mapper.writeValueAsString(query); + + SearchResponse response = clientService.search(json, index, type); + + if (response.getNumberOfHits() > 1) { + throw new LookupFailureException(String.format("Expected 1 response, got %d for query %s", + response.getNumberOfHits(), json)); + } else if (response.getNumberOfHits() == 0) { + return null; + } + + final Map source = (Map)response.getHits().get(0).get("_source"); + + RecordSchema toUse = getSchema(context, source, null); + + Record record = new MapRecord(toUse, source); + + if (mappings.size() > 0) { + record = applyMappings(record, source); + } + + return record; + } + + Map getNested(String key, Object value) { + String path = key.substring(0, key.lastIndexOf(".")); + + return new HashMap(){{ + put("path", path); + put("query", new HashMap(){{ + put("match", new HashMap(){{ + put(key, value); + }}); + }}); + }}; + } + + private Map buildQuery(Map coordinates) { + Map query = new HashMap(){{ + put("bool", new HashMap(){{ + put("must", coordinates.entrySet().stream() + .map(e -> new HashMap(){{ + if (e.getKey().contains(".")) { + put("nested", 
getNested(e.getKey(), e.getValue())); + } else { + put("match", new HashMap() {{ + put(e.getKey(), e.getValue()); + }}); + } + }}).collect(Collectors.toList()) + ); + }}); + }}; + + Map outter = new HashMap(){{ + put("size", 1); + put("query", query); + }}; + + return outter; + } + + private Record getByQuery(final Map query, Map context) throws LookupFailureException { + try { + final String json = mapper.writeValueAsString(buildQuery(query)); + + SearchResponse response = clientService.search(json, index, type); + + if (response.getNumberOfHits() == 0) { + return null; + } else { + final Map source = (Map)response.getHits().get(0).get("_source"); + RecordSchema toUse = getSchema(context, source, null); + Record record = new MapRecord(toUse, source); + + if (mappings.size() > 0) { + record = applyMappings(record, source); + } + + return record; + } + + } catch (Exception e) { + throw new LookupFailureException(e); + } + } + + private Record applyMappings(Record record, Map source) { + Record _rec = new MapRecord(record.getSchema(), new HashMap<>()); + + mappings.entrySet().forEach(entry -> { + try { + Object o = JsonPath.read(source, entry.getKey()); + RecordPath path = entry.getValue(); + Optional first = path.evaluate(_rec).getSelectedFields().findFirst(); + if (first.isPresent()) { + first.get().updateValue(o); + } + } catch (Exception ex) { + throw new RuntimeException(ex); + } + }); + + return _rec; + } + + @Override + public Class getValueType() { + return Record.class; + } + + @Override + public Set getRequiredKeys() { + return Collections.emptySet(); + } +} diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService index 161f6526901f..65745fb94638 100644 --- a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/resources/META-INF/services/org.apache.nifi.controller.ControllerService @@ -12,4 +12,5 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +org.apache.nifi.elasticsearch.ElasticSearchLookupService org.apache.nifi.elasticsearch.ElasticSearchClientServiceImpl \ No newline at end of file diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/resources/docs/org.apache.nifi.elasticsearch.ElasticSearchLookupService/additionalDetails.html b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/resources/docs/org.apache.nifi.elasticsearch.ElasticSearchLookupService/additionalDetails.html new file mode 100644 index 000000000000..3b95430e339c --- /dev/null +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/main/resources/docs/org.apache.nifi.elasticsearch.ElasticSearchLookupService/additionalDetails.html @@ -0,0 +1,53 @@ + + + + + + ElasticSearchLookupService + + + + + +

+        Description:
+
+        This lookup service uses ElasticSearch as its data source. Mappings configured in LookupRecord map record paths
+        to paths within an ElasticSearch document. Example:
+
+            /user/name => user.contact.name
+
+        That would map the record path /user/name to an embedded document named contact with a field named name.
+
+        The query assembled from these criteria is a boolean query in which all of the criteria are placed under the
+        must list. Wildcards are not currently supported, and every criterion is translated into a literal match query.
+
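As a purely editorial illustration (not part of this patch), the minimal Java sketch below shows the kind of bool/must query the service could assemble from two lookup coordinates. It assumes only jackson-databind, which the service already uses; the class name QuerySketch and the sample coordinate values are made up.

    import com.fasterxml.jackson.databind.ObjectMapper;

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class QuerySketch {
        public static void main(String[] args) throws Exception {
            // Lookup coordinates as they might arrive from LookupRecord.
            Map<String, Object> coordinates = new LinkedHashMap<>();
            coordinates.put("email", "jane.doe@company.com");
            coordinates.put("phone", "098-765-4321");

            // Each coordinate becomes a literal match clause under the bool/must list.
            List<Map<String, Object>> must = new ArrayList<>();
            for (Map.Entry<String, Object> e : coordinates.entrySet()) {
                Map<String, Object> match = new LinkedHashMap<>();
                match.put(e.getKey(), e.getValue());
                Map<String, Object> clause = new LinkedHashMap<>();
                clause.put("match", match);
                must.add(clause);
            }

            Map<String, Object> bool = new LinkedHashMap<>();
            bool.put("must", must);
            Map<String, Object> wrapper = new LinkedHashMap<>();
            wrapper.put("bool", bool);

            // size: 1 because a single hit is enough to build the enrichment record.
            Map<String, Object> query = new LinkedHashMap<>();
            query.put("size", 1);
            query.put("query", wrapper);

            // Prints: {"size":1,"query":{"bool":{"must":[{"match":{"email":"..."}},{"match":{"phone":"..."}}]}}}
            System.out.println(new ObjectMapper().writeValueAsString(query));
        }
    }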

+        Post-Processing
+
+        Because an ElasticSearch result may be structured differently from the record that this service enriches, users
+        can specify an additional set of mappings on this lookup service that map JsonPath expressions to record paths.
+        Example:
+
+            $.user.contact.email => /user/email_address
+
+        This would copy the field email from the embedded document contact into the record at the path /user/email_address.
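Again as an editorial illustration only (not part of this patch), the sketch below applies one such JsonPath-to-record-path mapping using the json-path library that this patch adds as a dependency; the class name MappingSketch and the sample document are hypothetical.

    import com.jayway.jsonpath.JsonPath;

    import java.util.HashMap;
    import java.util.Map;

    public class MappingSketch {
        public static void main(String[] args) {
            // A hypothetical _source document returned by the lookup query.
            String source = "{\"user\":{\"contact\":{\"email\":\"jane.doe@company.com\"}}}";

            // Dynamic property on the service: $.user.contact.email => /user/email_address
            Object email = JsonPath.read(source, "$.user.contact.email");

            // The value selected by the JsonPath expression is written to the target record path.
            Map<String, Object> enriched = new HashMap<>();
            enriched.put("/user/email_address", email);
            System.out.println(enriched);  // {/user/email_address=jane.doe@company.com}
        }
    }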

+ + \ No newline at end of file diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearch5ClientService_IT.groovy b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearch5ClientService_IT.groovy new file mode 100644 index 000000000000..b5c446895d89 --- /dev/null +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearch5ClientService_IT.groovy @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License") you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.elasticsearch.integration + +import org.apache.nifi.elasticsearch.DeleteOperationResponse +import org.apache.nifi.elasticsearch.ElasticSearchClientService +import org.apache.nifi.elasticsearch.ElasticSearchClientServiceImpl +import org.apache.nifi.elasticsearch.SearchResponse +import org.apache.nifi.util.TestRunner +import org.apache.nifi.util.TestRunners +import org.junit.After +import org.junit.Assert +import org.junit.Before +import org.junit.Test + +import static groovy.json.JsonOutput.prettyPrint +import static groovy.json.JsonOutput.toJson + +class ElasticSearch5ClientService_IT { + + private TestRunner runner + private ElasticSearchClientServiceImpl service + + static final String INDEX = "messages" + static final String TYPE = "message" + + @Before + void before() throws Exception { + runner = TestRunners.newTestRunner(TestControllerServiceProcessor.class) + service = new ElasticSearchClientServiceImpl() + runner.addControllerService("Client Service", service) + runner.setProperty(service, ElasticSearchClientService.HTTP_HOSTS, "http://localhost:9400") + runner.setProperty(service, ElasticSearchClientService.CONNECT_TIMEOUT, "10000") + runner.setProperty(service, ElasticSearchClientService.SOCKET_TIMEOUT, "60000") + runner.setProperty(service, ElasticSearchClientService.RETRY_TIMEOUT, "60000") + try { + runner.enableControllerService(service) + } catch (Exception ex) { + ex.printStackTrace() + throw ex + } + } + + @After + void after() throws Exception { + service.onDisabled() + } + + @Test + void testBasicSearch() throws Exception { + String query = prettyPrint(toJson([ + size: 10, + query: [ + match_all: [:] + ], + aggs: [ + term_counts: [ + terms: [ + field: "msg", + size: 5 + ] + ] + ] + ])) + + + SearchResponse response = service.search(query, "messages", "message") + Assert.assertNotNull("Response was null", response) + + Assert.assertEquals("Wrong count", 15, response.numberOfHits) + Assert.assertFalse("Timed out", response.isTimedOut()) + Assert.assertNotNull("Hits was null", response.getHits()) + 
Assert.assertEquals("Wrong number of hits", 10, response.hits.size()) + Assert.assertNotNull("Aggregations are missing", response.aggregations) + Assert.assertEquals("Aggregation count is wrong", 1, response.aggregations.size()) + + Map termCounts = response.aggregations.get("term_counts") + Assert.assertNotNull("Term counts was missing", termCounts) + def buckets = termCounts.get("buckets") + Assert.assertNotNull("Buckets branch was empty", buckets) + def expected = [ + "one": 1, + "two": 2, + "three": 3, + "four": 4, + "five": 5 + ] + + buckets.each { aggRes -> + def key = aggRes["key"] + def docCount = aggRes["doc_count"] + Assert.assertEquals("${key} did not match.", expected[key], docCount) + } + } + + @Test + void testDeleteByQuery() throws Exception { + String query = prettyPrint(toJson([ + query: [ + match: [ + msg: "five" + ] + ] + ])) + DeleteOperationResponse response = service.deleteByQuery(query, INDEX, TYPE) + Assert.assertNotNull(response) + Assert.assertTrue(response.getTook() > 0) + } + + @Test + void testDeleteById() throws Exception { + final String ID = "1" + DeleteOperationResponse response = service.deleteById(INDEX, TYPE, ID) + Assert.assertNotNull(response) + Assert.assertTrue(response.getTook() > 0) + def doc = service.get(INDEX, TYPE, ID) + Assert.assertNull(doc) + doc = service.get(INDEX, TYPE, "2") + Assert.assertNotNull(doc) + } + + @Test + void testGet() throws IOException { + Map old + 1.upto(15) { index -> + String id = String.valueOf(index) + def doc = service.get(INDEX, TYPE, id) + Assert.assertNotNull("Doc was null", doc) + Assert.assertNotNull("${doc.toString()}\t${doc.keySet().toString()}", doc.get("msg")) + old = doc + } + } +} diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearchLookupServiceTest.groovy b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearchLookupServiceTest.groovy new file mode 100644 index 000000000000..27452a34ac30 --- /dev/null +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearchLookupServiceTest.groovy @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License") you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.elasticsearch.integration + +import org.apache.nifi.elasticsearch.ElasticSearchClientService +import org.apache.nifi.elasticsearch.ElasticSearchLookupService +import org.apache.nifi.schema.access.SchemaAccessUtils +import org.apache.nifi.serialization.record.MapRecord +import org.apache.nifi.util.TestRunner +import org.apache.nifi.util.TestRunners +import org.junit.Assert +import org.junit.Before +import org.junit.Test + +class ElasticSearchLookupServiceTest { + ElasticSearchClientService mockClientService + ElasticSearchLookupService lookupService + TestRunner runner + + @Before + void setup() throws Exception { + mockClientService = new TestElasticSearchClientService() + lookupService = new ElasticSearchLookupService() + def registry = new TestSchemaRegistry() + runner = TestRunners.newTestRunner(TestControllerServiceProcessor.class) + runner.addControllerService("clientService", mockClientService) + runner.addControllerService("lookupService", lookupService) + runner.addControllerService("registry", registry) + runner.enableControllerService(mockClientService) + runner.enableControllerService(registry) + runner.setProperty(lookupService, ElasticSearchLookupService.CLIENT_SERVICE, "clientService") + runner.setProperty(lookupService, ElasticSearchLookupService.INDEX, "users") + runner.setProperty(TestControllerServiceProcessor.CLIENT_SERVICE, "clientService") + runner.setProperty(TestControllerServiceProcessor.LOOKUP_SERVICE, "lookupService") + runner.setProperty(lookupService, SchemaAccessUtils.SCHEMA_REGISTRY, "registry") + runner.setProperty(lookupService, SchemaAccessUtils.SCHEMA_ACCESS_STRATEGY, SchemaAccessUtils.INFER_SCHEMA) + runner.enableControllerService(lookupService) + } + + @Test + void simpleLookupTest() throws Exception { + def coordinates = ["_id": "12345" ] + + Optional result = lookupService.lookup(coordinates) + + Assert.assertNotNull(result) + Assert.assertTrue(result.isPresent()) + MapRecord record = result.get() + Assert.assertEquals("john.smith", record.getAsString("username")) + Assert.assertEquals("testing1234", record.getAsString("password")) + Assert.assertEquals("john.smith@test.com", record.getAsString("email")) + Assert.assertEquals("Software Engineer", record.getAsString("position")) + } +} diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearchLookupService_IT.groovy b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearchLookupService_IT.groovy new file mode 100644 index 000000000000..b2558364b4e9 --- /dev/null +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/ElasticSearchLookupService_IT.groovy @@ -0,0 +1,211 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License") you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.elasticsearch.integration + +import org.apache.nifi.elasticsearch.ElasticSearchClientService +import org.apache.nifi.elasticsearch.ElasticSearchClientServiceImpl +import org.apache.nifi.elasticsearch.ElasticSearchLookupService +import org.apache.nifi.lookup.LookupFailureException +import org.apache.nifi.record.path.RecordPath +import org.apache.nifi.schema.access.SchemaAccessUtils +import org.apache.nifi.schemaregistry.services.SchemaRegistry +import org.apache.nifi.serialization.record.MapRecord +import org.apache.nifi.serialization.record.Record +import org.apache.nifi.serialization.record.RecordSchema +import org.apache.nifi.serialization.record.type.RecordDataType +import org.apache.nifi.util.TestRunner +import org.apache.nifi.util.TestRunners +import org.junit.Assert +import org.junit.Before +import org.junit.Test + +class ElasticSearchLookupService_IT { + private TestRunner runner + private ElasticSearchClientService service + private ElasticSearchLookupService lookupService + + @Before + void before() throws Exception { + runner = TestRunners.newTestRunner(TestControllerServiceProcessor.class) + service = new ElasticSearchClientServiceImpl() + lookupService = new ElasticSearchLookupService() + runner.addControllerService("Client Service", service) + runner.addControllerService("Lookup Service", lookupService) + runner.setProperty(service, ElasticSearchClientService.HTTP_HOSTS, "http://localhost:9400") + runner.setProperty(service, ElasticSearchClientService.CONNECT_TIMEOUT, "10000") + runner.setProperty(service, ElasticSearchClientService.SOCKET_TIMEOUT, "60000") + runner.setProperty(service, ElasticSearchClientService.RETRY_TIMEOUT, "60000") + runner.setProperty(TestControllerServiceProcessor.CLIENT_SERVICE, "Client Service") + runner.setProperty(TestControllerServiceProcessor.LOOKUP_SERVICE, "Lookup Service") + runner.setProperty(lookupService, ElasticSearchLookupService.CLIENT_SERVICE, "Client Service") + runner.setProperty(lookupService, ElasticSearchLookupService.INDEX, "user_details") + runner.setProperty(lookupService, ElasticSearchLookupService.TYPE, "details") + + try { + runner.enableControllerService(service) + runner.enableControllerService(lookupService) + } catch (Exception ex) { + ex.printStackTrace() + throw ex + } + } + + @Test + void testValidity() throws Exception { + setDefaultSchema() + runner.assertValid() + } + + private void setDefaultSchema() throws Exception { + runner.disableControllerService(lookupService) + SchemaRegistry registry = new TestSchemaRegistry() + runner.addControllerService("registry", registry) + runner.setProperty(lookupService, SchemaAccessUtils.SCHEMA_REGISTRY, "registry") + runner.enableControllerService(registry) + runner.enableControllerService(lookupService) + } + + @Test + void lookupById() { + def coordinates = [ _id: "2" ] + Optional result = lookupService.lookup(coordinates) + + Assert.assertNotNull(result) + Assert.assertTrue(result.isPresent()) + def record = result.get() + Assert.assertEquals("jane.doe@company.com", record.getAsString("email")) + Assert.assertEquals("098-765-4321", 
record.getAsString("phone")) + Assert.assertEquals("GHIJK", record.getAsString("accessKey")) + } + + @Test + void testInvalidIdScenarios() { + def coordinates = [ + [ + _id: 1 + ], + [ + _id: "1", "email": "john.smith@company.com" + ] + ] + + coordinates.each { coordinate -> + def exception + + try { + lookupService.lookup(coordinate) + } catch (Exception ex) { + exception = ex + } + + Assert.assertNotNull(exception) + Assert.assertTrue(exception instanceof LookupFailureException) + } + } + + @Test + void lookupByQuery() { + def coordinates = [ "phone": "098-765-4321", "email": "jane.doe@company.com" ] + Optional result = lookupService.lookup(coordinates) + + Assert.assertNotNull(result) + Assert.assertTrue(result.isPresent()) + def record = result.get() + Assert.assertEquals("jane.doe@company.com", record.getAsString("email")) + Assert.assertEquals("098-765-4321", record.getAsString("phone")) + Assert.assertEquals("GHIJK", record.getAsString("accessKey")) + } + + @Test + void testNestedSchema() { + def coordinates = [ + "subField.deeper.deepest.super_secret": "The sky is blue", + "subField.deeper.secretz": "Buongiorno, mondo!!", + "msg": "Hello, world" + ] + + runner.disableControllerService(lookupService) + runner.setProperty(lookupService, ElasticSearchLookupService.INDEX, "nested") + runner.setProperty(lookupService, ElasticSearchLookupService.TYPE, "nested_complex") + runner.enableControllerService(lookupService) + + Optional response = lookupService.lookup(coordinates) + Assert.assertNotNull(response) + Assert.assertTrue(response.isPresent()) + def rec = response.get() + Assert.assertEquals("Hello, world", rec.getValue("msg")) + def subRec = getSubRecord(rec, "subField") + Assert.assertNotNull(subRec) + def deeper = getSubRecord(subRec, "deeper") + Assert.assertNotNull(deeper) + def deepest = getSubRecord(deeper, "deepest") + Assert.assertNotNull(deepest) + Assert.assertEquals("The sky is blue", deepest.getAsString("super_secret")) + } + + @Test + void testDetectedSchema() throws LookupFailureException { + runner.disableControllerService(lookupService) + runner.setProperty(lookupService, ElasticSearchLookupService.INDEX, "complex") + runner.setProperty(lookupService, ElasticSearchLookupService.TYPE, "complex") + runner.enableControllerService(lookupService) + def coordinates = ["_id": "1" ] + + Optional response = lookupService.lookup(coordinates) + Assert.assertNotNull(response) + Record rec = response.get() + Record subRec = getSubRecord(rec, "subField") + + def r2 = new MapRecord(rec.schema, [:]) + def path = RecordPath.compile("/subField/longField") + def result = path.evaluate(r2) + result.selectedFields.findFirst().get().updateValue(1234567890L) + + Assert.assertNotNull(rec) + Assert.assertNotNull(subRec) + Assert.assertEquals("Hello, world", rec.getValue("msg")) + Assert.assertNotNull(rec.getValue("subField")) + Assert.assertEquals(new Long(100000), subRec.getValue("longField")) + Assert.assertEquals("2018-04-10T12:18:05Z", subRec.getValue("dateField")) + } + + Record getSubRecord(Record rec, String fieldName) { + RecordSchema schema = rec.schema + RecordSchema subSchema = ((RecordDataType)schema.getField(fieldName).get().dataType).childSchema + rec.getAsRecord(fieldName, subSchema) + } + + @Test + void testMappings() { + runner.disableControllerService(lookupService) + runner.setProperty(lookupService, "\$.subField.longField", "/longField2") + runner.setProperty(lookupService, '$.subField.dateField', '/dateField2') + runner.setProperty(lookupService, 
ElasticSearchLookupService.INDEX, "nested") + runner.setProperty(lookupService, ElasticSearchLookupService.TYPE, "nested_complex") + runner.enableControllerService(lookupService) + + def coordinates = ["msg": "Hello, world"] + def result = lookupService.lookup(coordinates) + Assert.assertTrue(result.isPresent()) + def rec = result.get() + ["dateField": "2018-08-14T10:08:00Z", "longField": 150000L].each { field -> + def value = rec.getValue(field.key) + Assert.assertEquals(field.value, value) + } + } +} diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/java/org/apache/nifi/elasticsearch/integration/TestControllerServiceProcessor.java b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/TestControllerServiceProcessor.groovy similarity index 51% rename from nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/java/org/apache/nifi/elasticsearch/integration/TestControllerServiceProcessor.java rename to nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/TestControllerServiceProcessor.groovy index 674cc147b446..75e9dbd1e06c 100644 --- a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/java/org/apache/nifi/elasticsearch/integration/TestControllerServiceProcessor.java +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/TestControllerServiceProcessor.groovy @@ -3,7 +3,7 @@ * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with + * (the "License") you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 @@ -15,35 +15,37 @@ * limitations under the License. 
*/ -package org.apache.nifi.elasticsearch.integration; +package org.apache.nifi.elasticsearch.integration -import org.apache.nifi.components.PropertyDescriptor; -import org.apache.nifi.elasticsearch.ElasticSearchClientServiceImpl; -import org.apache.nifi.processor.AbstractProcessor; -import org.apache.nifi.processor.ProcessContext; -import org.apache.nifi.processor.ProcessSession; -import org.apache.nifi.processor.exception.ProcessException; +import org.apache.nifi.components.PropertyDescriptor +import org.apache.nifi.elasticsearch.ElasticSearchClientServiceImpl +import org.apache.nifi.elasticsearch.ElasticSearchLookupService +import org.apache.nifi.processor.AbstractProcessor +import org.apache.nifi.processor.ProcessContext +import org.apache.nifi.processor.ProcessSession +import org.apache.nifi.processor.exception.ProcessException -import java.util.ArrayList; -import java.util.List; +class TestControllerServiceProcessor extends AbstractProcessor { -public class TestControllerServiceProcessor extends AbstractProcessor { - - static final PropertyDescriptor CLIENT_SERVICE = new PropertyDescriptor.Builder() + public static final PropertyDescriptor CLIENT_SERVICE = new PropertyDescriptor.Builder() .name("Client Service") .description("ElasticSearchClientServiceImpl") .identifiesControllerService(ElasticSearchClientServiceImpl.class) .required(true) - .build(); + .build() + public static final PropertyDescriptor LOOKUP_SERVICE = new PropertyDescriptor.Builder() + .name("Lookup Service") + .description("ElasticSearchClientServiceImpl") + .identifiesControllerService(ElasticSearchLookupService.class) + .required(false) + .build() @Override - public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException { + void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException { } @Override protected List getSupportedPropertyDescriptors() { - List propDescs = new ArrayList<>(); - propDescs.add(CLIENT_SERVICE); - return propDescs; + [ CLIENT_SERVICE, LOOKUP_SERVICE ] } } diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/TestElasticSearchClientService.groovy b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/TestElasticSearchClientService.groovy new file mode 100644 index 000000000000..3b5fc0a15a8a --- /dev/null +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/TestElasticSearchClientService.groovy @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License") you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.elasticsearch.integration + +import org.apache.nifi.controller.AbstractControllerService +import org.apache.nifi.elasticsearch.DeleteOperationResponse +import org.apache.nifi.elasticsearch.ElasticSearchClientService +import org.apache.nifi.elasticsearch.IndexOperationRequest +import org.apache.nifi.elasticsearch.IndexOperationResponse +import org.apache.nifi.elasticsearch.SearchResponse + +class TestElasticSearchClientService extends AbstractControllerService implements ElasticSearchClientService { + @Override + IndexOperationResponse add(IndexOperationRequest operation) throws IOException { + return null + } + + @Override + IndexOperationResponse add(List operations) throws IOException { + return null + } + + @Override + DeleteOperationResponse deleteById(String index, String type, String id) throws IOException { + return null + } + + @Override + DeleteOperationResponse deleteById(String index, String type, List ids) throws IOException { + return null + } + + @Override + DeleteOperationResponse deleteByQuery(String query, String index, String type) throws IOException { + return null + } + + @Override + Map get(String index, String type, String id) throws IOException { + return null + } + + @Override + SearchResponse search(String query, String index, String type) throws IOException { + List hits = [[ + "_source": [ + "username": "john.smith", + "password": "testing1234", + "email": "john.smith@test.com", + "position": "Software Engineer" + ] + ]] + return new SearchResponse(hits, null, 1, 100, false) + } + + @Override + String getTransitUrl(String index, String type) { + return "" + } +} diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/TestSchemaRegistry.groovy b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/TestSchemaRegistry.groovy new file mode 100644 index 000000000000..2a0bd8eca5b3 --- /dev/null +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/groovy/org/apache/nifi/elasticsearch/integration/TestSchemaRegistry.groovy @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License") you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.elasticsearch.integration + +import org.apache.nifi.controller.AbstractControllerService +import org.apache.nifi.schema.access.SchemaField +import org.apache.nifi.schemaregistry.services.SchemaRegistry +import org.apache.nifi.serialization.SimpleRecordSchema +import org.apache.nifi.serialization.record.RecordField +import org.apache.nifi.serialization.record.RecordFieldType +import org.apache.nifi.serialization.record.RecordSchema +import org.apache.nifi.serialization.record.SchemaIdentifier + +class TestSchemaRegistry extends AbstractControllerService implements SchemaRegistry { + @Override + RecordSchema retrieveSchema(SchemaIdentifier schemaIdentifier) { + new SimpleRecordSchema([ + new RecordField("msg", RecordFieldType.STRING.dataType) + ]) + } + + @Override + Set getSuppliedSchemaFields() { + [ SchemaField.SCHEMA_NAME ] + } +} diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/java/.gitignore b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/java/.gitignore new file mode 100644 index 000000000000..00aca0c29684 --- /dev/null +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/java/.gitignore @@ -0,0 +1 @@ +# This is a placeholder to force Maven to compile the groovy code. \ No newline at end of file diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/java/org/apache/nifi/elasticsearch/integration/ElasticSearchClientService_IT.java b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/java/org/apache/nifi/elasticsearch/integration/ElasticSearchClientService_IT.java deleted file mode 100644 index 687faf03b67d..000000000000 --- a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/java/org/apache/nifi/elasticsearch/integration/ElasticSearchClientService_IT.java +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.nifi.elasticsearch.integration; - -import org.apache.nifi.elasticsearch.DeleteOperationResponse; -import org.apache.nifi.elasticsearch.ElasticSearchClientService; -import org.apache.nifi.elasticsearch.ElasticSearchClientServiceImpl; -import org.apache.nifi.elasticsearch.IndexOperationRequest; -import org.apache.nifi.elasticsearch.SearchResponse; -import org.apache.nifi.util.TestRunner; -import org.apache.nifi.util.TestRunners; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class ElasticSearchClientService_IT { - - private TestRunner runner; - private ElasticSearchClientServiceImpl service; - - static final String INDEX = "messages"; - static final String TYPE = "message"; - - @Before - public void before() throws Exception { - runner = TestRunners.newTestRunner(TestControllerServiceProcessor.class); - service = new ElasticSearchClientServiceImpl(); - runner.addControllerService("Client Service", service); - runner.setProperty(service, ElasticSearchClientService.HTTP_HOSTS, "http://localhost:9400"); - runner.setProperty(service, ElasticSearchClientService.CONNECT_TIMEOUT, "10000"); - runner.setProperty(service, ElasticSearchClientService.SOCKET_TIMEOUT, "60000"); - runner.setProperty(service, ElasticSearchClientService.RETRY_TIMEOUT, "60000"); - try { - runner.enableControllerService(service); - } catch (Exception ex) { - ex.printStackTrace(); - throw ex; - } - - Map expected = new HashMap<>(); - expected.put("one", 1); - expected.put("two", 2); - expected.put("three", 3); - expected.put("four", 4); - expected.put("five", 5); - - - int index = 1; - List docs = new ArrayList<>(); - for (Map.Entry entry : expected.entrySet()) { - for (int idx = 0; idx < entry.getValue(); idx++) { - Map fields = new HashMap<>(); - fields.put("msg", entry.getKey()); - IndexOperationRequest ior = new IndexOperationRequest(INDEX, TYPE, String.valueOf(index++), fields); - docs.add(ior); - } - } - service.add(docs); - } - - @After - public void after() throws Exception { - service.onDisabled(); - } - - @Test - public void testBasicSearch() throws Exception { - String query = "{\n" + - "\t\"size\": 10,\n" + - "\t\"query\": {\n" + - "\t\t\"match_all\": {}\n" + - "\t},\n" + - "\t\"aggs\": {\n" + - "\t\t\"term_counts\": {\n" + - "\t\t\t\"terms\": {\n" + - "\t\t\t\t\"field\": \"msg.keyword\",\n" + - "\t\t\t\t\"size\": 5\n" + - "\t\t\t}\n" + - "\t\t}\n" + - "\t}\n" + - "}"; - SearchResponse response = service.search(query, INDEX, TYPE); - Assert.assertNotNull("Response was null", response); - - Assert.assertEquals("Wrong count", 15, response.getNumberOfHits()); - Assert.assertFalse("Timed out", response.isTimedOut()); - Assert.assertNotNull("Hits was null", response.getHits()); - Assert.assertEquals("Wrong number of hits", 10, response.getHits().size()); - Assert.assertNotNull("Aggregations are missing", response.getAggregations()); - Assert.assertEquals("Aggregation count is wrong", 1, response.getAggregations().size()); - - Map termCounts = (Map) response.getAggregations().get("term_counts"); - Assert.assertNotNull("Term counts was missing", termCounts); - List> buckets = (List>) termCounts.get("buckets"); - Assert.assertNotNull("Buckets branch was empty", buckets); - Map expected = new HashMap<>(); - expected.put("one", 1); - expected.put("two", 2); - expected.put("three", 3); - 
expected.put("four", 4); - expected.put("five", 5); - - for (Map aggRes : buckets) { - String key = (String)aggRes.get("key"); - Integer docCount = (Integer)aggRes.get("doc_count"); - - Assert.assertEquals(String.format("%s did not match", key), expected.get(key), docCount); - } - } - - @Test - public void testDeleteByQuery() throws Exception { - String query = "{\"query\":{\"match\":{\"msg\":\"five\"}}}"; - DeleteOperationResponse response = service.deleteByQuery(query, INDEX, TYPE); - Assert.assertNotNull(response); - Assert.assertTrue(response.getTook() > 0); - } - - @Test - public void testDeleteById() throws Exception { - final String ID = "1"; - DeleteOperationResponse response = service.deleteById(INDEX, TYPE, ID); - Assert.assertNotNull(response); - Assert.assertTrue(response.getTook() > 0); - Map doc = service.get(INDEX, TYPE, ID); - Assert.assertNull(doc); - doc = service.get(INDEX, TYPE, "2"); - Assert.assertNotNull(doc); - } - - @Test - public void testGet() throws IOException { - Map old = null; - for (int index = 1; index <= 15; index++) { - String id = String.valueOf(index); - Map doc = service.get(INDEX, TYPE, id); - Assert.assertNotNull(doc); - Assert.assertNotNull(doc.toString() + "\t" + doc.keySet().toString(), doc.get("msg")); - Assert.assertFalse(doc == old); - old = doc; - } - } -} diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/resources/setup.script b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/resources/setup.script index 8cf4c9704108..a17a039d530e 100644 --- a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/resources/setup.script +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-client-service/src/test/resources/setup.script @@ -13,7 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
#create mapping +PUT:user_details/:{ "mappings":{"details":{ "properties":{ "email":{"type":"keyword"},"phone":{"type": "keyword"},"accessKey":{"type": "keyword"}}}}} PUT:messages/:{ "mappings":{"message":{ "properties":{ "msg":{"type":"keyword"}}}}} +PUT:complex/:{"mappings":{"complex":{"properties":{"msg":{"type":"keyword"},"subField":{"type":"nested","properties":{"longField":{"type":"long"},"dateField":{"type":"date"}}}}}}} +PUT:nested/:{"mappings":{"nested_complex":{"properties":{"msg":{"type":"keyword"},"subField":{"type":"nested","properties":{"longField":{"type":"long"},"dateField":{"type":"date"},"deeper":{"type":"nested","properties":{"secretz":{"type":"keyword"},"deepest":{"type":"nested","properties":{"super_secret":{"type":"keyword"}}}}}}}}}}} #add document PUT:messages/message/1:{ "msg":"one" } PUT:messages/message/2:{ "msg":"two" } @@ -29,4 +32,10 @@ PUT:messages/message/11:{ "msg":"five" } PUT:messages/message/12:{ "msg":"five" } PUT:messages/message/13:{ "msg":"five" } PUT:messages/message/14:{ "msg":"five" } -PUT:messages/message/15:{ "msg":"five" } \ No newline at end of file +PUT:messages/message/15:{ "msg":"five" } +PUT:complex/complex/1:{"msg":"Hello, world","subField":{"longField":100000,"dateField":"2018-04-10T12:18:05Z"}} +PUT:user_details/details/1:{ "email": "john.smith@company.com", "phone": "123-456-7890", "accessKey": "ABCDE"} +PUT:user_details/details/2:{ "email": "jane.doe@company.com", "phone": "098-765-4321", "accessKey": "GHIJK"} +PUT:nested/nested_complex/1:{"msg":"Hello, world","subField":{"longField":150000,"dateField":"2018-08-14T10:08:00Z","deeper":{"secretz":"No one should see this!","deepest":{"super_secret":"Got nothin to hide"}}}} +PUT:nested/nested_complex/2:{"msg":"Hello, world","subField":{"longField":150000,"dateField":"2018-08-14T10:08:00Z","deeper":{"secretz":"Hello, world!","deepest":{"super_secret":"I could tell, but then I would have to kill you"}}}} +PUT:nested/nested_complex/3:{"msg":"Hello, world","subField":{"longField":150000,"dateField":"2018-08-14T10:08:00Z","deeper":{"secretz":"Buongiorno, mondo!!","deepest":{"super_secret":"The sky is blue"}}}} \ No newline at end of file diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-processors/pom.xml b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-processors/pom.xml index 94a07fc309d6..dfe72086604b 100644 --- a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-processors/pom.xml +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-processors/pom.xml @@ -24,7 +24,7 @@ language governing permissions and limitations under the License. --> 1.7.12 2.1.0 5.3.1 - 2.9.5 + 2.9.7 @@ -63,7 +63,7 @@ language governing permissions and limitations under the License. --> org.apache.commons commons-text - 1.3 + 1.4 org.apache.lucene diff --git a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-restapi-processors/pom.xml b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-restapi-processors/pom.xml index 54cc43129af6..eecad55c9684 100644 --- a/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-restapi-processors/pom.xml +++ b/nifi-nar-bundles/nifi-elasticsearch-bundle/nifi-elasticsearch-restapi-processors/pom.xml @@ -80,7 +80,7 @@ language governing permissions and limitations under the License. 
--> com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 org.apache.nifi diff --git a/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/pom.xml b/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/pom.xml index cd93e2387a56..0f536fd8a15d 100644 --- a/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/pom.xml +++ b/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/pom.xml @@ -43,7 +43,7 @@ org.apache.commons commons-email - 1.4 + 1.5 com.sun.mail @@ -88,7 +88,7 @@ org.springframework.integration spring-integration-mail - 4.3.0.RELEASE + 4.3.17.RELEASE org.springframework.retry @@ -104,7 +104,7 @@ org.apache.poi poi-scratchpad - 3.17 + 4.0.0 org.apache.nifi @@ -120,7 +120,7 @@ com.icegreen greenmail - 1.5.2 + 1.5.8 test diff --git a/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/src/test/java/org/apache/nifi/processors/email/TestListenSMTP.java b/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/src/test/java/org/apache/nifi/processors/email/TestListenSMTP.java index bc4b441d84e2..f092ce309821 100644 --- a/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/src/test/java/org/apache/nifi/processors/email/TestListenSMTP.java +++ b/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/src/test/java/org/apache/nifi/processors/email/TestListenSMTP.java @@ -92,8 +92,8 @@ public void validateSuccessfulInteraction() throws Exception, EmailException { @Test public void validateSuccessfulInteractionWithTls() throws Exception, EmailException { System.setProperty("mail.smtp.ssl.trust", "*"); - System.setProperty("javax.net.ssl.keyStore", "src/test/resources/localhost-ks.jks"); - System.setProperty("javax.net.ssl.keyStorePassword", "localtest"); + System.setProperty("javax.net.ssl.keyStore", "src/test/resources/keystore.jks"); + System.setProperty("javax.net.ssl.keyStorePassword", "passwordpassword"); int port = NetworkUtils.availablePort(); TestRunner runner = TestRunners.newTestRunner(ListenSMTP.class); @@ -103,11 +103,11 @@ public void validateSuccessfulInteractionWithTls() throws Exception, EmailExcept // Setup the SSL Context SSLContextService sslContextService = new StandardRestrictedSSLContextService(); runner.addControllerService("ssl-context", sslContextService); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/localhost-ts.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/truststore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/localhost-ks.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/keystore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, "JKS"); runner.enableControllerService(sslContextService); diff --git a/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/src/test/resources/keystore.jks b/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/src/test/resources/keystore.jks new 
file mode 100644 index 0000000000000000000000000000000000000000..246fe888efbb981188c9dcca109b5d2c52f2b435 GIT binary patch literal 3088 zcmb`|c{tSF9suz9%_a@TzDvU}%JBY1_7`DFP1&+#U$ZAATQ9;`LzV{F%6MfdJ87|v zE2AhhmLf7Oyo_p0MWJw2_jzyc^W6K~`^Wpod7kq<=RDu@$M<}`XY<|WI|u{<-5$XA zvE!7#kH1Qw_qpu_?~N4xO$Y=51D%~@XpW4zpN#`sJCe8iw3r=g#nrL- zc}e<${}VaCYCr4gf{ezT(R=D z-nlpW#qF(FTF*1$2O#WiubmF;Hrxmy6*igNRlfI9l~@uN!qhs~FEEGiMBlUNgZzPX(j)?E&4g~kNMFchr_X$68d zLt}BE%T`bPS@41`N-iwN`9RK`z@zznQIT?}DVA}T$?N6wuhG{w2?-ZIV8Z4AapH~> zM=w+HO5Fp%YPU&g%=71;K{*z_W9CqOV7L|boJ98Vakn zj}6I7pIDmnvrHDGH%#ao7SjJ1`5HddZo{Zee{!RGW`+H+BrM;}xmtv)l}OxJAZh8- zwT`Bb9j|;Yw_=HDi5fgOoK;}HBaAL{SGgCDyYgC&a%im9w+!Rh4OO{I+3JX2BP;2q`9{!Jli?d3(E_2u<*?$NuwN zAx~K%jw`&zdYQkM#CBOGN@Z)7Q<2%226`=Tiu8=+T0z;V zY7d2carE2qJ_kfpxZ~T^YKhz^<)mJ)c!*1P+p8$2xI%eB(zQC(9^*_&h|gtE3?{fC_^JA?rpHl7SEY8Zz?j!0+^9w##-C~jZ9C0k?>zlY?s;}AZqkO zbydnL6y`L;WU);zfw*aVg|DuDb_E^dVtr1h)_rk&z2RQwX?`5pO;@!2(%#Elv zUe?<*C&k;IN?al00~>7(D+{R{ic4NlN$k7d`Kle9?n&9GU&+1B-px6N;$i0b^mLdH z$(AH9DJAw4%+ES<-~96R++imu41fUT@mIn4Vo+wg1TuVZQMj;q`m}DIxpU)D>FgW# zCt@$))iSz4*>BtOaB)yHPFQ#tq@;HhsC0~}gtS`Jb~GUw{o7yR_5m~iY{B6$C~Otv z{uT?tp&;Z(Y6Z9`D2&{({aqpuTrlXLGvG&Rfp4kF|9${JO@A)o_WRi`ApkLd%?d`^ zwVRHvkb+>ylrv{t<84w-ewm=TU#?Km9_vmc&9~O%6%D7U^XiNP=!ZoKGTfS%Z}Nw) zH{x9_j=9q}jQmOY)74O+sC%~#z>AO)@0IaZlNPG}wW`Bg=)g&(tXNyq_!yt+OrN=~ zI($%TXQXiyc7nf@_hT*hgCy0khUrx^=WzesaX3$8g63@cB7^&p{McFHy1vdY==?h9 zr$i9-K5UdfLvmB==xlIx+U1VV&1Bu2@vaB97}@x`Igcs&i$SY;of|i20_+_wEhLNk z+=yMjh0Gcl{PF_zdC}bg6b%M;P1(`c(n3>q$pDAw=cgoWJ+UFRdnX}(x-;8$N7{Br z_{kkw;BRtA_^UgDhJ(-XdZNP~%U{gf7FCwHzJh|Rf_+h9s``swy%q}DLj3H6C zm&5jubG)kdY)^pIY2viya-LOlTTWE{Q#6J~$`{9ISBVxD5%5QXmr|5=0xtV6gXHrVgG0f?qugFkf{fs5ABY4YFOs`kuIZEzZ*rUX}v9X zN><>P7`!CrHo!+=rw-Pd*t`9=BZ6as?F1UlV0w}EQMEuBMHopBcmt14|g&sQmlf^%aznq zUn5%e{qm(@k9XT@{BRcw#{5+cunF?~P=f$r+me1V`5(#sPm|wG|95zQ?aSX?xh;m} z?PMiEdK%D&u3H6GI^rhnY*xAGv<;Iqj>{WtWW@EZletfeN9l*o^*{aEh^`J6S`PPx zb!lA+afuH^Y<;QMQN8n<1{6v(4GYP(NR)PaHRS}EU0EA{9Fv&6>jAFDwB;aj&{_Ji z%^1H5<93JiC5io0Rj&h9D-N^eGlyk6Ijv?(^|B%=o%YPcg*(0Cu1 literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/src/test/resources/truststore.jks b/nifi-nar-bundles/nifi-email-bundle/nifi-email-processors/src/test/resources/truststore.jks new file mode 100644 index 0000000000000000000000000000000000000000..87f4be1cb74419252a6dd4a8cb8ed6063e7ade2f GIT binary patch literal 911 zcmezO_TO6u1_mY|W(3omd6{XMy2+_UB|wqXzwHA57+53pObsj<7?^zxnwUKenwSh1 zFf%bSF>!(9`2~U67z}vXIJMe5+P?ELGIFyr7$_U^8gR2Qhq7?`|G6{iVN=9$4_qR+voW1o`bX1kq_?@ zm7?Yu-O|hoDsa9N@MUVyrd<=O*InmV-0wL}LeXNm*vwgmI&)i2xoutU%O^gq+-#-h zgcU!vCxtetZirTB`cPrNOwe`BjfgY1S)#*|1p91Qo|habzwWRw{qs>-^K}Z`hO4)3 zSIUzY)UVZ%QmoRgXzEd9>P=X}&%!9YU~gQOux6db!Fj1Vv2}&_t)uD|R&4O>6_I{d zsCSIx%+#5m7tCg~+3?%uOvdTe*F$Rt zebbjM$oo^i)iz_A4D$trP*WymMh3>k^#-*De832h{z<6{wH5fM3WFu8l*4Bb6fbc?q7ui3Ma?czxTd62X+i-dt#19k;q{i-Zt1|n=6 z+H8!htnAE8a26wS6amu*Fp3x%;$M|c<$Br5aA-Zh`^uo~Wl`!k!$N+rAKm}gt?NVV zYn6)fmHTq0T%UdUsC(tf>wh-zugDkvQkKrV&wNr*Y-2X-|KIB@^>$R5GBG*(lon0! 
zZ;^?AbUT{i$=9D(FVwb7kUOEi+Gn#X+u0bI3n5nobT4k-BQr_T{`T_6SwEI~yi@Kw zFTeN0R^RvQOEeC9N z9oJ^^ymM}2j4=Bzl|5_bntl+HlHs5ImG#RU$C=4?=Kr%5`)8e9xTbMSRd_+e)hDMV z{$6UT4g6+eRVVNyVx3Chngx@VtPkMaA?X>r^jGNWKqZspMPe?;zuNh^J${uwG4xsu E0JdaNg#Z8m literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-enrich-bundle/nifi-enrich-processors/pom.xml b/nifi-nar-bundles/nifi-enrich-bundle/nifi-enrich-processors/pom.xml index 6c55cce621f7..2e55a4c5d8e1 100644 --- a/nifi-nar-bundles/nifi-enrich-bundle/nifi-enrich-processors/pom.xml +++ b/nifi-nar-bundles/nifi-enrich-bundle/nifi-enrich-processors/pom.xml @@ -31,7 +31,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.apache.nifi diff --git a/nifi-nar-bundles/nifi-extension-utils/nifi-hadoop-utils/pom.xml b/nifi-nar-bundles/nifi-extension-utils/nifi-hadoop-utils/pom.xml index 8c7aa2cd747c..f1a36b0f0316 100644 --- a/nifi-nar-bundles/nifi-extension-utils/nifi-hadoop-utils/pom.xml +++ b/nifi-nar-bundles/nifi-extension-utils/nifi-hadoop-utils/pom.xml @@ -43,7 +43,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 diff --git a/nifi-nar-bundles/nifi-extension-utils/nifi-processor-utils/pom.xml b/nifi-nar-bundles/nifi-extension-utils/nifi-processor-utils/pom.xml index 4f59729f8d5a..e4e30d92f198 100644 --- a/nifi-nar-bundles/nifi-extension-utils/nifi-processor-utils/pom.xml +++ b/nifi-nar-bundles/nifi-extension-utils/nifi-processor-utils/pom.xml @@ -59,7 +59,7 @@ com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 org.apache.nifi diff --git a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-avro-record-utils/src/main/java/org/apache/nifi/avro/AvroTypeUtil.java b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-avro-record-utils/src/main/java/org/apache/nifi/avro/AvroTypeUtil.java index 23f74b86d01e..2e8898a49502 100755 --- a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-avro-record-utils/src/main/java/org/apache/nifi/avro/AvroTypeUtil.java +++ b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-avro-record-utils/src/main/java/org/apache/nifi/avro/AvroTypeUtil.java @@ -17,28 +17,6 @@ package org.apache.nifi.avro; -import java.io.IOException; -import java.math.BigDecimal; -import java.nio.ByteBuffer; -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; -import java.sql.Time; -import java.sql.Timestamp; -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.stream.Collectors; - import org.apache.avro.Conversions; import org.apache.avro.JsonProperties; import org.apache.avro.LogicalType; @@ -72,6 +50,27 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.math.BigDecimal; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.sql.Time; +import java.sql.Timestamp; +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + public 
class AvroTypeUtil { private static final Logger logger = LoggerFactory.getLogger(AvroTypeUtil.class); public static final String AVRO_SCHEMA_FORMAT = "avro"; @@ -308,7 +307,7 @@ public static DataType determineDataType(final Schema avroSchema, Map getNonNullSubSchemas(Schema avroSchema) { - List unionFieldSchemas = avroSchema.getTypes(); + private static List getNonNullSubSchemas(final Schema avroSchema) { + final List unionFieldSchemas = avroSchema.getTypes(); if (unionFieldSchemas == null) { return Collections.emptyList(); } - return unionFieldSchemas.stream() - .filter(s -> s.getType() != Type.NULL) - .collect(Collectors.toList()); + + final List nonNullTypes = new ArrayList<>(unionFieldSchemas.size()); + for (final Schema fieldSchema : unionFieldSchemas) { + if (fieldSchema.getType() != Type.NULL) { + nonNullTypes.add(fieldSchema); + } + } + + return nonNullTypes; } public static RecordSchema createSchema(final Schema avroSchema) { + return createSchema(avroSchema, true); + } + + public static RecordSchema createSchema(final Schema avroSchema, final boolean includeText) { if (avroSchema == null) { throw new IllegalArgumentException("Avro Schema cannot be null"); } SchemaIdentifier identifier = new StandardSchemaIdentifier.Builder().name(avroSchema.getName()).build(); - return createSchema(avroSchema, avroSchema.toString(), identifier); + return createSchema(avroSchema, includeText ? avroSchema.toString() : null, identifier); } /** @@ -385,10 +394,10 @@ public static RecordSchema createSchema(final Schema avroSchema, final String sc throw new IllegalArgumentException("Avro Schema cannot be null"); } - String schemaFullName = avroSchema.getNamespace() + "." + avroSchema.getName(); - SimpleRecordSchema recordSchema = new SimpleRecordSchema(schemaText, AVRO_SCHEMA_FORMAT, schemaId); - DataType recordSchemaType = RecordFieldType.RECORD.getRecordDataType(recordSchema); - Map knownRecords = new HashMap<>(); + final String schemaFullName = avroSchema.getNamespace() + "." + avroSchema.getName(); + final SimpleRecordSchema recordSchema = schemaText == null ? 
new SimpleRecordSchema(schemaId) : new SimpleRecordSchema(schemaText, AVRO_SCHEMA_FORMAT, schemaId); + final DataType recordSchemaType = RecordFieldType.RECORD.getRecordDataType(recordSchema); + final Map knownRecords = new HashMap<>(); knownRecords.put(schemaFullName, recordSchemaType); final List recordFields = new ArrayList<>(avroSchema.getFields().size()); @@ -752,36 +761,39 @@ public static Map convertAvroRecordToMap(final GenericRecord avr * @param conversion the conversion function which takes a non-null field schema within the union field and returns a converted value * @return a converted value */ - private static Object convertUnionFieldValue(Object originalValue, Schema fieldSchema, Function conversion, final String fieldName) { - // Ignore null types in union - final List nonNullFieldSchemas = getNonNullSubSchemas(fieldSchema); - - // If at least one non-null type exists, find the first compatible type - if (nonNullFieldSchemas.size() >= 1) { - for (final Schema nonNullFieldSchema : nonNullFieldSchemas) { - final DataType desiredDataType = AvroTypeUtil.determineDataType(nonNullFieldSchema); - try { - final Object convertedValue = conversion.apply(nonNullFieldSchema); - - if (isCompatibleDataType(convertedValue, desiredDataType)) { - return convertedValue; - } + private static Object convertUnionFieldValue(final Object originalValue, final Schema fieldSchema, final Function conversion, final String fieldName) { + boolean foundNonNull = false; + for (final Schema subSchema : fieldSchema.getTypes()) { + if (subSchema.getType() == Type.NULL) { + continue; + } - // For logical types those store with different type (e.g. BigDecimal as ByteBuffer), check compatibility using the original rawValue - if (nonNullFieldSchema.getLogicalType() != null && DataTypeUtils.isCompatibleDataType(originalValue, desiredDataType)) { - return convertedValue; - } - } catch (Exception e) { - // If failed with one of possible types, continue with the next available option. - if (logger.isDebugEnabled()) { - logger.debug("Cannot convert value {} to type {}", originalValue, desiredDataType, e); - } + foundNonNull = true; + final DataType desiredDataType = AvroTypeUtil.determineDataType(subSchema); + try { + final Object convertedValue = conversion.apply(subSchema); + + if (isCompatibleDataType(convertedValue, desiredDataType)) { + return convertedValue; + } + + // For logical types those store with different type (e.g. BigDecimal as ByteBuffer), check compatibility using the original rawValue + if (subSchema.getLogicalType() != null && DataTypeUtils.isCompatibleDataType(originalValue, desiredDataType)) { + return convertedValue; + } + } catch (Exception e) { + // If failed with one of possible types, continue with the next available option. 
+ if (logger.isDebugEnabled()) { + logger.debug("Cannot convert value {} to type {}", originalValue, desiredDataType, e); } } + } + if (foundNonNull) { throw new IllegalTypeConversionException("Cannot convert value " + originalValue + " of type " + originalValue.getClass() + " because no compatible types exist in the UNION for field " + fieldName); } + return null; } @@ -875,7 +887,7 @@ private static Object normalizeValue(final Object value, final Schema avroSchema final Object fieldValue = normalizeValue(avroFieldValue, field.schema(), fieldName + "/" + field.name()); values.put(field.name(), fieldValue); } - final RecordSchema childSchema = AvroTypeUtil.createSchema(recordSchema); + final RecordSchema childSchema = AvroTypeUtil.createSchema(recordSchema, false); return new MapRecord(childSchema, values); case BYTES: final ByteBuffer bb = (ByteBuffer) value; diff --git a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-avro-record-utils/src/main/java/org/apache/nifi/schema/access/WriteAvroSchemaAttributeStrategy.java b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-avro-record-utils/src/main/java/org/apache/nifi/schema/access/WriteAvroSchemaAttributeStrategy.java index 5f94679dbc21..36484a5c6e9d 100644 --- a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-avro-record-utils/src/main/java/org/apache/nifi/schema/access/WriteAvroSchemaAttributeStrategy.java +++ b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-avro-record-utils/src/main/java/org/apache/nifi/schema/access/WriteAvroSchemaAttributeStrategy.java @@ -17,6 +17,10 @@ package org.apache.nifi.schema.access; +import org.apache.avro.Schema; +import org.apache.nifi.avro.AvroTypeUtil; +import org.apache.nifi.serialization.record.RecordSchema; + import java.io.IOException; import java.io.OutputStream; import java.util.Collections; @@ -26,10 +30,6 @@ import java.util.Optional; import java.util.Set; -import org.apache.avro.Schema; -import org.apache.nifi.avro.AvroTypeUtil; -import org.apache.nifi.serialization.record.RecordSchema; - public class WriteAvroSchemaAttributeStrategy implements SchemaAccessWriter { private final Map avroSchemaTextCache = new LinkedHashMap() { @Override @@ -53,11 +53,21 @@ public Map getAttributes(final RecordSchema schema) { } } - String schemaText = avroSchemaTextCache.get(schema); + String schemaText; + synchronized (avroSchemaTextCache) { + schemaText = avroSchemaTextCache.get(schema); + } + if (schemaText == null) { final Schema avroSchema = AvroTypeUtil.extractAvroSchema(schema); schemaText = avroSchema.toString(); - avroSchemaTextCache.put(schema, schemaText); + + synchronized (avroSchemaTextCache) { + final String existing = avroSchemaTextCache.putIfAbsent(schema, schemaText); + if (existing != null) { + schemaText = existing; + } + } } return Collections.singletonMap("avro.schema", schemaText); diff --git a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/pom.xml b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/pom.xml index e82bf62ed61c..36f565ff60a5 100644 --- a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/pom.xml +++ b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/pom.xml @@ -54,6 +54,11 @@ org.apache.commons commons-csv + 1.5 + + + org.apache.commons + commons-text 1.4 diff --git 
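The WriteAvroSchemaAttributeStrategy hunk above replaces an unsynchronized cache lookup with synchronized access plus putIfAbsent, deliberately computing the Avro schema text outside the lock. Below is a minimal sketch of that pattern, assuming a plain HashMap in place of the size-bounded LinkedHashMap used in the patch; the class and method names are illustrative only.

import java.util.HashMap;
import java.util.Map;

// Sketch of a compute-outside-the-lock cache that uses putIfAbsent to resolve races between threads.
public class CachePatternSketch {

    private final Map<String, String> cache = new HashMap<>();

    public String getOrCompute(final String key) {
        String value;
        synchronized (cache) {
            value = cache.get(key);
        }

        if (value == null) {
            value = expensiveComputation(key); // performed outside the lock so other readers are not blocked
            synchronized (cache) {
                final String existing = cache.putIfAbsent(key, value);
                if (existing != null) {
                    value = existing; // another thread cached a value first; keep a single canonical result
                }
            }
        }

        return value;
    }

    private String expensiveComputation(final String key) {
        return key.toUpperCase(); // stand-in for generating Avro schema text
    }
}

The trade-off is that two threads may occasionally compute the same value concurrently, but neither blocks the other while doing so, and putIfAbsent ensures every caller ends up with the same cached instance.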
a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/src/main/java/org/apache/nifi/csv/CSVUtils.java b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/src/main/java/org/apache/nifi/csv/CSVUtils.java index f379bea3827f..3f3814ee99d0 100644 --- a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/src/main/java/org/apache/nifi/csv/CSVUtils.java +++ b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/src/main/java/org/apache/nifi/csv/CSVUtils.java @@ -19,7 +19,7 @@ import org.apache.commons.csv.CSVFormat; import org.apache.commons.csv.QuoteMode; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.nifi.components.AllowableValue; import org.apache.nifi.components.PropertyDescriptor; import org.apache.nifi.components.PropertyValue; diff --git a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/src/main/java/org/apache/nifi/csv/CSVValidators.java b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/src/main/java/org/apache/nifi/csv/CSVValidators.java index 5979407c9f31..0f6a22f7b4f9 100644 --- a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/src/main/java/org/apache/nifi/csv/CSVValidators.java +++ b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/nifi-standard-record-utils/src/main/java/org/apache/nifi/csv/CSVValidators.java @@ -17,7 +17,7 @@ package org.apache.nifi.csv; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.nifi.components.ValidationContext; import org.apache.nifi.components.ValidationResult; import org.apache.nifi.components.Validator; diff --git a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/pom.xml b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/pom.xml index d516ba4e8810..985b25f8a89f 100644 --- a/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/pom.xml +++ b/nifi-nar-bundles/nifi-extension-utils/nifi-record-utils/pom.xml @@ -30,4 +30,14 @@ nifi-mock-record-utils + + + + + io.netty + netty + 3.7.1.Final + + + diff --git a/nifi-nar-bundles/nifi-extension-utils/nifi-reporting-utils/pom.xml b/nifi-nar-bundles/nifi-extension-utils/nifi-reporting-utils/pom.xml index 87a388cb6590..681589c33d92 100644 --- a/nifi-nar-bundles/nifi-extension-utils/nifi-reporting-utils/pom.xml +++ b/nifi-nar-bundles/nifi-extension-utils/nifi-reporting-utils/pom.xml @@ -38,7 +38,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 com.yammer.metrics diff --git a/nifi-nar-bundles/nifi-extension-utils/nifi-syslog-utils/pom.xml b/nifi-nar-bundles/nifi-extension-utils/nifi-syslog-utils/pom.xml index dd94370aff85..e78e694c20ff 100644 --- a/nifi-nar-bundles/nifi-extension-utils/nifi-syslog-utils/pom.xml +++ b/nifi-nar-bundles/nifi-extension-utils/nifi-syslog-utils/pom.xml @@ -26,7 +26,7 @@ com.github.palindromicity simple-syslog-5424 - 0.0.7 + 0.0.8 org.apache.nifi diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework-nar/src/main/resources/META-INF/NOTICE b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework-nar/src/main/resources/META-INF/NOTICE index e6a732228697..fae5c91e4a2e 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework-nar/src/main/resources/META-INF/NOTICE +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework-nar/src/main/resources/META-INF/NOTICE @@ -212,6 +212,6 @@ 
SIL OFL 1.1 ****************** The following binary components are provided under the SIL Open Font License 1.1 - (SIL OFL 1.1) FontAwesome (4.6.1 - http://fortawesome.github.io/Font-Awesome/license/) + (SIL OFL 1.1) FontAwesome (4.7.0 - https://fontawesome.com/license/free) diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/ConnectionDTO.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/ConnectionDTO.java index a2272e07367f..f62feac7801c 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/ConnectionDTO.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/ConnectionDTO.java @@ -27,6 +27,9 @@ */ @XmlType(name = "connection") public class ConnectionDTO extends ComponentDTO { + public static final String LOAD_BALANCE_NOT_CONFIGURED = "LOAD_BALANCE_NOT_CONFIGURED"; + public static final String LOAD_BALANCE_INACTIVE = "LOAD_BALANCE_INACTIVE"; + public static final String LOAD_BALANCE_ACTIVE = "LOAD_BALANCE_ACTIVE"; private ConnectableDTO source; private ConnectableDTO destination; @@ -42,6 +45,11 @@ public class ConnectionDTO extends ComponentDTO { private List prioritizers; private List bends; + private String loadBalanceStrategy; + private String loadBalancePartitionAttribute; + private String loadBalanceCompression; + private String loadBalanceStatus; + /** * The source of this connection. * @@ -231,6 +239,47 @@ public void setPrioritizers(List prioritizers) { this.prioritizers = prioritizers; } + @ApiModelProperty(value = "How to load balance the data in this Connection across the nodes in the cluster.", + allowableValues = "DO_NOT_LOAD_BALANCE, PARTITION_BY_ATTRIBUTE, ROUND_ROBIN, SINGLE_NODE") + public String getLoadBalanceStrategy() { + return loadBalanceStrategy; + } + + public void setLoadBalanceStrategy(String loadBalanceStrategy) { + this.loadBalanceStrategy = loadBalanceStrategy; + } + + @ApiModelProperty(value = "The FlowFile Attribute to use for determining which node a FlowFile will go to if the Load Balancing Strategy is set to PARTITION_BY_ATTRIBUTE") + public String getLoadBalancePartitionAttribute() { + return loadBalancePartitionAttribute; + } + + public void setLoadBalancePartitionAttribute(String partitionAttribute) { + this.loadBalancePartitionAttribute = partitionAttribute; + } + + @ApiModelProperty(value = "Whether or not data should be compressed when being transferred between nodes in the cluster.", + allowableValues = "DO_NOT_COMPRESS, COMPRESS_ATTRIBUTES_ONLY, COMPRESS_ATTRIBUTES_AND_CONTENT") + public String getLoadBalanceCompression() { + return loadBalanceCompression; + } + + public void setLoadBalanceCompression(String compression) { + this.loadBalanceCompression = compression; + } + + @ApiModelProperty(value = "The current status of the Connection's Load Balancing Activities. 
Status can indicate that Load Balancing is not configured for the connection, that Load Balancing " + + "is configured but inactive (not currently transferring data to another node), or that Load Balancing is configured and actively transferring data to another node.", + allowableValues = LOAD_BALANCE_NOT_CONFIGURED + ", " + LOAD_BALANCE_INACTIVE + ", " + LOAD_BALANCE_ACTIVE, + readOnly = true) + public String getLoadBalanceStatus() { + return loadBalanceStatus; + } + + public void setLoadBalanceStatus(String status) { + this.loadBalanceStatus = status; + } + @Override public String toString() { return "ConnectionDTO [id: " + getId() + "]"; diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ConnectionDiagnosticsDTO.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ConnectionDiagnosticsDTO.java index 951ac414b21d..05e9aff8da88 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ConnectionDiagnosticsDTO.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ConnectionDiagnosticsDTO.java @@ -14,135 +14,44 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.nifi.web.api.dto.diagnostics; -import javax.xml.bind.annotation.XmlType; - +import io.swagger.annotations.ApiModelProperty; import org.apache.nifi.web.api.dto.ConnectionDTO; -import io.swagger.annotations.ApiModelProperty; +import javax.xml.bind.annotation.XmlType; +import java.util.List; -@XmlType(name = "connectionDiagnostics") +@XmlType(name="connectionDiagnostics") public class ConnectionDiagnosticsDTO { private ConnectionDTO connection; - private int totalFlowFileCount; - private long totalByteCount; - private int activeQueueFlowFileCount; - private long activeQueueByteCount; - private int swapFlowFileCount; - private long swapByteCount; - private int swapFiles; - private int inFlightFlowFileCount; - private long inFlightByteCount; - private Boolean allActiveQueueFlowFilesPenalized; - private Boolean anyActiveQueueFlowFilesPenalized; + private ConnectionDiagnosticsSnapshotDTO aggregateSnapshot; + private List nodeSnapshots; - @ApiModelProperty("Information about the Connection") + @ApiModelProperty(value = "Details about the connection", readOnly = true) public ConnectionDTO getConnection() { return connection; } - public void setConnection(ConnectionDTO connection) { + public void setConnection(final ConnectionDTO connection) { this.connection = connection; } - @ApiModelProperty("Total number of FlowFiles owned by the Connection") - public int getTotalFlowFileCount() { - return totalFlowFileCount; - } - - public void setTotalFlowFileCount(int totalFlowFileCount) { - this.totalFlowFileCount = totalFlowFileCount; - } - - @ApiModelProperty("Total number of bytes that make up the content for the FlowFiles owned by this Connection") - public long getTotalByteCount() { - return totalByteCount; - } - - public void setTotalByteCount(long totalByteCount) { - this.totalByteCount = totalByteCount; - } - - @ApiModelProperty("Total number of FlowFiles that exist in the Connection's Active Queue, immediately available to be offered up to a component") - public int getActiveQueueFlowFileCount() { - return activeQueueFlowFileCount; - } - - public 
void setActiveQueueFlowFileCount(int activeQueueFlowFileCount) { - this.activeQueueFlowFileCount = activeQueueFlowFileCount; - } - - @ApiModelProperty("Total number of bytes that make up the content for the FlowFiles that are present in the Connection's Active Queue") - public long getActiveQueueByteCount() { - return activeQueueByteCount; - } - - public void setActiveQueueByteCount(long activeQueueByteCount) { - this.activeQueueByteCount = activeQueueByteCount; - } - - @ApiModelProperty("The total number of FlowFiles that are swapped out for this Connection") - public int getSwapFlowFileCount() { - return swapFlowFileCount; - } - - public void setSwapFlowFileCount(int swapFlowFileCount) { - this.swapFlowFileCount = swapFlowFileCount; - } - - @ApiModelProperty("Total number of bytes that make up the content for the FlowFiles that are swapped out to disk for the Connection") - public long getSwapByteCount() { - return swapByteCount; - } - - public void setSwapByteCount(long swapByteCount) { - this.swapByteCount = swapByteCount; - } - - @ApiModelProperty("The number of Swap Files that exist for this Connection") - public int getSwapFiles() { - return swapFiles; - } - - public void setSwapFiles(int swapFiles) { - this.swapFiles = swapFiles; - } - - @ApiModelProperty("The number of In-Flight FlowFiles for this Connection. These are FlowFiles that belong to the connection but are currently being operated on by a Processor, Port, etc.") - public int getInFlightFlowFileCount() { - return inFlightFlowFileCount; - } - - public void setInFlightFlowFileCount(int inFlightFlowFileCount) { - this.inFlightFlowFileCount = inFlightFlowFileCount; - } - - @ApiModelProperty("The number bytes that make up the content of the FlowFiles that are In-Flight") - public long getInFlightByteCount() { - return inFlightByteCount; - } - - public void setInFlightByteCount(long inFlightByteCount) { - this.inFlightByteCount = inFlightByteCount; - } - - @ApiModelProperty("Whether or not all of the FlowFiles in the Active Queue are penalized") - public Boolean getAllActiveQueueFlowFilesPenalized() { - return allActiveQueueFlowFilesPenalized; + @ApiModelProperty(value = "Aggregate values for all nodes in the cluster, or for this instance if not clustered", readOnly = true) + public ConnectionDiagnosticsSnapshotDTO getAggregateSnapshot() { + return aggregateSnapshot; } - public void setAllActiveQueueFlowFilesPenalized(Boolean allFlowFilesPenalized) { - this.allActiveQueueFlowFilesPenalized = allFlowFilesPenalized; + public void setAggregateSnapshot(final ConnectionDiagnosticsSnapshotDTO aggregateSnapshot) { + this.aggregateSnapshot = aggregateSnapshot; } - @ApiModelProperty("Whether or not any of the FlowFiles in the Active Queue are penalized") - public Boolean getAnyActiveQueueFlowFilesPenalized() { - return anyActiveQueueFlowFilesPenalized; + @ApiModelProperty(value = "A list of values for each node in the cluster, if clustered.", readOnly = true) + public List getNodeSnapshots() { + return nodeSnapshots; } - public void setAnyActiveQueueFlowFilesPenalized(Boolean anyFlowFilesPenalized) { - this.anyActiveQueueFlowFilesPenalized = anyFlowFilesPenalized; + public void setNodeSnapshots(final List nodeSnapshots) { + this.nodeSnapshots = nodeSnapshots; } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ConnectionDiagnosticsSnapshotDTO.java 
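The load-balance properties added to ConnectionDTO above are ordinary bean fields, so a REST client or test could populate them as sketched here. The setter names and allowable values come from the hunk above; the partition attribute "customer.id" is purely hypothetical, and loadBalanceStatus is read-only and reported by the framework rather than set by callers.

import org.apache.nifi.web.api.dto.ConnectionDTO;

// Illustrative sketch of populating the new load-balance fields; not part of the patch.
public class LoadBalanceConfigSketch {

    public static ConnectionDTO partitionedConnection() {
        final ConnectionDTO connection = new ConnectionDTO();
        connection.setLoadBalanceStrategy("PARTITION_BY_ATTRIBUTE");
        connection.setLoadBalancePartitionAttribute("customer.id"); // hypothetical FlowFile attribute
        connection.setLoadBalanceCompression("COMPRESS_ATTRIBUTES_ONLY");
        return connection;
    }
}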
b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ConnectionDiagnosticsSnapshotDTO.java new file mode 100644 index 000000000000..5926f8da7aa4 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ConnectionDiagnosticsSnapshotDTO.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.web.api.dto.diagnostics; + +import io.swagger.annotations.ApiModelProperty; + +import javax.xml.bind.annotation.XmlType; +import java.util.List; + +@XmlType(name = "connectionDiagnosticsSnapshot") +public class ConnectionDiagnosticsSnapshotDTO { + private int totalFlowFileCount; + private long totalByteCount; + private String nodeIdentifier; + private LocalQueuePartitionDTO localQueuePartition; + private List remoteQueuePartitions; + + @ApiModelProperty("Total number of FlowFiles owned by the Connection") + public int getTotalFlowFileCount() { + return totalFlowFileCount; + } + + public void setTotalFlowFileCount(int totalFlowFileCount) { + this.totalFlowFileCount = totalFlowFileCount; + } + + @ApiModelProperty("Total number of bytes that make up the content for the FlowFiles owned by this Connection") + public long getTotalByteCount() { + return totalByteCount; + } + + public void setTotalByteCount(long totalByteCount) { + this.totalByteCount = totalByteCount; + } + + @ApiModelProperty("The Node Identifier that this information pertains to") + public String getNodeIdentifier() { + return nodeIdentifier; + } + + public void setNodeIdentifier(final String nodeIdentifier) { + this.nodeIdentifier = nodeIdentifier; + } + + @ApiModelProperty("The local queue partition, from which components can pull FlowFiles on this node.") + public LocalQueuePartitionDTO getLocalQueuePartition() { + return localQueuePartition; + } + + public void setLocalQueuePartition(LocalQueuePartitionDTO localQueuePartition) { + this.localQueuePartition = localQueuePartition; + } + + public List getRemoteQueuePartitions() { + return remoteQueuePartitions; + } + + public void setRemoteQueuePartitions(List remoteQueuePartitions) { + this.remoteQueuePartitions = remoteQueuePartitions; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/LocalQueuePartitionDTO.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/LocalQueuePartitionDTO.java new file mode 100644 index 000000000000..971c62a3ff90 --- /dev/null +++ 
b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/LocalQueuePartitionDTO.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.web.api.dto.diagnostics; + +import io.swagger.annotations.ApiModelProperty; + +import javax.xml.bind.annotation.XmlType; + +@XmlType(name = "localQueuePartition") +public class LocalQueuePartitionDTO { + private int totalFlowFileCount; + private long totalByteCount; + private int activeQueueFlowFileCount; + private long activeQueueByteCount; + private int swapFlowFileCount; + private long swapByteCount; + private int swapFiles; + private int inFlightFlowFileCount; + private long inFlightByteCount; + private Boolean allActiveQueueFlowFilesPenalized; + private Boolean anyActiveQueueFlowFilesPenalized; + + @ApiModelProperty("Total number of FlowFiles owned by the Connection") + public int getTotalFlowFileCount() { + return totalFlowFileCount; + } + + public void setTotalFlowFileCount(int totalFlowFileCount) { + this.totalFlowFileCount = totalFlowFileCount; + } + + @ApiModelProperty("Total number of bytes that make up the content for the FlowFiles owned by this Connection") + public long getTotalByteCount() { + return totalByteCount; + } + + public void setTotalByteCount(long totalByteCount) { + this.totalByteCount = totalByteCount; + } + + @ApiModelProperty("Total number of FlowFiles that exist in the Connection's Active Queue, immediately available to be offered up to a component") + public int getActiveQueueFlowFileCount() { + return activeQueueFlowFileCount; + } + + public void setActiveQueueFlowFileCount(int activeQueueFlowFileCount) { + this.activeQueueFlowFileCount = activeQueueFlowFileCount; + } + + @ApiModelProperty("Total number of bytes that make up the content for the FlowFiles that are present in the Connection's Active Queue") + public long getActiveQueueByteCount() { + return activeQueueByteCount; + } + + public void setActiveQueueByteCount(long activeQueueByteCount) { + this.activeQueueByteCount = activeQueueByteCount; + } + + @ApiModelProperty("The total number of FlowFiles that are swapped out for this Connection") + public int getSwapFlowFileCount() { + return swapFlowFileCount; + } + + public void setSwapFlowFileCount(int swapFlowFileCount) { + this.swapFlowFileCount = swapFlowFileCount; + } + + @ApiModelProperty("Total number of bytes that make up the content for the FlowFiles that are swapped out to disk for the Connection") + public long getSwapByteCount() { + return swapByteCount; + } + + public void setSwapByteCount(long swapByteCount) { + this.swapByteCount = swapByteCount; + } + + @ApiModelProperty("The number of Swap Files that exist for this Connection") + public int 
getSwapFiles() { + return swapFiles; + } + + public void setSwapFiles(int swapFiles) { + this.swapFiles = swapFiles; + } + + @ApiModelProperty("The number of In-Flight FlowFiles for this Connection. These are FlowFiles that belong to the connection but are currently being operated on by a Processor, Port, etc.") + public int getInFlightFlowFileCount() { + return inFlightFlowFileCount; + } + + public void setInFlightFlowFileCount(int inFlightFlowFileCount) { + this.inFlightFlowFileCount = inFlightFlowFileCount; + } + + @ApiModelProperty("The number bytes that make up the content of the FlowFiles that are In-Flight") + public long getInFlightByteCount() { + return inFlightByteCount; + } + + public void setInFlightByteCount(long inFlightByteCount) { + this.inFlightByteCount = inFlightByteCount; + } + + @ApiModelProperty("Whether or not all of the FlowFiles in the Active Queue are penalized") + public Boolean getAllActiveQueueFlowFilesPenalized() { + return allActiveQueueFlowFilesPenalized; + } + + public void setAllActiveQueueFlowFilesPenalized(Boolean allFlowFilesPenalized) { + this.allActiveQueueFlowFilesPenalized = allFlowFilesPenalized; + } + + @ApiModelProperty("Whether or not any of the FlowFiles in the Active Queue are penalized") + public Boolean getAnyActiveQueueFlowFilesPenalized() { + return anyActiveQueueFlowFilesPenalized; + } + + public void setAnyActiveQueueFlowFilesPenalized(Boolean anyFlowFilesPenalized) { + this.anyActiveQueueFlowFilesPenalized = anyFlowFilesPenalized; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ProcessorDiagnosticsDTO.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ProcessorDiagnosticsDTO.java index 77f5499e9d1c..ccf759e01983 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ProcessorDiagnosticsDTO.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/ProcessorDiagnosticsDTO.java @@ -17,15 +17,13 @@ package org.apache.nifi.web.api.dto.diagnostics; -import java.util.List; -import java.util.Set; - -import javax.xml.bind.annotation.XmlType; - +import io.swagger.annotations.ApiModelProperty; import org.apache.nifi.web.api.dto.ProcessorDTO; import org.apache.nifi.web.api.dto.status.ProcessorStatusDTO; -import io.swagger.annotations.ApiModelProperty; +import javax.xml.bind.annotation.XmlType; +import java.util.List; +import java.util.Set; @XmlType(name = "processorDiagnostics") public class ProcessorDiagnosticsDTO { diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/RemoteQueuePartitionDTO.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/RemoteQueuePartitionDTO.java new file mode 100644 index 000000000000..9248714d01a0 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-client-dto/src/main/java/org/apache/nifi/web/api/dto/diagnostics/RemoteQueuePartitionDTO.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.web.api.dto.diagnostics; + +import io.swagger.annotations.ApiModelProperty; + +import javax.xml.bind.annotation.XmlType; + +@XmlType(name = "remoteQueuePartition") +public class RemoteQueuePartitionDTO { + private String nodeId; + private int totalFlowFileCount; + private long totalByteCount; + private int activeQueueFlowFileCount; + private long activeQueueByteCount; + private int swapFlowFileCount; + private long swapByteCount; + private int swapFiles; + private int inFlightFlowFileCount; + private long inFlightByteCount; + + @ApiModelProperty("The Node Identifier that this queue partition is sending to") + public String getNodeIdentifier() { + return nodeId; + } + + public void setNodeIdentifier(String nodeId) { + this.nodeId = nodeId; + } + + @ApiModelProperty("Total number of FlowFiles owned by the Connection") + public int getTotalFlowFileCount() { + return totalFlowFileCount; + } + + public void setTotalFlowFileCount(int totalFlowFileCount) { + this.totalFlowFileCount = totalFlowFileCount; + } + + @ApiModelProperty("Total number of bytes that make up the content for the FlowFiles owned by this Connection") + public long getTotalByteCount() { + return totalByteCount; + } + + public void setTotalByteCount(long totalByteCount) { + this.totalByteCount = totalByteCount; + } + + @ApiModelProperty("Total number of FlowFiles that exist in the Connection's Active Queue, immediately available to be offered up to a component") + public int getActiveQueueFlowFileCount() { + return activeQueueFlowFileCount; + } + + public void setActiveQueueFlowFileCount(int activeQueueFlowFileCount) { + this.activeQueueFlowFileCount = activeQueueFlowFileCount; + } + + @ApiModelProperty("Total number of bytes that make up the content for the FlowFiles that are present in the Connection's Active Queue") + public long getActiveQueueByteCount() { + return activeQueueByteCount; + } + + public void setActiveQueueByteCount(long activeQueueByteCount) { + this.activeQueueByteCount = activeQueueByteCount; + } + + @ApiModelProperty("The total number of FlowFiles that are swapped out for this Connection") + public int getSwapFlowFileCount() { + return swapFlowFileCount; + } + + public void setSwapFlowFileCount(int swapFlowFileCount) { + this.swapFlowFileCount = swapFlowFileCount; + } + + @ApiModelProperty("Total number of bytes that make up the content for the FlowFiles that are swapped out to disk for the Connection") + public long getSwapByteCount() { + return swapByteCount; + } + + public void setSwapByteCount(long swapByteCount) { + this.swapByteCount = swapByteCount; + } + + @ApiModelProperty("The number of Swap Files that exist for this Connection") + public int getSwapFiles() { + return swapFiles; + } + + public void setSwapFiles(int swapFiles) { + this.swapFiles = swapFiles; + } + + @ApiModelProperty("The number of In-Flight FlowFiles for this Connection. 
These are FlowFiles that belong to the connection but are currently being operated on by a Processor, Port, etc.") + public int getInFlightFlowFileCount() { + return inFlightFlowFileCount; + } + + public void setInFlightFlowFileCount(int inFlightFlowFileCount) { + this.inFlightFlowFileCount = inFlightFlowFileCount; + } + + @ApiModelProperty("The number bytes that make up the content of the FlowFiles that are In-Flight") + public long getInFlightByteCount() { + return inFlightByteCount; + } + + public void setInFlightByteCount(long inFlightByteCount) { + this.inFlightByteCount = inFlightByteCount; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-file-authorizer/src/main/java/org/apache/nifi/authorization/FileAccessPolicyProvider.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-file-authorizer/src/main/java/org/apache/nifi/authorization/FileAccessPolicyProvider.java index b1a6f918a2b2..3174e34c78a4 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-file-authorizer/src/main/java/org/apache/nifi/authorization/FileAccessPolicyProvider.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-file-authorizer/src/main/java/org/apache/nifi/authorization/FileAccessPolicyProvider.java @@ -232,16 +232,21 @@ public void onConfigured(AuthorizerConfigurationContext configurationContext) th nodeGroupIdentifier = null; if (nodeGroupName != null) { - for (Group group : userGroupProvider.getGroups()) { - if (group.getName().equals(nodeGroupName)) { - nodeGroupIdentifier = group.getIdentifier(); - break; + if (!StringUtils.isBlank(nodeGroupName)) { + logger.debug("Trying to load node group '{}' from the underlying userGroupProvider", nodeGroupName); + for (Group group : userGroupProvider.getGroups()) { + if (group.getName().equals(nodeGroupName)) { + nodeGroupIdentifier = group.getIdentifier(); + break; + } } - } - if (nodeGroupIdentifier == null) { - throw new AuthorizerCreationException(String.format( + if (nodeGroupIdentifier == null) { + throw new AuthorizerCreationException(String.format( "Authorizations node group '%s' could not be found", nodeGroupName)); + } + } else { + logger.debug("Empty node group name provided"); } } @@ -633,6 +638,7 @@ private void populateNodes(Authorizations authorizations) { if (node == null) { throw new AuthorizerCreationException("Unable to locate node " + nodeIdentity + " to seed policies."); } + logger.debug("Populating default authorizations for node '{}' ({})", node.getIdentity(), node.getIdentifier()); // grant access to the proxy resource addUserToAccessPolicy(authorizations, ResourceType.Proxy.getValue(), node.getIdentifier(), WRITE_CODE); @@ -645,6 +651,7 @@ private void populateNodes(Authorizations authorizations) { // authorize dynamic nodes (node group) if (nodeGroupIdentifier != null) { + logger.debug("Populating default authorizations for group '{}' ({})", userGroupProvider.getGroup(nodeGroupIdentifier).getName(), nodeGroupIdentifier); addGroupToAccessPolicy(authorizations, ResourceType.Proxy.getValue(), nodeGroupIdentifier, WRITE_CODE); if (rootGroupId != null) { diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-file-authorizer/src/test/java/org/apache/nifi/authorization/FileAccessPolicyProviderTest.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-file-authorizer/src/test/java/org/apache/nifi/authorization/FileAccessPolicyProviderTest.java index d02ada7abc2a..f13f7f12028f 100644 --- 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-file-authorizer/src/test/java/org/apache/nifi/authorization/FileAccessPolicyProviderTest.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-file-authorizer/src/test/java/org/apache/nifi/authorization/FileAccessPolicyProviderTest.java @@ -767,8 +767,8 @@ public void testOnConfiguredWhenNodeGroupProvided() throws Exception { userGroupProvider.onConfigured(configurationContext); accessPolicyProvider.onConfigured(configurationContext); - User nodeUser1 = userGroupProvider.getUserByIdentity(nodeIdentity1); - User nodeUser2 = userGroupProvider.getUserByIdentity(nodeIdentity2); + assertNotNull(userGroupProvider.getUserByIdentity(nodeIdentity1)); + assertNotNull(userGroupProvider.getUserByIdentity(nodeIdentity2)); AccessPolicy proxyWritePolicy = accessPolicyProvider.getAccessPolicy(ResourceType.Proxy.getValue(), RequestAction.WRITE); @@ -776,6 +776,41 @@ public void testOnConfiguredWhenNodeGroupProvided() throws Exception { assertTrue(proxyWritePolicy.getGroups().contains(nodeGroupIdentifier)); } + @Test + public void testOnConfiguredWhenNodeGroupEmpty() throws Exception { + final String adminIdentity = "admin-user"; + final String nodeGroupIdentifier = "cluster-nodes"; + + when(configurationContext.getProperty(eq(FileAccessPolicyProvider.PROP_INITIAL_ADMIN_IDENTITY))) + .thenReturn(new StandardPropertyValue(adminIdentity, null)); + when(configurationContext.getProperty(eq(FileAccessPolicyProvider.PROP_NODE_GROUP_NAME))) + .thenReturn(new StandardPropertyValue("", null)); + + writeFile(primaryAuthorizations, EMPTY_AUTHORIZATIONS_CONCISE); + writeFile(primaryTenants, TENANTS_FOR_ADMIN_AND_NODE_GROUP); + + userGroupProvider.onConfigured(configurationContext); + accessPolicyProvider.onConfigured(configurationContext); + + assertNull(accessPolicyProvider.getAccessPolicy(ResourceType.Proxy.getValue(), RequestAction.WRITE)); + } + + @Test(expected = AuthorizerCreationException.class) + public void testOnConfiguredWhenNodeGroupDoesNotExist() throws Exception { + final String adminIdentity = "admin-user"; + + when(configurationContext.getProperty(eq(FileAccessPolicyProvider.PROP_INITIAL_ADMIN_IDENTITY))) + .thenReturn(new StandardPropertyValue(adminIdentity, null)); + when(configurationContext.getProperty(eq(FileAccessPolicyProvider.PROP_NODE_GROUP_NAME))) + .thenReturn(new StandardPropertyValue("nonexistent", null)); + + writeFile(primaryAuthorizations, EMPTY_AUTHORIZATIONS_CONCISE); + writeFile(primaryTenants, TENANTS_FOR_ADMIN_AND_NODE_GROUP); + + userGroupProvider.onConfigured(configurationContext); + accessPolicyProvider.onConfigured(configurationContext); + } + @Test public void testOnConfiguredWhenTenantsAndAuthorizationsFileDoesNotExist() { userGroupProvider.onConfigured(configurationContext); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-flowfile-repo-serialization/src/main/java/org/apache/nifi/controller/repository/SchemaRepositoryRecordSerde.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-flowfile-repo-serialization/src/main/java/org/apache/nifi/controller/repository/SchemaRepositoryRecordSerde.java index 970d45e6f9b5..0013846af9e7 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-flowfile-repo-serialization/src/main/java/org/apache/nifi/controller/repository/SchemaRepositoryRecordSerde.java +++ 
b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-flowfile-repo-serialization/src/main/java/org/apache/nifi/controller/repository/SchemaRepositoryRecordSerde.java @@ -17,12 +17,6 @@ package org.apache.nifi.controller.repository; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.EOFException; -import java.io.IOException; -import java.util.Map; - import org.apache.nifi.controller.queue.FlowFileQueue; import org.apache.nifi.controller.repository.claim.ContentClaim; import org.apache.nifi.controller.repository.claim.ResourceClaimManager; @@ -34,6 +28,7 @@ import org.apache.nifi.controller.repository.schema.RepositoryRecordUpdate; import org.apache.nifi.repository.schema.FieldType; import org.apache.nifi.repository.schema.Record; +import org.apache.nifi.repository.schema.RecordIterator; import org.apache.nifi.repository.schema.RecordSchema; import org.apache.nifi.repository.schema.Repetition; import org.apache.nifi.repository.schema.SchemaRecordReader; @@ -43,6 +38,13 @@ import org.slf4j.LoggerFactory; import org.wali.SerDe; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.EOFException; +import java.io.File; +import java.io.IOException; +import java.util.Map; + public class SchemaRepositoryRecordSerde extends RepositoryRecordSerde implements SerDe { private static final Logger logger = LoggerFactory.getLogger(SchemaRepositoryRecordSerde.class); private static final int MAX_ENCODING_VERSION = 2; @@ -51,7 +53,8 @@ public class SchemaRepositoryRecordSerde extends RepositoryRecordSerde implement private final RecordSchema contentClaimSchema = ContentClaimSchema.CONTENT_CLAIM_SCHEMA_V1; private final ResourceClaimManager resourceClaimManager; - private volatile RecordSchema recoverySchema; + private volatile SchemaRecordReader reader; + private RecordIterator recordIterator = null; public SchemaRepositoryRecordSerde(final ResourceClaimManager resourceClaimManager) { this.resourceClaimManager = resourceClaimManager; @@ -101,7 +104,8 @@ protected void serializeRecord(final RepositoryRecord record, final DataOutputSt @Override public void readHeader(final DataInputStream in) throws IOException { - recoverySchema = RecordSchema.readFrom(in); + final RecordSchema recoverySchema = RecordSchema.readFrom(in); + reader = SchemaRecordReader.fromSchema(recoverySchema); } @Override @@ -120,8 +124,41 @@ public RepositoryRecord deserializeEdit(final DataInputStream in, final Map * Responsible for coordinating nodes in the cluster @@ -61,6 +62,30 @@ public interface ClusterCoordinator { */ void finishNodeConnection(NodeIdentifier nodeId); + /** + * Indicates that the node has finished being offloaded + * + * @param nodeId the identifier of the node + */ + void finishNodeOffload(NodeIdentifier nodeId); + + /** + * Sends a request to the node to be offloaded. + * The node will be marked as offloading immediately. + *
+ * <p>
+ * When a node is offloaded:
+ * <ul>
+ * <li>all processors on the node are stopped</li>
+ * <li>all processors on the node are terminated</li>
+ * <li>all remote process groups on the node stop transmitting</li>
+ * <li>all flowfiles on the node are sent to other nodes in the cluster</li>
+ * </ul>
+ *
+ * @param nodeId the identifier of the node + * @param offloadCode the code that represents why this node is being asked to be offloaded + * @param explanation an explanation as to why the node is being asked to be offloaded + */ + void requestNodeOffload(NodeIdentifier nodeId, OffloadCode offloadCode, String explanation); + /** * Sends a request to the node to disconnect from the cluster. * The node will be marked as disconnected immediately. @@ -127,12 +152,12 @@ public interface ClusterCoordinator { * true if the node is blocked, false if the node is * allowed through the firewall or if there is no firewall configured * - * @param hostname the hostname of the node that is attempting to connect to the cluster + * @param nodeIdentities the identities of the node that is attempting to connect to the cluster * * @return true if the node is blocked, false if the node is * allowed through the firewall or if there is no firewall configured */ - boolean isBlockedByFirewall(String hostname); + boolean isBlockedByFirewall(Set nodeIdentities); /** * Reports that some event occurred that is relevant to the cluster @@ -244,4 +269,16 @@ public interface ClusterCoordinator { * @throws IOException thrown when it failed to communicate with the cluster coordinator. */ Map getClusterWorkload() throws IOException; + + /** + * Registers the given event listener so that it is notified whenever a cluster topology event occurs + * @param eventListener the event listener to notify + */ + void registerEventListener(ClusterTopologyEventListener eventListener); + + /** + * Stops notifying the given listener when cluster topology events occurs + * @param eventListener the event listener to stop notifying + */ + void unregisterEventListener(ClusterTopologyEventListener eventListener); } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/ClusterTopologyEventListener.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/ClusterTopologyEventListener.java new file mode 100644 index 000000000000..d31339b91a32 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/ClusterTopologyEventListener.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.cluster.coordination; + +import org.apache.nifi.cluster.coordination.node.NodeConnectionState; +import org.apache.nifi.cluster.protocol.NodeIdentifier; + +public interface ClusterTopologyEventListener { + + void onNodeAdded(NodeIdentifier nodeId); + + void onNodeRemoved(NodeIdentifier nodeId); + + void onLocalNodeIdentifierSet(NodeIdentifier localNodeId); + + void onNodeStateChange(NodeIdentifier nodeId, NodeConnectionState newState); +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/NodeConnectionState.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/NodeConnectionState.java index 8d5824f17103..d79552c8cdb8 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/NodeConnectionState.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/NodeConnectionState.java @@ -36,12 +36,22 @@ public enum NodeConnectionState { */ CONNECTED, + /** + * A node that is in the process of offloading its flow files from the node. + */ + OFFLOADING, + /** * A node that is in the process of disconnecting from the cluster. * A DISCONNECTING node will always transition to DISCONNECTED. */ DISCONNECTING, + /** + * A node that has offloaded its flow files from the node. + */ + OFFLOADED, + /** * A node that is not connected to the cluster. * A DISCONNECTED node can transition to CONNECTING. diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/NodeConnectionStatus.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/NodeConnectionStatus.java index 34bd1279e3e7..7d8a94049cc5 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/NodeConnectionStatus.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/NodeConnectionStatus.java @@ -35,47 +35,53 @@ public class NodeConnectionStatus { private final long updateId; private final NodeIdentifier nodeId; private final NodeConnectionState state; + private final OffloadCode offloadCode; private final DisconnectionCode disconnectCode; - private final String disconnectReason; + private final String reason; private final Long connectionRequestTime; public NodeConnectionStatus(final NodeIdentifier nodeId, final NodeConnectionState state) { - this(nodeId, state, null, null, null); + this(nodeId, state, null, null, null, null); } public NodeConnectionStatus(final NodeIdentifier nodeId, final DisconnectionCode disconnectionCode) { - this(nodeId, NodeConnectionState.DISCONNECTED, disconnectionCode, disconnectionCode.toString(), null); + this(nodeId, NodeConnectionState.DISCONNECTED, null, disconnectionCode, disconnectionCode.toString(), null); + } + + public NodeConnectionStatus(final NodeIdentifier nodeId, final NodeConnectionState state, final OffloadCode offloadCode, final String offloadExplanation) { + this(nodeId, state, offloadCode, null, offloadExplanation, 
null); } public NodeConnectionStatus(final NodeIdentifier nodeId, final DisconnectionCode disconnectionCode, final String disconnectionExplanation) { - this(nodeId, NodeConnectionState.DISCONNECTED, disconnectionCode, disconnectionExplanation, null); + this(nodeId, NodeConnectionState.DISCONNECTED, null, disconnectionCode, disconnectionExplanation, null); } public NodeConnectionStatus(final NodeIdentifier nodeId, final NodeConnectionState state, final DisconnectionCode disconnectionCode) { - this(nodeId, state, disconnectionCode, disconnectionCode == null ? null : disconnectionCode.toString(), null); + this(nodeId, state, null, disconnectionCode, disconnectionCode == null ? null : disconnectionCode.toString(), null); } public NodeConnectionStatus(final NodeConnectionStatus status) { - this(status.getNodeIdentifier(), status.getState(), status.getDisconnectCode(), status.getDisconnectReason(), status.getConnectionRequestTime()); + this(status.getNodeIdentifier(), status.getState(), status.getOffloadCode(), status.getDisconnectCode(), status.getReason(), status.getConnectionRequestTime()); } - public NodeConnectionStatus(final NodeIdentifier nodeId, final NodeConnectionState state, final DisconnectionCode disconnectCode, - final String disconnectReason, final Long connectionRequestTime) { - this(idGenerator.getAndIncrement(), nodeId, state, disconnectCode, disconnectReason, connectionRequestTime); + public NodeConnectionStatus(final NodeIdentifier nodeId, final NodeConnectionState state, final OffloadCode offloadCode, + final DisconnectionCode disconnectCode, final String reason, final Long connectionRequestTime) { + this(idGenerator.getAndIncrement(), nodeId, state, offloadCode, disconnectCode, reason, connectionRequestTime); } - public NodeConnectionStatus(final long updateId, final NodeIdentifier nodeId, final NodeConnectionState state, final DisconnectionCode disconnectCode, - final String disconnectReason, final Long connectionRequestTime) { + public NodeConnectionStatus(final long updateId, final NodeIdentifier nodeId, final NodeConnectionState state, final OffloadCode offloadCode, + final DisconnectionCode disconnectCode, final String reason, final Long connectionRequestTime) { this.updateId = updateId; this.nodeId = nodeId; this.state = state; + this.offloadCode = offloadCode; if (state == NodeConnectionState.DISCONNECTED && disconnectCode == null) { this.disconnectCode = DisconnectionCode.UNKNOWN; - this.disconnectReason = this.disconnectCode.toString(); + this.reason = this.disconnectCode.toString(); } else { this.disconnectCode = disconnectCode; - this.disconnectReason = disconnectReason; + this.reason = reason; } this.connectionRequestTime = (connectionRequestTime == null && state == NodeConnectionState.CONNECTING) ? 
Long.valueOf(System.currentTimeMillis()) : connectionRequestTime; @@ -93,12 +99,16 @@ public NodeConnectionState getState() { return state; } + public OffloadCode getOffloadCode() { + return offloadCode; + } + public DisconnectionCode getDisconnectCode() { return disconnectCode; } - public String getDisconnectReason() { - return disconnectReason; + public String getReason() { + return reason; } public Long getConnectionRequestTime() { @@ -110,8 +120,11 @@ public String toString() { final StringBuilder sb = new StringBuilder(); final NodeConnectionState state = getState(); sb.append("NodeConnectionStatus[nodeId=").append(nodeId).append(", state=").append(state); + if (state == NodeConnectionState.OFFLOADED || state == NodeConnectionState.OFFLOADING) { + sb.append(", Offload Code=").append(getOffloadCode()).append(", Offload Reason=").append(getReason()); + } if (state == NodeConnectionState.DISCONNECTED || state == NodeConnectionState.DISCONNECTING) { - sb.append(", Disconnect Code=").append(getDisconnectCode()).append(", Disconnect Reason=").append(getDisconnectReason()); + sb.append(", Disconnect Code=").append(getDisconnectCode()).append(", Disconnect Reason=").append(getReason()); } sb.append(", updateId=").append(getUpdateIdentifier()); sb.append("]"); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/OffloadCode.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/OffloadCode.java new file mode 100644 index 000000000000..fb4d30bbc5f0 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/coordination/node/OffloadCode.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.cluster.coordination.node; + +/** + * An enumeration of the reasons that a node may be offloaded + */ +public enum OffloadCode { + + /** + * A user explicitly offloaded the node + */ + OFFLOADED("Node Offloaded"); + + private final String description; + + OffloadCode(final String description) { + this.description = description; + } + + @Override + public String toString() { + return description; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/ClusterCoordinationProtocolSender.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/ClusterCoordinationProtocolSender.java index 986231efd466..b5485ccd5652 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/ClusterCoordinationProtocolSender.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/ClusterCoordinationProtocolSender.java @@ -19,6 +19,7 @@ import java.util.Set; import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus; +import org.apache.nifi.cluster.protocol.message.OffloadMessage; import org.apache.nifi.cluster.protocol.message.DisconnectMessage; import org.apache.nifi.cluster.protocol.message.NodeStatusChangeMessage; import org.apache.nifi.cluster.protocol.message.ReconnectionRequestMessage; @@ -40,6 +41,14 @@ public interface ClusterCoordinationProtocolSender { */ ReconnectionResponseMessage requestReconnection(ReconnectionRequestMessage msg) throws ProtocolException; + /** + * Sends an "offload request" message to a node. + * + * @param msg a message + * @throws ProtocolException if communication failed + */ + void offload(OffloadMessage msg) throws ProtocolException; + /** * Sends a "disconnection request" message to a node. 
* diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/NodeIdentifier.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/NodeIdentifier.java index f4475df93b6c..74c5538414f1 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/NodeIdentifier.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/NodeIdentifier.java @@ -16,11 +16,16 @@ */ package org.apache.nifi.cluster.protocol; +import org.apache.commons.lang3.StringUtils; +import org.apache.nifi.util.NiFiProperties; + import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; - -import org.apache.commons.lang3.StringUtils; +import java.util.Collections; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; /** * A node identifier denoting the coordinates of a flow controller that is @@ -63,10 +68,20 @@ public class NodeIdentifier { private final String socketAddress; /** - * the port to use use for sending requests to the node's internal interface + * the port to use for sending requests to the node's internal interface */ private final int socketPort; + /** + * The IP or hostname to use for sending FlowFiles when load balancing a connection + */ + private final String loadBalanceAddress; + + /** + * the port to use for sending FlowFiles when load balancing a connection + */ + private final int loadBalancePort; + /** * the IP or hostname that external clients should use to communicate with this node via Site-to-Site */ @@ -89,15 +104,21 @@ public class NodeIdentifier { private final Boolean siteToSiteSecure; - private final String nodeDn; + private final Set nodeIdentities; public NodeIdentifier(final String id, final String apiAddress, final int apiPort, final String socketAddress, final int socketPort, + final String siteToSiteAddress, final Integer siteToSitePort, final Integer siteToSiteHttpApiPort, final boolean siteToSiteSecure) { + this(id, apiAddress, apiPort, socketAddress, socketPort, socketAddress, NiFiProperties.DEFAULT_LOAD_BALANCE_PORT, siteToSiteAddress, siteToSitePort, siteToSiteHttpApiPort, siteToSiteSecure, + null); + } + + public NodeIdentifier(final String id, final String apiAddress, final int apiPort, final String socketAddress, final int socketPort, final String loadBalanceAddress, final int loadBalancePort, final String siteToSiteAddress, final Integer siteToSitePort, final Integer siteToSiteHttpApiPort, final boolean siteToSiteSecure) { - this(id, apiAddress, apiPort, socketAddress, socketPort, siteToSiteAddress, siteToSitePort, siteToSiteHttpApiPort, siteToSiteSecure, null); + this(id, apiAddress, apiPort, socketAddress, socketPort, loadBalanceAddress, loadBalancePort, siteToSiteAddress, siteToSitePort, siteToSiteHttpApiPort, siteToSiteSecure, null); } - public NodeIdentifier(final String id, final String apiAddress, final int apiPort, final String socketAddress, final int socketPort, - final String siteToSiteAddress, final Integer siteToSitePort, final Integer siteToSiteHttpApiPort, final boolean siteToSiteSecure, final String dn) { + public NodeIdentifier(final String id, final String apiAddress, final int apiPort, final String socketAddress, 
final int socketPort, final String loadBalanceAddress, final int loadBalancePort, + final String siteToSiteAddress, final Integer siteToSitePort, final Integer siteToSiteHttpApiPort, final boolean siteToSiteSecure, final Set nodeIdentities) { if (StringUtils.isBlank(id)) { throw new IllegalArgumentException("Node ID may not be empty or null."); @@ -109,6 +130,7 @@ public NodeIdentifier(final String id, final String apiAddress, final int apiPor validatePort(apiPort); validatePort(socketPort); + validatePort(loadBalancePort); if (siteToSitePort != null) { validatePort(siteToSitePort); } @@ -118,7 +140,9 @@ public NodeIdentifier(final String id, final String apiAddress, final int apiPor this.apiPort = apiPort; this.socketAddress = socketAddress; this.socketPort = socketPort; - this.nodeDn = dn; + this.loadBalanceAddress = loadBalanceAddress; + this.loadBalancePort = loadBalancePort; + this.nodeIdentities = nodeIdentities == null ? Collections.emptySet() : Collections.unmodifiableSet(new HashSet<>(nodeIdentities)); this.siteToSiteAddress = siteToSiteAddress == null ? apiAddress : siteToSiteAddress; this.siteToSitePort = siteToSitePort; this.siteToSiteHttpApiPort = siteToSiteHttpApiPort; @@ -134,7 +158,9 @@ public NodeIdentifier() { this.apiPort = 0; this.socketAddress = null; this.socketPort = 0; - this.nodeDn = null; + this.loadBalanceAddress = null; + this.loadBalancePort = 0; + this.nodeIdentities = Collections.emptySet(); this.siteToSiteAddress = null; this.siteToSitePort = null; this.siteToSiteHttpApiPort = null; @@ -145,8 +171,8 @@ public String getId() { return id; } - public String getDN() { - return nodeDn; + public Set getNodeIdentities() { + return nodeIdentities; } public String getApiAddress() { @@ -165,6 +191,14 @@ public int getSocketPort() { return socketPort; } + public String getLoadBalanceAddress() { + return loadBalanceAddress; + } + + public int getLoadBalancePort() { + return loadBalancePort; + } + private void validatePort(final int port) { if (port < 1 || port > 65535) { throw new IllegalArgumentException("Port must be inclusively in the range [1, 65535]. Port given: " + port); @@ -223,13 +257,16 @@ public boolean logicallyEquals(final NodeIdentifier other) { if (other == null) { return false; } - if ((this.apiAddress == null) ? (other.apiAddress != null) : !this.apiAddress.equals(other.apiAddress)) { + if (other == this) { + return true; + } + if (!Objects.equals(apiAddress, other.apiAddress)) { return false; } if (this.apiPort != other.apiPort) { return false; } - if ((this.socketAddress == null) ? 
(other.socketAddress != null) : !this.socketAddress.equals(other.socketAddress)) { + if (!Objects.equals(socketAddress, other.socketAddress)) { return false; } if (this.socketPort != other.socketPort) { @@ -251,4 +288,10 @@ public String toString() { return apiAddress + ":" + apiPort; } + public String getFullDescription() { + return "NodeIdentifier[UUID=" + id + ", API Address = " + apiAddress + ":" + apiPort + ", Cluster Socket Address = " + socketAddress + ":" + socketPort + + ", Load Balance Address = " + loadBalanceAddress + ":" + loadBalancePort + ", Site-to-Site Raw Address = " + siteToSiteAddress + ":" + siteToSitePort + + ", Site-to-Site HTTP Address = " + apiAddress + ":" + siteToSiteHttpApiPort + ", Site-to-Site Secure = " + siteToSiteSecure + ", Node Identities = " + nodeIdentities + "]"; + } + } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/ProtocolHandler.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/ProtocolHandler.java index b2bace9699f4..836ad7ac0349 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/ProtocolHandler.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/ProtocolHandler.java @@ -18,6 +18,8 @@ import org.apache.nifi.cluster.protocol.message.ProtocolMessage; +import java.util.Set; + /** * A handler for processing protocol messages. * @@ -30,11 +32,12 @@ public interface ProtocolHandler { * should be returned. * * @param msg a message + * @param nodeIdentities the set of identities for this node * @return a response or null, if no response is necessary * * @throws ProtocolException if the message could not be processed */ - ProtocolMessage handle(ProtocolMessage msg) throws ProtocolException; + ProtocolMessage handle(ProtocolMessage msg, Set nodeIdentities) throws ProtocolException; /** * @param msg a message diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/ClusterCoordinationProtocolSenderListener.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/ClusterCoordinationProtocolSenderListener.java index ae3a0e50571e..74cc6b476a25 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/ClusterCoordinationProtocolSenderListener.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/ClusterCoordinationProtocolSenderListener.java @@ -26,6 +26,7 @@ import org.apache.nifi.cluster.protocol.ProtocolException; import org.apache.nifi.cluster.protocol.ProtocolHandler; import org.apache.nifi.cluster.protocol.ProtocolListener; +import org.apache.nifi.cluster.protocol.message.OffloadMessage; import org.apache.nifi.cluster.protocol.message.DisconnectMessage; import org.apache.nifi.cluster.protocol.message.NodeStatusChangeMessage; import org.apache.nifi.cluster.protocol.message.ReconnectionRequestMessage; @@ -100,6 +101,11 @@ public ReconnectionResponseMessage requestReconnection(final ReconnectionRequest return 
sender.requestReconnection(msg); } + @Override + public void offload(OffloadMessage msg) throws ProtocolException { + sender.offload(msg); + } + @Override public void disconnect(DisconnectMessage msg) throws ProtocolException { sender.disconnect(msg); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/SocketProtocolListener.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/SocketProtocolListener.java index e31a54734b8c..c588a6807dc2 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/SocketProtocolListener.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/SocketProtocolListener.java @@ -16,15 +16,6 @@ */ package org.apache.nifi.cluster.protocol.impl; -import java.io.IOException; -import java.io.InputStream; -import java.net.Socket; -import java.security.cert.CertificateException; -import java.util.Collection; -import java.util.Collections; -import java.util.UUID; -import java.util.concurrent.CopyOnWriteArrayList; - import org.apache.nifi.cluster.protocol.NodeIdentifier; import org.apache.nifi.cluster.protocol.ProtocolContext; import org.apache.nifi.cluster.protocol.ProtocolException; @@ -33,6 +24,7 @@ import org.apache.nifi.cluster.protocol.ProtocolMessageMarshaller; import org.apache.nifi.cluster.protocol.ProtocolMessageUnmarshaller; import org.apache.nifi.cluster.protocol.message.ConnectionRequestMessage; +import org.apache.nifi.cluster.protocol.message.OffloadMessage; import org.apache.nifi.cluster.protocol.message.DisconnectMessage; import org.apache.nifi.cluster.protocol.message.FlowRequestMessage; import org.apache.nifi.cluster.protocol.message.HeartbeatMessage; @@ -49,6 +41,22 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.net.ssl.SSLPeerUnverifiedException; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocket; +import java.io.IOException; +import java.io.InputStream; +import java.net.Socket; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.Collection; +import java.util.Collections; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.stream.Collectors; + /** * Implements a listener for protocol messages sent over unicast socket. 
* @@ -82,7 +90,6 @@ public void setBulletinRepository(final BulletinRepository bulletinRepository) { @Override public void start() throws IOException { - if (super.isRunning()) { throw new IllegalStateException("Instance is already started."); } @@ -92,7 +99,6 @@ public void start() throws IOException { @Override public void stop() throws IOException { - if (super.isRunning() == false) { throw new IOException("Instance is already stopped."); } @@ -128,8 +134,6 @@ public void dispatchRequest(final Socket socket) { final String requestId = UUID.randomUUID().toString(); logger.debug("Received request {} from {}", requestId, hostname); - String requestorDn = getRequestorDN(socket); - // unmarshall message final ProtocolMessageUnmarshaller unmarshaller = protocolContext.createUnmarshaller(); final ByteCountingInputStream countingIn = new ByteCountingInputStream(socket.getInputStream()); @@ -151,7 +155,7 @@ public void dispatchRequest(final Socket socket) { } } - request.setRequestorDN(requestorDn); + final Set nodeIdentities = getCertificateIdentities(socket); // dispatch message to handler ProtocolHandler desiredHandler = null; @@ -168,7 +172,7 @@ public void dispatchRequest(final Socket socket) { logger.error("Received request of type {} but none of the following Protocol Handlers were able to process the request: {}", request.getType(), handlers); throw new ProtocolException("No handler assigned to handle message type: " + request.getType()); } else { - final ProtocolMessage response = desiredHandler.handle(request); + final ProtocolMessage response = desiredHandler.handle(request, nodeIdentities); if (response != null) { try { logger.debug("Sending response for request {}", requestId); @@ -207,6 +211,8 @@ private NodeIdentifier getNodeIdentifier(final ProtocolMessage message) { return ((ConnectionRequestMessage) message).getConnectionRequest().getProposedNodeIdentifier(); case HEARTBEAT: return ((HeartbeatMessage) message).getHeartbeat().getNodeIdentifier(); + case OFFLOAD_REQUEST: + return ((OffloadMessage) message).getNodeId(); case DISCONNECTION_REQUEST: return ((DisconnectMessage) message).getNodeId(); case FLOW_REQUEST: @@ -218,11 +224,32 @@ private NodeIdentifier getNodeIdentifier(final ProtocolMessage message) { } } - private String getRequestorDN(Socket socket) { - try { - return CertificateUtils.extractPeerDNFromSSLSocket(socket); - } catch (CertificateException e) { - throw new ProtocolException(e); + private Set getCertificateIdentities(final Socket socket) throws IOException { + if (socket instanceof SSLSocket) { + try { + final SSLSession sslSession = ((SSLSocket) socket).getSession(); + return getCertificateIdentities(sslSession); + } catch (CertificateException e) { + throw new IOException("Could not extract Subject Alternative Names from client's certificate", e); + } + } else { + return Collections.emptySet(); + } + } + + private Set getCertificateIdentities(final SSLSession sslSession) throws CertificateException, SSLPeerUnverifiedException { + final Certificate[] certs = sslSession.getPeerCertificates(); + if (certs == null || certs.length == 0) { + throw new SSLPeerUnverifiedException("No certificates found"); } + + final X509Certificate cert = CertificateUtils.convertAbstractX509Certificate(certs[0]); + cert.checkValidity(); + + final Set identities = CertificateUtils.getSubjectAlternativeNames(cert).stream() + .map(CertificateUtils::extractUsername) + .collect(Collectors.toSet()); + + return identities; } } diff --git 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/StandardClusterCoordinationProtocolSender.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/StandardClusterCoordinationProtocolSender.java index 167ddec93284..b21068ffe5e2 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/StandardClusterCoordinationProtocolSender.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/impl/StandardClusterCoordinationProtocolSender.java @@ -36,6 +36,7 @@ import org.apache.nifi.cluster.protocol.ProtocolException; import org.apache.nifi.cluster.protocol.ProtocolMessageMarshaller; import org.apache.nifi.cluster.protocol.ProtocolMessageUnmarshaller; +import org.apache.nifi.cluster.protocol.message.OffloadMessage; import org.apache.nifi.cluster.protocol.message.DisconnectMessage; import org.apache.nifi.cluster.protocol.message.NodeConnectionStatusRequestMessage; import org.apache.nifi.cluster.protocol.message.NodeConnectionStatusResponseMessage; @@ -128,6 +129,31 @@ public ReconnectionResponseMessage requestReconnection(final ReconnectionRequest } } + /** + * Requests a node to be offloaded. The configured value for + * handshake timeout is applied to the socket before making the request. + * + * @param msg a message + * @throws ProtocolException if the message failed to be sent + */ + @Override + public void offload(final OffloadMessage msg) throws ProtocolException { + Socket socket = null; + try { + socket = createSocket(msg.getNodeId(), true); + + // marshal message to output stream + try { + final ProtocolMessageMarshaller marshaller = protocolContext.createMarshaller(); + marshaller.marshal(msg, socket.getOutputStream()); + } catch (final IOException ioe) { + throw new ProtocolException("Failed marshalling '" + msg.getType() + "' protocol message due to: " + ioe, ioe); + } + } finally { + SocketUtils.closeQuietly(socket); + } + } + /** * Requests a node to disconnect from the cluster. The configured value for * handshake timeout is applied to the socket before making the request. 
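The hunk above wires the new OffloadMessage into StandardClusterCoordinationProtocolSender by marshalling the message onto a socket opened to the target node. A minimal sketch of how a coordinator-side caller could drive that method follows; it is illustrative only and not part of this patch, and the class and method names (OffloadRequestExample, sendOffloadRequest) are invented for the example.

    import org.apache.nifi.cluster.protocol.ClusterCoordinationProtocolSender;
    import org.apache.nifi.cluster.protocol.NodeIdentifier;
    import org.apache.nifi.cluster.protocol.ProtocolException;
    import org.apache.nifi.cluster.protocol.message.OffloadMessage;

    // Hypothetical helper, not present in this patch.
    public class OffloadRequestExample {

        private final ClusterCoordinationProtocolSender sender;

        public OffloadRequestExample(final ClusterCoordinationProtocolSender sender) {
            this.sender = sender;
        }

        // Builds an OffloadMessage for the given node and hands it to the protocol
        // sender added by this patch, which marshals it to the node's cluster socket.
        public void sendOffloadRequest(final NodeIdentifier nodeId, final String explanation) throws ProtocolException {
            final OffloadMessage msg = new OffloadMessage();
            msg.setNodeId(nodeId);
            msg.setExplanation(explanation);
            sender.offload(msg);
        }
    }

On the receiving node the message arrives through ProtocolHandler.handle(msg, nodeIdentities) with type MessageType.OFFLOAD_REQUEST, as introduced earlier in this patch.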
diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/AdaptedNodeConnectionStatus.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/AdaptedNodeConnectionStatus.java index c8c4acf646a3..5eae83e0e11b 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/AdaptedNodeConnectionStatus.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/AdaptedNodeConnectionStatus.java @@ -17,6 +17,7 @@ package org.apache.nifi.cluster.protocol.jaxb.message; +import org.apache.nifi.cluster.coordination.node.OffloadCode; import org.apache.nifi.cluster.coordination.node.DisconnectionCode; import org.apache.nifi.cluster.coordination.node.NodeConnectionState; import org.apache.nifi.cluster.protocol.NodeIdentifier; @@ -25,8 +26,9 @@ public class AdaptedNodeConnectionStatus { private Long updateId; private NodeIdentifier nodeId; private NodeConnectionState state; + private OffloadCode offloadCode; private DisconnectionCode disconnectCode; - private String disconnectReason; + private String reason; private Long connectionRequestTime; public Long getUpdateId() { @@ -53,20 +55,28 @@ public void setState(NodeConnectionState state) { this.state = state; } + public OffloadCode getOffloadCode() { + return offloadCode; + } + public DisconnectionCode getDisconnectCode() { return disconnectCode; } + public void setOffloadCode(OffloadCode offloadCode) { + this.offloadCode = offloadCode; + } + public void setDisconnectCode(DisconnectionCode disconnectCode) { this.disconnectCode = disconnectCode; } - public String getDisconnectReason() { - return disconnectReason; + public String getReason() { + return reason; } - public void setDisconnectReason(String disconnectReason) { - this.disconnectReason = disconnectReason; + public void setReason(String reason) { + this.reason = reason; } public Long getConnectionRequestTime() { diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/AdaptedNodeIdentifier.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/AdaptedNodeIdentifier.java index a2d996863cf2..dbc988b456ea 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/AdaptedNodeIdentifier.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/AdaptedNodeIdentifier.java @@ -25,6 +25,8 @@ public class AdaptedNodeIdentifier { private int apiPort; private String socketAddress; private int socketPort; + private String loadBalanceAddress; + private int loadBalancePort; private String siteToSiteAddress; private Integer siteToSitePort; private Integer siteToSiteHttpApiPort; @@ -74,6 +76,22 @@ public void setSocketPort(int socketPort) { this.socketPort = socketPort; } + public String getLoadBalanceAddress() { + return loadBalanceAddress; + } + + public void setLoadBalanceAddress(final String loadBalanceAddress) { + 
this.loadBalanceAddress = loadBalanceAddress; + } + + public int getLoadBalancePort() { + return loadBalancePort; + } + + public void setLoadBalancePort(final int loadBalancePort) { + this.loadBalancePort = loadBalancePort; + } + public String getSiteToSiteAddress() { return siteToSiteAddress; } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/NodeConnectionStatusAdapter.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/NodeConnectionStatusAdapter.java index ec209de1f540..47e92e8d2a34 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/NodeConnectionStatusAdapter.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/NodeConnectionStatusAdapter.java @@ -28,8 +28,9 @@ public NodeConnectionStatus unmarshal(final AdaptedNodeConnectionStatus adapted) return new NodeConnectionStatus(adapted.getUpdateId(), adapted.getNodeId(), adapted.getState(), + adapted.getOffloadCode(), adapted.getDisconnectCode(), - adapted.getDisconnectReason(), + adapted.getReason(), adapted.getConnectionRequestTime()); } @@ -40,8 +41,9 @@ public AdaptedNodeConnectionStatus marshal(final NodeConnectionStatus toAdapt) t adapted.setUpdateId(toAdapt.getUpdateIdentifier()); adapted.setNodeId(toAdapt.getNodeIdentifier()); adapted.setConnectionRequestTime(toAdapt.getConnectionRequestTime()); + adapted.setOffloadCode(toAdapt.getOffloadCode()); adapted.setDisconnectCode(toAdapt.getDisconnectCode()); - adapted.setDisconnectReason(toAdapt.getDisconnectReason()); + adapted.setReason(toAdapt.getReason()); adapted.setState(toAdapt.getState()); } return adapted; diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/NodeIdentifierAdapter.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/NodeIdentifierAdapter.java index 4a2660f7c1b9..29aa451dc822 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/NodeIdentifierAdapter.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/NodeIdentifierAdapter.java @@ -34,6 +34,8 @@ public AdaptedNodeIdentifier marshal(final NodeIdentifier ni) { aNi.setApiPort(ni.getApiPort()); aNi.setSocketAddress(ni.getSocketAddress()); aNi.setSocketPort(ni.getSocketPort()); + aNi.setLoadBalanceAddress(ni.getLoadBalanceAddress()); + aNi.setLoadBalancePort(ni.getLoadBalancePort()); aNi.setSiteToSiteAddress(ni.getSiteToSiteAddress()); aNi.setSiteToSitePort(ni.getSiteToSitePort()); aNi.setSiteToSiteHttpApiPort(ni.getSiteToSiteHttpApiPort()); @@ -47,7 +49,7 @@ public NodeIdentifier unmarshal(final AdaptedNodeIdentifier aNi) { if (aNi == null) { return null; } else { - return new NodeIdentifier(aNi.getId(), aNi.getApiAddress(), aNi.getApiPort(), aNi.getSocketAddress(), aNi.getSocketPort(), + return new NodeIdentifier(aNi.getId(), aNi.getApiAddress(), aNi.getApiPort(), aNi.getSocketAddress(), 
aNi.getSocketPort(), aNi.getLoadBalanceAddress(), aNi.getLoadBalancePort(), aNi.getSiteToSiteAddress(), aNi.getSiteToSitePort(),aNi.getSiteToSiteHttpApiPort(), aNi.isSiteToSiteSecure()); } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/ObjectFactory.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/ObjectFactory.java index 9a594a403e8d..2f02e5e6fd5b 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/ObjectFactory.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/jaxb/message/ObjectFactory.java @@ -20,6 +20,7 @@ import org.apache.nifi.cluster.protocol.message.ConnectionRequestMessage; import org.apache.nifi.cluster.protocol.message.ConnectionResponseMessage; +import org.apache.nifi.cluster.protocol.message.OffloadMessage; import org.apache.nifi.cluster.protocol.message.DisconnectMessage; import org.apache.nifi.cluster.protocol.message.FlowRequestMessage; import org.apache.nifi.cluster.protocol.message.FlowResponseMessage; @@ -52,6 +53,10 @@ public ReconnectionResponseMessage createReconnectionResponseMessage() { return new ReconnectionResponseMessage(); } + public OffloadMessage createDecomissionMessage() { + return new OffloadMessage(); + } + public DisconnectMessage createDisconnectionMessage() { return new DisconnectMessage(); } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/message/OffloadMessage.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/message/OffloadMessage.java new file mode 100644 index 000000000000..a7acd56ad60e --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/message/OffloadMessage.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.cluster.protocol.message; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.cluster.protocol.jaxb.message.NodeIdentifierAdapter; + +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; + +@XmlRootElement(name = "offloadMessage") +public class OffloadMessage extends ProtocolMessage { + + private NodeIdentifier nodeId; + private String explanation; + + @XmlJavaTypeAdapter(NodeIdentifierAdapter.class) + public NodeIdentifier getNodeId() { + return nodeId; + } + + public void setNodeId(NodeIdentifier nodeId) { + this.nodeId = nodeId; + } + + public String getExplanation() { + return explanation; + } + + public void setExplanation(String explanation) { + this.explanation = explanation; + } + + @Override + public MessageType getType() { + return MessageType.OFFLOAD_REQUEST; + } + +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/message/ProtocolMessage.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/message/ProtocolMessage.java index 1cab62f8ba8b..fe26c7a2cc12 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/message/ProtocolMessage.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/java/org/apache/nifi/cluster/protocol/message/ProtocolMessage.java @@ -18,11 +18,10 @@ public abstract class ProtocolMessage { - private volatile String requestorDN; - public static enum MessageType { CONNECTION_REQUEST, CONNECTION_RESPONSE, + OFFLOAD_REQUEST, DISCONNECTION_REQUEST, EXCEPTION, FLOW_REQUEST, @@ -42,21 +41,4 @@ public static enum MessageType { public abstract MessageType getType(); - /** - * Sets the DN of the entity making the request - * - * @param dn dn of the entity making the request - */ - public void setRequestorDN(final String dn) { - this.requestorDN = dn; - } - - /** - * @return the DN of the entity that made the request, if using a secure - * socket. 
Otherwise, returns null - */ - public String getRequestorDN() { - return requestorDN; - } - } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/resources/nifi-cluster-protocol-context.xml b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/resources/nifi-cluster-protocol-context.xml index 63ab689f1960..e2d5bf2660d4 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/resources/nifi-cluster-protocol-context.xml +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/main/resources/nifi-cluster-protocol-context.xml @@ -27,17 +27,17 @@ - + - + - + @@ -49,13 +49,13 @@ - + - + @@ -81,7 +81,7 @@ - + diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/test/java/org/apache/nifi/cluster/protocol/impl/testutils/DelayedProtocolHandler.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/test/java/org/apache/nifi/cluster/protocol/impl/testutils/DelayedProtocolHandler.java index d6d83ef7ef0b..aff4b11dbba8 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/test/java/org/apache/nifi/cluster/protocol/impl/testutils/DelayedProtocolHandler.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/test/java/org/apache/nifi/cluster/protocol/impl/testutils/DelayedProtocolHandler.java @@ -16,12 +16,14 @@ */ package org.apache.nifi.cluster.protocol.impl.testutils; -import java.util.ArrayList; -import java.util.List; import org.apache.nifi.cluster.protocol.ProtocolException; import org.apache.nifi.cluster.protocol.ProtocolHandler; import org.apache.nifi.cluster.protocol.message.ProtocolMessage; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + /** */ public class DelayedProtocolHandler implements ProtocolHandler { @@ -34,7 +36,7 @@ public DelayedProtocolHandler(int delay) { } @Override - public ProtocolMessage handle(ProtocolMessage msg) throws ProtocolException { + public ProtocolMessage handle(ProtocolMessage msg, Set nodeIdentities) throws ProtocolException { try { messages.add(msg); Thread.sleep(delay); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/test/java/org/apache/nifi/cluster/protocol/impl/testutils/ReflexiveProtocolHandler.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/test/java/org/apache/nifi/cluster/protocol/impl/testutils/ReflexiveProtocolHandler.java index ccf2c4c83705..05d5b770c55e 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/test/java/org/apache/nifi/cluster/protocol/impl/testutils/ReflexiveProtocolHandler.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster-protocol/src/test/java/org/apache/nifi/cluster/protocol/impl/testutils/ReflexiveProtocolHandler.java @@ -16,12 +16,14 @@ */ package org.apache.nifi.cluster.protocol.impl.testutils; -import java.util.ArrayList; -import java.util.List; import org.apache.nifi.cluster.protocol.ProtocolException; import org.apache.nifi.cluster.protocol.ProtocolHandler; import org.apache.nifi.cluster.protocol.message.ProtocolMessage; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + /** */ public class ReflexiveProtocolHandler 
implements ProtocolHandler { @@ -29,7 +31,7 @@ public class ReflexiveProtocolHandler implements ProtocolHandler { private List messages = new ArrayList<>(); @Override - public ProtocolMessage handle(ProtocolMessage msg) throws ProtocolException { + public ProtocolMessage handle(ProtocolMessage msg, Set nodeIdentities) throws ProtocolException { messages.add(msg); return msg; } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/heartbeat/AbstractHeartbeatMonitor.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/heartbeat/AbstractHeartbeatMonitor.java index 35bf510bb177..5fbe3f8bfd3d 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/heartbeat/AbstractHeartbeatMonitor.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/heartbeat/AbstractHeartbeatMonitor.java @@ -29,6 +29,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.Map; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; @@ -198,7 +199,7 @@ private void processHeartbeat(final NodeHeartbeat heartbeat) { final NodeIdentifier nodeId = heartbeat.getNodeIdentifier(); // Do not process heartbeat if it's blocked by firewall. - if (clusterCoordinator.isBlockedByFirewall(nodeId.getSocketAddress())) { + if (clusterCoordinator.isBlockedByFirewall(Collections.singleton(nodeId.getSocketAddress()))) { clusterCoordinator.reportEvent(nodeId, Severity.WARNING, "Firewall blocked received heartbeat. Issuing disconnection request."); // request node to disconnect @@ -227,6 +228,14 @@ private void processHeartbeat(final NodeHeartbeat heartbeat) { return; } + if (NodeConnectionState.OFFLOADED == connectionState || NodeConnectionState.OFFLOADING == connectionState) { + // Cluster Coordinator can ignore this heartbeat since the node is offloaded + clusterCoordinator.reportEvent(nodeId, Severity.INFO, "Received heartbeat from node that is offloading " + + "or offloaded. Removing this heartbeat. Offloaded nodes will only be reconnected to the cluster by an " + + "explicit connection request or restarting the node."); + removeHeartbeat(nodeId); + } + if (NodeConnectionState.DISCONNECTED == connectionState) { // ignore heartbeats from nodes disconnected by means other than lack of heartbeat, unless it is // the only node. We allow it if it is the only node because if we have a one-node cluster, then @@ -248,7 +257,7 @@ private void processHeartbeat(final NodeHeartbeat heartbeat) { default: { // disconnected nodes should not heartbeat, so we need to issue a disconnection request. logger.info("Ignoring received heartbeat from disconnected node " + nodeId + ". 
Issuing disconnection request."); - clusterCoordinator.requestNodeDisconnect(nodeId, disconnectionCode, connectionStatus.getDisconnectReason()); + clusterCoordinator.requestNodeDisconnect(nodeId, disconnectionCode, connectionStatus.getReason()); removeHeartbeat(nodeId); break; } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/heartbeat/ClusterProtocolHeartbeatMonitor.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/heartbeat/ClusterProtocolHeartbeatMonitor.java index 2d6f02327e1f..43f3f2b15f77 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/heartbeat/ClusterProtocolHeartbeatMonitor.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/heartbeat/ClusterProtocolHeartbeatMonitor.java @@ -16,16 +16,6 @@ */ package org.apache.nifi.cluster.coordination.heartbeat; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.function.Function; -import java.util.stream.Collectors; import org.apache.nifi.cluster.coordination.ClusterCoordinator; import org.apache.nifi.cluster.coordination.node.NodeConnectionState; import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus; @@ -46,6 +36,17 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.Function; +import java.util.stream.Collectors; + /** * Uses Apache ZooKeeper to advertise the address to send heartbeats to, and * then relies on the NiFi Cluster Protocol to receive heartbeat messages from @@ -134,7 +135,7 @@ public synchronized long getPurgeTimestamp() { } @Override - public ProtocolMessage handle(final ProtocolMessage msg) throws ProtocolException { + public ProtocolMessage handle(final ProtocolMessage msg, Set nodeIds) throws ProtocolException { switch (msg.getType()) { case HEARTBEAT: return handleHeartbeat((HeartbeatMessage) msg); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/StandardHttpResponseMapper.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/StandardHttpResponseMapper.java index 96f2592b93db..0cd550b913c5 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/StandardHttpResponseMapper.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/StandardHttpResponseMapper.java @@ -166,7 +166,7 @@ public NodeResponse mapResponses(final URI uri, final String httpMethod, final S // If we have a response that is a 3xx, 4xx, or 5xx, then we want to choose that. // Otherwise, it doesn't matter which one we choose. 
We do this because if we replicate // a mutable request, it's possible that one node will respond with a 409, for instance, while - // others respond with a 150-Continue. We do not want to pick the 150-Continue; instead, we want + // others respond with a 202-Accepted. We do not want to pick the 202-Accepted; instead, we want // the failed response. final NodeResponse clientResponse = nodeResponses.stream().filter(p -> p.getStatus() > 299).findAny().orElse(nodeResponses.iterator().next()); @@ -236,7 +236,7 @@ private void drainResponses(final Set responses, final NodeRespons responses.stream() .parallel() // "parallelize" the draining of the responses, since we have multiple streams to consume .filter(response -> response != exclude) // don't include the explicitly excluded node - .filter(response -> response.getStatus() != RequestReplicator.NODE_CONTINUE_STATUS_CODE) // don't include any 150-NodeContinue responses because they contain no content + .filter(response -> response.getStatus() != RequestReplicator.NODE_CONTINUE_STATUS_CODE) // don't include any continue responses because they contain no content .forEach(response -> drainResponse(response)); // drain all node responses that didn't get filtered out } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/RequestReplicator.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/RequestReplicator.java index a7177d43b62f..8a98ed7035d7 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/RequestReplicator.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/RequestReplicator.java @@ -30,13 +30,13 @@ public interface RequestReplicator { public static final String CLUSTER_ID_GENERATION_SEED_HEADER = "X-Cluster-Id-Generation-Seed"; /** - * The HTTP header that the requestor specifies to ask a node if they are able to process a given request. The value - * is always 150-NodeContinue. The node will respond with 150 CONTINUE if it is able to + * The HTTP header that the requestor specifies to ask a node if they are able to process a given request. + * The value is always 202-Accepted. The node will respond with 202 ACCEPTED if it is able to * process the request, 417 EXPECTATION_FAILED otherwise. 
*/ public static final String REQUEST_VALIDATION_HTTP_HEADER = "X-Validation-Expects"; - public static final String NODE_CONTINUE = "150-NodeContinue"; - public static final int NODE_CONTINUE_STATUS_CODE = 150; + public static final String NODE_CONTINUE = "202-Accepted"; + public static final int NODE_CONTINUE_STATUS_CODE = 202; /** * Indicates that the request is intended to cancel a transaction that was previously created without performing the action diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/ThreadPoolRequestReplicator.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/ThreadPoolRequestReplicator.java index 93804beb61de..b3a3ab965cf2 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/ThreadPoolRequestReplicator.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/ThreadPoolRequestReplicator.java @@ -17,6 +17,36 @@ package org.apache.nifi.cluster.coordination.http.replication; +import org.apache.commons.lang3.StringUtils; +import org.apache.nifi.authorization.AccessDeniedException; +import org.apache.nifi.authorization.user.NiFiUser; +import org.apache.nifi.authorization.user.NiFiUserUtils; +import org.apache.nifi.cluster.coordination.ClusterCoordinator; +import org.apache.nifi.cluster.coordination.http.HttpResponseMapper; +import org.apache.nifi.cluster.coordination.http.StandardHttpResponseMapper; +import org.apache.nifi.cluster.coordination.node.NodeConnectionState; +import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus; +import org.apache.nifi.cluster.manager.NodeResponse; +import org.apache.nifi.cluster.manager.exception.ConnectingNodeMutableRequestException; +import org.apache.nifi.cluster.manager.exception.DisconnectedNodeMutableRequestException; +import org.apache.nifi.cluster.manager.exception.IllegalClusterStateException; +import org.apache.nifi.cluster.manager.exception.NoConnectedNodesException; +import org.apache.nifi.cluster.manager.exception.OffloadedNodeMutableRequestException; +import org.apache.nifi.cluster.manager.exception.UnknownNodeException; +import org.apache.nifi.cluster.manager.exception.UriConstructionException; +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.reporting.Severity; +import org.apache.nifi.util.ComponentIdGenerator; +import org.apache.nifi.util.NiFiProperties; +import org.apache.nifi.web.security.ProxiedEntitiesUtils; +import org.apache.nifi.web.security.jwt.JwtAuthenticationFilter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.ws.rs.HttpMethod; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -45,36 +75,6 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import javax.ws.rs.HttpMethod; -import javax.ws.rs.core.Response; -import javax.ws.rs.core.Response.Status; - -import org.apache.commons.lang3.StringUtils; -import org.apache.nifi.authorization.AccessDeniedException; -import org.apache.nifi.authorization.user.NiFiUser; -import 
org.apache.nifi.authorization.user.NiFiUserUtils; -import org.apache.nifi.cluster.coordination.ClusterCoordinator; -import org.apache.nifi.cluster.coordination.http.HttpResponseMapper; -import org.apache.nifi.cluster.coordination.http.StandardHttpResponseMapper; -import org.apache.nifi.cluster.coordination.node.NodeConnectionState; -import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus; -import org.apache.nifi.cluster.manager.NodeResponse; -import org.apache.nifi.cluster.manager.exception.ConnectingNodeMutableRequestException; -import org.apache.nifi.cluster.manager.exception.DisconnectedNodeMutableRequestException; -import org.apache.nifi.cluster.manager.exception.IllegalClusterStateException; -import org.apache.nifi.cluster.manager.exception.NoConnectedNodesException; -import org.apache.nifi.cluster.manager.exception.UnknownNodeException; -import org.apache.nifi.cluster.manager.exception.UriConstructionException; -import org.apache.nifi.cluster.protocol.NodeIdentifier; -import org.apache.nifi.events.EventReporter; -import org.apache.nifi.reporting.Severity; -import org.apache.nifi.util.ComponentIdGenerator; -import org.apache.nifi.util.NiFiProperties; -import org.apache.nifi.web.security.ProxiedEntitiesUtils; -import org.apache.nifi.web.security.jwt.JwtAuthenticationFilter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - public class ThreadPoolRequestReplicator implements RequestReplicator { private static final Logger logger = LoggerFactory.getLogger(ThreadPoolRequestReplicator.class); @@ -171,6 +171,24 @@ public AsyncClusterResponse replicate(NiFiUser user, String method, URI uri, Obj // If the request is mutable, ensure that all nodes are connected. if (mutable) { + final List offloaded = stateMap.get(NodeConnectionState.OFFLOADED); + if (offloaded != null && !offloaded.isEmpty()) { + if (offloaded.size() == 1) { + throw new OffloadedNodeMutableRequestException("Node " + offloaded.iterator().next() + " is currently offloaded"); + } else { + throw new OffloadedNodeMutableRequestException(offloaded.size() + " Nodes are currently offloaded"); + } + } + + final List offloading = stateMap.get(NodeConnectionState.OFFLOADING); + if (offloading != null && !offloading.isEmpty()) { + if (offloading.size() == 1) { + throw new OffloadedNodeMutableRequestException("Node " + offloading.iterator().next() + " is currently offloading"); + } else { + throw new OffloadedNodeMutableRequestException(offloading.size() + " Nodes are currently offloading"); + } + } + final List disconnected = stateMap.get(NodeConnectionState.DISCONNECTED); if (disconnected != null && !disconnected.isEmpty()) { if (disconnected.size() == 1) { @@ -503,10 +521,10 @@ public void onCompletion(final NodeResponse nodeResponse) { if (allNodesResponded) { clusterResponse.addTiming("Verification Completed", "All Nodes", nanos); - // Check if we have any requests that do not have a 150-Continue status code. + // Check if we have any requests that do not have a 202-Accepted status code. final long dissentingCount = nodeResponses.stream().filter(p -> p.getStatus() != NODE_CONTINUE_STATUS_CODE).count(); - // If all nodes responded with 150-Continue, then we can replicate the original request + // If all nodes responded with 202-Accepted, then we can replicate the original request // to all nodes and we are finished. 
if (dissentingCount == 0) { logger.debug("Received verification from all {} nodes that mutable request {} {} can be made", numNodes, method, uri.getPath()); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/okhttp/OkHttpReplicationClient.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/okhttp/OkHttpReplicationClient.java index b0f0a394292d..81229de57beb 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/okhttp/OkHttpReplicationClient.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/http/replication/okhttp/OkHttpReplicationClient.java @@ -21,6 +21,35 @@ import com.fasterxml.jackson.annotation.JsonInclude.Value; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.module.jaxb.JaxbAnnotationIntrospector; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URI; +import java.security.KeyStore; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.zip.GZIPInputStream; +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocketFactory; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509TrustManager; +import javax.ws.rs.HttpMethod; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.core.Response; import okhttp3.Call; import okhttp3.ConnectionPool; import okhttp3.Headers; @@ -42,36 +71,6 @@ import org.slf4j.LoggerFactory; import org.springframework.util.StreamUtils; -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.SSLSocketFactory; -import javax.net.ssl.TrustManager; -import javax.net.ssl.TrustManagerFactory; -import javax.net.ssl.X509TrustManager; -import javax.ws.rs.HttpMethod; -import javax.ws.rs.core.MultivaluedHashMap; -import javax.ws.rs.core.MultivaluedMap; -import javax.ws.rs.core.Response; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.URI; -import java.security.KeyStore; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import java.util.zip.GZIPInputStream; - public class OkHttpReplicationClient implements HttpReplicationClient { private static final Logger logger = LoggerFactory.getLogger(OkHttpReplicationClient.class); private static final Set gzipEncodings = Stream.of("gzip", "x-gzip").collect(Collectors.toSet()); @@ -95,12 +94,35 @@ 
public OkHttpReplicationClient(final NiFiProperties properties) { @Override public PreparedRequest prepareRequest(final String method, final Map headers, final Object entity) { final boolean gzip = isUseGzip(headers); + checkContentLengthHeader(method, headers); final RequestBody requestBody = createRequestBody(headers, entity, gzip); final Map updatedHeaders = gzip ? updateHeadersForGzip(headers) : headers; return new OkHttpPreparedRequest(method, updatedHeaders, entity, requestBody); } + /** + * Checks the content length header on DELETE requests to ensure it is set to '0', avoiding request timeouts on replicated requests. + * @param method the HTTP method of the request + * @param headers the header keys and values + */ + private void checkContentLengthHeader(String method, Map headers) { + // Only applies to DELETE requests + if (HttpMethod.DELETE.equalsIgnoreCase(method)) { + // Find the Content-Length header if present + final String CONTENT_LENGTH_HEADER_KEY = "Content-Length"; + Map.Entry contentLengthEntry = headers.entrySet().stream().filter(entry -> entry.getKey().equalsIgnoreCase(CONTENT_LENGTH_HEADER_KEY)).findFirst().orElse(null); + // If no CL header, do nothing + if (contentLengthEntry != null) { + // If the provided CL value is non-zero, override it + if (contentLengthEntry.getValue() != null && !contentLengthEntry.getValue().equalsIgnoreCase("0")) { + logger.warn("This is a DELETE request; the provided Content-Length was {}; setting Content-Length to 0", contentLengthEntry.getValue()); + headers.put(CONTENT_LENGTH_HEADER_KEY, "0"); + } + } + } + } + @Override public Response replicate(final PreparedRequest request, final String uri) throws IOException { if (!(Objects.requireNonNull(request) instanceof OkHttpPreparedRequest)) { @@ -140,7 +162,7 @@ private byte[] getResponseBytes(final okhttp3.Response callResponse) throws IOEx final String contentEncoding = callResponse.header("Content-Encoding"); if (gzipEncodings.contains(contentEncoding)) { try (final InputStream gzipIn = new GZIPInputStream(new ByteArrayInputStream(rawBytes)); - final ByteArrayOutputStream baos = new ByteArrayOutputStream()) { + final ByteArrayOutputStream baos = new ByteArrayOutputStream()) { StreamUtils.copy(gzipIn, baos); return baos.toByteArray(); @@ -183,7 +205,7 @@ private Call createCall(final OkHttpPreparedRequest request, final String uri) { @SuppressWarnings("unchecked") private HttpUrl buildUrl(final OkHttpPreparedRequest request, final String uri) { - HttpUrl.Builder urlBuilder = HttpUrl.parse(uri.toString()).newBuilder(); + HttpUrl.Builder urlBuilder = HttpUrl.parse(uri).newBuilder(); switch (request.getMethod().toUpperCase()) { case HttpMethod.DELETE: case HttpMethod.HEAD: @@ -226,7 +248,7 @@ private String getContentType(final Map headers, final String de private byte[] serializeEntity(final Object entity, final String contentType, final boolean gzip) { try (final ByteArrayOutputStream baos = new ByteArrayOutputStream(); - final OutputStream out = gzip ? new GZIPOutputStream(baos, 1) : baos) { + final OutputStream out = gzip ? 
new GZIPOutputStream(baos, 1) : baos) { getSerializer(contentType).serialize(entity, out); out.close(); @@ -269,10 +291,10 @@ private boolean isUseGzip(final Map headers) { } else { final String[] acceptEncodingTokens = rawAcceptEncoding.split(","); return Stream.of(acceptEncodingTokens) - .map(String::trim) - .filter(StringUtils::isNotEmpty) - .map(String::toLowerCase) - .anyMatch(gzipEncodings::contains); + .map(String::trim) + .filter(StringUtils::isNotEmpty) + .map(String::toLowerCase) + .anyMatch(gzipEncodings::contains); } } @@ -286,7 +308,8 @@ private OkHttpClient createOkHttpClient(final NiFiProperties properties) { okHttpClientBuilder.connectTimeout(connectionTimeoutMs, TimeUnit.MILLISECONDS); okHttpClientBuilder.readTimeout(readTimeoutMs, TimeUnit.MILLISECONDS); okHttpClientBuilder.followRedirects(true); - okHttpClientBuilder.connectionPool(new ConnectionPool(0, 5, TimeUnit.MINUTES)); + final int connectionPoolSize = properties.getClusterNodeMaxConcurrentRequests(); + okHttpClientBuilder.connectionPool(new ConnectionPool(connectionPoolSize, 5, TimeUnit.MINUTES)); final Tuple tuple = createSslSocketFactory(properties); if (tuple != null) { diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/node/NodeClusterCoordinator.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/node/NodeClusterCoordinator.java index 4e4625cc9607..8c83a1d8a70f 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/node/NodeClusterCoordinator.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/node/NodeClusterCoordinator.java @@ -16,18 +16,25 @@ */ package org.apache.nifi.cluster.coordination.node; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.commons.collections4.queue.CircularFifoQueue; import org.apache.commons.lang3.StringUtils; import org.apache.nifi.cluster.coordination.ClusterCoordinator; +import org.apache.nifi.cluster.coordination.ClusterTopologyEventListener; import org.apache.nifi.cluster.coordination.flow.FlowElection; import org.apache.nifi.cluster.coordination.http.HttpResponseMapper; import org.apache.nifi.cluster.coordination.http.StandardHttpResponseMapper; import org.apache.nifi.cluster.coordination.http.replication.RequestCompletionCallback; +import org.apache.nifi.cluster.coordination.node.state.NodeIdentifierDescriptor; import org.apache.nifi.cluster.event.Event; import org.apache.nifi.cluster.event.NodeEvent; import org.apache.nifi.cluster.exception.NoClusterCoordinatorException; import org.apache.nifi.cluster.firewall.ClusterNodeFirewall; import org.apache.nifi.cluster.manager.NodeResponse; +import org.apache.nifi.cluster.manager.exception.IllegalNodeOffloadException; import org.apache.nifi.cluster.manager.exception.IllegalNodeDisconnectionException; import org.apache.nifi.cluster.protocol.ComponentRevision; import org.apache.nifi.cluster.protocol.ConnectionRequest; @@ -43,14 +50,21 @@ import org.apache.nifi.cluster.protocol.message.ClusterWorkloadResponseMessage; import org.apache.nifi.cluster.protocol.message.ConnectionRequestMessage; import 
org.apache.nifi.cluster.protocol.message.ConnectionResponseMessage; +import org.apache.nifi.cluster.protocol.message.OffloadMessage; import org.apache.nifi.cluster.protocol.message.DisconnectMessage; import org.apache.nifi.cluster.protocol.message.NodeConnectionStatusResponseMessage; import org.apache.nifi.cluster.protocol.message.NodeStatusChangeMessage; import org.apache.nifi.cluster.protocol.message.ProtocolMessage; import org.apache.nifi.cluster.protocol.message.ProtocolMessage.MessageType; import org.apache.nifi.cluster.protocol.message.ReconnectionRequestMessage; +import org.apache.nifi.components.state.Scope; +import org.apache.nifi.components.state.StateManager; +import org.apache.nifi.components.state.StateManagerProvider; +import org.apache.nifi.components.state.StateMap; import org.apache.nifi.controller.leader.election.LeaderElectionManager; +import org.apache.nifi.controller.state.manager.StandardStateManagerProvider; import org.apache.nifi.events.EventReporter; +import org.apache.nifi.registry.VariableRegistry; import org.apache.nifi.reporting.Severity; import org.apache.nifi.services.FlowService; import org.apache.nifi.util.NiFiProperties; @@ -59,6 +73,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.io.StringWriter; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -69,6 +84,7 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; import java.util.regex.Pattern; @@ -93,6 +109,7 @@ public class NodeClusterCoordinator implements ClusterCoordinator, ProtocolHandl private final AtomicLong latestUpdateId = new AtomicLong(-1); private final FlowElection flowElection; private final NodeProtocolSender nodeProtocolSender; + private final StateManager stateManager; private volatile FlowService flowService; private volatile boolean connected; @@ -102,9 +119,18 @@ public class NodeClusterCoordinator implements ClusterCoordinator, ProtocolHandl private final ConcurrentMap nodeStatuses = new ConcurrentHashMap<>(); private final ConcurrentMap> nodeEvents = new ConcurrentHashMap<>(); + private final List eventListeners = new CopyOnWriteArrayList<>(); + + public NodeClusterCoordinator(final ClusterCoordinationProtocolSenderListener senderListener, final EventReporter eventReporter, final LeaderElectionManager leaderElectionManager, + final FlowElection flowElection, final ClusterNodeFirewall firewall, final RevisionManager revisionManager, final NiFiProperties nifiProperties, + final NodeProtocolSender nodeProtocolSender) throws IOException { + this(senderListener, eventReporter, leaderElectionManager, flowElection, firewall, revisionManager, nifiProperties, nodeProtocolSender, + StandardStateManagerProvider.create(nifiProperties, VariableRegistry.EMPTY_REGISTRY)); + } + public NodeClusterCoordinator(final ClusterCoordinationProtocolSenderListener senderListener, final EventReporter eventReporter, final LeaderElectionManager leaderElectionManager, final FlowElection flowElection, final ClusterNodeFirewall firewall, final RevisionManager revisionManager, final NiFiProperties nifiProperties, - final NodeProtocolSender nodeProtocolSender) { + final NodeProtocolSender nodeProtocolSender, final StateManagerProvider stateManagerProvider) throws IOException { this.senderListener = senderListener; this.flowService = null; this.eventReporter = 
eventReporter; @@ -114,10 +140,98 @@ public NodeClusterCoordinator(final ClusterCoordinationProtocolSenderListener se this.leaderElectionManager = leaderElectionManager; this.flowElection = flowElection; this.nodeProtocolSender = nodeProtocolSender; + this.stateManager = stateManagerProvider.getStateManager("Cluster Coordinator"); + + recoverState(); senderListener.addHandler(this); } + private void recoverState() throws IOException { + final StateMap stateMap = stateManager.getState(Scope.LOCAL); + if (stateMap == null) { + logger.debug("No state to restore"); + return; + } + + final ObjectMapper mapper = new ObjectMapper(); + final JsonFactory jsonFactory = new JsonFactory(); + jsonFactory.setCodec(mapper); + + final Map connectionStatusMap = new HashMap<>(); + NodeIdentifier localNodeId = null; + + final Map state = stateMap.toMap(); + for (final Map.Entry entry : state.entrySet()) { + final String nodeUuid = entry.getKey(); + final String nodeIdentifierJson = entry.getValue(); + logger.debug("Recovering state for {} = {}", nodeUuid, nodeIdentifierJson); + + try (final JsonParser jsonParser = jsonFactory.createParser(nodeIdentifierJson)) { + final NodeIdentifierDescriptor nodeIdDesc = jsonParser.readValueAs(NodeIdentifierDescriptor.class); + final NodeIdentifier nodeId = nodeIdDesc.toNodeIdentifier(); + + connectionStatusMap.put(nodeId, new NodeConnectionStatus(nodeId, DisconnectionCode.NOT_YET_CONNECTED)); + if (nodeIdDesc.isLocalNodeIdentifier()) { + if (localNodeId == null) { + localNodeId = nodeId; + } else { + logger.warn("When recovering state, determined that two Node Identifiers claim to be the local Node Identifier: {} and {}. Will ignore both of these and wait until " + + "connecting to cluster to determine which Node Identifier is the local Node Identifier", localNodeId.getFullDescription(), nodeId.getFullDescription()); + localNodeId = null; + } + } + } + } + + if (!connectionStatusMap.isEmpty()) { + resetNodeStatuses(connectionStatusMap); + } + + if (localNodeId != null) { + logger.debug("Recovered state indicating that Local Node Identifier is {}", localNodeId); + setLocalNodeIdentifier(localNodeId); + } + } + + private void storeState() { + final ObjectMapper mapper = new ObjectMapper(); + final JsonFactory jsonFactory = new JsonFactory(); + jsonFactory.setCodec(mapper); + + try { + final Map stateMap = new HashMap<>(); + + final NodeIdentifier localNodeId = getLocalNodeIdentifier(); + for (final NodeIdentifier nodeId : getNodeIdentifiers()) { + final boolean isLocalId = nodeId.equals(localNodeId); + final NodeIdentifierDescriptor descriptor = NodeIdentifierDescriptor.fromNodeIdentifier(nodeId, isLocalId); + + try (final StringWriter writer = new StringWriter()) { + final JsonGenerator jsonGenerator = jsonFactory.createGenerator(writer); + jsonGenerator.writeObject(descriptor); + + final String serializedDescriptor = writer.toString(); + stateMap.put(nodeId.getId(), serializedDescriptor); + } + } + + stateManager.setState(stateMap, Scope.LOCAL); + logger.debug("Stored the following state as the Cluster Topology: {}", stateMap); + } catch (final Exception e) { + logger.warn("Failed to store cluster topology to local State Manager. 
Upon restart of NiFi, the cluster topology may not be accurate until joining the cluster.", e); + } + } + + + public void registerEventListener(final ClusterTopologyEventListener eventListener) { + this.eventListeners.add(eventListener); + } + + public void unregisterEventListener(final ClusterTopologyEventListener eventListener) { + this.eventListeners.remove(eventListener); + } + @Override public void shutdown() { if (closed) { @@ -136,8 +250,13 @@ public void shutdown() { @Override public void setLocalNodeIdentifier(final NodeIdentifier nodeId) { + if (nodeId == null || nodeId.equals(this.nodeId)) { + return; + } + this.nodeId = nodeId; nodeStatuses.computeIfAbsent(nodeId, id -> new NodeConnectionStatus(id, DisconnectionCode.NOT_YET_CONNECTED)); + eventListeners.forEach(listener -> listener.onLocalNodeIdentifierSet(nodeId)); } @Override @@ -170,7 +289,7 @@ private NodeIdentifier waitForNodeIdentifier(final Supplier fetc return localNodeId; } - private String getElectedActiveCoordinatorAddress() throws IOException { + private String getElectedActiveCoordinatorAddress() { return leaderElectionManager.getLeader(ClusterRoles.CLUSTER_COORDINATOR); } @@ -185,11 +304,68 @@ public void resetNodeStatuses(final Map st final NodeConnectionStatus proposedStatus = entry.getValue(); if (proposedStatus.getState() == NodeConnectionState.REMOVED) { - nodeStatuses.remove(nodeId); + removeNode(nodeId); } else { - nodeStatuses.put(nodeId, proposedStatus); + updateNodeStatus(nodeId, proposedStatus, false); + } + } + + storeState(); + } + + private NodeConnectionStatus removeNode(final NodeIdentifier nodeId) { + final NodeConnectionStatus status = nodeStatuses.remove(nodeId); + nodeEvents.remove(nodeId); + if (status != null) { + onNodeRemoved(nodeId); + } + + return status; + } + + private boolean removeNodeConditionally(final NodeIdentifier nodeId, final NodeConnectionStatus expectedStatus) { + final boolean removed = nodeStatuses.remove(nodeId, expectedStatus); + if (removed) { + nodeEvents.remove(nodeId); + onNodeRemoved(nodeId); + } + + return removed; + } + + private NodeConnectionStatus updateNodeStatus(final NodeIdentifier nodeId, final NodeConnectionStatus updatedStatus) { + return updateNodeStatus(nodeId, updatedStatus, true); + } + + private NodeConnectionStatus updateNodeStatus(final NodeIdentifier nodeId, final NodeConnectionStatus updatedStatus, final boolean storeState) { + final NodeConnectionStatus evictedStatus = nodeStatuses.put(nodeId, updatedStatus); + if (evictedStatus == null) { + onNodeAdded(nodeId, storeState); + } else { + onNodeStateChange(nodeId, updatedStatus.getState()); + } + + return evictedStatus; + } + + private boolean updateNodeStatusConditionally(final NodeIdentifier nodeId, final NodeConnectionStatus expectedStatus, final NodeConnectionStatus updatedStatus) { + final boolean updated; + if (expectedStatus == null) { + final NodeConnectionStatus existingValue = nodeStatuses.putIfAbsent(nodeId, updatedStatus); + updated = existingValue == null; + + if (updated) { + onNodeAdded(nodeId, true); } + } else { + updated = nodeStatuses.replace(nodeId, expectedStatus, updatedStatus); + } + + if (updated) { + onNodeStateChange(nodeId, updatedStatus.getState()); } + + return updated; } @Override @@ -228,17 +404,21 @@ private boolean replaceNodeStatus(final NodeIdentifier nodeId, final NodeConnect if (currentStatus == null) { if (newStatus.getState() == NodeConnectionState.REMOVED) { - return nodeStatuses.remove(nodeId, currentStatus); + return removeNodeConditionally(nodeId, 
currentStatus); } else { - final NodeConnectionStatus existingValue = nodeStatuses.putIfAbsent(nodeId, newStatus); - return existingValue == null; + return updateNodeStatusConditionally(nodeId, null, newStatus); } } if (newStatus.getState() == NodeConnectionState.REMOVED) { - return nodeStatuses.remove(nodeId, currentStatus); + if (removeNodeConditionally(nodeId, currentStatus)) { + storeState(); + return true; + } else { + return false; + } } else { - return nodeStatuses.replace(nodeId, currentStatus, newStatus); + return updateNodeStatusConditionally(nodeId, currentStatus, newStatus); } } @@ -259,7 +439,7 @@ public void requestNodeConnect(final NodeIdentifier nodeId, final String userDn) reportEvent(nodeId, Severity.INFO, "Requesting that node connect to cluster on behalf of " + userDn); } - updateNodeStatus(new NodeConnectionStatus(nodeId, NodeConnectionState.CONNECTING, null, null, System.currentTimeMillis())); + updateNodeStatus(new NodeConnectionStatus(nodeId, NodeConnectionState.CONNECTING, null, null, null, System.currentTimeMillis())); // create the request final ReconnectionRequestMessage request = new ReconnectionRequestMessage(); @@ -297,6 +477,50 @@ public void finishNodeConnection(final NodeIdentifier nodeId) { updateNodeStatus(new NodeConnectionStatus(nodeId, NodeConnectionState.CONNECTED)); } + @Override + public void finishNodeOffload(final NodeIdentifier nodeId) { + final NodeConnectionState state = getConnectionState(nodeId); + if (state == null) { + logger.warn("Attempted to finish node offload for {} but node is not known.", nodeId); + return; + } + + if (state != NodeConnectionState.OFFLOADING) { + logger.warn("Attempted to finish node offload for {} but node is not in the offloading state, it is currently {}.", nodeId, state); + return; + } + + logger.info("{} is now offloaded", nodeId); + + updateNodeStatus(new NodeConnectionStatus(nodeId, NodeConnectionState.OFFLOADED)); + } + + @Override + public void requestNodeOffload(final NodeIdentifier nodeId, final OffloadCode offloadCode, final String explanation) { + final Set offloadNodeIds = getNodeIdentifiers(NodeConnectionState.OFFLOADING, NodeConnectionState.OFFLOADED); + if (offloadNodeIds.contains(nodeId)) { + logger.debug("Attempted to offload node but the node is already offloading or offloaded"); + // no need to do anything here, the node is currently offloading or already offloaded + return; + } + + final Set disconnectedNodeIds = getNodeIdentifiers(NodeConnectionState.DISCONNECTED); + if (!disconnectedNodeIds.contains(nodeId)) { + throw new IllegalNodeOffloadException("Cannot offload node " + nodeId + " because it is not currently disconnected"); + } + + logger.info("Requesting that {} is offloaded due to {}", nodeId, explanation == null ? 
offloadCode : explanation); + + updateNodeStatus(new NodeConnectionStatus(nodeId, NodeConnectionState.OFFLOADING, offloadCode, explanation)); + + final OffloadMessage request = new OffloadMessage(); + request.setNodeId(nodeId); + request.setExplanation(explanation); + + addNodeEvent(nodeId, "Offload requested due to " + explanation); + offloadAsynchronously(request, 10, 5); + } + @Override public void requestNodeDisconnect(final NodeIdentifier nodeId, final DisconnectionCode disconnectionCode, final String explanation) { final Set connectedNodeIds = getNodeIdentifiers(NodeConnectionState.CONNECTED); @@ -348,9 +572,25 @@ public void disconnectionRequestedByNode(final NodeIdentifier nodeId, final Disc @Override public void removeNode(final NodeIdentifier nodeId, final String userDn) { reportEvent(nodeId, Severity.INFO, "User " + userDn + " requested that node be removed from cluster"); - nodeStatuses.remove(nodeId); - nodeEvents.remove(nodeId); notifyOthersOfNodeStatusChange(new NodeConnectionStatus(nodeId, NodeConnectionState.REMOVED)); + removeNode(nodeId); + + storeState(); + } + + private void onNodeRemoved(final NodeIdentifier nodeId) { + eventListeners.forEach(listener -> listener.onNodeRemoved(nodeId)); + } + + private void onNodeAdded(final NodeIdentifier nodeId, final boolean storeState) { + if (storeState) { + storeState(); + } + eventListeners.forEach(listener -> listener.onNodeAdded(nodeId)); + } + + private void onNodeStateChange(final NodeIdentifier nodeId, final NodeConnectionState nodeConnectionState) { + eventListeners.forEach(listener -> listener.onNodeStateChange(nodeId, nodeConnectionState)); } @Override @@ -381,8 +621,18 @@ public Map> getConnectionStates() { } @Override - public boolean isBlockedByFirewall(final String hostname) { - return firewall != null && !firewall.isPermissible(hostname); + public boolean isBlockedByFirewall(final Set nodeIdentities) { + if (firewall == null) { + return false; + } + + for (final String nodeId : nodeIdentities) { + if (firewall.isPermissible(nodeId)) { + return false; + } + } + + return true; } @Override @@ -455,28 +705,21 @@ public NodeIdentifier getElectedActiveCoordinatorNode() { } private NodeIdentifier getElectedActiveCoordinatorNode(final boolean warnOnError) { - final String electedNodeAddress; + String electedNodeAddress; try { electedNodeAddress = getElectedActiveCoordinatorAddress(); } catch (final NoClusterCoordinatorException ncce) { logger.debug("There is currently no elected active Cluster Coordinator"); - return null; - } catch (final IOException ioe) { - if (warnOnError) { - logger.warn("Failed to determine which node is elected active Cluster Coordinator. 
There may be no coordinator currently: " + ioe); - if (logger.isDebugEnabled()) { - logger.warn("", ioe); - } - } - return null; } - if (electedNodeAddress == null) { + if (electedNodeAddress == null || electedNodeAddress.trim().isEmpty()) { logger.debug("There is currently no elected active Cluster Coordinator"); return null; } + electedNodeAddress = electedNodeAddress.trim(); + final int colonLoc = electedNodeAddress.indexOf(':'); if (colonLoc < 1) { if (warnOnError) { @@ -519,6 +762,7 @@ private NodeIdentifier getElectedActiveCoordinatorNode(final boolean warnOnError final NodeConnectionStatus existingStatus = this.nodeStatuses.putIfAbsent(connectionStatus.getNodeIdentifier(), connectionStatus); if (existingStatus == null) { + onNodeAdded(connectionStatus.getNodeIdentifier(), true); return connectionStatus.getNodeIdentifier(); } else { return existingStatus.getNodeIdentifier(); @@ -594,7 +838,7 @@ void updateNodeStatus(final NodeConnectionStatus status, final boolean waitForCo // this method is called when something occurs that causes this node to change the status of the // node in question. We only use comparisons against the current value when we receive an update // about a node status from a different node, since those may be received out-of-order. - final NodeConnectionStatus currentStatus = nodeStatuses.put(nodeId, status); + final NodeConnectionStatus currentStatus = updateNodeStatus(nodeId, status); final NodeConnectionState currentState = currentStatus == null ? null : currentStatus.getState(); logger.info("Status of {} changed from {} to {}", nodeId, currentStatus, status); logger.debug("State of cluster nodes is now {}", nodeStatuses); @@ -631,7 +875,7 @@ void notifyOthersOfNodeStatusChange(final NodeConnectionStatus updatedStatus, fi // Otherwise, get the active coordinator (or wait for one to become active) and then notify the coordinator. final Set nodesToNotify; if (notifyAllNodes) { - nodesToNotify = getNodeIdentifiers(NodeConnectionState.CONNECTED, NodeConnectionState.CONNECTING); + nodesToNotify = getNodeIdentifiers(); // Do not notify ourselves because we already know about the status update. 
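As a side note on the isBlockedByFirewall change earlier in this file: the check now receives the full set of node identities rather than a single hostname, and a node is considered blocked only when none of its identities is permitted. A stand-alone sketch of that rule, assuming only the ClusterNodeFirewall#isPermissible(String) method already used in this patch (the helper itself is illustrative):

    // Sketch: any single permissible identity lets the node through; no firewall means nothing is blocked.
    static boolean blockedByFirewall(final ClusterNodeFirewall firewall, final java.util.Set<String> nodeIdentities) {
        if (firewall == null) {
            return false;
        }
        return nodeIdentities.stream().noneMatch(firewall::isPermissible);
    }
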
nodesToNotify.remove(getLocalNodeIdentifier()); @@ -651,6 +895,34 @@ void notifyOthersOfNodeStatusChange(final NodeConnectionStatus updatedStatus, fi senderListener.notifyNodeStatusChange(nodesToNotify, message); } + private void offloadAsynchronously(final OffloadMessage request, final int attempts, final int retrySeconds) { + final Thread offloadThread = new Thread(new Runnable() { + @Override + public void run() { + final NodeIdentifier nodeId = request.getNodeId(); + + for (int i = 0; i < attempts; i++) { + try { + senderListener.offload(request); + reportEvent(nodeId, Severity.INFO, "Node was offloaded due to " + request.getExplanation()); + return; + } catch (final Exception e) { + logger.error("Failed to notify {} that it has been offloaded due to {}", request.getNodeId(), request.getExplanation(), e); + + try { + Thread.sleep(retrySeconds * 1000L); + } catch (final InterruptedException ie) { + Thread.currentThread().interrupt(); + return; + } + } + } + } + }, "Offload " + request.getNodeId()); + + offloadThread.start(); + } + private void disconnectAsynchronously(final DisconnectMessage request, final int attempts, final int retrySeconds) { final Thread disconnectThread = new Thread(new Runnable() { @Override @@ -741,10 +1013,10 @@ public void run() { } @Override - public ProtocolMessage handle(final ProtocolMessage protocolMessage) throws ProtocolException { + public ProtocolMessage handle(final ProtocolMessage protocolMessage, final Set nodeIdentities) throws ProtocolException { switch (protocolMessage.getType()) { case CONNECTION_REQUEST: - return handleConnectionRequest((ConnectionRequestMessage) protocolMessage); + return handleConnectionRequest((ConnectionRequestMessage) protocolMessage, nodeIdentities); case NODE_STATUS_CHANGE: handleNodeStatusChange((NodeStatusChangeMessage) protocolMessage); return null; @@ -771,8 +1043,8 @@ private String summarizeStatusChange(final NodeConnectionStatus oldStatus, final if (oldStatus == null || status.getState() != oldStatus.getState()) { sb.append("Node Status changed from ").append(oldStatus == null ? 
"[Unknown Node]" : oldStatus.getState().toString()).append(" to ").append(status.getState().toString()); - if (status.getDisconnectReason() != null) { - sb.append(" due to ").append(status.getDisconnectReason()); + if (status.getReason() != null) { + sb.append(" due to ").append(status.getReason()); } else if (status.getDisconnectCode() != null) { sb.append(" due to ").append(status.getDisconnectCode().toString()); } @@ -790,9 +1062,11 @@ private void handleNodeStatusChange(final NodeStatusChangeMessage statusChangeMe // Either remove the value from the map or update the map depending on the connection state if (statusChangeMessage.getNodeConnectionStatus().getState() == NodeConnectionState.REMOVED) { - nodeStatuses.remove(nodeId, oldStatus); + if (removeNodeConditionally(nodeId, oldStatus)) { + storeState(); + } } else { - nodeStatuses.put(nodeId, updatedStatus); + updateNodeStatus(nodeId, updatedStatus); } logger.info("Status of {} changed from {} to {}", statusChangeMessage.getNodeId(), oldStatus, updatedStatus); @@ -837,45 +1111,56 @@ private NodeIdentifier resolveNodeId(final NodeIdentifier proposedIdentifier) { if (existingStatus == null) { // there is no node with that ID resolvedNodeId = proposedIdentifier; - logger.debug("No existing node with ID {}; resolved node ID is as-proposed", proposedIdentifier.getId()); + logger.debug("No existing node with ID {}; resolved node ID is as-proposed", proposedIdentifier.getFullDescription()); + onNodeAdded(resolvedNodeId, true); } else if (existingStatus.getNodeIdentifier().logicallyEquals(proposedIdentifier)) { // there is a node with that ID but it's the same node. resolvedNodeId = proposedIdentifier; - logger.debug("No existing node with ID {}; resolved node ID is as-proposed", proposedIdentifier.getId()); + logger.debug("A node already exists with ID {} and is logically equivalent; resolved node ID is as-proposed: {}", proposedIdentifier.getId(), proposedIdentifier.getFullDescription()); } else { // there is a node with that ID and it's a different node resolvedNodeId = new NodeIdentifier(UUID.randomUUID().toString(), proposedIdentifier.getApiAddress(), proposedIdentifier.getApiPort(), - proposedIdentifier.getSocketAddress(), proposedIdentifier.getSocketPort(), proposedIdentifier.getSiteToSiteAddress(), - proposedIdentifier.getSiteToSitePort(), proposedIdentifier.getSiteToSiteHttpApiPort(), proposedIdentifier.isSiteToSiteSecure()); + proposedIdentifier.getSocketAddress(), proposedIdentifier.getSocketPort(), proposedIdentifier.getLoadBalanceAddress(), proposedIdentifier.getLoadBalancePort(), + proposedIdentifier.getSiteToSiteAddress(), proposedIdentifier.getSiteToSitePort(), proposedIdentifier.getSiteToSiteHttpApiPort(), proposedIdentifier.isSiteToSiteSecure()); + logger.debug("A node already exists with ID {}. 
Proposed Node Identifier was {}; existing Node Identifier is {}; Resolved Node Identifier is {}", - proposedIdentifier.getId(), proposedIdentifier, getNodeIdentifier(proposedIdentifier.getId()), resolvedNodeId); + proposedIdentifier.getId(), proposedIdentifier.getFullDescription(), getNodeIdentifier(proposedIdentifier.getId()).getFullDescription(), resolvedNodeId.getFullDescription()); } return resolvedNodeId; } - private ConnectionResponseMessage handleConnectionRequest(final ConnectionRequestMessage requestMessage) { + private ConnectionResponseMessage handleConnectionRequest(final ConnectionRequestMessage requestMessage, final Set nodeIdentities) { final NodeIdentifier proposedIdentifier = requestMessage.getConnectionRequest().getProposedNodeIdentifier(); - final NodeIdentifier withRequestorDn = addRequestorDn(proposedIdentifier, requestMessage.getRequestorDN()); + final NodeIdentifier withNodeIdentities = addNodeIdentities(proposedIdentifier, nodeIdentities); final DataFlow dataFlow = requestMessage.getConnectionRequest().getDataFlow(); - final ConnectionRequest requestWithDn = new ConnectionRequest(withRequestorDn, dataFlow); + final ConnectionRequest requestWithNodeIdentities = new ConnectionRequest(withNodeIdentities, dataFlow); // Resolve Node identifier. final NodeIdentifier resolvedNodeId = resolveNodeId(proposedIdentifier); + if (isBlockedByFirewall(nodeIdentities)) { + // if the socket address is not listed in the firewall, then return a null response + logger.info("Firewall blocked connection request from node " + resolvedNodeId + " with Node Identities " + nodeIdentities); + final ConnectionResponse response = ConnectionResponse.createBlockedByFirewallResponse(); + final ConnectionResponseMessage responseMessage = new ConnectionResponseMessage(); + responseMessage.setConnectionResponse(response); + return responseMessage; + } + if (requireElection) { - final DataFlow electedDataFlow = flowElection.castVote(dataFlow, withRequestorDn); + final DataFlow electedDataFlow = flowElection.castVote(dataFlow, withNodeIdentities); if (electedDataFlow == null) { - logger.info("Received Connection Request from {}; responding with Flow Election In Progress message", withRequestorDn); + logger.info("Received Connection Request from {}; responding with Flow Election In Progress message", withNodeIdentities); return createFlowElectionInProgressResponse(); } else { - logger.info("Received Connection Request from {}; responding with DataFlow that was elected", withRequestorDn); - return createConnectionResponse(requestWithDn, resolvedNodeId, electedDataFlow); + logger.info("Received Connection Request from {}; responding with DataFlow that was elected", withNodeIdentities); + return createConnectionResponse(requestWithNodeIdentities, resolvedNodeId, electedDataFlow); } } - logger.info("Received Connection Request from {}; responding with my DataFlow", withRequestorDn); - return createConnectionResponse(requestWithDn, resolvedNodeId); + logger.info("Received Connection Request from {}; responding with my DataFlow", withNodeIdentities); + return createConnectionResponse(requestWithNodeIdentities, resolvedNodeId); } private ConnectionResponseMessage createFlowElectionInProgressResponse() { @@ -901,15 +1186,6 @@ private ConnectionResponseMessage createConnectionResponse(final ConnectionReque private ConnectionResponseMessage createConnectionResponse(final ConnectionRequest request, final NodeIdentifier resolvedNodeIdentifier, final DataFlow clusterDataFlow) { - if 
(isBlockedByFirewall(resolvedNodeIdentifier.getSocketAddress())) { - // if the socket address is not listed in the firewall, then return a null response - logger.info("Firewall blocked connection request from node " + resolvedNodeIdentifier); - final ConnectionResponse response = ConnectionResponse.createBlockedByFirewallResponse(); - final ConnectionResponseMessage responseMessage = new ConnectionResponseMessage(); - responseMessage.setConnectionResponse(response); - return responseMessage; - } - if (clusterDataFlow == null) { final ConnectionResponseMessage responseMessage = new ConnectionResponseMessage(); responseMessage.setConnectionResponse(new ConnectionResponse(5, "The cluster dataflow is not yet available")); @@ -924,7 +1200,7 @@ private ConnectionResponseMessage createConnectionResponse(final ConnectionReque addNodeEvent(resolvedNodeIdentifier, "Connection requested from existing node. Setting status to connecting."); } - status = new NodeConnectionStatus(resolvedNodeIdentifier, NodeConnectionState.CONNECTING, null, null, System.currentTimeMillis()); + status = new NodeConnectionStatus(resolvedNodeIdentifier, NodeConnectionState.CONNECTING, null, null, null, System.currentTimeMillis()); updateNodeStatus(status); final ConnectionResponse response = new ConnectionResponse(resolvedNodeIdentifier, clusterDataFlow, instanceId, getConnectionStatuses(), @@ -936,11 +1212,12 @@ private ConnectionResponseMessage createConnectionResponse(final ConnectionReque } - private NodeIdentifier addRequestorDn(final NodeIdentifier nodeId, final String dn) { + private NodeIdentifier addNodeIdentities(final NodeIdentifier nodeId, final Set nodeIdentities) { return new NodeIdentifier(nodeId.getId(), nodeId.getApiAddress(), nodeId.getApiPort(), nodeId.getSocketAddress(), nodeId.getSocketPort(), + nodeId.getLoadBalanceAddress(), nodeId.getLoadBalancePort(), nodeId.getSiteToSiteAddress(), nodeId.getSiteToSitePort(), - nodeId.getSiteToSiteHttpApiPort(), nodeId.isSiteToSiteSecure(), dn); + nodeId.getSiteToSiteHttpApiPort(), nodeId.isSiteToSiteSecure(), nodeIdentities); } @Override diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/node/state/NodeIdentifierDescriptor.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/node/state/NodeIdentifierDescriptor.java new file mode 100644 index 000000000000..eeabbe34171a --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/coordination/node/state/NodeIdentifierDescriptor.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.cluster.coordination.node.state; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +public class NodeIdentifierDescriptor { + private String id; + private String apiAddress; + private int apiPort; + private String socketAddress; + private int socketPort; + private String loadBalanceAddress; + private int loadBalancePort; + private String siteToSiteAddress; + private Integer siteToSitePort; + private Integer siteToSiteHttpApiPort; + private Boolean siteToSiteSecure; + private Set nodeIdentities; + private boolean localNodeIdentifier; + + public String getId() { + return id; + } + + public void setId(final String id) { + this.id = id; + } + + public String getApiAddress() { + return apiAddress; + } + + public void setApiAddress(final String apiAddress) { + this.apiAddress = apiAddress; + } + + public int getApiPort() { + return apiPort; + } + + public void setApiPort(final int apiPort) { + this.apiPort = apiPort; + } + + public String getSocketAddress() { + return socketAddress; + } + + public void setSocketAddress(final String socketAddress) { + this.socketAddress = socketAddress; + } + + public int getSocketPort() { + return socketPort; + } + + public void setSocketPort(final int socketPort) { + this.socketPort = socketPort; + } + + public String getLoadBalanceAddress() { + return loadBalanceAddress; + } + + public void setLoadBalanceAddress(final String loadBalanceAddress) { + this.loadBalanceAddress = loadBalanceAddress; + } + + public int getLoadBalancePort() { + return loadBalancePort; + } + + public void setLoadBalancePort(final int loadBalancePort) { + this.loadBalancePort = loadBalancePort; + } + + public String getSiteToSiteAddress() { + return siteToSiteAddress; + } + + public void setSiteToSiteAddress(final String siteToSiteAddress) { + this.siteToSiteAddress = siteToSiteAddress; + } + + public Integer getSiteToSitePort() { + return siteToSitePort; + } + + public void setSiteToSitePort(final Integer siteToSitePort) { + this.siteToSitePort = siteToSitePort; + } + + public Integer getSiteToSiteHttpApiPort() { + return siteToSiteHttpApiPort; + } + + public void setSiteToSiteHttpApiPort(final Integer siteToSiteHttpApiPort) { + this.siteToSiteHttpApiPort = siteToSiteHttpApiPort; + } + + public Boolean getSiteToSiteSecure() { + return siteToSiteSecure; + } + + public void setSiteToSiteSecure(final Boolean siteToSiteSecure) { + this.siteToSiteSecure = siteToSiteSecure; + } + + public Set getNodeIdentities() { + return nodeIdentities; + } + + public void setNodeIdentities(final Set nodeIdentities) { + this.nodeIdentities = Collections.unmodifiableSet(new HashSet<>(nodeIdentities)); + } + + public boolean isLocalNodeIdentifier() { + return localNodeIdentifier; + } + + public void setLocalNodeIdentifier(final boolean localNodeIdentifier) { + this.localNodeIdentifier = localNodeIdentifier; + } + + public static NodeIdentifierDescriptor fromNodeIdentifier(final NodeIdentifier nodeId, final boolean localNodeId) { + final NodeIdentifierDescriptor descriptor = new NodeIdentifierDescriptor(); + descriptor.setId(nodeId.getId()); + descriptor.setApiAddress(nodeId.getApiAddress()); + descriptor.setApiPort(nodeId.getApiPort()); + descriptor.setSocketAddress(nodeId.getSocketAddress()); + descriptor.setSocketPort(nodeId.getSocketPort()); + descriptor.setSiteToSiteAddress(nodeId.getSiteToSiteAddress()); + descriptor.setSiteToSitePort(nodeId.getSiteToSitePort()); + 
descriptor.setSiteToSiteHttpApiPort(nodeId.getSiteToSiteHttpApiPort()); + descriptor.setSiteToSiteSecure(nodeId.isSiteToSiteSecure()); + descriptor.setNodeIdentities(nodeId.getNodeIdentities()); + descriptor.setLoadBalanceAddress(nodeId.getLoadBalanceAddress()); + descriptor.setLoadBalancePort(nodeId.getLoadBalancePort()); + descriptor.setLocalNodeIdentifier(localNodeId); + return descriptor; + } + + public NodeIdentifier toNodeIdentifier() { + return new NodeIdentifier(getId(), getApiAddress(), getApiPort(), getSocketAddress(), getSocketPort(), getLoadBalanceAddress(), getLoadBalancePort(), + getSiteToSiteAddress(), getSiteToSitePort(), getSiteToSiteHttpApiPort(), getSiteToSiteSecure(), getNodeIdentities()); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/ConnectionEntityMerger.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/ConnectionEntityMerger.java index 7e3bc5d92c26..a25fb4bd725a 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/ConnectionEntityMerger.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/ConnectionEntityMerger.java @@ -17,10 +17,12 @@ package org.apache.nifi.cluster.manager; import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.web.api.dto.ConnectionDTO; import org.apache.nifi.web.api.dto.status.ConnectionStatusDTO; import org.apache.nifi.web.api.entity.ConnectionEntity; import java.util.Map; +import java.util.Objects; public class ConnectionEntityMerger implements ComponentEntityMerger, ComponentEntityStatusMerger { @@ -33,6 +35,22 @@ public void merge(ConnectionEntity clientEntity, Map status.equals(ConnectionDTO.LOAD_BALANCE_ACTIVE)); + + if (anyActive) { + clientEntity.getComponent().setLoadBalanceStatus(ConnectionDTO.LOAD_BALANCE_ACTIVE); + } + } } @Override diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/ProcessorDiagnosticsEntityMerger.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/ProcessorDiagnosticsEntityMerger.java index 1f163f4702b9..826ecf702934 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/ProcessorDiagnosticsEntityMerger.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/ProcessorDiagnosticsEntityMerger.java @@ -17,22 +17,27 @@ package org.apache.nifi.cluster.manager; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.Function; -import java.util.stream.Collectors; - import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.web.api.dto.diagnostics.ConnectionDiagnosticsDTO; +import org.apache.nifi.web.api.dto.diagnostics.ConnectionDiagnosticsSnapshotDTO; import org.apache.nifi.web.api.dto.diagnostics.ControllerServiceDiagnosticsDTO; import org.apache.nifi.web.api.dto.diagnostics.JVMDiagnosticsSnapshotDTO; +import org.apache.nifi.web.api.dto.diagnostics.LocalQueuePartitionDTO; import org.apache.nifi.web.api.dto.diagnostics.NodeJVMDiagnosticsSnapshotDTO; import 
org.apache.nifi.web.api.dto.diagnostics.ProcessorDiagnosticsDTO; +import org.apache.nifi.web.api.dto.diagnostics.RemoteQueuePartitionDTO; import org.apache.nifi.web.api.dto.diagnostics.ThreadDumpDTO; import org.apache.nifi.web.api.entity.ControllerServiceEntity; import org.apache.nifi.web.api.entity.ProcessorDiagnosticsEntity; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + public class ProcessorDiagnosticsEntityMerger implements ComponentEntityMerger { private final long componentStatusSnapshotMillis; @@ -46,6 +51,11 @@ public void mergeComponents(final ProcessorDiagnosticsEntity clientEntity, final final List nodeJvmDiagnosticsSnapshots = new ArrayList<>(entityMap.size()); + // Merge connection diagnostics + mergeConnectionDiagnostics(clientEntity, entityMap, entity -> entity.getComponent().getIncomingConnections()); + mergeConnectionDiagnostics(clientEntity, entityMap, entity -> entity.getComponent().getOutgoingConnections()); + + // Merge the Processor Statuses and create a separate NodeJVMDiagnosticsSnapshotDTO for each. We do both of these // together simply because we are already iterating over the entityMap and we have to create the Node-specific JVM diagnostics // before we start merging the values, in the second iteration over the map. @@ -99,7 +109,7 @@ public void mergeComponents(final ProcessorDiagnosticsEntity clientEntity, final // Merge permissions on referenced controller services final Map serviceEntityById = clientDto.getReferencedControllerServices().stream() - .map(diagnosticsDto -> diagnosticsDto.getControllerService()) + .map(ControllerServiceDiagnosticsDTO::getControllerService) .collect(Collectors.toMap(ControllerServiceEntity::getId, Function.identity())); for (final Map.Entry entry : entityMap.entrySet()) { @@ -114,6 +124,129 @@ public void mergeComponents(final ProcessorDiagnosticsEntity clientEntity, final } } } + } + + private void mergeConnectionDiagnostics(final ProcessorDiagnosticsEntity clientEntity, final Map entityMap, + final Function> extractConnections) { + + final Map> snapshotByConnectionId = new HashMap<>(); + final Map connectionById = new HashMap<>(); + + for (final Map.Entry entry : entityMap.entrySet()) { + final NodeIdentifier nodeId = entry.getKey(); + final ProcessorDiagnosticsEntity entity = entry.getValue(); + + final Set connections = extractConnections.apply(entity); + for (final ConnectionDiagnosticsDTO connectionDiagnostics : connections) { + final String connectionId = connectionDiagnostics.getConnection().getId(); + final ConnectionDiagnosticsSnapshotDTO snapshot = connectionDiagnostics.getAggregateSnapshot(); + + snapshot.setNodeIdentifier(nodeId.getApiAddress() + ":" + nodeId.getApiPort()); + + final List snapshots = snapshotByConnectionId.computeIfAbsent(connectionId, id -> new ArrayList<>()); + snapshots.add(snapshot); + + if (entity == clientEntity){ + connectionById.put(connectionId, connectionDiagnostics); + } + } + } + + for (final Map.Entry> entry : snapshotByConnectionId.entrySet()) { + final String connectionId = entry.getKey(); + final List snapshots = entry.getValue(); + + final ConnectionDiagnosticsDTO dto = connectionById.get(connectionId); + dto.setNodeSnapshots(snapshots); + + dto.setAggregateSnapshot(mergeConnectionSnapshots(snapshots)); + } + } + + + + private ConnectionDiagnosticsSnapshotDTO mergeConnectionSnapshots(final List snapshots) { + final 
ConnectionDiagnosticsSnapshotDTO aggregate = new ConnectionDiagnosticsSnapshotDTO(); + + final Map> remotePartitionsByNodeId = new HashMap<>(); + + final LocalQueuePartitionDTO localPartition = new LocalQueuePartitionDTO(); + localPartition.setActiveQueueByteCount(0); + localPartition.setActiveQueueFlowFileCount(0); + localPartition.setAllActiveQueueFlowFilesPenalized(true); // set to true because we will update this value by AND'ing it with the snapshot value + localPartition.setAnyActiveQueueFlowFilesPenalized(false); // set to false because we will update this value by OR'ing it with the snapshot value + localPartition.setInFlightByteCount(0); + localPartition.setInFlightFlowFileCount(0); + localPartition.setSwapByteCount(0); + localPartition.setSwapFiles(0); + localPartition.setSwapFlowFileCount(0); + localPartition.setTotalByteCount(0); + localPartition.setTotalFlowFileCount(0); + + aggregate.setTotalByteCount(0L); + aggregate.setTotalFlowFileCount(0); + aggregate.setLocalQueuePartition(localPartition); + + for (final ConnectionDiagnosticsSnapshotDTO snapshot : snapshots) { + aggregate.setTotalByteCount(aggregate.getTotalByteCount() + snapshot.getTotalByteCount()); + aggregate.setTotalFlowFileCount(aggregate.getTotalFlowFileCount() + snapshot.getTotalFlowFileCount()); + + final LocalQueuePartitionDTO snapshotLocalPartition = snapshot.getLocalQueuePartition(); + localPartition.setActiveQueueByteCount(localPartition.getActiveQueueByteCount() + snapshotLocalPartition.getActiveQueueByteCount()); + localPartition.setActiveQueueFlowFileCount(localPartition.getActiveQueueFlowFileCount() + snapshotLocalPartition.getActiveQueueFlowFileCount()); + localPartition.setAllActiveQueueFlowFilesPenalized(localPartition.getAllActiveQueueFlowFilesPenalized() && snapshotLocalPartition.getAllActiveQueueFlowFilesPenalized()); + localPartition.setAnyActiveQueueFlowFilesPenalized(localPartition.getAnyActiveQueueFlowFilesPenalized() || snapshotLocalPartition.getAnyActiveQueueFlowFilesPenalized()); + localPartition.setInFlightByteCount(localPartition.getInFlightByteCount() + snapshotLocalPartition.getInFlightByteCount()); + localPartition.setInFlightFlowFileCount(localPartition.getInFlightFlowFileCount() + snapshotLocalPartition.getInFlightFlowFileCount()); + localPartition.setSwapByteCount(localPartition.getSwapByteCount() + snapshotLocalPartition.getSwapByteCount()); + localPartition.setSwapFiles(localPartition.getSwapFiles() + snapshotLocalPartition.getSwapFiles()); + localPartition.setSwapFlowFileCount(localPartition.getSwapFlowFileCount() + snapshotLocalPartition.getSwapFlowFileCount()); + localPartition.setTotalByteCount(localPartition.getTotalByteCount() + snapshotLocalPartition.getTotalByteCount()); + localPartition.setTotalFlowFileCount(localPartition.getTotalFlowFileCount() + snapshotLocalPartition.getTotalFlowFileCount()); + + for (final RemoteQueuePartitionDTO remoteQueuePartition : snapshot.getRemoteQueuePartitions()) { + final String nodeId = remoteQueuePartition.getNodeIdentifier(); + final List partitionsForNodeId = remotePartitionsByNodeId.computeIfAbsent(nodeId, key -> new ArrayList<>()); + partitionsForNodeId.add(remoteQueuePartition); + } + } + + final List mergedRemoteQueuePartitions = new ArrayList<>(); + for (final List partitions : remotePartitionsByNodeId.values()) { + final RemoteQueuePartitionDTO merged = mergeRemoteQueuePartitions(partitions); + mergedRemoteQueuePartitions.add(merged); + } + + aggregate.setRemoteQueuePartitions(mergedRemoteQueuePartitions); + + return aggregate; + } 
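[Editor's illustration, not part of the patch] The merge above is a straightforward fold over the per-node snapshots: byte and FlowFile counts are summed, while the penalized flags are seeded with their identity values and combined with AND (all penalized) and OR (any penalized). A minimal, self-contained sketch of that aggregation pattern, using a hypothetical PartitionStats stand-in rather than the LocalQueuePartitionDTO used by this patch:

import java.util.List;

// Hypothetical stand-in for LocalQueuePartitionDTO; only the aggregation pattern is shown.
final class PartitionStats {
    long byteCount;
    int flowFileCount;
    boolean allPenalized;
    boolean anyPenalized;

    static PartitionStats aggregate(final List<PartitionStats> perNode) {
        final PartitionStats merged = new PartitionStats();
        merged.allPenalized = true;   // identity for AND, as in the method above
        merged.anyPenalized = false;  // identity for OR
        for (final PartitionStats stats : perNode) {
            merged.byteCount += stats.byteCount;           // counts are summed across nodes
            merged.flowFileCount += stats.flowFileCount;
            merged.allPenalized &= stats.allPenalized;     // true only if penalized on every node
            merged.anyPenalized |= stats.anyPenalized;     // true if penalized on any node
        }
        return merged;
    }
}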
+ + private RemoteQueuePartitionDTO mergeRemoteQueuePartitions(final List partitions) { + final RemoteQueuePartitionDTO merged = new RemoteQueuePartitionDTO(); + merged.setActiveQueueByteCount(0); + merged.setActiveQueueFlowFileCount(0); + merged.setInFlightByteCount(0); + merged.setInFlightFlowFileCount(0); + merged.setSwapByteCount(0); + merged.setSwapFiles(0); + merged.setSwapFlowFileCount(0); + merged.setTotalByteCount(0); + merged.setTotalFlowFileCount(0); + + for (final RemoteQueuePartitionDTO partition : partitions) { + merged.setActiveQueueByteCount(merged.getActiveQueueByteCount() + partition.getActiveQueueByteCount()); + merged.setActiveQueueFlowFileCount(merged.getActiveQueueFlowFileCount() + partition.getActiveQueueFlowFileCount()); + merged.setInFlightByteCount(merged.getInFlightByteCount() + partition.getInFlightByteCount()); + merged.setInFlightFlowFileCount(merged.getInFlightFlowFileCount() + partition.getInFlightFlowFileCount()); + merged.setSwapByteCount(merged.getSwapByteCount() + partition.getSwapByteCount()); + merged.setSwapFiles(merged.getSwapFiles() + partition.getSwapFiles()); + merged.setSwapFlowFileCount(merged.getSwapFlowFileCount() + partition.getSwapFlowFileCount()); + merged.setTotalByteCount(merged.getTotalByteCount() + partition.getTotalByteCount()); + merged.setTotalFlowFileCount(merged.getTotalFlowFileCount() + partition.getTotalFlowFileCount()); + merged.setNodeIdentifier(partition.getNodeIdentifier()); + } + return merged; } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/RemoteProcessGroupEntityMerger.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/RemoteProcessGroupEntityMerger.java index 5207524e2338..d21087399903 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/RemoteProcessGroupEntityMerger.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/main/java/org/apache/nifi/cluster/manager/RemoteProcessGroupEntityMerger.java @@ -23,6 +23,7 @@ import org.apache.nifi.web.api.dto.status.RemoteProcessGroupStatusDTO; import org.apache.nifi.web.api.entity.RemoteProcessGroupEntity; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -124,11 +125,20 @@ private static void mergeDtos(final RemoteProcessGroupDTO clientDto, final Map + logger.info("[${name?.toUpperCase()}] ${(args as List).join(" ")}") + } + } + + private static StandardNiFiProperties mockNiFiProperties() { + [getClusterNodeConnectionTimeout: { -> "10 ms" }, + getClusterNodeReadTimeout : { -> "10 ms" }, + getProperty : { String prop -> + logger.mock("Requested getProperty(${prop}) -> \"\"") + "" + }] as StandardNiFiProperties + } + + @Test + void testShouldReplaceNonZeroContentLengthHeader() { + // Arrange + def headers = ["Content-Length": "123", "Other-Header": "arbitrary value"] + String method = "DELETE" + logger.info("Original headers: ${headers}") + + NiFiProperties mockProperties = mockNiFiProperties() + + OkHttpReplicationClient client = new OkHttpReplicationClient(mockProperties) + + // Act + client.checkContentLengthHeader(method, headers) + logger.info("Checked headers: ${headers}") + + // Assert + assert headers.size() == 2 + assert headers."Content-Length" == "0" + } + + @Test + void 
testShouldReplaceNonZeroContentLengthHeaderOnDeleteCaseInsensitive() { + // Arrange + def headers = ["Content-Length": "123", "Other-Header": "arbitrary value"] + String method = "delete" + logger.info("Original headers: ${headers}") + + NiFiProperties mockProperties = mockNiFiProperties() + + OkHttpReplicationClient client = new OkHttpReplicationClient(mockProperties) + + // Act + client.checkContentLengthHeader(method, headers) + logger.info("Checked headers: ${headers}") + + // Assert + assert headers.size() == 2 + assert headers."Content-Length" == "0" + } + + @Test + void testShouldNotReplaceContentLengthHeaderWhenZeroOrNull() { + // Arrange + String method = "DELETE" + def zeroOrNullContentLengths = [null, "0"] + + NiFiProperties mockProperties = mockNiFiProperties() + + OkHttpReplicationClient client = new OkHttpReplicationClient(mockProperties) + + // Act + zeroOrNullContentLengths.each { String contentLength -> + def headers = ["Content-Length": contentLength, "Other-Header": "arbitrary value"] + logger.info("Original headers: ${headers}") + + logger.info("Trying method ${method}") + client.checkContentLengthHeader(method, headers) + logger.info("Checked headers: ${headers}") + + // Assert + assert headers.size() == 2 + assert headers."Content-Length" == contentLength + } + } + + @Test + void testShouldNotReplaceNonZeroContentLengthHeaderOnOtherMethod() { + // Arrange + def headers = ["Content-Length": "123", "Other-Header": "arbitrary value"] + logger.info("Original headers: ${headers}") + + NiFiProperties mockProperties = mockNiFiProperties() + + OkHttpReplicationClient client = new OkHttpReplicationClient(mockProperties) + + def nonDeleteMethods = ["POST", "PUT", "GET", "HEAD"] + + // Act + nonDeleteMethods.each { String method -> + logger.info("Trying method ${method}") + client.checkContentLengthHeader(method, headers) + logger.info("Checked headers: ${headers}") + + // Assert + assert headers.size() == 2 + assert headers."Content-Length" == "123" + } + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/groovy/org/apache/nifi/cluster/coordination/node/NodeClusterCoordinatorSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/groovy/org/apache/nifi/cluster/coordination/node/NodeClusterCoordinatorSpec.groovy new file mode 100644 index 000000000000..2751b3ead18e --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/groovy/org/apache/nifi/cluster/coordination/node/NodeClusterCoordinatorSpec.groovy @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.cluster.coordination.node + +import org.apache.nifi.cluster.coordination.flow.FlowElection +import org.apache.nifi.cluster.firewall.ClusterNodeFirewall +import org.apache.nifi.cluster.protocol.NodeIdentifier +import org.apache.nifi.cluster.protocol.NodeProtocolSender +import org.apache.nifi.cluster.protocol.impl.ClusterCoordinationProtocolSenderListener +import org.apache.nifi.cluster.protocol.message.OffloadMessage +import org.apache.nifi.components.state.Scope +import org.apache.nifi.components.state.StateManager +import org.apache.nifi.components.state.StateManagerProvider +import org.apache.nifi.controller.leader.election.LeaderElectionManager +import org.apache.nifi.events.EventReporter +import org.apache.nifi.reporting.Severity +import org.apache.nifi.state.MockStateMap +import org.apache.nifi.util.NiFiProperties +import org.apache.nifi.web.revision.RevisionManager +import spock.lang.Specification +import spock.util.concurrent.BlockingVariable + +import java.util.concurrent.TimeUnit + +class NodeClusterCoordinatorSpec extends Specification { + def "requestNodeOffload"() { + given: 'mocked collaborators' + def clusterCoordinationProtocolSenderListener = Mock(ClusterCoordinationProtocolSenderListener) + def eventReporter = Mock EventReporter + def stateManager = Mock StateManager + def stateMap = new MockStateMap([:], 1) + stateManager.getState(_ as Scope) >> stateMap + def stateManagerProvider = Mock StateManagerProvider + stateManagerProvider.getStateManager(_ as String) >> stateManager + + and: 'a NodeClusterCoordinator that manages node status in a synchronized list' + List nodeStatuses = [].asSynchronized() + def clusterCoordinator = new NodeClusterCoordinator(clusterCoordinationProtocolSenderListener, eventReporter, Mock(LeaderElectionManager), + Mock(FlowElection), Mock(ClusterNodeFirewall), + Mock(RevisionManager), NiFiProperties.createBasicNiFiProperties('src/test/resources/conf/nifi.properties', [:]), + Mock(NodeProtocolSender), stateManagerProvider) { + @Override + void notifyOthersOfNodeStatusChange(NodeConnectionStatus updatedStatus, boolean notifyAllNodes, boolean waitForCoordinator) { + nodeStatuses.add(updatedStatus) + } + } + + and: 'two nodes' + def nodeIdentifier1 = createNodeIdentifier 1 + def nodeIdentifier2 = createNodeIdentifier 2 + + and: 'node 1 is connected, node 2 is disconnected' + clusterCoordinator.updateNodeStatus new NodeConnectionStatus(nodeIdentifier1, NodeConnectionState.CONNECTED) + clusterCoordinator.updateNodeStatus new NodeConnectionStatus(nodeIdentifier2, NodeConnectionState.DISCONNECTED) + while (nodeStatuses.size() < 2) { + Thread.sleep(10) + } + nodeStatuses.clear() + + def waitForReportEvent = new BlockingVariable(5, TimeUnit.SECONDS) + + when: 'a node is requested to offload' + clusterCoordinator.requestNodeOffload nodeIdentifier2, OffloadCode.OFFLOADED, 'unit test for offloading node' + waitForReportEvent.get() + + then: 'no exceptions are thrown' + noExceptionThrown() + + and: 'expected methods on collaborators are invoked' + 1 * clusterCoordinationProtocolSenderListener.offload({ OffloadMessage msg -> msg.nodeId == nodeIdentifier2 } as OffloadMessage) + 1 * eventReporter.reportEvent(Severity.INFO, 'Clustering', { msg -> msg.contains "$nodeIdentifier2.apiAddress:$nodeIdentifier2.apiPort" } as String) >> { + waitForReportEvent.set(it) + } + + and: 'the status of the offloaded node is known by the cluster coordinator to be offloading' + nodeStatuses[0].nodeIdentifier == nodeIdentifier2 + nodeStatuses[0].state == 
NodeConnectionState.OFFLOADING + } + + private static NodeIdentifier createNodeIdentifier(final int index) { + new NodeIdentifier("node-id-$index", "localhost", 8000 + index, "localhost", 9000 + index, + "localhost", 10000 + index, 11000 + index, false) + } + +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/groovy/org/apache/nifi/cluster/integration/OffloadNodeITSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/groovy/org/apache/nifi/cluster/integration/OffloadNodeITSpec.groovy new file mode 100644 index 000000000000..a8dd15835150 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/groovy/org/apache/nifi/cluster/integration/OffloadNodeITSpec.groovy @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.cluster.integration + +import org.apache.nifi.cluster.coordination.node.DisconnectionCode +import org.apache.nifi.cluster.coordination.node.OffloadCode +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +class OffloadNodeITSpec extends Specification { + def "requestNodeOffload"() { + given: 'a cluster with 3 nodes' + System.setProperty 'nifi.properties.file.path', 'src/test/resources/conf/nifi.properties' + def cluster = new Cluster() + cluster.start() + cluster.createNode() + def nodeToOffload = cluster.createNode() + cluster.createNode() + cluster.waitUntilAllNodesConnected 20, TimeUnit.SECONDS + + when: 'the node to offload is disconnected successfully' + cluster.currentClusterCoordinator.clusterCoordinator.requestNodeDisconnect nodeToOffload.identifier, DisconnectionCode.USER_DISCONNECTED, + 'integration test user disconnect' + cluster.currentClusterCoordinator.assertNodeDisconnects nodeToOffload.identifier, 10, TimeUnit.SECONDS + + and: 'the node to offload is requested to offload' + nodeToOffload.getClusterCoordinator().requestNodeOffload nodeToOffload.identifier, OffloadCode.OFFLOADED, 'integration test offload' + + then: 'the node has been successfully offloaded' + cluster.currentClusterCoordinator.assertNodeIsOffloaded nodeToOffload.identifier, 10, TimeUnit.SECONDS + + cleanup: + cluster.stop() + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/flow/TestPopularVoteFlowElection.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/flow/TestPopularVoteFlowElection.java index 1915b9b8beda..9c833b5d0efe 100644 --- 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/flow/TestPopularVoteFlowElection.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/flow/TestPopularVoteFlowElection.java @@ -218,7 +218,7 @@ private NiFiProperties getNiFiProperties() { } private NodeIdentifier createNodeId(final int index) { - return new NodeIdentifier(UUID.randomUUID().toString(), "localhost", 9000 + index, "localhost", 9000 + index, "localhost", 9000 + index, 9000 + index, true); + return new NodeIdentifier(UUID.randomUUID().toString(), "localhost", 9000 + index, "localhost", 9000 + index, "localhost", 9000 + index, "localhost", 9000 + index, 9000 + index, true); } private DataFlow createDataFlow(final byte[] flow) { diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/heartbeat/TestAbstractHeartbeatMonitor.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/heartbeat/TestAbstractHeartbeatMonitor.java index 50bdd0d12ba5..4aeff7b3eb95 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/heartbeat/TestAbstractHeartbeatMonitor.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/heartbeat/TestAbstractHeartbeatMonitor.java @@ -19,6 +19,8 @@ import org.apache.nifi.cluster.ReportedEvent; import org.apache.nifi.cluster.coordination.ClusterCoordinator; +import org.apache.nifi.cluster.coordination.ClusterTopologyEventListener; +import org.apache.nifi.cluster.coordination.node.OffloadCode; import org.apache.nifi.cluster.coordination.node.DisconnectionCode; import org.apache.nifi.cluster.coordination.node.NodeConnectionState; import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus; @@ -55,7 +57,7 @@ public class TestAbstractHeartbeatMonitor { @Before public void setup() throws Exception { System.setProperty(NiFiProperties.PROPERTIES_FILE_PATH, "src/test/resources/conf/nifi.properties"); - nodeId = new NodeIdentifier(UUID.randomUUID().toString(), "localhost", 9999, "localhost", 8888, "localhost", null, null, false); + nodeId = new NodeIdentifier(UUID.randomUUID().toString(), "localhost", 9999, "localhost", 8888, "localhost", 777, "localhost", null, null, false); } @After @@ -136,7 +138,7 @@ public synchronized void requestNodeDisconnect(final NodeIdentifier nodeId, fina @Test public void testDisconnectionOfTerminatedNodeDueToLackOfHeartbeat() throws Exception { final NodeIdentifier nodeId1 = nodeId; - final NodeIdentifier nodeId2 = new NodeIdentifier(UUID.randomUUID().toString(), "localhost", 7777, "localhost", 6666, "localhost", null, null, false); + final NodeIdentifier nodeId2 = new NodeIdentifier(UUID.randomUUID().toString(), "localhost", 7777, "localhost", 6666, "localhost", 5555, "localhost", null, null, false); final ClusterCoordinatorAdapter adapter = new ClusterCoordinatorAdapter(); final TestFriendlyHeartbeatMonitor monitor = createMonitor(adapter); @@ -243,6 +245,16 @@ public synchronized void finishNodeConnection(NodeIdentifier nodeId) { statuses.put(nodeId, new NodeConnectionStatus(nodeId, NodeConnectionState.CONNECTED)); } + @Override + public synchronized void finishNodeOffload(NodeIdentifier nodeId) 
{ + statuses.put(nodeId, new NodeConnectionStatus(nodeId, NodeConnectionState.OFFLOADED)); + } + + @Override + public synchronized void requestNodeOffload(NodeIdentifier nodeId, OffloadCode offloadCode, String explanation) { + statuses.put(nodeId, new NodeConnectionStatus(nodeId, NodeConnectionState.OFFLOADED)); + } + @Override public synchronized void requestNodeDisconnect(NodeIdentifier nodeId, DisconnectionCode disconnectionCode, String explanation) { statuses.put(nodeId, new NodeConnectionStatus(nodeId, NodeConnectionState.DISCONNECTED)); @@ -272,7 +284,7 @@ public synchronized Set getNodeIdentifiers(NodeConnectionState.. } @Override - public synchronized boolean isBlockedByFirewall(String hostname) { + public synchronized boolean isBlockedByFirewall(Set nodeIds) { return false; } @@ -369,6 +381,14 @@ public String getFlowElectionStatus() { public Map getClusterWorkload() throws IOException { return null; } + + @Override + public void registerEventListener(final ClusterTopologyEventListener eventListener) { + } + + @Override + public void unregisterEventListener(final ClusterTopologyEventListener eventListener) { + } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/StandardHttpResponseMapperSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/StandardHttpResponseMapperSpec.groovy index f2d3a24b56a7..75b7ae3d1b3f 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/StandardHttpResponseMapperSpec.groovy +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/StandardHttpResponseMapperSpec.groovy @@ -64,7 +64,8 @@ class StandardHttpResponseMapperSpec extends Specification { int n = it.node def response = Mock(Response) mockToRequestEntity.put response, it - new NodeResponse(new NodeIdentifier("cluster-node-$n", 'addr', n, 'sktaddr', n * 10, 'stsaddr', n * 100, n * 1000, false, null), "get", requestUri, response, 500L, requestId) + new NodeResponse(new NodeIdentifier("cluster-node-$n", 'addr', n, 'sktaddr', n * 10, 'sktaddr', n * 10, 'stsaddr', n * 100, n * 1000, false, null), "get", requestUri, response, 500L, + requestId) } as Set when: @@ -102,7 +103,8 @@ class StandardHttpResponseMapperSpec extends Specification { ++n def response = Mock(Response) mockToRequestEntity.put response, it - new NodeResponse(new NodeIdentifier("cluster-node-$n", 'addr', n, 'sktaddr', n * 10, 'stsaddr', n * 100, n * 1000, false, null), "get", requestUri, response, 500L, requestId) + new NodeResponse(new NodeIdentifier("cluster-node-$n", 'addr', n, 'sktaddr', n * 10, 'sktaddr', n * 11, 'stsaddr', n * 100, n * 1000, false, null), "get", requestUri, response, 500L, + requestId) } as Set when: diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/endpoints/CurrentUserEndpointMergerTest.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/endpoints/CurrentUserEndpointMergerTest.java index c1cfdf8a46a4..a93cd68e6650 100644 --- 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/endpoints/CurrentUserEndpointMergerTest.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/endpoints/CurrentUserEndpointMergerTest.java @@ -39,7 +39,7 @@ public class CurrentUserEndpointMergerTest { @Test public void testMergeUserPermissions() { - final NodeIdentifier nodeId1 = new NodeIdentifier("1", "localhost", 9000, "localhost", 9001, "localhost", 9002, 9003, false); + final NodeIdentifier nodeId1 = new NodeIdentifier("1", "localhost", 9000, "localhost", 9001, "localhost", 9006, "localhost", 9002, 9003, false); final CurrentUserEntity userNode1 = new CurrentUserEntity(); userNode1.setControllerPermissions(buildPermissions(true, false)); userNode1.setCountersPermissions(buildPermissions(true, true)); @@ -55,7 +55,7 @@ public void testMergeUserPermissions() { componentRestrictionsNode1.add(buildComponentRestriction(RequiredPermission.READ_FILESYSTEM, true, true)); userNode1.setComponentRestrictionPermissions(componentRestrictionsNode1); - final NodeIdentifier nodeId2 = new NodeIdentifier("2", "localhost", 8000, "localhost", 8001, "localhost", 8002, 8003, false); + final NodeIdentifier nodeId2 = new NodeIdentifier("2", "localhost", 8000, "localhost", 8001, "localhost", 9006,"localhost", 8002, 8003, false); final CurrentUserEntity userNode2 = new CurrentUserEntity(); userNode2.setControllerPermissions(buildPermissions(false, true)); userNode2.setCountersPermissions(buildPermissions(true, false)); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/endpoints/StatusHistoryEndpointMergerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/endpoints/StatusHistoryEndpointMergerSpec.groovy index 232c562e57f3..104e69b29b13 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/endpoints/StatusHistoryEndpointMergerSpec.groovy +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/endpoints/StatusHistoryEndpointMergerSpec.groovy @@ -57,7 +57,8 @@ class StatusHistoryEndpointMergerSpec extends Specification { ++n def response = Mock(Response) mockToRequestEntity.put response, it - new NodeResponse(new NodeIdentifier("cluster-node-$n", 'addr', n, 'sktaddr', n * 10, 'stsaddr', n * 100, n * 1000, false, null), "get", requestUri, response, 500L, requestId) + new NodeResponse(new NodeIdentifier("cluster-node-$n", 'addr', n, 'sktaddr', n * 10, null, n * 10, 'stsaddr', n * 100, n * 1000, false, null), + "GET", requestUri, response, 500L, requestId) } as Set when: diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/replication/TestThreadPoolRequestReplicator.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/replication/TestThreadPoolRequestReplicator.java index 15b777481caf..70579eaf7655 100644 --- 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/replication/TestThreadPoolRequestReplicator.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/http/replication/TestThreadPoolRequestReplicator.java @@ -248,7 +248,7 @@ protected NodeResponse replicateRequest(final PreparedRequest request, final Nod final int statusCode; if (requestCount.incrementAndGet() == 1) { assertEquals(ThreadPoolRequestReplicator.NODE_CONTINUE, expectsHeader); - statusCode = 150; + statusCode = Status.ACCEPTED.getStatusCode(); } else { assertNull(expectsHeader); statusCode = Status.OK.getStatusCode(); @@ -390,7 +390,7 @@ protected NodeResponse replicateRequest(final PreparedRequest request, final Nod if (requestIndex == 1) { final Response clientResponse = mock(Response.class); - when(clientResponse.getStatus()).thenReturn(150); + when(clientResponse.getStatus()).thenReturn(202); return new NodeResponse(nodeId, request.getMethod(), uri, clientResponse, -1L, requestId); } else { final IllegalClusterStateException explanation = new IllegalClusterStateException("Intentional Exception for Unit Testing"); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/node/TestNodeClusterCoordinator.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/node/TestNodeClusterCoordinator.java index 1378d3ba9e71..5ce2985c03d7 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/node/TestNodeClusterCoordinator.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/coordination/node/TestNodeClusterCoordinator.java @@ -16,24 +16,6 @@ */ package org.apache.nifi.cluster.coordination.node; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; - import org.apache.nifi.cluster.coordination.flow.FlowElection; import org.apache.nifi.cluster.manager.exception.IllegalNodeDisconnectionException; import org.apache.nifi.cluster.protocol.ConnectionRequest; @@ -47,8 +29,12 @@ import org.apache.nifi.cluster.protocol.message.NodeStatusChangeMessage; import org.apache.nifi.cluster.protocol.message.ProtocolMessage; import org.apache.nifi.cluster.protocol.message.ReconnectionRequestMessage; +import org.apache.nifi.components.state.Scope; +import org.apache.nifi.components.state.StateManager; +import org.apache.nifi.components.state.StateManagerProvider; import org.apache.nifi.events.EventReporter; import org.apache.nifi.services.FlowService; +import org.apache.nifi.state.MockStateMap; import org.apache.nifi.util.NiFiProperties; import org.apache.nifi.web.revision.RevisionManager; import org.junit.Assert; @@ -58,11 +44,33 @@ import org.mockito.invocation.InvocationOnMock; 
import org.mockito.stubbing.Answer; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.when; + public class TestNodeClusterCoordinator { private NodeClusterCoordinator coordinator; private ClusterCoordinationProtocolSenderListener senderListener; private List nodeStatuses; + private StateManagerProvider stateManagerProvider; private NiFiProperties createProperties() { final Map addProps = new HashMap<>(); @@ -76,12 +84,18 @@ public void setup() throws IOException { senderListener = Mockito.mock(ClusterCoordinationProtocolSenderListener.class); nodeStatuses = Collections.synchronizedList(new ArrayList<>()); + stateManagerProvider = Mockito.mock(StateManagerProvider.class); + + final StateManager stateManager = Mockito.mock(StateManager.class); + when(stateManager.getState(any(Scope.class))).thenReturn(new MockStateMap(Collections.emptyMap(), 1)); + when(stateManagerProvider.getStateManager(anyString())).thenReturn(stateManager); + final EventReporter eventReporter = Mockito.mock(EventReporter.class); final RevisionManager revisionManager = Mockito.mock(RevisionManager.class); - Mockito.when(revisionManager.getAllRevisions()).thenReturn(Collections.emptyList()); + when(revisionManager.getAllRevisions()).thenReturn(Collections.emptyList()); - coordinator = new NodeClusterCoordinator(senderListener, eventReporter, null, new FirstVoteWinsFlowElection(), null, revisionManager, createProperties(), null) { + coordinator = new NodeClusterCoordinator(senderListener, eventReporter, null, new FirstVoteWinsFlowElection(), null, revisionManager, createProperties(), null, stateManagerProvider) { @Override void notifyOthersOfNodeStatusChange(NodeConnectionStatus updatedStatus, boolean notifyAllNodes, boolean waitForCoordinator) { nodeStatuses.add(updatedStatus); @@ -90,7 +104,7 @@ void notifyOthersOfNodeStatusChange(NodeConnectionStatus updatedStatus, boolean final FlowService flowService = Mockito.mock(FlowService.class); final StandardDataFlow dataFlow = new StandardDataFlow(new byte[50], new byte[50], new byte[50], new HashSet<>()); - Mockito.when(flowService.createDataFlow()).thenReturn(dataFlow); + when(flowService.createDataFlow()).thenReturn(dataFlow); coordinator.setFlowService(flowService); } @@ -130,14 +144,14 @@ public void testConnectionResponseIndicatesAllNodes() throws IOException { } @Test - public void testTryAgainIfNoFlowServiceSet() { + public void testTryAgainIfNoFlowServiceSet() throws IOException { final ClusterCoordinationProtocolSenderListener senderListener = Mockito.mock(ClusterCoordinationProtocolSenderListener.class); final EventReporter eventReporter = Mockito.mock(EventReporter.class); final RevisionManager revisionManager = Mockito.mock(RevisionManager.class); - Mockito.when(revisionManager.getAllRevisions()).thenReturn(Collections.emptyList()); + when(revisionManager.getAllRevisions()).thenReturn(Collections.emptyList()); final NodeClusterCoordinator 
coordinator = new NodeClusterCoordinator(senderListener, eventReporter, null, new FirstVoteWinsFlowElection(), - null, revisionManager, createProperties(), null) { + null, revisionManager, createProperties(), null, stateManagerProvider) { @Override void notifyOthersOfNodeStatusChange(NodeConnectionStatus updatedStatus, boolean notifyAllNodes, boolean waitForCoordinator) { } @@ -150,7 +164,7 @@ void notifyOthersOfNodeStatusChange(NodeConnectionStatus updatedStatus, boolean coordinator.setConnected(true); - final ProtocolMessage protocolResponse = coordinator.handle(requestMsg); + final ProtocolMessage protocolResponse = coordinator.handle(requestMsg, Collections.emptySet()); assertNotNull(protocolResponse); assertTrue(protocolResponse instanceof ConnectionResponseMessage); @@ -164,7 +178,7 @@ public void testUnknownNodeAskedToConnectOnAttemptedConnectionComplete() throws final ClusterCoordinationProtocolSenderListener senderListener = Mockito.mock(ClusterCoordinationProtocolSenderListener.class); final AtomicReference requestRef = new AtomicReference<>(); - Mockito.when(senderListener.requestReconnection(Mockito.any(ReconnectionRequestMessage.class))).thenAnswer(new Answer() { + when(senderListener.requestReconnection(any(ReconnectionRequestMessage.class))).thenAnswer(new Answer() { @Override public Object answer(InvocationOnMock invocation) throws Throwable { final ReconnectionRequestMessage msg = invocation.getArgumentAt(0, ReconnectionRequestMessage.class); @@ -175,10 +189,10 @@ public Object answer(InvocationOnMock invocation) throws Throwable { final EventReporter eventReporter = Mockito.mock(EventReporter.class); final RevisionManager revisionManager = Mockito.mock(RevisionManager.class); - Mockito.when(revisionManager.getAllRevisions()).thenReturn(Collections.emptyList()); + when(revisionManager.getAllRevisions()).thenReturn(Collections.emptyList()); final NodeClusterCoordinator coordinator = new NodeClusterCoordinator(senderListener, eventReporter, null, new FirstVoteWinsFlowElection(), - null, revisionManager, createProperties(), null) { + null, revisionManager, createProperties(), null, stateManagerProvider) { @Override void notifyOthersOfNodeStatusChange(NodeConnectionStatus updatedStatus, boolean notifyAllNodes, boolean waitForCoordinator) { } @@ -186,7 +200,7 @@ void notifyOthersOfNodeStatusChange(NodeConnectionStatus updatedStatus, boolean final FlowService flowService = Mockito.mock(FlowService.class); final StandardDataFlow dataFlow = new StandardDataFlow(new byte[50], new byte[50], new byte[50], new HashSet<>()); - Mockito.when(flowService.createDataFlowFromController()).thenReturn(dataFlow); + when(flowService.createDataFlowFromController()).thenReturn(dataFlow); coordinator.setFlowService(flowService); coordinator.setConnected(true); @@ -232,7 +246,7 @@ public void testFinishNodeConnectionResultsInConnectedState() throws IOException @Test(timeout = 5000) public void testStatusChangesReplicated() throws InterruptedException, IOException { final RevisionManager revisionManager = Mockito.mock(RevisionManager.class); - Mockito.when(revisionManager.getAllRevisions()).thenReturn(Collections.emptyList()); + when(revisionManager.getAllRevisions()).thenReturn(Collections.emptyList()); // Create a connection request message and send to the coordinator final NodeIdentifier requestedNodeId = createNodeId(1); @@ -266,7 +280,7 @@ public void testStatusChangesReplicated() throws InterruptedException, IOExcepti assertNotNull(statusChange); assertEquals(createNodeId(1), 
statusChange.getNodeIdentifier()); assertEquals(DisconnectionCode.NODE_SHUTDOWN, statusChange.getDisconnectCode()); - assertEquals("Unit Test", statusChange.getDisconnectReason()); + assertEquals("Unit Test", statusChange.getReason()); } @Test @@ -393,11 +407,11 @@ public void testUpdateNodeStatusOutOfOrder() throws InterruptedException { nodeStatuses.clear(); final NodeConnectionStatus oldStatus = new NodeConnectionStatus(-1L, nodeId1, NodeConnectionState.DISCONNECTED, - DisconnectionCode.BLOCKED_BY_FIREWALL, null, 0L); + null, DisconnectionCode.BLOCKED_BY_FIREWALL, null, 0L); final NodeStatusChangeMessage msg = new NodeStatusChangeMessage(); msg.setNodeId(nodeId1); msg.setNodeConnectionStatus(oldStatus); - coordinator.handle(msg); + coordinator.handle(msg, Collections.emptySet()); // Ensure that no status change message was send Thread.sleep(1000); @@ -413,7 +427,7 @@ public void testProposedIdentifierResolvedIfConflict() { final ConnectionRequestMessage crm = new ConnectionRequestMessage(); crm.setConnectionRequest(connectionRequest); - final ProtocolMessage response = coordinator.handle(crm); + final ProtocolMessage response = coordinator.handle(crm, Collections.emptySet()); assertNotNull(response); assertTrue(response instanceof ConnectionResponseMessage); final ConnectionResponseMessage responseMessage = (ConnectionResponseMessage) response; @@ -424,7 +438,7 @@ public void testProposedIdentifierResolvedIfConflict() { final ConnectionRequestMessage crm2 = new ConnectionRequestMessage(); crm2.setConnectionRequest(conRequest2); - final ProtocolMessage conflictingResponse = coordinator.handle(crm2); + final ProtocolMessage conflictingResponse = coordinator.handle(crm2, Collections.emptySet()); assertNotNull(conflictingResponse); assertTrue(conflictingResponse instanceof ConnectionResponseMessage); final ConnectionResponseMessage conflictingResponseMessage = (ConnectionResponseMessage) conflictingResponse; @@ -446,7 +460,7 @@ private ProtocolMessage requestConnection(final NodeIdentifier requestedNodeId, final ConnectionRequest request = new ConnectionRequest(requestedNodeId, new StandardDataFlow(new byte[0], new byte[0], new byte[0], new HashSet<>())); final ConnectionRequestMessage requestMsg = new ConnectionRequestMessage(); requestMsg.setConnectionRequest(request); - return coordinator.handle(requestMsg); + return coordinator.handle(requestMsg, Collections.emptySet()); } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/Cluster.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/Cluster.java index dab073da85d3..370d6dc5a1c3 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/Cluster.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/Cluster.java @@ -144,6 +144,10 @@ public Node createNode() { return node; } + public Node getCurrentClusterCoordinator() { + return getNodes().stream().filter(node -> node.hasRole(ClusterRoles.CLUSTER_COORDINATOR)).findFirst().orElse(null); + } + public Node waitForClusterCoordinator(final long time, final TimeUnit timeUnit) { return ClusterUtils.waitUntilNonNull(time, timeUnit, () -> getNodes().stream().filter(node -> node.hasRole(ClusterRoles.CLUSTER_COORDINATOR)).findFirst().orElse(null)); diff --git 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/ClusterConnectionIT.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/ClusterConnectionIT.java index 45a2e4261c22..3980865fa5c4 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/ClusterConnectionIT.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/ClusterConnectionIT.java @@ -219,7 +219,7 @@ public void testNodeInheritsClusterTopologyOnHeartbeat() throws InterruptedExcep cluster.waitUntilAllNodesConnected(10, TimeUnit.SECONDS); final Node coordinator = cluster.waitForClusterCoordinator(10, TimeUnit.SECONDS); - final NodeIdentifier node4NotReallyInCluster = new NodeIdentifier(UUID.randomUUID().toString(), "localhost", 9283, "localhost", 9284, "localhost", 9285, null, false, null); + final NodeIdentifier node4NotReallyInCluster = new NodeIdentifier(UUID.randomUUID().toString(), "localhost", 9283, "localhost", 9284, "localhost", 9286, "localhost", 9285, null, false, null); final Map replacementStatuses = new HashMap<>(); replacementStatuses.put(node1.getIdentifier(), new NodeConnectionStatus(node1.getIdentifier(), DisconnectionCode.USER_DISCONNECTED)); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/Node.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/Node.java index e0d8a971d975..b2a499aaba4e 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/Node.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/integration/Node.java @@ -17,17 +17,6 @@ package org.apache.nifi.cluster.integration; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.ServerSocket; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.nifi.authorization.Authorizer; import org.apache.nifi.bundle.Bundle; @@ -73,6 +62,18 @@ import org.junit.Assert; import org.mockito.Mockito; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + public class Node { private final NodeIdentifier nodeId; private final NiFiProperties nodeProperties; @@ -107,6 +108,8 @@ public String getProperty(String key) { return String.valueOf(nodeId.getSocketPort()); }else if(key.equals(NiFiProperties.WEB_HTTP_PORT)){ return String.valueOf(nodeId.getApiPort()); + }else if(key.equals(NiFiProperties.LOAD_BALANCE_PORT)){ + return String.valueOf(nodeId.getLoadBalancePort()); }else { return properties.getProperty(key); } @@ -133,7 +136,7 @@ public Set 
getPropertyKeys() { private static NodeIdentifier createNodeId() { - return new NodeIdentifier(UUID.randomUUID().toString(), "localhost", createPort(), "localhost", createPort(), "localhost", null, null, false, null); + return new NodeIdentifier(UUID.randomUUID().toString(), "localhost", createPort(), "localhost", createPort(), "localhost", createPort(), "localhost", null, null, false, null); } /** @@ -296,8 +299,13 @@ public void reportEvent(Severity severity, String category, String message) { } final ClusterCoordinationProtocolSenderListener protocolSenderListener = new ClusterCoordinationProtocolSenderListener(createCoordinatorProtocolSender(), protocolListener); - return new NodeClusterCoordinator(protocolSenderListener, eventReporter, electionManager, flowElection, null, - revisionManager, nodeProperties, protocolSender); + try { + return new NodeClusterCoordinator(protocolSenderListener, eventReporter, electionManager, flowElection, null, + revisionManager, nodeProperties, protocolSender); + } catch (IOException e) { + Assert.fail(e.toString()); + return null; + } } @@ -380,4 +388,17 @@ public void assertNodeDisconnects(final NodeIdentifier nodeId, final long time, public void assertNodeIsConnected(final NodeIdentifier nodeId) { Assert.assertEquals(NodeConnectionState.CONNECTED, getClusterCoordinator().getConnectionStatus(nodeId).getState()); } + + /** + * Assert that the node with the given ID is offloaded (according to this node!) within the given amount of time + * + * @param nodeId id of the node + * @param time how long to wait + * @param timeUnit unit of time provided by the 'time' argument + */ + public void assertNodeIsOffloaded(final NodeIdentifier nodeId, final long time, final TimeUnit timeUnit) { + ClusterUtils.waitUntilConditionMet(time, timeUnit, + () -> getClusterCoordinator().getConnectionStatus(nodeId).getState() == NodeConnectionState.OFFLOADED, + () -> "Connection Status is " + getClusterCoordinator().getConnectionStatus(nodeId).toString()); + } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ConnectionEntityMergerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ConnectionEntityMergerSpec.groovy index ffa3429d48e5..83d301b1a3ea 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ConnectionEntityMergerSpec.groovy +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ConnectionEntityMergerSpec.groovy @@ -63,6 +63,6 @@ class ConnectionEntityMergerSpec extends Specification { } def createNodeIdentifier(int id) { - new NodeIdentifier("cluster-node-$id", 'addr', id, 'sktaddr', id * 10, 'stsaddr', id * 100, id * 1000, false, null) + new NodeIdentifier("cluster-node-$id", 'addr', id, 'sktaddr', id * 10, null, id * 10, 'stsaddr', id * 100, id * 1000, false, null) } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ControllerServiceEntityMergerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ControllerServiceEntityMergerSpec.groovy index bb1d595bf4c7..3997bece7f8f 100644 --- 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ControllerServiceEntityMergerSpec.groovy +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/ControllerServiceEntityMergerSpec.groovy @@ -147,6 +147,6 @@ class ControllerServiceEntityMergerSpec extends Specification { } def createNodeIdentifier(int id) { - new NodeIdentifier("cluster-node-$id", 'addr', id, 'sktaddr', id * 10, 'stsaddr', id * 100, id * 1000, false, null) + new NodeIdentifier("cluster-node-$id", 'addr', id, 'sktaddr', id * 10, null, id * 10, 'stsaddr', id * 100, id * 1000, false, null) } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/LabelEntityMergerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/LabelEntityMergerSpec.groovy index 028c864442e7..0a485b268b8a 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/LabelEntityMergerSpec.groovy +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/LabelEntityMergerSpec.groovy @@ -55,6 +55,6 @@ class LabelEntityMergerSpec extends Specification { } def createNodeIdentifier(int id) { - new NodeIdentifier("cluster-node-$id", 'addr', id, 'sktaddr', id * 10, 'stsaddr', id * 100, id * 1000, false, null) + new NodeIdentifier("cluster-node-$id", 'addr', id, 'sktaddr', id * 10, null, id * 10, 'stsaddr', id * 100, id * 1000, false, null) } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/RemoteProcessGroupEntityMergerTest.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/RemoteProcessGroupEntityMergerTest.java index ac73df7255d8..e519a4b04995 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/RemoteProcessGroupEntityMergerTest.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-cluster/src/test/java/org/apache/nifi/cluster/manager/RemoteProcessGroupEntityMergerTest.java @@ -122,4 +122,83 @@ public void testMergeRemoteProcessGroups() throws Exception { assertEquals(1, entity1.getComponent().getContents().getOutputPorts().size()); assertEquals("out1", entity1.getComponent().getContents().getOutputPorts().iterator().next().getName()); } + + @Test + public void testNoPortsAvailableOnOneNode() throws Exception { + final NodeIdentifier node1 = new NodeIdentifier("node-1", "host-1", 8080, "host-1", 19998, null, null, null, false); + final NodeIdentifier node2 = new NodeIdentifier("node-2", "host-2", 8081, "host-2", 19999, null, null, null, false); + + final PermissionsDTO permissions = new PermissionsDTO(); + permissions.setCanRead(true); + permissions.setCanWrite(true); + + final PermissionsDTO opsPermissions = new PermissionsDTO(); + opsPermissions.setCanRead(false); + opsPermissions.setCanWrite(false); + + final RemoteProcessGroupStatusDTO status = new RemoteProcessGroupStatusDTO(); + status.setAggregateSnapshot(new RemoteProcessGroupStatusSnapshotDTO()); + + final RemoteProcessGroupPortDTO in1_1 = new RemoteProcessGroupPortDTO(); + 
in1_1.setName("in1"); + + final RemoteProcessGroupPortDTO in1_2 = new RemoteProcessGroupPortDTO(); + in1_2.setName("in2"); + + final Set inputs1 = new HashSet<>(); + inputs1.add(in1_1); + inputs1.add(in1_2); + + final RemoteProcessGroupPortDTO out1_1 = new RemoteProcessGroupPortDTO(); + out1_1.setName("out1"); + + final Set outputs1 = new HashSet<>(); + outputs1.add(out1_1); + + final RemoteProcessGroupContentsDTO contents1 = new RemoteProcessGroupContentsDTO(); + contents1.setInputPorts(inputs1); + contents1.setOutputPorts(outputs1); + + final RemoteProcessGroupDTO rpg1 = new RemoteProcessGroupDTO(); + rpg1.setContents(contents1); + rpg1.setInputPortCount(2); + rpg1.setOutputPortCount(1); + + final RemoteProcessGroupEntity entity1 = new RemoteProcessGroupEntity(); + entity1.setPermissions(permissions); + entity1.setOperatePermissions(opsPermissions); + entity1.setStatus(status); + entity1.setComponent(rpg1); + + final Set inputs2 = new HashSet<>(); + final Set outputs2 = new HashSet<>(); + + final RemoteProcessGroupContentsDTO contents2 = new RemoteProcessGroupContentsDTO(); + contents2.setInputPorts(inputs2); + contents2.setOutputPorts(outputs2); + + final RemoteProcessGroupDTO rpg2 = new RemoteProcessGroupDTO(); + rpg2.setContents(contents2); + rpg2.setInputPortCount(0); + rpg2.setOutputPortCount(0); + + final RemoteProcessGroupEntity entity2 = new RemoteProcessGroupEntity(); + entity2.setPermissions(permissions); + entity2.setOperatePermissions(opsPermissions); + entity2.setStatus(status); + entity2.setComponent(rpg2); + + final Map nodeMap = new HashMap<>(); + nodeMap.put(node1, entity1); + nodeMap.put(node2, entity2); + + final RemoteProcessGroupEntityMerger merger = new RemoteProcessGroupEntityMerger(); + merger.merge(entity1, nodeMap); + + // should only include ports in common to all rpg's + assertEquals(0, entity1.getComponent().getContents().getInputPorts().size()); + assertEquals(0, entity1.getComponent().getContents().getOutputPorts().size()); + assertEquals(0, entity1.getComponent().getInputPortCount().intValue()); + assertEquals(0, entity1.getComponent().getOutputPortCount().intValue()); + } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/DropFlowFileRequest.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/queue/DropFlowFileRequest.java similarity index 82% rename from nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/DropFlowFileRequest.java rename to nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/queue/DropFlowFileRequest.java index 6f55e79fcaaf..69a0b92da818 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/DropFlowFileRequest.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/queue/DropFlowFileRequest.java @@ -15,11 +15,7 @@ * limitations under the License. 
*/ -package org.apache.nifi.controller; - -import org.apache.nifi.controller.queue.DropFlowFileState; -import org.apache.nifi.controller.queue.DropFlowFileStatus; -import org.apache.nifi.controller.queue.QueueSize; +package org.apache.nifi.controller.queue; public class DropFlowFileRequest implements DropFlowFileStatus { private final String identifier; @@ -53,7 +49,7 @@ public QueueSize getOriginalSize() { return originalSize; } - void setOriginalSize(final QueueSize originalSize) { + public void setOriginalSize(final QueueSize originalSize) { this.originalSize = originalSize; } @@ -62,7 +58,7 @@ public QueueSize getCurrentSize() { return currentSize; } - void setCurrentSize(final QueueSize queueSize) { + public void setCurrentSize(final QueueSize queueSize) { this.currentSize = queueSize; } @@ -71,7 +67,7 @@ public QueueSize getDroppedSize() { return droppedSize; } - void setDroppedSize(final QueueSize droppedSize) { + public void setDroppedSize(final QueueSize droppedSize) { this.droppedSize = droppedSize; } @@ -90,17 +86,17 @@ public String getFailureReason() { return failureReason; } - synchronized void setState(final DropFlowFileState state) { + public synchronized void setState(final DropFlowFileState state) { setState(state, null); } - synchronized void setState(final DropFlowFileState state, final String explanation) { + public synchronized void setState(final DropFlowFileState state, final String explanation) { this.state = state; this.failureReason = explanation; this.lastUpdated = System.currentTimeMillis(); } - synchronized boolean cancel() { + public synchronized boolean cancel() { if (this.state == DropFlowFileState.COMPLETE || this.state == DropFlowFileState.CANCELED) { return false; } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/repository/ContentNotFoundException.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/repository/ContentNotFoundException.java index 5aeb5c57535d..b63be535565b 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/repository/ContentNotFoundException.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/controller/repository/ContentNotFoundException.java @@ -18,6 +18,8 @@ import org.apache.nifi.controller.repository.claim.ContentClaim; +import java.util.Optional; + /** * */ @@ -25,23 +27,37 @@ public class ContentNotFoundException extends RuntimeException { private static final long serialVersionUID = 19048239082L; private final transient ContentClaim claim; + private final transient FlowFileRecord flowFile; public ContentNotFoundException(final ContentClaim claim) { super("Could not find content for " + claim); this.claim = claim; + this.flowFile = null; } public ContentNotFoundException(final ContentClaim claim, final Throwable t) { super("Could not find content for " + claim, t); this.claim = claim; + this.flowFile = null; } public ContentNotFoundException(final ContentClaim claim, final String message) { super("Could not find content for " + claim + ": " + message); this.claim = claim; + this.flowFile = null; + } + + public ContentNotFoundException(final FlowFileRecord flowFile, final ContentClaim claim, final String message) { + super("Could not find content for " + claim + ": " + message); + this.claim = claim; + this.flowFile = flowFile; } 
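[Editor's illustration, not part of the patch] The new constructor lets repository code attach the FlowFileRecord that referenced the missing claim, and getFlowFile() exposes it as an Optional. A hedged caller-side sketch of how that accessor might be consumed, assuming only the API shown in this diff; the MissingContentReporter class and its describe method are illustrative names, not NiFi code:

import java.util.Optional;
import org.apache.nifi.controller.repository.ContentNotFoundException;
import org.apache.nifi.controller.repository.FlowFileRecord;

// Hypothetical helper showing how a caller could report the offending FlowFile when present,
// falling back to the missing claim alone when the exception carries no FlowFile.
final class MissingContentReporter {
    private MissingContentReporter() {
    }

    static String describe(final ContentNotFoundException cnfe) {
        final Optional<FlowFileRecord> flowFile = cnfe.getFlowFile();
        return flowFile
            .map(ff -> "FlowFile " + ff + " references missing claim " + cnfe.getMissingClaim())
            .orElseGet(() -> "Missing claim " + cnfe.getMissingClaim());
    }
}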
public ContentClaim getMissingClaim() { return claim; } + + public Optional getFlowFile() { + return Optional.ofNullable(flowFile); + } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/registry/flow/FlowRegistryClient.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/registry/flow/FlowRegistryClient.java index 77c2761404bb..06d8ff567b1b 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/registry/flow/FlowRegistryClient.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/registry/flow/FlowRegistryClient.java @@ -23,9 +23,19 @@ public interface FlowRegistryClient { FlowRegistry getFlowRegistry(String registryId); default String getFlowRegistryId(String registryUrl) { + if (registryUrl.endsWith("/")) { + registryUrl = registryUrl.substring(0, registryUrl.length() - 1); + } + for (final String registryClientId : getRegistryIdentifiers()) { final FlowRegistry registry = getFlowRegistry(registryClientId); - if (registry.getURL().equals(registryUrl)) { + + String registryClientUrl = registry.getURL(); + if (registryClientUrl.endsWith("/")) { + registryClientUrl = registryClientUrl.substring(0, registryClientUrl.length() - 1); + } + + if (registryClientUrl.equals(registryUrl)) { return registryClientId; } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/web/revision/RevisionManager.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/web/revision/RevisionManager.java index 54ccd56d8eef..357f56a4b2ff 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/web/revision/RevisionManager.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/main/java/org/apache/nifi/web/revision/RevisionManager.java @@ -33,20 +33,19 @@ * *
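The FlowRegistryClient#getFlowRegistryId change above strips a trailing slash from both the supplied URL and each registered client's URL before comparing them, so "http://host:1111" and "http://host:1111/" resolve to the same registry. A minimal, self-contained sketch of that normalization; the helper name is illustrative and not part of the interface.

```java
public class RegistryUrlNormalizationSketch {

    // Mirrors the trailing-slash stripping applied to both sides of the comparison.
    static String stripTrailingSlash(final String url) {
        return url.endsWith("/") ? url.substring(0, url.length() - 1) : url;
    }

    public static void main(final String[] args) {
        // Either spelling of the registry URL now resolves to the same client,
        // which is what the new TestFlowRegistryClient cases exercise.
        System.out.println(stripTrailingSlash("http://localhost:1111/")
                .equals(stripTrailingSlash("http://localhost:1111"))); // prints true
    }
}
```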

 * Clients that will modify a resource must do so using a two-phase commit. First,
- * the client will issue a request that includes an HTTP Header of "X-NcmExpects".
+ * the client will issue a request that includes an HTTP Header of "X-Validation-Expects".
 * This indicates that the request will not actually be performed but rather that the
 * node should validate that the request could in fact be performed. If all nodes respond
- * with a 150-Continue response, then the second phase will commence. The second phase
- * will consist of replicating the same request but without the "X-NcmExpects" header.
+ * with a 202-Accepted response, then the second phase will commence. The second phase
+ * will consist of replicating the same request but without the "X-Validation-Expects" header.
 *
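A hedged sketch of how a node might branch on the first-phase header the javadoc describes. The servlet wiring, the applyChange() placeholder, and treating any non-null header value as the validation phase are assumptions for illustration; only the header name and the 202 response come from the documentation above.

```java
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public class TwoPhaseCommitSketch {
    static final String VALIDATION_EXPECTS_HEADER = "X-Validation-Expects";

    void handle(final HttpServletRequest request, final HttpServletResponse response) {
        if (request.getHeader(VALIDATION_EXPECTS_HEADER) != null) {
            // First phase: validate that the request could be applied, but do not apply it.
            response.setStatus(HttpServletResponse.SC_ACCEPTED); // 202
        } else {
            // Second phase: the same request is replicated without the header and applied.
            applyChange(request);
        }
    }

    private void applyChange(final HttpServletRequest request) {
        // placeholder for actually performing the mutation
    }
}
```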

* *

 * When the first phase of the two-phase commit is processed, the Revision Manager should
- be used to verify that the client-provided Revisions are current by calling the
- {@link #verifyRevisions(Collection)}
- method. If the revisions are up-to-date, the method will return successfully and the
- request validation may continue. Otherwise, the request should fail and the second phase
- should not be performed.
+ be used to retrieve the current revision by calling the {@link #getRevision(String)} method
+ to verify that the client-provided Revisions are current.
+ If the revisions are up-to-date, the request validation may continue.
+ Otherwise, the request should fail and the second phase should not be performed.
 *
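A minimal sketch of the first-phase revision check described above. The getRevision(String) lookup is the method named in the updated javadoc; the Revision accessors, import locations, and the version comparison shown are assumptions for the sketch.

```java
import org.apache.nifi.web.Revision;
import org.apache.nifi.web.revision.RevisionManager;

public class RevisionCheckSketch {
    private final RevisionManager revisionManager;

    public RevisionCheckSketch(final RevisionManager revisionManager) {
        this.revisionManager = revisionManager;
    }

    boolean isCurrent(final Revision clientRevision) {
        // Look up the authoritative revision for the component and compare it with
        // the client-provided one; a mismatch means the request is stale and the
        // second phase should not be performed.
        final Revision current = revisionManager.getRevision(clientRevision.getComponentId());
        return current.getVersion().equals(clientRevision.getVersion());
    }
}
```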

* *

diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/test/java/org/apache/nifi/registry/flow/TestFlowRegistryClient.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/test/java/org/apache/nifi/registry/flow/TestFlowRegistryClient.java new file mode 100644 index 000000000000..208558c8f787 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core-api/src/test/java/org/apache/nifi/registry/flow/TestFlowRegistryClient.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.registry.flow; + +import org.junit.Before; +import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TestFlowRegistryClient { + + private FlowRegistryClient flowRegistryClient; + + @Before + public void setup() { + flowRegistryClient = new MockFlowRegistryClient(); + } + + @Test + public void testParamWithTrailingSlash() { + flowRegistryClient.addFlowRegistry("1", "Registry 1", "http://localhost:1111", "NA"); + flowRegistryClient.addFlowRegistry("2", "Registry 2", "http://localhost:2222", "NA"); + flowRegistryClient.addFlowRegistry("3", "Registry 3", "http://localhost:3333", "NA"); + + final String flowRegistryId = flowRegistryClient.getFlowRegistryId("http://localhost:1111/"); + assertNotNull(flowRegistryId); + assertEquals("1", flowRegistryId); + } + + @Test + public void testClientWithTrailingSlash() { + flowRegistryClient.addFlowRegistry("1", "Registry 1", "http://localhost:1111", "NA"); + flowRegistryClient.addFlowRegistry("2", "Registry 2", "http://localhost:2222/", "NA"); + flowRegistryClient.addFlowRegistry("3", "Registry 3", "http://localhost:3333", "NA"); + + final String flowRegistryId = flowRegistryClient.getFlowRegistryId("http://localhost:2222"); + assertNotNull(flowRegistryId); + assertEquals("2", flowRegistryId); + } + + @Test + public void testNoTrailingSlash() { + flowRegistryClient.addFlowRegistry("1", "Registry 1", "http://localhost:1111", "NA"); + flowRegistryClient.addFlowRegistry("2", "Registry 2", "http://localhost:2222", "NA"); + flowRegistryClient.addFlowRegistry("3", "Registry 3", "http://localhost:3333", "NA"); + + final String flowRegistryId = flowRegistryClient.getFlowRegistryId("http://localhost:3333"); + assertNotNull(flowRegistryId); + assertEquals("3", flowRegistryId); + } + + + private static class MockFlowRegistryClient implements FlowRegistryClient { + + private Map registryMap = new HashMap<>(); + + @Override + public FlowRegistry getFlowRegistry(String 
registryId) { + return registryMap.get(registryId); + } + + @Override + public Set getRegistryIdentifiers() { + return registryMap.keySet(); + } + + @Override + public void addFlowRegistry(FlowRegistry registry) { + registryMap.put(registry.getIdentifier(), registry); + } + + @Override + public FlowRegistry addFlowRegistry(String registryId, String registryName, String registryUrl, String description) { + final FlowRegistry flowRegistry = mock(FlowRegistry.class); + when(flowRegistry.getIdentifier()).thenReturn(registryId); + when(flowRegistry.getName()).thenReturn(registryName); + when(flowRegistry.getURL()).thenReturn(registryUrl); + when(flowRegistry.getDescription()).thenReturn(description); + registryMap.put(flowRegistry.getIdentifier(), flowRegistry); + return flowRegistry; + } + + @Override + public FlowRegistry removeFlowRegistry(String registryId) { + return registryMap.remove(registryId); + } + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/connectable/StandardConnection.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/connectable/StandardConnection.java index 617287422f42..fe605853b416 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/connectable/StandardConnection.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/connectable/StandardConnection.java @@ -29,18 +29,16 @@ import org.apache.nifi.authorization.resource.Authorizable; import org.apache.nifi.authorization.user.NiFiUser; import org.apache.nifi.controller.ProcessScheduler; -import org.apache.nifi.controller.StandardFlowFileQueue; +import org.apache.nifi.controller.queue.ConnectionEventListener; import org.apache.nifi.controller.queue.FlowFileQueue; +import org.apache.nifi.controller.queue.FlowFileQueueFactory; +import org.apache.nifi.controller.queue.LoadBalanceStrategy; import org.apache.nifi.controller.repository.FlowFileRecord; -import org.apache.nifi.controller.repository.FlowFileRepository; -import org.apache.nifi.controller.repository.FlowFileSwapManager; -import org.apache.nifi.controller.repository.claim.ResourceClaimManager; -import org.apache.nifi.events.EventReporter; import org.apache.nifi.groups.ProcessGroup; import org.apache.nifi.processor.FlowFileFilter; import org.apache.nifi.processor.Relationship; -import org.apache.nifi.provenance.ProvenanceEventRepository; import org.apache.nifi.remote.RemoteGroupPort; +import org.apache.nifi.scheduling.SchedulingStrategy; import java.util.ArrayList; import java.util.Collection; @@ -60,7 +58,7 @@ * one or more relationships that map the source component to the destination * component. 
*/ -public final class StandardConnection implements Connection { +public final class StandardConnection implements Connection, ConnectionEventListener { private final String id; private final AtomicReference processGroup; @@ -69,13 +67,16 @@ public final class StandardConnection implements Connection { private final Connectable source; private final AtomicReference destination; private final AtomicReference> relationships; - private final StandardFlowFileQueue flowFileQueue; private final AtomicInteger labelIndex = new AtomicInteger(1); private final AtomicLong zIndex = new AtomicLong(0L); private final AtomicReference versionedComponentId = new AtomicReference<>(); private final ProcessScheduler scheduler; + private final FlowFileQueueFactory flowFileQueueFactory; + private final boolean clustered; private final int hashCode; + private volatile FlowFileQueue flowFileQueue; + private StandardConnection(final Builder builder) { id = builder.id; name = new AtomicReference<>(builder.name); @@ -85,9 +86,10 @@ private StandardConnection(final Builder builder) { destination = new AtomicReference<>(builder.destination); relationships = new AtomicReference<>(Collections.unmodifiableCollection(builder.relationships)); scheduler = builder.scheduler; - flowFileQueue = new StandardFlowFileQueue(id, this, builder.flowFileRepository, builder.provenanceRepository, builder.resourceClaimManager, - scheduler, builder.swapManager, builder.eventReporter, builder.queueSwapThreshold, - builder.defaultBackPressureObjectThreshold, builder.defaultBackPressureDataSizeThreshold); + flowFileQueueFactory = builder.flowFileQueueFactory; + clustered = builder.clustered; + + flowFileQueue = flowFileQueueFactory.createFlowFileQueue(LoadBalanceStrategy.DO_NOT_LOAD_BALANCE, null, this); hashCode = new HashCodeBuilder(7, 67).append(id).toHashCode(); } @@ -147,6 +149,20 @@ public String getSafeDescription() { }; } + @Override + public void triggerDestinationEvent() { + if (getDestination().getSchedulingStrategy() == SchedulingStrategy.EVENT_DRIVEN) { + scheduler.registerEvent(getDestination()); + } + } + + @Override + public void triggerSourceEvent() { + if (getSource().getSchedulingStrategy() == SchedulingStrategy.EVENT_DRIVEN) { + scheduler.registerEvent(getSource()); + } + } + @Override public Authorizable getSourceAuthorizable() { final Connectable sourceConnectable = getSource(); @@ -297,7 +313,7 @@ public void setDestination(final Connectable newDestination) { throw new IllegalStateException("Cannot change destination of Connection because the current destination is running"); } - if (getFlowFileQueue().getUnacknowledgedQueueSize().getObjectCount() > 0) { + if (getFlowFileQueue().isUnacknowledgedFlowFile()) { throw new IllegalStateException("Cannot change destination of Connection because FlowFiles from this Connection are currently held by " + previousDestination); } @@ -354,7 +370,7 @@ public int hashCode() { @Override public String toString() { - return "Connection[Source ID=" + id + ",Dest ID=" + getDestination().getIdentifier() + "]"; + return "Connection[ID=" + getIdentifier() + ", Source ID=" + getSource().getIdentifier() + ", Dest ID=" + getDestination().getIdentifier() + "]"; } /** @@ -386,14 +402,8 @@ public static class Builder { private Connectable source; private Connectable destination; private Collection relationships; - private FlowFileSwapManager swapManager; - private EventReporter eventReporter; - private FlowFileRepository flowFileRepository; - private ProvenanceEventRepository provenanceRepository; 
- private ResourceClaimManager resourceClaimManager; - private int queueSwapThreshold; - private Long defaultBackPressureObjectThreshold; - private String defaultBackPressureDataSizeThreshold; + private FlowFileQueueFactory flowFileQueueFactory; + private boolean clustered = false; public Builder(final ProcessScheduler scheduler) { this.scheduler = scheduler; @@ -440,43 +450,13 @@ public Builder addBendPoint(final Position bendPoint) { return this; } - public Builder swapManager(final FlowFileSwapManager swapManager) { - this.swapManager = swapManager; - return this; - } - - public Builder eventReporter(final EventReporter eventReporter) { - this.eventReporter = eventReporter; - return this; - } - - public Builder flowFileRepository(final FlowFileRepository flowFileRepository) { - this.flowFileRepository = flowFileRepository; + public Builder flowFileQueueFactory(final FlowFileQueueFactory flowFileQueueFactory) { + this.flowFileQueueFactory = flowFileQueueFactory; return this; } - public Builder provenanceRepository(final ProvenanceEventRepository provenanceRepository) { - this.provenanceRepository = provenanceRepository; - return this; - } - - public Builder resourceClaimManager(final ResourceClaimManager resourceClaimManager) { - this.resourceClaimManager = resourceClaimManager; - return this; - } - - public Builder queueSwapThreshold(final int queueSwapThreshold) { - this.queueSwapThreshold = queueSwapThreshold; - return this; - } - - public Builder defaultBackPressureObjectThreshold(final long defaultBackPressureObjectThreshold) { - this.defaultBackPressureObjectThreshold = defaultBackPressureObjectThreshold; - return this; - } - - public Builder defaultBackPressureDataSizeThreshold(final String defaultBackPressureDataSizeThreshold) { - this.defaultBackPressureDataSizeThreshold = defaultBackPressureDataSizeThreshold; + public Builder clustered(final boolean clustered) { + this.clustered = clustered; return this; } @@ -487,17 +467,8 @@ public StandardConnection build() { if (destination == null) { throw new IllegalStateException("Cannot build a Connection without a Destination"); } - if (swapManager == null) { - throw new IllegalStateException("Cannot build a Connection without a FlowFileSwapManager"); - } - if (flowFileRepository == null) { - throw new IllegalStateException("Cannot build a Connection without a FlowFile Repository"); - } - if (provenanceRepository == null) { - throw new IllegalStateException("Cannot build a Connection without a Provenance Repository"); - } - if (resourceClaimManager == null) { - throw new IllegalStateException("Cannot build a Connection without a Resource Claim Manager"); + if (flowFileQueueFactory == null) { + throw new IllegalStateException("Cannot build a Connection without a FlowFileQueueFactory"); } if (relationships == null) { diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FileSystemSwapManager.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FileSystemSwapManager.java index 208bbceb0987..5f8f92517117 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FileSystemSwapManager.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FileSystemSwapManager.java @@ -16,6 +16,26 @@ */ package org.apache.nifi.controller; +import 
org.apache.nifi.controller.queue.FlowFileQueue; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileRepository; +import org.apache.nifi.controller.repository.FlowFileSwapManager; +import org.apache.nifi.controller.repository.SwapContents; +import org.apache.nifi.controller.repository.SwapManagerInitializationContext; +import org.apache.nifi.controller.repository.SwapSummary; +import org.apache.nifi.controller.repository.claim.ResourceClaimManager; +import org.apache.nifi.controller.swap.SchemaSwapDeserializer; +import org.apache.nifi.controller.swap.SchemaSwapSerializer; +import org.apache.nifi.controller.swap.SimpleSwapDeserializer; +import org.apache.nifi.controller.swap.SwapDeserializer; +import org.apache.nifi.controller.swap.SwapSerializer; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.reporting.Severity; +import org.apache.nifi.stream.io.StreamUtils; +import org.apache.nifi.util.NiFiProperties; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.DataInputStream; @@ -29,34 +49,19 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.Objects; +import java.util.Set; import java.util.UUID; import java.util.regex.Pattern; - -import org.apache.nifi.controller.queue.FlowFileQueue; -import org.apache.nifi.controller.repository.FlowFileRecord; -import org.apache.nifi.controller.repository.FlowFileRepository; -import org.apache.nifi.controller.repository.FlowFileSwapManager; -import org.apache.nifi.controller.repository.SwapContents; -import org.apache.nifi.controller.repository.SwapManagerInitializationContext; -import org.apache.nifi.controller.repository.SwapSummary; -import org.apache.nifi.controller.repository.claim.ResourceClaimManager; -import org.apache.nifi.controller.swap.SchemaSwapDeserializer; -import org.apache.nifi.controller.swap.SchemaSwapSerializer; -import org.apache.nifi.controller.swap.SimpleSwapDeserializer; -import org.apache.nifi.controller.swap.SwapDeserializer; -import org.apache.nifi.controller.swap.SwapSerializer; -import org.apache.nifi.events.EventReporter; -import org.apache.nifi.reporting.Severity; -import org.apache.nifi.stream.io.StreamUtils; -import org.apache.nifi.util.NiFiProperties; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** *

@@ -66,9 +71,8 @@ */ public class FileSystemSwapManager implements FlowFileSwapManager { - public static final int MINIMUM_SWAP_COUNT = 10000; - private static final Pattern SWAP_FILE_PATTERN = Pattern.compile("\\d+-.+\\.swap"); - private static final Pattern TEMP_SWAP_FILE_PATTERN = Pattern.compile("\\d+-.+\\.swap\\.part"); + private static final Pattern SWAP_FILE_PATTERN = Pattern.compile("\\d+-.+?(\\..*?)?\\.swap"); + private static final Pattern TEMP_SWAP_FILE_PATTERN = Pattern.compile("\\d+-.+?(\\..*?)?\\.swap\\.part"); public static final int SWAP_ENCODING_VERSION = 10; public static final String EVENT_CATEGORY = "Swap FlowFiles"; @@ -106,13 +110,18 @@ public synchronized void initialize(final SwapManagerInitializationContext initi this.flowFileRepository = initializationContext.getFlowFileRepository(); } + @Override - public String swapOut(final List toSwap, final FlowFileQueue flowFileQueue) throws IOException { + public String swapOut(final List toSwap, final FlowFileQueue flowFileQueue, final String partitionName) throws IOException { if (toSwap == null || toSwap.isEmpty()) { return null; } - final File swapFile = new File(storageDirectory, System.currentTimeMillis() + "-" + flowFileQueue.getIdentifier() + "-" + UUID.randomUUID().toString() + ".swap"); + final String swapFilePrefix = System.currentTimeMillis() + "-" + flowFileQueue.getIdentifier() + "-" + UUID.randomUUID().toString(); + final String swapFileBaseName = partitionName == null ? swapFilePrefix : swapFilePrefix + "." + partitionName; + final String swapFileName = swapFileBaseName + ".swap"; + + final File swapFile = new File(storageDirectory, swapFileName); final File swapTempFile = new File(swapFile.getParentFile(), swapFile.getName() + ".part"); final String swapLocation = swapFile.getAbsolutePath(); @@ -185,8 +194,55 @@ public boolean accept(final File dir, final String name) { } } + private String getOwnerQueueIdentifier(final File swapFile) { + final String[] splits = swapFile.getName().split("-"); + if (splits.length > 6) { + final String queueIdentifier = splits[1] + "-" + splits[2] + "-" + splits[3] + "-" + splits[4] + "-" + splits[5]; + return queueIdentifier; + } + + return null; + } + + private String getOwnerPartition(final File swapFile) { + final String filename = swapFile.getName(); + final int indexOfDot = filename.indexOf("."); + if (indexOfDot < 1) { + return null; + } + + final int lastIndexOfDot = filename.lastIndexOf("."); + if (lastIndexOfDot == indexOfDot) { + return null; + } + + return filename.substring(indexOfDot + 1, lastIndexOfDot); + } + + @Override + public Set getSwappedPartitionNames(final FlowFileQueue queue) { + final File[] swapFiles = storageDirectory.listFiles(new FilenameFilter() { + @Override + public boolean accept(final File dir, final String name) { + return SWAP_FILE_PATTERN.matcher(name).matches(); + } + }); + + if (swapFiles == null) { + return Collections.emptySet(); + } + + final String queueId = queue.getIdentifier(); + + return Stream.of(swapFiles) + .filter(swapFile -> queueId.equals(getOwnerQueueIdentifier(swapFile))) + .map(this::getOwnerPartition) + .filter(Objects::nonNull) + .collect(Collectors.toSet()); + } + @Override - public List recoverSwapLocations(final FlowFileQueue flowFileQueue) throws IOException { + public List recoverSwapLocations(final FlowFileQueue flowFileQueue, final String partitionName) throws IOException { final File[] swapFiles = storageDirectory.listFiles(new FilenameFilter() { @Override public boolean accept(final File dir, final String 
name) { @@ -212,15 +268,21 @@ public boolean accept(final File dir, final String name) { } // split the filename by dashes. The old filenaming scheme was "-.swap" but the new naming scheme is - // "--.swap". If we have two dashes, then we can just check if the queue ID is equal - // to the id of the queue given and if not we can just move on. - final String[] splits = swapFile.getName().split("-"); - if (splits.length > 6) { - final String queueIdentifier = splits[1] + "-" + splits[2] + "-" + splits[3] + "-" + splits[4] + "-" + splits[5]; - if (queueIdentifier.equals(flowFileQueue.getIdentifier())) { - swapLocations.add(swapFile.getAbsolutePath()); + // "--.[partition name.]swap". + final String ownerQueueId = getOwnerQueueIdentifier(swapFile); + if (ownerQueueId != null) { + if (!ownerQueueId.equals(flowFileQueue.getIdentifier())) { + continue; + } + + if (partitionName != null) { + final String ownerPartition = getOwnerPartition(swapFile); + if (!partitionName.equals(ownerPartition)) { + continue; + } } + swapLocations.add(swapFile.getAbsolutePath()); continue; } @@ -357,4 +419,28 @@ private Long getTimestampFromFilename(final String fullyQualifiedFilename) { } } } + + @Override + public String changePartitionName(final String swapLocation, final String newPartitionName) throws IOException { + final File existingFile = new File(swapLocation); + if (!existingFile.exists()) { + throw new FileNotFoundException("Could not change name of partition for swap location " + swapLocation + " because no swap file exists at that location"); + } + + final String existingFilename = existingFile.getName(); + + final String newFilename; + final int dotIndex = existingFilename.indexOf("."); + if (dotIndex < 0) { + newFilename = existingFilename + "." + newPartitionName + ".swap"; + } else { + newFilename = existingFilename.substring(0, dotIndex) + "." + newPartitionName + ".swap"; + } + + final File newFile = new File(existingFile.getParentFile(), newFilename); + // Use Files.move and convert to Path's instead of File.rename so that we get an IOException on failure that describes why we failed. 
+ Files.move(existingFile.toPath(), newFile.toPath()); + + return newFile.getAbsolutePath(); + } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FlowController.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FlowController.java index 21c61e9368b1..ebd809c57230 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FlowController.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/FlowController.java @@ -35,6 +35,8 @@ import org.apache.nifi.authorization.resource.ProvenanceDataAuthorizable; import org.apache.nifi.authorization.resource.ResourceFactory; import org.apache.nifi.authorization.user.NiFiUser; +import org.apache.nifi.authorization.util.IdentityMapping; +import org.apache.nifi.authorization.util.IdentityMappingUtil; import org.apache.nifi.bundle.Bundle; import org.apache.nifi.bundle.BundleCoordinate; import org.apache.nifi.cluster.coordination.ClusterCoordinator; @@ -76,8 +78,23 @@ import org.apache.nifi.controller.label.StandardLabel; import org.apache.nifi.controller.leader.election.LeaderElectionManager; import org.apache.nifi.controller.leader.election.LeaderElectionStateChangeListener; +import org.apache.nifi.controller.queue.ConnectionEventListener; import org.apache.nifi.controller.queue.FlowFileQueue; +import org.apache.nifi.controller.queue.FlowFileQueueFactory; +import org.apache.nifi.controller.queue.LoadBalanceStrategy; import org.apache.nifi.controller.queue.QueueSize; +import org.apache.nifi.controller.queue.StandardFlowFileQueue; +import org.apache.nifi.controller.queue.clustered.ContentRepositoryFlowFileAccess; +import org.apache.nifi.controller.queue.clustered.SocketLoadBalancedFlowFileQueue; +import org.apache.nifi.controller.queue.clustered.client.StandardLoadBalanceFlowFileCodec; +import org.apache.nifi.controller.queue.clustered.client.async.nio.NioAsyncLoadBalanceClientFactory; +import org.apache.nifi.controller.queue.clustered.client.async.nio.NioAsyncLoadBalanceClientRegistry; +import org.apache.nifi.controller.queue.clustered.client.async.nio.NioAsyncLoadBalanceClientTask; +import org.apache.nifi.controller.queue.clustered.server.ClusterLoadBalanceAuthorizer; +import org.apache.nifi.controller.queue.clustered.server.ConnectionLoadBalanceServer; +import org.apache.nifi.controller.queue.clustered.server.LoadBalanceAuthorizer; +import org.apache.nifi.controller.queue.clustered.server.LoadBalanceProtocol; +import org.apache.nifi.controller.queue.clustered.server.StandardLoadBalanceProtocol; import org.apache.nifi.controller.reporting.ReportingTaskInstantiationException; import org.apache.nifi.controller.reporting.ReportingTaskProvider; import org.apache.nifi.controller.reporting.StandardReportingInitializationContext; @@ -243,6 +260,7 @@ import java.io.OutputStream; import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; +import java.net.InetSocketAddress; import java.net.URL; import java.util.ArrayList; import java.util.Collection; @@ -324,6 +342,11 @@ public class FlowController implements EventAccess, ControllerServiceProvider, R private final VariableRegistry variableRegistry; private final ConcurrentMap rootControllerServices = new ConcurrentHashMap<>(); + private final ConnectionLoadBalanceServer loadBalanceServer; 
+ private final NioAsyncLoadBalanceClientRegistry loadBalanceClientRegistry; + private final FlowEngine loadBalanceClientThreadPool; + private final Set loadBalanceClientTasks = new HashSet<>(); + private final ConcurrentMap allProcessors = new ConcurrentHashMap<>(); private final ConcurrentMap allProcessGroups = new ConcurrentHashMap<>(); private final ConcurrentMap allConnections = new ConcurrentHashMap<>(); @@ -673,8 +696,40 @@ public void run() { leaderElectionManager.start(); heartbeatMonitor.start(); + + final InetSocketAddress loadBalanceAddress = nifiProperties.getClusterLoadBalanceAddress(); + // Setup Load Balancing Server + final EventReporter eventReporter = createEventReporter(bulletinRepository); + final List identityMappings = IdentityMappingUtil.getIdentityMappings(nifiProperties); + final LoadBalanceAuthorizer authorizeConnection = new ClusterLoadBalanceAuthorizer(clusterCoordinator, eventReporter); + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(flowFileRepo, contentRepository, provenanceRepository, this, authorizeConnection); + + final int numThreads = nifiProperties.getIntegerProperty(NiFiProperties.LOAD_BALANCE_MAX_THREAD_COUNT, NiFiProperties.DEFAULT_LOAD_BALANCE_MAX_THREAD_COUNT); + final String timeoutPeriod = nifiProperties.getProperty(NiFiProperties.LOAD_BALANCE_COMMS_TIMEOUT, NiFiProperties.DEFAULT_LOAD_BALANCE_COMMS_TIMEOUT); + final int timeoutMillis = (int) FormatUtils.getTimeDuration(timeoutPeriod, TimeUnit.MILLISECONDS); + + loadBalanceServer = new ConnectionLoadBalanceServer(loadBalanceAddress.getHostName(), loadBalanceAddress.getPort(), sslContext, + numThreads, loadBalanceProtocol, eventReporter, timeoutMillis); + + + final int connectionsPerNode = nifiProperties.getIntegerProperty(NiFiProperties.LOAD_BALANCE_CONNECTIONS_PER_NODE, NiFiProperties.DEFAULT_LOAD_BALANCE_CONNECTIONS_PER_NODE); + final NioAsyncLoadBalanceClientFactory asyncClientFactory = new NioAsyncLoadBalanceClientFactory(sslContext, timeoutMillis, new ContentRepositoryFlowFileAccess(contentRepository), + eventReporter, new StandardLoadBalanceFlowFileCodec()); + loadBalanceClientRegistry = new NioAsyncLoadBalanceClientRegistry(asyncClientFactory, connectionsPerNode); + + final int loadBalanceClientThreadCount = nifiProperties.getIntegerProperty(NiFiProperties.LOAD_BALANCE_MAX_THREAD_COUNT, NiFiProperties.DEFAULT_LOAD_BALANCE_MAX_THREAD_COUNT); + loadBalanceClientThreadPool = new FlowEngine(loadBalanceClientThreadCount, "Load-Balanced Client", true); + + for (int i=0; i < loadBalanceClientThreadCount; i++) { + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(loadBalanceClientRegistry, clusterCoordinator, eventReporter); + loadBalanceClientTasks.add(clientTask); + loadBalanceClientThreadPool.submit(clientTask); + } } else { + loadBalanceClientRegistry = null; heartbeater = null; + loadBalanceServer = null; + loadBalanceClientThreadPool = null; } } @@ -775,6 +830,10 @@ public void initializeFlow() throws IOException { listener.start(); } + if (loadBalanceServer != null) { + loadBalanceServer.start(); + } + notifyComponentsConfigurationRestored(); timerDrivenEngineRef.get().scheduleWithFixedDelay(new Runnable() { @@ -940,11 +999,19 @@ public void trigger(final ComponentNode component) { startConnectablesAfterInitialization.clear(); startRemoteGroupPortsAfterInitialization.clear(); } + + for (final Connection connection : getRootGroup().findAllConnections()) { + connection.getFlowFileQueue().startLoadBalancing(); + } } finally { 
writeLock.unlock("onFlowInitialized"); } } + public boolean isStartAfterInitialization(final Connectable component) { + return startConnectablesAfterInitialization.contains(component) || startRemoteGroupPortsAfterInitialization.contains(component); + } + private ContentRepository createContentRepository(final NiFiProperties properties) throws InstantiationException, IllegalAccessException, ClassNotFoundException { final String implementationClassName = properties.getProperty(NiFiProperties.CONTENT_REPOSITORY_IMPLEMENTATION, DEFAULT_CONTENT_REPO_IMPLEMENTATION); if (implementationClassName == null) { @@ -1040,20 +1107,35 @@ public EventReporter getEventReporter() { swapManager.initialize(initializationContext); } - return builder.id(requireNonNull(id).intern()) + final FlowFileQueueFactory flowFileQueueFactory = new FlowFileQueueFactory() { + @Override + public FlowFileQueue createFlowFileQueue(final LoadBalanceStrategy loadBalanceStrategy, final String partitioningAttribute, final ConnectionEventListener eventListener) { + final FlowFileQueue flowFileQueue; + + if (clusterCoordinator == null) { + flowFileQueue = new StandardFlowFileQueue(id, eventListener, flowFileRepository, provenanceRepository, resourceClaimManager, processScheduler, swapManager, + eventReporter, nifiProperties.getQueueSwapThreshold(), nifiProperties.getDefaultBackPressureObjectThreshold(), nifiProperties.getDefaultBackPressureDataSizeThreshold()); + } else { + flowFileQueue = new SocketLoadBalancedFlowFileQueue(id, eventListener, processScheduler, flowFileRepository, provenanceRepository, contentRepository, resourceClaimManager, + clusterCoordinator, loadBalanceClientRegistry, swapManager, nifiProperties.getQueueSwapThreshold(), eventReporter); + + flowFileQueue.setBackPressureObjectThreshold(nifiProperties.getDefaultBackPressureObjectThreshold()); + flowFileQueue.setBackPressureDataSizeThreshold(nifiProperties.getDefaultBackPressureDataSizeThreshold()); + } + + return flowFileQueue; + } + }; + + final Connection connection = builder.id(requireNonNull(id).intern()) .name(name == null ? 
null : name.intern()) .relationships(relationships) .source(requireNonNull(source)) .destination(destination) - .swapManager(swapManager) - .queueSwapThreshold(nifiProperties.getQueueSwapThreshold()) - .defaultBackPressureObjectThreshold(nifiProperties.getDefaultBackPressureObjectThreshold()) - .defaultBackPressureDataSizeThreshold(nifiProperties.getDefaultBackPressureDataSizeThreshold()) - .eventReporter(eventReporter) - .resourceClaimManager(resourceClaimManager) - .flowFileRepository(flowFileRepository) - .provenanceRepository(provenanceRepository) + .flowFileQueueFactory(flowFileQueueFactory) .build(); + + return connection; } /** @@ -1561,6 +1643,11 @@ public void shutdown(final boolean kill) { zooKeeperStateServer.shutdown(); } + if (loadBalanceClientThreadPool != null) { + loadBalanceClientThreadPool.shutdownNow(); + } + loadBalanceClientTasks.forEach(NioAsyncLoadBalanceClientTask::stop); + // Trigger any processors' methods marked with @OnShutdown to be called getRootGroup().shutdown(); @@ -1606,6 +1693,14 @@ public void shutdown(final boolean kill) { listener.stop(); } + if (loadBalanceServer != null) { + loadBalanceServer.stop(); + } + + if (loadBalanceClientRegistry != null) { + loadBalanceClientRegistry.stop(); + } + if (processScheduler != null) { processScheduler.shutdown(); } @@ -2226,6 +2321,13 @@ private void instantiateSnippet(final ProcessGroup group, final FlowSnippetDTO d queue.setPriorities(newPrioritizers); } + final String loadBalanceStrategyName = connectionDTO.getLoadBalanceStrategy(); + if (loadBalanceStrategyName != null) { + final LoadBalanceStrategy loadBalanceStrategy = LoadBalanceStrategy.valueOf(loadBalanceStrategyName); + final String partitioningAttribute = connectionDTO.getLoadBalancePartitionAttribute(); + queue.setLoadBalanceStrategy(loadBalanceStrategy, partitioningAttribute); + } + connection.setProcessGroup(group); group.addConnection(connection); } @@ -2737,6 +2839,10 @@ public ProcessorNode getProcessorNode(final String id) { public void onConnectionAdded(final Connection connection) { allConnections.put(connection.getIdentifier(), connection); + + if (isInitialized()) { + connection.getFlowFileQueue().startLoadBalancing(); + } } public void onConnectionRemoved(final Connection connection) { @@ -3494,6 +3600,19 @@ public void startTransmitting(final RemoteGroupPort remoteGroupPort) { } } + public void stopTransmitting(final RemoteGroupPort remoteGroupPort) { + writeLock.lock(); + try { + if (initialized.get()) { + remoteGroupPort.getRemoteProcessGroup().stopTransmitting(remoteGroupPort); + } else { + startRemoteGroupPortsAfterInitialization.remove(remoteGroupPort); + } + } finally { + writeLock.unlock("stopTransmitting"); + } + } + public void stopProcessor(final String parentGroupId, final String processorId) { final ProcessGroup group = lookupGroup(parentGroupId); final ProcessorNode node = group.getProcessor(processorId); @@ -4344,10 +4463,11 @@ public void setClustered(final boolean clustered, final String clusterInstanceId leaderElectionManager.start(); stateManagerProvider.enableClusterProvider(); + loadBalanceClientRegistry.start(); + heartbeat(); } else { stateManagerProvider.disableClusterProvider(); - setPrimary(false); } @@ -4369,6 +4489,8 @@ public void setClustered(final boolean clustered, final String clusterInstanceId } } + + /** * @return true if this instance is the primary node in the cluster; false * otherwise diff --git 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowFileQueue.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowFileQueue.java deleted file mode 100644 index 5eab4d9eddd4..000000000000 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowFileQueue.java +++ /dev/null @@ -1,1572 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.nifi.controller; - -import org.apache.nifi.connectable.Connection; -import org.apache.nifi.controller.queue.DropFlowFileState; -import org.apache.nifi.controller.queue.DropFlowFileStatus; -import org.apache.nifi.controller.queue.FlowFileQueue; -import org.apache.nifi.controller.queue.FlowFileSummary; -import org.apache.nifi.controller.queue.ListFlowFileRequest; -import org.apache.nifi.controller.queue.ListFlowFileState; -import org.apache.nifi.controller.queue.ListFlowFileStatus; -import org.apache.nifi.controller.queue.QueueSize; -import org.apache.nifi.controller.repository.FlowFileRecord; -import org.apache.nifi.controller.repository.FlowFileRepository; -import org.apache.nifi.controller.repository.FlowFileSwapManager; -import org.apache.nifi.controller.repository.IncompleteSwapFileException; -import org.apache.nifi.controller.repository.RepositoryRecord; -import org.apache.nifi.controller.repository.RepositoryRecordType; -import org.apache.nifi.controller.repository.SwapContents; -import org.apache.nifi.controller.repository.SwapSummary; -import org.apache.nifi.controller.repository.claim.ContentClaim; -import org.apache.nifi.controller.repository.claim.ResourceClaim; -import org.apache.nifi.controller.repository.claim.ResourceClaimManager; -import org.apache.nifi.controller.swap.StandardSwapSummary; -import org.apache.nifi.events.EventReporter; -import org.apache.nifi.flowfile.FlowFile; -import org.apache.nifi.flowfile.FlowFilePrioritizer; -import org.apache.nifi.flowfile.attributes.CoreAttributes; -import org.apache.nifi.processor.DataUnit; -import org.apache.nifi.processor.FlowFileFilter; -import org.apache.nifi.processor.FlowFileFilter.FlowFileFilterResult; -import org.apache.nifi.provenance.ProvenanceEventBuilder; -import org.apache.nifi.provenance.ProvenanceEventRecord; -import org.apache.nifi.provenance.ProvenanceEventRepository; -import org.apache.nifi.provenance.ProvenanceEventType; -import org.apache.nifi.reporting.Severity; -import org.apache.nifi.scheduling.SchedulingStrategy; -import org.apache.nifi.util.FormatUtils; -import org.apache.nifi.util.concurrency.TimedLock; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import 
java.io.FileNotFoundException; -import java.io.IOException; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.PriorityQueue; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/** - * A FlowFileQueue is used to queue FlowFile objects that are awaiting further - * processing. Must be thread safe. - * - */ -public class StandardFlowFileQueue implements FlowFileQueue { - - public static final int MAX_EXPIRED_RECORDS_PER_ITERATION = 100000; - public static final int SWAP_RECORD_POLL_SIZE = 10000; - - private static final Logger logger = LoggerFactory.getLogger(StandardFlowFileQueue.class); - - private PriorityQueue activeQueue = null; - - // guarded by lock - private ArrayList swapQueue = null; - - private final AtomicReference size = new AtomicReference<>(new FlowFileQueueSize(0, 0L, 0, 0L, 0, 0, 0L)); - - private boolean swapMode = false; - - private final AtomicReference maxQueueSize = new AtomicReference<>(); - private final AtomicReference expirationPeriod = new AtomicReference<>(new TimePeriod("0 mins", 0L)); - - private final EventReporter eventReporter; - private final Connection connection; - private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true); - private final List priorities; - private final int swapThreshold; - private final FlowFileSwapManager swapManager; - private final List swapLocations = new ArrayList<>(); - private final TimedLock readLock; - private final TimedLock writeLock; - private final String identifier; - private final FlowFileRepository flowFileRepository; - private final ProvenanceEventRepository provRepository; - private final ResourceClaimManager resourceClaimManager; - - private final ConcurrentMap dropRequestMap = new ConcurrentHashMap<>(); - private final ConcurrentMap listRequestMap = new ConcurrentHashMap<>(); - - // SCHEDULER CANNOT BE NOTIFIED OF EVENTS WITH THE WRITE LOCK HELD! DOING SO WILL RESULT IN A DEADLOCK! 
- private final ProcessScheduler scheduler; - - public StandardFlowFileQueue(final String identifier, final Connection connection, final FlowFileRepository flowFileRepo, final ProvenanceEventRepository provRepo, - final ResourceClaimManager resourceClaimManager, final ProcessScheduler scheduler, final FlowFileSwapManager swapManager, final EventReporter eventReporter, - final int swapThreshold, final long defaultBackPressureObjectThreshold, final String defaultBackPressureDataSizeThreshold) { - activeQueue = new PriorityQueue<>(20, new Prioritizer(new ArrayList())); - priorities = new ArrayList<>(); - swapQueue = new ArrayList<>(); - this.eventReporter = eventReporter; - this.swapManager = swapManager; - this.flowFileRepository = flowFileRepo; - this.provRepository = provRepo; - this.resourceClaimManager = resourceClaimManager; - - this.identifier = identifier; - this.swapThreshold = swapThreshold; - this.scheduler = scheduler; - this.connection = connection; - - readLock = new TimedLock(this.lock.readLock(), identifier + " Read Lock", 100); - writeLock = new TimedLock(this.lock.writeLock(), identifier + " Write Lock", 100); - - final MaxQueueSize initialMaxQueueSize = new MaxQueueSize(defaultBackPressureDataSizeThreshold, - DataUnit.parseDataSize(defaultBackPressureDataSizeThreshold, DataUnit.B).longValue(), defaultBackPressureObjectThreshold); - this.maxQueueSize.set(initialMaxQueueSize); - } - - @Override - public String getIdentifier() { - return identifier; - } - - @Override - public List getPriorities() { - return Collections.unmodifiableList(priorities); - } - - @Override - public void setPriorities(final List newPriorities) { - writeLock.lock(); - try { - final PriorityQueue newQueue = new PriorityQueue<>(Math.max(20, activeQueue.size()), new Prioritizer(newPriorities)); - newQueue.addAll(activeQueue); - activeQueue = newQueue; - priorities.clear(); - priorities.addAll(newPriorities); - } finally { - writeLock.unlock("setPriorities"); - } - } - - @Override - public void setBackPressureObjectThreshold(final long threshold) { - boolean updated = false; - while (!updated) { - MaxQueueSize maxSize = maxQueueSize.get(); - final MaxQueueSize updatedSize = new MaxQueueSize(maxSize.getMaxSize(), maxSize.getMaxBytes(), threshold); - updated = maxQueueSize.compareAndSet(maxSize, updatedSize); - } - } - - @Override - public long getBackPressureObjectThreshold() { - return maxQueueSize.get().getMaxCount(); - } - - @Override - public void setBackPressureDataSizeThreshold(final String maxDataSize) { - final long maxBytes = DataUnit.parseDataSize(maxDataSize, DataUnit.B).longValue(); - - boolean updated = false; - while (!updated) { - MaxQueueSize maxSize = maxQueueSize.get(); - final MaxQueueSize updatedSize = new MaxQueueSize(maxDataSize, maxBytes, maxSize.getMaxCount()); - updated = maxQueueSize.compareAndSet(maxSize, updatedSize); - } - } - - @Override - public String getBackPressureDataSizeThreshold() { - return maxQueueSize.get().getMaxSize(); - } - - @Override - public QueueSize size() { - return getQueueSize(); - } - - - private QueueSize getQueueSize() { - return size.get().toQueueSize(); - } - - @Override - public boolean isEmpty() { - return size.get().isEmpty(); - } - - @Override - public boolean isActiveQueueEmpty() { - final FlowFileQueueSize queueSize = size.get(); - return queueSize.activeQueueCount == 0 && queueSize.swappedCount == 0; - } - - @Override - public QueueSize getActiveQueueSize() { - return size.get().activeQueueSize(); - } - - @Override - public QueueSize 
getSwapQueueSize() { - return size.get().swapQueueSize(); - } - - @Override - public int getSwapFileCount() { - readLock.lock(); - try { - return this.swapLocations.size(); - } finally { - readLock.unlock("getSwapFileCount"); - } - } - - @Override - public boolean isAllActiveFlowFilesPenalized() { - readLock.lock(); - try { - // If there are no elements then we return false - if (activeQueue.isEmpty()) { - return false; - } - - // If the first element on the queue is penalized, then we know they all are, - // because our Comparator will put Penalized FlowFiles at the end. If the first - // FlowFile is not penalized, then we also know that they are not all penalized, - // so we can simplify this by looking solely at the first FlowFile in the queue. - final FlowFileRecord first = activeQueue.peek(); - return first.isPenalized(); - } finally { - readLock.unlock("isAllActiveFlowFilesPenalized"); - } - } - - @Override - public boolean isAnyActiveFlowFilePenalized() { - readLock.lock(); - try { - return activeQueue.stream().anyMatch(FlowFileRecord::isPenalized); - } finally { - readLock.unlock("isAnyActiveFlowFilePenalized"); - } - } - - @Override - public void acknowledge(final FlowFileRecord flowFile) { - incrementUnacknowledgedQueueSize(-1, -flowFile.getSize()); - - if (connection.getSource().getSchedulingStrategy() == SchedulingStrategy.EVENT_DRIVEN) { - // queue was full but no longer is. Notify that the source may now be available to run, - // because of back pressure caused by this queue. - scheduler.registerEvent(connection.getSource()); - } - } - - @Override - public void acknowledge(final Collection flowFiles) { - long totalSize = 0L; - for (final FlowFileRecord flowFile : flowFiles) { - totalSize += flowFile.getSize(); - } - - incrementUnacknowledgedQueueSize(-flowFiles.size(), -totalSize); - - if (connection.getSource().getSchedulingStrategy() == SchedulingStrategy.EVENT_DRIVEN) { - // it's possible that queue was full but no longer is. Notify that the source may now be available to run, - // because of back pressure caused by this queue. 
- scheduler.registerEvent(connection.getSource()); - } - } - - @Override - public boolean isFull() { - final MaxQueueSize maxSize = maxQueueSize.get(); - - // Check if max size is set - if (maxSize.getMaxBytes() <= 0 && maxSize.getMaxCount() <= 0) { - return false; - } - - final QueueSize queueSize = getQueueSize(); - if (maxSize.getMaxCount() > 0 && queueSize.getObjectCount() >= maxSize.getMaxCount()) { - return true; - } - - if (maxSize.getMaxBytes() > 0 && queueSize.getByteCount() >= maxSize.getMaxBytes()) { - return true; - } - - return false; - } - - - @Override - public void put(final FlowFileRecord file) { - writeLock.lock(); - try { - if (swapMode || activeQueue.size() >= swapThreshold) { - swapQueue.add(file); - incrementSwapQueueSize(1, file.getSize(), 0); - swapMode = true; - writeSwapFilesIfNecessary(); - } else { - incrementActiveQueueSize(1, file.getSize()); - activeQueue.add(file); - } - } finally { - writeLock.unlock("put(FlowFileRecord)"); - } - - if (connection.getDestination().getSchedulingStrategy() == SchedulingStrategy.EVENT_DRIVEN) { - scheduler.registerEvent(connection.getDestination()); - } - } - - @Override - public void putAll(final Collection files) { - final int numFiles = files.size(); - long bytes = 0L; - for (final FlowFile flowFile : files) { - bytes += flowFile.getSize(); - } - - writeLock.lock(); - try { - if (swapMode || activeQueue.size() >= swapThreshold - numFiles) { - swapQueue.addAll(files); - incrementSwapQueueSize(numFiles, bytes, 0); - swapMode = true; - writeSwapFilesIfNecessary(); - } else { - incrementActiveQueueSize(numFiles, bytes); - activeQueue.addAll(files); - } - } finally { - writeLock.unlock("putAll"); - } - - if (connection.getDestination().getSchedulingStrategy() == SchedulingStrategy.EVENT_DRIVEN) { - scheduler.registerEvent(connection.getDestination()); - } - } - - - private boolean isLaterThan(final Long maxAge) { - if (maxAge == null) { - return false; - } - return maxAge < System.currentTimeMillis(); - } - - private Long getExpirationDate(final FlowFile flowFile, final long expirationMillis) { - if (flowFile == null) { - return null; - } - if (expirationMillis <= 0) { - return null; - } else { - final long entryDate = flowFile.getEntryDate(); - final long expirationDate = entryDate + expirationMillis; - return expirationDate; - } - } - - @Override - public FlowFileRecord poll(final Set expiredRecords) { - FlowFileRecord flowFile = null; - - // First check if we have any records Pre-Fetched. 
- final long expirationMillis = expirationPeriod.get().getMillis(); - writeLock.lock(); - try { - flowFile = doPoll(expiredRecords, expirationMillis); - return flowFile; - } finally { - writeLock.unlock("poll(Set)"); - - if (flowFile != null) { - incrementUnacknowledgedQueueSize(1, flowFile.getSize()); - } - } - } - - private FlowFileRecord doPoll(final Set expiredRecords, final long expirationMillis) { - FlowFileRecord flowFile; - boolean isExpired; - - migrateSwapToActive(); - - long expiredBytes = 0L; - do { - flowFile = this.activeQueue.poll(); - - isExpired = isLaterThan(getExpirationDate(flowFile, expirationMillis)); - if (isExpired) { - expiredRecords.add(flowFile); - expiredBytes += flowFile.getSize(); - flowFile = null; - - if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) { - break; - } - } else if (flowFile != null && flowFile.isPenalized()) { - this.activeQueue.add(flowFile); - flowFile = null; - break; - } - - if (flowFile != null) { - incrementActiveQueueSize(-1, -flowFile.getSize()); - } - } - while (isExpired); - - if (!expiredRecords.isEmpty()) { - incrementActiveQueueSize(-expiredRecords.size(), -expiredBytes); - } - - return flowFile; - } - - @Override - public List poll(int maxResults, final Set expiredRecords) { - final List records = new ArrayList<>(Math.min(1024, maxResults)); - - // First check if we have any records Pre-Fetched. - writeLock.lock(); - try { - doPoll(records, maxResults, expiredRecords); - } finally { - writeLock.unlock("poll(int, Set)"); - } - return records; - } - - private void doPoll(final List records, int maxResults, final Set expiredRecords) { - migrateSwapToActive(); - - final long bytesDrained = drainQueue(activeQueue, records, maxResults, expiredRecords); - - long expiredBytes = 0L; - for (final FlowFileRecord record : expiredRecords) { - expiredBytes += record.getSize(); - } - - incrementActiveQueueSize(-(expiredRecords.size() + records.size()), -bytesDrained); - incrementUnacknowledgedQueueSize(records.size(), bytesDrained - expiredBytes); - } - - /** - * If there are FlowFiles waiting on the swap queue, move them to the active - * queue until we meet our threshold. This prevents us from having to swap - * them to disk & then back out. - * - * This method MUST be called with the writeLock held. - */ - private void migrateSwapToActive() { - // Migrate as many FlowFiles as we can from the Swap Queue to the Active Queue, so that we don't - // have to swap them out & then swap them back in. - // If we don't do this, we could get into a situation where we have potentially thousands of FlowFiles - // sitting on the Swap Queue but not getting processed because there aren't enough to be swapped out. - // In particular, this can happen if the queue is typically filled with surges. - // For example, if the queue has 25,000 FlowFiles come in, it may process 20,000 of them and leave - // 5,000 sitting on the Swap Queue. If it then takes an hour for an additional 5,000 FlowFiles to come in, - // those FlowFiles sitting on the Swap Queue will sit there for an hour, waiting to be swapped out and - // swapped back in again. - // Calling this method when records are polled prevents this condition by migrating FlowFiles from the - // Swap Queue to the Active Queue. However, we don't do this if there are FlowFiles already swapped out - // to disk, because we want them to be swapped back in in the same order that they were swapped out. 
- - final int activeQueueSize = activeQueue.size(); - if (activeQueueSize > 0 && activeQueueSize > swapThreshold - SWAP_RECORD_POLL_SIZE) { - return; - } - - // If there are swap files waiting to be swapped in, swap those in first. We do this in order to ensure that those that - // were swapped out first are then swapped back in first. If we instead just immediately migrated the FlowFiles from the - // swap queue to the active queue, and we never run out of FlowFiles in the active queue (because destination cannot - // keep up with queue), we will end up always processing the new FlowFiles first instead of the FlowFiles that arrived - // first. - if (!swapLocations.isEmpty()) { - final String swapLocation = swapLocations.get(0); - boolean partialContents = false; - SwapContents swapContents = null; - try { - swapContents = swapManager.swapIn(swapLocation, this); - swapLocations.remove(0); - } catch (final IncompleteSwapFileException isfe) { - logger.error("Failed to swap in all FlowFiles from Swap File {}; Swap File ended prematurely. The records that were present will still be swapped in", swapLocation); - logger.error("", isfe); - swapContents = isfe.getPartialContents(); - partialContents = true; - swapLocations.remove(0); - } catch (final FileNotFoundException fnfe) { - logger.error("Failed to swap in FlowFiles from Swap File {} because the Swap File can no longer be found", swapLocation); - if (eventReporter != null) { - eventReporter.reportEvent(Severity.ERROR, "Swap File", "Failed to swap in FlowFiles from Swap File " + swapLocation + " because the Swap File can no longer be found"); - } - - swapLocations.remove(0); - return; - } catch (final IOException ioe) { - logger.error("Failed to swap in FlowFiles from Swap File {}; Swap File appears to be corrupt!", swapLocation); - logger.error("", ioe); - if (eventReporter != null) { - eventReporter.reportEvent(Severity.ERROR, "Swap File", "Failed to swap in FlowFiles from Swap File " + - swapLocation + "; Swap File appears to be corrupt! Some FlowFiles in the queue may not be accessible. See logs for more information."); - } - - // We do not remove the Swap File from swapLocations because the IOException may be recoverable later. For instance, the file may be on a network - // drive and we may have connectivity problems, etc. - return; - } catch (final Throwable t) { - logger.error("Failed to swap in FlowFiles from Swap File {}", swapLocation, t); - - // We do not remove the Swap File from swapLocations because this is an unexpected failure that may be retry-able. For example, if there were - // an OOME, etc. then we don't want to he queue to still reflect that the data is around but never swap it in. By leaving the Swap File - // in swapLocations, we will continue to retry. - throw t; - } - - final QueueSize swapSize = swapContents.getSummary().getQueueSize(); - final long contentSize = swapSize.getByteCount(); - final int flowFileCount = swapSize.getObjectCount(); - incrementSwapQueueSize(-flowFileCount, -contentSize, -1); - - if (partialContents) { - // if we have partial results, we need to calculate the content size of the flowfiles - // actually swapped back in. - long contentSizeSwappedIn = 0L; - for (final FlowFileRecord swappedIn : swapContents.getFlowFiles()) { - contentSizeSwappedIn += swappedIn.getSize(); - } - - incrementActiveQueueSize(swapContents.getFlowFiles().size(), contentSizeSwappedIn); - } else { - // we swapped in the whole swap file. We can just use the info that we got from the summary. 
- incrementActiveQueueSize(flowFileCount, contentSize); - } - - activeQueue.addAll(swapContents.getFlowFiles()); - return; - } - - // this is the most common condition (nothing is swapped out), so do the check first and avoid the expense - // of other checks for 99.999% of the cases. - if (size.get().swappedCount == 0 && swapQueue.isEmpty()) { - return; - } - - if (size.get().swappedCount > swapQueue.size()) { - // we already have FlowFiles swapped out, so we won't migrate the queue; we will wait for - // the files to be swapped back in first - return; - } - - int recordsMigrated = 0; - long bytesMigrated = 0L; - final Iterator swapItr = swapQueue.iterator(); - while (activeQueue.size() < swapThreshold && swapItr.hasNext()) { - final FlowFileRecord toMigrate = swapItr.next(); - activeQueue.add(toMigrate); - bytesMigrated += toMigrate.getSize(); - recordsMigrated++; - swapItr.remove(); - } - - if (recordsMigrated > 0) { - incrementActiveQueueSize(recordsMigrated, bytesMigrated); - incrementSwapQueueSize(-recordsMigrated, -bytesMigrated, 0); - } - - if (size.get().swappedCount == 0) { - swapMode = false; - } - } - - /** - * This method MUST be called with the write lock held - */ - private void writeSwapFilesIfNecessary() { - if (swapQueue.size() < SWAP_RECORD_POLL_SIZE) { - return; - } - - migrateSwapToActive(); - - final int numSwapFiles = swapQueue.size() / SWAP_RECORD_POLL_SIZE; - - int originalSwapQueueCount = swapQueue.size(); - long originalSwapQueueBytes = 0L; - for (final FlowFileRecord flowFile : swapQueue) { - originalSwapQueueBytes += flowFile.getSize(); - } - - // Create a new Priority queue with the prioritizers that are set, but reverse the - // prioritizers because we want to pull the lowest-priority FlowFiles to swap out - final PriorityQueue tempQueue = new PriorityQueue<>(activeQueue.size() + swapQueue.size(), Collections.reverseOrder(new Prioritizer(priorities))); - tempQueue.addAll(activeQueue); - tempQueue.addAll(swapQueue); - - long bytesSwappedOut = 0L; - int flowFilesSwappedOut = 0; - final List swapLocations = new ArrayList<>(numSwapFiles); - for (int i = 0; i < numSwapFiles; i++) { - // Create a new swap file for the next SWAP_RECORD_POLL_SIZE records - final List toSwap = new ArrayList<>(SWAP_RECORD_POLL_SIZE); - for (int j = 0; j < SWAP_RECORD_POLL_SIZE; j++) { - final FlowFileRecord flowFile = tempQueue.poll(); - toSwap.add(flowFile); - bytesSwappedOut += flowFile.getSize(); - flowFilesSwappedOut++; - } - - try { - Collections.reverse(toSwap); // currently ordered in reverse priority order based on the ordering of the temp queue. - final String swapLocation = swapManager.swapOut(toSwap, this); - swapLocations.add(swapLocation); - } catch (final IOException ioe) { - tempQueue.addAll(toSwap); // if we failed, we must add the FlowFiles back to the queue. - logger.error("FlowFile Queue with identifier {} has {} FlowFiles queued up. Attempted to spill FlowFile information over to disk in order to avoid exhausting " - + "the Java heap space but failed to write information to disk due to {}", getIdentifier(), getQueueSize().getObjectCount(), ioe.toString()); - logger.error("", ioe); - if (eventReporter != null) { - eventReporter.reportEvent(Severity.ERROR, "Failed to Overflow to Disk", "Flowfile Queue with identifier " + getIdentifier() + " has " + getQueueSize().getObjectCount() + - " queued up. Attempted to spill FlowFile information over to disk in order to avoid exhausting the Java heap space but failed to write information to disk. 
" - + "See logs for more information."); - } - - break; - } - } - - // Pull any records off of the temp queue that won't fit back on the active queue, and add those to the - // swap queue. Then add the records back to the active queue. - swapQueue.clear(); - long updatedSwapQueueBytes = 0L; - while (tempQueue.size() > swapThreshold) { - final FlowFileRecord record = tempQueue.poll(); - swapQueue.add(record); - updatedSwapQueueBytes += record.getSize(); - } - - Collections.reverse(swapQueue); // currently ordered in reverse priority order based on the ordering of the temp queue - - // replace the contents of the active queue, since we've merged it with the swap queue. - activeQueue.clear(); - FlowFileRecord toRequeue; - long activeQueueBytes = 0L; - while ((toRequeue = tempQueue.poll()) != null) { - activeQueue.offer(toRequeue); - activeQueueBytes += toRequeue.getSize(); - } - - boolean updated = false; - while (!updated) { - final FlowFileQueueSize originalSize = size.get(); - - final int addedSwapRecords = swapQueue.size() - originalSwapQueueCount; - final long addedSwapBytes = updatedSwapQueueBytes - originalSwapQueueBytes; - - final FlowFileQueueSize newSize = new FlowFileQueueSize(activeQueue.size(), activeQueueBytes, - originalSize.swappedCount + addedSwapRecords + flowFilesSwappedOut, - originalSize.swappedBytes + addedSwapBytes + bytesSwappedOut, - originalSize.swapFiles + numSwapFiles, - originalSize.unacknowledgedCount, originalSize.unacknowledgedBytes); - updated = size.compareAndSet(originalSize, newSize); - } - - this.swapLocations.addAll(swapLocations); - } - - - @Override - public long drainQueue(final Queue sourceQueue, final List destination, int maxResults, final Set expiredRecords) { - long drainedSize = 0L; - FlowFileRecord pulled = null; - - final long expirationMillis = expirationPeriod.get().getMillis(); - while (destination.size() < maxResults && (pulled = sourceQueue.poll()) != null) { - if (isLaterThan(getExpirationDate(pulled, expirationMillis))) { - expiredRecords.add(pulled); - if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) { - break; - } - } else { - if (pulled.isPenalized()) { - sourceQueue.add(pulled); - break; - } - destination.add(pulled); - } - drainedSize += pulled.getSize(); - } - return drainedSize; - } - - @Override - public List poll(final FlowFileFilter filter, final Set expiredRecords) { - long bytesPulled = 0L; - int flowFilesPulled = 0; - - writeLock.lock(); - try { - migrateSwapToActive(); - - final long expirationMillis = expirationPeriod.get().getMillis(); - - final List selectedFlowFiles = new ArrayList<>(); - final List unselected = new ArrayList<>(); - - while (true) { - FlowFileRecord flowFile = this.activeQueue.poll(); - if (flowFile == null) { - break; - } - - final boolean isExpired = isLaterThan(getExpirationDate(flowFile, expirationMillis)); - if (isExpired) { - expiredRecords.add(flowFile); - bytesPulled += flowFile.getSize(); - flowFilesPulled++; - - if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) { - break; - } else { - continue; - } - } else if (flowFile.isPenalized()) { - this.activeQueue.add(flowFile); - flowFile = null; - break; // just stop searching because the rest are all penalized. 
- } - - final FlowFileFilterResult result = filter.filter(flowFile); - if (result.isAccept()) { - bytesPulled += flowFile.getSize(); - flowFilesPulled++; - - incrementUnacknowledgedQueueSize(1, flowFile.getSize()); - selectedFlowFiles.add(flowFile); - } else { - unselected.add(flowFile); - } - - if (!result.isContinue()) { - break; - } - } - - this.activeQueue.addAll(unselected); - incrementActiveQueueSize(-flowFilesPulled, -bytesPulled); - - return selectedFlowFiles; - } finally { - writeLock.unlock("poll(Filter, Set)"); - } - } - - - - private static final class Prioritizer implements Comparator, Serializable { - - private static final long serialVersionUID = 1L; - private final transient List prioritizers = new ArrayList<>(); - - private Prioritizer(final List priorities) { - if (null != priorities) { - prioritizers.addAll(priorities); - } - } - - @Override - public int compare(final FlowFileRecord f1, final FlowFileRecord f2) { - int returnVal = 0; - final boolean f1Penalized = f1.isPenalized(); - final boolean f2Penalized = f2.isPenalized(); - - if (f1Penalized && !f2Penalized) { - return 1; - } else if (!f1Penalized && f2Penalized) { - return -1; - } - - if (f1Penalized && f2Penalized) { - if (f1.getPenaltyExpirationMillis() < f2.getPenaltyExpirationMillis()) { - return -1; - } else if (f1.getPenaltyExpirationMillis() > f2.getPenaltyExpirationMillis()) { - return 1; - } - } - - if (!prioritizers.isEmpty()) { - for (final FlowFilePrioritizer prioritizer : prioritizers) { - returnVal = prioritizer.compare(f1, f2); - if (returnVal != 0) { - return returnVal; - } - } - } - - final ContentClaim claim1 = f1.getContentClaim(); - final ContentClaim claim2 = f2.getContentClaim(); - - // put the one without a claim first - if (claim1 == null && claim2 != null) { - return -1; - } else if (claim1 != null && claim2 == null) { - return 1; - } else if (claim1 != null && claim2 != null) { - final int claimComparison = claim1.compareTo(claim2); - if (claimComparison != 0) { - return claimComparison; - } - - final int claimOffsetComparison = Long.compare(f1.getContentClaimOffset(), f2.getContentClaimOffset()); - if (claimOffsetComparison != 0) { - return claimOffsetComparison; - } - } - - return Long.compare(f1.getId(), f2.getId()); - } - } - - @Override - public String getFlowFileExpiration() { - return expirationPeriod.get().getPeriod(); - } - - @Override - public int getFlowFileExpiration(final TimeUnit timeUnit) { - return (int) timeUnit.convert(expirationPeriod.get().getMillis(), TimeUnit.MILLISECONDS); - } - - @Override - public void setFlowFileExpiration(final String flowExpirationPeriod) { - final long millis = FormatUtils.getTimeDuration(flowExpirationPeriod, TimeUnit.MILLISECONDS); - if (millis < 0) { - throw new IllegalArgumentException("FlowFile Expiration Period must be positive"); - } - - expirationPeriod.set(new TimePeriod(flowExpirationPeriod, millis)); - } - - - @Override - public void purgeSwapFiles() { - swapManager.purge(); - } - - @Override - public SwapSummary recoverSwappedFlowFiles() { - int swapFlowFileCount = 0; - long swapByteCount = 0L; - Long maxId = null; - List resourceClaims = new ArrayList<>(); - final long startNanos = System.nanoTime(); - - writeLock.lock(); - try { - final List swapLocations; - try { - swapLocations = swapManager.recoverSwapLocations(this); - } catch (final IOException ioe) { - logger.error("Failed to determine whether or not any Swap Files exist for FlowFile Queue {}", getIdentifier()); - logger.error("", ioe); - if (eventReporter != null) { - 
eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to determine whether or not any Swap Files exist for FlowFile Queue " + - getIdentifier() + "; see logs for more details"); - } - return null; - } - - for (final String swapLocation : swapLocations) { - try { - final SwapSummary summary = swapManager.getSwapSummary(swapLocation); - final QueueSize queueSize = summary.getQueueSize(); - final Long maxSwapRecordId = summary.getMaxFlowFileId(); - if (maxSwapRecordId != null) { - if (maxId == null || maxSwapRecordId > maxId) { - maxId = maxSwapRecordId; - } - } - - swapFlowFileCount += queueSize.getObjectCount(); - swapByteCount += queueSize.getByteCount(); - resourceClaims.addAll(summary.getResourceClaims()); - } catch (final IOException ioe) { - logger.error("Failed to recover FlowFiles from Swap File {}; the file appears to be corrupt", swapLocation, ioe.toString()); - logger.error("", ioe); - if (eventReporter != null) { - eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to recover FlowFiles from Swap File " + swapLocation + - "; the file appears to be corrupt. See logs for more details"); - } - } - } - - incrementSwapQueueSize(swapFlowFileCount, swapByteCount, swapLocations.size()); - this.swapLocations.addAll(swapLocations); - } finally { - writeLock.unlock("Recover Swap Files"); - } - - if (!swapLocations.isEmpty()) { - final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos); - logger.info("Recovered {} swap files for {} in {} millis", swapLocations.size(), this, millis); - } - - return new StandardSwapSummary(new QueueSize(swapFlowFileCount, swapByteCount), maxId, resourceClaims); - } - - - @Override - public String toString() { - return "FlowFileQueue[id=" + identifier + "]"; - } - - - @Override - public ListFlowFileStatus listFlowFiles(final String requestIdentifier, final int maxResults) { - // purge any old requests from the map just to keep it clean. But if there are very few requests, which is usually the case, then don't bother - if (listRequestMap.size() > 10) { - final List toDrop = new ArrayList<>(); - for (final Map.Entry entry : listRequestMap.entrySet()) { - final ListFlowFileRequest request = entry.getValue(); - final boolean completed = request.getState() == ListFlowFileState.COMPLETE || request.getState() == ListFlowFileState.FAILURE; - - if (completed && System.currentTimeMillis() - request.getLastUpdated() > TimeUnit.MINUTES.toMillis(5L)) { - toDrop.add(entry.getKey()); - } - } - - for (final String requestId : toDrop) { - listRequestMap.remove(requestId); - } - } - - // numSteps = 1 for each swap location + 1 for active queue + 1 for swap queue. - final ListFlowFileRequest listRequest = new ListFlowFileRequest(requestIdentifier, maxResults, size()); - - final Thread t = new Thread(new Runnable() { - @Override - public void run() { - int position = 0; - int resultCount = 0; - final List summaries = new ArrayList<>(); - - // Create an ArrayList that contains all of the contents of the active queue. - // We do this so that we don't have to hold the lock any longer than absolutely necessary. - // We cannot simply pull the first 'maxResults' records from the queue, however, because the - // Iterator provided by PriorityQueue does not return records in order. So we would have to either - // use a writeLock and 'pop' the first 'maxResults' records off the queue or use a read lock and - // do a shallow copy of the queue. 
The shallow copy is generally quicker because it doesn't have to do - // the sorting to put the records back. So even though this has an expensive of Java Heap to create the - // extra collection, we are making this trade-off to avoid locking the queue any longer than required. - final List allFlowFiles; - final Prioritizer prioritizer; - readLock.lock(); - try { - logger.debug("{} Acquired lock to perform listing of FlowFiles", StandardFlowFileQueue.this); - allFlowFiles = new ArrayList<>(activeQueue); - prioritizer = new Prioritizer(StandardFlowFileQueue.this.priorities); - } finally { - readLock.unlock("List FlowFiles"); - } - - listRequest.setState(ListFlowFileState.CALCULATING_LIST); - - // sort the FlowFileRecords so that we have the list in the same order as on the queue. - Collections.sort(allFlowFiles, prioritizer); - - for (final FlowFileRecord flowFile : allFlowFiles) { - summaries.add(summarize(flowFile, ++position)); - if (summaries.size() >= maxResults) { - break; - } - } - - logger.debug("{} Finished listing FlowFiles for active queue with a total of {} results", StandardFlowFileQueue.this, resultCount); - listRequest.setFlowFileSummaries(summaries); - listRequest.setState(ListFlowFileState.COMPLETE); - } - }, "List FlowFiles for Connection " + getIdentifier()); - t.setDaemon(true); - t.start(); - - listRequestMap.put(requestIdentifier, listRequest); - return listRequest; - } - - private FlowFileSummary summarize(final FlowFile flowFile, final int position) { - // extract all of the information that we care about into new variables rather than just - // wrapping the FlowFile object with a FlowFileSummary object. We do this because we want to - // be able to hold many FlowFileSummary objects in memory and if we just wrap the FlowFile object, - // we will end up holding the entire FlowFile (including all Attributes) in the Java heap as well, - // which can be problematic if we expect them to be swapped out. - final String uuid = flowFile.getAttribute(CoreAttributes.UUID.key()); - final String filename = flowFile.getAttribute(CoreAttributes.FILENAME.key()); - final long size = flowFile.getSize(); - final Long lastQueuedTime = flowFile.getLastQueueDate(); - final long lineageStart = flowFile.getLineageStartDate(); - final boolean penalized = flowFile.isPenalized(); - - return new FlowFileSummary() { - @Override - public String getUuid() { - return uuid; - } - - @Override - public String getFilename() { - return filename; - } - - @Override - public int getPosition() { - return position; - } - - @Override - public long getSize() { - return size; - } - - @Override - public long getLastQueuedTime() { - return lastQueuedTime == null ? 
0L : lastQueuedTime; - } - - @Override - public long getLineageStartDate() { - return lineageStart; - } - - @Override - public boolean isPenalized() { - return penalized; - } - }; - } - - - @Override - public ListFlowFileStatus getListFlowFileStatus(final String requestIdentifier) { - return listRequestMap.get(requestIdentifier); - } - - @Override - public ListFlowFileStatus cancelListFlowFileRequest(final String requestIdentifier) { - logger.info("Canceling ListFlowFile Request with ID {}", requestIdentifier); - final ListFlowFileRequest request = listRequestMap.remove(requestIdentifier); - if (request != null) { - request.cancel(); - } - - return request; - } - - @Override - public FlowFileRecord getFlowFile(final String flowFileUuid) throws IOException { - if (flowFileUuid == null) { - return null; - } - - readLock.lock(); - try { - // read through all of the FlowFiles in the queue, looking for the FlowFile with the given ID - for (final FlowFileRecord flowFile : activeQueue) { - if (flowFileUuid.equals(flowFile.getAttribute(CoreAttributes.UUID.key()))) { - return flowFile; - } - } - } finally { - readLock.unlock("getFlowFile"); - } - - return null; - } - - - @Override - public void verifyCanList() throws IllegalStateException { - } - - @Override - public DropFlowFileStatus dropFlowFiles(final String requestIdentifier, final String requestor) { - logger.info("Initiating drop of FlowFiles from {} on behalf of {} (request identifier={})", this, requestor, requestIdentifier); - - // purge any old requests from the map just to keep it clean. But if there are very requests, which is usually the case, then don't bother - if (dropRequestMap.size() > 10) { - final List toDrop = new ArrayList<>(); - for (final Map.Entry entry : dropRequestMap.entrySet()) { - final DropFlowFileRequest request = entry.getValue(); - final boolean completed = request.getState() == DropFlowFileState.COMPLETE || request.getState() == DropFlowFileState.FAILURE; - - if (completed && System.currentTimeMillis() - request.getLastUpdated() > TimeUnit.MINUTES.toMillis(5L)) { - toDrop.add(entry.getKey()); - } - } - - for (final String requestId : toDrop) { - dropRequestMap.remove(requestId); - } - } - - final DropFlowFileRequest dropRequest = new DropFlowFileRequest(requestIdentifier); - final QueueSize originalSize = getQueueSize(); - dropRequest.setCurrentSize(originalSize); - dropRequest.setOriginalSize(originalSize); - if (originalSize.getObjectCount() == 0) { - dropRequest.setDroppedSize(originalSize); - dropRequest.setState(DropFlowFileState.COMPLETE); - dropRequestMap.put(requestIdentifier, dropRequest); - return dropRequest; - } - - final Thread t = new Thread(new Runnable() { - @Override - public void run() { - writeLock.lock(); - try { - dropRequest.setState(DropFlowFileState.DROPPING_FLOWFILES); - logger.debug("For DropFlowFileRequest {}, original size is {}", requestIdentifier, getQueueSize()); - - try { - final List activeQueueRecords = new ArrayList<>(activeQueue); - - QueueSize droppedSize; - try { - if (dropRequest.getState() == DropFlowFileState.CANCELED) { - logger.info("Cancel requested for DropFlowFileRequest {}", requestIdentifier); - return; - } - - droppedSize = drop(activeQueueRecords, requestor); - logger.debug("For DropFlowFileRequest {}, Dropped {} from active queue", requestIdentifier, droppedSize); - } catch (final IOException ioe) { - logger.error("Failed to drop the FlowFiles from queue {} due to {}", StandardFlowFileQueue.this.getIdentifier(), ioe.toString()); - logger.error("", ioe); - - 
dropRequest.setState(DropFlowFileState.FAILURE, "Failed to drop FlowFiles due to " + ioe.toString()); - return; - } - - activeQueue.clear(); - incrementActiveQueueSize(-droppedSize.getObjectCount(), -droppedSize.getByteCount()); - dropRequest.setCurrentSize(getQueueSize()); - dropRequest.setDroppedSize(dropRequest.getDroppedSize().add(droppedSize)); - - final QueueSize swapSize = size.get().swapQueueSize(); - logger.debug("For DropFlowFileRequest {}, Swap Queue has {} elements, Swapped Record Count = {}, Swapped Content Size = {}", - requestIdentifier, swapQueue.size(), swapSize.getObjectCount(), swapSize.getByteCount()); - if (dropRequest.getState() == DropFlowFileState.CANCELED) { - logger.info("Cancel requested for DropFlowFileRequest {}", requestIdentifier); - return; - } - - try { - droppedSize = drop(swapQueue, requestor); - } catch (final IOException ioe) { - logger.error("Failed to drop the FlowFiles from queue {} due to {}", StandardFlowFileQueue.this.getIdentifier(), ioe.toString()); - logger.error("", ioe); - - dropRequest.setState(DropFlowFileState.FAILURE, "Failed to drop FlowFiles due to " + ioe.toString()); - return; - } - - swapQueue.clear(); - dropRequest.setCurrentSize(getQueueSize()); - dropRequest.setDroppedSize(dropRequest.getDroppedSize().add(droppedSize)); - swapMode = false; - incrementSwapQueueSize(-droppedSize.getObjectCount(), -droppedSize.getByteCount(), 0); - logger.debug("For DropFlowFileRequest {}, dropped {} from Swap Queue", requestIdentifier, droppedSize); - - final int swapFileCount = swapLocations.size(); - final Iterator swapLocationItr = swapLocations.iterator(); - while (swapLocationItr.hasNext()) { - final String swapLocation = swapLocationItr.next(); - - SwapContents swapContents = null; - try { - if (dropRequest.getState() == DropFlowFileState.CANCELED) { - logger.info("Cancel requested for DropFlowFileRequest {}", requestIdentifier); - return; - } - - swapContents = swapManager.swapIn(swapLocation, StandardFlowFileQueue.this); - droppedSize = drop(swapContents.getFlowFiles(), requestor); - } catch (final IncompleteSwapFileException isfe) { - swapContents = isfe.getPartialContents(); - final String warnMsg = "Failed to swap in FlowFiles from Swap File " + swapLocation + " because the file was corrupt. " - + "Some FlowFiles may not be dropped from the queue until NiFi is restarted."; - - logger.warn(warnMsg); - if (eventReporter != null) { - eventReporter.reportEvent(Severity.WARNING, "Drop FlowFiles", warnMsg); - } - } catch (final IOException ioe) { - logger.error("Failed to swap in FlowFiles from Swap File {} in order to drop the FlowFiles for Connection {} due to {}", - swapLocation, StandardFlowFileQueue.this.getIdentifier(), ioe.toString()); - logger.error("", ioe); - if (eventReporter != null) { - eventReporter.reportEvent(Severity.ERROR, "Drop FlowFiles", "Failed to swap in FlowFiles from Swap File " + swapLocation - + ". The FlowFiles contained in this Swap File will not be dropped from the queue"); - } - - dropRequest.setState(DropFlowFileState.FAILURE, "Failed to swap in FlowFiles from Swap File " + swapLocation + " due to " + ioe.toString()); - if (swapContents != null) { - activeQueue.addAll(swapContents.getFlowFiles()); // ensure that we don't lose the FlowFiles from our queue. 
- } - - return; - } - - dropRequest.setDroppedSize(dropRequest.getDroppedSize().add(droppedSize)); - incrementSwapQueueSize(-droppedSize.getObjectCount(), -droppedSize.getByteCount(), -1); - - dropRequest.setCurrentSize(getQueueSize()); - swapLocationItr.remove(); - logger.debug("For DropFlowFileRequest {}, dropped {} for Swap File {}", requestIdentifier, droppedSize, swapLocation); - } - - logger.debug("Dropped FlowFiles from {} Swap Files", swapFileCount); - logger.info("Successfully dropped {} FlowFiles ({} bytes) from Connection with ID {} on behalf of {}", - dropRequest.getDroppedSize().getObjectCount(), dropRequest.getDroppedSize().getByteCount(), StandardFlowFileQueue.this.getIdentifier(), requestor); - dropRequest.setState(DropFlowFileState.COMPLETE); - } catch (final Exception e) { - logger.error("Failed to drop FlowFiles from Connection with ID {} due to {}", StandardFlowFileQueue.this.getIdentifier(), e.toString()); - logger.error("", e); - dropRequest.setState(DropFlowFileState.FAILURE, "Failed to drop FlowFiles due to " + e.toString()); - } - } finally { - writeLock.unlock("Drop FlowFiles"); - } - } - }, "Drop FlowFiles for Connection " + getIdentifier()); - t.setDaemon(true); - t.start(); - - dropRequestMap.put(requestIdentifier, dropRequest); - - return dropRequest; - } - - private QueueSize drop(final List flowFiles, final String requestor) throws IOException { - // Create a Provenance Event and a FlowFile Repository record for each FlowFile - final List provenanceEvents = new ArrayList<>(flowFiles.size()); - final List flowFileRepoRecords = new ArrayList<>(flowFiles.size()); - for (final FlowFileRecord flowFile : flowFiles) { - provenanceEvents.add(createDropEvent(flowFile, requestor)); - flowFileRepoRecords.add(createDeleteRepositoryRecord(flowFile)); - } - - long dropContentSize = 0L; - for (final FlowFileRecord flowFile : flowFiles) { - dropContentSize += flowFile.getSize(); - final ContentClaim contentClaim = flowFile.getContentClaim(); - if (contentClaim == null) { - continue; - } - - final ResourceClaim resourceClaim = contentClaim.getResourceClaim(); - if (resourceClaim == null) { - continue; - } - - resourceClaimManager.decrementClaimantCount(resourceClaim); - } - - provRepository.registerEvents(provenanceEvents); - flowFileRepository.updateRepository(flowFileRepoRecords); - return new QueueSize(flowFiles.size(), dropContentSize); - } - - private ProvenanceEventRecord createDropEvent(final FlowFileRecord flowFile, final String requestor) { - final ProvenanceEventBuilder builder = provRepository.eventBuilder(); - builder.fromFlowFile(flowFile); - builder.setEventType(ProvenanceEventType.DROP); - builder.setLineageStartDate(flowFile.getLineageStartDate()); - builder.setComponentId(getIdentifier()); - builder.setComponentType("Connection"); - builder.setAttributes(flowFile.getAttributes(), Collections. 
emptyMap()); - builder.setDetails("FlowFile Queue emptied by " + requestor); - builder.setSourceQueueIdentifier(getIdentifier()); - - final ContentClaim contentClaim = flowFile.getContentClaim(); - if (contentClaim != null) { - final ResourceClaim resourceClaim = contentClaim.getResourceClaim(); - builder.setPreviousContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), contentClaim.getOffset(), flowFile.getSize()); - } - - return builder.build(); - } - - private RepositoryRecord createDeleteRepositoryRecord(final FlowFileRecord flowFile) { - return new RepositoryRecord() { - @Override - public FlowFileQueue getDestination() { - return null; - } - - @Override - public FlowFileQueue getOriginalQueue() { - return StandardFlowFileQueue.this; - } - - @Override - public RepositoryRecordType getType() { - return RepositoryRecordType.DELETE; - } - - @Override - public ContentClaim getCurrentClaim() { - return flowFile.getContentClaim(); - } - - @Override - public ContentClaim getOriginalClaim() { - return flowFile.getContentClaim(); - } - - @Override - public long getCurrentClaimOffset() { - return flowFile.getContentClaimOffset(); - } - - @Override - public FlowFileRecord getCurrent() { - return flowFile; - } - - @Override - public boolean isAttributesChanged() { - return false; - } - - @Override - public boolean isMarkedForAbort() { - return false; - } - - @Override - public String getSwapLocation() { - return null; - } - - @Override - public List getTransientClaims() { - return Collections.emptyList(); - } - }; - } - - - @Override - public DropFlowFileRequest cancelDropFlowFileRequest(final String requestIdentifier) { - final DropFlowFileRequest request = dropRequestMap.remove(requestIdentifier); - if (request == null) { - return null; - } - - request.cancel(); - return request; - } - - @Override - public DropFlowFileStatus getDropFlowFileStatus(final String requestIdentifier) { - return dropRequestMap.get(requestIdentifier); - } - - /** - * Lock the queue so that other threads are unable to interact with the - * queue - */ - public void lock() { - writeLock.lock(); - } - - /** - * Unlock the queue - */ - public void unlock() { - writeLock.unlock("external unlock"); - } - - @Override - public QueueSize getUnacknowledgedQueueSize() { - return size.get().unacknowledgedQueueSize(); - } - - private void incrementActiveQueueSize(final int count, final long bytes) { - boolean updated = false; - while (!updated) { - final FlowFileQueueSize original = size.get(); - final FlowFileQueueSize newSize = new FlowFileQueueSize(original.activeQueueCount + count, original.activeQueueBytes + bytes, - original.swappedCount, original.swappedBytes, original.swapFiles, original.unacknowledgedCount, original.unacknowledgedBytes); - updated = size.compareAndSet(original, newSize); - - if (updated) { - logIfNegative(original, newSize, "active"); - } - } - } - - private void incrementSwapQueueSize(final int count, final long bytes, final int fileCount) { - boolean updated = false; - while (!updated) { - final FlowFileQueueSize original = size.get(); - final FlowFileQueueSize newSize = new FlowFileQueueSize(original.activeQueueCount, original.activeQueueBytes, - original.swappedCount + count, original.swappedBytes + bytes, original.swapFiles + fileCount, original.unacknowledgedCount, original.unacknowledgedBytes); - updated = size.compareAndSet(original, newSize); - - if (updated) { - logIfNegative(original, newSize, "swap"); - } - } - } - - private void 
incrementUnacknowledgedQueueSize(final int count, final long bytes) { - boolean updated = false; - while (!updated) { - final FlowFileQueueSize original = size.get(); - final FlowFileQueueSize newSize = new FlowFileQueueSize(original.activeQueueCount, original.activeQueueBytes, - original.swappedCount, original.swappedBytes, original.swapFiles, original.unacknowledgedCount + count, original.unacknowledgedBytes + bytes); - updated = size.compareAndSet(original, newSize); - - if (updated) { - logIfNegative(original, newSize, "Unacknowledged"); - } - } - } - - private void logIfNegative(final FlowFileQueueSize original, final FlowFileQueueSize newSize, final String counterName) { - if (newSize.activeQueueBytes < 0 || newSize.activeQueueCount < 0 || newSize.swappedBytes < 0 || newSize.swappedCount < 0 - || newSize.unacknowledgedBytes < 0 || newSize.unacknowledgedCount < 0) { - - logger.error("Updated Size of Queue " + counterName + " from " + original + " to " + newSize, new RuntimeException("Cannot create negative queue size")); - - } - } - - - private static class FlowFileQueueSize { - private final int activeQueueCount; - private final long activeQueueBytes; - private final int swappedCount; - private final long swappedBytes; - private final int swapFiles; - private final int unacknowledgedCount; - private final long unacknowledgedBytes; - - public FlowFileQueueSize(final int activeQueueCount, final long activeQueueBytes, final int swappedCount, final long swappedBytes, final int swapFileCount, - final int unacknowledgedCount, final long unacknowledgedBytes) { - this.activeQueueCount = activeQueueCount; - this.activeQueueBytes = activeQueueBytes; - this.swappedCount = swappedCount; - this.swappedBytes = swappedBytes; - this.swapFiles = swapFileCount; - this.unacknowledgedCount = unacknowledgedCount; - this.unacknowledgedBytes = unacknowledgedBytes; - } - - public boolean isEmpty() { - return activeQueueCount == 0 && swappedCount == 0 && unacknowledgedCount == 0; - } - - public QueueSize toQueueSize() { - return new QueueSize(activeQueueCount + swappedCount + unacknowledgedCount, activeQueueBytes + swappedBytes + unacknowledgedBytes); - } - - public QueueSize activeQueueSize() { - return new QueueSize(activeQueueCount, activeQueueBytes); - } - - public QueueSize unacknowledgedQueueSize() { - return new QueueSize(unacknowledgedCount, unacknowledgedBytes); - } - - public QueueSize swapQueueSize() { - return new QueueSize(swappedCount, swappedBytes); - } - - @Override - public String toString() { - return "FlowFile Queue Size[ ActiveQueue=[" + activeQueueCount + ", " + activeQueueBytes + - " Bytes], Swap Queue=[" + swappedCount + ", " + swappedBytes + - " Bytes], Swap Files=[" + swapFiles + "], Unacknowledged=[" + unacknowledgedCount + ", " + unacknowledgedBytes + " Bytes] ]"; - } - } - - - private static class MaxQueueSize { - private final String maxSize; - private final long maxBytes; - private final long maxCount; - - public MaxQueueSize(final String maxSize, final long maxBytes, final long maxCount) { - this.maxSize = maxSize; - this.maxBytes = maxBytes; - this.maxCount = maxCount; - } - - public String getMaxSize() { - return maxSize; - } - - public long getMaxBytes() { - return maxBytes; - } - - public long getMaxCount() { - return maxCount; - } - - @Override - public String toString() { - return maxCount + " Objects/" + maxSize; - } - } - - private static class TimePeriod { - private final String period; - private final long millis; - - public TimePeriod(final String period, final long 
millis) { - this.period = period; - this.millis = millis; - } - - public String getPeriod() { - return period; - } - - public long getMillis() { - return millis; - } - - @Override - public String toString() { - return period; - } - } -} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowService.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowService.java index f2387c20a9d2..957357b112eb 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowService.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowService.java @@ -23,6 +23,7 @@ import org.apache.nifi.bundle.Bundle; import org.apache.nifi.cluster.ConnectionException; import org.apache.nifi.cluster.coordination.ClusterCoordinator; +import org.apache.nifi.cluster.coordination.node.OffloadCode; import org.apache.nifi.cluster.coordination.node.DisconnectionCode; import org.apache.nifi.cluster.coordination.node.NodeConnectionState; import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus; @@ -36,6 +37,7 @@ import org.apache.nifi.cluster.protocol.StandardDataFlow; import org.apache.nifi.cluster.protocol.impl.NodeProtocolSenderListener; import org.apache.nifi.cluster.protocol.message.ConnectionRequestMessage; +import org.apache.nifi.cluster.protocol.message.OffloadMessage; import org.apache.nifi.cluster.protocol.message.DisconnectMessage; import org.apache.nifi.cluster.protocol.message.FlowRequestMessage; import org.apache.nifi.cluster.protocol.message.FlowResponseMessage; @@ -44,12 +46,14 @@ import org.apache.nifi.cluster.protocol.message.ReconnectionResponseMessage; import org.apache.nifi.components.state.Scope; import org.apache.nifi.components.state.StateManager; +import org.apache.nifi.controller.queue.FlowFileQueue; import org.apache.nifi.controller.serialization.FlowSerializationException; import org.apache.nifi.controller.serialization.FlowSynchronizationException; import org.apache.nifi.encrypt.StringEncryptor; import org.apache.nifi.engine.FlowEngine; import org.apache.nifi.events.BulletinFactory; import org.apache.nifi.groups.ProcessGroup; +import org.apache.nifi.groups.RemoteProcessGroup; import org.apache.nifi.lifecycle.LifeCycleStartException; import org.apache.nifi.logging.LogLevel; import org.apache.nifi.nar.NarClassLoaders; @@ -202,6 +206,7 @@ private StandardFlowService( final InetSocketAddress nodeApiAddress = nifiProperties.getNodeApiAddress(); final InetSocketAddress nodeSocketAddress = nifiProperties.getClusterNodeProtocolAddress(); + final InetSocketAddress loadBalanceAddress = nifiProperties.getClusterLoadBalanceAddress(); String nodeUuid = null; final StateManager stateManager = controller.getStateManagerProvider().getStateManager(CLUSTER_NODE_CONFIG); @@ -217,6 +222,7 @@ private StandardFlowService( this.nodeId = new NodeIdentifier(nodeUuid, nodeApiAddress.getHostName(), nodeApiAddress.getPort(), nodeSocketAddress.getHostName(), nodeSocketAddress.getPort(), + loadBalanceAddress.getHostName(), loadBalanceAddress.getPort(), nifiProperties.getRemoteInputHost(), nifiProperties.getRemoteInputPort(), nifiProperties.getRemoteInputHttpPort(), nifiProperties.isSiteToSiteSecure()); @@ -379,6 +385,7 @@ public void run() { public boolean canHandle(final ProtocolMessage msg) { 
switch (msg.getType()) { case RECONNECTION_REQUEST: + case OFFLOAD_REQUEST: case DISCONNECTION_REQUEST: case FLOW_REQUEST: return true; @@ -388,7 +395,7 @@ public boolean canHandle(final ProtocolMessage msg) { } @Override - public ProtocolMessage handle(final ProtocolMessage request) throws ProtocolException { + public ProtocolMessage handle(final ProtocolMessage request, final Set nodeIdentities) throws ProtocolException { final long startNanos = System.nanoTime(); try { switch (request.getType()) { @@ -413,6 +420,22 @@ public void run() { return new ReconnectionResponseMessage(); } + case OFFLOAD_REQUEST: { + final Thread t = new Thread(new Runnable() { + @Override + public void run() { + try { + handleOffloadRequest((OffloadMessage) request); + } catch (InterruptedException e) { + throw new ProtocolException("Could not complete offload request", e); + } + } + }, "Offload Flow Files from Node"); + t.setDaemon(true); + t.start(); + + return null; + } case DISCONNECTION_REQUEST: { final Thread t = new Thread(new Runnable() { @Override @@ -559,7 +582,7 @@ private void handleConnectionFailure(final Exception ex) { private FlowResponseMessage handleFlowRequest(final FlowRequestMessage request) throws ProtocolException { readLock.lock(); try { - logger.info("Received flow request message from manager."); + logger.info("Received flow request message from cluster coordinator."); // create the response final FlowResponseMessage response = new FlowResponseMessage(); @@ -629,7 +652,7 @@ private NodeIdentifier getNodeId() { private void handleReconnectionRequest(final ReconnectionRequestMessage request) { try { - logger.info("Processing reconnection request from manager."); + logger.info("Processing reconnection request from cluster coordinator."); // reconnect ConnectionResponse connectionResponse = new ConnectionResponse(getNodeId(), request.getDataFlow(), @@ -660,8 +683,48 @@ private void handleReconnectionRequest(final ReconnectionRequestMessage request) } } + private void handleOffloadRequest(final OffloadMessage request) throws InterruptedException { + logger.info("Received offload request message from cluster coordinator with explanation: " + request.getExplanation()); + offload(request.getExplanation()); + } + + private void offload(final String explanation) throws InterruptedException { + writeLock.lock(); + try { + logger.info("Offloading node due to " + explanation); + + // mark node as offloading + controller.setConnectionStatus(new NodeConnectionStatus(nodeId, NodeConnectionState.OFFLOADING, OffloadCode.OFFLOADED, explanation)); + // request to stop all processors on node + controller.stopAllProcessors(); + // terminate all processors + controller.getRootGroup().findAllProcessors() + // filter stream, only stopped processors can be terminated + .stream().filter(pn -> pn.getScheduledState() == ScheduledState.STOPPED) + .forEach(pn -> pn.getProcessGroup().terminateProcessor(pn)); + // request to stop all remote process groups + controller.getRootGroup().findAllRemoteProcessGroups().forEach(RemoteProcessGroup::stopTransmitting); + // offload all queues on node + controller.getAllQueues().forEach(FlowFileQueue::offloadQueue); + // wait for rebalance of flowfiles on all queues + while (controller.getControllerStatus().getQueuedCount() > 0) { + logger.debug("Offloading queues on node {}, remaining queued count: {}", getNodeId(), controller.getControllerStatus().getQueuedCount()); + Thread.sleep(1000); + } + // finish offload + 
controller.getAllQueues().forEach(FlowFileQueue::resetOffloadedQueue); + controller.setConnectionStatus(new NodeConnectionStatus(nodeId, NodeConnectionState.OFFLOADED, OffloadCode.OFFLOADED, explanation)); + clusterCoordinator.finishNodeOffload(getNodeId()); + + logger.info("Node offloaded due to " + explanation); + + } finally { + writeLock.unlock(); + } + } + private void handleDisconnectionRequest(final DisconnectMessage request) { - logger.info("Received disconnection request message from manager with explanation: " + request.getExplanation()); + logger.info("Received disconnection request message from cluster coordinator with explanation: " + request.getExplanation()); disconnect(request.getExplanation()); } @@ -827,11 +890,11 @@ private ConnectionResponse connect(final boolean retryOnCommsFailure, final bool } } else if (response.getRejectionReason() != null) { logger.warn("Connection request was blocked by cluster coordinator with the explanation: " + response.getRejectionReason()); - // set response to null and treat a firewall blockage the same as getting no response from manager + // set response to null and treat a firewall blockage the same as getting no response from cluster coordinator response = null; break; } else { - // we received a successful connection response from manager + // we received a successful connection response from cluster coordinator break; } } catch (final NoClusterCoordinatorException ncce) { diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowSynchronizer.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowSynchronizer.java index 15538b37ca0c..d47e198a289f 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowSynchronizer.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardFlowSynchronizer.java @@ -16,34 +16,6 @@ */ package org.apache.nifi.controller; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import java.util.zip.GZIPInputStream; - -import javax.xml.XMLConstants; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; -import javax.xml.validation.Schema; -import javax.xml.validation.SchemaFactory; - import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.nifi.authorization.Authorizer; @@ -58,6 +30,8 @@ import org.apache.nifi.connectable.ConnectableType; import org.apache.nifi.connectable.Connection; import org.apache.nifi.connectable.Funnel; +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.queue.LoadBalanceStrategy; import org.apache.nifi.connectable.Port; import 
org.apache.nifi.connectable.Position; import org.apache.nifi.connectable.Size; @@ -127,6 +101,33 @@ import org.w3c.dom.NodeList; import org.xml.sax.SAXException; +import javax.xml.XMLConstants; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.validation.Schema; +import javax.xml.validation.SchemaFactory; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.zip.GZIPInputStream; + /** */ public class StandardFlowSynchronizer implements FlowSynchronizer { @@ -837,9 +838,10 @@ private ProcessGroup updateProcessGroup(final FlowController controller, final P final ProcessorDTO dto = FlowFromDOMFactory.getProcessor(processorElement, encryptor); final ProcessorNode procNode = processGroup.getProcessor(dto.getId()); + final ScheduledState procState = getScheduledState(procNode, controller); updateNonFingerprintedProcessorSettings(procNode, dto); - if (!procNode.getScheduledState().name().equals(dto.getState())) { + if (!procState.name().equals(dto.getState())) { try { switch (ScheduledState.valueOf(dto.getState())) { case DISABLED: @@ -855,9 +857,9 @@ private ProcessGroup updateProcessGroup(final FlowController controller, final P controller.startProcessor(procNode.getProcessGroupIdentifier(), procNode.getIdentifier(), false); break; case STOPPED: - if (procNode.getScheduledState() == ScheduledState.DISABLED) { + if (procState == ScheduledState.DISABLED) { procNode.getProcessGroup().enableProcessor(procNode); - } else if (procNode.getScheduledState() == ScheduledState.RUNNING) { + } else if (procState == ScheduledState.RUNNING) { controller.stopProcessor(procNode.getProcessGroupIdentifier(), procNode.getIdentifier()); } break; @@ -882,7 +884,9 @@ private ProcessGroup updateProcessGroup(final FlowController controller, final P final PortDTO dto = FlowFromDOMFactory.getPort(portElement); final Port port = processGroup.getInputPort(dto.getId()); - if (!port.getScheduledState().name().equals(dto.getState())) { + final ScheduledState portState = getScheduledState(port, controller); + + if (!portState.name().equals(dto.getState())) { switch (ScheduledState.valueOf(dto.getState())) { case DISABLED: // switch processor do disabled. 
This means we have to stop it (if it's already stopped, this method does nothing), @@ -896,9 +900,9 @@ private ProcessGroup updateProcessGroup(final FlowController controller, final P controller.startConnectable(port); break; case STOPPED: - if (port.getScheduledState() == ScheduledState.DISABLED) { + if (portState == ScheduledState.DISABLED) { port.getProcessGroup().enableInputPort(port); - } else if (port.getScheduledState() == ScheduledState.RUNNING) { + } else if (portState == ScheduledState.RUNNING) { controller.stopConnectable(port); } break; @@ -911,7 +915,9 @@ private ProcessGroup updateProcessGroup(final FlowController controller, final P final PortDTO dto = FlowFromDOMFactory.getPort(portElement); final Port port = processGroup.getOutputPort(dto.getId()); - if (!port.getScheduledState().name().equals(dto.getState())) { + final ScheduledState portState = getScheduledState(port, controller); + + if (!portState.name().equals(dto.getState())) { switch (ScheduledState.valueOf(dto.getState())) { case DISABLED: // switch processor do disabled. This means we have to stop it (if it's already stopped, this method does nothing), @@ -925,9 +931,9 @@ private ProcessGroup updateProcessGroup(final FlowController controller, final P controller.startConnectable(port); break; case STOPPED: - if (port.getScheduledState() == ScheduledState.DISABLED) { + if (portState == ScheduledState.DISABLED) { port.getProcessGroup().enableOutputPort(port); - } else if (port.getScheduledState() == ScheduledState.RUNNING) { + } else if (portState == ScheduledState.RUNNING) { controller.stopConnectable(port); } break; @@ -951,12 +957,14 @@ private ProcessGroup updateProcessGroup(final FlowController controller, final P continue; } + final ScheduledState portState = getScheduledState(inputPort, controller); + if (portDescriptor.isTransmitting()) { - if (inputPort.getScheduledState() != ScheduledState.RUNNING && inputPort.getScheduledState() != ScheduledState.STARTING) { - rpg.startTransmitting(inputPort); + if (portState != ScheduledState.RUNNING && portState != ScheduledState.STARTING) { + controller.startTransmitting(inputPort); } - } else if (inputPort.getScheduledState() != ScheduledState.STOPPED && inputPort.getScheduledState() != ScheduledState.STOPPING) { - rpg.stopTransmitting(inputPort); + } else if (portState != ScheduledState.STOPPED && portState != ScheduledState.STOPPING) { + controller.stopTransmitting(inputPort); } } @@ -970,12 +978,14 @@ private ProcessGroup updateProcessGroup(final FlowController controller, final P continue; } + final ScheduledState portState = getScheduledState(outputPort, controller); + if (portDescriptor.isTransmitting()) { - if (outputPort.getScheduledState() != ScheduledState.RUNNING && outputPort.getScheduledState() != ScheduledState.STARTING) { - rpg.startTransmitting(outputPort); + if (portState != ScheduledState.RUNNING && portState != ScheduledState.STARTING) { + controller.startTransmitting(outputPort); } - } else if (outputPort.getScheduledState() != ScheduledState.STOPPED && outputPort.getScheduledState() != ScheduledState.STOPPING) { - rpg.stopTransmitting(outputPort); + } else if (portState != ScheduledState.STOPPED && portState != ScheduledState.STOPPING) { + controller.stopTransmitting(outputPort); } } } @@ -1073,6 +1083,17 @@ private ProcessGroup updateProcessGroup(final FlowController controller, final P return processGroup; } + private ScheduledState getScheduledState(final T component, final FlowController flowController) { + final ScheduledState componentState 
= component.getScheduledState(); + if (componentState == ScheduledState.STOPPED) { + if (flowController.isStartAfterInitialization(component)) { + return ScheduledState.RUNNING; + } + } + + return componentState; + } + private Position toPosition(final PositionDTO dto) { return new Position(dto.getX(), dto.getY()); } @@ -1499,6 +1520,14 @@ private ProcessGroup addProcessGroup(final FlowController controller, final Proc connection.getFlowFileQueue().setFlowFileExpiration(dto.getFlowFileExpiration()); } + if (dto.getLoadBalanceStrategy() != null) { + connection.getFlowFileQueue().setLoadBalanceStrategy(LoadBalanceStrategy.valueOf(dto.getLoadBalanceStrategy()), dto.getLoadBalancePartitionAttribute()); + } + + if (dto.getLoadBalanceCompression() != null) { + connection.getFlowFileQueue().setLoadBalanceCompression(LoadBalanceCompression.valueOf(dto.getLoadBalanceCompression())); + } + processGroup.addConnection(connection); } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardProcessorNode.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardProcessorNode.java index c2b98e64584b..2cee3d418514 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardProcessorNode.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/StandardProcessorNode.java @@ -1426,9 +1426,14 @@ public synchronized List getActiveThreads() { @Override public synchronized int getTerminatedThreadCount() { - return (int) activeThreads.values().stream() - .filter(ActiveTask::isTerminated) - .count(); + int count = 0; + for (final ActiveTask task : activeThreads.values()) { + if (task.isTerminated()) { + count++; + } + } + + return count; } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/AbstractFlowFileQueue.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/AbstractFlowFileQueue.java new file mode 100644 index 000000000000..5bf75a4cc881 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/AbstractFlowFileQueue.java @@ -0,0 +1,460 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue; + +import org.apache.nifi.controller.ProcessScheduler; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileRepository; +import org.apache.nifi.controller.repository.RepositoryRecord; +import org.apache.nifi.controller.repository.claim.ContentClaim; +import org.apache.nifi.controller.repository.claim.ResourceClaim; +import org.apache.nifi.controller.repository.claim.ResourceClaimManager; +import org.apache.nifi.flowfile.FlowFile; +import org.apache.nifi.flowfile.attributes.CoreAttributes; +import org.apache.nifi.processor.DataUnit; +import org.apache.nifi.provenance.ProvenanceEventBuilder; +import org.apache.nifi.provenance.ProvenanceEventRecord; +import org.apache.nifi.provenance.ProvenanceEventRepository; +import org.apache.nifi.provenance.ProvenanceEventType; +import org.apache.nifi.util.FormatUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +public abstract class AbstractFlowFileQueue implements FlowFileQueue { + private static final Logger logger = LoggerFactory.getLogger(AbstractFlowFileQueue.class); + private final String identifier; + private final FlowFileRepository flowFileRepository; + private final ProvenanceEventRepository provRepository; + private final ResourceClaimManager resourceClaimManager; + private final ProcessScheduler scheduler; + + private final AtomicReference expirationPeriod = new AtomicReference<>(new TimePeriod("0 mins", 0L)); + private final AtomicReference maxQueueSize = new AtomicReference<>(new MaxQueueSize("1 GB", 1024 * 1024 * 1024, 10000)); + + private final ConcurrentMap listRequestMap = new ConcurrentHashMap<>(); + private final ConcurrentMap dropRequestMap = new ConcurrentHashMap<>(); + + private LoadBalanceStrategy loadBalanceStrategy = LoadBalanceStrategy.DO_NOT_LOAD_BALANCE; + private String partitioningAttribute = null; + + private LoadBalanceCompression compression = LoadBalanceCompression.DO_NOT_COMPRESS; + + + public AbstractFlowFileQueue(final String identifier, final ProcessScheduler scheduler, + final FlowFileRepository flowFileRepo, final ProvenanceEventRepository provRepo, final ResourceClaimManager resourceClaimManager) { + this.identifier = identifier; + this.scheduler = scheduler; + this.flowFileRepository = flowFileRepo; + this.provRepository = provRepo; + this.resourceClaimManager = resourceClaimManager; + } + + @Override + public String getIdentifier() { + return identifier; + } + + protected ProcessScheduler getScheduler() { + return scheduler; + } + + @Override + public String getFlowFileExpiration() { + return expirationPeriod.get().getPeriod(); + } + + @Override + public int getFlowFileExpiration(final TimeUnit timeUnit) { + return (int) timeUnit.convert(expirationPeriod.get().getMillis(), TimeUnit.MILLISECONDS); + } + + @Override + public void setFlowFileExpiration(final String flowExpirationPeriod) { + final long millis = FormatUtils.getTimeDuration(flowExpirationPeriod, TimeUnit.MILLISECONDS); + if (millis < 0) { + throw new IllegalArgumentException("FlowFile Expiration Period must be positive"); + } + + expirationPeriod.set(new TimePeriod(flowExpirationPeriod, millis)); + } + + 
@Override + public void setBackPressureObjectThreshold(final long threshold) { + boolean updated = false; + while (!updated) { + MaxQueueSize maxSize = getMaxQueueSize(); + final MaxQueueSize updatedSize = new MaxQueueSize(maxSize.getMaxSize(), maxSize.getMaxBytes(), threshold); + updated = maxQueueSize.compareAndSet(maxSize, updatedSize); + } + } + + @Override + public long getBackPressureObjectThreshold() { + return getMaxQueueSize().getMaxCount(); + } + + @Override + public void setBackPressureDataSizeThreshold(final String maxDataSize) { + final long maxBytes = DataUnit.parseDataSize(maxDataSize, DataUnit.B).longValue(); + + boolean updated = false; + while (!updated) { + MaxQueueSize maxSize = getMaxQueueSize(); + final MaxQueueSize updatedSize = new MaxQueueSize(maxDataSize, maxBytes, maxSize.getMaxCount()); + updated = maxQueueSize.compareAndSet(maxSize, updatedSize); + } + } + + @Override + public String getBackPressureDataSizeThreshold() { + return getMaxQueueSize().getMaxSize(); + } + + private MaxQueueSize getMaxQueueSize() { + return maxQueueSize.get(); + } + + @Override + public boolean isFull() { + final MaxQueueSize maxSize = getMaxQueueSize(); + + // Check if max size is set + if (maxSize.getMaxBytes() <= 0 && maxSize.getMaxCount() <= 0) { + return false; + } + + final QueueSize queueSize = size(); + if (maxSize.getMaxCount() > 0 && queueSize.getObjectCount() >= maxSize.getMaxCount()) { + return true; + } + + if (maxSize.getMaxBytes() > 0 && queueSize.getByteCount() >= maxSize.getMaxBytes()) { + return true; + } + + return false; + } + + + @Override + public ListFlowFileStatus listFlowFiles(final String requestIdentifier, final int maxResults) { + // purge any old requests from the map just to keep it clean. But if there are very few requests, which is usually the case, then don't bother + if (listRequestMap.size() > 10) { + final List toDrop = new ArrayList<>(); + for (final Map.Entry entry : listRequestMap.entrySet()) { + final ListFlowFileRequest request = entry.getValue(); + final boolean completed = request.getState() == ListFlowFileState.COMPLETE || request.getState() == ListFlowFileState.FAILURE; + + if (completed && System.currentTimeMillis() - request.getLastUpdated() > TimeUnit.MINUTES.toMillis(5L)) { + toDrop.add(entry.getKey()); + } + } + + for (final String requestId : toDrop) { + listRequestMap.remove(requestId); + } + } + + // numSteps = 1 for each swap location + 1 for active queue + 1 for swap queue. + final ListFlowFileRequest listRequest = new ListFlowFileRequest(requestIdentifier, maxResults, size()); + + final Thread t = new Thread(new Runnable() { + @Override + public void run() { + int position = 0; + int resultCount = 0; + final List summaries = new ArrayList<>(); + + // Create an ArrayList that contains all of the contents of the active queue. + // We do this so that we don't have to hold the lock any longer than absolutely necessary. + // We cannot simply pull the first 'maxResults' records from the queue, however, because the + // Iterator provided by PriorityQueue does not return records in order. So we would have to either + // use a writeLock and 'pop' the first 'maxResults' records off the queue or use a read lock and + // do a shallow copy of the queue. The shallow copy is generally quicker because it doesn't have to do + // the sorting to put the records back. So even though this comes at the expense of Java Heap to create the + // extra collection, we are making this trade-off to avoid locking the queue any longer than required.
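The comment above describes a copy-then-sort approach: snapshot the queue under the read lock, then order and trim the copy with the prioritizer outside the lock. A self-contained sketch of that trade-off, using plain longs and natural ordering in place of FlowFileRecords and QueuePrioritizer, might look like this:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class SnapshotListingSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final List<Long> queued = new ArrayList<>();   // stand-in for the active queue's records

    void add(final long id) {
        lock.writeLock().lock();
        try {
            queued.add(id);
        } finally {
            lock.writeLock().unlock();
        }
    }

    // Snapshot under the read lock, then sort and trim the copy outside the lock,
    // so the lock is never held while the (possibly large) result is being ordered.
    List<Long> list(final int maxResults) {
        final List<Long> snapshot;
        lock.readLock().lock();
        try {
            snapshot = new ArrayList<>(queued);
        } finally {
            lock.readLock().unlock();
        }

        snapshot.sort(Comparator.naturalOrder()); // the prioritizer plays this role above
        return snapshot.subList(0, Math.min(maxResults, snapshot.size()));
    }

    public static void main(final String[] args) {
        final SnapshotListingSketch sketch = new SnapshotListingSketch();
        sketch.add(42L);
        sketch.add(7L);
        sketch.add(19L);
        System.out.println(sketch.list(2)); // [7, 19]
    }
}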
+ final List allFlowFiles = getListableFlowFiles(); + final QueuePrioritizer prioritizer = new QueuePrioritizer(getPriorities()); + + listRequest.setState(ListFlowFileState.CALCULATING_LIST); + + // sort the FlowFileRecords so that we have the list in the same order as on the queue. + allFlowFiles.sort(prioritizer); + + for (final FlowFileRecord flowFile : allFlowFiles) { + summaries.add(summarize(flowFile, ++position)); + if (summaries.size() >= maxResults) { + break; + } + } + + logger.debug("{} Finished listing FlowFiles for active queue with a total of {} results", this, resultCount); + listRequest.setFlowFileSummaries(summaries); + listRequest.setState(ListFlowFileState.COMPLETE); + } + }, "List FlowFiles for Connection " + getIdentifier()); + t.setDaemon(true); + t.start(); + + listRequestMap.put(requestIdentifier, listRequest); + return listRequest; + } + + @Override + public ListFlowFileStatus getListFlowFileStatus(final String requestIdentifier) { + return listRequestMap.get(requestIdentifier); + } + + @Override + public ListFlowFileStatus cancelListFlowFileRequest(final String requestIdentifier) { + logger.info("Canceling ListFlowFile Request with ID {}", requestIdentifier); + final ListFlowFileRequest request = listRequestMap.remove(requestIdentifier); + if (request != null) { + request.cancel(); + } + + return request; + } + + /** + * @return all FlowFiles that should be listed in response to a List Queue request + */ + protected abstract List getListableFlowFiles(); + + + @Override + public DropFlowFileStatus dropFlowFiles(final String requestIdentifier, final String requestor) { + logger.info("Initiating drop of FlowFiles from {} on behalf of {} (request identifier={})", this, requestor, requestIdentifier); + + // purge any old requests from the map just to keep it clean. 
But if there are very few requests, which is usually the case, then don't bother + if (dropRequestMap.size() > 10) { + final List toDrop = new ArrayList<>(); + for (final Map.Entry entry : dropRequestMap.entrySet()) { + final DropFlowFileRequest request = entry.getValue(); + final boolean completed = request.getState() == DropFlowFileState.COMPLETE || request.getState() == DropFlowFileState.FAILURE; + + if (completed && System.currentTimeMillis() - request.getLastUpdated() > TimeUnit.MINUTES.toMillis(5L)) { + toDrop.add(entry.getKey()); + } + } + + for (final String requestId : toDrop) { + dropRequestMap.remove(requestId); + } + } + + final DropFlowFileRequest dropRequest = new DropFlowFileRequest(requestIdentifier); + final QueueSize originalSize = size(); + dropRequest.setCurrentSize(originalSize); + dropRequest.setOriginalSize(originalSize); + if (originalSize.getObjectCount() == 0) { + dropRequest.setDroppedSize(originalSize); + dropRequest.setState(DropFlowFileState.COMPLETE); + dropRequestMap.put(requestIdentifier, dropRequest); + return dropRequest; + } + + final Thread t = new Thread(new Runnable() { + @Override + public void run() { + dropFlowFiles(dropRequest, requestor); + } + }, "Drop FlowFiles for Connection " + getIdentifier()); + t.setDaemon(true); + t.start(); + + dropRequestMap.put(requestIdentifier, dropRequest); + + return dropRequest; + } + + + @Override + public DropFlowFileRequest cancelDropFlowFileRequest(final String requestIdentifier) { + final DropFlowFileRequest request = dropRequestMap.remove(requestIdentifier); + if (request == null) { + return null; + } + + request.cancel(); + return request; + } + + @Override + public DropFlowFileStatus getDropFlowFileStatus(final String requestIdentifier) { + return dropRequestMap.get(requestIdentifier); + } + + /** + * Synchronously drops all FlowFiles in the queue + * + * @param dropRequest the request + * @param requestor the identity of the user/agent who made the request + */ + protected abstract void dropFlowFiles(final DropFlowFileRequest dropRequest, final String requestor); + + @Override + public void verifyCanList() throws IllegalStateException { + } + + + protected FlowFileSummary summarize(final FlowFile flowFile, final int position) { + // extract all of the information that we care about into new variables rather than just + // wrapping the FlowFile object with a FlowFileSummary object. We do this because we want to + // be able to hold many FlowFileSummary objects in memory and if we just wrap the FlowFile object, + // we will end up holding the entire FlowFile (including all Attributes) in the Java heap as well, + // which can be problematic if we expect them to be swapped out. + final String uuid = flowFile.getAttribute(CoreAttributes.UUID.key()); + final String filename = flowFile.getAttribute(CoreAttributes.FILENAME.key()); + final long size = flowFile.getSize(); + final Long lastQueuedTime = flowFile.getLastQueueDate(); + final long lineageStart = flowFile.getLineageStartDate(); + final boolean penalized = flowFile.isPenalized(); + + return new FlowFileSummary() { + @Override + public String getUuid() { + return uuid; + } + + @Override + public String getFilename() { + return filename; + } + + @Override + public int getPosition() { + return position; + } + + @Override + public long getSize() { + return size; + } + + @Override + public long getLastQueuedTime() { + return lastQueuedTime == null ?
0L : lastQueuedTime; + } + + @Override + public long getLineageStartDate() { + return lineageStart; + } + + @Override + public boolean isPenalized() { + return penalized; + } + }; + } + + protected QueueSize drop(final List flowFiles, final String requestor) throws IOException { + // Create a Provenance Event and a FlowFile Repository record for each FlowFile + final List provenanceEvents = new ArrayList<>(flowFiles.size()); + final List flowFileRepoRecords = new ArrayList<>(flowFiles.size()); + for (final FlowFileRecord flowFile : flowFiles) { + provenanceEvents.add(createDropProvenanceEvent(flowFile, requestor)); + flowFileRepoRecords.add(createDeleteRepositoryRecord(flowFile)); + } + + long dropContentSize = 0L; + for (final FlowFileRecord flowFile : flowFiles) { + dropContentSize += flowFile.getSize(); + final ContentClaim contentClaim = flowFile.getContentClaim(); + if (contentClaim == null) { + continue; + } + + final ResourceClaim resourceClaim = contentClaim.getResourceClaim(); + if (resourceClaim == null) { + continue; + } + + resourceClaimManager.decrementClaimantCount(resourceClaim); + } + + provRepository.registerEvents(provenanceEvents); + flowFileRepository.updateRepository(flowFileRepoRecords); + return new QueueSize(flowFiles.size(), dropContentSize); + } + + private ProvenanceEventRecord createDropProvenanceEvent(final FlowFileRecord flowFile, final String requestor) { + final ProvenanceEventBuilder builder = provRepository.eventBuilder(); + builder.fromFlowFile(flowFile); + builder.setEventType(ProvenanceEventType.DROP); + builder.setLineageStartDate(flowFile.getLineageStartDate()); + builder.setComponentId(getIdentifier()); + builder.setComponentType("Connection"); + builder.setAttributes(flowFile.getAttributes(), Collections.emptyMap()); + builder.setDetails("FlowFile Queue emptied by " + requestor); + builder.setSourceQueueIdentifier(getIdentifier()); + + final ContentClaim contentClaim = flowFile.getContentClaim(); + if (contentClaim != null) { + final ResourceClaim resourceClaim = contentClaim.getResourceClaim(); + builder.setPreviousContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), contentClaim.getOffset(), flowFile.getSize()); + } + + return builder.build(); + } + + private RepositoryRecord createDeleteRepositoryRecord(final FlowFileRecord flowFile) { + return new DropFlowFileRepositoryRecord(this, flowFile); + } + + @Override + public synchronized void setLoadBalanceStrategy(final LoadBalanceStrategy strategy, final String partitioningAttribute) { + if (strategy == LoadBalanceStrategy.PARTITION_BY_ATTRIBUTE && !FlowFile.KeyValidator.isValid(partitioningAttribute)) { + throw new IllegalArgumentException("Cannot set Load Balance Strategy to " + strategy + " without providing a valid Partitioning Attribute"); + } + + this.loadBalanceStrategy = strategy; + this.partitioningAttribute = partitioningAttribute; + } + + @Override + public synchronized String getPartitioningAttribute() { + return partitioningAttribute; + } + + @Override + public synchronized LoadBalanceStrategy getLoadBalanceStrategy() { + return loadBalanceStrategy; + } + + @Override + public synchronized void setLoadBalanceCompression(final LoadBalanceCompression compression) { + this.compression = compression; + } + + @Override + public synchronized LoadBalanceCompression getLoadBalanceCompression() { + return compression; + } +} diff --git 
a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/BlockingSwappablePriorityQueue.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/BlockingSwappablePriorityQueue.java new file mode 100644 index 000000000000..9a220ae38555 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/BlockingSwappablePriorityQueue.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileSwapManager; +import org.apache.nifi.events.EventReporter; + +import java.util.Collection; +import java.util.Set; + +public class BlockingSwappablePriorityQueue extends SwappablePriorityQueue { + private final Object monitor = new Object(); + + public BlockingSwappablePriorityQueue(final FlowFileSwapManager swapManager, final int swapThreshold, final EventReporter eventReporter, final FlowFileQueue flowFileQueue, + final DropFlowFileAction dropAction, final String partitionName) { + + super(swapManager, swapThreshold, eventReporter, flowFileQueue, dropAction, partitionName); + } + + @Override + public void put(final FlowFileRecord flowFile) { + super.put(flowFile); + + synchronized (monitor) { + monitor.notify(); + } + } + + @Override + public void putAll(final Collection flowFiles) { + super.putAll(flowFiles); + + synchronized (monitor) { + monitor.notifyAll(); + } + } + + public FlowFileRecord poll(final Set expiredRecords, final long expirationMillis, final long waitMillis) throws InterruptedException { + final long maxTimestamp = System.currentTimeMillis() + waitMillis; + + synchronized (monitor) { + FlowFileRecord flowFile = null; + do { + flowFile = super.poll(expiredRecords, expirationMillis); + if (flowFile != null) { + return flowFile; + } + + monitor.wait(waitMillis); + } while (System.currentTimeMillis() < maxTimestamp); + + return null; + } + } + + @Override + public void inheritQueueContents(final FlowFileQueueContents queueContents) { + // We have to override this method and synchronize on monitor before calling super.inheritQueueContents. + // If we don't do this, then our super class will obtain the write lock and call putAll, which will cause + // us to synchronize on monitor AFTER obtaining the write lock (WriteLock then monitor). + // If poll() is then called, we will synchronize on monitor, THEN attempt to obtain the write lock (monitor then WriteLock), + // which would cause a deadlock. 
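The wait/notify layering and the monitor-before-inner-lock ordering described above can be illustrated with a stripped-down, standalone wrapper; an ArrayDeque stands in for the delegate SwappablePriorityQueue, and this is only a sketch of the pattern, not the NiFi class itself:

import java.util.ArrayDeque;
import java.util.Deque;

class BlockingPollSketch<T> {
    private final Object monitor = new Object();
    private final Deque<T> delegate = new ArrayDeque<>();

    // Always take 'monitor' before touching the underlying structure, mirroring the
    // ordering rule described above (monitor first, then any inner lock).
    void put(final T item) {
        synchronized (monitor) {
            delegate.addLast(item);
            monitor.notifyAll();
        }
    }

    // Timed poll: wait on the monitor until an element arrives or the deadline passes.
    T poll(final long waitMillis) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + waitMillis;
        synchronized (monitor) {
            while (true) {
                final T item = delegate.pollFirst();
                if (item != null) {
                    return item;
                }
                final long remaining = deadline - System.currentTimeMillis();
                if (remaining <= 0) {
                    return null;
                }
                monitor.wait(remaining);
            }
        }
    }

    public static void main(final String[] args) throws InterruptedException {
        final BlockingPollSketch<String> queue = new BlockingPollSketch<>();
        new Thread(() -> queue.put("hello")).start();
        System.out.println(queue.poll(1000L)); // prints "hello" once the producer thread has run
    }
}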
+ synchronized (monitor) { + super.inheritQueueContents(queueContents); + } + } + +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/ConnectionEventListener.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/ConnectionEventListener.java new file mode 100644 index 000000000000..a3ae6ee79aa4 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/ConnectionEventListener.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +public interface ConnectionEventListener { + void triggerSourceEvent(); + + void triggerDestinationEvent(); +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/DropFlowFileAction.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/DropFlowFileAction.java new file mode 100644 index 000000000000..86cd1693cef9 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/DropFlowFileAction.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue; + +import java.io.IOException; +import java.util.List; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +public interface DropFlowFileAction { + QueueSize drop(List flowFiles, String requestor) throws IOException; +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/DropFlowFileRepositoryRecord.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/DropFlowFileRepositoryRecord.java new file mode 100644 index 000000000000..f47b4eb88b6c --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/DropFlowFileRepositoryRecord.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +import java.util.Collections; +import java.util.List; + +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.RepositoryRecord; +import org.apache.nifi.controller.repository.RepositoryRecordType; +import org.apache.nifi.controller.repository.claim.ContentClaim; + +public class DropFlowFileRepositoryRecord implements RepositoryRecord { + private final FlowFileQueue queue; + private final FlowFileRecord flowFile; + + public DropFlowFileRepositoryRecord(final FlowFileQueue queue, final FlowFileRecord flowFile) { + this.queue = queue; + this.flowFile = flowFile; + } + + @Override + public FlowFileQueue getDestination() { + return null; + } + + @Override + public FlowFileQueue getOriginalQueue() { + return queue; + } + + @Override + public RepositoryRecordType getType() { + return RepositoryRecordType.DELETE; + } + + @Override + public ContentClaim getCurrentClaim() { + return flowFile.getContentClaim(); + } + + @Override + public ContentClaim getOriginalClaim() { + return flowFile.getContentClaim(); + } + + @Override + public long getCurrentClaimOffset() { + return flowFile.getContentClaimOffset(); + } + + @Override + public FlowFileRecord getCurrent() { + return flowFile; + } + + @Override + public boolean isAttributesChanged() { + return false; + } + + @Override + public boolean isMarkedForAbort() { + return false; + } + + @Override + public String getSwapLocation() { + return null; + } + + @Override + public List getTransientClaims() { + return Collections.emptyList(); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueContents.java 
b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueContents.java new file mode 100644 index 000000000000..60ad64d83852 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueContents.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.util.List; + +public class FlowFileQueueContents { + private final List swapLocations; + private final List activeFlowFiles; + private final QueueSize swapSize; + + public FlowFileQueueContents(final List activeFlowFiles, final List swapLocations, final QueueSize swapSize) { + this.activeFlowFiles = activeFlowFiles; + this.swapLocations = swapLocations; + this.swapSize = swapSize; + } + + public List getActiveFlowFiles() { + return activeFlowFiles; + } + + public List getSwapLocations() { + return swapLocations; + } + + public QueueSize getSwapSize() { + return swapSize; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueFactory.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueFactory.java new file mode 100644 index 000000000000..dc6667f8d0da --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueFactory.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue; + +public interface FlowFileQueueFactory { + FlowFileQueue createFlowFileQueue(LoadBalanceStrategy loadBalanceStrategy, String partitioningAttribute, ConnectionEventListener connectionEventListener); +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueSize.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueSize.java new file mode 100644 index 000000000000..7ebc017aa9c5 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/FlowFileQueueSize.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +public class FlowFileQueueSize { + private final int activeQueueCount; + private final long activeQueueBytes; + private final int swappedCount; + private final long swappedBytes; + private final int swapFiles; + private final int unacknowledgedCount; + private final long unacknowledgedBytes; + + public FlowFileQueueSize(final int activeQueueCount, final long activeQueueBytes, final int swappedCount, final long swappedBytes, final int swapFileCount, + final int unacknowledgedCount, final long unacknowledgedBytes) { + this.activeQueueCount = activeQueueCount; + this.activeQueueBytes = activeQueueBytes; + this.swappedCount = swappedCount; + this.swappedBytes = swappedBytes; + this.swapFiles = swapFileCount; + this.unacknowledgedCount = unacknowledgedCount; + this.unacknowledgedBytes = unacknowledgedBytes; + } + + public int getSwappedCount() { + return swappedCount; + } + + public long getSwappedBytes() { + return swappedBytes; + } + + public int getSwapFileCount() { + return swapFiles; + } + + public int getActiveCount() { + return activeQueueCount; + } + + public long getActiveBytes() { + return activeQueueBytes; + } + + public int getUnacknowledgedCount() { + return unacknowledgedCount; + } + + public long getUnacknowledgedBytes() { + return unacknowledgedBytes; + } + + public boolean isEmpty() { + return activeQueueCount == 0 && swappedCount == 0 && unacknowledgedCount == 0; + } + + public QueueSize toQueueSize() { + return new QueueSize(activeQueueCount + swappedCount + unacknowledgedCount, activeQueueBytes + swappedBytes + unacknowledgedBytes); + } + + public QueueSize activeQueueSize() { + return new QueueSize(activeQueueCount, activeQueueBytes); + } + + public QueueSize unacknowledgedQueueSize() { + return new QueueSize(unacknowledgedCount, unacknowledgedBytes); + } + + public QueueSize swapQueueSize() { + return new QueueSize(swappedCount, swappedBytes); + } + + 
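As a small usage sketch of the value object just defined (assuming the FlowFileQueueSize class above and NiFi's QueueSize are on the classpath), the per-tier counts roll up into a single aggregate like so:

import org.apache.nifi.controller.queue.FlowFileQueueSize;
import org.apache.nifi.controller.queue.QueueSize;

class QueueSizeRollupSketch {
    public static void main(final String[] args) {
        // 100 active FlowFiles (1 MB), 10,000 swapped (100 MB) in one swap file, 5 unacknowledged (50 KB)
        final FlowFileQueueSize size = new FlowFileQueueSize(100, 1_000_000L, 10_000, 100_000_000L, 1, 5, 50_000L);

        final QueueSize total = size.toQueueSize();
        System.out.println("total objects = " + total.getObjectCount()); // 10105
        System.out.println("total bytes   = " + total.getByteCount());   // 101050000
        System.out.println("empty         = " + size.isEmpty());         // false
    }
}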
@Override + public String toString() { + return "FlowFile Queue Size[ ActiveQueue=[" + activeQueueCount + ", " + activeQueueBytes + + " Bytes], Swap Queue=[" + swappedCount + ", " + swappedBytes + + " Bytes], Swap Files=[" + swapFiles + "], Unacknowledged=[" + unacknowledgedCount + ", " + unacknowledgedBytes + " Bytes] ]"; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/MaxQueueSize.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/MaxQueueSize.java new file mode 100644 index 000000000000..94924354c9f8 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/MaxQueueSize.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +public class MaxQueueSize { + private final String maxSize; + private final long maxBytes; + private final long maxCount; + + public MaxQueueSize(final String maxSize, final long maxBytes, final long maxCount) { + this.maxSize = maxSize; + this.maxBytes = maxBytes; + this.maxCount = maxCount; + } + + public String getMaxSize() { + return maxSize; + } + + public long getMaxBytes() { + return maxBytes; + } + + public long getMaxCount() { + return maxCount; + } + + @Override + public String toString() { + return maxCount + " Objects/" + maxSize; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/NopConnectionEventListener.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/NopConnectionEventListener.java new file mode 100644 index 000000000000..d641da4ea2bc --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/NopConnectionEventListener.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +public class NopConnectionEventListener implements ConnectionEventListener { + @Override + public void triggerSourceEvent() { + } + + @Override + public void triggerDestinationEvent() { + + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/QueuePrioritizer.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/QueuePrioritizer.java new file mode 100644 index 000000000000..b78ccff6bfee --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/QueuePrioritizer.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; + +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.claim.ContentClaim; +import org.apache.nifi.flowfile.FlowFilePrioritizer; + +public class QueuePrioritizer implements Comparator, Serializable { + private static final long serialVersionUID = 1L; + private final transient List prioritizers = new ArrayList<>(); + + public QueuePrioritizer(final List priorities) { + if (null != priorities) { + prioritizers.addAll(priorities); + } + } + + @Override + public int compare(final FlowFileRecord f1, final FlowFileRecord f2) { + int returnVal = 0; + final boolean f1Penalized = f1.isPenalized(); + final boolean f2Penalized = f2.isPenalized(); + + if (f1Penalized && !f2Penalized) { + return 1; + } else if (!f1Penalized && f2Penalized) { + return -1; + } + + if (f1Penalized && f2Penalized) { + if (f1.getPenaltyExpirationMillis() < f2.getPenaltyExpirationMillis()) { + return -1; + } else if (f1.getPenaltyExpirationMillis() > f2.getPenaltyExpirationMillis()) { + return 1; + } + } + + if (!prioritizers.isEmpty()) { + for (final FlowFilePrioritizer prioritizer : prioritizers) { + returnVal = prioritizer.compare(f1, f2); + if (returnVal != 0) { + return returnVal; + } + } + } + + final ContentClaim claim1 = f1.getContentClaim(); + final ContentClaim claim2 = f2.getContentClaim(); + + // put the one without a claim first + if (claim1 == null && claim2 != null) { + return -1; + } else if (claim1 != null && claim2 == null) { + return 1; + } else if (claim1 != null && claim2 != null) { + final int claimComparison = claim1.compareTo(claim2); + if (claimComparison != 0) { + return claimComparison; + } + + final int 
claimOffsetComparison = Long.compare(f1.getContentClaimOffset(), f2.getContentClaimOffset()); + if (claimOffsetComparison != 0) { + return claimOffsetComparison; + } + } + + return Long.compare(f1.getId(), f2.getId()); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardFlowFileQueue.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardFlowFileQueue.java new file mode 100644 index 000000000000..8872ba7e6119 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardFlowFileQueue.java @@ -0,0 +1,221 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.controller.queue; + +import org.apache.nifi.controller.ProcessScheduler; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileRepository; +import org.apache.nifi.controller.repository.FlowFileSwapManager; +import org.apache.nifi.controller.repository.SwapSummary; +import org.apache.nifi.controller.repository.claim.ResourceClaimManager; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.flowfile.FlowFilePrioritizer; +import org.apache.nifi.processor.FlowFileFilter; +import org.apache.nifi.provenance.ProvenanceEventRepository; +import org.apache.nifi.util.concurrency.TimedLock; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +/** + * A FlowFileQueue is used to queue FlowFile objects that are awaiting further + * processing. Must be thread safe. 
+ * + */ +public class StandardFlowFileQueue extends AbstractFlowFileQueue implements FlowFileQueue { + + private final SwappablePriorityQueue queue; + private final ConnectionEventListener eventListener; + + private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true); + private final FlowFileSwapManager swapManager; + private final TimedLock writeLock; + + + public StandardFlowFileQueue(final String identifier, final ConnectionEventListener eventListener, final FlowFileRepository flowFileRepo, final ProvenanceEventRepository provRepo, + final ResourceClaimManager resourceClaimManager, final ProcessScheduler scheduler, final FlowFileSwapManager swapManager, final EventReporter eventReporter, + final int swapThreshold, final long defaultBackPressureObjectThreshold, final String defaultBackPressureDataSizeThreshold) { + + super(identifier, scheduler, flowFileRepo, provRepo, resourceClaimManager); + this.swapManager = swapManager; + this.queue = new SwappablePriorityQueue(swapManager, swapThreshold, eventReporter, this, this::drop, null); + this.eventListener = eventListener; + + writeLock = new TimedLock(this.lock.writeLock(), getIdentifier() + " Write Lock", 100); + + setBackPressureDataSizeThreshold(defaultBackPressureDataSizeThreshold); + setBackPressureObjectThreshold(defaultBackPressureObjectThreshold); + } + + @Override + public void startLoadBalancing() { + } + + @Override + public void stopLoadBalancing() { + } + + @Override + public void offloadQueue() { + } + + @Override + public void resetOffloadedQueue() { + } + + @Override + public boolean isActivelyLoadBalancing() { + return false; + } + + @Override + public void setPriorities(final List newPriorities) { + queue.setPriorities(newPriorities); + } + + @Override + public List getPriorities() { + return queue.getPriorities(); + } + + @Override + protected List getListableFlowFiles() { + return queue.getActiveFlowFiles(); + } + + @Override + public QueueDiagnostics getQueueDiagnostics() { + return new StandardQueueDiagnostics(queue.getQueueDiagnostics(), Collections.emptyList()); + } + + @Override + public void put(final FlowFileRecord file) { + queue.put(file); + + eventListener.triggerDestinationEvent(); + } + + @Override + public void putAll(final Collection files) { + queue.putAll(files); + + eventListener.triggerDestinationEvent(); + } + + + @Override + public FlowFileRecord poll(final Set expiredRecords) { + // First check if we have any records Pre-Fetched. 
+ final long expirationMillis = getFlowFileExpiration(TimeUnit.MILLISECONDS); + return queue.poll(expiredRecords, expirationMillis); + } + + + @Override + public List poll(int maxResults, final Set expiredRecords) { + return queue.poll(maxResults, expiredRecords, getFlowFileExpiration(TimeUnit.MILLISECONDS)); + } + + + + @Override + public void acknowledge(final FlowFileRecord flowFile) { + queue.acknowledge(flowFile); + + eventListener.triggerSourceEvent(); + } + + @Override + public void acknowledge(final Collection flowFiles) { + queue.acknowledge(flowFiles); + + eventListener.triggerSourceEvent(); + } + + @Override + public boolean isUnacknowledgedFlowFile() { + return queue.isUnacknowledgedFlowFile(); + } + + @Override + public QueueSize size() { + return queue.size(); + } + + @Override + public boolean isEmpty() { + return queue.getFlowFileQueueSize().isEmpty(); + } + + @Override + public boolean isActiveQueueEmpty() { + final FlowFileQueueSize queueSize = queue.getFlowFileQueueSize(); + return queueSize.getActiveCount() == 0 && queueSize.getSwappedCount() == 0; + } + + @Override + public List poll(final FlowFileFilter filter, final Set expiredRecords) { + return queue.poll(filter, expiredRecords, getFlowFileExpiration(TimeUnit.MILLISECONDS)); + } + + @Override + public void purgeSwapFiles() { + swapManager.purge(); + } + + @Override + public SwapSummary recoverSwappedFlowFiles() { + return queue.recoverSwappedFlowFiles(); + } + + @Override + public String toString() { + return "FlowFileQueue[id=" + getIdentifier() + "]"; + } + + + @Override + public FlowFileRecord getFlowFile(final String flowFileUuid) throws IOException { + return queue.getFlowFile(flowFileUuid); + } + + + @Override + protected void dropFlowFiles(final DropFlowFileRequest dropRequest, final String requestor) { + queue.dropFlowFiles(dropRequest, requestor); + } + + + /** + * Lock the queue so that other threads are unable to interact with the queue + */ + public void lock() { + writeLock.lock(); + } + + /** + * Unlock the queue + */ + public void unlock() { + writeLock.unlock("external unlock"); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardLocalQueuePartitionDiagnostics.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardLocalQueuePartitionDiagnostics.java new file mode 100644 index 000000000000..ff31e77e89be --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardLocalQueuePartitionDiagnostics.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue; + +public class StandardLocalQueuePartitionDiagnostics implements LocalQueuePartitionDiagnostics { + private final FlowFileQueueSize queueSize; + private final boolean anyPenalized; + private final boolean allPenalized; + + public StandardLocalQueuePartitionDiagnostics(final FlowFileQueueSize queueSize, final boolean anyPenalized, final boolean allPenalized) { + this.queueSize = queueSize; + this.anyPenalized = anyPenalized; + this.allPenalized = allPenalized; + } + + @Override + public QueueSize getUnacknowledgedQueueSize() { + return new QueueSize(queueSize.getUnacknowledgedCount(), queueSize.getUnacknowledgedCount()); + } + + @Override + public QueueSize getActiveQueueSize() { + return new QueueSize(queueSize.getActiveCount(), queueSize.getActiveBytes()); + } + + @Override + public QueueSize getSwapQueueSize() { + return new QueueSize(queueSize.getSwappedCount(), queueSize.getSwappedBytes()); + } + + @Override + public int getSwapFileCount() { + return queueSize.getSwapFileCount(); + } + + @Override + public boolean isAnyActiveFlowFilePenalized() { + return anyPenalized; + } + + @Override + public boolean isAllActiveFlowFilesPenalized() { + return allPenalized; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardQueueDiagnostics.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardQueueDiagnostics.java new file mode 100644 index 000000000000..be42e2ea8aed --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardQueueDiagnostics.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue; + +import java.util.List; + +public class StandardQueueDiagnostics implements QueueDiagnostics { + final LocalQueuePartitionDiagnostics localQueuePartitionDiagnostics; + final List remoteQueuePartitionDiagnostics; + + public StandardQueueDiagnostics(final LocalQueuePartitionDiagnostics localQueuePartitionDiagnostics, final List remoteQueuePartitionDiagnostics) { + this.localQueuePartitionDiagnostics = localQueuePartitionDiagnostics; + this.remoteQueuePartitionDiagnostics = remoteQueuePartitionDiagnostics; + } + + @Override + public LocalQueuePartitionDiagnostics getLocalQueuePartitionDiagnostics() { + return localQueuePartitionDiagnostics; + } + + @Override + public List getRemoteQueuePartitionDiagnostics() { + return remoteQueuePartitionDiagnostics; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardRemoteQueuePartitionDiagnostics.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardRemoteQueuePartitionDiagnostics.java new file mode 100644 index 000000000000..67900557f7ac --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/StandardRemoteQueuePartitionDiagnostics.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue; + +public class StandardRemoteQueuePartitionDiagnostics implements RemoteQueuePartitionDiagnostics { + private final String nodeId; + private final FlowFileQueueSize queueSize; + + public StandardRemoteQueuePartitionDiagnostics(final String nodeId, final FlowFileQueueSize queueSize) { + this.nodeId = nodeId; + this.queueSize = queueSize; + } + + @Override + public String getNodeIdentifier() { + return nodeId; + } + + @Override + public QueueSize getUnacknowledgedQueueSize() { + return new QueueSize(queueSize.getUnacknowledgedCount(), queueSize.getUnacknowledgedCount()); + } + + @Override + public QueueSize getActiveQueueSize() { + return new QueueSize(queueSize.getActiveCount(), queueSize.getActiveBytes()); + } + + @Override + public QueueSize getSwapQueueSize() { + return new QueueSize(queueSize.getSwappedCount(), queueSize.getSwappedBytes()); + } + + @Override + public int getSwapFileCount() { + return queueSize.getSwapFileCount(); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/SwappablePriorityQueue.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/SwappablePriorityQueue.java new file mode 100644 index 000000000000..6dfa77deea88 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/SwappablePriorityQueue.java @@ -0,0 +1,994 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue; + +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileSwapManager; +import org.apache.nifi.controller.repository.IncompleteSwapFileException; +import org.apache.nifi.controller.repository.SwapContents; +import org.apache.nifi.controller.repository.SwapSummary; +import org.apache.nifi.controller.repository.claim.ResourceClaim; +import org.apache.nifi.controller.swap.StandardSwapSummary; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.flowfile.FlowFile; +import org.apache.nifi.flowfile.FlowFilePrioritizer; +import org.apache.nifi.flowfile.attributes.CoreAttributes; +import org.apache.nifi.processor.FlowFileFilter; +import org.apache.nifi.processor.FlowFileFilter.FlowFileFilterResult; +import org.apache.nifi.reporting.Severity; +import org.apache.nifi.util.concurrency.TimedLock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.PriorityQueue; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantReadWriteLock; + + +public class SwappablePriorityQueue { + private static final Logger logger = LoggerFactory.getLogger(SwappablePriorityQueue.class); + private static final int SWAP_RECORD_POLL_SIZE = 10_000; + private static final int MAX_EXPIRED_RECORDS_PER_ITERATION = 10_000; + + private final int swapThreshold; + private final FlowFileSwapManager swapManager; + private final EventReporter eventReporter; + private final FlowFileQueue flowFileQueue; + private final DropFlowFileAction dropAction; + private final List priorities = new ArrayList<>(); + private final String swapPartitionName; + + private final List swapLocations = new ArrayList<>(); + private final AtomicReference size = new AtomicReference<>(new FlowFileQueueSize(0, 0L, 0, 0L, 0, 0, 0L)); + private final TimedLock readLock; + private final TimedLock writeLock; + + // We keep an "active queue" and a "swap queue" that both are able to hold records in heap. When + // FlowFiles are added to this FlowFileQueue, we first check if we are in "swap mode" and if so + // we add to the 'swap queue' instead of the 'active queue'. The code would be much simpler if we + // eliminated the 'swap queue' and instead just used the active queue and swapped out the 10,000 + // lowest priority FlowFiles from that. However, doing that would cause problems with the ordering + // of FlowFiles. If we swap out some FlowFiles, and then allow a new FlowFile to be written to the + // active queue, then we would end up processing the newer FlowFile before the swapped FlowFile. By + // keeping these separate, we are able to guarantee that FlowFiles are swapped in in the same order + // that they are swapped out. + // Guarded by lock. 
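The ordering concern described in the comment above can be modelled with a deliberately simplified, standalone sketch: once a threshold is crossed, new arrivals are parked in an overflow list (the analogue of the swap queue) and are handed back in arrival order before newer items, rather than being allowed to jump ahead. This models swapping with an in-memory list instead of disk and is not the NiFi implementation:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

class OverflowOrderingSketch {
    private final int threshold;
    private final Deque<String> active = new ArrayDeque<>();
    private final List<String> overflow = new ArrayList<>(); // plays the role of the swap queue
    private boolean overflowMode = false;

    OverflowOrderingSketch(final int threshold) {
        this.threshold = threshold;
    }

    // Once the active structure is full, new arrivals go to the overflow list; putting them
    // straight back onto the active structure would let them jump ahead of items set aside earlier.
    void put(final String item) {
        if (overflowMode || active.size() >= threshold) {
            overflowMode = true;
            overflow.add(item);
        } else {
            active.addLast(item);
        }
    }

    // Refill from the overflow in arrival order, so items set aside earlier are handed out
    // before anything that arrived after them.
    String poll() {
        if (active.isEmpty() && !overflow.isEmpty()) {
            active.addLast(overflow.remove(0));
        }
        final String item = active.pollFirst();
        overflowMode = !overflow.isEmpty();
        return item;
    }

    public static void main(final String[] args) {
        final OverflowOrderingSketch sketch = new OverflowOrderingSketch(2);
        sketch.put("a"); sketch.put("b"); sketch.put("c"); sketch.put("d");
        System.out.print(sketch.poll());
        System.out.print(sketch.poll());
        System.out.print(sketch.poll());
        System.out.println(sketch.poll()); // prints "abcd": original arrival order is preserved
    }
}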
+ private PriorityQueue activeQueue; + private ArrayList swapQueue; + private boolean swapMode = false; + + public SwappablePriorityQueue(final FlowFileSwapManager swapManager, final int swapThreshold, final EventReporter eventReporter, final FlowFileQueue flowFileQueue, + final DropFlowFileAction dropAction, final String swapPartitionName) { + this.swapManager = swapManager; + this.swapThreshold = swapThreshold; + + this.activeQueue = new PriorityQueue<>(20, new QueuePrioritizer(Collections.emptyList())); + this.swapQueue = new ArrayList<>(); + this.eventReporter = eventReporter; + this.flowFileQueue = flowFileQueue; + this.dropAction = dropAction; + this.swapPartitionName = swapPartitionName; + + final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true); + readLock = new TimedLock(lock.readLock(), flowFileQueue.getIdentifier() + " Read Lock", 100); + writeLock = new TimedLock(lock.writeLock(), flowFileQueue.getIdentifier() + " Write Lock", 100); + } + + private String getQueueIdentifier() { + return flowFileQueue.getIdentifier(); + } + + public synchronized List getPriorities() { + readLock.lock(); + try { + return Collections.unmodifiableList(priorities); + } finally { + readLock.unlock("getPriorities"); + } + } + + public void setPriorities(final List newPriorities) { + writeLock.lock(); + try { + priorities.clear(); + priorities.addAll(newPriorities); + + final PriorityQueue newQueue = new PriorityQueue<>(Math.max(20, activeQueue.size()), new QueuePrioritizer(newPriorities)); + newQueue.addAll(activeQueue); + activeQueue = newQueue; + } finally { + writeLock.unlock("setPriorities"); + } + } + + + public LocalQueuePartitionDiagnostics getQueueDiagnostics() { + readLock.lock(); + try { + final boolean anyPenalized = !activeQueue.isEmpty() && activeQueue.peek().isPenalized(); + final boolean allPenalized = anyPenalized && activeQueue.stream().anyMatch(FlowFileRecord::isPenalized); + + return new StandardLocalQueuePartitionDiagnostics(getFlowFileQueueSize(), anyPenalized, allPenalized); + } finally { + readLock.unlock("getQueueDiagnostics"); + } + } + + public List getActiveFlowFiles() { + readLock.lock(); + try { + return new ArrayList<>(activeQueue); + } finally { + readLock.unlock("getActiveFlowFiles"); + } + } + + public boolean isUnacknowledgedFlowFile() { + return getFlowFileQueueSize().getUnacknowledgedCount() > 0; + } + + /** + * This method MUST be called with the write lock held + */ + private void writeSwapFilesIfNecessary() { + if (swapQueue.size() < SWAP_RECORD_POLL_SIZE) { + return; + } + + migrateSwapToActive(); + + final int numSwapFiles = swapQueue.size() / SWAP_RECORD_POLL_SIZE; + + int originalSwapQueueCount = swapQueue.size(); + long originalSwapQueueBytes = 0L; + for (final FlowFileRecord flowFile : swapQueue) { + originalSwapQueueBytes += flowFile.getSize(); + } + + // Create a new Priority queue with the prioritizers that are set, but reverse the + // prioritizers because we want to pull the lowest-priority FlowFiles to swap out + final PriorityQueue tempQueue = new PriorityQueue<>(activeQueue.size() + swapQueue.size(), Collections.reverseOrder(new QueuePrioritizer(getPriorities()))); + tempQueue.addAll(activeQueue); + tempQueue.addAll(swapQueue); + + long bytesSwappedOut = 0L; + int flowFilesSwappedOut = 0; + final List swapLocations = new ArrayList<>(numSwapFiles); + for (int i = 0; i < numSwapFiles; i++) { + // Create a new swap file for the next SWAP_RECORD_POLL_SIZE records + final List toSwap = new ArrayList<>(SWAP_RECORD_POLL_SIZE); + for (int j = 
0; j < SWAP_RECORD_POLL_SIZE; j++) { + final FlowFileRecord flowFile = tempQueue.poll(); + toSwap.add(flowFile); + bytesSwappedOut += flowFile.getSize(); + flowFilesSwappedOut++; + } + + try { + Collections.reverse(toSwap); // currently ordered in reverse priority order based on the ordering of the temp queue. + final String swapLocation = swapManager.swapOut(toSwap, flowFileQueue, swapPartitionName); + swapLocations.add(swapLocation); + } catch (final IOException ioe) { + tempQueue.addAll(toSwap); // if we failed, we must add the FlowFiles back to the queue. + + final int objectCount = getFlowFileCount(); + logger.error("FlowFile Queue with identifier {} has {} FlowFiles queued up. Attempted to spill FlowFile information over to disk in order to avoid exhausting " + + "the Java heap space but failed to write information to disk due to {}", getQueueIdentifier(), objectCount, ioe.toString()); + logger.error("", ioe); + if (eventReporter != null) { + eventReporter.reportEvent(Severity.ERROR, "Failed to Overflow to Disk", "Flowfile Queue with identifier " + getQueueIdentifier() + " has " + objectCount + + " queued up. Attempted to spill FlowFile information over to disk in order to avoid exhausting the Java heap space but failed to write information to disk. " + + "See logs for more information."); + } + + break; + } + } + + // Pull any records off of the temp queue that won't fit back on the active queue, and add those to the + // swap queue. Then add the records back to the active queue. + swapQueue.clear(); + long updatedSwapQueueBytes = 0L; + while (tempQueue.size() > swapThreshold) { + final FlowFileRecord record = tempQueue.poll(); + swapQueue.add(record); + updatedSwapQueueBytes += record.getSize(); + } + + Collections.reverse(swapQueue); // currently ordered in reverse priority order based on the ordering of the temp queue + + // replace the contents of the active queue, since we've merged it with the swap queue. + activeQueue.clear(); + FlowFileRecord toRequeue; + long activeQueueBytes = 0L; + while ((toRequeue = tempQueue.poll()) != null) { + activeQueue.offer(toRequeue); + activeQueueBytes += toRequeue.getSize(); + } + + boolean updated = false; + while (!updated) { + final FlowFileQueueSize originalSize = getFlowFileQueueSize(); + + final int addedSwapRecords = swapQueue.size() - originalSwapQueueCount; + final long addedSwapBytes = updatedSwapQueueBytes - originalSwapQueueBytes; + + final FlowFileQueueSize newSize = new FlowFileQueueSize(activeQueue.size(), activeQueueBytes, + originalSize.getSwappedCount() + addedSwapRecords + flowFilesSwappedOut, + originalSize.getSwappedBytes() + addedSwapBytes + bytesSwappedOut, + originalSize.getSwapFileCount() + numSwapFiles, + originalSize.getUnacknowledgedCount(), originalSize.getUnacknowledgedBytes()); + updated = updateSize(originalSize, newSize); + + if (updated) { + logIfNegative(originalSize, newSize, "swap"); + } + } + + this.swapLocations.addAll(swapLocations); + } + + private int getFlowFileCount() { + final FlowFileQueueSize size = getFlowFileQueueSize(); + return size.getActiveCount() + size.getSwappedCount() + size.getUnacknowledgedCount(); + } + + /** + * If there are FlowFiles waiting on the swap queue, move them to the active + * queue until we meet our threshold. This prevents us from having to swap + * them to disk & then back out. + * + * This method MUST be called with the writeLock held. 
+ */ + private void migrateSwapToActive() { + // Migrate as many FlowFiles as we can from the Swap Queue to the Active Queue, so that we don't + // have to swap them out & then swap them back in. + // If we don't do this, we could get into a situation where we have potentially thousands of FlowFiles + // sitting on the Swap Queue but not getting processed because there aren't enough to be swapped out. + // In particular, this can happen if the queue is typically filled with surges. + // For example, if the queue has 25,000 FlowFiles come in, it may process 20,000 of them and leave + // 5,000 sitting on the Swap Queue. If it then takes an hour for an additional 5,000 FlowFiles to come in, + // those FlowFiles sitting on the Swap Queue will sit there for an hour, waiting to be swapped out and + // swapped back in again. + // Calling this method when records are polled prevents this condition by migrating FlowFiles from the + // Swap Queue to the Active Queue. However, we don't do this if there are FlowFiles already swapped out + // to disk, because we want them to be swapped back in in the same order that they were swapped out. + + final int activeQueueSize = activeQueue.size(); + if (activeQueueSize > 0 && activeQueueSize > swapThreshold - SWAP_RECORD_POLL_SIZE) { + return; + } + + // If there are swap files waiting to be swapped in, swap those in first. We do this in order to ensure that those that + // were swapped out first are then swapped back in first. If we instead just immediately migrated the FlowFiles from the + // swap queue to the active queue, and we never run out of FlowFiles in the active queue (because destination cannot + // keep up with queue), we will end up always processing the new FlowFiles first instead of the FlowFiles that arrived + // first. + if (!swapLocations.isEmpty()) { + swapIn(); + return; + } + + // this is the most common condition (nothing is swapped out), so do the check first and avoid the expense + // of other checks for 99.999% of the cases. + final FlowFileQueueSize size = getFlowFileQueueSize(); + if (size.getSwappedCount() == 0 && swapQueue.isEmpty()) { + return; + } + + if (size.getSwappedCount() > swapQueue.size()) { + // we already have FlowFiles swapped out, so we won't migrate the queue; we will wait for + // the files to be swapped back in first + return; + } + + int recordsMigrated = 0; + long bytesMigrated = 0L; + final Iterator swapItr = swapQueue.iterator(); + while (activeQueue.size() < swapThreshold && swapItr.hasNext()) { + final FlowFileRecord toMigrate = swapItr.next(); + activeQueue.add(toMigrate); + bytesMigrated += toMigrate.getSize(); + recordsMigrated++; + swapItr.remove(); + } + + if (recordsMigrated > 0) { + incrementActiveQueueSize(recordsMigrated, bytesMigrated); + incrementSwapQueueSize(-recordsMigrated, -bytesMigrated, 0); + } + + if (size.getSwappedCount() == 0) { + swapMode = false; + } + } + + private void swapIn() { + final String swapLocation = swapLocations.get(0); + boolean partialContents = false; + SwapContents swapContents; + try { + swapContents = swapManager.swapIn(swapLocation, flowFileQueue); + swapLocations.remove(0); + } catch (final IncompleteSwapFileException isfe) { + logger.error("Failed to swap in all FlowFiles from Swap File {}; Swap File ended prematurely. 
The records that were present will still be swapped in", swapLocation); + logger.error("", isfe); + swapContents = isfe.getPartialContents(); + partialContents = true; + swapLocations.remove(0); + } catch (final FileNotFoundException fnfe) { + logger.error("Failed to swap in FlowFiles from Swap File {} because the Swap File can no longer be found", swapLocation); + if (eventReporter != null) { + eventReporter.reportEvent(Severity.ERROR, "Swap File", "Failed to swap in FlowFiles from Swap File " + swapLocation + " because the Swap File can no longer be found"); + } + + swapLocations.remove(0); + return; + } catch (final IOException ioe) { + logger.error("Failed to swap in FlowFiles from Swap File {}; Swap File appears to be corrupt!", swapLocation); + logger.error("", ioe); + if (eventReporter != null) { + eventReporter.reportEvent(Severity.ERROR, "Swap File", "Failed to swap in FlowFiles from Swap File " + + swapLocation + "; Swap File appears to be corrupt! Some FlowFiles in the queue may not be accessible. See logs for more information."); + } + + // We do not remove the Swap File from swapLocations because the IOException may be recoverable later. For instance, the file may be on a network + // drive and we may have connectivity problems, etc. + return; + } catch (final Throwable t) { + logger.error("Failed to swap in FlowFiles from Swap File {}", swapLocation, t); + + // We do not remove the Swap File from swapLocations because this is an unexpected failure that may be retry-able. For example, if there were + // an OOME, etc. then we don't want the queue to still reflect that the data is around but never swap it in. By leaving the Swap File + // in swapLocations, we will continue to retry. + throw t; + } + + final QueueSize swapSize = swapContents.getSummary().getQueueSize(); + final long contentSize = swapSize.getByteCount(); + final int flowFileCount = swapSize.getObjectCount(); + incrementSwapQueueSize(-flowFileCount, -contentSize, -1); + + if (partialContents) { + // if we have partial results, we need to calculate the content size of the flowfiles + // actually swapped back in. + long contentSizeSwappedIn = 0L; + for (final FlowFileRecord swappedIn : swapContents.getFlowFiles()) { + contentSizeSwappedIn += swappedIn.getSize(); + } + + incrementActiveQueueSize(swapContents.getFlowFiles().size(), contentSizeSwappedIn); + logger.debug("Swapped in partial contents containing {} FlowFiles ({} bytes) from {}", swapContents.getFlowFiles().size(), contentSizeSwappedIn, swapLocation); + } else { + // we swapped in the whole swap file. We can just use the info that we got from the summary.
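// Worked example of the bookkeeping above and below (hypothetical numbers, for illustration only):
// a swap file whose summary reports 10,000 FlowFiles / 100,000,000 bytes is charged off the swap count either way:
//     incrementSwapQueueSize(-10_000, -100_000_000L, -1);
// if every record was recovered, the full 10,000 FlowFiles / 100,000,000 bytes are added to the active queue; if only
// 9,800 records (98,000,000 bytes) could be deserialized, only those recovered totals are added back, so the missing
// 200 FlowFiles simply drop out of the queue's size accounting rather than being counted twice.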
+ incrementActiveQueueSize(flowFileCount, contentSize); + logger.debug("Successfully swapped in Swap File {}", swapLocation); + } + + activeQueue.addAll(swapContents.getFlowFiles()); + } + + public QueueSize size() { + return getFlowFileQueueSize().toQueueSize(); + } + + public boolean isEmpty() { + return getFlowFileQueueSize().isEmpty(); + } + + public boolean isActiveQueueEmpty() { + final FlowFileQueueSize queueSize = getFlowFileQueueSize(); + return queueSize.getActiveCount() == 0 && queueSize.getSwappedCount() == 0; + } + + public void acknowledge(final FlowFileRecord flowFile) { + logger.debug("{} Acknowledging {}", this, flowFile); + incrementUnacknowledgedQueueSize(-1, -flowFile.getSize()); + } + + public void acknowledge(final Collection flowFiles) { + logger.debug("{} Acknowledging {}", this, flowFiles); + final long totalSize = flowFiles.stream().mapToLong(FlowFileRecord::getSize).sum(); + incrementUnacknowledgedQueueSize(-flowFiles.size(), -totalSize); + } + + + public void put(final FlowFileRecord flowFile) { + writeLock.lock(); + try { + if (swapMode || activeQueue.size() >= swapThreshold) { + swapQueue.add(flowFile); + incrementSwapQueueSize(1, flowFile.getSize(), 0); + swapMode = true; + writeSwapFilesIfNecessary(); + } else { + incrementActiveQueueSize(1, flowFile.getSize()); + activeQueue.add(flowFile); + } + + logger.debug("{} put to {}", flowFile, this); + } finally { + writeLock.unlock("put(FlowFileRecord)"); + } + } + + public void putAll(final Collection flowFiles) { + final int numFiles = flowFiles.size(); + long bytes = 0L; + for (final FlowFile flowFile : flowFiles) { + bytes += flowFile.getSize(); + } + + writeLock.lock(); + try { + if (swapMode || activeQueue.size() >= swapThreshold - numFiles) { + swapQueue.addAll(flowFiles); + incrementSwapQueueSize(numFiles, bytes, 0); + swapMode = true; + writeSwapFilesIfNecessary(); + } else { + incrementActiveQueueSize(numFiles, bytes); + activeQueue.addAll(flowFiles); + } + + logger.debug("{} put to {}", flowFiles, this); + } finally { + writeLock.unlock("putAll"); + } + } + + public FlowFileRecord poll(final Set expiredRecords, final long expirationMillis) { + FlowFileRecord flowFile; + + // First check if we have any records Pre-Fetched. 
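// Worked example for the swap-threshold check in put()/putAll() above (hypothetical numbers): with
// swapThreshold = 20_000 and 19_995 FlowFiles already in the active queue, putAll() of 10 more FlowFiles
// takes the swap branch because 19_995 >= 20_000 - 10; swapMode flips to true, the 10 records land on the
// in-memory swap queue, and writeSwapFilesIfNecessary() only writes a swap file to disk once that queue
// reaches SWAP_RECORD_POLL_SIZE records.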
+ writeLock.lock(); + try { + flowFile = doPoll(expiredRecords, expirationMillis); + + if (flowFile != null) { + logger.debug("{} poll() returning {}", this, flowFile); + incrementUnacknowledgedQueueSize(1, flowFile.getSize()); + } + + return flowFile; + } finally { + writeLock.unlock("poll(Set)"); + } + } + + + private FlowFileRecord doPoll(final Set expiredRecords, final long expirationMillis) { + FlowFileRecord flowFile; + boolean isExpired; + + migrateSwapToActive(); + + long expiredBytes = 0L; + do { + flowFile = this.activeQueue.poll(); + + isExpired = isExpired(flowFile, expirationMillis); + if (isExpired) { + expiredRecords.add(flowFile); + expiredBytes += flowFile.getSize(); + flowFile = null; + + if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) { + break; + } + } else if (flowFile != null && flowFile.isPenalized()) { + this.activeQueue.add(flowFile); + flowFile = null; + break; + } + + if (flowFile != null) { + incrementActiveQueueSize(-1, -flowFile.getSize()); + } + } while (isExpired); + + if (!expiredRecords.isEmpty()) { + incrementActiveQueueSize(-expiredRecords.size(), -expiredBytes); + } + + return flowFile; + } + + public List poll(int maxResults, final Set expiredRecords, final long expirationMillis) { + final List records = new ArrayList<>(Math.min(1, maxResults)); + + // First check if we have any records Pre-Fetched. + writeLock.lock(); + try { + doPoll(records, maxResults, expiredRecords, expirationMillis); + } finally { + writeLock.unlock("poll(int, Set)"); + } + + if (!records.isEmpty()) { + logger.debug("{} poll() returning {}", this, records); + } + + return records; + } + + public List poll(final FlowFileFilter filter, final Set expiredRecords, final long expirationMillis) { + long bytesPulled = 0L; + int flowFilesPulled = 0; + + writeLock.lock(); + try { + migrateSwapToActive(); + + final List selectedFlowFiles = new ArrayList<>(); + final List unselected = new ArrayList<>(); + + while (true) { + FlowFileRecord flowFile = this.activeQueue.poll(); + if (flowFile == null) { + break; + } + + final boolean isExpired = isExpired(flowFile, expirationMillis); + if (isExpired) { + expiredRecords.add(flowFile); + bytesPulled += flowFile.getSize(); + flowFilesPulled++; + + if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) { + break; + } else { + continue; + } + } else if (flowFile.isPenalized()) { + this.activeQueue.add(flowFile); + break; // just stop searching because the rest are all penalized. 
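// Why stopping is safe here (a simplified sketch of the assumed ordering, not the full QueuePrioritizer):
// the active queue's comparator sorts penalized FlowFiles behind all non-penalized ones, roughly
//     Comparator<FlowFileRecord> penalizedLast = Comparator.comparing(FlowFileRecord::isPenalized);
// so once poll() hands back a penalized FlowFile, nothing unpenalized can remain in the queue.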
+ } + + final FlowFileFilterResult result = filter.filter(flowFile); + if (result.isAccept()) { + bytesPulled += flowFile.getSize(); + flowFilesPulled++; + + incrementUnacknowledgedQueueSize(1, flowFile.getSize()); + selectedFlowFiles.add(flowFile); + } else { + unselected.add(flowFile); + } + + if (!result.isContinue()) { + break; + } + } + + this.activeQueue.addAll(unselected); + incrementActiveQueueSize(-flowFilesPulled, -bytesPulled); + + if (!selectedFlowFiles.isEmpty()) { + logger.debug("{} poll() returning {}", this, selectedFlowFiles); + } + + return selectedFlowFiles; + } finally { + writeLock.unlock("poll(Filter, Set)"); + } + } + + private void doPoll(final List records, int maxResults, final Set expiredRecords, final long expirationMillis) { + migrateSwapToActive(); + + final long bytesDrained = drainQueue(activeQueue, records, maxResults, expiredRecords, expirationMillis); + + long expiredBytes = 0L; + for (final FlowFileRecord record : expiredRecords) { + expiredBytes += record.getSize(); + } + + incrementActiveQueueSize(-(expiredRecords.size() + records.size()), -bytesDrained); + incrementUnacknowledgedQueueSize(records.size(), bytesDrained - expiredBytes); + } + + + protected boolean isExpired(final FlowFile flowFile, final long expirationMillis) { + return isLaterThan(getExpirationDate(flowFile, expirationMillis)); + } + + private boolean isLaterThan(final Long maxAge) { + if (maxAge == null) { + return false; + } + return maxAge < System.currentTimeMillis(); + } + + private Long getExpirationDate(final FlowFile flowFile, final long expirationMillis) { + if (flowFile == null) { + return null; + } + + if (expirationMillis <= 0) { + return null; + } else { + final long entryDate = flowFile.getEntryDate(); + final long expirationDate = entryDate + expirationMillis; + return expirationDate; + } + } + + + private long drainQueue(final Queue sourceQueue, final List destination, int maxResults, final Set expiredRecords, final long expirationMillis) { + long drainedSize = 0L; + FlowFileRecord pulled; + + while (destination.size() < maxResults && (pulled = sourceQueue.poll()) != null) { + if (isExpired(pulled, expirationMillis)) { + expiredRecords.add(pulled); + if (expiredRecords.size() >= MAX_EXPIRED_RECORDS_PER_ITERATION) { + break; + } + } else { + if (pulled.isPenalized()) { + sourceQueue.add(pulled); + break; + } + destination.add(pulled); + } + drainedSize += pulled.getSize(); + } + return drainedSize; + } + + + public FlowFileRecord getFlowFile(final String flowFileUuid) { + if (flowFileUuid == null) { + return null; + } + + readLock.lock(); + try { + // read through all of the FlowFiles in the queue, looking for the FlowFile with the given ID + for (final FlowFileRecord flowFile : activeQueue) { + if (flowFileUuid.equals(flowFile.getAttribute(CoreAttributes.UUID.key()))) { + return flowFile; + } + } + } finally { + readLock.unlock("getFlowFile"); + } + + return null; + } + + + public void dropFlowFiles(final DropFlowFileRequest dropRequest, final String requestor) { + final String requestIdentifier = dropRequest.getRequestIdentifier(); + + writeLock.lock(); + try { + dropRequest.setState(DropFlowFileState.DROPPING_FLOWFILES); + logger.debug("For DropFlowFileRequest {}, original size is {}", requestIdentifier, size()); + + try { + final List activeQueueRecords = new ArrayList<>(activeQueue); + + QueueSize droppedSize; + try { + if (dropRequest.getState() == DropFlowFileState.CANCELED) { + logger.info("Cancel requested for DropFlowFileRequest {}", requestIdentifier); + 
return; + } + + droppedSize = dropAction.drop(activeQueueRecords, requestor); + logger.debug("For DropFlowFileRequest {}, Dropped {} from active queue", requestIdentifier, droppedSize); + } catch (final IOException ioe) { + logger.error("Failed to drop the FlowFiles from queue {} due to {}", getQueueIdentifier(), ioe.toString()); + logger.error("", ioe); + + dropRequest.setState(DropFlowFileState.FAILURE, "Failed to drop FlowFiles due to " + ioe.toString()); + return; + } + + activeQueue.clear(); + incrementActiveQueueSize(-droppedSize.getObjectCount(), -droppedSize.getByteCount()); + dropRequest.setCurrentSize(size()); + dropRequest.setDroppedSize(dropRequest.getDroppedSize().add(droppedSize)); + + final QueueSize swapSize = getFlowFileQueueSize().swapQueueSize(); + logger.debug("For DropFlowFileRequest {}, Swap Queue has {} elements, Swapped Record Count = {}, Swapped Content Size = {}", + requestIdentifier, swapQueue.size(), swapSize.getObjectCount(), swapSize.getByteCount()); + if (dropRequest.getState() == DropFlowFileState.CANCELED) { + logger.info("Cancel requested for DropFlowFileRequest {}", requestIdentifier); + return; + } + + try { + droppedSize = dropAction.drop(swapQueue, requestor); + } catch (final IOException ioe) { + logger.error("Failed to drop the FlowFiles from queue {} due to {}", getQueueIdentifier(), ioe.toString()); + logger.error("", ioe); + + dropRequest.setState(DropFlowFileState.FAILURE, "Failed to drop FlowFiles due to " + ioe.toString()); + return; + } + + swapQueue.clear(); + dropRequest.setCurrentSize(size()); + dropRequest.setDroppedSize(dropRequest.getDroppedSize().add(droppedSize)); + swapMode = false; + incrementSwapQueueSize(-droppedSize.getObjectCount(), -droppedSize.getByteCount(), 0); + logger.debug("For DropFlowFileRequest {}, dropped {} from Swap Queue", requestIdentifier, droppedSize); + + final int swapFileCount = swapLocations.size(); + final Iterator swapLocationItr = swapLocations.iterator(); + while (swapLocationItr.hasNext()) { + final String swapLocation = swapLocationItr.next(); + + SwapContents swapContents = null; + try { + if (dropRequest.getState() == DropFlowFileState.CANCELED) { + logger.info("Cancel requested for DropFlowFileRequest {}", requestIdentifier); + return; + } + + swapContents = swapManager.swapIn(swapLocation, flowFileQueue); + droppedSize = dropAction.drop(swapContents.getFlowFiles(), requestor); + } catch (final IncompleteSwapFileException isfe) { + swapContents = isfe.getPartialContents(); + final String warnMsg = "Failed to swap in FlowFiles from Swap File " + swapLocation + " because the file was corrupt. " + + "Some FlowFiles may not be dropped from the queue until NiFi is restarted."; + + logger.warn(warnMsg); + if (eventReporter != null) { + eventReporter.reportEvent(Severity.WARNING, "Drop FlowFiles", warnMsg); + } + } catch (final IOException ioe) { + logger.error("Failed to swap in FlowFiles from Swap File {} in order to drop the FlowFiles for Connection {} due to {}", + swapLocation, getQueueIdentifier(), ioe.toString()); + logger.error("", ioe); + if (eventReporter != null) { + eventReporter.reportEvent(Severity.ERROR, "Drop FlowFiles", "Failed to swap in FlowFiles from Swap File " + swapLocation + + ". 
The FlowFiles contained in this Swap File will not be dropped from the queue"); + } + + dropRequest.setState(DropFlowFileState.FAILURE, "Failed to swap in FlowFiles from Swap File " + swapLocation + " due to " + ioe.toString()); + if (swapContents != null) { + activeQueue.addAll(swapContents.getFlowFiles()); // ensure that we don't lose the FlowFiles from our queue. + } + + return; + } + + dropRequest.setDroppedSize(dropRequest.getDroppedSize().add(droppedSize)); + incrementSwapQueueSize(-droppedSize.getObjectCount(), -droppedSize.getByteCount(), -1); + + dropRequest.setCurrentSize(size()); + swapLocationItr.remove(); + logger.debug("For DropFlowFileRequest {}, dropped {} for Swap File {}", requestIdentifier, droppedSize, swapLocation); + } + + logger.debug("Dropped FlowFiles from {} Swap Files", swapFileCount); + logger.info("Successfully dropped {} FlowFiles ({} bytes) from Connection with ID {} on behalf of {}", + dropRequest.getDroppedSize().getObjectCount(), dropRequest.getDroppedSize().getByteCount(), getQueueIdentifier(), requestor); + dropRequest.setState(DropFlowFileState.COMPLETE); + } catch (final Exception e) { + logger.error("Failed to drop FlowFiles from Connection with ID {} due to {}", getQueueIdentifier(), e.toString()); + logger.error("", e); + dropRequest.setState(DropFlowFileState.FAILURE, "Failed to drop FlowFiles due to " + e.toString()); + } + } finally { + writeLock.unlock("Drop FlowFiles"); + } + } + + + + public SwapSummary recoverSwappedFlowFiles() { + int swapFlowFileCount = 0; + long swapByteCount = 0L; + Long maxId = null; + List<ResourceClaim> resourceClaims = new ArrayList<>(); + final long startNanos = System.nanoTime(); + + writeLock.lock(); + try { + final List<String> swapLocations; + try { + swapLocations = swapManager.recoverSwapLocations(flowFileQueue, swapPartitionName); + } catch (final IOException ioe) { + logger.error("Failed to determine whether or not any Swap Files exist for FlowFile Queue {}", getQueueIdentifier()); + logger.error("", ioe); + if (eventReporter != null) { + eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to determine whether or not any Swap Files exist for FlowFile Queue " + + getQueueIdentifier() + "; see logs for more details"); + } + return null; + } + + logger.debug("Recovered {} Swap Files for {}: {}", swapLocations.size(), flowFileQueue, swapLocations); + for (final String swapLocation : swapLocations) { + try { + final SwapSummary summary = swapManager.getSwapSummary(swapLocation); + final QueueSize queueSize = summary.getQueueSize(); + final Long maxSwapRecordId = summary.getMaxFlowFileId(); + if (maxSwapRecordId != null) { + if (maxId == null || maxSwapRecordId > maxId) { + maxId = maxSwapRecordId; + } + } + + swapFlowFileCount += queueSize.getObjectCount(); + swapByteCount += queueSize.getByteCount(); + resourceClaims.addAll(summary.getResourceClaims()); + } catch (final IOException ioe) { + logger.error("Failed to recover FlowFiles from Swap File {}; the file appears to be corrupt", swapLocation, ioe.toString()); + logger.error("", ioe); + if (eventReporter != null) { + eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to recover FlowFiles from Swap File " + swapLocation + + "; the file appears to be corrupt. 
See logs for more details"); + } + } + } + + incrementSwapQueueSize(swapFlowFileCount, swapByteCount, swapLocations.size()); + this.swapLocations.addAll(swapLocations); + } finally { + writeLock.unlock("Recover Swap Files"); + } + + if (!swapLocations.isEmpty()) { + final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos); + logger.info("Recovered {} swap files for {} in {} millis", swapLocations.size(), this, millis); + } + + return new StandardSwapSummary(new QueueSize(swapFlowFileCount, swapByteCount), maxId, resourceClaims); + } + + + + protected void incrementActiveQueueSize(final int count, final long bytes) { + boolean updated = false; + while (!updated) { + final FlowFileQueueSize original = size.get(); + final FlowFileQueueSize newSize = new FlowFileQueueSize( + original.getActiveCount() + count, original.getActiveBytes() + bytes, + original.getSwappedCount(), original.getSwappedBytes(), original.getSwapFileCount(), + original.getUnacknowledgedCount(), original.getUnacknowledgedBytes()); + + updated = updateSize(original, newSize); + + if (updated) { + logIfNegative(original, newSize, "active"); + } + } + } + + private void incrementSwapQueueSize(final int count, final long bytes, final int fileCount) { + boolean updated = false; + while (!updated) { + final FlowFileQueueSize original = getFlowFileQueueSize(); + final FlowFileQueueSize newSize = new FlowFileQueueSize(original.getActiveCount(), original.getActiveBytes(), + original.getSwappedCount() + count, original.getSwappedBytes() + bytes, original.getSwapFileCount() + fileCount, + original.getUnacknowledgedCount(), original.getUnacknowledgedBytes()); + + updated = updateSize(original, newSize); + if (updated) { + logIfNegative(original, newSize, "swap"); + } + } + } + + private void incrementUnacknowledgedQueueSize(final int count, final long bytes) { + boolean updated = false; + while (!updated) { + final FlowFileQueueSize original = size.get(); + final FlowFileQueueSize newSize = new FlowFileQueueSize(original.getActiveCount(), original.getActiveBytes(), + original.getSwappedCount(), original.getSwappedBytes(), original.getSwapFileCount(), + original.getUnacknowledgedCount() + count, original.getUnacknowledgedBytes() + bytes); + + updated = updateSize(original, newSize); + + if (updated) { + logIfNegative(original, newSize, "Unacknowledged"); + } + } + } + + private void logIfNegative(final FlowFileQueueSize original, final FlowFileQueueSize newSize, final String counterName) { + if (newSize.getActiveBytes() < 0 || newSize.getActiveCount() < 0 + || newSize.getSwappedBytes() < 0 || newSize.getSwappedCount() < 0 + || newSize.getUnacknowledgedBytes() < 0 || newSize.getUnacknowledgedCount() < 0) { + + logger.error("Updated Size of Queue " + counterName + " from " + original + " to " + newSize, new RuntimeException("Cannot create negative queue size")); + } + } + + + protected boolean updateSize(final FlowFileQueueSize expected, final FlowFileQueueSize updated) { + return size.compareAndSet(expected, updated); + } + + public FlowFileQueueSize getFlowFileQueueSize() { + return size.get(); + } + + public void inheritQueueContents(final FlowFileQueueContents queueContents) { + writeLock.lock(); + try { + putAll(queueContents.getActiveFlowFiles()); + swapLocations.addAll(queueContents.getSwapLocations()); + incrementSwapQueueSize(queueContents.getSwapSize().getObjectCount(), queueContents.getSwapSize().getByteCount(), queueContents.getSwapLocations().size()); + } finally { + 
writeLock.unlock("inheritQueueContents"); + } + } + + public FlowFileQueueContents packageForRebalance(final String newPartitionName) { + writeLock.lock(); + try { + final List activeRecords = new ArrayList<>(this.activeQueue); + + final List updatedSwapLocations = new ArrayList<>(swapLocations.size()); + for (final String swapLocation : swapLocations) { + try { + final String updatedSwapLocation = swapManager.changePartitionName(swapLocation, newPartitionName); + updatedSwapLocations.add(updatedSwapLocation); + } catch (final IOException ioe) { + logger.error("Failed to update Swap File {} to reflect that the contents are now owned by Partition '{}'", swapLocation, newPartitionName, ioe); + } + } + + this.swapLocations.clear(); + this.activeQueue.clear(); + + final int swapQueueCount = swapQueue.size(); + final long swapQueueBytes = swapQueue.stream().mapToLong(FlowFileRecord::getSize).sum(); + activeRecords.addAll(swapQueue); + swapQueue.clear(); + + this.swapMode = false; + + QueueSize swapSize; + boolean updated; + do { + final FlowFileQueueSize currentSize = getFlowFileQueueSize(); + swapSize = new QueueSize(currentSize.getSwappedCount() - swapQueueCount, currentSize.getSwappedBytes() - swapQueueBytes); + + final FlowFileQueueSize updatedSize = new FlowFileQueueSize(0, 0, 0, 0, 0, currentSize.getUnacknowledgedCount(), currentSize.getUnacknowledgedBytes()); + updated = updateSize(currentSize, updatedSize); + } while (!updated); + + return new FlowFileQueueContents(activeRecords, updatedSwapLocations, swapSize); + } finally { + writeLock.unlock("packageForRebalance(SwappablePriorityQueue)"); + } + } + + @Override + public String toString() { + return "SwappablePriorityQueue[queueId=" + flowFileQueue.getIdentifier() + "]"; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/TimePeriod.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/TimePeriod.java new file mode 100644 index 000000000000..40220cc65b07 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/TimePeriod.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue; + +public class TimePeriod { + private final String period; + private final long millis; + + public TimePeriod(final String period, final long millis) { + this.period = period; + this.millis = millis; + } + + public String getPeriod() { + return period; + } + + public long getMillis() { + return millis; + } + + @Override + public String toString() { + return period; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/ContentRepositoryFlowFileAccess.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/ContentRepositoryFlowFileAccess.java new file mode 100644 index 000000000000..28ae05d92a79 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/ContentRepositoryFlowFileAccess.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered; + +import org.apache.nifi.controller.repository.ContentNotFoundException; +import org.apache.nifi.controller.repository.ContentRepository; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.io.LimitedInputStream; +import org.apache.nifi.stream.io.StreamUtils; + +import java.io.EOFException; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; + +public class ContentRepositoryFlowFileAccess implements FlowFileContentAccess { + private final ContentRepository contentRepository; + + public ContentRepositoryFlowFileAccess(final ContentRepository contentRepository) { + this.contentRepository = contentRepository; + } + + @Override + public InputStream read(final FlowFileRecord flowFile) throws IOException { + final InputStream rawIn; + try { + rawIn = contentRepository.read(flowFile.getContentClaim()); + } catch (final ContentNotFoundException cnfe) { + throw new ContentNotFoundException(flowFile, flowFile.getContentClaim(), cnfe.getMessage()); + } + + if (flowFile.getContentClaimOffset() > 0) { + try { + StreamUtils.skip(rawIn, flowFile.getContentClaimOffset()); + } catch (final EOFException eof) { + throw new ContentNotFoundException(flowFile, flowFile.getContentClaim(), "FlowFile has a Content Claim Offset of " + + flowFile.getContentClaimOffset() + " bytes but the Content Claim does not have that many bytes"); + } + } + + final InputStream limitedIn = new LimitedInputStream(rawIn, flowFile.getSize()); + // Wrap the Content Repository's InputStream with one that ensures that we are able to consume all of the FlowFile's content or else throws EOFException + return new FilterInputStream(limitedIn) { + private long bytesRead = 0; + + @Override + public int read(final byte[] b, final int off, final int len) throws IOException { + return ensureNotTruncated(limitedIn.read(b, off, len)); + } + + @Override + public int read(final byte[] b) throws IOException { + return ensureNotTruncated(limitedIn.read(b)); + } + + @Override + public int read() throws IOException { + return ensureNotTruncated(limitedIn.read()); + } + + private int ensureNotTruncated(final int length) throws EOFException { + if (length > -1) { + bytesRead += length; + return length; + } + + if (bytesRead < flowFile.getSize()) { + throw new EOFException("Expected " + flowFile + " to contain " + flowFile.getSize() + " bytes but the content repository only had " + bytesRead + " bytes for it"); + } + + return length; + } + }; + } + +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/FlowFileContentAccess.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/FlowFileContentAccess.java new file mode 100644 index 000000000000..4d956c6fbb20 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/FlowFileContentAccess.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered; + +import java.io.IOException; +import java.io.InputStream; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +public interface FlowFileContentAccess { + + InputStream read(FlowFileRecord flowFile) throws IOException; + +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/SimpleLimitThreshold.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/SimpleLimitThreshold.java new file mode 100644 index 000000000000..8b0cfa227591 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/SimpleLimitThreshold.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered; + +public class SimpleLimitThreshold implements TransactionThreshold { + private final int countLimit; + private final long byteLimit; + + private int count = 0; + private long bytes = 0L; + + public SimpleLimitThreshold(final int count, final long bytes) { + this.countLimit = count; + this.byteLimit = bytes; + } + + @Override + public void adjust(final int flowFileCount, final long flowFileSize) { + count += flowFileCount; + bytes += flowFileSize; + } + + @Override + public boolean isThresholdMet() { + return count >= countLimit || bytes >= byteLimit; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/SocketLoadBalancedFlowFileQueue.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/SocketLoadBalancedFlowFileQueue.java new file mode 100644 index 000000000000..7b3a21124a1f --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/SocketLoadBalancedFlowFileQueue.java @@ -0,0 +1,1146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered; + +import org.apache.nifi.cluster.coordination.ClusterCoordinator; +import org.apache.nifi.cluster.coordination.ClusterTopologyEventListener; +import org.apache.nifi.cluster.coordination.node.NodeConnectionState; +import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus; +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.controller.ProcessScheduler; +import org.apache.nifi.controller.queue.AbstractFlowFileQueue; +import org.apache.nifi.controller.queue.ConnectionEventListener; +import org.apache.nifi.controller.queue.DropFlowFileRequest; +import org.apache.nifi.controller.queue.DropFlowFileState; +import org.apache.nifi.controller.queue.FlowFileQueueContents; +import org.apache.nifi.controller.queue.LoadBalanceStrategy; +import org.apache.nifi.controller.queue.LoadBalancedFlowFileQueue; +import org.apache.nifi.controller.queue.LocalQueuePartitionDiagnostics; +import org.apache.nifi.controller.queue.QueueDiagnostics; +import org.apache.nifi.controller.queue.QueueSize; +import org.apache.nifi.controller.queue.RemoteQueuePartitionDiagnostics; +import org.apache.nifi.controller.queue.StandardQueueDiagnostics; +import org.apache.nifi.controller.queue.SwappablePriorityQueue; +import org.apache.nifi.controller.queue.clustered.client.async.AsyncLoadBalanceClientRegistry; +import org.apache.nifi.controller.queue.clustered.partition.CorrelationAttributePartitioner; +import org.apache.nifi.controller.queue.clustered.partition.FirstNodePartitioner; +import org.apache.nifi.controller.queue.clustered.partition.FlowFilePartitioner; +import org.apache.nifi.controller.queue.clustered.partition.LocalPartitionPartitioner; +import org.apache.nifi.controller.queue.clustered.partition.LocalQueuePartition; +import org.apache.nifi.controller.queue.clustered.partition.NonLocalPartitionPartitioner; +import org.apache.nifi.controller.queue.clustered.partition.QueuePartition; +import org.apache.nifi.controller.queue.clustered.partition.RebalancingPartition; +import org.apache.nifi.controller.queue.clustered.partition.RemoteQueuePartition; +import org.apache.nifi.controller.queue.clustered.partition.RoundRobinPartitioner; +import org.apache.nifi.controller.queue.clustered.partition.StandardRebalancingPartition; +import org.apache.nifi.controller.queue.clustered.partition.SwappablePriorityQueueLocalPartition; +import org.apache.nifi.controller.repository.ContentRepository; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileRepository; +import org.apache.nifi.controller.repository.FlowFileSwapManager; +import org.apache.nifi.controller.repository.RepositoryRecord; +import org.apache.nifi.controller.repository.StandardRepositoryRecord; +import 
org.apache.nifi.controller.repository.SwapSummary; +import org.apache.nifi.controller.repository.claim.ContentClaim; +import org.apache.nifi.controller.repository.claim.ResourceClaim; +import org.apache.nifi.controller.repository.claim.ResourceClaimManager; +import org.apache.nifi.controller.swap.StandardSwapSummary; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.flowfile.FlowFilePrioritizer; +import org.apache.nifi.processor.FlowFileFilter; +import org.apache.nifi.provenance.ProvenanceEventBuilder; +import org.apache.nifi.provenance.ProvenanceEventRecord; +import org.apache.nifi.provenance.ProvenanceEventRepository; +import org.apache.nifi.provenance.ProvenanceEventType; +import org.apache.nifi.provenance.StandardProvenanceEventRecord; +import org.apache.nifi.reporting.Severity; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Function; +import java.util.stream.Collectors; + +public class SocketLoadBalancedFlowFileQueue extends AbstractFlowFileQueue implements LoadBalancedFlowFileQueue { + private static final Logger logger = LoggerFactory.getLogger(SocketLoadBalancedFlowFileQueue.class); + private static final int NODE_SWAP_THRESHOLD = 1000; + + private final List prioritizers = new ArrayList<>(); + private final ConnectionEventListener eventListener; + private final AtomicReference totalSize = new AtomicReference<>(new QueueSize(0, 0L)); + private final LocalQueuePartition localPartition; + private final RebalancingPartition rebalancingPartition; + private final FlowFileSwapManager swapManager; + private final EventReporter eventReporter; + private final ClusterCoordinator clusterCoordinator; + private final AsyncLoadBalanceClientRegistry clientRegistry; + + private final FlowFileRepository flowFileRepo; + private final ProvenanceEventRepository provRepo; + private final ContentRepository contentRepo; + private final Set nodeIdentifiers; + + private final ReadWriteLock partitionLock = new ReentrantReadWriteLock(); + private final Lock partitionReadLock = partitionLock.readLock(); + private final Lock partitionWriteLock = partitionLock.writeLock(); + private QueuePartition[] queuePartitions; + private FlowFilePartitioner partitioner; + private boolean stopped = true; + private volatile boolean offloaded = false; + + + public SocketLoadBalancedFlowFileQueue(final String identifier, final ConnectionEventListener eventListener, final ProcessScheduler scheduler, final FlowFileRepository flowFileRepo, + final ProvenanceEventRepository provRepo, final ContentRepository contentRepo, final ResourceClaimManager resourceClaimManager, + final ClusterCoordinator clusterCoordinator, final AsyncLoadBalanceClientRegistry clientRegistry, final FlowFileSwapManager swapManager, + final int swapThreshold, final EventReporter eventReporter) { + + super(identifier, scheduler, flowFileRepo, provRepo, resourceClaimManager); + this.eventListener = eventListener; + this.eventReporter = eventReporter; + this.swapManager = 
swapManager; + this.flowFileRepo = flowFileRepo; + this.provRepo = provRepo; + this.contentRepo = contentRepo; + this.clusterCoordinator = clusterCoordinator; + this.clientRegistry = clientRegistry; + + localPartition = new SwappablePriorityQueueLocalPartition(swapManager, swapThreshold, eventReporter, this, this::drop); + rebalancingPartition = new StandardRebalancingPartition(swapManager, swapThreshold, eventReporter, this, this::drop); + + // Create a RemoteQueuePartition for each node + nodeIdentifiers = clusterCoordinator == null ? Collections.emptySet() : clusterCoordinator.getNodeIdentifiers(); + + final List sortedNodeIdentifiers = new ArrayList<>(nodeIdentifiers); + sortedNodeIdentifiers.sort(Comparator.comparing(NodeIdentifier::getApiAddress)); + + if (sortedNodeIdentifiers.isEmpty()) { + queuePartitions = new QueuePartition[] { localPartition }; + } else { + queuePartitions = new QueuePartition[sortedNodeIdentifiers.size()]; + + for (int i = 0; i < sortedNodeIdentifiers.size(); i++) { + final NodeIdentifier nodeId = sortedNodeIdentifiers.get(i); + if (nodeId.equals(clusterCoordinator.getLocalNodeIdentifier())) { + queuePartitions[i] = localPartition; + } else { + queuePartitions[i] = createRemotePartition(nodeId); + } + } + + } + + partitioner = new LocalPartitionPartitioner(); + + if (clusterCoordinator != null) { + clusterCoordinator.registerEventListener(new ClusterEventListener()); + } + + rebalancingPartition.start(partitioner); + } + + + @Override + public synchronized void setLoadBalanceStrategy(final LoadBalanceStrategy strategy, final String partitioningAttribute) { + final LoadBalanceStrategy currentStrategy = getLoadBalanceStrategy(); + final String currentPartitioningAttribute = getPartitioningAttribute(); + + super.setLoadBalanceStrategy(strategy, partitioningAttribute); + + if (strategy == currentStrategy && Objects.equals(partitioningAttribute, currentPartitioningAttribute)) { + // Nothing changed. + return; + } + + if (clusterCoordinator == null) { + // Not clustered so nothing to worry about. + return; + } + + if (!offloaded) { + // We are already load balancing but are changing how we are load balancing. + final FlowFilePartitioner partitioner; + partitioner = getPartitionerForLoadBalancingStrategy(strategy, partitioningAttribute); + + setFlowFilePartitioner(partitioner); + } + } + + private FlowFilePartitioner getPartitionerForLoadBalancingStrategy(LoadBalanceStrategy strategy, String partitioningAttribute) { + FlowFilePartitioner partitioner; + switch (strategy) { + case DO_NOT_LOAD_BALANCE: + partitioner = new LocalPartitionPartitioner(); + break; + case PARTITION_BY_ATTRIBUTE: + partitioner = new CorrelationAttributePartitioner(partitioningAttribute); + break; + case ROUND_ROBIN: + partitioner = new RoundRobinPartitioner(); + break; + case SINGLE_NODE: + partitioner = new FirstNodePartitioner(); + break; + default: + throw new IllegalArgumentException(); + } + return partitioner; + } + + @Override + public void offloadQueue() { + if (clusterCoordinator == null) { + // Not clustered, cannot offload the queue to other nodes + return; + } + + logger.debug("Setting queue {} on node {} as offloaded", this, clusterCoordinator.getLocalNodeIdentifier()); + offloaded = true; + + partitionWriteLock.lock(); + try { + final Set nodesToKeep = new HashSet<>(); + + // If we have any nodes that are connected, we only want to send data to the connected nodes. 
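// For illustration (hypothetical node names): if the cluster is {nodeA, nodeB, nodeC} and nodeC is being
// offloaded while nodeA and nodeB are still CONNECTED, the loop below keeps {nodeA, nodeB} so that
//     setNodeIdentifiers(nodesToKeep, false);
//     setFlowFilePartitioner(new NonLocalPartitionPartitioner());
// routes everything away from the local partition and only toward nodes that can still accept data.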
+ for (final QueuePartition partition : queuePartitions) { + final Optional nodeIdOption = partition.getNodeIdentifier(); + if (!nodeIdOption.isPresent()) { + continue; + } + + final NodeIdentifier nodeId = nodeIdOption.get(); + final NodeConnectionStatus status = clusterCoordinator.getConnectionStatus(nodeId); + if (status != null && status.getState() == NodeConnectionState.CONNECTED) { + nodesToKeep.add(nodeId); + } + } + + if (!nodesToKeep.isEmpty()) { + setNodeIdentifiers(nodesToKeep, false); + } + + // Update our partitioner so that we don't keep any data on the local partition + setFlowFilePartitioner(new NonLocalPartitionPartitioner()); + } finally { + partitionWriteLock.unlock(); + } + } + + @Override + public void resetOffloadedQueue() { + if (clusterCoordinator == null) { + // Not clustered, was not offloading the queue to other nodes + return; + } + + if (offloaded) { + // queue was offloaded previously, allow files to be added to the local partition + offloaded = false; + logger.debug("Queue {} on node {} was previously offloaded, resetting offloaded status to {}", + this, clusterCoordinator.getLocalNodeIdentifier(), offloaded); + // reset the partitioner based on the load balancing strategy, since offloading previously changed the partitioner + FlowFilePartitioner partitioner = getPartitionerForLoadBalancingStrategy(getLoadBalanceStrategy(), getPartitioningAttribute()); + setFlowFilePartitioner(partitioner); + logger.debug("Queue {} is no longer offloaded, restored load balance strategy to {} and partitioning attribute to \"{}\"", + this, getLoadBalanceStrategy(), getPartitioningAttribute()); + } + } + + public synchronized void startLoadBalancing() { + logger.debug("{} started. Will begin distributing FlowFiles across the cluster", this); + + if (!stopped) { + return; + } + + stopped = false; + + partitionReadLock.lock(); + try { + rebalancingPartition.start(partitioner); + + for (final QueuePartition queuePartition : queuePartitions) { + queuePartition.start(partitioner); + } + } finally { + partitionReadLock.unlock(); + } + } + + public synchronized void stopLoadBalancing() { + logger.debug("{} stopped. 
Will no longer distribute FlowFiles across the cluster", this); + + if (stopped) { + return; + } + + stopped = true; + + partitionReadLock.lock(); + try { + rebalancingPartition.stop(); + for (final QueuePartition queuePartition : queuePartitions) { + queuePartition.stop(); + } + } finally { + partitionReadLock.unlock(); + } + } + + @Override + public boolean isActivelyLoadBalancing() { + final QueueSize size = size(); + if (size.getObjectCount() == 0) { + return false; + } + + final int localObjectCount = localPartition.size().getObjectCount(); + return (size.getObjectCount() > localObjectCount); + } + + private QueuePartition createRemotePartition(final NodeIdentifier nodeId) { + final SwappablePriorityQueue partitionQueue = new SwappablePriorityQueue(swapManager, NODE_SWAP_THRESHOLD, eventReporter, this, this::drop, nodeId.getId()); + + final TransferFailureDestination failureDestination = new TransferFailureDestination() { + @Override + public void putAll(final Collection flowFiles, final FlowFilePartitioner partitionerUsed) { + if (flowFiles.isEmpty()) { + return; + } + + partitionReadLock.lock(); + try { + if (isRebalanceOnFailure(partitionerUsed)) { + logger.debug("Transferring {} FlowFiles to Rebalancing Partition from node {}", flowFiles.size(), nodeId); + rebalancingPartition.rebalance(flowFiles); + } else { + logger.debug("Returning {} FlowFiles to their queue for node {} because Partitioner {} indicates that the FlowFiles should stay where they are", flowFiles.size(), nodeId, + partitioner); + partitionQueue.putAll(flowFiles); + } + } finally { + partitionReadLock.unlock(); + } + } + + @Override + public void putAll(final Function queueContentsFunction, final FlowFilePartitioner partitionerUsed) { + partitionReadLock.lock(); + try { + if (isRebalanceOnFailure(partitionerUsed)) { + final FlowFileQueueContents contents = queueContentsFunction.apply(rebalancingPartition.getSwapPartitionName()); + rebalancingPartition.rebalance(contents); + logger.debug("Transferring all {} FlowFiles and {} Swap Files queued for node {} to Rebalancing Partition", + contents.getActiveFlowFiles().size(), contents.getSwapLocations().size(), nodeId); + } else { + logger.debug("Will not transfer FlowFiles queued for node {} to Rebalancing Partition because Partitioner {} indicates that the FlowFiles should stay where they are", nodeId, + partitioner); + } + } finally { + partitionReadLock.unlock(); + } + } + + @Override + public boolean isRebalanceOnFailure(final FlowFilePartitioner partitionerUsed) { + partitionReadLock.lock(); + try { + if (!partitionerUsed.equals(partitioner)) { + return true; + } + + return partitioner.isRebalanceOnFailure(); + } finally { + partitionReadLock.unlock(); + } + } + }; + + final QueuePartition partition = new RemoteQueuePartition(nodeId, partitionQueue, failureDestination, flowFileRepo, provRepo, contentRepo, clientRegistry, this); + + if (!stopped) { + partition.start(partitioner); + } + + return partition; + } + + @Override + public synchronized List getPriorities() { + return new ArrayList<>(prioritizers); + } + + @Override + public synchronized void setPriorities(final List newPriorities) { + prioritizers.clear(); + prioritizers.addAll(newPriorities); + + partitionReadLock.lock(); + try { + for (final QueuePartition partition : queuePartitions) { + partition.setPriorities(newPriorities); + } + + rebalancingPartition.setPriorities(newPriorities); + } finally { + partitionReadLock.unlock(); + } + } + + + @Override + public SwapSummary recoverSwappedFlowFiles() { + 
partitionReadLock.lock(); + try { + final List<SwapSummary> summaries = new ArrayList<>(queuePartitions.length); + + // Discover the names of all partitions that have data swapped out. + Set<String> partitionNamesToRecover; + try { + partitionNamesToRecover = swapManager.getSwappedPartitionNames(this); + logger.debug("For {}, partition names to recover are {}", this, partitionNamesToRecover); + } catch (final IOException ioe) { + logger.error("Failed to determine the names of the Partitions that have swapped FlowFiles for queue with ID {}.", getIdentifier(), ioe); + if (eventReporter != null) { + eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to determine the names of Partitions that have swapped FlowFiles for queue with ID " + + getIdentifier() + "; see logs for more details"); + } + + partitionNamesToRecover = Collections.emptySet(); + } + + // For each Queue Partition, recover swapped FlowFiles. + for (final QueuePartition partition : queuePartitions) { + partitionNamesToRecover.remove(partition.getSwapPartitionName()); + + final SwapSummary summary = partition.recoverSwappedFlowFiles(); + summaries.add(summary); + } + + // Recover any swap files that may belong to the 'rebalancing' partition + partitionNamesToRecover.remove(rebalancingPartition.getSwapPartitionName()); + final SwapSummary rebalancingSwapSummary = rebalancingPartition.recoverSwappedFlowFiles(); + summaries.add(rebalancingSwapSummary); + + // If there is any Partition that has swapped FlowFiles but for which we don't have a Queue Partition created, we need to recover those swap locations + // and get their swap summaries now. We then transfer any Swap Files that existed for that partition to the 'rebalancing' partition so that the data + // will be rebalanced against the existing partitions. We do this to handle the following scenario: + // - NiFi is running in a cluster with 5 nodes. + // - A queue is load balanced across the cluster, with all partitions having data swapped out. + // - NiFi is shutdown and upgraded to a new version. + // - Admin failed to copy over the Managed State for the nodes from the old version to the new version. + // - Upon restart, NiFi does not know about any of the nodes in the cluster. + // - When a node joins and recovers swap locations, it is the only known node. + // - NiFi will not know that it needs a Remote Partition for nodes 2-5. + // - If we don't recover those partitions here, then we'll end up not recovering the Swap Files at all, which will result in the Content Claims + // having their Claimant Counts decremented, which could lead to loss of the data from the Content Repository. + for (final String partitionName : partitionNamesToRecover) { + logger.info("Found Swap Files for FlowFile Queue with Identifier {} and Partition {} that has not been recovered yet. " + + "Will recover Swap Files for this Partition even though no partition exists with this name yet", getIdentifier(), partitionName); + + try { + final List<String> swapLocations = swapManager.recoverSwapLocations(this, partitionName); + for (final String swapLocation : swapLocations) { + final SwapSummary swapSummary = swapManager.getSwapSummary(swapLocation); + summaries.add(swapSummary); + + // Transfer the swap file to the rebalancing partition. 
+ final String updatedSwapLocation = swapManager.changePartitionName(swapLocation, rebalancingPartition.getSwapPartitionName()); + final FlowFileQueueContents queueContents = new FlowFileQueueContents(Collections.emptyList(), Collections.singletonList(updatedSwapLocation), swapSummary.getQueueSize()); + rebalancingPartition.rebalance(queueContents); + } + } catch (IOException e) { + logger.error("Failed to determine whether or not any Swap Files exist for FlowFile Queue {} and Partition {}", getIdentifier(), partitionName, e); + if (eventReporter != null) { + eventReporter.reportEvent(Severity.ERROR, "FlowFile Swapping", "Failed to determine whether or not any Swap Files exist for FlowFile Queue " + + getIdentifier() + "; see logs for more details"); + } + } + } + + Long maxId = null; + QueueSize totalQueueSize = new QueueSize(0, 0L); + final List resourceClaims = new ArrayList<>(); + + for (final SwapSummary summary : summaries) { + Long summaryMaxId = summary.getMaxFlowFileId(); + if (summaryMaxId != null && (maxId == null || summaryMaxId > maxId)) { + maxId = summaryMaxId; + } + + final QueueSize summaryQueueSize = summary.getQueueSize(); + totalQueueSize = totalQueueSize.add(summaryQueueSize); + + final List summaryResourceClaims = summary.getResourceClaims(); + resourceClaims.addAll(summaryResourceClaims); + } + + adjustSize(totalQueueSize.getObjectCount(), totalQueueSize.getByteCount()); + + return new StandardSwapSummary(totalQueueSize, maxId, resourceClaims); + } finally { + partitionReadLock.unlock(); + } + } + + @Override + public void purgeSwapFiles() { + swapManager.purge(); + } + + @Override + public QueueSize size() { + return totalSize.get(); + } + + @Override + public boolean isEmpty() { + return size().getObjectCount() == 0; + } + + @Override + public boolean isActiveQueueEmpty() { + return localPartition.isActiveQueueEmpty(); + } + + @Override + public QueueDiagnostics getQueueDiagnostics() { + partitionReadLock.lock(); + try { + final LocalQueuePartitionDiagnostics localDiagnostics = localPartition.getQueueDiagnostics(); + + final List remoteDiagnostics = new ArrayList<>(queuePartitions.length - 1); + + for (final QueuePartition partition : queuePartitions) { + if (partition instanceof RemoteQueuePartition) { + final RemoteQueuePartition queuePartition = (RemoteQueuePartition) partition; + final RemoteQueuePartitionDiagnostics diagnostics = queuePartition.getDiagnostics(); + remoteDiagnostics.add(diagnostics); + } + } + + return new StandardQueueDiagnostics(localDiagnostics, remoteDiagnostics); + } finally { + partitionReadLock.unlock(); + } + } + + protected LocalQueuePartition getLocalPartition() { + return localPartition; + } + + protected int getPartitionCount() { + partitionReadLock.lock(); + try { + return queuePartitions.length; + } finally { + partitionReadLock.unlock(); + } + } + + protected QueuePartition getPartition(final int index) { + partitionReadLock.lock(); + try { + if (index < 0 || index >= queuePartitions.length) { + throw new IndexOutOfBoundsException(); + } + + return queuePartitions[index]; + } finally { + partitionReadLock.unlock(); + } + } + + private void adjustSize(final int countToAdd, final long bytesToAdd) { + boolean updated = false; + while (!updated) { + final QueueSize queueSize = this.totalSize.get(); + final QueueSize updatedSize = queueSize.add(countToAdd, bytesToAdd); + updated = totalSize.compareAndSet(queueSize, updatedSize); + } + } + + public void onTransfer(final Collection flowFiles) { + adjustSize(-flowFiles.size(),
-flowFiles.stream().mapToLong(FlowFileRecord::getSize).sum()); + } + + public void onAbort(final Collection flowFiles) { + if (flowFiles == null || flowFiles.isEmpty()) { + return; + } + + adjustSize(-flowFiles.size(), -flowFiles.stream().mapToLong(FlowFileRecord::getSize).sum()); + } + + /** + * Determines which QueuePartition the given FlowFile belongs to. Must be called with partition read lock held. + * + * @param flowFile the FlowFile + * @return the QueuePartition that the FlowFile belongs to + */ + private QueuePartition getPartition(final FlowFileRecord flowFile) { + final QueuePartition queuePartition = partitioner.getPartition(flowFile, queuePartitions, localPartition); + logger.debug("{} Assigning {} to Partition: {}", this, flowFile, queuePartition); + return queuePartition; + } + + public void setNodeIdentifiers(final Set updatedNodeIdentifiers, final boolean forceUpdate) { + partitionWriteLock.lock(); + try { + // If nothing is changing, then just return + if (!forceUpdate && this.nodeIdentifiers.equals(updatedNodeIdentifiers)) { + logger.debug("{} Not going to rebalance Queue even though setNodeIdentifiers was called, because the new set of Node Identifiers is the same as the existing set", this); + return; + } + + logger.debug("{} Stopping the {} queue partitions in order to change node identifiers from {} to {}", this, queuePartitions.length, this.nodeIdentifiers, updatedNodeIdentifiers); + for (final QueuePartition queuePartition : queuePartitions) { + queuePartition.stop(); + } + + // Determine which Node Identifiers, if any, were removed. + final Set removedNodeIds = new HashSet<>(this.nodeIdentifiers); + removedNodeIds.removeAll(updatedNodeIdentifiers); + logger.debug("{} The following Node Identifiers were removed from the cluster: {}", this, removedNodeIds); + + // Build up a Map of Node ID to Queue Partition so that we can easily pull over the existing + // QueuePartition objects instead of having to create new ones. + final Map partitionMap = new HashMap<>(); + for (final QueuePartition partition : this.queuePartitions) { + final Optional nodeIdOption = partition.getNodeIdentifier(); + nodeIdOption.ifPresent(nodeIdentifier -> partitionMap.put(nodeIdentifier, partition)); + } + + // Re-define 'queuePartitions' array + final List sortedNodeIdentifiers = new ArrayList<>(updatedNodeIdentifiers); + sortedNodeIdentifiers.sort(Comparator.comparing(nodeId -> nodeId.getApiAddress() + ":" + nodeId.getApiPort())); + + QueuePartition[] updatedQueuePartitions; + if (sortedNodeIdentifiers.isEmpty()) { + updatedQueuePartitions = new QueuePartition[] { localPartition }; + } else { + updatedQueuePartitions = new QueuePartition[sortedNodeIdentifiers.size()]; + } + + // Populate the new QueuePartitions. + boolean localPartitionIncluded = false; + for (int i = 0; i < sortedNodeIdentifiers.size(); i++) { + final NodeIdentifier nodeId = sortedNodeIdentifiers.get(i); + if (nodeId.equals(clusterCoordinator.getLocalNodeIdentifier())) { + updatedQueuePartitions[i] = localPartition; + localPartitionIncluded = true; + + // If we have RemoteQueuePartition with this Node ID with data, that data must be migrated to the local partition. + // This can happen if we didn't previously know our Node UUID. 
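+ // The contents of that remote partition are pulled directly into the local partition rather than being sent over the network back to ourselves.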
+ final QueuePartition existingPartition = partitionMap.get(nodeId); + if (existingPartition != null && existingPartition != localPartition) { + final FlowFileQueueContents partitionContents = existingPartition.packageForRebalance(localPartition.getSwapPartitionName()); + logger.debug("Transferred data from {} to {}", existingPartition, localPartition); + localPartition.inheritQueueContents(partitionContents); + } + + continue; + } + + final QueuePartition existingPartition = partitionMap.get(nodeId); + updatedQueuePartitions[i] = existingPartition == null ? createRemotePartition(nodeId) : existingPartition; + } + + if (!localPartitionIncluded) { + final QueuePartition[] withLocal = new QueuePartition[updatedQueuePartitions.length + 1]; + System.arraycopy(updatedQueuePartitions, 0, withLocal, 0, updatedQueuePartitions.length); + withLocal[withLocal.length - 1] = localPartition; + updatedQueuePartitions = withLocal; + } + + // If the partition requires that all partitions be re-balanced when the number of partitions changes, then do so. + // Otherwise, just rebalance the data from any Partitions that were removed, if any. + if (partitioner.isRebalanceOnClusterResize()) { + for (final QueuePartition queuePartition : this.queuePartitions) { + logger.debug("Rebalancing {}", queuePartition); + rebalance(queuePartition); + } + } else { + // Not all partitions need to be rebalanced, so just ensure that we rebalance any FlowFiles that are destined + // for a node that is no longer in the cluster. + for (final NodeIdentifier removedNodeId : removedNodeIds) { + final QueuePartition removedPartition = partitionMap.get(removedNodeId); + if (removedPartition == null) { + continue; + } + + logger.debug("Rebalancing {}", removedPartition); + rebalance(removedPartition); + } + } + + // Unregister any client for which the node was removed from the cluster + for (final NodeIdentifier removedNodeId : removedNodeIds) { + final QueuePartition removedPartition = partitionMap.get(removedNodeId); + if (removedPartition instanceof RemoteQueuePartition) { + ((RemoteQueuePartition) removedPartition).onRemoved(); + } + } + + + this.nodeIdentifiers.clear(); + this.nodeIdentifiers.addAll(updatedNodeIdentifiers); + + this.queuePartitions = updatedQueuePartitions; + + logger.debug("{} Restarting the {} queue partitions now that node identifiers have been updated", this, queuePartitions.length); + if (!stopped) { + for (final QueuePartition queuePartition : updatedQueuePartitions) { + queuePartition.start(partitioner); + } + } + } finally { + partitionWriteLock.unlock(); + } + } + + protected void rebalance(final QueuePartition partition) { + logger.debug("Rebalancing Partition {}", partition); + final FlowFileQueueContents contents = partition.packageForRebalance(rebalancingPartition.getSwapPartitionName()); + rebalancingPartition.rebalance(contents); + } + + @Override + public void put(final FlowFileRecord flowFile) { + putAndGetPartition(flowFile); + } + + protected QueuePartition putAndGetPartition(final FlowFileRecord flowFile) { + final QueuePartition partition; + + partitionReadLock.lock(); + try { + adjustSize(1, flowFile.getSize()); + + partition = getPartition(flowFile); + partition.put(flowFile); + } finally { + partitionReadLock.unlock(); + } + + eventListener.triggerDestinationEvent(); + return partition; + } + + public void receiveFromPeer(final Collection flowFiles) { + partitionReadLock.lock(); + try { + if (partitioner.isRebalanceOnClusterResize()) { + logger.debug("Received the following FlowFiles from 
Peer: {}. Will re-partition FlowFiles to ensure proper balancing across the cluster.", flowFiles); + putAll(flowFiles); + } else { + logger.debug("Received the following FlowFiles from Peer: {}. Will accept FlowFiles into the local partition", flowFiles); + localPartition.putAll(flowFiles); + adjustSize(flowFiles.size(), flowFiles.stream().mapToLong(FlowFileRecord::getSize).sum()); + } + } finally { + partitionReadLock.unlock(); + } + } + + @Override + public void putAll(final Collection flowFiles) { + putAllAndGetPartitions(flowFiles); + } + + protected Map> putAllAndGetPartitions(final Collection flowFiles) { + partitionReadLock.lock(); + try { + // NOTE WELL: It is imperative that we adjust the size of the queue here before distributing FlowFiles to partitions. + // If we do it the other way around, we could encounter a race condition where we distribute a FlowFile to the Local Partition, + // but have not yet adjusted the size. The processor consuming from this queue could then poll() the FlowFile, and acknowledge it. + // If that happens before we adjust the size, then we can end up with a negative Queue Size, which will throw an IllegalArgumentException, + // and we end up with the wrong Queue Size. + final long bytes = flowFiles.stream().mapToLong(FlowFileRecord::getSize).sum(); + adjustSize(flowFiles.size(), bytes); + + final Map> partitionMap = distributeToPartitionsAndGet(flowFiles); + + return partitionMap; + } finally { + partitionReadLock.unlock(); + + eventListener.triggerDestinationEvent(); + } + } + + @Override + public void distributeToPartitions(final Collection flowFiles) { + distributeToPartitionsAndGet(flowFiles); + } + + public Map> distributeToPartitionsAndGet(final Collection flowFiles) { + if (flowFiles == null || flowFiles.isEmpty()) { + return Collections.emptyMap(); + } + + final Map> partitionMap; + + partitionReadLock.lock(); + try { + // Optimize for the most common case (no load balancing) so that we will just call getPartition() for the first FlowFile + // in the Collection and then put all FlowFiles into that QueuePartition. It is fairly expensive to call stream().collect(#groupingBy). + if (partitioner.isPartitionStatic()) { + final QueuePartition partition = getPartition(flowFiles.iterator().next()); + partition.putAll(flowFiles); + + final List flowFileList = (flowFiles instanceof List) ?
(List) flowFiles : new ArrayList<>(flowFiles); + partitionMap = Collections.singletonMap(partition, flowFileList); + + logger.debug("Partitioner is static so Partitioned FlowFiles as: {}", partitionMap); + return partitionMap; + } + + partitionMap = flowFiles.stream().collect(Collectors.groupingBy(this::getPartition)); + logger.debug("Partitioned FlowFiles as: {}", partitionMap); + + for (final Map.Entry> entry : partitionMap.entrySet()) { + final QueuePartition partition = entry.getKey(); + final List flowFilesForPartition = entry.getValue(); + + partition.putAll(flowFilesForPartition); + } + } finally { + partitionReadLock.unlock(); + } + + return partitionMap; + } + + protected void setFlowFilePartitioner(final FlowFilePartitioner partitioner) { + partitionWriteLock.lock(); + try { + if (this.partitioner.equals(partitioner)) { + return; + } + + this.partitioner = partitioner; + + for (final QueuePartition partition : this.queuePartitions) { + rebalance(partition); + } + } finally { + partitionWriteLock.unlock(); + } + } + + @Override + public FlowFileRecord poll(final Set expiredRecords) { + final FlowFileRecord flowFile = localPartition.poll(expiredRecords); + onAbort(expiredRecords); + return flowFile; + } + + @Override + public List poll(int maxResults, Set expiredRecords) { + final List flowFiles = localPartition.poll(maxResults, expiredRecords); + onAbort(expiredRecords); + return flowFiles; + } + + @Override + public List poll(FlowFileFilter filter, Set expiredRecords) { + final List flowFiles = localPartition.poll(filter, expiredRecords); + onAbort(expiredRecords); + return flowFiles; + } + + @Override + public void acknowledge(final FlowFileRecord flowFile) { + localPartition.acknowledge(flowFile); + + adjustSize(-1, -flowFile.getSize()); + eventListener.triggerSourceEvent(); + } + + @Override + public void acknowledge(final Collection flowFiles) { + localPartition.acknowledge(flowFiles); + + if (!flowFiles.isEmpty()) { + final long bytes = flowFiles.stream().mapToLong(FlowFileRecord::getSize).sum(); + adjustSize(-flowFiles.size(), -bytes); + } + + eventListener.triggerSourceEvent(); + } + + @Override + public boolean isUnacknowledgedFlowFile() { + return localPartition.isUnacknowledgedFlowFile(); + } + + @Override + public FlowFileRecord getFlowFile(final String flowFileUuid) throws IOException { + return localPartition.getFlowFile(flowFileUuid); + } + + @Override + public boolean isPropagateBackpressureAcrossNodes() { + // If offloaded = false, the queue is not offloading; return true to honor backpressure + // If offloaded = true, the queue is offloading or has finished offloading; return false to ignore backpressure + return !offloaded; + } + + @Override + public void handleExpiredRecords(final Collection expired) { + if (expired == null || expired.isEmpty()) { + return; + } + + logger.info("{} {} FlowFiles have expired and will be removed", new Object[] {this, expired.size()}); + final List expiredRecords = new ArrayList<>(expired.size()); + final List provenanceEvents = new ArrayList<>(expired.size()); + + for (final FlowFileRecord flowFile : expired) { + final StandardRepositoryRecord record = new StandardRepositoryRecord(this, flowFile); + record.markForDelete(); + expiredRecords.add(record); + + final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder() + .fromFlowFile(flowFile) + .setEventType(ProvenanceEventType.EXPIRE) + .setDetails("Expiration Threshold = " + getFlowFileExpiration()) + .setComponentType("Load-Balanced Connection") + 
.setComponentId(getIdentifier()) + .setEventTime(System.currentTimeMillis()); + + final ContentClaim contentClaim = flowFile.getContentClaim(); + if (contentClaim != null) { + final ResourceClaim resourceClaim = contentClaim.getResourceClaim(); + builder.setCurrentContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), + contentClaim.getOffset() + flowFile.getContentClaimOffset(), flowFile.getSize()); + + builder.setPreviousContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), + contentClaim.getOffset() + flowFile.getContentClaimOffset(), flowFile.getSize()); + } + + final ProvenanceEventRecord provenanceEvent = builder.build(); + provenanceEvents.add(provenanceEvent); + + final long flowFileLife = System.currentTimeMillis() - flowFile.getEntryDate(); + logger.info("{} terminated due to FlowFile expiration; life of FlowFile = {} ms", new Object[] {flowFile, flowFileLife}); + } + + try { + flowFileRepo.updateRepository(expiredRecords); + + for (final RepositoryRecord expiredRecord : expiredRecords) { + contentRepo.decrementClaimantCount(expiredRecord.getCurrentClaim()); + } + + provRepo.registerEvents(provenanceEvents); + + adjustSize(-expired.size(), -expired.stream().mapToLong(FlowFileRecord::getSize).sum()); + } catch (IOException e) { + logger.warn("Encountered {} expired FlowFiles but failed to update FlowFile Repository. These FlowFiles may re-appear in the queue after NiFi is restarted and will be expired again at " + + "that point.", expiredRecords.size(), e); + } + } + + + @Override + protected List getListableFlowFiles() { + return localPartition.getListableFlowFiles(); + } + + @Override + protected void dropFlowFiles(final DropFlowFileRequest dropRequest, final String requestor) { + partitionReadLock.lock(); + try { + dropRequest.setOriginalSize(size()); + dropRequest.setState(DropFlowFileState.DROPPING_FLOWFILES); + + int droppedCount = 0; + long droppedBytes = 0; + + try { + for (final QueuePartition partition : queuePartitions) { + final DropFlowFileRequest partitionRequest = new DropFlowFileRequest(dropRequest.getRequestIdentifier() + "-" + localPartition.getNodeIdentifier()); + + partition.dropFlowFiles(partitionRequest, requestor); + + adjustSize(-partitionRequest.getDroppedSize().getObjectCount(), -partitionRequest.getDroppedSize().getByteCount()); + dropRequest.setDroppedSize(new QueueSize(dropRequest.getDroppedSize().getObjectCount() + partitionRequest.getDroppedSize().getObjectCount(), + dropRequest.getDroppedSize().getByteCount() + partitionRequest.getDroppedSize().getByteCount())); + + droppedCount += partitionRequest.getDroppedSize().getObjectCount(); + droppedBytes += partitionRequest.getDroppedSize().getByteCount(); + + dropRequest.setDroppedSize(new QueueSize(droppedCount, droppedBytes)); + dropRequest.setCurrentSize(size()); + + if (partitionRequest.getState() == DropFlowFileState.CANCELED) { + dropRequest.cancel(); + break; + } else if (partitionRequest.getState() == DropFlowFileState.FAILURE) { + dropRequest.setState(DropFlowFileState.FAILURE, partitionRequest.getFailureReason()); + break; + } + } + + if (dropRequest.getState() == DropFlowFileState.DROPPING_FLOWFILES) { + dropRequest.setState(DropFlowFileState.COMPLETE); + } + } catch (final Exception e) { + logger.error("Failed to drop FlowFiles for {}", this, e); + dropRequest.setState(DropFlowFileState.FAILURE, "Failed to drop FlowFiles due to " + e.getMessage() + ". 
See log for more details."); + } + } finally { + partitionReadLock.unlock(); + } + } + + @Override + public void lock() { + partitionReadLock.lock(); + } + + @Override + public void unlock() { + partitionReadLock.unlock(); + } + + private class ClusterEventListener implements ClusterTopologyEventListener { + @Override + public void onNodeAdded(final NodeIdentifier nodeId) { + partitionWriteLock.lock(); + try { + final Set updatedNodeIds = new HashSet<>(nodeIdentifiers); + updatedNodeIds.add(nodeId); + + logger.debug("Node Identifier {} added to cluster. Node ID's changing from {} to {}", nodeId, nodeIdentifiers, updatedNodeIds); + setNodeIdentifiers(updatedNodeIds, false); + } finally { + partitionWriteLock.unlock(); + } + } + + @Override + public void onNodeRemoved(final NodeIdentifier nodeId) { + partitionWriteLock.lock(); + try { + final Set updatedNodeIds = new HashSet<>(nodeIdentifiers); + final boolean removed = updatedNodeIds.remove(nodeId); + if (!removed) { + return; + } + + logger.debug("Node Identifier {} removed from cluster. Node ID's changing from {} to {}", nodeId, nodeIdentifiers, updatedNodeIds); + setNodeIdentifiers(updatedNodeIds, false); + } finally { + partitionWriteLock.unlock(); + } + } + + @Override + public void onLocalNodeIdentifierSet(final NodeIdentifier localNodeId) { + partitionWriteLock.lock(); + try { + if (localNodeId == null) { + return; + } + + if (!nodeIdentifiers.contains(localNodeId)) { + final Set updatedNodeIds = new HashSet<>(nodeIdentifiers); + updatedNodeIds.add(localNodeId); + + logger.debug("Local Node Identifier has now been determined to be {}. Adding to set of Node Identifiers for {}", localNodeId, SocketLoadBalancedFlowFileQueue.this); + setNodeIdentifiers(updatedNodeIds, false); + } + + logger.debug("Local Node Identifier set to {}; current partitions = {}", localNodeId, queuePartitions); + + for (final QueuePartition partition : queuePartitions) { + final Optional nodeIdentifierOption = partition.getNodeIdentifier(); + if (!nodeIdentifierOption.isPresent()) { + continue; + } + + final NodeIdentifier nodeIdentifier = nodeIdentifierOption.get(); + if (nodeIdentifier.equals(localNodeId)) { + if (partition instanceof LocalQueuePartition) { + logger.debug("{} Local Node Identifier set to {} and QueuePartition with this identifier is already a Local Queue Partition", SocketLoadBalancedFlowFileQueue.this, + localNodeId); + break; + } + + logger.debug("{} Local Node Identifier set to {} and found Queue Partition {} with that Node Identifier. 
Will force update of partitions", + SocketLoadBalancedFlowFileQueue.this, localNodeId, partition); + + final Set updatedNodeIds = new HashSet<>(nodeIdentifiers); + updatedNodeIds.add(localNodeId); + setNodeIdentifiers(updatedNodeIds, true); + return; + } + } + + logger.debug("{} Local Node Identifier set to {} but found no Queue Partition with that Node Identifier.", SocketLoadBalancedFlowFileQueue.this, localNodeId); + } finally { + partitionWriteLock.unlock(); + } + } + + @Override + public void onNodeStateChange(final NodeIdentifier nodeId, final NodeConnectionState newState) { + partitionWriteLock.lock(); + try { + if (!offloaded) { + return; + } + + switch (newState) { + case CONNECTED: + if (nodeId != null && nodeId.equals(clusterCoordinator.getLocalNodeIdentifier())) { + // the node with this queue was connected to the cluster, make sure the queue is not offloaded + resetOffloadedQueue(); + } + break; + case OFFLOADED: + case OFFLOADING: + case DISCONNECTED: + case DISCONNECTING: + onNodeRemoved(nodeId); + break; + } + } finally { + partitionWriteLock.unlock(); + } + } + } +} + diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/TransactionThreshold.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/TransactionThreshold.java new file mode 100644 index 000000000000..bdf8d9d7e96a --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/TransactionThreshold.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered; + +public interface TransactionThreshold { + + void adjust(int flowFileCount, long flowFileSize); + + boolean isThresholdMet(); + +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/TransferFailureDestination.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/TransferFailureDestination.java new file mode 100644 index 000000000000..0a0648f1a135 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/TransferFailureDestination.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered; + +import org.apache.nifi.controller.queue.FlowFileQueueContents; +import org.apache.nifi.controller.queue.clustered.partition.FlowFilePartitioner; +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.util.Collection; +import java.util.function.Function; + +public interface TransferFailureDestination { + /** + * Puts all of the given FlowFiles to the appropriate destination queue + * + * @param flowFiles the FlowFiles to transfer + * @param partitionerUsed the partitioner that was used to determine that the given FlowFiles should be grouped together in the first place + */ + void putAll(Collection flowFiles, FlowFilePartitioner partitionerUsed); + + /** + * Puts all of the given FlowFile Queue Contents to the appropriate destination queue + * + * @param queueContents a function that returns the FlowFileQueueContents, given a Partition Name + * @param partitionerUsed the partitioner that was used to determine that the given FlowFiles should be grouped together in the first place + */ + void putAll(Function queueContents, FlowFilePartitioner partitionerUsed); + + /** + * Indicates whether or not FlowFiles will need to be rebalanced when transferred to the destination. + * + * @param partitionerUsed the partitioner that was used to determine that FlowFiles should be grouped together in the first place + * @return true if FlowFiles will be rebalanced when transferred, false otherwise + */ + boolean isRebalanceOnFailure(FlowFilePartitioner partitionerUsed); +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/LoadBalanceFlowFileCodec.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/LoadBalanceFlowFileCodec.java new file mode 100644 index 000000000000..cce2730426a5 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/LoadBalanceFlowFileCodec.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.client; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.io.IOException; +import java.io.OutputStream; + +public interface LoadBalanceFlowFileCodec { + void encode(FlowFileRecord flowFile, OutputStream out) throws IOException; +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/StandardLoadBalanceFlowFileCodec.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/StandardLoadBalanceFlowFileCodec.java new file mode 100644 index 000000000000..8e9b165f1e19 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/StandardLoadBalanceFlowFileCodec.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered.client; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.Map; + +public class StandardLoadBalanceFlowFileCodec implements LoadBalanceFlowFileCodec { + + @Override + public void encode(final FlowFileRecord flowFile, final OutputStream destination) throws IOException { + final DataOutputStream out = new DataOutputStream(destination); + + out.writeInt(flowFile.getAttributes().size()); + for (final Map.Entry entry : flowFile.getAttributes().entrySet()) { + writeString(entry.getKey(), out); + writeString(entry.getValue(), out); + } + + out.writeLong(flowFile.getLineageStartDate()); + out.writeLong(flowFile.getEntryDate()); + } + + private void writeString(final String value, final DataOutputStream out) throws IOException { + final byte[] bytes = value.getBytes(StandardCharsets.UTF_8); + out.writeInt(bytes.length); + out.write(bytes); + } + +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClient.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClient.java new file mode 100644 index 000000000000..8673a8b3eb49 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClient.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered.client.async; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.io.IOException; +import java.util.function.BooleanSupplier; +import java.util.function.Supplier; + +public interface AsyncLoadBalanceClient { + + NodeIdentifier getNodeIdentifier(); + + void start(); + + void stop(); + + void register(String connectionId, BooleanSupplier emptySupplier, Supplier flowFileSupplier, + TransactionFailureCallback failureCallback, TransactionCompleteCallback successCallback, + Supplier compressionSupplier, BooleanSupplier honorBackpressureSupplier); + + void unregister(String connectionId); + + int getRegisteredConnectionCount(); + + boolean isRunning(); + + boolean isPenalized(); + + void nodeDisconnected(); + + boolean communicate() throws IOException; +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClientFactory.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClientFactory.java new file mode 100644 index 000000000000..20a4db2f3f6a --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClientFactory.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.client.async; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; + +public interface AsyncLoadBalanceClientFactory { + AsyncLoadBalanceClient createClient(NodeIdentifier nodeIdentifier); +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClientRegistry.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClientRegistry.java new file mode 100644 index 000000000000..49e6aedf890b --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/AsyncLoadBalanceClientRegistry.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.client.async; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.util.function.BooleanSupplier; +import java.util.function.Supplier; + +public interface AsyncLoadBalanceClientRegistry { + void register(String connectionId, NodeIdentifier nodeId, BooleanSupplier emptySupplier, Supplier flowFileSupplier, TransactionFailureCallback failureCallback, + TransactionCompleteCallback successCallback, Supplier compressionSupplier, BooleanSupplier honorBackpressureSupplier); + + void unregister(String connectionId, NodeIdentifier nodeId); +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/TransactionCompleteCallback.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/TransactionCompleteCallback.java new file mode 100644 index 000000000000..0c8f8b6806d7 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/TransactionCompleteCallback.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered.client.async; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.util.List; + +public interface TransactionCompleteCallback { + void onTransactionComplete(List flowFilesSent); +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/TransactionFailureCallback.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/TransactionFailureCallback.java new file mode 100644 index 000000000000..6d1a342d10b9 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/TransactionFailureCallback.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.client.async; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.util.List; + +public interface TransactionFailureCallback { + default void onTransactionFailed(final List flowFiles, final TransactionPhase transactionPhase) { + onTransactionFailed(flowFiles, null, transactionPhase); + } + + void onTransactionFailed(List flowFiles, Exception cause, TransactionPhase transactionPhase); + + boolean isRebalanceOnFailure(); + + enum TransactionPhase { + /** + * Failure occurred when connecting to the node + */ + CONNECTING, + + /** + * Failure occurred when sending data to the node + */ + SENDING; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/LoadBalanceSession.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/LoadBalanceSession.java new file mode 100644 index 000000000000..638608898ae8 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/LoadBalanceSession.java @@ -0,0 +1,641 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.client.async.nio; + +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.queue.clustered.FlowFileContentAccess; +import org.apache.nifi.controller.queue.clustered.TransactionThreshold; +import org.apache.nifi.controller.queue.clustered.client.LoadBalanceFlowFileCodec; +import org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants; +import org.apache.nifi.controller.queue.clustered.server.TransactionAbortedException; +import org.apache.nifi.controller.repository.ContentNotFoundException; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.remote.StandardVersionNegotiator; +import org.apache.nifi.remote.VersionNegotiator; +import org.apache.nifi.stream.io.ByteCountingOutputStream; +import org.apache.nifi.stream.io.GZIPOutputStream; +import org.apache.nifi.stream.io.StreamUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayOutputStream; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.SocketTimeoutException; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.OptionalInt; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.zip.CRC32; +import java.util.zip.Checksum; + +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.ABORT_PROTOCOL_NEGOTIATION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.ABORT_TRANSACTION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.CONFIRM_CHECKSUM; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.CONFIRM_COMPLETE_TRANSACTION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.QUEUE_FULL; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.REJECT_CHECKSUM; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.REQEUST_DIFFERENT_VERSION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.SPACE_AVAILABLE; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.VERSION_ACCEPTED; + + +public class LoadBalanceSession { + private static final Logger logger = LoggerFactory.getLogger(LoadBalanceSession.class); + static final int MAX_DATA_FRAME_SIZE = 65535; + private static final long PENALTY_MILLIS = TimeUnit.SECONDS.toMillis(2L); + + private final RegisteredPartition partition; + private final Supplier flowFileSupplier; + private final FlowFileContentAccess flowFileContentAccess; + private final LoadBalanceFlowFileCodec flowFileCodec; + private final PeerChannel channel; + 
private final int timeoutMillis; + private final String peerDescription; + private final String connectionId; + private final TransactionThreshold transactionThreshold; + + final VersionNegotiator negotiator = new StandardVersionNegotiator(1); + private int protocolVersion = 1; + + private final Checksum checksum = new CRC32(); + + // guarded by synchronizing on 'this' + private ByteBuffer preparedFrame; + private FlowFileRecord currentFlowFile; + private List flowFilesSent = new ArrayList<>(); + private TransactionPhase phase = TransactionPhase.RECOMMEND_PROTOCOL_VERSION; + private InputStream flowFileInputStream; + private byte[] byteBuffer = new byte[MAX_DATA_FRAME_SIZE]; + private boolean complete = false; + private long readTimeout; + private long penaltyExpiration = -1L; + + public LoadBalanceSession(final RegisteredPartition partition, final FlowFileContentAccess contentAccess, final LoadBalanceFlowFileCodec flowFileCodec, final PeerChannel peerChannel, + final int timeoutMillis, final TransactionThreshold transactionThreshold) { + this.partition = partition; + this.flowFileSupplier = partition.getFlowFileRecordSupplier(); + this.connectionId = partition.getConnectionId(); + this.flowFileContentAccess = contentAccess; + this.flowFileCodec = flowFileCodec; + this.channel = peerChannel; + this.peerDescription = peerChannel.getPeerDescription(); + + if (timeoutMillis < 1) { + throw new IllegalArgumentException(); + } + this.timeoutMillis = timeoutMillis; + this.transactionThreshold = transactionThreshold; + } + + public RegisteredPartition getPartition() { + return partition; + } + + public synchronized int getDesiredReadinessFlag() { + return phase.getRequiredSelectionKey(); + } + + public synchronized List getFlowFilesSent() { + return Collections.unmodifiableList(flowFilesSent); + } + + public synchronized boolean isComplete() { + return complete; + } + + public synchronized boolean communicate() throws IOException { + if (isComplete()) { + return false; + } + + if (isPenalized()) { + logger.debug("Will not communicate with Peer {} for Connection {} because session is penalized", peerDescription, connectionId); + return false; + } + + // If there's already a data frame prepared for writing, just write to the channel. + if (preparedFrame != null && preparedFrame.hasRemaining()) { + logger.trace("Current Frame is already available. Will continue writing current frame to channel"); + final int bytesWritten = channel.write(preparedFrame); + return bytesWritten > 0; + } + + try { + // Check if the phase is one that needs to receive data and if so, call the appropriate method. + switch (phase) { + case RECEIVE_SPACE_RESPONSE: + return receiveSpaceAvailableResponse(); + case VERIFY_CHECKSUM: + return verifyChecksum(); + case CONFIRM_TRANSACTION_COMPLETE: + return confirmTransactionComplete(); + case RECEIVE_PROTOCOL_VERSION_ACKNOWLEDGMENT: + return receiveProtocolVersionAcknowledgment(); + case RECEIVE_RECOMMENDED_PROTOCOL_VERSION: + return receiveRecommendedProtocolVersion(); + } + + // Otherwise, we need to send something so get the data frame that should be sent and write it to the channel + final ByteBuffer byteBuffer = getDataFrame(); + preparedFrame = channel.prepareForWrite(byteBuffer); // Prepare data frame for writing. E.g., encrypt the data, etc. 
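+ // The prepared frame is kept in a field so that, if the channel cannot accept the whole frame in one write, a later call to communicate() resumes writing the remainder (see the check at the top of this method).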
+ + final int bytesWritten = channel.write(preparedFrame); + return bytesWritten > 0; + } catch (final Exception e) { + complete = true; + throw e; + } + } + + + private boolean confirmTransactionComplete() throws IOException { + logger.debug("Confirming Transaction Complete for Peer {}", peerDescription); + + final OptionalInt transactionResponse = channel.read(); + if (!transactionResponse.isPresent()) { + if (System.currentTimeMillis() > readTimeout) { + throw new SocketTimeoutException("Timed out waiting for Peer " + peerDescription + " to confirm the transaction is complete"); + } + + return false; + } + + final int response = transactionResponse.getAsInt(); + if (response < 0) { + throw new EOFException("Confirmed checksum when writing data to Peer " + peerDescription + " but encountered End-of-File when expecting a Transaction Complete confirmation"); + } + + if (response == ABORT_TRANSACTION) { + throw new TransactionAbortedException("Confirmed checksum when writing data to Peer " + peerDescription + " but Peer aborted transaction instead of completing it"); + } + if (response != CONFIRM_COMPLETE_TRANSACTION) { + throw new IOException("Expected a CONFIRM_COMPLETE_TRANSACTION response from Peer " + peerDescription + " but received a value of " + response); + } + + complete = true; + logger.debug("Successfully completed Transaction to send {} FlowFiles to Peer {} for Connection {}", flowFilesSent.size(), peerDescription, connectionId); + + return true; + } + + + private boolean verifyChecksum() throws IOException { + logger.debug("Verifying Checksum for Peer {}", peerDescription); + + final OptionalInt checksumResponse = channel.read(); + if (!checksumResponse.isPresent()) { + if (System.currentTimeMillis() > readTimeout) { + throw new SocketTimeoutException("Timed out waiting for Peer " + peerDescription + " to verify the checksum"); + } + + return false; + } + + final int response = checksumResponse.getAsInt(); + if (response < 0) { + throw new EOFException("Encountered End-of-File when trying to verify Checksum with Peer " + peerDescription); + } + + if (response == REJECT_CHECKSUM) { + throw new TransactionAbortedException("After transferring FlowFiles to Peer " + peerDescription + " received a REJECT_CHECKSUM response. Aborting transaction."); + } + if (response != CONFIRM_CHECKSUM) { + throw new TransactionAbortedException("After transferring FlowFiles to Peer " + peerDescription + " received an unexpected response code " + response + + ". Aborting transaction."); + } + + logger.debug("Checksum confirmed. 
Writing COMPLETE_TRANSACTION flag"); + phase = TransactionPhase.SEND_TRANSACTION_COMPLETE; + + return true; + } + + + + private ByteBuffer getDataFrame() throws IOException { + switch (phase) { + case RECOMMEND_PROTOCOL_VERSION: + return recommendProtocolVersion(); + case ABORT_PROTOCOL_NEGOTIATION: + return abortProtocolNegotiation(); + case SEND_CONNECTION_ID: + return getConnectionId(); + case CHECK_SPACE: + return checkSpace(); + case GET_NEXT_FLOWFILE: + return getNextFlowFile(); + case SEND_FLOWFILE_DEFINITION: + case SEND_FLOWFILE_CONTENTS: + return getFlowFileContent(); + case SEND_CHECKSUM: + return getChecksum(); + case SEND_TRANSACTION_COMPLETE: + return getTransactionComplete(); + default: + logger.debug("Phase of {}, returning null ByteBuffer", phase); + return null; + } + } + + + private ByteBuffer getTransactionComplete() { + logger.debug("Sending Transaction Complete Indicator to Peer {}", peerDescription); + + final ByteBuffer buffer = ByteBuffer.allocate(1); + buffer.put((byte) LoadBalanceProtocolConstants.COMPLETE_TRANSACTION); + buffer.rewind(); + + readTimeout = System.currentTimeMillis() + timeoutMillis; + phase = TransactionPhase.CONFIRM_TRANSACTION_COMPLETE; + return buffer; + } + + private ByteBuffer getChecksum() { + logger.debug("Sending Checksum of {} to Peer {}", checksum.getValue(), peerDescription); + + // No more FlowFiles. + final ByteBuffer buffer = ByteBuffer.allocate(8); + buffer.putLong(checksum.getValue()); + + readTimeout = System.currentTimeMillis() + timeoutMillis; + phase = TransactionPhase.VERIFY_CHECKSUM; + buffer.rewind(); + return buffer; + } + + private ByteBuffer getFlowFileContent() throws IOException { + // This method is fairly inefficient, copying lots of byte[]. Can do better. But keeping it simple for + // now to get this working. Revisit with optimizations later. + try { + if (flowFileInputStream == null) { + flowFileInputStream = flowFileContentAccess.read(currentFlowFile); + } + + final int bytesRead = StreamUtils.fillBuffer(flowFileInputStream, byteBuffer, false); + if (bytesRead < 1) { + // If no data available, close the stream and move on to the next phase, returning a NO_DATA_FRAME buffer. 
+ flowFileInputStream.close(); + flowFileInputStream = null; + phase = TransactionPhase.GET_NEXT_FLOWFILE; + + final ByteBuffer buffer = ByteBuffer.allocate(1); + buffer.put((byte) LoadBalanceProtocolConstants.NO_DATA_FRAME); + buffer.rewind(); + + checksum.update(LoadBalanceProtocolConstants.NO_DATA_FRAME); + + logger.debug("Sending NO_DATA_FRAME indicator to Peer {}", peerDescription); + + return buffer; + } + + logger.trace("Sending Data Frame that is {} bytes long to Peer {}", bytesRead, peerDescription); + final ByteBuffer buffer; + + if (partition.getCompression() == LoadBalanceCompression.COMPRESS_ATTRIBUTES_AND_CONTENT) { + final byte[] compressed = compressDataFrame(byteBuffer, bytesRead); + final int compressedMaxLen = compressed.length; + + buffer = ByteBuffer.allocate(3 + compressedMaxLen); + buffer.put((byte) LoadBalanceProtocolConstants.DATA_FRAME_FOLLOWS); + buffer.putShort((short) compressedMaxLen); + + buffer.put(compressed, 0, compressedMaxLen); + + } else { + buffer = ByteBuffer.allocate(3 + bytesRead); + buffer.put((byte) LoadBalanceProtocolConstants.DATA_FRAME_FOLLOWS); + buffer.putShort((short) bytesRead); + + buffer.put(byteBuffer, 0, bytesRead); + } + + final byte[] frameArray = buffer.array(); + checksum.update(frameArray, 0, frameArray.length); + + phase = TransactionPhase.SEND_FLOWFILE_CONTENTS; + buffer.rewind(); + return buffer; + } catch (final ContentNotFoundException cnfe) { + throw new ContentNotFoundException(currentFlowFile, cnfe.getMissingClaim(), cnfe.getMessage()); + } + } + + private byte[] compressDataFrame(final byte[] uncompressed, final int byteCount) throws IOException { + try (final ByteArrayOutputStream baos = new ByteArrayOutputStream(); + final OutputStream gzipOut = new GZIPOutputStream(baos, 1)) { + + gzipOut.write(uncompressed, 0, byteCount); + gzipOut.close(); + + return baos.toByteArray(); + } + } + + private ByteBuffer getNextFlowFile() throws IOException { + if (transactionThreshold.isThresholdMet()) { + currentFlowFile = null; + logger.debug("Transaction Threshold reached sending to Peer {}; Transitioning phase to SEND_CHECKSUM", peerDescription); + } else { + currentFlowFile = flowFileSupplier.get(); + + if (currentFlowFile == null) { + logger.debug("No more FlowFiles to send to Peer {}; Transitioning phase to SEND_CHECKSUM", peerDescription); + } + } + + if (currentFlowFile == null) { + phase = TransactionPhase.SEND_CHECKSUM; + return noMoreFlowFiles(); + } + + transactionThreshold.adjust(1, currentFlowFile.getSize()); + logger.debug("Next FlowFile to send to Peer {} is {}", peerDescription, currentFlowFile); + flowFilesSent.add(currentFlowFile); + + final LoadBalanceCompression compression = partition.getCompression(); + final boolean compressAttributes = compression != LoadBalanceCompression.DO_NOT_COMPRESS; + logger.debug("Compression to use for sending to Peer {} is {}", peerDescription, compression); + + final byte[] flowFileEncoded; + try (final ByteArrayOutputStream baos = new ByteArrayOutputStream()) { + if (compressAttributes) { + try (final OutputStream gzipOut = new GZIPOutputStream(baos, 1); + final ByteCountingOutputStream out = new ByteCountingOutputStream(gzipOut)) { + + flowFileCodec.encode(currentFlowFile, out); + } + } else { + flowFileCodec.encode(currentFlowFile, baos); + } + + flowFileEncoded = baos.toByteArray(); + } + + final int metadataLength = flowFileEncoded.length; + final ByteBuffer buffer = ByteBuffer.allocate(flowFileEncoded.length + 5); + buffer.put((byte) LoadBalanceProtocolConstants.MORE_FLOWFILES); 
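+ // Every byte written into the frame (the MORE_FLOWFILES marker, the 4-byte metadata length, and the encoded FlowFile) is also fed into the running checksum so the receiver can verify the transaction.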
+ checksum.update(LoadBalanceProtocolConstants.MORE_FLOWFILES); + + buffer.putInt(metadataLength); + checksum.update((metadataLength >> 24) & 0xFF); + checksum.update((metadataLength >> 16) & 0xFF); + checksum.update((metadataLength >> 8) & 0xFF); + checksum.update(metadataLength & 0xFF); + + buffer.put(flowFileEncoded); + checksum.update(flowFileEncoded, 0, flowFileEncoded.length); + + phase = TransactionPhase.SEND_FLOWFILE_DEFINITION; + buffer.rewind(); + return buffer; + } + + + private ByteBuffer recommendProtocolVersion() { + logger.debug("Recommending to Peer {} that Protocol Version {} be used", peerDescription, protocolVersion); + + final ByteBuffer buffer = ByteBuffer.allocate(1); + buffer.put((byte) protocolVersion); + buffer.rewind(); + + readTimeout = System.currentTimeMillis() + timeoutMillis; + phase = TransactionPhase.RECEIVE_PROTOCOL_VERSION_ACKNOWLEDGMENT; + return buffer; + } + + private boolean receiveProtocolVersionAcknowledgment() throws IOException { + logger.debug("Receiving Protocol Version Acknowledgment from Peer {}", peerDescription); + + final OptionalInt ackResponse = channel.read(); + if (!ackResponse.isPresent()) { + if (System.currentTimeMillis() > readTimeout) { + throw new SocketTimeoutException("Timed out waiting for Peer " + peerDescription + " to acknowledge Protocol Version"); + } + + return false; + } + + final int response = ackResponse.getAsInt(); + if (response < 0) { + throw new EOFException("Encountered End-of-File with Peer " + peerDescription + " when expecting a Protocol Version Acknowledgment"); + } + + if (response == VERSION_ACCEPTED) { + logger.debug("Peer {} accepted Protocol Version {}", peerDescription, protocolVersion); + phase = TransactionPhase.SEND_CONNECTION_ID; + return true; + } + + if (response == REQEUST_DIFFERENT_VERSION) { + logger.debug("Recommended using Protocol Version of {} with Peer {} but received REQUEST_DIFFERENT_VERSION response", protocolVersion, peerDescription); + readTimeout = System.currentTimeMillis() + timeoutMillis; + phase = TransactionPhase.RECEIVE_RECOMMENDED_PROTOCOL_VERSION; + return true; + } + + throw new IOException("Failed to negotiate Protocol Version with Peer " + peerDescription + ". Recommended version " + protocolVersion + " but instead of an ACCEPT or REJECT " + + "response got back a response of " + response); + } + + private boolean receiveRecommendedProtocolVersion() throws IOException { + logger.debug("Receiving Protocol Version from Peer {}", peerDescription); + + final OptionalInt recommendationResponse = channel.read(); + if (!recommendationResponse.isPresent()) { + if (System.currentTimeMillis() > readTimeout) { + throw new SocketTimeoutException("Timed out waiting for Peer " + peerDescription + " to recommend Protocol Version"); + } + + return false; + } + + final int requestedVersion = recommendationResponse.getAsInt(); + if (requestedVersion < 0) { + throw new EOFException("Encountered End-of-File with Peer " + peerDescription + " when expecting a Protocol Version Recommendation"); + } + + if (negotiator.isVersionSupported(requestedVersion)) { + protocolVersion = requestedVersion; + phase = TransactionPhase.SEND_CONNECTION_ID; + logger.debug("Peer {} recommended Protocol Version of {}. Accepting version.", peerDescription, requestedVersion); + + return true; + } else { + final Integer preferred = negotiator.getPreferredVersion(requestedVersion); + if (preferred == null) { + logger.debug("Peer {} requested version {} of the Load Balance Protocol. This version is not acceptable.
Aborting communications.", peerDescription, requestedVersion); + phase = TransactionPhase.ABORT_PROTOCOL_NEGOTIATION; + return true; + } else { + logger.debug("Peer {} requested version {} of the Protocol. Recommending version {} instead", peerDescription, requestedVersion, preferred); + protocolVersion = preferred; + phase = TransactionPhase.RECOMMEND_PROTOCOL_VERSION; + return true; + } + } + } + + private ByteBuffer noMoreFlowFiles() { + final ByteBuffer buffer = ByteBuffer.allocate(1); + buffer.put((byte) LoadBalanceProtocolConstants.NO_MORE_FLOWFILES); + buffer.rewind(); + + checksum.update(LoadBalanceProtocolConstants.NO_MORE_FLOWFILES); + return buffer; + } + + private ByteBuffer abortProtocolNegotiation() { + final ByteBuffer buffer = ByteBuffer.allocate(1); + buffer.put((byte) ABORT_PROTOCOL_NEGOTIATION); + buffer.rewind(); + + return buffer; + } + + private ByteBuffer getConnectionId() { + logger.debug("Sending Connection ID {} to Peer {}", connectionId, peerDescription); + + final ByteBuffer buffer = ByteBuffer.allocate(connectionId.length() + 2); + buffer.putShort((short) connectionId.length()); + buffer.put(connectionId.getBytes(StandardCharsets.UTF_8)); + buffer.rewind(); + + final byte[] frameBytes = buffer.array(); + checksum.update(frameBytes, 0, frameBytes.length); + + phase = TransactionPhase.CHECK_SPACE; + return buffer; + } + + private ByteBuffer checkSpace() { + logger.debug("Sending a 'Check Space' request to Peer {} to determine if there is space in the queue for more FlowFiles", peerDescription); + + final ByteBuffer buffer = ByteBuffer.allocate(1); + + if (partition.isHonorBackpressure()) { + buffer.put((byte) LoadBalanceProtocolConstants.CHECK_SPACE); + checksum.update(LoadBalanceProtocolConstants.CHECK_SPACE); + + readTimeout = System.currentTimeMillis() + timeoutMillis; + phase = TransactionPhase.RECEIVE_SPACE_RESPONSE; + } else { + buffer.put((byte) LoadBalanceProtocolConstants.SKIP_SPACE_CHECK); + checksum.update(LoadBalanceProtocolConstants.SKIP_SPACE_CHECK); + + phase = TransactionPhase.GET_NEXT_FLOWFILE; + } + + buffer.rewind(); + return buffer; + } + + + private boolean receiveSpaceAvailableResponse() throws IOException { + logger.debug("Receiving response from Peer {} to determine whether or not space is available in queue {}", peerDescription, connectionId); + + final OptionalInt spaceAvailableResponse = channel.read(); + if (!spaceAvailableResponse.isPresent()) { + if (System.currentTimeMillis() > readTimeout) { + throw new SocketTimeoutException("Timed out waiting for Peer " + peerDescription + " to verify whether or not space is available for Connection " + connectionId); + } + + return false; + } + + final int response = spaceAvailableResponse.getAsInt(); + if (response < 0) { + throw new EOFException("Encountered End-of-File when trying to verify with Peer " + peerDescription + " whether or not space is available in Connection " + connectionId); + } + + if (response == SPACE_AVAILABLE) { + logger.debug("Peer {} has confirmed that space is available in Connection {}", peerDescription, connectionId); + phase = TransactionPhase.GET_NEXT_FLOWFILE; + } else if (response == QUEUE_FULL) { + logger.debug("Peer {} has confirmed that the queue is full for Connection {}", peerDescription, connectionId); + phase = TransactionPhase.RECOMMEND_PROTOCOL_VERSION; + checksum.reset(); // We are restarting the session entirely so we need to reset our checksum + penalize(); + } else { + throw new TransactionAbortedException("After requesting to know whether or not Peer 
" + peerDescription + " has space available in Connection " + connectionId + + ", received unexpected response of " + response + ". Aborting transaction."); + } + + return true; + } + + private void penalize() { + penaltyExpiration = System.currentTimeMillis() + PENALTY_MILLIS; + } + + private boolean isPenalized() { + // check for penaltyExpiration > -1L is not strictly necessary as it's implied by the second check but is still + // here because it's more efficient to check this than to make the system call to System.currentTimeMillis(). + return penaltyExpiration > -1L && System.currentTimeMillis() < penaltyExpiration; + } + + + private enum TransactionPhase { + RECOMMEND_PROTOCOL_VERSION(SelectionKey.OP_WRITE), + + RECEIVE_PROTOCOL_VERSION_ACKNOWLEDGMENT(SelectionKey.OP_READ), + + RECEIVE_RECOMMENDED_PROTOCOL_VERSION(SelectionKey.OP_READ), + + ABORT_PROTOCOL_NEGOTIATION(SelectionKey.OP_WRITE), + + SEND_CONNECTION_ID(SelectionKey.OP_WRITE), + + CHECK_SPACE(SelectionKey.OP_WRITE), + + RECEIVE_SPACE_RESPONSE(SelectionKey.OP_READ), + + SEND_FLOWFILE_DEFINITION(SelectionKey.OP_WRITE), + + SEND_FLOWFILE_CONTENTS(SelectionKey.OP_WRITE), + + GET_NEXT_FLOWFILE(SelectionKey.OP_WRITE), + + SEND_CHECKSUM(SelectionKey.OP_WRITE), + + VERIFY_CHECKSUM(SelectionKey.OP_READ), + + SEND_TRANSACTION_COMPLETE(SelectionKey.OP_WRITE), + + CONFIRM_TRANSACTION_COMPLETE(SelectionKey.OP_READ); + + + private final int requiredSelectionKey; + + TransactionPhase(final int requiredSelectionKey) { + this.requiredSelectionKey = requiredSelectionKey; + } + + public int getRequiredSelectionKey() { + return requiredSelectionKey; + } + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClient.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClient.java new file mode 100644 index 000000000000..753c1f49a3e9 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClient.java @@ -0,0 +1,477 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered.client.async.nio; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.queue.clustered.FlowFileContentAccess; +import org.apache.nifi.controller.queue.clustered.SimpleLimitThreshold; +import org.apache.nifi.controller.queue.clustered.TransactionThreshold; +import org.apache.nifi.controller.queue.clustered.client.LoadBalanceFlowFileCodec; +import org.apache.nifi.controller.queue.clustered.client.async.AsyncLoadBalanceClient; +import org.apache.nifi.controller.queue.clustered.client.async.TransactionCompleteCallback; +import org.apache.nifi.controller.queue.clustered.client.async.TransactionFailureCallback; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.reporting.Severity; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.BooleanSupplier; +import java.util.function.Predicate; +import java.util.function.Supplier; + + +public class NioAsyncLoadBalanceClient implements AsyncLoadBalanceClient { + private static final Logger logger = LoggerFactory.getLogger(NioAsyncLoadBalanceClient.class); + private static final long PENALIZATION_MILLIS = TimeUnit.SECONDS.toMillis(1L); + + private final NodeIdentifier nodeIdentifier; + private final SSLContext sslContext; + private final int timeoutMillis; + private final FlowFileContentAccess flowFileContentAccess; + private final LoadBalanceFlowFileCodec flowFileCodec; + private final EventReporter eventReporter; + + private volatile boolean running = false; + private final AtomicLong penalizationEnd = new AtomicLong(0L); + + private final Map registeredPartitions = new HashMap<>(); + private final Queue partitionQueue = new LinkedBlockingQueue<>(); + + // guarded by synchronizing on this + private PeerChannel channel; + private Selector selector; + private SelectionKey selectionKey; + + // While we use synchronization to guard most of the Class's state, we use a separate lock for the LoadBalanceSession. + // We do this because we need to atomically decide whether or not we are able to communicate over the socket with another node and if so, continue on and do so. + // However, we cannot do this within a synchronized block because if we did, then if Thread 1 were communicating with the remote node, and Thread 2 wanted to attempt + // to do so, it would have to wait until Thread 1 released the synchronization. Instead, we want Thread 2 to determine that the resource is not free and move on. + // I.e., we need to use the capability of Lock#tryLock, and the synchronized keyword does not offer this sort of functionality. 
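+ // Note: loadBalanceSession itself is created lazily in getActiveTransaction() and is only read or cleared while this lock is held or from synchronized helper methods.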
+ private final Lock loadBalanceSessionLock = new ReentrantLock(); + private LoadBalanceSession loadBalanceSession = null; + + + public NioAsyncLoadBalanceClient(final NodeIdentifier nodeIdentifier, final SSLContext sslContext, final int timeoutMillis, final FlowFileContentAccess flowFileContentAccess, + final LoadBalanceFlowFileCodec flowFileCodec, final EventReporter eventReporter) { + this.nodeIdentifier = nodeIdentifier; + this.sslContext = sslContext; + this.timeoutMillis = timeoutMillis; + this.flowFileContentAccess = flowFileContentAccess; + this.flowFileCodec = flowFileCodec; + this.eventReporter = eventReporter; + } + + @Override + public NodeIdentifier getNodeIdentifier() { + return nodeIdentifier; + } + + public synchronized void register(final String connectionId, final BooleanSupplier emptySupplier, final Supplier flowFileSupplier, + final TransactionFailureCallback failureCallback, final TransactionCompleteCallback successCallback, + final Supplier compressionSupplier, final BooleanSupplier honorBackpressureSupplier) { + + if (registeredPartitions.containsKey(connectionId)) { + throw new IllegalStateException("Connection with ID " + connectionId + " is already registered"); + } + + final RegisteredPartition partition = new RegisteredPartition(connectionId, emptySupplier, flowFileSupplier, failureCallback, successCallback, compressionSupplier, honorBackpressureSupplier); + registeredPartitions.put(connectionId, partition); + partitionQueue.add(partition); + } + + public synchronized void unregister(final String connectionId) { + registeredPartitions.remove(connectionId); + } + + public synchronized int getRegisteredConnectionCount() { + return registeredPartitions.size(); + } + + private synchronized Map getRegisteredPartitions() { + return new HashMap<>(registeredPartitions); + } + + public void start() { + running = true; + logger.debug("{} started", this); + } + + public void stop() { + running = false; + logger.debug("{} stopped", this); + close(); + } + + private synchronized void close() { + if (selector != null && selector.isOpen()) { + try { + selector.close(); + } catch (final Exception e) { + logger.warn("Failed to close NIO Selector", e); + } + } + + if (channel != null && channel.isOpen()) { + try { + channel.close(); + } catch (final Exception e) { + logger.warn("Failed to close Socket Channel to {} for Load Balancing", nodeIdentifier, e); + } + } + + channel = null; + selector = null; + } + + public boolean isRunning() { + return running; + } + + public boolean isPenalized() { + final long endTimestamp = penalizationEnd.get(); + if (endTimestamp == 0) { + return false; + } + + if (endTimestamp < System.currentTimeMillis()) { + // set penalization end to 0 so that next time we don't need to check System.currentTimeMillis() because + // systems calls are expensive enough that we'd like to avoid them when we can. + penalizationEnd.compareAndSet(endTimestamp, 0L); + return false; + } + + return true; + } + + private void penalize() { + logger.debug("Penalizing {}", this); + this.penalizationEnd.set(System.currentTimeMillis() + PENALIZATION_MILLIS); + } + + + public boolean communicate() throws IOException { + if (!running) { + return false; + } + + // Use #tryLock here so that if another thread is already communicating with this Client, this thread + // will not block and wait but instead will just return so that the Thread Pool can proceed to the next Client. 
+ if (!loadBalanceSessionLock.tryLock()) { + return false; + } + + try { + RegisteredPartition readyPartition = null; + + if (!isConnectionEstablished()) { + readyPartition = getReadyPartition(); + if (readyPartition == null) { + logger.debug("{} has no connection with data ready to be transmitted so will penalize Client without communicating", this); + penalize(); + return false; + } + + try { + establishConnection(); + } catch (IOException e) { + penalize(); + + partitionQueue.offer(readyPartition); + + for (final RegisteredPartition partition : getRegisteredPartitions().values()) { + logger.debug("Triggering Transaction Failure Callback for {} with Transaction Phase of CONNECTING", partition); + partition.getFailureCallback().onTransactionFailed(Collections.emptyList(), e, TransactionFailureCallback.TransactionPhase.CONNECTING); + } + + return false; + } + } + + final LoadBalanceSession loadBalanceSession = getActiveTransaction(readyPartition); + if (loadBalanceSession == null) { + penalize(); + return false; + } + + selector.selectNow(); + final boolean ready = (loadBalanceSession.getDesiredReadinessFlag() & selectionKey.readyOps()) != 0; + if (!ready) { + return false; + } + + boolean anySuccess = false; + boolean success; + do { + try { + success = loadBalanceSession.communicate(); + } catch (final Exception e) { + logger.error("Failed to communicate with Peer {}", nodeIdentifier.toString(), e); + eventReporter.reportEvent(Severity.ERROR, "Load Balanced Connection", "Failed to communicate with Peer " + nodeIdentifier + " when load balancing data for Connection with ID " + + loadBalanceSession.getPartition().getConnectionId() + " due to " + e); + + penalize(); + loadBalanceSession.getPartition().getFailureCallback().onTransactionFailed(loadBalanceSession.getFlowFilesSent(), e, TransactionFailureCallback.TransactionPhase.SENDING); + close(); + + return false; + } + + anySuccess = anySuccess || success; + } while (success); + + if (loadBalanceSession.isComplete()) { + loadBalanceSession.getPartition().getSuccessCallback().onTransactionComplete(loadBalanceSession.getFlowFilesSent()); + } + + return anySuccess; + } catch (final Exception e) { + close(); + loadBalanceSession = null; + throw e; + } finally { + loadBalanceSessionLock.unlock(); + } + } + + /** + * If any FlowFiles have been transferred in an active session, fail the transaction. Otherwise, gather up to the Transaction Threshold's limits + * worth of FlowFiles and treat them as a failed transaction. In either case, terminate the session. This allows us to transfer FlowFiles from + * queue partitions where the partitioner indicates that the data should be rebalanced, but does so in a way that we don't immediately rebalance + * all FlowFiles. This is desirable in a case such as when we have a lot of data queued up in a connection and then a node temporarily disconnects. + * We don't want to then just push all data to other nodes. We'd rather push the data out to other nodes slowly while waiting for the disconnected + * node to reconnect. And if the node reconnects, we want to keep sending it data. + */ + public void nodeDisconnected() { + if (!loadBalanceSessionLock.tryLock()) { + // If we are not able to obtain the loadBalanceSessionLock, we cannot access the load balance session. 
+ return; + } + + try { + final LoadBalanceSession session = getFailoverSession(); + if (session != null) { + loadBalanceSession = null; + + logger.debug("Node {} disconnected so will terminate the Load Balancing Session", nodeIdentifier); + final List flowFilesSent = session.getFlowFilesSent(); + + if (!flowFilesSent.isEmpty()) { + session.getPartition().getFailureCallback().onTransactionFailed(session.getFlowFilesSent(), TransactionFailureCallback.TransactionPhase.SENDING); + } + + close(); + penalize(); + return; + } + + // Obtain a partition that needs to be rebalanced on failure + final RegisteredPartition readyPartition = getReadyPartition(partition -> partition.getFailureCallback().isRebalanceOnFailure()); + if (readyPartition == null) { + return; + } + + partitionQueue.offer(readyPartition); // allow partition to be obtained again + final TransactionThreshold threshold = newTransactionThreshold(); + + final List flowFiles = new ArrayList<>(); + while (!threshold.isThresholdMet()) { + final FlowFileRecord flowFile = readyPartition.getFlowFileRecordSupplier().get(); + if (flowFile == null) { + break; + } + + flowFiles.add(flowFile); + threshold.adjust(1, flowFile.getSize()); + } + + logger.debug("Node {} not connected so failing {} FlowFiles for Load Balancing", nodeIdentifier, flowFiles.size()); + readyPartition.getFailureCallback().onTransactionFailed(flowFiles, TransactionFailureCallback.TransactionPhase.SENDING); + penalize(); // Don't just transfer FlowFiles out of queue's partition as fast as possible, because the node may only be disconnected for a short time. + } finally { + loadBalanceSessionLock.unlock(); + } + } + + private synchronized LoadBalanceSession getFailoverSession() { + if (loadBalanceSession != null && !loadBalanceSession.isComplete()) { + return loadBalanceSession; + } + + return null; + } + + + private RegisteredPartition getReadyPartition() { + return getReadyPartition(partition -> true); + } + + private synchronized RegisteredPartition getReadyPartition(final Predicate filter) { + final List polledPartitions = new ArrayList<>(); + + try { + RegisteredPartition partition; + while ((partition = partitionQueue.poll()) != null) { + if (partition.isEmpty() || !filter.test(partition)) { + polledPartitions.add(partition); + continue; + } + + return partition; + } + + return null; + } finally { + polledPartitions.forEach(partitionQueue::offer); + } + } + + private synchronized LoadBalanceSession getActiveTransaction(final RegisteredPartition proposedPartition) { + if (loadBalanceSession != null && !loadBalanceSession.isComplete()) { + return loadBalanceSession; + } + + final RegisteredPartition readyPartition = proposedPartition == null ? 
getReadyPartition() : proposedPartition; + if (readyPartition == null) { + return null; + } + + loadBalanceSession = new LoadBalanceSession(readyPartition, flowFileContentAccess, flowFileCodec, channel, timeoutMillis, newTransactionThreshold()); + partitionQueue.offer(readyPartition); + + return loadBalanceSession; + } + + private TransactionThreshold newTransactionThreshold() { + return new SimpleLimitThreshold(1000, 10_000_000L); + } + + private synchronized boolean isConnectionEstablished() { + return selector != null && channel != null && channel.isConnected(); + } + + private synchronized void establishConnection() throws IOException { + SocketChannel socketChannel = null; + + try { + selector = Selector.open(); + socketChannel = createChannel(); + + socketChannel.configureBlocking(true); + + channel = createPeerChannel(socketChannel, nodeIdentifier.toString()); + channel.performHandshake(); + + socketChannel.configureBlocking(false); + selectionKey = socketChannel.register(selector, SelectionKey.OP_WRITE | SelectionKey.OP_READ); + } catch (Exception e) { + logger.error("Unable to connect to {} for load balancing", nodeIdentifier, e); + + if (selector != null) { + try { + selector.close(); + } catch (final Exception e1) { + e.addSuppressed(e1); + } + } + + if (channel != null) { + try { + channel.close(); + } catch (final Exception e1) { + e.addSuppressed(e1); + } + } + + if (socketChannel != null) { + try { + socketChannel.close(); + } catch (final Exception e1) { + e.addSuppressed(e1); + } + } + + throw e; + } + } + + + private PeerChannel createPeerChannel(final SocketChannel channel, final String peerDescription) { + if (sslContext == null) { + logger.debug("No SSL Context is available so will not perform SSL Handshake with Peer {}", peerDescription); + return new PeerChannel(channel, null, peerDescription); + } + + logger.debug("Performing SSL Handshake with Peer {}", peerDescription); + + final SSLEngine sslEngine = sslContext.createSSLEngine(); + sslEngine.setUseClientMode(true); + sslEngine.setNeedClientAuth(true); + + return new PeerChannel(channel, sslEngine, peerDescription); + } + + + private SocketChannel createChannel() throws IOException { + final SocketChannel socketChannel = SocketChannel.open(); + try { + socketChannel.configureBlocking(true); + final Socket socket = socketChannel.socket(); + socket.setSoTimeout(timeoutMillis); + + socket.connect(new InetSocketAddress(nodeIdentifier.getLoadBalanceAddress(), nodeIdentifier.getLoadBalancePort())); + socket.setSoTimeout(timeoutMillis); + + return socketChannel; + } catch (final Exception e) { + try { + socketChannel.close(); + } catch (final Exception closeException) { + e.addSuppressed(closeException); + } + + throw e; + } + } + + + @Override + public String toString() { + return "NioAsyncLoadBalanceClient[nodeId=" + nodeIdentifier + "]"; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientFactory.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientFactory.java new file mode 100644 index 000000000000..79fe4be5725a --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientFactory.java @@ -0,0 +1,50 @@ +/* + * Licensed to 
the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.client.async.nio; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.controller.queue.clustered.FlowFileContentAccess; +import org.apache.nifi.controller.queue.clustered.client.LoadBalanceFlowFileCodec; +import org.apache.nifi.controller.queue.clustered.client.StandardLoadBalanceFlowFileCodec; +import org.apache.nifi.controller.queue.clustered.client.async.AsyncLoadBalanceClientFactory; +import org.apache.nifi.events.EventReporter; + +import javax.net.ssl.SSLContext; + +public class NioAsyncLoadBalanceClientFactory implements AsyncLoadBalanceClientFactory { + private final SSLContext sslContext; + private final int timeoutMillis; + private final FlowFileContentAccess flowFileContentAccess; + private final EventReporter eventReporter; + private final LoadBalanceFlowFileCodec flowFileCodec; + + public NioAsyncLoadBalanceClientFactory(final SSLContext sslContext, final int timeoutMillis, final FlowFileContentAccess flowFileContentAccess, final EventReporter eventReporter, + final LoadBalanceFlowFileCodec loadBalanceFlowFileCodec) { + this.sslContext = sslContext; + this.timeoutMillis = timeoutMillis; + this.flowFileContentAccess = flowFileContentAccess; + this.eventReporter = eventReporter; + this.flowFileCodec = loadBalanceFlowFileCodec; + } + + + @Override + public NioAsyncLoadBalanceClient createClient(final NodeIdentifier nodeIdentifier) { + return new NioAsyncLoadBalanceClient(nodeIdentifier, sslContext, timeoutMillis, flowFileContentAccess, new StandardLoadBalanceFlowFileCodec(), eventReporter); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientRegistry.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientRegistry.java new file mode 100644 index 000000000000..3322035db491 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientRegistry.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.client.async.nio; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.queue.clustered.client.async.AsyncLoadBalanceClient; +import org.apache.nifi.controller.queue.clustered.client.async.AsyncLoadBalanceClientRegistry; +import org.apache.nifi.controller.queue.clustered.client.async.TransactionCompleteCallback; +import org.apache.nifi.controller.queue.clustered.client.async.TransactionFailureCallback; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.function.BooleanSupplier; +import java.util.function.Supplier; + +public class NioAsyncLoadBalanceClientRegistry implements AsyncLoadBalanceClientRegistry { + private static final Logger logger = LoggerFactory.getLogger(NioAsyncLoadBalanceClientRegistry.class); + + private final NioAsyncLoadBalanceClientFactory clientFactory; + private final int clientsPerNode; + + private Map> clientMap = new HashMap<>(); + private Set allClients = new CopyOnWriteArraySet<>(); + private boolean running = false; + + public NioAsyncLoadBalanceClientRegistry(final NioAsyncLoadBalanceClientFactory clientFactory, final int clientsPerNode) { + this.clientFactory = clientFactory; + this.clientsPerNode = clientsPerNode; + } + + @Override + public synchronized void register(final String connectionId, final NodeIdentifier nodeId, final BooleanSupplier emptySupplier, final Supplier flowFileSupplier, + final TransactionFailureCallback failureCallback, final TransactionCompleteCallback successCallback, + final Supplier compressionSupplier, final BooleanSupplier honorBackpressureSupplier) { + + Set clients = clientMap.get(nodeId); + if (clients == null) { + clients = registerClients(nodeId); + } + + clients.forEach(client -> client.register(connectionId, emptySupplier, flowFileSupplier, failureCallback, successCallback, compressionSupplier, honorBackpressureSupplier)); + logger.debug("Registered Connection with ID {} to send to Node {}", connectionId, nodeId); + } + + + @Override + public synchronized void unregister(final String connectionId, final NodeIdentifier nodeId) { + final Set clients = clientMap.get(nodeId); + if (clients == null) { + return; + } + + final Set toRemove = new HashSet<>(); + for (final AsyncLoadBalanceClient client : clients) { + client.unregister(connectionId); + if (client.getRegisteredConnectionCount() == 0) { + toRemove.add(client); + } + } + + clients.removeAll(toRemove); + allClients.removeAll(toRemove); + + if (clients.isEmpty()) { + clientMap.remove(nodeId); + } + + logger.debug("Un-registered Connection with ID {} so that it will no longer send data to Node {}; {} clients were removed", connectionId, nodeId, toRemove.size()); + } + + private Set registerClients(final NodeIdentifier nodeId) { + final Set clients = new 
HashSet<>(); + + for (int i=0; i < clientsPerNode; i++) { + final AsyncLoadBalanceClient client = clientFactory.createClient(nodeId); + clients.add(client); + + logger.debug("Added client {} for communicating with Node {}", client, nodeId); + } + + clientMap.put(nodeId, clients); + allClients.addAll(clients); + + if (running) { + clients.forEach(AsyncLoadBalanceClient::start); + } + + return clients; + } + + public synchronized Set getAllClients() { + return allClients; + } + + public synchronized void start() { + if (running) { + return; + } + + running = true; + allClients.forEach(AsyncLoadBalanceClient::start); + } + + public synchronized void stop() { + if (!running) { + return; + } + + running = false; + allClients.forEach(AsyncLoadBalanceClient::stop); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientTask.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientTask.java new file mode 100644 index 000000000000..5c8073aa333d --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/NioAsyncLoadBalanceClientTask.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered.client.async.nio; + +import org.apache.nifi.cluster.coordination.ClusterCoordinator; +import org.apache.nifi.cluster.coordination.node.NodeConnectionState; +import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus; +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.controller.queue.clustered.client.async.AsyncLoadBalanceClient; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.reporting.Severity; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class NioAsyncLoadBalanceClientTask implements Runnable { + private static final Logger logger = LoggerFactory.getLogger(NioAsyncLoadBalanceClientTask.class); + private static final String EVENT_CATEGORY = "Load-Balanced Connection"; + + private final NioAsyncLoadBalanceClientRegistry clientRegistry; + private final ClusterCoordinator clusterCoordinator; + private final EventReporter eventReporter; + private volatile boolean running = true; + + public NioAsyncLoadBalanceClientTask(final NioAsyncLoadBalanceClientRegistry clientRegistry, final ClusterCoordinator clusterCoordinator, final EventReporter eventReporter) { + this.clientRegistry = clientRegistry; + this.clusterCoordinator = clusterCoordinator; + this.eventReporter = eventReporter; + } + + @Override + public void run() { + while (running) { + try { + boolean success = false; + for (final AsyncLoadBalanceClient client : clientRegistry.getAllClients()) { + if (!client.isRunning()) { + logger.trace("Client {} is not running so will not communicate with it", client); + continue; + } + + if (client.isPenalized()) { + logger.trace("Client {} is penalized so will not communicate with it", client); + continue; + } + + final NodeIdentifier clientNodeId = client.getNodeIdentifier(); + final NodeConnectionStatus connectionStatus = clusterCoordinator.getConnectionStatus(clientNodeId); + if (connectionStatus == null) { + logger.debug("Could not determine Connection Status for Node with ID {}; will not communicate with it", clientNodeId); + continue; + } + + final NodeConnectionState connectionState = connectionStatus.getState(); + if (connectionState != NodeConnectionState.CONNECTED) { + logger.debug("Notifying Client {} that node is not connected because current state is {}", client, connectionState); + client.nodeDisconnected(); + continue; + } + + try { + while (client.communicate()) { + success = true; + logger.trace("Client {} was able to make progress communicating with peer. Will continue to communicate with peer.", client); + } + } catch (final Exception e) { + eventReporter.reportEvent(Severity.ERROR, EVENT_CATEGORY, "Failed to communicate with Peer " + + client.getNodeIdentifier() + " while trying to load balance data across the cluster due to " + e.toString()); + logger.error("Failed to communicate with Peer {} while trying to load balance data across the cluster.", client.getNodeIdentifier(), e); + } + + logger.trace("Client {} was no longer able to make progress communicating with peer. Will move on to the next client", client); + } + + if (!success) { + logger.trace("Was unable to communicate with any client. 
Will sleep for 10 milliseconds."); + Thread.sleep(10L); + } + } catch (final Exception e) { + logger.error("Failed to communicate with peer while trying to load balance data across the cluster", e); + eventReporter.reportEvent(Severity.ERROR, EVENT_CATEGORY, "Failed to communicate with Peer while trying to load balance data across the cluster due to " + e); + } + } + } + + public void stop() { + running = false; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/PeerChannel.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/PeerChannel.java new file mode 100644 index 000000000000..67afb4a904fe --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/PeerChannel.java @@ -0,0 +1,358 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.nifi.controller.queue.clustered.client.async.nio; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLEngineResult; +import javax.net.ssl.SSLException; +import java.io.Closeable; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.SocketChannel; +import java.util.OptionalInt; + +public class PeerChannel implements Closeable { + private static final Logger logger = LoggerFactory.getLogger(PeerChannel.class); + + private final SocketChannel socketChannel; + private final SSLEngine sslEngine; + private final String peerDescription; + + private final ByteBuffer singleByteBuffer = ByteBuffer.allocate(1); + private ByteBuffer destinationBuffer = ByteBuffer.allocate(16 * 1024); // buffer that SSLEngine is to write into + private ByteBuffer streamBuffer = ByteBuffer.allocate(16 * 1024); // buffer for data that is read from SocketChannel + private ByteBuffer applicationBuffer = ByteBuffer.allocate(0); // buffer for application-level data that is ready to be served up (i.e., already decrypted if necessary) + + public PeerChannel(final SocketChannel socketChannel, final SSLEngine sslEngine, final String peerDescription) { + this.socketChannel = socketChannel; + this.sslEngine = sslEngine; + this.peerDescription = peerDescription; + } + + + @Override + public void close() throws IOException { + socketChannel.close(); + } + + public boolean isConnected() { + return socketChannel.isConnected(); + } + + public boolean isOpen() { + return socketChannel.isOpen(); + } + + public String getPeerDescription() { + return peerDescription; + } + + public boolean write(final byte b) throws IOException { + singleByteBuffer.clear(); + singleByteBuffer.put(b); + singleByteBuffer.rewind(); + + final ByteBuffer prepared = prepareForWrite(singleByteBuffer); + final int bytesWritten = write(prepared); + return bytesWritten > 0; + } + + public OptionalInt read() throws IOException { + singleByteBuffer.clear(); + final int bytesRead = read(singleByteBuffer); + if (bytesRead < 0) { + return OptionalInt.of(-1); + } + if (bytesRead == 0) { + return OptionalInt.empty(); + } + + singleByteBuffer.flip(); + + final byte read = singleByteBuffer.get(); + return OptionalInt.of(read & 0xFF); + } + + + + + /** + * Reads the given ByteBuffer of data and returns a new ByteBuffer (which is "flipped" / ready to be read). The newly returned + * ByteBuffer will be ready to be written via the {@link #write(ByteBuffer)} method. I.e., it will have already been encrypted, if + * necessary, and any other decorations that need to be applied before sending will already have been applied.
+ * + * @param plaintext the data to be prepped + * @return a ByteBuffer containing the prepared data + * @throws IOException if a failure occurs while encrypting the data + */ + public ByteBuffer prepareForWrite(final ByteBuffer plaintext) throws IOException { + if (sslEngine == null) { + return plaintext; + } + + + ByteBuffer prepared = ByteBuffer.allocate(Math.min(85, plaintext.capacity() - plaintext.position())); + while (plaintext.hasRemaining()) { + encrypt(plaintext); + + final int bytesRemaining = prepared.capacity() - prepared.position(); + if (bytesRemaining < destinationBuffer.remaining()) { + final ByteBuffer temp = ByteBuffer.allocate(prepared.capacity() + sslEngine.getSession().getApplicationBufferSize()); + prepared.flip(); + temp.put(prepared); + prepared = temp; + } + + prepared.put(destinationBuffer); + } + + prepared.flip(); + return prepared; + } + + public int write(final ByteBuffer preparedBuffer) throws IOException { + return socketChannel.write(preparedBuffer); + } + + + public int read(final ByteBuffer dst) throws IOException { + // If we have data ready to go, then go ahead and copy it. + final int bytesCopied = copy(applicationBuffer, dst); + if (bytesCopied != 0) { + return bytesCopied; + } + + final int bytesRead = socketChannel.read(streamBuffer); + if (bytesRead < 1) { + return bytesRead; + } + + if (bytesRead > 0) { + logger.trace("Read {} bytes from SocketChannel", bytesRead); + } + + streamBuffer.flip(); + + try { + if (sslEngine == null) { + cloneToApplicationBuffer(streamBuffer); + return copy(applicationBuffer, dst); + } else { + final boolean decrypted = decrypt(streamBuffer); + logger.trace("Decryption after reading those bytes successful = {}", decrypted); + + if (decrypted) { + cloneToApplicationBuffer(destinationBuffer); + logger.trace("Cloned destination buffer to application buffer"); + + return copy(applicationBuffer, dst); + } else { + // Not enough data to decrypt. Compact the buffer so that we keep the data we have + // but prepare the buffer to be written to again. + logger.debug("Not enough data to decrypt. Will need to consume more data before decrypting"); + streamBuffer.compact(); + return 0; + } + } + } finally { + streamBuffer.compact(); + } + } + + private void cloneToApplicationBuffer(final ByteBuffer buffer) { + if (applicationBuffer.capacity() < buffer.remaining()) { + applicationBuffer = ByteBuffer.allocate(buffer.remaining()); + } else { + applicationBuffer.clear(); + } + + applicationBuffer.put(buffer); + applicationBuffer.flip(); + } + + private int copy(final ByteBuffer src, final ByteBuffer dst) { + if (src != null && src.hasRemaining()) { + final int bytesToCopy = Math.min(dst.remaining(), src.remaining()); + if (bytesToCopy < 1) { + return bytesToCopy; + } + + final byte[] buff = new byte[bytesToCopy]; + src.get(buff); + dst.put(buff); + return bytesToCopy; + } + + return 0; + } + + + /** + * Encrypts the given buffer of data, writing the result into {@link #destinationBuffer}. 
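+ * If {@link #destinationBuffer} is too small to hold the result, it is enlarged by the SSLEngine session's application buffer size and the wrap is retried.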
+ * @param plaintext the data to encrypt + * @throws IOException if the Peer closes the connection abruptly or if unable to perform the encryption + */ + private void encrypt(final ByteBuffer plaintext) throws IOException { + if (sslEngine == null) { + throw new SSLException("Unable to encrypt message because no SSLEngine has been configured"); + } + + destinationBuffer.clear(); + + while (true) { + final SSLEngineResult result = sslEngine.wrap(plaintext, destinationBuffer); + + switch (result.getStatus()) { + case OK: + destinationBuffer.flip(); + return; + case CLOSED: + throw new IOException("Failed to encrypt data to write to Peer " + peerDescription + " because Peer unexpectedly closed connection"); + case BUFFER_OVERFLOW: + // destinationBuffer is not large enough. Need to increase the size. + final ByteBuffer tempBuffer = ByteBuffer.allocate(destinationBuffer.capacity() + sslEngine.getSession().getApplicationBufferSize()); + destinationBuffer.flip(); + tempBuffer.put(destinationBuffer); + destinationBuffer = tempBuffer; + break; + case BUFFER_UNDERFLOW: + // We should never get this result on a call to SSLEngine.wrap(), only on a call to unwrap(). + throw new IOException("Received unexpected Buffer Underflow result when encrypting data to write to Peer " + peerDescription); + } + } + } + + + + + /** + * Attempts to decrypt the given buffer of data, writing the result into {@link #destinationBuffer}. If successful, will return true. + * If more data is needed in order to perform the decryption, will return false. + * + * @param encrypted the ByteBuffer containing the data to decrypt + * @return true if decryption was successful, false otherwise + * @throws IOException if the Peer closed the connection or if unable to decrypt the message + */ + private boolean decrypt(final ByteBuffer encrypted) throws IOException { + if (sslEngine == null) { + throw new SSLException("Unable to decrypt message because no SSLEngine has been configured"); + } + + destinationBuffer.clear(); + + while (true) { + final SSLEngineResult result = sslEngine.unwrap(encrypted, destinationBuffer); + + switch (result.getStatus()) { + case OK: + destinationBuffer.flip(); + return true; + case CLOSED: + throw new IOException("Failed to decrypt data from Peer " + peerDescription + " because Peer unexpectedly closed connection"); + case BUFFER_OVERFLOW: + // destinationBuffer is not large enough. Need to increase the size. + final ByteBuffer tempBuffer = ByteBuffer.allocate(encrypted.position() + sslEngine.getSession().getApplicationBufferSize()); + destinationBuffer.flip(); + tempBuffer.put(destinationBuffer); + destinationBuffer = tempBuffer; + + break; + case BUFFER_UNDERFLOW: + // Not enough data to decrypt. Must read more from the channel.
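+ // Callers are responsible for supplying more data: read() compacts the stream buffer and returns 0, while performHandshake() reads more bytes from the channel before retrying.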
+ return false; + } + } + } + + + public void performHandshake() throws IOException { + if (sslEngine == null) { + return; + } + + sslEngine.beginHandshake(); + + final ByteBuffer emptyMessage = ByteBuffer.allocate(0); + ByteBuffer unwrapBuffer = ByteBuffer.allocate(0); + + while (true) { + final SSLEngineResult.HandshakeStatus handshakeStatus = sslEngine.getHandshakeStatus(); + + switch (handshakeStatus) { + case FINISHED: + case NOT_HANDSHAKING: + streamBuffer.clear(); + destinationBuffer.clear(); + logger.debug("Completed SSL Handshake with Peer {}", peerDescription); + return; + + case NEED_TASK: + logger.debug("SSL Handshake with Peer {} Needs Task", peerDescription); + + Runnable runnable; + while ((runnable = sslEngine.getDelegatedTask()) != null) { + runnable.run(); + } + break; + + case NEED_WRAP: + logger.trace("SSL Handshake with Peer {} Needs Wrap", peerDescription); + + encrypt(emptyMessage); + final int bytesWritten = write(destinationBuffer); + logger.debug("Wrote {} bytes for NEED_WRAP portion of Handshake", bytesWritten); + break; + + case NEED_UNWRAP: + logger.trace("SSL Handshake with Peer {} Needs Unwrap", peerDescription); + + while (sslEngine.getHandshakeStatus() == SSLEngineResult.HandshakeStatus.NEED_UNWRAP) { + final boolean decrypted = decrypt(unwrapBuffer); + if (decrypted) { + logger.trace("Decryption was successful for NEED_UNWRAP portion of Handshake"); + break; + } + + if (unwrapBuffer.capacity() - unwrapBuffer.position() < 1) { + logger.trace("Enlarging size of Buffer for NEED_UNWRAP portion of Handshake"); + + // destinationBuffer is not large enough. Need to increase the size. + final ByteBuffer tempBuffer = ByteBuffer.allocate(unwrapBuffer.capacity() + sslEngine.getSession().getApplicationBufferSize()); + tempBuffer.put(unwrapBuffer); + unwrapBuffer = tempBuffer; + unwrapBuffer.flip(); + continue; + } + + logger.trace("Need to read more bytes for NEED_UNWRAP portion of Handshake"); + + // Need to read more data. + unwrapBuffer.compact(); + final int bytesRead = socketChannel.read(unwrapBuffer); + unwrapBuffer.flip(); + logger.debug("Read {} bytes for NEED_UNWRAP portion of Handshake", bytesRead); + } + + break; + } + } + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/RegisteredPartition.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/RegisteredPartition.java new file mode 100644 index 000000000000..e427b21a163b --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/client/async/nio/RegisteredPartition.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.client.async.nio; + +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.queue.clustered.client.async.TransactionCompleteCallback; +import org.apache.nifi.controller.queue.clustered.client.async.TransactionFailureCallback; +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.util.function.BooleanSupplier; +import java.util.function.Supplier; + +public class RegisteredPartition { + private final String connectionId; + private final Supplier flowFileRecordSupplier; + private final TransactionFailureCallback failureCallback; + private final BooleanSupplier emptySupplier; + private final TransactionCompleteCallback successCallback; + private final Supplier compressionSupplier; + private final BooleanSupplier honorBackpressureSupplier; + + public RegisteredPartition(final String connectionId, final BooleanSupplier emptySupplier, final Supplier flowFileSupplier, final TransactionFailureCallback failureCallback, + final TransactionCompleteCallback successCallback, final Supplier compressionSupplier, final BooleanSupplier honorBackpressureSupplier) { + this.connectionId = connectionId; + this.emptySupplier = emptySupplier; + this.flowFileRecordSupplier = flowFileSupplier; + this.failureCallback = failureCallback; + this.successCallback = successCallback; + this.compressionSupplier = compressionSupplier; + this.honorBackpressureSupplier = honorBackpressureSupplier; + } + + public boolean isEmpty() { + return emptySupplier.getAsBoolean(); + } + + public String getConnectionId() { + return connectionId; + } + + public Supplier getFlowFileRecordSupplier() { + return flowFileRecordSupplier; + } + + public TransactionFailureCallback getFailureCallback() { + return failureCallback; + } + + public TransactionCompleteCallback getSuccessCallback() { + return successCallback; + } + + public LoadBalanceCompression getCompression() { + return compressionSupplier.get(); + } + + public boolean isHonorBackpressure() { + return honorBackpressureSupplier.getAsBoolean(); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/CorrelationAttributePartitioner.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/CorrelationAttributePartitioner.java new file mode 100644 index 000000000000..12560d44f41e --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/CorrelationAttributePartitioner.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.partition; + +import com.google.common.hash.Hashing; +import org.apache.nifi.controller.repository.FlowFileRecord; + +public class CorrelationAttributePartitioner implements FlowFilePartitioner { + private final String partitioningAttribute; + + public CorrelationAttributePartitioner(final String partitioningAttribute) { + this.partitioningAttribute = partitioningAttribute; + } + + @Override + public QueuePartition getPartition(final FlowFileRecord flowFile, final QueuePartition[] partitions, final QueuePartition localPartition) { + final int hash = hash(flowFile); + + // The consistentHash method appears to always return a bucket of '1' if there are 2 possible buckets, + // so in this case we will just use modulo division to avoid this. I suspect this is a bug with the Guava + // implementation, but it's not clear at this point. + final int index; + if (partitions.length < 3) { + index = hash % partitions.length; + } else { + index = Hashing.consistentHash(hash, partitions.length); + } + + return partitions[index]; + } + + protected int hash(final FlowFileRecord flowFile) { + final String partitionAttributeValue = flowFile.getAttribute(partitioningAttribute); + return (partitionAttributeValue == null) ? 0 : partitionAttributeValue.hashCode(); + } + + @Override + public boolean isRebalanceOnClusterResize() { + return true; + } + + @Override + public boolean isRebalanceOnFailure() { + return false; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/FirstNodePartitioner.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/FirstNodePartitioner.java new file mode 100644 index 000000000000..10d584e8a1cd --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/FirstNodePartitioner.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered.partition; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +public class FirstNodePartitioner implements FlowFilePartitioner { + + @Override + public QueuePartition getPartition(final FlowFileRecord flowFile, final QueuePartition[] partitions, final QueuePartition localPartition) { + return partitions[0]; + } + + @Override + public boolean isRebalanceOnClusterResize() { + return true; + } + + @Override + public boolean isRebalanceOnFailure() { + return false; + } + + @Override + public boolean isPartitionStatic() { + return true; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/FlowFilePartitioner.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/FlowFilePartitioner.java new file mode 100644 index 000000000000..4f528e4c119b --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/FlowFilePartitioner.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.partition; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +public interface FlowFilePartitioner { + + /** + * Determines which partition the given FlowFile should go to + * + * @param flowFile the FlowFile to partition + * @param partitions the partitions to choose from + * @param localPartition the local partition, which is also included in the given array of partitions + * @return the partition for the FlowFile + */ + QueuePartition getPartition(FlowFileRecord flowFile, QueuePartition[] partitions, QueuePartition localPartition); + + /** + * @return true if a change in the size of a cluster should result in re-balancing all FlowFiles in queue, + * false if a change in the size of a cluster does not require re-balancing. + */ + boolean isRebalanceOnClusterResize(); + + /** + * @return true if FlowFiles should be rebalanced to another partition if they cannot be sent to the designated peer, + * false if a failure should result in the FlowFiles remaining in same partition. + */ + boolean isRebalanceOnFailure(); + + /** + * @return true if the return value of {@link #getPartition(FlowFileRecord, QueuePartition[], QueuePartition)} will be the same + * regardless of how many times it is called or which FlowFiles are passed. 
+ */ + default boolean isPartitionStatic() { + return false; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/LocalPartitionPartitioner.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/LocalPartitionPartitioner.java new file mode 100644 index 000000000000..0f9f9f7c99d9 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/LocalPartitionPartitioner.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.partition; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +public class LocalPartitionPartitioner implements FlowFilePartitioner { + @Override + public QueuePartition getPartition(final FlowFileRecord flowFile, final QueuePartition[] partitions, final QueuePartition localPartition) { + return localPartition; + } + + @Override + public boolean isRebalanceOnClusterResize() { + return false; + } + + @Override + public boolean isRebalanceOnFailure() { + return false; + } + + @Override + public boolean isPartitionStatic() { + return true; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/LocalQueuePartition.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/LocalQueuePartition.java new file mode 100644 index 000000000000..9ee0e0ea6d42 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/LocalQueuePartition.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
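As a usage sketch of the interface above, a hypothetical partitioner that keeps FlowFiles carrying an assumed "keep.local" attribute on the local node and sends everything else to the first partition; the class is illustrative only and is assumed to live in the same package as the interfaces in this patch.

import org.apache.nifi.controller.repository.FlowFileRecord;

public class KeepLocalPartitioner implements FlowFilePartitioner {

    @Override
    public QueuePartition getPartition(final FlowFileRecord flowFile, final QueuePartition[] partitions, final QueuePartition localPartition) {
        // FlowFiles flagged by the (hypothetical) attribute stay local; everything else goes to partitions[0].
        return flowFile.getAttribute("keep.local") != null ? localPartition : partitions[0];
    }

    @Override
    public boolean isRebalanceOnClusterResize() {
        return true; // partitions[0] may refer to a different node after nodes join or leave
    }

    @Override
    public boolean isRebalanceOnFailure() {
        return true; // if the chosen peer cannot accept the FlowFiles, allow them to be re-partitioned
    }

    // isPartitionStatic() keeps its default of false: the chosen partition depends on the FlowFile.
}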
+ */ + +package org.apache.nifi.controller.queue.clustered.partition; + +import org.apache.nifi.controller.queue.FlowFileQueueContents; +import org.apache.nifi.controller.queue.LocalQueuePartitionDiagnostics; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.processor.FlowFileFilter; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Set; + +/** + * An extension of a Queue Partition that contains the methods necessary for Processors, Funnels, and Ports to interact with the Partition + * as if it were an entire FlowFile Queue itself. + */ +public interface LocalQueuePartition extends QueuePartition { + /** + * @return true if the active queue is empty, false otherwise + */ + boolean isActiveQueueEmpty(); + + /** + * @return true if there is at least one FlowFile that has not yet been acknowledged, false if all FlowFiles have been acknowledged. + */ + boolean isUnacknowledgedFlowFile(); + + /** + * Returns a single FlowFile with the highest priority that is available in the partition, or null if no FlowFile is available + * + * @param expiredRecords a Set of FlowFileRecord's to which any expired records that are encountered should be added + * @return a single FlowFile with the highest priority that is available in the partition, or null if no FlowFile is available + */ + FlowFileRecord poll(Set<FlowFileRecord> expiredRecords); + + /** + * Returns up to maxResults FlowFiles from the queue + * + * @param maxResults the maximum number of FlowFiles to return + * @param expiredRecords a Set of FlowFileRecord's to which any expired records that are encountered should be added + * @return a List of FlowFiles (possibly empty) with the highest priority FlowFiles that are available in the partition + */ + List<FlowFileRecord> poll(int maxResults, Set<FlowFileRecord> expiredRecords); + + /** + * Returns a List of FlowFiles that match the given filter + * + * @param filter the filter to determine whether or not a given FlowFile should be returned + * @param expiredRecords a Set of FlowFileRecord's to which any expired records that are encountered should be added + * @return a List of FlowFiles (possibly empty) with FlowFiles that matched the given filter + */ + List<FlowFileRecord> poll(FlowFileFilter filter, Set<FlowFileRecord> expiredRecords); + + /** + * Acknowledges that the given FlowFile has been accounted for and is no longer the responsibility of this partition + * @param flowFile the FlowFile that has been accounted for + */ + void acknowledge(FlowFileRecord flowFile); + + /** + * Acknowledges that the given FlowFiles have been accounted for and are no longer the responsibility of this partition + * @param flowFiles the FlowFiles that have been accounted for + */ + void acknowledge(Collection<FlowFileRecord> flowFiles); + + /** + * Returns the FlowFile with the given UUID, or null if the FlowFile with that UUID is not found in the partition + * + * @param flowFileUuid the UUID of the FlowFile + * @return the FlowFile with the given UUID or null if the FlowFile cannot be found + * @throws IOException if unable to read swapped data from a swap file + */ + FlowFileRecord getFlowFile(final String flowFileUuid) throws IOException; + + /** + * Returns the FlowFiles that can be provided as the result of a "List FlowFiles" action + * @return a List of FlowFiles + */ + List<FlowFileRecord> getListableFlowFiles(); + + /** + * Inherits the contents of another queue/partition + * @param queueContents the contents to inherit + */ + void inheritQueueContents(FlowFileQueueContents queueContents); + + /** + * @return diagnostics
information about the queue partition + */ + LocalQueuePartitionDiagnostics getQueueDiagnostics(); +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/NonLocalPartitionPartitioner.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/NonLocalPartitionPartitioner.java new file mode 100644 index 000000000000..0953ce2c4dbd --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/NonLocalPartitionPartitioner.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.controller.queue.clustered.partition; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Returns remote partitions when queried for a partition; never returns the {@link LocalQueuePartition}. + */ +public class NonLocalPartitionPartitioner implements FlowFilePartitioner { + private final AtomicLong counter = new AtomicLong(0L); + + @Override + public QueuePartition getPartition(final FlowFileRecord flowFile, final QueuePartition[] partitions, final QueuePartition localPartition) { + QueuePartition remotePartition = null; + final long startIndex = counter.getAndIncrement(); + for (int i = 0, numPartitions = partitions.length; i < numPartitions && remotePartition == null; ++i) { + int index = (int) ((startIndex + i) % numPartitions); + QueuePartition partition = partitions[index]; + if (!partition.equals(localPartition)) { + remotePartition = partition; + } + } + + if (remotePartition == null) { + throw new IllegalStateException("Could not determine a remote partition"); + } + + return remotePartition; + } + + @Override + public boolean isRebalanceOnClusterResize() { + return true; + } + + + @Override + public boolean isRebalanceOnFailure() { + return true; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/QueuePartition.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/QueuePartition.java new file mode 100644 index 000000000000..c73525b07d0e --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/QueuePartition.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
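A minimal consumer-side sketch of the poll/acknowledge contract above: whatever is polled must later be acknowledged so the partition's accounting stays correct, and expired records are surfaced separately for the owning queue to handle. The helper class and the Consumer parameter are hypothetical; the class is assumed to sit in the same package as LocalQueuePartition.

import org.apache.nifi.controller.repository.FlowFileRecord;

import java.util.HashSet;
import java.util.Set;
import java.util.function.Consumer;

class LocalPartitionDrainSketch {

    static void drainOnce(final LocalQueuePartition partition, final Consumer<FlowFileRecord> consumer) {
        final Set<FlowFileRecord> expiredRecords = new HashSet<>();
        final FlowFileRecord flowFile = partition.poll(expiredRecords);
        // expiredRecords now holds anything that timed out; the real queue passes these to its expiration handling

        if (flowFile == null) {
            return;
        }

        try {
            consumer.accept(flowFile);
        } finally {
            partition.acknowledge(flowFile); // the partition is no longer responsible for this FlowFile
        }
    }
}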
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.partition; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.controller.queue.DropFlowFileRequest; +import org.apache.nifi.controller.queue.FlowFileQueueContents; +import org.apache.nifi.controller.queue.QueueSize; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.SwapSummary; +import org.apache.nifi.flowfile.FlowFilePrioritizer; + +import java.util.Collection; +import java.util.List; +import java.util.Optional; + +/** + * Represents a portion of a FlowFile Queue such that a FlowFile Queue can be broken into + * a local queue partition and 0 or more Remote Queue Partitions. + */ +public interface QueuePartition { + /** + * Discovers any FlowFiles that have been swapped out, returning a summary of the swap files' contents + * @return a summary of the swap files' contents + */ + SwapSummary recoverSwappedFlowFiles(); + + /** + * @return the Node Identifier that this Queue Partition corresponds to, or and empty Optional if the Node Identifier is not yet known. + */ + Optional getNodeIdentifier(); + + /** + * @return the name of the Partition that is used when serializing swap flowfiles in order to denote that a swap file belongs to this partition + */ + String getSwapPartitionName(); + + /** + * Adds the given FlowFile to this partition + * @param flowFile the FlowFile to add + */ + void put(FlowFileRecord flowFile); + + /** + * Adds the given FlowFiles to this partition + * @param flowFiles the FlowFiles to add + */ + void putAll(Collection flowFiles); + + /** + * Drops the FlowFiles in this partition + * @param dropRequest the FlowFile Drop Request + * @param requestor the user making the request + */ + void dropFlowFiles(DropFlowFileRequest dropRequest, String requestor); + + /** + * Updates the prioritizers to use when queueing data + * @param newPriorities the new priorities + */ + void setPriorities(List newPriorities); + + /** + * Starts distributing FlowFiles to their desired destinations + * + * @param flowFilePartitioner the Partitioner that is being used to determine which FlowFiles should belong to this Partition + */ + void start(FlowFilePartitioner flowFilePartitioner); + + /** + * Stop distributing FlowFiles to other nodes in the cluster. This does not interrupt any active transactions but will cause the + * partition to not create any more transactions until it is started again. + */ + void stop(); + + /** + * Provides a {@link FlowFileQueueContents} that can be transferred to another partition + * @param newPartitionName the name of the partition to which the data is being transferred (see {@link #getSwapPartitionName()}. 
+ * @return the contents of the queue + */ + FlowFileQueueContents packageForRebalance(String newPartitionName); + + /** + * @return the current size of the partition's queue + */ + QueueSize size(); +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RebalancingPartition.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RebalancingPartition.java new file mode 100644 index 000000000000..97138146c43f --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RebalancingPartition.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.partition; + +import org.apache.nifi.controller.queue.FlowFileQueueContents; +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.util.Collection; + +/** + * A partition whose sole job is to redistribute FlowFiles to the appropriate partitions. + */ +public interface RebalancingPartition extends QueuePartition { + + /** + * Inherits all of the FlowFiles, including FlowFiles that have been swapped out, in order to + * redistribute them across the cluster + * + * @param queueContents the contents of a FlowFileQueue (or partition) + */ + void rebalance(FlowFileQueueContents queueContents); + + /** + * Inherits all of the given FlowFiles in order to redistribute them across the cluster + * + * @param flowFiles the FlowFiles to redistribute + */ + void rebalance(Collection<FlowFileRecord> flowFiles); + +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RemoteQueuePartition.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RemoteQueuePartition.java new file mode 100644 index 000000000000..a78de553df39 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RemoteQueuePartition.java @@ -0,0 +1,352 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License.
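A short sketch of how the two interfaces above fit together: a partition's contents are packaged under the rebalancer's swap partition name and handed over for redistribution. The helper class is hypothetical and assumed to live alongside the interfaces in this package.

import org.apache.nifi.controller.queue.FlowFileQueueContents;

class RebalanceHandoffSketch {

    static void moveToRebalancer(final QueuePartition source, final RebalancingPartition rebalancer) {
        // Package the source partition's active and swapped FlowFiles under the rebalancer's swap partition name...
        final FlowFileQueueContents contents = source.packageForRebalance(rebalancer.getSwapPartitionName());
        // ...and let the rebalancing partition re-bin them across the cluster.
        rebalancer.rebalance(contents);
    }
}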
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.partition; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.controller.queue.DropFlowFileRequest; +import org.apache.nifi.controller.queue.FlowFileQueueContents; +import org.apache.nifi.controller.queue.LoadBalancedFlowFileQueue; +import org.apache.nifi.controller.queue.QueueSize; +import org.apache.nifi.controller.queue.RemoteQueuePartitionDiagnostics; +import org.apache.nifi.controller.queue.StandardRemoteQueuePartitionDiagnostics; +import org.apache.nifi.controller.queue.SwappablePriorityQueue; +import org.apache.nifi.controller.queue.clustered.TransferFailureDestination; +import org.apache.nifi.controller.queue.clustered.client.async.AsyncLoadBalanceClientRegistry; +import org.apache.nifi.controller.queue.clustered.client.async.TransactionCompleteCallback; +import org.apache.nifi.controller.queue.clustered.client.async.TransactionFailureCallback; +import org.apache.nifi.controller.repository.ContentNotFoundException; +import org.apache.nifi.controller.repository.ContentRepository; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileRepository; +import org.apache.nifi.controller.repository.RepositoryRecord; +import org.apache.nifi.controller.repository.StandardRepositoryRecord; +import org.apache.nifi.controller.repository.SwapSummary; +import org.apache.nifi.controller.repository.claim.ContentClaim; +import org.apache.nifi.controller.repository.claim.ResourceClaim; +import org.apache.nifi.flowfile.FlowFilePrioritizer; +import org.apache.nifi.flowfile.attributes.CoreAttributes; +import org.apache.nifi.provenance.ProvenanceEventBuilder; +import org.apache.nifi.provenance.ProvenanceEventRecord; +import org.apache.nifi.provenance.ProvenanceEventRepository; +import org.apache.nifi.provenance.ProvenanceEventType; +import org.apache.nifi.provenance.StandardProvenanceEventRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +/** + * A Queue Partition that is responsible for transferring FlowFiles to another node in the cluster + */ +public class RemoteQueuePartition implements QueuePartition { + private static final Logger logger = LoggerFactory.getLogger(RemoteQueuePartition.class); + + private final NodeIdentifier nodeIdentifier; + private final SwappablePriorityQueue priorityQueue; + private final LoadBalancedFlowFileQueue flowFileQueue; + private final TransferFailureDestination failureDestination; + + private final FlowFileRepository flowFileRepo; + private final ProvenanceEventRepository provRepo; + private final ContentRepository contentRepo; + private final AsyncLoadBalanceClientRegistry clientRegistry; + + private boolean running = false; + private final String description; + + public RemoteQueuePartition(final NodeIdentifier nodeId, 
final SwappablePriorityQueue priorityQueue, final TransferFailureDestination failureDestination, + final FlowFileRepository flowFileRepo, final ProvenanceEventRepository provRepo, final ContentRepository contentRepository, + final AsyncLoadBalanceClientRegistry clientRegistry, final LoadBalancedFlowFileQueue flowFileQueue) { + + this.nodeIdentifier = nodeId; + this.priorityQueue = priorityQueue; + this.flowFileQueue = flowFileQueue; + this.failureDestination = failureDestination; + this.flowFileRepo = flowFileRepo; + this.provRepo = provRepo; + this.contentRepo = contentRepository; + this.clientRegistry = clientRegistry; + this.description = "RemoteQueuePartition[queueId=" + flowFileQueue.getIdentifier() + ", nodeId=" + nodeIdentifier + "]"; + } + + @Override + public QueueSize size() { + return priorityQueue.size(); + } + + @Override + public String getSwapPartitionName() { + return nodeIdentifier.getId(); + } + + @Override + public Optional getNodeIdentifier() { + return Optional.ofNullable(nodeIdentifier); + } + + @Override + public void put(final FlowFileRecord flowFile) { + priorityQueue.put(flowFile); + } + + @Override + public void putAll(final Collection flowFiles) { + priorityQueue.putAll(flowFiles); + } + + @Override + public void dropFlowFiles(final DropFlowFileRequest dropRequest, final String requestor) { + priorityQueue.dropFlowFiles(dropRequest, requestor); + } + + @Override + public SwapSummary recoverSwappedFlowFiles() { + return priorityQueue.recoverSwappedFlowFiles(); + } + + @Override + public FlowFileQueueContents packageForRebalance(String newPartitionName) { + return priorityQueue.packageForRebalance(newPartitionName); + } + + @Override + public void setPriorities(final List newPriorities) { + priorityQueue.setPriorities(newPriorities); + } + + private FlowFileRecord getFlowFile() { + final Set expired = new HashSet<>(); + final FlowFileRecord flowFile = priorityQueue.poll(expired, flowFileQueue.getFlowFileExpiration(TimeUnit.MILLISECONDS)); + flowFileQueue.handleExpiredRecords(expired); + return flowFile; + } + + @Override + public synchronized void start(final FlowFilePartitioner partitioner) { + if (running) { + return; + } + + final TransactionFailureCallback failureCallback = new TransactionFailureCallback() { + @Override + public void onTransactionFailed(final List flowFiles, final Exception cause, final TransactionPhase phase) { + // In the case of failure, we need to acknowledge the FlowFiles that were removed from the queue, + // and then put the FlowFiles back, or transfer them to another partition. We do not call + // flowFileQueue#onTransfer in the case of failure, though, because the size of the FlowFileQueue itself + // has not changed. They FlowFiles were just re-queued or moved between partitions. + priorityQueue.acknowledge(flowFiles); + + if (cause instanceof ContentNotFoundException) { + // Handle ContentNotFound by creating a RepositoryRecord for the FlowFile and marking as aborted, then updating the + // FlowFiles and Provenance Repositories accordingly. This follows the same pattern as StandardProcessSession so that + // we have a consistent way of handling this case. 
+ final Optional optionalFlowFile = ((ContentNotFoundException) cause).getFlowFile(); + if (optionalFlowFile.isPresent()) { + final List successfulFlowFiles = new ArrayList<>(flowFiles); + + final FlowFileRecord flowFile = optionalFlowFile.get(); + successfulFlowFiles.remove(flowFile); + + final StandardRepositoryRecord repoRecord = new StandardRepositoryRecord(flowFileQueue, flowFile); + repoRecord.markForAbort(); + + updateRepositories(Collections.emptyList(), Collections.singleton(repoRecord)); + + // If unable to even connect to the node, go ahead and transfer all FlowFiles for this queue to the failure destination. + // In either case, transfer those FlowFiles that we failed to send. + if (phase == TransactionPhase.CONNECTING) { + failureDestination.putAll(priorityQueue::packageForRebalance, partitioner); + } + failureDestination.putAll(successfulFlowFiles, partitioner); + + flowFileQueue.onTransfer(Collections.singleton(flowFile)); // Want to ensure that we update queue size because FlowFile won't be re-queued. + + return; + } + } + + // If unable to even connect to the node, go ahead and transfer all FlowFiles for this queue to the failure destination. + // In either case, transfer those FlowFiles that we failed to send. + if (phase == TransactionPhase.CONNECTING) { + failureDestination.putAll(priorityQueue::packageForRebalance, partitioner); + } + failureDestination.putAll(flowFiles, partitioner); + } + + @Override + public boolean isRebalanceOnFailure() { + return failureDestination.isRebalanceOnFailure(partitioner); + } + }; + + final TransactionCompleteCallback successCallback = new TransactionCompleteCallback() { + @Override + public void onTransactionComplete(final List flowFilesSent) { + // We've now completed the transaction. We must now update the repositories and "keep the books", acknowledging the FlowFiles + // with the queue so that its size remains accurate. + priorityQueue.acknowledge(flowFilesSent); + flowFileQueue.onTransfer(flowFilesSent); + updateRepositories(flowFilesSent, Collections.emptyList()); + } + }; + + clientRegistry.register(flowFileQueue.getIdentifier(), nodeIdentifier, priorityQueue::isEmpty, this::getFlowFile, + failureCallback, successCallback, flowFileQueue::getLoadBalanceCompression, flowFileQueue::isPropagateBackpressureAcrossNodes); + + running = true; + } + + public void onRemoved() { + clientRegistry.unregister(flowFileQueue.getIdentifier(), nodeIdentifier); + } + + + /** + * Updates the FlowFileRepository, Provenance Repository, and claimant counts in the Content Repository. + * + * @param flowFilesSent the FlowFiles that were sent to another node. + * @param abortedRecords the Repository Records for any FlowFile whose content was missing. + */ + private void updateRepositories(final List flowFilesSent, final Collection abortedRecords) { + // We update the Provenance Repository first. This way, even if we restart before we update the FlowFile repo, we have the record + // that the data was sent in the Provenance Repository. We then update the content claims and finally the FlowFile Repository. We do it + // in this order so that when the FlowFile repo is sync'ed to disk, we know which Content Claims are no longer in use. Updating the FlowFile + // Repo first could result in holding those Content Claims on disk longer than we need to. + // + // Additionally, we are iterating over the FlowFiles sent multiple times. 
We could refactor this to iterate over them just once and then + // create the Provenance Events and Repository Records in a single pass. Doing so, however, would mean that we need to keep both collections + // of objects in heap at the same time. Using multiple passes allows the Provenance Events to be freed from heap by the GC before the Repo Records + // are ever created. + final List provenanceEvents = new ArrayList<>(flowFilesSent.size() * 2 + abortedRecords.size()); + for (final FlowFileRecord sent : flowFilesSent) { + provenanceEvents.add(createSendEvent(sent)); + provenanceEvents.add(createDropEvent(sent)); + } + + for (final RepositoryRecord abortedRecord : abortedRecords) { + final FlowFileRecord abortedFlowFile = abortedRecord.getCurrent(); + provenanceEvents.add(createDropEvent(abortedFlowFile, "Content Not Found")); + } + + provRepo.registerEvents(provenanceEvents); + + // Update the FlowFile Repository & content claim counts last + final List flowFileRepoRecords = flowFilesSent.stream() + .map(this::createRepositoryRecord) + .collect(Collectors.toCollection(ArrayList::new)); + + flowFileRepoRecords.addAll(abortedRecords); + + // Decrement claimant count for each FlowFile. + flowFileRepoRecords.stream() + .map(RepositoryRecord::getCurrentClaim) + .forEach(contentRepo::decrementClaimantCount); + + try { + flowFileRepo.updateRepository(flowFileRepoRecords); + } catch (final Exception e) { + logger.error("Unable to update FlowFile repository to indicate that {} FlowFiles have been transferred to {}. " + + "It is possible that these FlowFiles will be duplicated upon restart of NiFi.", flowFilesSent.size(), getNodeIdentifier(), e); + } + } + + private RepositoryRecord createRepositoryRecord(final FlowFileRecord flowFile) { + final StandardRepositoryRecord record = new StandardRepositoryRecord(flowFileQueue, flowFile); + record.markForDelete(); + return record; + } + + private ProvenanceEventRecord createSendEvent(final FlowFileRecord flowFile) { + + final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder() + .fromFlowFile(flowFile) + .setEventType(ProvenanceEventType.SEND) + .setDetails("Re-distributed for Load-balanced connection") + .setComponentId(flowFileQueue.getIdentifier()) + .setComponentType("Connection") + .setSourceQueueIdentifier(flowFileQueue.getIdentifier()) + .setSourceSystemFlowFileIdentifier(flowFile.getAttribute(CoreAttributes.UUID.key())) + .setTransitUri("nifi:connection:" + flowFileQueue.getIdentifier()); + + final ContentClaim contentClaim = flowFile.getContentClaim(); + if (contentClaim != null) { + final ResourceClaim resourceClaim = contentClaim.getResourceClaim(); + builder.setCurrentContentClaim(resourceClaim.getContainer(),resourceClaim.getSection() ,resourceClaim.getId(), + contentClaim.getOffset() + flowFile.getContentClaimOffset(), flowFile.getSize()); + + builder.setPreviousContentClaim(resourceClaim.getContainer(),resourceClaim.getSection() ,resourceClaim.getId(), + contentClaim.getOffset() + flowFile.getContentClaimOffset(), flowFile.getSize()); + } + + final ProvenanceEventRecord sendEvent = builder.build(); + + return sendEvent; + } + + private ProvenanceEventRecord createDropEvent(final FlowFileRecord flowFile) { + return createDropEvent(flowFile, null); + } + + private ProvenanceEventRecord createDropEvent(final FlowFileRecord flowFile, final String details) { + final ProvenanceEventBuilder builder = new StandardProvenanceEventRecord.Builder() + .fromFlowFile(flowFile) + .setEventType(ProvenanceEventType.DROP) + 
.setDetails(details) + .setComponentId(flowFileQueue.getIdentifier()) + .setComponentType("Connection") + .setSourceQueueIdentifier(flowFileQueue.getIdentifier()); + + final ContentClaim contentClaim = flowFile.getContentClaim(); + if (contentClaim != null) { + final ResourceClaim resourceClaim = contentClaim.getResourceClaim(); + builder.setCurrentContentClaim(resourceClaim.getContainer(),resourceClaim.getSection() ,resourceClaim.getId(), + contentClaim.getOffset() + flowFile.getContentClaimOffset(), flowFile.getSize()); + + builder.setPreviousContentClaim(resourceClaim.getContainer(),resourceClaim.getSection() ,resourceClaim.getId(), + contentClaim.getOffset() + flowFile.getContentClaimOffset(), flowFile.getSize()); + } + + final ProvenanceEventRecord dropEvent = builder.build(); + + return dropEvent; + } + + + @Override + public synchronized void stop() { + running = false; + clientRegistry.unregister(flowFileQueue.getIdentifier(), nodeIdentifier); + } + + public RemoteQueuePartitionDiagnostics getDiagnostics() { + return new StandardRemoteQueuePartitionDiagnostics(nodeIdentifier.toString(), priorityQueue.getFlowFileQueueSize()); + } + + @Override + public String toString() { + return description; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RoundRobinPartitioner.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RoundRobinPartitioner.java new file mode 100644 index 000000000000..c08724dee599 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/RoundRobinPartitioner.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered.partition; + +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.util.concurrent.atomic.AtomicLong; + +public class RoundRobinPartitioner implements FlowFilePartitioner { + private final AtomicLong counter = new AtomicLong(0L); + + @Override + public QueuePartition getPartition(final FlowFileRecord flowFile, final QueuePartition[] partitions, final QueuePartition localPartition) { + final long count = counter.getAndIncrement(); + final int index = (int) (count % partitions.length); + return partitions[index]; + } + + @Override + public boolean isRebalanceOnClusterResize() { + return false; + } + + + @Override + public boolean isRebalanceOnFailure() { + return true; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/StandardRebalancingPartition.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/StandardRebalancingPartition.java new file mode 100644 index 000000000000..74a8aa6c3cbb --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/StandardRebalancingPartition.java @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered.partition; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.controller.queue.BlockingSwappablePriorityQueue; +import org.apache.nifi.controller.queue.DropFlowFileAction; +import org.apache.nifi.controller.queue.DropFlowFileRequest; +import org.apache.nifi.controller.queue.FlowFileQueueContents; +import org.apache.nifi.controller.queue.LoadBalancedFlowFileQueue; +import org.apache.nifi.controller.queue.QueueSize; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileSwapManager; +import org.apache.nifi.controller.repository.SwapSummary; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.flowfile.FlowFilePrioritizer; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; + +public class StandardRebalancingPartition implements RebalancingPartition { + private final String SWAP_PARTITION_NAME = "rebalance"; + private final String queueIdentifier; + private final BlockingSwappablePriorityQueue queue; + private final LoadBalancedFlowFileQueue flowFileQueue; + private final String description; + + private volatile boolean stopped = true; + private RebalanceTask rebalanceTask; + + + public StandardRebalancingPartition(final FlowFileSwapManager swapManager, final int swapThreshold, final EventReporter eventReporter, + final LoadBalancedFlowFileQueue flowFileQueue, final DropFlowFileAction dropAction) { + + this.queue = new BlockingSwappablePriorityQueue(swapManager, swapThreshold, eventReporter, flowFileQueue, dropAction, SWAP_PARTITION_NAME); + this.queueIdentifier = flowFileQueue.getIdentifier(); + this.flowFileQueue = flowFileQueue; + this.description = "RebalancingPartition[queueId=" + queueIdentifier + "]"; + } + + @Override + public Optional getNodeIdentifier() { + return Optional.empty(); + } + + @Override + public QueueSize size() { + return queue.size(); + } + + @Override + public SwapSummary recoverSwappedFlowFiles() { + return this.queue.recoverSwappedFlowFiles(); + } + + @Override + public String getSwapPartitionName() { + return SWAP_PARTITION_NAME; + } + + @Override + public void put(final FlowFileRecord flowFile) { + queue.put(flowFile); + } + + @Override + public void putAll(final Collection flowFiles) { + queue.putAll(flowFiles); + } + + @Override + public void dropFlowFiles(DropFlowFileRequest dropRequest, String requestor) { + queue.dropFlowFiles(dropRequest, requestor); + } + + @Override + public void setPriorities(final List newPriorities) { + queue.setPriorities(newPriorities); + } + + @Override + public synchronized void start(final FlowFilePartitioner partitionerUsed) { + stopped = false; + rebalanceFromQueue(); + } + + @Override + public synchronized void stop() { + stopped = true; + + if (this.rebalanceTask != null) { + this.rebalanceTask.stop(); + } + + this.rebalanceTask = null; + } + + private synchronized void rebalanceFromQueue() { + if (stopped) { + return; + } + + // If a task is already defined, do nothing. There's already a thread running. 
+ if (rebalanceTask != null) { + return; + } + + this.rebalanceTask = new RebalanceTask(); + + final Thread rebalanceThread = new Thread(this.rebalanceTask); + rebalanceThread.setName("Rebalance queued data for Connection " + queueIdentifier); + rebalanceThread.start(); + } + + @Override + public void rebalance(final FlowFileQueueContents queueContents) { + if (queueContents.getActiveFlowFiles().isEmpty() && queueContents.getSwapLocations().isEmpty()) { + return; + } + + queue.inheritQueueContents(queueContents); + rebalanceFromQueue(); + } + + @Override + public void rebalance(final Collection flowFiles) { + queue.putAll(flowFiles); + rebalanceFromQueue(); + } + + @Override + public FlowFileQueueContents packageForRebalance(String newPartitionName) { + return queue.packageForRebalance(newPartitionName); + } + + private synchronized boolean complete() { + if (!queue.isEmpty()) { + return false; + } + + this.rebalanceTask = null; + return true; + } + + + private class RebalanceTask implements Runnable { + private volatile boolean stopped = false; + private final Set expiredRecords = new HashSet<>(); + private final long pollWaitMillis = 100L; + + public void stop() { + stopped = true; + } + + @Override + public void run() { + while (!stopped) { + final FlowFileRecord polled; + + expiredRecords.clear(); + + // Wait up to #pollWaitMillis milliseconds to get a FlowFile. If none, then check if stopped + // and if not, poll again. + try { + polled = queue.poll(expiredRecords, -1, pollWaitMillis); + } catch (final InterruptedException ie) { + Thread.currentThread().interrupt(); + continue; + } + + if (polled == null) { + flowFileQueue.handleExpiredRecords(expiredRecords); + + if (complete()) { + return; + } else { + continue; + } + } + + // We got 1 FlowFile. Try a second poll to obtain up to 999 more (for a total of 1,000). + final List toDistribute = new ArrayList<>(); + toDistribute.add(polled); + + final List additionalRecords = queue.poll(999, expiredRecords, -1); + toDistribute.addAll(additionalRecords); + + flowFileQueue.handleExpiredRecords(expiredRecords); + + // Transfer all of the FlowFiles that we got back to the FlowFileQueue itself. This will cause the data to be + // re-partitioned and binned appropriately. We also then need to ensure that we acknowledge the data from our + // own SwappablePriorityQueue to ensure that the sizes are kept in check. + flowFileQueue.distributeToPartitions(toDistribute); + queue.acknowledge(toDistribute); + } + } + } + + @Override + public String toString() { + return description; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/SwappablePriorityQueueLocalPartition.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/SwappablePriorityQueueLocalPartition.java new file mode 100644 index 000000000000..e5e64d04bd8a --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/partition/SwappablePriorityQueueLocalPartition.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.partition; + +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.controller.queue.DropFlowFileAction; +import org.apache.nifi.controller.queue.DropFlowFileRequest; +import org.apache.nifi.controller.queue.FlowFileQueue; +import org.apache.nifi.controller.queue.FlowFileQueueContents; +import org.apache.nifi.controller.queue.LocalQueuePartitionDiagnostics; +import org.apache.nifi.controller.queue.QueueSize; +import org.apache.nifi.controller.queue.SwappablePriorityQueue; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileSwapManager; +import org.apache.nifi.controller.repository.SwapSummary; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.flowfile.FlowFilePrioritizer; +import org.apache.nifi.processor.FlowFileFilter; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +/** + * A Local Queue Partition whose implementation is based on the use of a {@link SwappablePriorityQueue}.
+ */ +public class SwappablePriorityQueueLocalPartition implements LocalQueuePartition { + private static final String SWAP_PARTITION_NAME = "local"; + + private final SwappablePriorityQueue priorityQueue; + private final FlowFileQueue flowFileQueue; + private final String description; + + public SwappablePriorityQueueLocalPartition(final FlowFileSwapManager swapManager, final int swapThreshold, final EventReporter eventReporter, + final FlowFileQueue flowFileQueue, final DropFlowFileAction dropAction) { + this.priorityQueue = new SwappablePriorityQueue(swapManager, swapThreshold, eventReporter, flowFileQueue, dropAction, SWAP_PARTITION_NAME); + this.flowFileQueue = flowFileQueue; + this.description = "SwappablePriorityQueueLocalPartition[queueId=" + flowFileQueue.getIdentifier() + "]"; + } + + @Override + public String getSwapPartitionName() { + return SWAP_PARTITION_NAME; + } + + @Override + public QueueSize size() { + return priorityQueue.size(); + } + + @Override + public boolean isUnacknowledgedFlowFile() { + return priorityQueue.isUnacknowledgedFlowFile(); + } + + @Override + public Optional getNodeIdentifier() { + return Optional.empty(); + } + + @Override + public void put(final FlowFileRecord flowFile) { + priorityQueue.put(flowFile); + } + + @Override + public void putAll(final Collection flowFiles) { + priorityQueue.putAll(flowFiles); + } + + @Override + public boolean isActiveQueueEmpty() { + return priorityQueue.isActiveQueueEmpty(); + } + + @Override + public FlowFileRecord poll(final Set expiredRecords) { + return priorityQueue.poll(expiredRecords, getExpiration()); + } + + @Override + public List poll(final int maxResults, final Set expiredRecords) { + return priorityQueue.poll(maxResults, expiredRecords, getExpiration()); + } + + @Override + public List poll(final FlowFileFilter filter, final Set expiredRecords) { + return priorityQueue.poll(filter, expiredRecords, getExpiration()); + } + + private int getExpiration() { + return flowFileQueue.getFlowFileExpiration(TimeUnit.MILLISECONDS); + } + + @Override + public FlowFileRecord getFlowFile(final String flowFileUuid) throws IOException { + return priorityQueue.getFlowFile(flowFileUuid); + } + + @Override + public List getListableFlowFiles() { + return priorityQueue.getActiveFlowFiles(); + } + + @Override + public void dropFlowFiles(final DropFlowFileRequest dropRequest, final String requestor) { + priorityQueue.dropFlowFiles(dropRequest, requestor); + } + + @Override + public SwapSummary recoverSwappedFlowFiles() { + return priorityQueue.recoverSwappedFlowFiles(); + } + + @Override + public void setPriorities(final List newPriorities) { + priorityQueue.setPriorities(newPriorities); + } + + @Override + public void acknowledge(final FlowFileRecord flowFile) { + priorityQueue.acknowledge(flowFile); + } + + @Override + public void acknowledge(final Collection flowFiles) { + priorityQueue.acknowledge(flowFiles); + } + + @Override + public LocalQueuePartitionDiagnostics getQueueDiagnostics() { + return priorityQueue.getQueueDiagnostics(); + } + + @Override + public FlowFileQueueContents packageForRebalance(String newPartitionName) { + return priorityQueue.packageForRebalance(newPartitionName); + } + + @Override + public void start(final FlowFilePartitioner partitionerUsed) { + } + + @Override + public void stop() { + } + + @Override + public void inheritQueueContents(final FlowFileQueueContents queueContents) { + priorityQueue.inheritQueueContents(queueContents); + } + + @Override + public String toString() { + return 
description; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/protocol/LoadBalanceProtocolConstants.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/protocol/LoadBalanceProtocolConstants.java new file mode 100644 index 000000000000..5b02f1314b50 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/protocol/LoadBalanceProtocolConstants.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.protocol; + +public class LoadBalanceProtocolConstants { + // Protocol negotiation constants + public static final int VERSION_ACCEPTED = 0x10; + public static final int REQEUST_DIFFERENT_VERSION = 0x11; + public static final int ABORT_PROTOCOL_NEGOTIATION = 0x12; + + // Transaction constants + public static final int CONFIRM_CHECKSUM = 0x21; + public static final int REJECT_CHECKSUM = 0x22; + public static final int COMPLETE_TRANSACTION = 0x23; + public static final int ABORT_TRANSACTION = 0x24; + public static final int CONFIRM_COMPLETE_TRANSACTION = 0x25; + + // FlowFile constants + public static final int MORE_FLOWFILES = 0x31; + public static final int NO_MORE_FLOWFILES = 0x32; + + // Backpressure / Space constants + public static final int CHECK_SPACE = 0x61; + public static final int SKIP_SPACE_CHECK = 0x62; + public static final int SPACE_AVAILABLE = 0x65; + public static final int QUEUE_FULL = 0x66; + + // data frame constants + public static final int NO_DATA_FRAME = 0x40; + public static final int DATA_FRAME_FOLLOWS = 0x42; +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/ClusterLoadBalanceAuthorizer.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/ClusterLoadBalanceAuthorizer.java new file mode 100644 index 000000000000..43187b5e1267 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/ClusterLoadBalanceAuthorizer.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
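A receiver-side sketch, assuming these constants are written as single-byte codes on the socket stream (the actual framing, checksumming, and handshake are handled by the LoadBalanceProtocol implementation elsewhere in this commit); the helper class is hypothetical.

import java.io.IOException;
import java.io.InputStream;

class ProtocolCodeSketch {

    static boolean moreFlowFilesFollow(final InputStream in) throws IOException {
        final int code = in.read();
        switch (code) {
            case LoadBalanceProtocolConstants.MORE_FLOWFILES:
                return true;  // another FlowFile follows in this transaction
            case LoadBalanceProtocolConstants.NO_MORE_FLOWFILES:
                return false; // the sender has no more FlowFiles to offer
            default:
                throw new IOException("Unexpected load-balance protocol code " + code);
        }
    }
}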
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.server; + +import org.apache.nifi.cluster.coordination.ClusterCoordinator; +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.reporting.Severity; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collection; +import java.util.Set; +import java.util.stream.Collectors; + +public class ClusterLoadBalanceAuthorizer implements LoadBalanceAuthorizer { + private static final Logger logger = LoggerFactory.getLogger(ClusterLoadBalanceAuthorizer.class); + + private final ClusterCoordinator clusterCoordinator; + private final EventReporter eventReporter; + + public ClusterLoadBalanceAuthorizer(final ClusterCoordinator clusterCoordinator, final EventReporter eventReporter) { + this.clusterCoordinator = clusterCoordinator; + this.eventReporter = eventReporter; + } + + @Override + public void authorize(final Collection clientIdentities) throws NotAuthorizedException { + if (clientIdentities == null) { + logger.debug("Client Identities is null, so assuming that Load Balancing communications are not secure. Authorizing client to participate in Load Balancing"); + return; + } + + final Set nodeIds = clusterCoordinator.getNodeIdentifiers().stream() + .map(NodeIdentifier::getApiAddress) + .collect(Collectors.toSet()); + + for (final String clientId : clientIdentities) { + if (nodeIds.contains(clientId)) { + logger.debug("Client ID '{}' is in the list of Nodes in the Cluster. Authorizing Client to Load Balance data", clientId); + return; + } + } + + final String message = String.format("Authorization failed for Client ID's %s to Load Balance data because none of the ID's are known Cluster Node Identifiers", + clientIdentities); + + logger.warn(message); + eventReporter.reportEvent(Severity.WARNING, "Load Balanced Connections", message); + throw new NotAuthorizedException("Client ID's " + clientIdentities + " are not authorized to Load Balance data"); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/ConnectionLoadBalanceServer.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/ConnectionLoadBalanceServer.java new file mode 100644 index 000000000000..93fc2d74db45 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/ConnectionLoadBalanceServer.java @@ -0,0 +1,251 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
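A hypothetical call site for the authorizer above: the identities would typically come from the client's TLS certificate, and the hostnames here are placeholders. The sketch assumes NotAuthorizedException and the helper class live in the same package as ClusterLoadBalanceAuthorizer.

import org.apache.nifi.cluster.coordination.ClusterCoordinator;
import org.apache.nifi.events.EventReporter;

import java.util.Arrays;

class AuthorizerUsageSketch {

    static void checkClient(final ClusterCoordinator coordinator, final EventReporter reporter) throws NotAuthorizedException {
        final ClusterLoadBalanceAuthorizer authorizer = new ClusterLoadBalanceAuthorizer(coordinator, reporter);
        // Throws NotAuthorizedException unless one of the identities matches a known cluster node's API address.
        authorizer.authorize(Arrays.asList("node1.example.com", "node2.example.com"));
    }
}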
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.server; + +import org.apache.nifi.engine.FlowEngine; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.reporting.Severity; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLServerSocket; +import java.io.IOException; +import java.net.InetAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.net.SocketTimeoutException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; + + +public class ConnectionLoadBalanceServer { + private static final Logger logger = LoggerFactory.getLogger(ConnectionLoadBalanceServer.class); + + private final String hostname; + private final int port; + private final SSLContext sslContext; + private final ExecutorService threadPool; + private final LoadBalanceProtocol loadBalanceProtocol; + private final int connectionTimeoutMillis; + private final int numThreads; + private final EventReporter eventReporter; + + private volatile Set communicationActions = Collections.emptySet(); + private final BlockingQueue connectionQueue = new LinkedBlockingQueue<>(); + + private volatile AcceptConnection acceptConnection; + private volatile ServerSocket serverSocket; + private volatile boolean stopped = true; + + public ConnectionLoadBalanceServer(final String hostname, final int port, final SSLContext sslContext, final int numThreads, final LoadBalanceProtocol loadBalanceProtocol, + final EventReporter eventReporter, final int connectionTimeoutMillis) { + this.hostname = hostname; + this.port = port; + this.sslContext = sslContext; + this.loadBalanceProtocol = loadBalanceProtocol; + this.connectionTimeoutMillis = connectionTimeoutMillis; + this.numThreads = numThreads; + this.eventReporter = eventReporter; + + threadPool = new FlowEngine(numThreads, "Load Balance Server"); + } + + public void start() throws IOException { + if (!stopped) { + return; + } + + stopped = false; + if (serverSocket != null) { + return; + } + + try { + serverSocket = createServerSocket(); + } catch (final Exception e) { + throw new IOException("Could not begin listening for incoming connections in order to load balance data across the cluster. 
Please verify the values of the " + + "'nifi.cluster.load.balance.port' and 'nifi.cluster.load.balance.host' properties as well as the 'nifi.security.*' properties", e); + } + + final Set actions = new HashSet<>(numThreads); + for (int i=0; i < numThreads; i++) { + final CommunicateAction action = new CommunicateAction(loadBalanceProtocol); + actions.add(action); + threadPool.submit(action); + } + + this.communicationActions = actions; + + acceptConnection = new AcceptConnection(serverSocket); + final Thread receiveConnectionThread = new Thread(acceptConnection); + receiveConnectionThread.setName("Receive Queue Load-Balancing Connections"); + receiveConnectionThread.start(); + } + + public int getPort() { + return serverSocket.getLocalPort(); + } + + public void stop() { + stopped = true; + threadPool.shutdown(); + + if (acceptConnection != null) { + acceptConnection.stop(); + } + + communicationActions.forEach(CommunicateAction::stop); + + Socket socket; + while ((socket = connectionQueue.poll()) != null) { + try { + socket.close(); + logger.info("{} Closed connection to {} on Server stop", this, socket.getRemoteSocketAddress()); + } catch (final IOException ioe) { + logger.warn("Failed to properly close socket to " + socket.getRemoteSocketAddress(), ioe); + } + } + } + + private ServerSocket createServerSocket() throws IOException { + final InetAddress inetAddress = hostname == null ? null : InetAddress.getByName(hostname); + + if (sslContext == null) { + return new ServerSocket(port, 50, InetAddress.getByName(hostname)); + } else { + final ServerSocket serverSocket = sslContext.getServerSocketFactory().createServerSocket(port, 50, inetAddress); + ((SSLServerSocket) serverSocket).setNeedClientAuth(true); + return serverSocket; + } + } + + + private class CommunicateAction implements Runnable { + private final LoadBalanceProtocol loadBalanceProtocol; + private volatile boolean stopped = false; + + public CommunicateAction(final LoadBalanceProtocol loadBalanceProtocol) { + this.loadBalanceProtocol = loadBalanceProtocol; + } + + public void stop() { + this.stopped = true; + } + + @Override + public void run() { + String peerDescription = ""; + + while (!stopped) { + Socket socket = null; + try { + socket = connectionQueue.poll(1, TimeUnit.SECONDS); + if (socket == null) { + continue; + } + + peerDescription = socket.getRemoteSocketAddress().toString(); + + if (socket.isClosed()) { + logger.debug("Connection to Peer {} is closed. Will not attempt to communicate over this Socket.", peerDescription); + continue; + } + + logger.debug("Receiving FlowFiles from Peer {}", peerDescription); + loadBalanceProtocol.receiveFlowFiles(socket); + + if (socket.isConnected()) { + logger.debug("Finished receiving FlowFiles from Peer {}. Will recycle connection.", peerDescription); + connectionQueue.offer(socket); + } else { + logger.debug("Finished receiving FlowFiles from Peer {}. Socket is no longer connected so will not recycle connection.", peerDescription); + } + } catch (final Exception e) { + if (socket != null) { + try { + socket.close(); + } catch (final IOException ioe) { + e.addSuppressed(ioe); + } + } + + logger.error("Failed to communicate with Peer {}", peerDescription, e); + eventReporter.reportEvent(Severity.ERROR, "Load Balanced Connection", "Failed to receive FlowFiles for Load Balancing due to " + e); + } + } + + logger.info("Connection Load Balance Server shutdown. 
Will no longer handle incoming requests."); + } + } + + + private class AcceptConnection implements Runnable { + private final ServerSocket serverSocket; + private volatile boolean stopped = false; + + public AcceptConnection(final ServerSocket serverSocket) { + this.serverSocket = serverSocket; + } + + public void stop() { + stopped = true; + } + + @Override + public void run() { + try { + serverSocket.setSoTimeout(1000); + } catch (final Exception e) { + logger.error("Failed to set soTimeout on Server Socket for Load Balancing data across cluster", e); + } + + while (!stopped) { + try { + final Socket socket; + try { + socket = serverSocket.accept(); + } catch (final SocketTimeoutException ste) { + continue; + } + + socket.setSoTimeout(connectionTimeoutMillis); + connectionQueue.offer(socket); + } catch (final Exception e) { + logger.error("{} Failed to accept connection from other node in cluster", ConnectionLoadBalanceServer.this, e); + } + } + + try { + serverSocket.close(); + } catch (final Exception e) { + logger.warn("Failed to properly shutdown Server Socket for Load Balancing", e); + } + } + } + + @Override + public String toString() { + return "ConnectionLoadBalanceServer[hostname=" + hostname + ", port=" + port + ", secure=" + (sslContext != null) + "]"; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/LoadBalanceAuthorizer.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/LoadBalanceAuthorizer.java new file mode 100644 index 000000000000..3a716e203561 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/LoadBalanceAuthorizer.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered.server; + +import java.util.Collection; + +public interface LoadBalanceAuthorizer { + void authorize(Collection clientIdentities) throws NotAuthorizedException; +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/LoadBalanceProtocol.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/LoadBalanceProtocol.java new file mode 100644 index 000000000000..5a74ebcf5e73 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/LoadBalanceProtocol.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.server; + +import java.io.IOException; +import java.net.Socket; + +public interface LoadBalanceProtocol { + + /** + * Receives FlowFiles from the peer attached to the socket + * + * @param socket the socket to read from and write to + * + * @throws TransactionAbortedException if the transaction was aborted + * @throws IOException if unable to communicate with the peer + */ + void receiveFlowFiles(Socket socket) throws IOException; + +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/NotAuthorizedException.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/NotAuthorizedException.java new file mode 100644 index 000000000000..8aa1d5348d4a --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/NotAuthorizedException.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered.server; + +import java.io.IOException; + +public class NotAuthorizedException extends IOException { + public NotAuthorizedException(String message) { + super(message); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/StandardLoadBalanceProtocol.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/StandardLoadBalanceProtocol.java new file mode 100644 index 000000000000..d6beff3531ce --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/StandardLoadBalanceProtocol.java @@ -0,0 +1,614 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.server; + +import org.apache.nifi.connectable.Connection; +import org.apache.nifi.controller.FlowController; +import org.apache.nifi.controller.queue.FlowFileQueue; +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.queue.LoadBalancedFlowFileQueue; +import org.apache.nifi.controller.repository.ContentRepository; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileRepository; +import org.apache.nifi.controller.repository.RepositoryRecord; +import org.apache.nifi.controller.repository.StandardFlowFileRecord; +import org.apache.nifi.controller.repository.StandardRepositoryRecord; +import org.apache.nifi.controller.repository.claim.ContentClaim; +import org.apache.nifi.controller.repository.claim.ResourceClaim; +import org.apache.nifi.controller.repository.io.LimitedInputStream; +import org.apache.nifi.flowfile.attributes.CoreAttributes; +import org.apache.nifi.provenance.ProvenanceEventBuilder; +import org.apache.nifi.provenance.ProvenanceEventRecord; +import org.apache.nifi.provenance.ProvenanceEventType; +import org.apache.nifi.provenance.ProvenanceRepository; +import org.apache.nifi.provenance.StandardProvenanceEventRecord; +import org.apache.nifi.remote.StandardVersionNegotiator; +import org.apache.nifi.remote.VersionNegotiator; +import org.apache.nifi.security.util.CertificateUtils; +import org.apache.nifi.stream.io.ByteCountingInputStream; +import org.apache.nifi.stream.io.LimitingInputStream; +import org.apache.nifi.stream.io.StreamUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.SSLPeerUnverifiedException; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocket; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; 
+import java.io.DataInputStream; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.Socket; +import java.net.SocketTimeoutException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.zip.CRC32; +import java.util.zip.CheckedInputStream; +import java.util.zip.Checksum; +import java.util.zip.GZIPInputStream; + +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.ABORT_PROTOCOL_NEGOTIATION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.ABORT_TRANSACTION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.CHECK_SPACE; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.COMPLETE_TRANSACTION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.CONFIRM_CHECKSUM; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.CONFIRM_COMPLETE_TRANSACTION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.DATA_FRAME_FOLLOWS; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.MORE_FLOWFILES; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.NO_DATA_FRAME; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.NO_MORE_FLOWFILES; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.QUEUE_FULL; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.REJECT_CHECKSUM; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.REQEUST_DIFFERENT_VERSION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.SKIP_SPACE_CHECK; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.SPACE_AVAILABLE; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.VERSION_ACCEPTED; + +public class StandardLoadBalanceProtocol implements LoadBalanceProtocol { + private static final Logger logger = LoggerFactory.getLogger(StandardLoadBalanceProtocol.class); + + private static final int SOCKET_CLOSED = -1; + private static final int NO_DATA_AVAILABLE = 0; + + private final FlowFileRepository flowFileRepository; + private final ContentRepository contentRepository; + private final ProvenanceRepository provenanceRepository; + private final FlowController flowController; + private final LoadBalanceAuthorizer authorizer; + + private final ThreadLocal dataBuffer = new ThreadLocal<>(); + private final AtomicLong lineageStartIndex = new AtomicLong(0L); + + public StandardLoadBalanceProtocol(final FlowFileRepository flowFileRepository, final ContentRepository contentRepository, final ProvenanceRepository provenanceRepository, + final FlowController flowController, final LoadBalanceAuthorizer authorizer) { + 
this.flowFileRepository = flowFileRepository; + this.contentRepository = contentRepository; + this.provenanceRepository = provenanceRepository; + this.flowController = flowController; + this.authorizer = authorizer; + } + + + @Override + public void receiveFlowFiles(final Socket socket) throws IOException { + final InputStream in = new BufferedInputStream(socket.getInputStream()); + final OutputStream out = new BufferedOutputStream(socket.getOutputStream()); + + String peerDescription = socket.getInetAddress().toString(); + if (socket instanceof SSLSocket) { + final SSLSession sslSession = ((SSLSocket) socket).getSession(); + + final Set certIdentities; + try { + certIdentities = getCertificateIdentities(sslSession); + + final String dn = CertificateUtils.extractPeerDNFromSSLSocket(socket); + peerDescription = CertificateUtils.extractUsername(dn); + } catch (final CertificateException e) { + throw new IOException("Failed to extract Client Certificate", e); + } + + logger.debug("Connection received from peer {}. Will perform authorization against Client Identities '{}'", + peerDescription, certIdentities); + + authorizer.authorize(certIdentities); + logger.debug("Client Identities {} are authorized to load balance data", certIdentities); + } + + final int version = negotiateProtocolVersion(in, out, peerDescription); + + if (version == SOCKET_CLOSED) { + socket.close(); + return; + } + if (version == NO_DATA_AVAILABLE) { + logger.debug("No data is available from {}", socket.getRemoteSocketAddress()); + return; + } + + receiveFlowFiles(in, out, peerDescription, version, socket.getInetAddress().getHostName()); + } + + private Set getCertificateIdentities(final SSLSession sslSession) throws CertificateException, SSLPeerUnverifiedException { + final Certificate[] certs = sslSession.getPeerCertificates(); + if (certs == null || certs.length == 0) { + throw new SSLPeerUnverifiedException("No certificates found"); + } + + final X509Certificate cert = CertificateUtils.convertAbstractX509Certificate(certs[0]); + cert.checkValidity(); + + final Set identities = CertificateUtils.getSubjectAlternativeNames(cert).stream() + .map(CertificateUtils::extractUsername) + .collect(Collectors.toSet()); + + return identities; + } + + + protected int negotiateProtocolVersion(final InputStream in, final OutputStream out, final String peerDescription) throws IOException { + final VersionNegotiator negotiator = new StandardVersionNegotiator(1); + + for (int i=0;; i++) { + final int requestedVersion; + try { + requestedVersion = in.read(); + } catch (final SocketTimeoutException ste) { + // If first iteration, then just consider this to indicate "no data available". Otherwise, we were truly expecting data. + if (i == 0) { + logger.debug("SocketTimeoutException thrown when trying to negotiate Protocol Version"); + return NO_DATA_AVAILABLE; + } + + throw ste; + } + + if (requestedVersion < 0) { + logger.debug("Encountered End-of-File when receiving the the recommended Protocol Version. Returning -1 for the protocol version"); + return -1; + } + + final boolean supported = negotiator.isVersionSupported(requestedVersion); + if (supported) { + logger.debug("Peer {} requested version {} of the Load Balance Protocol. 
Accepting version.", peerDescription, requestedVersion); + + out.write(VERSION_ACCEPTED); + out.flush(); + return requestedVersion; + } + + final Integer preferredVersion = negotiator.getPreferredVersion(requestedVersion); + if (preferredVersion == null) { + logger.debug("Peer {} requested version {} of the Load Balance Protocol. This version is not acceptable. Aborting communications.", peerDescription, requestedVersion); + + out.write(ABORT_PROTOCOL_NEGOTIATION); + out.flush(); + throw new IOException("Peer " + peerDescription + " requested that we use version " + requestedVersion + + " of the Load Balance Protocol, but this version is unacceptable. Aborted communications."); + } + + logger.debug("Peer {} requested version {} of the Load Balance Protocol. Requesting that peer change to version {} instead.", peerDescription, requestedVersion, preferredVersion); + + out.write(REQEUST_DIFFERENT_VERSION); + out.write(preferredVersion); + out.flush(); + } + } + + + protected void receiveFlowFiles(final InputStream in, final OutputStream out, final String peerDescription, final int protocolVersion, final String nodeName) throws IOException { + logger.debug("Receiving FlowFiles from {}", peerDescription); + final long startTimestamp = System.currentTimeMillis(); + + final Checksum checksum = new CRC32(); + final InputStream checkedInput = new CheckedInputStream(in, checksum); + + final DataInputStream dataIn = new DataInputStream(checkedInput); + final String connectionId = getConnectionID(dataIn, peerDescription); + if (connectionId == null) { + logger.debug("Received no Connection ID from Peer {}. Will consider receipt of FlowFiles complete", peerDescription); + return; + } + + final Connection connection = flowController.getConnection(connectionId); + if (connection == null) { + logger.error("Attempted to receive FlowFiles from Peer {} for Connection with ID {} but no connection exists with that ID", peerDescription, connectionId); + throw new TransactionAbortedException("Attempted to receive FlowFiles from Peer " + peerDescription + " for Connection with ID " + connectionId + " but no Connection exists with that ID"); + } + + final FlowFileQueue flowFileQueue = connection.getFlowFileQueue(); + if (!(flowFileQueue instanceof LoadBalancedFlowFileQueue)) { + throw new TransactionAbortedException("Attempted to receive FlowFiles from Peer " + peerDescription + " for Connection with ID " + connectionId + " but the Connection with that ID is " + + "not configured to allow for Load Balancing"); + } + + final int spaceCheck = dataIn.read(); + if (spaceCheck < 0) { + throw new EOFException("Expected to receive a request to determine whether or not space was available for Connection with ID " + connectionId + " from Peer " + peerDescription); + } + + if (spaceCheck == CHECK_SPACE) { + if (flowFileQueue.isFull()) { + logger.debug("Received a 'Check Space' request from Peer {} for Connection with ID {}; responding with QUEUE_FULL", peerDescription, connectionId); + out.write(QUEUE_FULL); + out.flush(); + return; // we're finished receiving flowfiles for now, and we'll restart the communication process. 
+ } else { + logger.debug("Received a 'Check Space' request from Peer {} for Connection with ID {}; responding with SPACE_AVAILABLE", peerDescription, connectionId); + out.write(SPACE_AVAILABLE); + out.flush(); + } + } else if (spaceCheck != SKIP_SPACE_CHECK) { + throw new TransactionAbortedException("Expected to receive a request to determine whether or not space was available for Connection with ID " + + connectionId + " from Peer " + peerDescription + " but instead received value " + spaceCheck); + } + + final LoadBalanceCompression compression = connection.getFlowFileQueue().getLoadBalanceCompression(); + logger.debug("Receiving FlowFiles from Peer {} for Connection {}; Compression = {}", peerDescription, connectionId, compression); + + ContentClaim contentClaim = null; + final List flowFilesReceived = new ArrayList<>(); + OutputStream contentClaimOut = null; + long claimOffset = 0L; + + try { + try { + while (isMoreFlowFiles(dataIn, protocolVersion)) { + if (contentClaim == null) { + contentClaim = contentRepository.create(false); + contentClaimOut = contentRepository.write(contentClaim); + } else { + contentRepository.incrementClaimaintCount(contentClaim); + } + + final RemoteFlowFileRecord flowFile; + try { + flowFile = receiveFlowFile(dataIn, contentClaimOut, contentClaim, claimOffset, protocolVersion, peerDescription, compression); + } catch (final Exception e) { + contentRepository.decrementClaimantCount(contentClaim); + throw e; + } + + flowFilesReceived.add(flowFile); + + claimOffset += flowFile.getFlowFile().getSize(); + } + } finally { + if (contentClaimOut != null) { + contentClaimOut.close(); + } + } + + verifyChecksum(checksum, in, out, peerDescription, flowFilesReceived.size()); + completeTransaction(in, out, peerDescription, flowFilesReceived, nodeName, connectionId, startTimestamp, (LoadBalancedFlowFileQueue) flowFileQueue); + } catch (final Exception e) { + // If any Exception occurs, we need to decrement the claimant counts for the Content Claims that we wrote to because + // they are no longer needed. + for (final RemoteFlowFileRecord remoteFlowFile : flowFilesReceived) { + contentRepository.decrementClaimantCount(remoteFlowFile.getFlowFile().getContentClaim()); + } + + throw e; + } + + logger.debug("Successfully received {} FlowFiles from Peer {} to Load Balance for Connection {}", flowFilesReceived.size(), peerDescription, connectionId); + } + + private void completeTransaction(final InputStream in, final OutputStream out, final String peerDescription, final List flowFilesReceived, + final String nodeName, final String connectionId, final long startTimestamp, final LoadBalancedFlowFileQueue flowFileQueue) throws IOException { + final int completionIndicator = in.read(); + if (completionIndicator < 0) { + throw new EOFException("Expected to receive a Transaction Completion Indicator from Peer " + peerDescription + " but encountered EOF"); + } + + if (completionIndicator == ABORT_TRANSACTION) { + throw new TransactionAbortedException("Peer " + peerDescription + " chose to Abort Load Balance Transaction"); + } + + if (completionIndicator != COMPLETE_TRANSACTION) { + logger.debug("Expected to receive Transaction Completion Indicator from Peer " + peerDescription + " but instead received a value of " + completionIndicator + ". 
Sending back an Abort " + + "Transaction Flag."); + out.write(ABORT_TRANSACTION); + out.flush(); + throw new IOException("Expected to receive Transaction Completion Indicator from Peer " + peerDescription + " but instead received a value of " + completionIndicator); + } + + logger.debug("Received Complete Transaction indicator from Peer {}", peerDescription); + registerReceiveProvenanceEvents(flowFilesReceived, nodeName, connectionId, startTimestamp); + updateFlowFileRepository(flowFilesReceived, flowFileQueue); + transferFlowFilesToQueue(flowFilesReceived, flowFileQueue); + + out.write(CONFIRM_COMPLETE_TRANSACTION); + out.flush(); + } + + private void registerReceiveProvenanceEvents(final List flowFiles, final String nodeName, final String connectionId, final long startTimestamp) { + final long duration = System.currentTimeMillis() - startTimestamp; + + final List events = new ArrayList<>(flowFiles.size()); + for (final RemoteFlowFileRecord remoteFlowFile : flowFiles) { + final FlowFileRecord flowFileRecord = remoteFlowFile.getFlowFile(); + + final ProvenanceEventBuilder provenanceEventBuilder = new StandardProvenanceEventRecord.Builder() + .fromFlowFile(flowFileRecord) + .setEventType(ProvenanceEventType.RECEIVE) + .setTransitUri("nifi://" + nodeName + "/loadbalance/" + connectionId) + .setSourceSystemFlowFileIdentifier(remoteFlowFile.getRemoteUuid()) + .setEventDuration(duration) + .setComponentId(connectionId) + .setComponentType("Load Balanced Connection"); + + final ContentClaim contentClaim = flowFileRecord.getContentClaim(); + if (contentClaim != null) { + final ResourceClaim resourceClaim = contentClaim.getResourceClaim(); + provenanceEventBuilder.setCurrentContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), + contentClaim.getOffset() + flowFileRecord.getContentClaimOffset(), flowFileRecord.getSize()); + } + + final ProvenanceEventRecord provenanceEvent = provenanceEventBuilder.build(); + events.add(provenanceEvent); + } + + provenanceRepository.registerEvents(events); + } + + private void updateFlowFileRepository(final List flowFiles, final FlowFileQueue flowFileQueue) throws IOException { + final List repoRecords = flowFiles.stream() + .map(remoteFlowFile -> { + final StandardRepositoryRecord record = new StandardRepositoryRecord(flowFileQueue, remoteFlowFile.getFlowFile()); + record.setDestination(flowFileQueue); + return record; + }) + .collect(Collectors.toList()); + flowFileRepository.updateRepository(repoRecords); + } + + private void transferFlowFilesToQueue(final List remoteFlowFiles, final LoadBalancedFlowFileQueue flowFileQueue) { + final List flowFiles = remoteFlowFiles.stream().map(RemoteFlowFileRecord::getFlowFile).collect(Collectors.toList()); + flowFileQueue.receiveFromPeer(flowFiles); + } + + private void verifyChecksum(final Checksum checksum, final InputStream in, final OutputStream out, final String peerDescription, final int flowFileCount) throws IOException { + final long expectedChecksum = readChecksum(in); + if (checksum.getValue() == expectedChecksum) { + logger.debug("Checksum from Peer {} matched the checksum that was calculated. Writing confirmation.", peerDescription); + out.write(CONFIRM_CHECKSUM); + out.flush(); + } else { + logger.error("Received {} FlowFiles from peer {} but the Checksum reported by the peer ({}) did not match the checksum that was calculated ({}). 
Will reject the transaction.", + flowFileCount, peerDescription, expectedChecksum, checksum.getValue()); + out.write(REJECT_CHECKSUM); + out.flush(); + throw new TransactionAbortedException("Transaction with Peer " + peerDescription + " was aborted because the calculated checksum did not match the checksum provided by peer."); + } + } + + private long readChecksum(final InputStream in) throws IOException { + final byte[] buffer = getDataBuffer(); + StreamUtils.read(in, buffer,8 ); + return ByteBuffer.wrap(buffer, 0, 8).getLong(); + } + + private byte[] getDataBuffer() { + byte[] buffer = dataBuffer.get(); + if (buffer == null) { + buffer = new byte[65536 + 4096]; + dataBuffer.set(buffer); + } + + return buffer; + } + + private String getConnectionID(final DataInputStream in, final String peerDescription) throws IOException { + try { + return in.readUTF(); + } catch (final EOFException eof) { + logger.debug("Encountered EOFException when trying to receive Connection ID from Peer {}. Returning null for Connection ID", peerDescription); + return null; + } + } + + private boolean isMoreFlowFiles(final DataInputStream in, final int protocolVersion) throws IOException { + final int indicator = in.read(); + if (indicator < 0) { + throw new EOFException(); + } + + if (indicator == MORE_FLOWFILES) { + logger.debug("Peer indicates that there is another FlowFile in transaction"); + return true; + } + if (indicator == NO_MORE_FLOWFILES) { + logger.debug("Peer indicates that there are no more FlowFiles in transaction"); + return false; + } + + throw new IOException("Expected to receive 'More FlowFiles' indicator (" + MORE_FLOWFILES + + ") or 'No More FlowFiles' indicator (" + NO_MORE_FLOWFILES + ") but received invalid value of " + indicator); + } + + private RemoteFlowFileRecord receiveFlowFile(final DataInputStream dis, final OutputStream out, final ContentClaim contentClaim, final long claimOffset, final int protocolVersion, + final String peerDescription, final LoadBalanceCompression compression) throws IOException { + final int metadataLength = dis.readInt(); + + DataInputStream metadataIn = new DataInputStream(new LimitingInputStream(dis, metadataLength)); + if (compression != LoadBalanceCompression.DO_NOT_COMPRESS) { + metadataIn = new DataInputStream(new GZIPInputStream(metadataIn)); + } + + final Map attributes = readAttributes(metadataIn); + + logger.debug("Received Attributes {} from Peer {}", attributes, peerDescription); + + final long lineageStartDate = metadataIn.readLong(); + final long entryDate = metadataIn.readLong(); + + final ContentClaimTriple contentClaimTriple = consumeContent(dis, out, contentClaim, claimOffset, peerDescription, compression == LoadBalanceCompression.COMPRESS_ATTRIBUTES_AND_CONTENT); + + final FlowFileRecord flowFileRecord = new StandardFlowFileRecord.Builder() + .id(flowFileRepository.getNextFlowFileSequence()) + .addAttributes(attributes) + .contentClaim(contentClaimTriple.getContentClaim()) + .contentClaimOffset(contentClaimTriple.getClaimOffset()) + .size(contentClaimTriple.getContentLength()) + .entryDate(entryDate) + .lineageStart(lineageStartDate, lineageStartIndex.getAndIncrement()) + .build(); + + logger.debug("Received FlowFile {} with {} attributes and {} bytes of content", flowFileRecord, attributes.size(), contentClaimTriple.getContentLength()); + return new RemoteFlowFileRecord(attributes.get(CoreAttributes.UUID.key()), flowFileRecord); + } + + private Map readAttributes(final DataInputStream in) throws IOException { + final int attributeCount = 
in.readInt(); + final Map attributes = new HashMap<>(); + for (int i = 0; i < attributeCount; i++) { + final String key = readLongString(in); + final String value = readLongString(in); + + logger.trace("Received attribute '{}' = '{}'", key, value); + attributes.put(key, value); + } + + return attributes; + } + + private String readLongString(final DataInputStream in) throws IOException { + final int stringLength = in.readInt(); + final byte[] bytes = new byte[stringLength]; + StreamUtils.fillBuffer(in, bytes); + return new String(bytes, StandardCharsets.UTF_8); + } + + private ContentClaimTriple consumeContent(final DataInputStream in, final OutputStream out, final ContentClaim contentClaim, final long claimOffset, + final String peerDescription, final boolean compressed) throws IOException { + logger.debug("Consuming content from Peer {}", peerDescription); + + int dataFrameIndicator = in.read(); + if (dataFrameIndicator < 0) { + throw new EOFException("Encountered End-of-File when expecting to read Data Frame Indicator from Peer " + peerDescription); + } + if (dataFrameIndicator == NO_DATA_FRAME) { + logger.debug("Peer {} indicates that there is no Data Frame for the FlowFile", peerDescription); + return new ContentClaimTriple(null, 0L, 0L); + } + if (dataFrameIndicator == ABORT_TRANSACTION) { + throw new TransactionAbortedException("Peer " + peerDescription + " requested that transaction be aborted"); + } + if (dataFrameIndicator != DATA_FRAME_FOLLOWS) { + throw new IOException("Expected a Data Frame Indicator from Peer " + peerDescription + " but received a value of " + dataFrameIndicator); + } + + int dataFrameLength = in.readUnsignedShort(); + logger.trace("Received Data Frame Length of {} for {}", dataFrameLength, peerDescription); + + byte[] buffer = getDataBuffer(); + + long claimLength = 0; + while (true) { + final InputStream limitedIn = new LimitedInputStream(in, dataFrameLength); + final ByteCountingInputStream bcis = new ByteCountingInputStream(limitedIn); + final InputStream contentIn = compressed ? 
new GZIPInputStream(bcis) : bcis; + final int decompressedSize = StreamUtils.fillBuffer(contentIn, buffer, false); + + if (bcis.getBytesRead() < dataFrameLength) { + throw new EOFException("Expected to receive a Data Frame of length " + dataFrameLength + " bytes but received only " + bcis.getBytesRead() + " bytes"); + } + + out.write(buffer, 0, decompressedSize); + + claimLength += decompressedSize; + + dataFrameIndicator = in.read(); + if (dataFrameIndicator < 0) { + throw new EOFException("Encountered End-of-File when expecting to receive a Data Frame Indicator"); + } + if (dataFrameIndicator == NO_DATA_FRAME) { + logger.debug("Peer {} indicated that no more data frames are available", peerDescription); + break; + } + if (dataFrameIndicator == ABORT_TRANSACTION) { + logger.debug("Peer {} requested that transaction be aborted by sending Data Frame Length of {}", peerDescription, dataFrameLength); + throw new TransactionAbortedException("Peer " + peerDescription + " requested that transaction be aborted"); + } + if (dataFrameIndicator != DATA_FRAME_FOLLOWS) { + throw new IOException("Expected a Data Frame Indicator from Peer " + peerDescription + " but received a value of " + dataFrameIndicator); + } + + dataFrameLength = in.readUnsignedShort(); + logger.trace("Received Data Frame Length of {} for {}", dataFrameLength, peerDescription); + } + + return new ContentClaimTriple(contentClaim, claimOffset, claimLength); + } + + private static class ContentClaimTriple { + private final ContentClaim contentClaim; + private final long claimOffset; + private final long contentLength; + + public ContentClaimTriple(ContentClaim contentClaim, long claimOffset, long contentLength) { + this.contentClaim = contentClaim; + this.claimOffset = claimOffset; + this.contentLength = contentLength; + } + + public ContentClaim getContentClaim() { + return contentClaim; + } + + public long getClaimOffset() { + return claimOffset; + } + + public long getContentLength() { + return contentLength; + } + } + + private static class RemoteFlowFileRecord { + private final String remoteUuid; + private final FlowFileRecord flowFile; + + public RemoteFlowFileRecord(final String remoteUuid, final FlowFileRecord flowFile) { + this.remoteUuid = remoteUuid; + this.flowFile = flowFile; + } + + public String getRemoteUuid() { + return remoteUuid; + } + + public FlowFileRecord getFlowFile() { + return flowFile; + } + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/TransactionAbortedException.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/TransactionAbortedException.java new file mode 100644 index 000000000000..43fd4c353481 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/queue/clustered/server/TransactionAbortedException.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.server; + +import java.io.IOException; + +public class TransactionAbortedException extends IOException { + public TransactionAbortedException(final String message) { + super(message); + } + + public TransactionAbortedException(final String message, final Throwable cause) { + super(message, cause); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/FileSystemRepository.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/FileSystemRepository.java index 256dba94e5d2..c041f5c91b60 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/FileSystemRepository.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/FileSystemRepository.java @@ -578,7 +578,7 @@ public ContentClaim create(final boolean lossTolerant) throws IOException { } final long modulatedSectionIndex = currentIndex % SECTIONS_PER_CONTAINER; - final String section = String.valueOf(modulatedSectionIndex); + final String section = String.valueOf(modulatedSectionIndex).intern(); final String claimId = System.currentTimeMillis() + "-" + currentIndex; resourceClaim = resourceClaimManager.newResourceClaim(containerName, section, claimId, lossTolerant, true); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/RepositoryContext.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/RepositoryContext.java index a4073716cb00..ecf504677965 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/RepositoryContext.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/RepositoryContext.java @@ -32,9 +32,6 @@ import org.apache.nifi.provenance.ProvenanceEventRepository; import org.apache.nifi.util.Connectables; -/** - * - */ public class RepositoryContext { private final Connectable connectable; diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/StandardProcessSession.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/StandardProcessSession.java index 9741cffe02ff..4354dc416beb 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/StandardProcessSession.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/repository/StandardProcessSession.java @@ -68,6 +68,7 @@ import java.nio.file.Files; 
import java.nio.file.Path; import java.util.ArrayList; +import java.util.BitSet; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -98,6 +99,11 @@ *

*/ public final class StandardProcessSession implements ProcessSession, ProvenanceEventEnricher { + private static final int SOURCE_EVENT_BIT_INDEXES = (1 << ProvenanceEventType.CREATE.ordinal()) + | (1 << ProvenanceEventType.FORK.ordinal()) + | (1 << ProvenanceEventType.JOIN.ordinal()) + | (1 << ProvenanceEventType.RECEIVE.ordinal()) + | (1 << ProvenanceEventType.FETCH.ordinal()); private static final AtomicLong idGenerator = new AtomicLong(0L); private static final AtomicLong enqueuedIndex = new AtomicLong(0L); @@ -110,7 +116,7 @@ public final class StandardProcessSession implements ProcessSession, ProvenanceE private static final Logger claimLog = LoggerFactory.getLogger(StandardProcessSession.class.getSimpleName() + ".claims"); private static final int MAX_ROLLBACK_FLOWFILES_TO_LOG = 5; - private final Map records = new ConcurrentHashMap<>(); + private final Map records = new ConcurrentHashMap<>(); private final Map connectionCounts = new ConcurrentHashMap<>(); private final Map> unacknowledgedFlowFiles = new ConcurrentHashMap<>(); private final Map appendableStreams = new ConcurrentHashMap<>(); @@ -253,7 +259,7 @@ public void checkpoint() { List autoTerminatedEvents = null; // validate that all records have a transfer relationship for them and if so determine the destination node and clone as necessary - final Map toAdd = new HashMap<>(); + final Map toAdd = new HashMap<>(); for (final StandardRepositoryRecord record : records.values()) { if (record.isMarkedForDelete()) { continue; @@ -317,7 +323,7 @@ public void checkpoint() { newRecord.setDestination(destination.getFlowFileQueue()); newRecord.setTransferRelationship(record.getTransferRelationship()); // put the mapping into toAdd because adding to records now will cause a ConcurrentModificationException - toAdd.put(clone, newRecord); + toAdd.put(clone.getId(), newRecord); } } } @@ -365,10 +371,7 @@ private void commit(final Checkpoint checkpoint) { * points to the Original Claim -- which has already been removed! * */ - for (final Map.Entry entry : checkpoint.records.entrySet()) { - final FlowFile flowFile = entry.getKey(); - final StandardRepositoryRecord record = entry.getValue(); - + for (final StandardRepositoryRecord record : checkpoint.records.values()) { if (record.isMarkedForDelete()) { // if the working claim is not the same as the original claim, we can immediately destroy the working claim // because it was created in this session and is to be deleted. We don't need to wait for the FlowFile Repo to sync. @@ -380,10 +383,14 @@ private void commit(final Checkpoint checkpoint) { // an issue if we only updated the FlowFile attributes. decrementClaimCount(record.getOriginalClaim()); } - final long flowFileLife = System.currentTimeMillis() - flowFile.getEntryDate(); - final Connectable connectable = context.getConnectable(); - final Object terminator = connectable instanceof ProcessorNode ? ((ProcessorNode) connectable).getProcessor() : connectable; - LOG.info("{} terminated by {}; life of FlowFile = {} ms", new Object[] {flowFile, terminator, flowFileLife}); + + if (LOG.isInfoEnabled()) { + final FlowFileRecord flowFile = record.getCurrent(); + final long flowFileLife = System.currentTimeMillis() - flowFile.getEntryDate(); + final Connectable connectable = context.getConnectable(); + final Object terminator = connectable instanceof ProcessorNode ? 
((ProcessorNode) connectable).getProcessor() : connectable; + LOG.info("{} terminated by {}; life of FlowFile = {} ms", new Object[]{flowFile, terminator, flowFileLife}); + } } else if (record.isWorking() && record.getWorkingClaim() != record.getOriginalClaim()) { // records which have been updated - remove original if exists decrementClaimCount(record.getOriginalClaim()); @@ -482,6 +489,8 @@ private void commit(final Checkpoint checkpoint) { LOG.debug(timingInfo.toString()); } } catch (final Exception e) { + LOG.error("Failed to commit session {}. Will roll back.", this, e); + try { // if we fail to commit the session, we need to roll back // the checkpoints as well because none of the checkpoints @@ -542,10 +551,11 @@ private void updateEventRepository(final Checkpoint checkpoint) { flowFileEvent.setFlowFilesSent(flowFilesSent); flowFileEvent.setBytesSent(bytesSent); + final long now = System.currentTimeMillis(); long lineageMillis = 0L; - for (final Map.Entry entry : checkpoint.records.entrySet()) { - final FlowFile flowFile = entry.getKey(); - final long lineageDuration = System.currentTimeMillis() - flowFile.getLineageStartDate(); + for (final StandardRepositoryRecord record : checkpoint.records.values()) { + final FlowFile flowFile = record.getCurrent(); + final long lineageDuration = now - flowFile.getLineageStartDate(); lineageMillis += lineageDuration; } flowFileEvent.setAggregateLineageMillis(lineageMillis); @@ -564,13 +574,16 @@ private void updateEventRepository(final Checkpoint checkpoint) { } private Map combineCounters(final Map first, final Map second) { - if (first == null && second == null) { + final boolean firstEmpty = first == null || first.isEmpty(); + final boolean secondEmpty = second == null || second.isEmpty(); + + if (firstEmpty && secondEmpty) { return null; } - if (first == null) { + if (firstEmpty) { return second; } - if (second == null) { + if (secondEmpty) { return first; } @@ -580,14 +593,13 @@ private Map combineCounters(final Map first, final M return combined; } - private void addEventType(final Map> map, final String id, final ProvenanceEventType eventType) { - Set eventTypes = map.get(id); - if (eventTypes == null) { - eventTypes = new HashSet<>(); - map.put(id, eventTypes); - } + private void addEventType(final Map map, final String id, final ProvenanceEventType eventType) { + final BitSet eventTypes = map.computeIfAbsent(id, key -> new BitSet()); + eventTypes.set(eventType.ordinal()); + } - eventTypes.add(eventType); + private StandardRepositoryRecord getRecord(final FlowFile flowFile) { + return records.get(flowFile.getId()); } private void updateProvenanceRepo(final Checkpoint checkpoint) { @@ -598,7 +610,7 @@ private void updateProvenanceRepo(final Checkpoint checkpoint) { // in case the Processor developer submitted the same events to the reporter. So we use a LinkedHashSet // for this, so that we are able to ensure that the events are submitted in the proper order. 
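The addEventType rewrite above replaces a per-FlowFile HashSet of ProvenanceEventType with a BitSet indexed by the enum ordinal, which is the same encoding the SOURCE_EVENT_BIT_INDEXES constant near the top of this class expresses as an int mask. A small self-contained sketch of the idea follows; the class and method names are illustrative only, and it uses a BitSet mask with intersects() where the patch tests the five creation-style bits individually.

    // Illustrative sketch (not part of the patch) of ordinal-indexed event-type tracking.
    import org.apache.nifi.provenance.ProvenanceEventType;

    import java.util.BitSet;
    import java.util.HashMap;
    import java.util.Map;

    public class EventTypeBitSetSketch {
        // Bits for event types that introduce a FlowFile, mirroring SOURCE_EVENT_BIT_INDEXES.
        private static final BitSet SOURCE_EVENTS = new BitSet();
        static {
            SOURCE_EVENTS.set(ProvenanceEventType.CREATE.ordinal());
            SOURCE_EVENTS.set(ProvenanceEventType.FORK.ordinal());
            SOURCE_EVENTS.set(ProvenanceEventType.JOIN.ordinal());
            SOURCE_EVENTS.set(ProvenanceEventType.RECEIVE.ordinal());
            SOURCE_EVENTS.set(ProvenanceEventType.FETCH.ordinal());
        }

        private final Map<String, BitSet> eventTypesPerFlowFileId = new HashMap<>();

        public void addEventType(final String flowFileUuid, final ProvenanceEventType eventType) {
            // computeIfAbsent replaces the old get / null-check / put sequence
            eventTypesPerFlowFileId.computeIfAbsent(flowFileUuid, key -> new BitSet()).set(eventType.ordinal());
        }

        public boolean creationEventRegistered(final String flowFileUuid) {
            final BitSet registered = eventTypesPerFlowFileId.get(flowFileUuid);
            return registered != null && registered.intersects(SOURCE_EVENTS);
        }
    }

For an enum of this size the bits fit in a single long, so asking "was a CREATE, FORK, JOIN, RECEIVE, or FETCH event already registered for this FlowFile?" no longer requires building a HashSet per FlowFile.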
final Set recordsToSubmit = new LinkedHashSet<>(); - final Map> eventTypesPerFlowFileId = new HashMap<>(); + final Map eventTypesPerFlowFileId = new HashMap<>(); final Set processorGenerated = checkpoint.reportedEvents; @@ -611,7 +623,7 @@ private void updateProvenanceRepo(final Checkpoint checkpoint) { final ProvenanceEventBuilder builder = entry.getValue(); final FlowFile flowFile = entry.getKey(); - updateEventContentClaims(builder, flowFile, checkpoint.records.get(flowFile)); + updateEventContentClaims(builder, flowFile, checkpoint.getRecord(flowFile)); final ProvenanceEventRecord event = builder.build(); if (!event.getChildUuids().isEmpty() && !isSpuriousForkEvent(event, checkpoint.removedFlowFiles)) { @@ -690,14 +702,15 @@ private void updateProvenanceRepo(final Checkpoint checkpoint) { } if (checkpoint.createdFlowFiles.contains(flowFileId)) { - final Set registeredTypes = eventTypesPerFlowFileId.get(flowFileId); + final BitSet registeredTypes = eventTypesPerFlowFileId.get(flowFileId); boolean creationEventRegistered = false; if (registeredTypes != null) { - if (registeredTypes.contains(ProvenanceEventType.CREATE) - || registeredTypes.contains(ProvenanceEventType.FORK) - || registeredTypes.contains(ProvenanceEventType.JOIN) - || registeredTypes.contains(ProvenanceEventType.RECEIVE) - || registeredTypes.contains(ProvenanceEventType.FETCH)) { + if (registeredTypes.get(ProvenanceEventType.CREATE.ordinal()) + || registeredTypes.get(ProvenanceEventType.FORK.ordinal()) + || registeredTypes.get(ProvenanceEventType.JOIN.ordinal()) + || registeredTypes.get(ProvenanceEventType.RECEIVE.ordinal()) + || registeredTypes.get(ProvenanceEventType.FETCH.ordinal())) { + creationEventRegistered = true; } } @@ -800,7 +813,7 @@ private void updateEventContentClaims(final ProvenanceEventBuilder builder, fina public StandardProvenanceEventRecord enrich(final ProvenanceEventRecord rawEvent, final FlowFile flowFile, final long commitNanos) { verifyTaskActive(); - final StandardRepositoryRecord repoRecord = records.get(flowFile); + final StandardRepositoryRecord repoRecord = getRecord(flowFile); if (repoRecord == null) { throw new FlowFileHandlingException(flowFile + " is not known in this session (" + toString() + ")"); } @@ -837,12 +850,12 @@ public StandardProvenanceEventRecord enrich(final ProvenanceEventRecord rawEvent } private StandardProvenanceEventRecord enrich( - final ProvenanceEventRecord rawEvent, final Map flowFileRecordMap, final Map records, + final ProvenanceEventRecord rawEvent, final Map flowFileRecordMap, final Map records, final boolean updateAttributes, final long commitNanos) { final StandardProvenanceEventRecord.Builder recordBuilder = new StandardProvenanceEventRecord.Builder().fromEvent(rawEvent); final FlowFileRecord eventFlowFile = flowFileRecordMap.get(rawEvent.getFlowFileUuid()); if (eventFlowFile != null) { - final StandardRepositoryRecord repoRecord = records.get(eventFlowFile); + final StandardRepositoryRecord repoRecord = records.get(eventFlowFile.getId()); if (repoRecord.getCurrent() != null && repoRecord.getCurrentClaim() != null) { final ContentClaim currentClaim = repoRecord.getCurrentClaim(); @@ -908,7 +921,7 @@ private boolean isSpuriousForkEvent(final ProvenanceEventRecord event, final Set * @param records records * @return true if spurious route */ - private boolean isSpuriousRouteEvent(final ProvenanceEventRecord event, final Map records) { + private boolean isSpuriousRouteEvent(final ProvenanceEventRecord event, final Map records) { if (event.getEventType() == 
ProvenanceEventType.ROUTE) { final String relationshipName = event.getRelationship(); final Relationship relationship = new Relationship.Builder().name(relationshipName).build(); @@ -917,10 +930,9 @@ private boolean isSpuriousRouteEvent(final ProvenanceEventRecord event, final Ma // If the number of connections for this relationship is not 1, then we can't ignore this ROUTE event, // as it may be cloning the FlowFile and adding to multiple connections. if (connectionsForRelationship.size() == 1) { - for (final Map.Entry entry : records.entrySet()) { - final FlowFileRecord flowFileRecord = entry.getKey(); + for (final StandardRepositoryRecord repoRecord : records.values()) { + final FlowFileRecord flowFileRecord = repoRecord.getCurrent(); if (event.getFlowFileUuid().equals(flowFileRecord.getAttribute(CoreAttributes.UUID.key()))) { - final StandardRepositoryRecord repoRecord = entry.getValue(); if (repoRecord.getOriginalQueue() == null) { return false; } @@ -1075,35 +1087,35 @@ private String loggableFlowfileInfo() { final StringBuilder details = new StringBuilder(1024).append("["); final int initLen = details.length(); int filesListed = 0; - for (Map.Entry entry : records.entrySet()) { + for (StandardRepositoryRecord repoRecord : records.values()) { if (filesListed >= MAX_ROLLBACK_FLOWFILES_TO_LOG) { break; } filesListed++; - final FlowFileRecord entryKey = entry.getKey(); - final StandardRepositoryRecord entryValue = entry.getValue(); + if (details.length() > initLen) { details.append(", "); } - if (entryValue.getOriginalQueue() != null && entryValue.getOriginalQueue().getIdentifier() != null) { + if (repoRecord.getOriginalQueue() != null && repoRecord.getOriginalQueue().getIdentifier() != null) { details.append("queue=") - .append(entryValue.getOriginalQueue().getIdentifier()) + .append(repoRecord.getOriginalQueue().getIdentifier()) .append("/"); } details.append("filename=") - .append(entryKey.getAttribute(CoreAttributes.FILENAME.key())) + .append(repoRecord.getCurrent().getAttribute(CoreAttributes.FILENAME.key())) .append("/uuid=") - .append(entryKey.getAttribute(CoreAttributes.UUID.key())); + .append(repoRecord.getCurrent().getAttribute(CoreAttributes.UUID.key())); } - if (records.entrySet().size() > MAX_ROLLBACK_FLOWFILES_TO_LOG) { + if (records.size() > MAX_ROLLBACK_FLOWFILES_TO_LOG) { if (details.length() > initLen) { details.append(", "); } - details.append(records.entrySet().size() - MAX_ROLLBACK_FLOWFILES_TO_LOG) + details.append(records.size() - MAX_ROLLBACK_FLOWFILES_TO_LOG) .append(" additional Flowfiles not listed"); } else if (filesListed == 0) { details.append("none"); } + details.append("]"); return details.toString(); } @@ -1164,11 +1176,13 @@ private void resetState() { } private void acknowledgeRecords() { - for (final Map.Entry> entry : unacknowledgedFlowFiles.entrySet()) { - LOG.trace("Acknowledging {} for {}", entry.getValue(), entry.getKey()); + final Iterator>> itr = unacknowledgedFlowFiles.entrySet().iterator(); + while (itr.hasNext()) { + final Map.Entry> entry = itr.next(); + itr.remove(); + entry.getKey().acknowledge(entry.getValue()); } - unacknowledgedFlowFiles.clear(); } @Override @@ -1212,7 +1226,7 @@ private void migrate(final StandardProcessSession newOwner, Collection throw new IllegalStateException(flowFile + " already in use for an active callback or OutputStream created by ProcessSession.write(FlowFile) has not been closed"); } - final StandardRepositoryRecord record = records.get(flowFile); + final StandardRepositoryRecord record = 
getRecord(flowFile); if (record == null) { throw new FlowFileHandlingException(flowFile + " is not known in this session (" + toString() + ")"); } @@ -1271,8 +1285,8 @@ private void migrate(final StandardProcessSession newOwner, Collection for (final FlowFile flowFile : flowFiles) { final FlowFileRecord flowFileRecord = (FlowFileRecord) flowFile; - final StandardRepositoryRecord repoRecord = this.records.remove(flowFile); - newOwner.records.put(flowFileRecord, repoRecord); + final StandardRepositoryRecord repoRecord = this.records.remove(flowFile.getId()); + newOwner.records.put(flowFileRecord.getId(), repoRecord); // Adjust the counts for Connections for each FlowFile that was pulled from a Connection. // We do not have to worry about accounting for 'input counts' on connections because those @@ -1344,9 +1358,9 @@ private String summarizeEvents(final Checkpoint checkpoint) { final Set modifiedFlowFileIds = new HashSet<>(); int largestTransferSetSize = 0; - for (final Map.Entry entry : checkpoint.records.entrySet()) { - final FlowFile flowFile = entry.getKey(); + for (final Map.Entry entry : checkpoint.records.entrySet()) { final StandardRepositoryRecord record = entry.getValue(); + final FlowFile flowFile = record.getCurrent(); final Relationship relationship = record.getTransferRelationship(); if (Relationship.SELF.equals(relationship)) { @@ -1475,7 +1489,7 @@ private void incrementConnectionOutputCounts(final String connectionId, final in private void registerDequeuedRecord(final FlowFileRecord flowFile, final Connection connection) { final StandardRepositoryRecord record = new StandardRepositoryRecord(connection.getFlowFileQueue(), flowFile); - records.put(flowFile, record); + records.put(flowFile.getId(), record); flowFilesIn++; contentSizeIn += flowFile.getSize(); @@ -1651,16 +1665,17 @@ public FlowFile create() { verifyTaskActive(); final Map attrs = new HashMap<>(); - attrs.put(CoreAttributes.FILENAME.key(), String.valueOf(System.nanoTime())); + final String uuid = UUID.randomUUID().toString(); + attrs.put(CoreAttributes.FILENAME.key(), uuid); attrs.put(CoreAttributes.PATH.key(), DEFAULT_FLOWFILE_PATH); - attrs.put(CoreAttributes.UUID.key(), UUID.randomUUID().toString()); + attrs.put(CoreAttributes.UUID.key(), uuid); final FlowFileRecord fFile = new StandardFlowFileRecord.Builder().id(context.getNextFlowFileSequence()) .addAttributes(attrs) .build(); final StandardRepositoryRecord record = new StandardRepositoryRecord(null); record.setWorking(fFile, attrs); - records.put(fFile, record); + records.put(fFile.getId(), record); createdFlowFiles.add(fFile.getAttribute(CoreAttributes.UUID.key())); return fFile; } @@ -1677,7 +1692,7 @@ public FlowFile clone(FlowFile example, final long offset, final long size) { verifyTaskActive(); example = validateRecordState(example); - final StandardRepositoryRecord exampleRepoRecord = records.get(example); + final StandardRepositoryRecord exampleRepoRecord = getRecord(example); final FlowFileRecord currRec = exampleRepoRecord.getCurrent(); final ContentClaim claim = exampleRepoRecord.getCurrentClaim(); if (offset + size > example.getSize()) { @@ -1698,7 +1713,7 @@ public FlowFile clone(FlowFile example, final long offset, final long size) { } final StandardRepositoryRecord record = new StandardRepositoryRecord(null); record.setWorking(clone, clone.getAttributes()); - records.put(clone, record); + records.put(clone.getId(), record); if (offset == 0L && size == example.getSize()) { provenanceReporter.clone(example, clone); @@ -1726,7 +1741,7 @@ private 
void registerForkEvent(final FlowFile parent, final FlowFile child) { eventBuilder.setComponentType(processorType); eventBuilder.addParentFlowFile(parent); - updateEventContentClaims(eventBuilder, parent, records.get(parent)); + updateEventContentClaims(eventBuilder, parent, getRecord(parent)); forkEventBuilders.put(parent, eventBuilder); } @@ -1748,7 +1763,7 @@ public FlowFile penalize(FlowFile flowFile) { verifyTaskActive(); flowFile = validateRecordState(flowFile); - final StandardRepositoryRecord record = records.get(flowFile); + final StandardRepositoryRecord record = getRecord(flowFile); final long expirationEpochMillis = System.currentTimeMillis() + context.getConnectable().getPenalizationPeriod(TimeUnit.MILLISECONDS); final FlowFileRecord newFile = new StandardFlowFileRecord.Builder().fromFlowFile(record.getCurrent()).penaltyExpirationTime(expirationEpochMillis).build(); record.setWorking(newFile); @@ -1764,7 +1779,7 @@ public FlowFile putAttribute(FlowFile flowFile, final String key, final String v return flowFile; } - final StandardRepositoryRecord record = records.get(flowFile); + final StandardRepositoryRecord record = getRecord(flowFile); final FlowFileRecord newFile = new StandardFlowFileRecord.Builder().fromFlowFile(record.getCurrent()).addAttribute(key, value).build(); record.setWorking(newFile, key, value); @@ -1776,7 +1791,7 @@ public FlowFile putAllAttributes(FlowFile flowFile, final Map at verifyTaskActive(); flowFile = validateRecordState(flowFile); - final StandardRepositoryRecord record = records.get(flowFile); + final StandardRepositoryRecord record = getRecord(flowFile); final Map updatedAttributes; if (attributes.containsKey(CoreAttributes.UUID.key())) { @@ -1790,6 +1805,7 @@ public FlowFile putAllAttributes(FlowFile flowFile, final Map at final FlowFileRecord newFile = ffBuilder.build(); record.setWorking(newFile, updatedAttributes); + return newFile; } @@ -1802,7 +1818,7 @@ public FlowFile removeAttribute(FlowFile flowFile, final String key) { return flowFile; } - final StandardRepositoryRecord record = records.get(flowFile); + final StandardRepositoryRecord record = getRecord(flowFile); final FlowFileRecord newFile = new StandardFlowFileRecord.Builder().fromFlowFile(record.getCurrent()).removeAttributes(key).build(); record.setWorking(newFile, key, null); return newFile; @@ -1817,7 +1833,7 @@ public FlowFile removeAllAttributes(FlowFile flowFile, final Set keys) { return flowFile; } - final StandardRepositoryRecord record = records.get(flowFile); + final StandardRepositoryRecord record = getRecord(flowFile); final FlowFileRecord newFile = new StandardFlowFileRecord.Builder().fromFlowFile(record.getCurrent()).removeAttributes(keys).build(); final Map updatedAttrs = new HashMap<>(); @@ -1838,7 +1854,7 @@ public FlowFile removeAllAttributes(FlowFile flowFile, final Pattern keyPattern) verifyTaskActive(); flowFile = validateRecordState(flowFile); - final StandardRepositoryRecord record = records.get(flowFile); + final StandardRepositoryRecord record = getRecord(flowFile); final FlowFileRecord newFile = new StandardFlowFileRecord.Builder().fromFlowFile(record.getCurrent()).removeAttributes(keyPattern).build(); if (keyPattern == null) { @@ -1891,7 +1907,7 @@ public void transfer(FlowFile flowFile, final Relationship relationship) { // the relationship specified is not known in this session/context throw new IllegalArgumentException("Relationship '" + relationship.getName() + "' is not known"); } - final StandardRepositoryRecord record = records.get(flowFile); + final 
StandardRepositoryRecord record = getRecord(flowFile); record.setTransferRelationship(relationship); updateLastQueuedDate(record); @@ -1909,7 +1925,7 @@ public void transfer(FlowFile flowFile) { verifyTaskActive(); flowFile = validateRecordState(flowFile); - final StandardRepositoryRecord record = records.get(flowFile); + final StandardRepositoryRecord record = getRecord(flowFile); if (record.getOriginalQueue() == null) { throw new IllegalArgumentException("Cannot transfer FlowFiles that are created in this Session back to self"); } @@ -1947,7 +1963,8 @@ public void transfer(Collection flowFiles, final Relationship relation final long queuedTime = System.currentTimeMillis(); long contentSize = 0L; for (final FlowFile flowFile : flowFiles) { - final StandardRepositoryRecord record = records.get(flowFile); + final FlowFileRecord flowFileRecord = (FlowFileRecord) flowFile; + final StandardRepositoryRecord record = getRecord(flowFileRecord); record.setTransferRelationship(relationship); updateLastQueuedDate(record, queuedTime); @@ -1968,7 +1985,7 @@ public void remove(FlowFile flowFile) { verifyTaskActive(); flowFile = validateRecordState(flowFile); - final StandardRepositoryRecord record = records.get(flowFile); + final StandardRepositoryRecord record = getRecord(flowFile); record.markForDelete(); removedFlowFiles.add(flowFile.getAttribute(CoreAttributes.UUID.key())); @@ -1992,7 +2009,7 @@ public void remove(Collection flowFiles) { flowFiles = validateRecordState(flowFiles); for (final FlowFile flowFile : flowFiles) { - final StandardRepositoryRecord record = records.get(flowFile); + final StandardRepositoryRecord record = getRecord(flowFile); record.markForDelete(); removedFlowFiles.add(flowFile.getAttribute(CoreAttributes.UUID.key())); @@ -2191,7 +2208,7 @@ public void read(FlowFile source, boolean allowSessionStreamManagement, InputStr verifyTaskActive(); source = validateRecordState(source, true); - final StandardRepositoryRecord record = records.get(source); + final StandardRepositoryRecord record = getRecord(source); try { ensureNotAppending(record.getCurrentClaim()); @@ -2247,7 +2264,7 @@ public InputStream read(FlowFile source) { verifyTaskActive(); source = validateRecordState(source, true); - final StandardRepositoryRecord record = records.get(source); + final StandardRepositoryRecord record = getRecord(source); try { ensureNotAppending(record.getCurrentClaim()); @@ -2396,7 +2413,7 @@ public FlowFile merge(Collection sources, FlowFile destination, final final Collection sourceRecords = new ArrayList<>(); for (final FlowFile source : sources) { - final StandardRepositoryRecord record = records.get(source); + final StandardRepositoryRecord record = getRecord(source); sourceRecords.add(record); try { @@ -2407,7 +2424,7 @@ public FlowFile merge(Collection sources, FlowFile destination, final } } - final StandardRepositoryRecord destinationRecord = records.get(destination); + final StandardRepositoryRecord destinationRecord = getRecord(destination); final ContentRepository contentRepo = context.getContentRepository(); final ContentClaim newClaim; try { @@ -2433,7 +2450,7 @@ public FlowFile merge(Collection sources, FlowFile destination, final final boolean useDemarcator = demarcator != null && demarcator.length > 0; final int numSources = sources.size(); for (final FlowFile source : sources) { - final StandardRepositoryRecord sourceRecord = records.get(source); + final StandardRepositoryRecord sourceRecord = getRecord(source); final long copied = 
contentRepo.exportTo(sourceRecord.getCurrentClaim(), out, sourceRecord.getCurrentClaimOffset(), source.getSize()); writtenCount += copied; @@ -2469,7 +2486,6 @@ public FlowFile merge(Collection sources, FlowFile destination, final removeTemporaryClaim(destinationRecord); final FlowFileRecord newFile = new StandardFlowFileRecord.Builder().fromFlowFile(destinationRecord.getCurrent()).contentClaim(newClaim).contentClaimOffset(0L).size(writtenCount).build(); destinationRecord.setWorking(newFile); - records.put(newFile, destinationRecord); return newFile; } @@ -2491,7 +2507,7 @@ private void ensureNotAppending(final ContentClaim claim) throws IOException { public OutputStream write(FlowFile source) { verifyTaskActive(); source = validateRecordState(source); - final StandardRepositoryRecord record = records.get(source); + final StandardRepositoryRecord record = getRecord(source); ContentClaim newClaim = null; try { @@ -2614,7 +2630,7 @@ public void close() throws IOException { public FlowFile write(FlowFile source, final OutputStreamCallback writer) { verifyTaskActive(); source = validateRecordState(source); - final StandardRepositoryRecord record = records.get(source); + final StandardRepositoryRecord record = getRecord(source); long writtenToFlowFile = 0L; ContentClaim newClaim = null; @@ -2673,7 +2689,7 @@ public FlowFile append(FlowFile source, final OutputStreamCallback writer) { verifyTaskActive(); source = validateRecordState(source); - final StandardRepositoryRecord record = records.get(source); + final StandardRepositoryRecord record = getRecord(source); long newSize = 0L; // Get the current Content Claim from the record and see if we already have @@ -2854,7 +2870,7 @@ private void resetReadClaim() { public FlowFile write(FlowFile source, final StreamCallback writer) { verifyTaskActive(); source = validateRecordState(source); - final StandardRepositoryRecord record = records.get(source); + final StandardRepositoryRecord record = getRecord(source); final ContentClaim currClaim = record.getCurrentClaim(); long writtenToFlowFile = 0L; @@ -2928,6 +2944,7 @@ public FlowFile write(FlowFile source, final StreamCallback writer) { .build(); record.setWorking(newFile); + return newFile; } @@ -2942,7 +2959,7 @@ public FlowFile importFrom(final Path source, final boolean keepSourceFile, Flow throw new FlowFileAccessException("Cannot write to path " + source.getParent().toFile().getAbsolutePath() + " so cannot delete file; will not import."); } - final StandardRepositoryRecord record = records.get(destination); + final StandardRepositoryRecord record = getRecord(destination); final ContentClaim newClaim; final long claimOffset; @@ -2988,7 +3005,7 @@ public FlowFile importFrom(final InputStream source, FlowFile destination) { verifyTaskActive(); destination = validateRecordState(destination); - final StandardRepositoryRecord record = records.get(destination); + final StandardRepositoryRecord record = getRecord(destination); ContentClaim newClaim = null; final long claimOffset = 0L; @@ -3026,7 +3043,7 @@ public FlowFile importFrom(final InputStream source, FlowFile destination) { public void exportTo(FlowFile source, final Path destination, final boolean append) { verifyTaskActive(); source = validateRecordState(source); - final StandardRepositoryRecord record = records.get(source); + final StandardRepositoryRecord record = getRecord(source); try { ensureNotAppending(record.getCurrentClaim()); @@ -3045,7 +3062,7 @@ public void exportTo(FlowFile source, final Path destination, final boolean appe public 
void exportTo(FlowFile source, final OutputStream destination) { verifyTaskActive(); source = validateRecordState(source); - final StandardRepositoryRecord record = records.get(source); + final StandardRepositoryRecord record = getRecord(source); if(record.getCurrentClaim() == null) { return; @@ -3133,7 +3150,7 @@ private FlowFile validateRecordState(final FlowFile flowFile, final boolean allo throw new IllegalStateException(flowFile + " already in use for an active callback or an OutputStream created by ProcessSession.write(FlowFile) has not been closed"); } - final StandardRepositoryRecord record = records.get(flowFile); + final StandardRepositoryRecord record = getRecord(flowFile); if (record == null) { rollback(); throw new FlowFileHandlingException(flowFile + " is not known in this session (" + toString() + ")"); @@ -3166,11 +3183,11 @@ private List validateRecordState(final Collection flowFiles) * false otherwise. */ boolean isFlowFileKnown(final FlowFile flowFile) { - return records.containsKey(flowFile); + return records.containsKey(flowFile.getId()); } private FlowFile getMostRecent(final FlowFile flowFile) { - final StandardRepositoryRecord existingRecord = records.get(flowFile); + final StandardRepositoryRecord existingRecord = getRecord(flowFile); return existingRecord == null ? flowFile : existingRecord.getCurrent(); } @@ -3179,10 +3196,12 @@ public FlowFile create(FlowFile parent) { verifyTaskActive(); parent = getMostRecent(parent); + final String uuid = UUID.randomUUID().toString(); + final Map newAttributes = new HashMap<>(3); - newAttributes.put(CoreAttributes.FILENAME.key(), String.valueOf(System.nanoTime())); + newAttributes.put(CoreAttributes.FILENAME.key(), uuid); newAttributes.put(CoreAttributes.PATH.key(), DEFAULT_FLOWFILE_PATH); - newAttributes.put(CoreAttributes.UUID.key(), UUID.randomUUID().toString()); + newAttributes.put(CoreAttributes.UUID.key(), uuid); final StandardFlowFileRecord.Builder fFileBuilder = new StandardFlowFileRecord.Builder().id(context.getNextFlowFileSequence()); @@ -3206,7 +3225,7 @@ public FlowFile create(FlowFile parent) { final FlowFileRecord fFile = fFileBuilder.build(); final StandardRepositoryRecord record = new StandardRepositoryRecord(null); record.setWorking(fFile, newAttributes); - records.put(fFile, record); + records.put(fFile.getId(), record); createdFlowFiles.add(fFile.getAttribute(CoreAttributes.UUID.key())); registerForkEvent(parent, fFile); @@ -3243,9 +3262,10 @@ public FlowFile create(Collection parents) { } } - newAttributes.put(CoreAttributes.FILENAME.key(), String.valueOf(System.nanoTime())); + final String uuid = UUID.randomUUID().toString(); + newAttributes.put(CoreAttributes.FILENAME.key(), uuid); newAttributes.put(CoreAttributes.PATH.key(), DEFAULT_FLOWFILE_PATH); - newAttributes.put(CoreAttributes.UUID.key(), UUID.randomUUID().toString()); + newAttributes.put(CoreAttributes.UUID.key(), uuid); final FlowFileRecord fFile = new StandardFlowFileRecord.Builder().id(context.getNextFlowFileSequence()) .addAttributes(newAttributes) @@ -3254,7 +3274,7 @@ public FlowFile create(Collection parents) { final StandardRepositoryRecord record = new StandardRepositoryRecord(null); record.setWorking(fFile, newAttributes); - records.put(fFile, record); + records.put(fFile.getId(), record); createdFlowFiles.add(fFile.getAttribute(CoreAttributes.UUID.key())); registerJoinEvent(fFile, parents); @@ -3335,7 +3355,7 @@ private static class Checkpoint { private final List autoTerminatedEvents = new ArrayList<>(); private final Set 
reportedEvents = new LinkedHashSet<>(); - private final Map records = new ConcurrentHashMap<>(); + private final Map records = new ConcurrentHashMap<>(); private final Map connectionCounts = new ConcurrentHashMap<>(); private final Map> unacknowledgedFlowFiles = new ConcurrentHashMap<>(); @@ -3388,5 +3408,9 @@ private void checkpoint(final StandardProcessSession session, final List> disabledFutures = new ArrayList<>(); private final ControllerServiceNode serviceNode; + private final ReadWriteLock rwLock = new ReentrantReadWriteLock(); + private final Lock writeLock = rwLock.writeLock(); + private final Lock readLock = rwLock.readLock(); public ServiceStateTransition(final ControllerServiceNode serviceNode) { this.serviceNode = serviceNode; } - public synchronized boolean transitionToEnabling(final ControllerServiceState expectedState, final CompletableFuture enabledFuture) { - if (expectedState != state) { - return false; + public boolean transitionToEnabling(final ControllerServiceState expectedState, final CompletableFuture enabledFuture) { + writeLock.lock(); + try { + if (expectedState != state) { + return false; + } + + state = ControllerServiceState.ENABLING; + enabledFutures.add(enabledFuture); + return true; + } finally { + writeLock.unlock(); } - - state = ControllerServiceState.ENABLING; - enabledFutures.add(enabledFuture); - return true; } - public synchronized boolean enable() { - if (state != ControllerServiceState.ENABLING) { - return false; - } + public boolean enable() { + writeLock.lock(); + try { + if (state != ControllerServiceState.ENABLING) { + return false; + } - state = ControllerServiceState.ENABLED; + state = ControllerServiceState.ENABLED; - validateReferences(serviceNode); + validateReferences(serviceNode); - enabledFutures.stream().forEach(future -> future.complete(null)); - return true; + enabledFutures.stream().forEach(future -> future.complete(null)); + return true; + } finally { + writeLock.unlock(); + } } private void validateReferences(final ControllerServiceNode service) { @@ -64,22 +80,37 @@ private void validateReferences(final ControllerServiceNode service) { } } - public synchronized boolean transitionToDisabling(final ControllerServiceState expectedState, final CompletableFuture disabledFuture) { - if (expectedState != state) { - return false; + public boolean transitionToDisabling(final ControllerServiceState expectedState, final CompletableFuture disabledFuture) { + writeLock.lock(); + try { + if (expectedState != state) { + return false; + } + + state = ControllerServiceState.DISABLING; + disabledFutures.add(disabledFuture); + return true; + } finally { + writeLock.unlock(); } - - state = ControllerServiceState.DISABLING; - disabledFutures.add(disabledFuture); - return true; } - public synchronized void disable() { - state = ControllerServiceState.DISABLED; - disabledFutures.stream().forEach(future -> future.complete(null)); + public void disable() { + writeLock.lock(); + try { + state = ControllerServiceState.DISABLED; + disabledFutures.stream().forEach(future -> future.complete(null)); + } finally { + writeLock.unlock(); + } } - public synchronized ControllerServiceState getState() { - return state; + public ControllerServiceState getState() { + readLock.lock(); + try { + return state; + } finally { + readLock.unlock(); + } } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/state/manager/StandardStateManagerProvider.java 
b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/state/manager/StandardStateManagerProvider.java index b365de25c81b..d3248b507876 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/state/manager/StandardStateManagerProvider.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/controller/state/manager/StandardStateManagerProvider.java @@ -17,17 +17,6 @@ package org.apache.nifi.controller.state.manager; -import java.io.File; -import java.io.IOException; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; - -import javax.net.ssl.SSLContext; - import org.apache.commons.lang3.ArrayUtils; import org.apache.nifi.attribute.expression.language.StandardPropertyValue; import org.apache.nifi.bundle.Bundle; @@ -57,9 +46,21 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.net.ssl.SSLContext; +import java.io.File; +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + public class StandardStateManagerProvider implements StateManagerProvider{ private static final Logger logger = LoggerFactory.getLogger(StandardStateManagerProvider.class); + private static StateManagerProvider provider; + private final ConcurrentMap stateManagers = new ConcurrentHashMap<>(); private final StateProvider localStateProvider; private final StateProvider clusterStateProvider; @@ -69,7 +70,11 @@ private StandardStateManagerProvider(final StateProvider localStateProvider, fin this.clusterStateProvider = clusterStateProvider; } - public static StateManagerProvider create(final NiFiProperties properties, final VariableRegistry variableRegistry) throws ConfigParseException, IOException { + public static synchronized StateManagerProvider create(final NiFiProperties properties, final VariableRegistry variableRegistry) throws ConfigParseException, IOException { + if (provider != null) { + return provider; + } + final StateProvider localProvider = createLocalStateProvider(properties,variableRegistry); final StateProvider clusterProvider; @@ -79,7 +84,8 @@ public static StateManagerProvider create(final NiFiProperties properties, final clusterProvider = null; } - return new StandardStateManagerProvider(localProvider, clusterProvider); + provider = new StandardStateManagerProvider(localProvider, clusterProvider); + return provider; } private static StateProvider createLocalStateProvider(final NiFiProperties properties, final VariableRegistry variableRegistry) throws IOException, ConfigParseException { diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/fingerprint/FingerprintFactory.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/fingerprint/FingerprintFactory.java index e1846a08d8a5..7da570277b7b 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/fingerprint/FingerprintFactory.java +++ 
b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/fingerprint/FingerprintFactory.java @@ -16,25 +16,6 @@ */ package org.apache.nifi.fingerprint; -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.Map; -import java.util.SortedMap; -import java.util.TreeMap; -import java.util.stream.Stream; - -import javax.xml.XMLConstants; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.validation.Schema; -import javax.xml.validation.SchemaFactory; - import org.apache.commons.lang3.StringUtils; import org.apache.nifi.bundle.BundleCoordinate; import org.apache.nifi.components.ConfigurableComponent; @@ -57,6 +38,24 @@ import org.w3c.dom.NodeList; import org.xml.sax.SAXException; +import javax.xml.XMLConstants; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.validation.Schema; +import javax.xml.validation.SchemaFactory; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.stream.Stream; + /** *
Creates a fingerprint of a flow.xml. The order of elements or attributes in the flow.xml does not influence the fingerprint generation. * @@ -606,6 +605,10 @@ private StringBuilder addConnectionFingerprint(final StringBuilder builder, fina appendFirstValue(builder, DomUtils.getChildNodesByTagName(connectionElem, "name")); + appendFirstValue(builder, DomUtils.getChildNodesByTagName(connectionElem, "loadBalanceStrategy")); + appendFirstValue(builder, DomUtils.getChildNodesByTagName(connectionElem, "partitioningAttribute")); + appendFirstValue(builder, DomUtils.getChildNodesByTagName(connectionElem, "loadBalanceCompression")); + // relationships final NodeList relationshipElems = DomUtils.getChildNodesByTagName(connectionElem, "relationship"); final List sortedRelationshipElems = sortElements(relationshipElems, getConnectionRelationshipsComparator()); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/groups/StandardProcessGroup.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/groups/StandardProcessGroup.java index e173cc6cb311..a683a9e91052 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/groups/StandardProcessGroup.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/groups/StandardProcessGroup.java @@ -54,6 +54,8 @@ import org.apache.nifi.controller.exception.ProcessorInstantiationException; import org.apache.nifi.controller.label.Label; import org.apache.nifi.controller.queue.FlowFileQueue; +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.queue.LoadBalanceStrategy; import org.apache.nifi.controller.scheduling.StandardProcessScheduler; import org.apache.nifi.controller.service.ControllerServiceNode; import org.apache.nifi.controller.service.ControllerServiceProvider; @@ -473,6 +475,10 @@ private void shutdown(final ProcessGroup procGroup) { rpg.shutdown(); } + for (final Connection connection : procGroup.getConnections()) { + connection.getFlowFileQueue().stopLoadBalancing(); + } + // Recursively shutdown child groups. 
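The FingerprintFactory hunk above appends the connection's loadBalanceStrategy, partitioningAttribute and loadBalanceCompression to the flow fingerprint. A minimal, self-contained illustration of why that matters (this is not NiFi's actual FingerprintFactory; the "NO_VALUE" marker, field order and enum literals are illustrative assumptions): two connections that differ only in their load-balance settings no longer fingerprint as identical, so a stale load-balance configuration on a cluster node is detected when flows are compared.

    import java.util.Objects;

    // Illustrative sketch only: joins the fingerprinted connection fields in a fixed order,
    // substituting a marker for missing values, the way a flow fingerprint concatenates values.
    final class ConnectionFingerprintSketch {

        static String fingerprint(final String name, final String loadBalanceStrategy,
                                  final String partitioningAttribute, final String loadBalanceCompression) {
            return String.join("|",
                    Objects.toString(name, "NO_VALUE"),
                    Objects.toString(loadBalanceStrategy, "NO_VALUE"),
                    Objects.toString(partitioningAttribute, "NO_VALUE"),
                    Objects.toString(loadBalanceCompression, "NO_VALUE"));
        }

        public static void main(final String[] args) {
            final String before = fingerprint("conn-1", "DO_NOT_LOAD_BALANCE", null, "DO_NOT_COMPRESS");
            final String after = fingerprint("conn-1", "PARTITION_BY_ATTRIBUTE", "customer.id", "DO_NOT_COMPRESS");
            System.out.println(before.equals(after)); // false: the flows no longer fingerprint as equal
        }
    }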
for (final ProcessGroup group : procGroup.getProcessGroups()) { shutdown(group); @@ -957,7 +963,7 @@ public void run() { public Collection getProcessors() { readLock.lock(); try { - return new HashSet<>(processors.values()); + return new ArrayList<>(processors.values()); } finally { readLock.unlock(); } @@ -1112,6 +1118,8 @@ public void removeConnection(final Connection connectionToRemove) { connectionToRemove.verifyCanDelete(); + connectionToRemove.getFlowFileQueue().stopLoadBalancing(); + final Connectable source = connectionToRemove.getSource(); final Connectable dest = connectionToRemove.getDestination(); @@ -3863,6 +3871,23 @@ private void updateConnection(final Connection connection, final VersionedConnec .collect(Collectors.toList()); queue.setPriorities(prioritizers); + + final String loadBalanceStrategyName = proposed.getLoadBalanceStrategy(); + if (loadBalanceStrategyName == null) { + queue.setLoadBalanceStrategy(LoadBalanceStrategy.DO_NOT_LOAD_BALANCE, proposed.getPartitioningAttribute()); + } else { + final LoadBalanceStrategy loadBalanceStrategy = LoadBalanceStrategy.valueOf(loadBalanceStrategyName); + final String partitioningAttribute = proposed.getPartitioningAttribute(); + + queue.setLoadBalanceStrategy(loadBalanceStrategy, partitioningAttribute); + } + + final String compressionName = proposed.getLoadBalanceCompression(); + if (compressionName == null) { + queue.setLoadBalanceCompression(LoadBalanceCompression.DO_NOT_COMPRESS); + } else { + queue.setLoadBalanceCompression(LoadBalanceCompression.valueOf(compressionName)); + } } private Connection addConnection(final ProcessGroup destinationGroup, final VersionedConnection proposed, final String componentIdSeed) { @@ -3884,6 +3909,7 @@ private Connection addConnection(final ProcessGroup destinationGroup, final Vers destinationGroup.addConnection(connection); updateConnection(connection, proposed); + flowController.onConnectionAdded(connection); return connection; } @@ -3907,6 +3933,24 @@ private Connectable getConnectable(final ProcessGroup group, final ConnectableCo return port.get(); } + // Attempt to locate child group by versioned component id + final Optional optionalSpecifiedGroup = group.getProcessGroups().stream() + .filter(child -> child.getVersionedComponentId().isPresent()) + .filter(child -> child.getVersionedComponentId().get().equals(connectableComponent.getGroupId())) + .findFirst(); + + if (optionalSpecifiedGroup.isPresent()) { + final ProcessGroup specifiedGroup = optionalSpecifiedGroup.get(); + return specifiedGroup.getInputPorts().stream() + .filter(component -> component.getVersionedComponentId().isPresent()) + .filter(component -> id.equals(component.getVersionedComponentId().get())) + .findAny() + .orElse(null); + } + + // If no child group matched the versioned component id, then look at all child groups. This is done because + // in older versions, we did not properly map Versioned Component ID's to Ports' parent groups. As a result, + // if the flow doesn't contain the properly mapped group id, we need to search all child groups. 
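The comment above describes a two-step port lookup: first try the child group whose versioned component id matches the connectable's group id, and only if none matches fall back to scanning the ports of every child group (older flows did not map versioned ids to the ports' parent groups). A condensed, self-contained sketch of that pattern, using simplified stand-in types (Group, Port) rather than NiFi's ProcessGroup and Port interfaces:

    import java.util.List;
    import java.util.Optional;

    // Sketch of the lookup-with-fallback described in the comment above; types are stand-ins.
    final class VersionedPortLookupSketch {
        record Port(String versionedId) {}
        record Group(String versionedId, List<Port> inputPorts, List<Group> children) {}

        static Optional<Port> findInputPort(final Group group, final String portVersionedId, final String groupVersionedId) {
            // Step 1: prefer the child group explicitly named by the versioned group id.
            final Optional<Group> specified = group.children().stream()
                    .filter(child -> groupVersionedId.equals(child.versionedId()))
                    .findFirst();
            if (specified.isPresent()) {
                return specified.get().inputPorts().stream()
                        .filter(port -> portVersionedId.equals(port.versionedId()))
                        .findAny();
            }

            // Step 2: fall back to searching every child group's ports.
            return group.children().stream()
                    .flatMap(child -> child.inputPorts().stream())
                    .filter(port -> portVersionedId.equals(port.versionedId()))
                    .findAny();
        }
    }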
return group.getProcessGroups().stream() .flatMap(gr -> gr.getInputPorts().stream()) .filter(component -> component.getVersionedComponentId().isPresent()) @@ -3924,6 +3968,24 @@ private Connectable getConnectable(final ProcessGroup group, final ConnectableCo return port.get(); } + // Attempt to locate child group by versioned component id + final Optional optionalSpecifiedGroup = group.getProcessGroups().stream() + .filter(child -> child.getVersionedComponentId().isPresent()) + .filter(child -> child.getVersionedComponentId().get().equals(connectableComponent.getGroupId())) + .findFirst(); + + if (optionalSpecifiedGroup.isPresent()) { + final ProcessGroup specifiedGroup = optionalSpecifiedGroup.get(); + return specifiedGroup.getOutputPorts().stream() + .filter(component -> component.getVersionedComponentId().isPresent()) + .filter(component -> id.equals(component.getVersionedComponentId().get())) + .findAny() + .orElse(null); + } + + // If no child group matched the versioned component id, then look at all child groups. This is done because + // in older versions, we did not properly map Versioned Component ID's to Ports' parent groups. As a result, + // if the flow doesn't contain the properly mapped group id, we need to search all child groups. return group.getProcessGroups().stream() .flatMap(gr -> gr.getOutputPorts().stream()) .filter(component -> component.getVersionedComponentId().isPresent()) @@ -4297,6 +4359,7 @@ private Set getModifications() { final Set differences = comparison.getDifferences().stream() .filter(difference -> difference.getDifferenceType() != DifferenceType.BUNDLE_CHANGED) .filter(FlowDifferenceFilters.FILTER_ADDED_REMOVED_REMOTE_PORTS) + .filter(FlowDifferenceFilters.FILTER_IGNORABLE_VERSIONED_FLOW_COORDINATE_CHANGES) .collect(Collectors.toCollection(HashSet::new)); LOG.debug("There are {} differences between this Local Flow and the Versioned Flow: {}", differences.size(), differences); @@ -4470,7 +4533,7 @@ public void verifyCanUpdate(final VersionedFlowSnapshot updatedFlow, final boole } } - // Ensure that all Prioritizers are instantiate-able. 
+ // Ensure that all Prioritizers are instantiate-able and that any load balancing configuration is correct final Map proposedConnections = new HashMap<>(); findAllConnections(updatedFlow.getFlowContents(), proposedConnections); @@ -4488,6 +4551,16 @@ public void verifyCanUpdate(final VersionedFlowSnapshot updatedFlow, final boole } } } + + final String loadBalanceStrategyName = connectionToAdd.getLoadBalanceStrategy(); + if (loadBalanceStrategyName != null) { + try { + LoadBalanceStrategy.valueOf(loadBalanceStrategyName); + } catch (final IllegalArgumentException iae) { + throw new IllegalArgumentException("Unable to create Connection with Load Balance Strategy of '" + loadBalanceStrategyName + + "' because this is not a known Load Balance Strategy"); + } + } } } finally { readLock.unlock(); diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/processor/StandardProcessContext.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/processor/StandardProcessContext.java index 167fa733b862..299f73ba58df 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/processor/StandardProcessContext.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/processor/StandardProcessContext.java @@ -16,14 +16,6 @@ */ package org.apache.nifi.processor; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - import org.apache.nifi.attribute.expression.language.PreparedQuery; import org.apache.nifi.attribute.expression.language.Query; import org.apache.nifi.attribute.expression.language.Query.Range; @@ -41,6 +33,15 @@ import org.apache.nifi.processor.exception.TerminatedTaskException; import org.apache.nifi.util.Connectables; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + public class StandardProcessContext implements ProcessContext, ControllerServiceLookup { private final ProcessorNode procNode; @@ -49,6 +50,7 @@ public class StandardProcessContext implements ProcessContext, ControllerService private final StringEncryptor encryptor; private final StateManager stateManager; private final TaskTermination taskTermination; + private final Map properties; public StandardProcessContext(final ProcessorNode processorNode, final ControllerServiceProvider controllerServiceProvider, final StringEncryptor encryptor, final StateManager stateManager, final TaskTermination taskTermination) { @@ -71,6 +73,8 @@ public StandardProcessContext(final ProcessorNode processorNode, final Controlle preparedQueries.put(desc, pq); } } + + properties = Collections.unmodifiableMap(processorNode.getProperties()); } private void verifyTaskActive() { @@ -82,7 +86,17 @@ private void verifyTaskActive() { @Override public PropertyValue getProperty(final PropertyDescriptor descriptor) { verifyTaskActive(); - return getProperty(descriptor.getName()); + + final String setPropertyValue = properties.get(descriptor); + if (setPropertyValue != null) { + return new StandardPropertyValue(setPropertyValue, this, preparedQueries.get(descriptor), procNode.getVariableRegistry()); + } + + // Get the "canonical" Property 
Descriptor from the Processor + final PropertyDescriptor canonicalDescriptor = procNode.getProcessor().getPropertyDescriptor(descriptor.getName()); + final String defaultValue = canonicalDescriptor.getDefaultValue(); + + return new StandardPropertyValue(defaultValue, this, preparedQueries.get(descriptor), procNode.getVariableRegistry()); } /** @@ -99,7 +113,7 @@ public PropertyValue getProperty(final String propertyName) { return null; } - final String setPropertyValue = procNode.getProperty(descriptor); + final String setPropertyValue = properties.get(descriptor); final String propValue = (setPropertyValue == null) ? descriptor.getDefaultValue() : setPropertyValue; return new StandardPropertyValue(propValue, this, preparedQueries.get(descriptor), procNode.getVariableRegistry()); @@ -138,7 +152,7 @@ public String getAnnotationData() { @Override public Map getProperties() { verifyTaskActive(); - return procNode.getProperties(); + return properties; } @Override diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/registry/flow/StandardFlowRegistryClient.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/registry/flow/StandardFlowRegistryClient.java index 754646b82890..870ab7b9b1a0 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/registry/flow/StandardFlowRegistryClient.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/registry/flow/StandardFlowRegistryClient.java @@ -71,6 +71,9 @@ public FlowRegistry addFlowRegistry(final String registryId, final String regist throw new IllegalArgumentException("The given Registry URL is not valid: " + registryUrl); } + // Handles case where the URI entered has a trailing slash, or includes the trailing /nifi-registry-api + final String registryBaseUrl = uri.getScheme() + "://" + uri.getHost() + ":" + uri.getPort(); + final FlowRegistry registry; if (uriScheme.equalsIgnoreCase("http") || uriScheme.equalsIgnoreCase("https")) { final SSLContext sslContext = SslContextFactory.createSslContext(nifiProperties, false); @@ -80,7 +83,7 @@ public FlowRegistry addFlowRegistry(final String registryId, final String regist + "Please populate NiFi's Keystore/Truststore properties or connect to a NiFi Registry over http instead of https."); } - registry = new RestBasedFlowRegistry(this, registryId, registryUrl, sslContext, registryName); + registry = new RestBasedFlowRegistry(this, registryId, registryBaseUrl, sslContext, registryName); registry.setDescription(description); } else { throw new IllegalArgumentException("Cannot create Flow Registry with URI of " + registryUrl diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/registry/flow/mapping/NiFiRegistryFlowMapper.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/registry/flow/mapping/NiFiRegistryFlowMapper.java index 8d6e5e306bb1..074302a739ba 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/registry/flow/mapping/NiFiRegistryFlowMapper.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/registry/flow/mapping/NiFiRegistryFlowMapper.java @@ -17,19 +17,6 @@ package org.apache.nifi.registry.flow.mapping; -import 
java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - import org.apache.commons.lang3.ClassUtils; import org.apache.nifi.bundle.BundleCoordinate; import org.apache.nifi.components.PropertyDescriptor; @@ -73,6 +60,20 @@ import org.apache.nifi.registry.flow.VersionedRemoteProcessGroup; import org.apache.nifi.remote.RemoteGroupPort; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.stream.Collectors; + public class NiFiRegistryFlowMapper { // We need to keep a mapping of component id to versionedComponentId as we transform these objects. This way, when @@ -228,6 +229,20 @@ private String getId(final Optional currentVersionedId, final String com return versionedId; } + private String getIdOrThrow(final Optional currentVersionedId, final String componentId, final Supplier exceptionSupplier) throws E { + if (currentVersionedId.isPresent()) { + return currentVersionedId.get(); + } else { + final String resolved = versionedComponentIds.get(componentId); + if (resolved == null) { + throw exceptionSupplier.get(); + } + + return resolved; + } + } + + private String getGroupId(final String groupId) { return versionedComponentIds.get(groupId); } @@ -247,6 +262,11 @@ public VersionedConnection mapConnection(final Connection connection) { versionedConnection.setSelectedRelationships(connection.getRelationships().stream().map(Relationship::getName).collect(Collectors.toSet())); versionedConnection.setzIndex(connection.getZIndex()); + final FlowFileQueue flowFileQueue = connection.getFlowFileQueue(); + versionedConnection.setLoadBalanceStrategy(flowFileQueue.getLoadBalanceStrategy().name()); + versionedConnection.setPartitioningAttribute(flowFileQueue.getPartitioningAttribute()); + versionedConnection.setLoadBalanceCompression(flowFileQueue.getLoadBalanceCompression().name()); + versionedConnection.setBends(connection.getBendPoints().stream() .map(this::mapPosition) .collect(Collectors.toList())); @@ -260,39 +280,27 @@ public VersionedConnection mapConnection(final Connection connection) { public ConnectableComponent mapConnectable(final Connectable connectable) { final ConnectableComponent component = new InstantiatedConnectableComponent(connectable.getIdentifier(), connectable.getProcessGroupIdentifier()); - final Optional versionedId = connectable.getVersionedComponentId(); - if (versionedId.isPresent()) { - component.setId(versionedId.get()); - } else { - final String resolved = versionedComponentIds.get(connectable.getIdentifier()); - if (resolved == null) { - throw new IllegalArgumentException("Unable to map Connectable Component with identifier " + connectable.getIdentifier() + " to any version-controlled component"); - } - - component.setId(resolved); - } + final String versionedId = getIdOrThrow(connectable.getVersionedComponentId(), connectable.getIdentifier(), + () -> new IllegalArgumentException("Unable to map Connectable Component with identifier " + connectable.getIdentifier() + " 
to any version-controlled component")); + component.setId(versionedId); component.setComments(connectable.getComments()); + + final String groupId; if (connectable instanceof RemoteGroupPort) { final RemoteGroupPort port = (RemoteGroupPort) connectable; final RemoteProcessGroup rpg = port.getRemoteProcessGroup(); final Optional rpgVersionedId = rpg.getVersionedComponentId(); - final String groupId; - if (rpgVersionedId.isPresent()) { - groupId = rpgVersionedId.get(); - } else { - final String resolved = versionedComponentIds.get(rpg.getIdentifier()); - if (resolved == null) { - throw new IllegalArgumentException("Unable to find the Versioned Component ID for Remote Process Group that " + connectable + " belongs to"); - } - - groupId = resolved; - } + groupId = getIdOrThrow(rpgVersionedId, rpg.getIdentifier(), + () -> new IllegalArgumentException("Unable to find the Versioned Component ID for Remote Process Group that " + connectable + " belongs to")); - component.setGroupId(groupId); } else { - component.setGroupId(connectable.getProcessGroupIdentifier()); + groupId = getIdOrThrow(connectable.getProcessGroup().getVersionedComponentId(), connectable.getProcessGroupIdentifier(), + () -> new IllegalArgumentException("Unable to find the Versioned Component ID for the Process Group that " + connectable + " belongs to")); } + + component.setGroupId(groupId); + component.setName(connectable.getName()); component.setType(ConnectableComponentType.valueOf(connectable.getConnectableType().name())); return component; diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/util/FlowDifferenceFilters.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/util/FlowDifferenceFilters.java index 4d341e97faf8..29c82fc8994f 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/util/FlowDifferenceFilters.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/java/org/apache/nifi/util/FlowDifferenceFilters.java @@ -18,6 +18,8 @@ import org.apache.nifi.registry.flow.ComponentType; import org.apache.nifi.registry.flow.VersionedComponent; +import org.apache.nifi.registry.flow.VersionedFlowCoordinates; +import org.apache.nifi.registry.flow.VersionedProcessGroup; import org.apache.nifi.registry.flow.diff.DifferenceType; import org.apache.nifi.registry.flow.diff.FlowDifference; import org.apache.nifi.registry.flow.mapping.InstantiatedVersionedComponent; @@ -49,4 +51,46 @@ public static boolean isAddedOrRemovedRemotePort(final FlowDifference fd) { return false; } + public static Predicate FILTER_IGNORABLE_VERSIONED_FLOW_COORDINATE_CHANGES = (fd) -> { + return !isIgnorableVersionedFlowCoordinateChange(fd); + }; + + public static boolean isIgnorableVersionedFlowCoordinateChange(final FlowDifference fd) { + if (fd.getDifferenceType() == DifferenceType.VERSIONED_FLOW_COORDINATES_CHANGED) { + final VersionedComponent componentA = fd.getComponentA(); + final VersionedComponent componentB = fd.getComponentB(); + + if (componentA != null && componentB != null + && componentA instanceof VersionedProcessGroup + && componentB instanceof VersionedProcessGroup) { + + final VersionedProcessGroup versionedProcessGroupA = (VersionedProcessGroup) componentA; + final VersionedProcessGroup versionedProcessGroupB = (VersionedProcessGroup) componentB; + + final VersionedFlowCoordinates coordinatesA = 
versionedProcessGroupA.getVersionedFlowCoordinates(); + final VersionedFlowCoordinates coordinatesB = versionedProcessGroupB.getVersionedFlowCoordinates(); + + if (coordinatesA != null && coordinatesB != null) { + String registryUrlA = coordinatesA.getRegistryUrl(); + String registryUrlB = coordinatesB.getRegistryUrl(); + + if (registryUrlA != null && registryUrlB != null && !registryUrlA.equals(registryUrlB)) { + if (registryUrlA.endsWith("/")) { + registryUrlA = registryUrlA.substring(0, registryUrlA.length() - 1); + } + + if (registryUrlB.endsWith("/")) { + registryUrlB = registryUrlB.substring(0, registryUrlB.length() - 1); + } + + if (registryUrlA.equals(registryUrlB)) { + return true; + } + } + } + } + } + + return false; + } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/resources/FlowConfiguration.xsd b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/resources/FlowConfiguration.xsd index 9e81d22bdcb1..3149ee17c2aa 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/resources/FlowConfiguration.xsd +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/main/resources/FlowConfiguration.xsd @@ -30,22 +30,22 @@ - + - + - + - + @@ -54,7 +54,7 @@ - + @@ -80,21 +80,21 @@ - + - + - + - + - + - + @@ -130,13 +130,13 @@ - + - + @@ -144,7 +144,7 @@ - + @@ -153,13 +153,13 @@ - + - + @@ -170,12 +170,12 @@ - + - + @@ -187,7 +187,7 @@ - + @@ -197,13 +197,13 @@ - + - + @@ -213,7 +213,7 @@ - + @@ -221,7 +221,7 @@ - + @@ -232,7 +232,7 @@ - + @@ -259,7 +259,7 @@ - + @@ -276,11 +276,11 @@ - + - + + + + + - + @@ -306,7 +310,7 @@ - + @@ -318,7 +322,7 @@ - + @@ -346,12 +350,12 @@ - + - + @@ -378,13 +382,13 @@ - + - + @@ -396,7 +400,7 @@ - + @@ -405,7 +409,7 @@ - + @@ -418,7 +422,7 @@ - + @@ -428,18 +432,18 @@ - + - + - + @@ -450,7 +454,7 @@ - + diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/StandardFlowServiceSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/StandardFlowServiceSpec.groovy new file mode 100644 index 000000000000..63fedab8d4f9 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/StandardFlowServiceSpec.groovy @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
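The FlowDifferenceFilters change above treats a VERSIONED_FLOW_COORDINATES_CHANGED difference as ignorable when the two registry URLs differ only by a trailing slash. A small self-contained sketch of that comparison (class and method names are illustrative, not NiFi API):

    // Illustrative sketch: normalize both URLs by dropping a trailing slash, then compare.
    final class RegistryUrlComparisonSketch {
        static String stripTrailingSlash(final String url) {
            return url.endsWith("/") ? url.substring(0, url.length() - 1) : url;
        }

        static boolean sameRegistry(final String urlA, final String urlB) {
            return stripTrailingSlash(urlA).equals(stripTrailingSlash(urlB));
        }

        public static void main(final String[] args) {
            // A coordinate difference caused only by the slash should be ignored.
            System.out.println(sameRegistry("http://registry:18080/", "http://registry:18080")); // true
        }
    }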
+ */ +package org.apache.nifi.controller + +import org.apache.nifi.authorization.Authorizer +import org.apache.nifi.cluster.coordination.ClusterCoordinator +import org.apache.nifi.cluster.coordination.node.NodeConnectionState +import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus +import org.apache.nifi.cluster.coordination.node.OffloadCode +import org.apache.nifi.cluster.protocol.NodeIdentifier +import org.apache.nifi.cluster.protocol.impl.NodeProtocolSenderListener +import org.apache.nifi.cluster.protocol.message.OffloadMessage +import org.apache.nifi.components.state.Scope +import org.apache.nifi.components.state.StateManager +import org.apache.nifi.components.state.StateManagerProvider +import org.apache.nifi.controller.queue.FlowFileQueue +import org.apache.nifi.controller.status.ProcessGroupStatus +import org.apache.nifi.encrypt.StringEncryptor +import org.apache.nifi.groups.ProcessGroup +import org.apache.nifi.groups.RemoteProcessGroup +import org.apache.nifi.state.MockStateMap +import org.apache.nifi.util.NiFiProperties +import org.apache.nifi.web.revision.RevisionManager +import spock.lang.Specification +import spock.util.concurrent.BlockingVariable + +import java.util.concurrent.TimeUnit + +class StandardFlowServiceSpec extends Specification { + def "handle an OffloadMessage"() { + given: 'a node to offload' + def nodeToOffload = createNodeIdentifier 1 + + and: 'a simple flow with one root group and a single processor' + def stateManager = Mock StateManager + def stateMap = new MockStateMap([:], 1) + stateManager.getState(_ as Scope) >> stateMap + def stateManagerProvider = Mock StateManagerProvider + stateManagerProvider.getStateManager(_ as String) >> stateManager + + def rootGroup = Mock ProcessGroup + def flowController = Mock FlowController + flowController.getStateManagerProvider() >> stateManagerProvider + _ * flowController.rootGroup >> rootGroup + + def clusterCoordinator = Mock ClusterCoordinator + + def processGroupStatus = Mock ProcessGroupStatus + def processorNode = Mock ProcessorNode + def remoteProcessGroup = Mock RemoteProcessGroup + def flowFileQueue = Mock FlowFileQueue + + and: 'a flow service to handle the OffloadMessage' + def flowService = StandardFlowService.createClusteredInstance(flowController, NiFiProperties.createBasicNiFiProperties('src/test/resources/conf/nifi.properties', + [(NiFiProperties.CLUSTER_NODE_PROTOCOL_PORT): nodeToOffload.socketPort as String, + (NiFiProperties.WEB_HTTP_PORT) : nodeToOffload.apiPort as String, + (NiFiProperties.LOAD_BALANCE_PORT) : nodeToOffload.loadBalancePort as String]), + Mock(NodeProtocolSenderListener), clusterCoordinator, Mock(StringEncryptor), Mock(RevisionManager), Mock(Authorizer)) + + def waitForFinishOffload = new BlockingVariable(5, TimeUnit.SECONDS)//new CountDownLatch(1) + + when: 'the flow services receives an OffloadMessage' + flowService.handle(new OffloadMessage(nodeId: nodeToOffload, explanation: 'unit test offload'), [] as Set) + waitForFinishOffload.get() + + then: 'no exceptions are thrown' + noExceptionThrown() + + and: 'the connection status for the node in the flow controller is set to OFFLOADING' + 1 * flowController.setConnectionStatus({ NodeConnectionStatus status -> + status.nodeIdentifier.logicallyEquals(nodeToOffload) && status.state == NodeConnectionState.OFFLOADING && status.offloadCode == OffloadCode.OFFLOADED + } as NodeConnectionStatus) + + then: 'all processors are requested to stop' + 1 * flowController.stopAllProcessors() + + then: 'all processors are requested to 
terminate' + 1 * processorNode.scheduledState >> ScheduledState.STOPPED + 1 * processorNode.processGroup >> rootGroup + 1 * rootGroup.terminateProcessor({ ProcessorNode pn -> pn == processorNode } as ProcessorNode) + 1 * rootGroup.findAllProcessors() >> [processorNode] + + then: 'all remote process groups are requested to terminate' + 1 * remoteProcessGroup.stopTransmitting() + 1 * rootGroup.findAllRemoteProcessGroups() >> [remoteProcessGroup] + + then: 'all queues are requested to offload' + 1 * flowFileQueue.offloadQueue() + 1 * flowController.getAllQueues() >> [flowFileQueue] + + then: 'the queued count in the flow controller status is 0 to allow the offloading code to to complete' + 1 * flowController.getControllerStatus() >> processGroupStatus + 1 * processGroupStatus.getQueuedCount() >> 0 + + then: 'all queues are requested to reset to the original partitioner for the load balancing strategy' + 1 * flowFileQueue.resetOffloadedQueue() + 1 * flowController.getAllQueues() >> [flowFileQueue] + + then: 'the connection status for the node in the flow controller is set to OFFLOADED' + 1 * flowController.setConnectionStatus({ NodeConnectionStatus status -> + status.nodeIdentifier.logicallyEquals(nodeToOffload) && status.state == NodeConnectionState.OFFLOADED && status.offloadCode == OffloadCode.OFFLOADED + } as NodeConnectionStatus) + + then: 'the cluster coordinator is requested to finish the node offload' + 1 * clusterCoordinator.finishNodeOffload({ NodeIdentifier nodeIdentifier -> + nodeIdentifier.logicallyEquals(nodeToOffload) + } as NodeIdentifier) >> { waitForFinishOffload.set(it) } + } + + private static NodeIdentifier createNodeIdentifier(final int index) { + new NodeIdentifier("node-id-$index", "localhost", 8000 + index, "localhost", 9000 + index, + "localhost", 10000 + index, 11000 + index, false) + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/queue/clustered/partition/NonLocalPartitionPartitionerSpec.groovy b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/queue/clustered/partition/NonLocalPartitionPartitionerSpec.groovy new file mode 100644 index 000000000000..f3cfd4c47476 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/groovy/org/apache/nifi/controller/queue/clustered/partition/NonLocalPartitionPartitionerSpec.groovy @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.controller.queue.clustered.partition + + +import org.apache.nifi.controller.repository.FlowFileRecord +import spock.lang.Specification +import spock.lang.Unroll + +import java.util.concurrent.ConcurrentLinkedQueue +import java.util.concurrent.Executors +import java.util.concurrent.TimeUnit + +class NonLocalPartitionPartitionerSpec extends Specification { + + def "getPartition throws IllegalStateException when only the local partition exists"() { + given: "a local partitioner using a local partition" + def partitioner = new NonLocalPartitionPartitioner() + def localPartition = Mock QueuePartition + def partitions = [localPartition] as QueuePartition[] + def flowFileRecord = Mock FlowFileRecord + + when: "a partition is requested from the partitioner" + partitioner.getPartition flowFileRecord, partitions, localPartition + + then: "an IllegalStateException is thrown" + thrown(IllegalStateException) + } + + @Unroll + def "getPartition chooses non-local partition with #maxPartitions partitions, #threads threads, #iterations iterations"() { + given: "a local partitioner" + def partitioner = new NonLocalPartitionPartitioner() + def partitions = new QueuePartition[maxPartitions] + + and: "a local partition" + def localPartition = Mock QueuePartition + partitions[0] = localPartition + + and: "one or more additional remote partitions" + for (int id = 1; id < maxPartitions; ++id) { + def partition = Mock QueuePartition + partitions[id] = partition + } + + and: "a collection to hold the resulting chosen partitions and an executor service with one or more threads" + def flowFileRecord = Mock FlowFileRecord + def chosenPartitions = [] as ConcurrentLinkedQueue + def executorService = Executors.newFixedThreadPool threads + + when: "a partition is requested from the partitioner for a given flowfile record and the existing partitions" + iterations.times { + executorService.submit { + chosenPartitions.add partitioner.getPartition(flowFileRecord, partitions, localPartition) + } + } + executorService.shutdown() + try { + while (!executorService.awaitTermination(10, TimeUnit.MILLISECONDS)) { + Thread.sleep(10) + } + } catch (InterruptedException e) { + executorService.shutdownNow() + Thread.currentThread().interrupt() + } + + then: "no exceptions are thrown" + noExceptionThrown() + + and: "there is a chosen partition for each iteration" + chosenPartitions.size() == iterations + + and: "each chosen partition is a remote partition and is one of the existing partitions" + def validChosenPartitions = chosenPartitions.findAll { it != localPartition && partitions.contains(it) } + + and: "there is a valid chosen partition for each iteration" + validChosenPartitions.size() == iterations + + and: "there are no other mock interactions" + 0 * _ + + where: + maxPartitions | threads | iterations + 2 | 1 | 1 + 2 | 1 | 10 + 2 | 1 | 100 + 2 | 10 | 1000 + 5 | 1 | 1 + 5 | 1 | 10 + 5 | 1 | 100 + 5 | 10 | 1000 + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/MockFlowFileRecord.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/MockFlowFileRecord.java new file mode 100644 index 000000000000..541cc7ba30ea --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/MockFlowFileRecord.java @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * 
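The NonLocalPartitionPartitionerSpec above pins down two behaviors: when only the local partition is available getPartition must throw IllegalStateException, and otherwise it must always return a partition other than the local one, even when called from many threads at once. A minimal, thread-safe sketch that satisfies those constraints, written against hypothetical stand-in types rather than the NonLocalPartitionPartitioner implementation in the patch:

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical stand-ins for the QueuePartition and FlowFileRecord types the spec mocks.
interface Partition {}
interface Record {}

class NonLocalPartitionerSketch {
    private final AtomicLong counter = new AtomicLong();

    Partition getPartition(final Record flowFile, final Partition[] partitions, final Partition localPartition) {
        if (partitions.length < 2) {
            throw new IllegalStateException("Only the local partition is available");
        }
        // Round-robin over the candidates, skipping the local partition.
        while (true) {
            final int index = (int) (counter.getAndIncrement() % partitions.length);
            final Partition candidate = partitions[index];
            if (candidate != localPartition) {
                return candidate;
            }
        }
    }
}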
contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.claim.ContentClaim; +import org.apache.nifi.flowfile.FlowFile; +import org.apache.nifi.flowfile.attributes.CoreAttributes; + +public class MockFlowFileRecord implements FlowFileRecord { + private static final AtomicLong idGenerator = new AtomicLong(0L); + + private final long id = idGenerator.getAndIncrement(); + private final long entryDate = System.currentTimeMillis(); + private final Map attributes; + private final long size; + private final ContentClaim contentClaim; + + public MockFlowFileRecord() { + this(1L); + } + + public MockFlowFileRecord(final long size) { + this(new HashMap<>(), size); + } + + public MockFlowFileRecord(final Map attributes, final long size) { + this(attributes, size, null); + } + + public MockFlowFileRecord(final Map attributes, final long size, final ContentClaim contentClaim) { + this.attributes = attributes; + this.size = size; + this.contentClaim = contentClaim; + + if (!attributes.containsKey(CoreAttributes.UUID.key())) { + attributes.put(CoreAttributes.UUID.key(), createFakeUUID()); + } + } + + public static void resetIdGenerator() { + idGenerator.set(0L); + } + + private String createFakeUUID() { + final String s = Long.toHexString(id); + return new StringBuffer("00000000-0000-0000-0000000000000000".substring(0, (35 - s.length())) + s).insert(23, '-').toString(); + } + + @Override + public long getId() { + return id; + } + + @Override + public long getEntryDate() { + return entryDate; + } + + @Override + public long getLineageStartDate() { + return entryDate; + } + + @Override + public Long getLastQueueDate() { + return null; + } + + @Override + public boolean isPenalized() { + return false; + } + + @Override + public String getAttribute(String key) { + return attributes.get(key); + } + + @Override + public long getSize() { + return size; + } + + @Override + public Map getAttributes() { + return Collections.unmodifiableMap(attributes); + } + + @Override + public int compareTo(final FlowFile o) { + return Long.compare(id, o.getId()); + } + + @Override + public long getPenaltyExpirationMillis() { + return 0; + } + + @Override + public ContentClaim getContentClaim() { + return contentClaim; + } + + @Override + public long getContentClaimOffset() { + return 0; + } + + @Override + public long getLineageStartIndex() { + return 0; + } + + @Override + public long getQueueDateIndex() { + return 0; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/MockSwapManager.java 
b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/MockSwapManager.java new file mode 100644 index 000000000000..33b71f0fdde7 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/MockSwapManager.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.stream.Collectors; + +import org.apache.nifi.controller.queue.FlowFileQueue; +import org.apache.nifi.controller.queue.QueueSize; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileSwapManager; +import org.apache.nifi.controller.repository.IncompleteSwapFileException; +import org.apache.nifi.controller.repository.SwapContents; +import org.apache.nifi.controller.repository.SwapManagerInitializationContext; +import org.apache.nifi.controller.repository.SwapSummary; +import org.apache.nifi.controller.repository.claim.ResourceClaim; +import org.apache.nifi.controller.swap.StandardSwapContents; +import org.apache.nifi.controller.swap.StandardSwapSummary; + +public class MockSwapManager implements FlowFileSwapManager { + public final Map> swappedOut = new HashMap<>(); + public int swapOutCalledCount = 0; + public int swapInCalledCount = 0; + + public int incompleteSwapFileRecordsToInclude = -1; + + public int failSwapInAfterN = -1; + public Throwable failSwapInFailure = null; + + public void setSwapInFailure(final Throwable t) { + this.failSwapInFailure = t; + } + + @Override + public void initialize(final SwapManagerInitializationContext initializationContext) { + + } + + public void enableIncompleteSwapFileException(final int flowFilesToInclude) { + incompleteSwapFileRecordsToInclude = flowFilesToInclude; + } + + @Override + public String swapOut(List flowFiles, FlowFileQueue flowFileQueue, final String partitionName) throws IOException { + swapOutCalledCount++; + final String location = UUID.randomUUID().toString() + "." 
+ partitionName; + swappedOut.put(location, new ArrayList<>(flowFiles)); + return location; + } + + private void throwIncompleteIfNecessary(final String swapLocation, final boolean remove) throws IOException { + if (incompleteSwapFileRecordsToInclude > -1) { + final SwapSummary summary = getSwapSummary(swapLocation); + + final List records; + if (remove) { + records = swappedOut.remove(swapLocation); + } else { + records = swappedOut.get(swapLocation); + } + + final List partial = records.subList(0, incompleteSwapFileRecordsToInclude); + final SwapContents partialContents = new StandardSwapContents(summary, partial); + throw new IncompleteSwapFileException(swapLocation, partialContents); + } + + if (swapInCalledCount > failSwapInAfterN && failSwapInAfterN > -1) { + if (failSwapInFailure instanceof RuntimeException) { + throw (RuntimeException) failSwapInFailure; + } + if (failSwapInFailure instanceof Error) { + throw (Error) failSwapInFailure; + } + + throw new RuntimeException(failSwapInFailure); + } + } + + @Override + public SwapContents peek(String swapLocation, final FlowFileQueue flowFileQueue) throws IOException { + throwIncompleteIfNecessary(swapLocation, false); + return new StandardSwapContents(getSwapSummary(swapLocation), swappedOut.get(swapLocation)); + } + + @Override + public SwapContents swapIn(String swapLocation, FlowFileQueue flowFileQueue) throws IOException { + swapInCalledCount++; + throwIncompleteIfNecessary(swapLocation, true); + return new StandardSwapContents(getSwapSummary(swapLocation), swappedOut.remove(swapLocation)); + } + + @Override + public List recoverSwapLocations(FlowFileQueue flowFileQueue, final String partitionName) throws IOException { + return swappedOut.keySet().stream() + .filter(key -> key.endsWith("." + partitionName)) + .collect(Collectors.toList()); + } + + @Override + public SwapSummary getSwapSummary(String swapLocation) throws IOException { + final List flowFiles = swappedOut.get(swapLocation); + if (flowFiles == null) { + return StandardSwapSummary.EMPTY_SUMMARY; + } + + int count = 0; + long size = 0L; + Long max = null; + final List resourceClaims = new ArrayList<>(); + for (final FlowFileRecord flowFile : flowFiles) { + count++; + size += flowFile.getSize(); + if (max == null || flowFile.getId() > max) { + max = flowFile.getId(); + } + + if (flowFile.getContentClaim() != null) { + resourceClaims.add(flowFile.getContentClaim().getResourceClaim()); + } + } + + return new StandardSwapSummary(new QueueSize(count, size), max, resourceClaims); + } + + @Override + public void purge() { + swappedOut.clear(); + } + + @Override + public Set getSwappedPartitionNames(final FlowFileQueue queue) throws IOException { + return swappedOut.keySet().stream() + .filter(key -> key.contains(".")) + .map(key -> key.substring(key.indexOf(".") + 1)) + .collect(Collectors.toCollection(HashSet::new)); + } + + @Override + public String changePartitionName(final String swapLocation, final String newPartitionName) throws IOException { + final List flowFiles = swappedOut.remove(swapLocation); + if (flowFiles == null) { + throw new IOException("Could not find swapfile with name " + swapLocation); + } + + final String newSwapLocation; + final int dotIndex = swapLocation.indexOf("."); + if (dotIndex < 0) { + newSwapLocation = swapLocation + "." + newPartitionName; + } else { + newSwapLocation = swapLocation.substring(0, dotIndex) + "." 
+ newPartitionName; + } + + swappedOut.put(newSwapLocation, flowFiles); + return newSwapLocation; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/TestStandardFlowFileQueue.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/TestStandardFlowFileQueue.java index 303ca7bc8214..b454d2dece37 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/TestStandardFlowFileQueue.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/TestStandardFlowFileQueue.java @@ -17,47 +17,20 @@ package org.apache.nifi.controller; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; - import org.apache.nifi.connectable.Connectable; import org.apache.nifi.connectable.Connection; import org.apache.nifi.controller.queue.DropFlowFileState; import org.apache.nifi.controller.queue.DropFlowFileStatus; -import org.apache.nifi.controller.queue.FlowFileQueue; import org.apache.nifi.controller.queue.ListFlowFileState; import org.apache.nifi.controller.queue.ListFlowFileStatus; +import org.apache.nifi.controller.queue.NopConnectionEventListener; import org.apache.nifi.controller.queue.QueueSize; +import org.apache.nifi.controller.queue.StandardFlowFileQueue; import org.apache.nifi.controller.repository.FlowFileRecord; import org.apache.nifi.controller.repository.FlowFileRepository; -import org.apache.nifi.controller.repository.FlowFileSwapManager; -import org.apache.nifi.controller.repository.IncompleteSwapFileException; -import org.apache.nifi.controller.repository.SwapContents; -import org.apache.nifi.controller.repository.SwapManagerInitializationContext; -import org.apache.nifi.controller.repository.SwapSummary; -import org.apache.nifi.controller.repository.claim.ContentClaim; -import org.apache.nifi.controller.repository.claim.ResourceClaim; import org.apache.nifi.controller.repository.claim.ResourceClaimManager; -import org.apache.nifi.controller.swap.StandardSwapContents; -import org.apache.nifi.controller.swap.StandardSwapSummary; import org.apache.nifi.flowfile.FlowFile; import org.apache.nifi.flowfile.FlowFilePrioritizer; -import org.apache.nifi.flowfile.attributes.CoreAttributes; import org.apache.nifi.processor.FlowFileFilter; import org.apache.nifi.provenance.ProvenanceEventRecord; import org.apache.nifi.provenance.ProvenanceEventRepository; @@ -71,8 +44,22 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.UUID; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static 
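MockSwapManager, shown above, encodes the owning partition into each swap location as "&lt;uuid&gt;.&lt;partitionName&gt;": swapOut appends the partition name, recoverSwapLocations filters by that suffix, getSwappedPartitionNames parses the text after the first dot, and changePartitionName rewrites the suffix. A small standalone sketch of that naming convention, using illustrative helper names that are not part of the mock:

import java.util.UUID;

class SwapLocationNamingSketch {
    // Mirrors how MockSwapManager.swapOut builds a location for a partition.
    static String locationFor(final String partitionName) {
        return UUID.randomUUID().toString() + "." + partitionName;
    }

    // Mirrors how getSwappedPartitionNames recovers the partition name from a location.
    static String partitionOf(final String swapLocation) {
        return swapLocation.substring(swapLocation.indexOf('.') + 1);
    }

    // Mirrors changePartitionName: keep the UUID prefix, replace the suffix.
    static String rename(final String swapLocation, final String newPartition) {
        final int dotIndex = swapLocation.indexOf('.');
        return (dotIndex < 0 ? swapLocation : swapLocation.substring(0, dotIndex)) + "." + newPartition;
    }

    public static void main(String[] args) {
        final String location = locationFor("partition-1");
        System.out.println(partitionOf(location));                          // partition-1
        System.out.println(partitionOf(rename(location, "partition-2")));   // partition-2
    }
}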
org.junit.Assert.assertTrue; + public class TestStandardFlowFileQueue { - private TestSwapManager swapManager = null; + private MockSwapManager swapManager = null; private StandardFlowFileQueue queue = null; private Connection connection = null; @@ -98,7 +85,7 @@ public void setup() { Mockito.when(connection.getDestination()).thenReturn(Mockito.mock(Connectable.class)); scheduler = Mockito.mock(ProcessScheduler.class); - swapManager = new TestSwapManager(); + swapManager = new MockSwapManager(); flowFileRepo = Mockito.mock(FlowFileRepository.class); provRepo = Mockito.mock(ProvenanceEventRepository.class); @@ -116,8 +103,8 @@ public Object answer(final InvocationOnMock invocation) throws Throwable { } }).when(provRepo).registerEvents(Mockito.any(Iterable.class)); - queue = new StandardFlowFileQueue("id", connection, flowFileRepo, provRepo, claimManager, scheduler, swapManager, null, 10000, 0L, "0 B"); - TestFlowFile.idGenerator.set(0L); + queue = new StandardFlowFileQueue("id", new NopConnectionEventListener(), flowFileRepo, provRepo, claimManager, scheduler, swapManager, null, 10000, 0L, "0 B"); + MockFlowFileRecord.resetIdGenerator(); } @Test @@ -125,7 +112,7 @@ public void testExpire() { queue.setFlowFileExpiration("1 ms"); for (int i = 0; i < 100; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); } // just make sure that the flowfiles have time to expire. @@ -140,11 +127,11 @@ public void testExpire() { assertNull(pulled); assertEquals(100, expiredRecords.size()); - final QueueSize activeSize = queue.getActiveQueueSize(); + final QueueSize activeSize = queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getActiveQueueSize(); assertEquals(0, activeSize.getObjectCount()); assertEquals(0L, activeSize.getByteCount()); - final QueueSize unackSize = queue.getUnacknowledgedQueueSize(); + final QueueSize unackSize = queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getUnacknowledgedQueueSize(); assertEquals(0, unackSize.getObjectCount()); assertEquals(0L, unackSize.getByteCount()); } @@ -158,13 +145,13 @@ public void testBackPressure() { assertFalse(queue.isFull()); for (int i = 0; i < 9; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertFalse(queue.isFull()); assertFalse(queue.isEmpty()); assertFalse(queue.isActiveQueueEmpty()); } - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertTrue(queue.isFull()); assertFalse(queue.isEmpty()); assertFalse(queue.isActiveQueueEmpty()); @@ -193,11 +180,11 @@ public void testBackPressureAfterPollFilter() throws InterruptedException { queue.setFlowFileExpiration("10 millis"); for (int i = 0; i < 9; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertFalse(queue.isFull()); } - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertTrue(queue.isFull()); Thread.sleep(100L); @@ -226,11 +213,11 @@ public void testBackPressureAfterDrop() throws InterruptedException { queue.setFlowFileExpiration("10 millis"); for (int i = 0; i < 9; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertFalse(queue.isFull()); } - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertTrue(queue.isFull()); Thread.sleep(100L); @@ -259,11 +246,11 @@ public void testBackPressureAfterPollSingle() throws InterruptedException { queue.setFlowFileExpiration("10 millis"); for (int i = 0; i < 9; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); 
assertFalse(queue.isFull()); } - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertTrue(queue.isFull()); Thread.sleep(100L); @@ -284,11 +271,11 @@ public void testBackPressureAfterPollMultiple() throws InterruptedException { queue.setFlowFileExpiration("10 millis"); for (int i = 0; i < 9; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertFalse(queue.isFull()); } - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertTrue(queue.isFull()); Thread.sleep(100L); @@ -306,25 +293,25 @@ public void testBackPressureAfterPollMultiple() throws InterruptedException { @Test public void testSwapOutOccurs() { for (int i = 0; i < 10000; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertEquals(0, swapManager.swapOutCalledCount); assertEquals(i + 1, queue.size().getObjectCount()); assertEquals(i + 1, queue.size().getByteCount()); } for (int i = 0; i < 9999; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertEquals(0, swapManager.swapOutCalledCount); assertEquals(i + 10001, queue.size().getObjectCount()); assertEquals(i + 10001, queue.size().getByteCount()); } - queue.put(new TestFlowFile(1000)); + queue.put(new MockFlowFileRecord(1000)); assertEquals(1, swapManager.swapOutCalledCount); assertEquals(20000, queue.size().getObjectCount()); assertEquals(20999, queue.size().getByteCount()); - assertEquals(10000, queue.getActiveQueueSize().getObjectCount()); + assertEquals(10000, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getActiveQueueSize().getObjectCount()); } @Test @@ -335,13 +322,13 @@ public void testLowestPrioritySwappedOutFirst() { long maxSize = 20000; for (int i = 1; i <= 20000; i++) { - queue.put(new TestFlowFile(maxSize - i)); + queue.put(new MockFlowFileRecord(maxSize - i)); } assertEquals(1, swapManager.swapOutCalledCount); assertEquals(20000, queue.size().getObjectCount()); - assertEquals(10000, queue.getActiveQueueSize().getObjectCount()); + assertEquals(10000, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getActiveQueueSize().getObjectCount()); final List flowFiles = queue.poll(Integer.MAX_VALUE, new HashSet()); assertEquals(10000, flowFiles.size()); for (int i = 0; i < 10000; i++) { @@ -352,37 +339,37 @@ public void testLowestPrioritySwappedOutFirst() { @Test public void testSwapIn() { for (int i = 1; i <= 20000; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); } assertEquals(1, swapManager.swappedOut.size()); - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertEquals(1, swapManager.swappedOut.size()); final Set exp = new HashSet<>(); for (int i = 0; i < 9999; i++) { final FlowFileRecord flowFile = queue.poll(exp); assertNotNull(flowFile); - assertEquals(1, queue.getUnacknowledgedQueueSize().getObjectCount()); - assertEquals(1, queue.getUnacknowledgedQueueSize().getByteCount()); + assertEquals(1, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getUnacknowledgedQueueSize().getObjectCount()); + assertEquals(1, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getUnacknowledgedQueueSize().getByteCount()); queue.acknowledge(Collections.singleton(flowFile)); - assertEquals(0, queue.getUnacknowledgedQueueSize().getObjectCount()); - assertEquals(0, queue.getUnacknowledgedQueueSize().getByteCount()); + assertEquals(0, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getUnacknowledgedQueueSize().getObjectCount()); + 
assertEquals(0, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getUnacknowledgedQueueSize().getByteCount()); } assertEquals(0, swapManager.swapInCalledCount); - assertEquals(1, queue.getActiveQueueSize().getObjectCount()); + assertEquals(1, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getActiveQueueSize().getObjectCount()); assertNotNull(queue.poll(exp)); assertEquals(0, swapManager.swapInCalledCount); - assertEquals(0, queue.getActiveQueueSize().getObjectCount()); + assertEquals(0, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getActiveQueueSize().getObjectCount()); assertEquals(1, swapManager.swapOutCalledCount); assertNotNull(queue.poll(exp)); // this should trigger a swap-in of 10,000 records, and then pull 1 off the top. assertEquals(1, swapManager.swapInCalledCount); - assertEquals(9999, queue.getActiveQueueSize().getObjectCount()); + assertEquals(9999, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getActiveQueueSize().getObjectCount()); assertTrue(swapManager.swappedOut.isEmpty()); @@ -392,14 +379,14 @@ public void testSwapIn() { @Test public void testSwapInWhenThresholdIsLessThanSwapSize() { // create a queue where the swap threshold is less than 10k - queue = new StandardFlowFileQueue("id", connection, flowFileRepo, provRepo, claimManager, scheduler, swapManager, null, 1000, 0L, "0 B"); + queue = new StandardFlowFileQueue("id", new NopConnectionEventListener(), flowFileRepo, provRepo, claimManager, scheduler, swapManager, null, 1000, 0L, "0 B"); for (int i = 1; i <= 20000; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); } assertEquals(1, swapManager.swappedOut.size()); - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); assertEquals(1, swapManager.swappedOut.size()); final Set exp = new HashSet<>(); @@ -412,26 +399,26 @@ public void testSwapInWhenThresholdIsLessThanSwapSize() { for (int i = 0; i < 999; i++) { // final FlowFileRecord flowFile = queue.poll(exp); assertNotNull(flowFile); - assertEquals(1, queue.getUnacknowledgedQueueSize().getObjectCount()); - assertEquals(1, queue.getUnacknowledgedQueueSize().getByteCount()); + assertEquals(1, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getUnacknowledgedQueueSize().getObjectCount()); + assertEquals(1, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getUnacknowledgedQueueSize().getByteCount()); queue.acknowledge(Collections.singleton(flowFile)); - assertEquals(0, queue.getUnacknowledgedQueueSize().getObjectCount()); - assertEquals(0, queue.getUnacknowledgedQueueSize().getByteCount()); + assertEquals(0, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getUnacknowledgedQueueSize().getObjectCount()); + assertEquals(0, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getUnacknowledgedQueueSize().getByteCount()); } assertEquals(0, swapManager.swapInCalledCount); - assertEquals(1, queue.getActiveQueueSize().getObjectCount()); + assertEquals(1, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getActiveQueueSize().getObjectCount()); assertNotNull(queue.poll(exp)); assertEquals(0, swapManager.swapInCalledCount); - assertEquals(0, queue.getActiveQueueSize().getObjectCount()); + assertEquals(0, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getActiveQueueSize().getObjectCount()); assertEquals(1, swapManager.swapOutCalledCount); assertNotNull(queue.poll(exp)); // this should trigger a swap-in of 10,000 records, and then pull 1 
off the top. assertEquals(1, swapManager.swapInCalledCount); - assertEquals(9999, queue.getActiveQueueSize().getObjectCount()); + assertEquals(9999, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getActiveQueueSize().getObjectCount()); assertTrue(swapManager.swappedOut.isEmpty()); @@ -441,7 +428,7 @@ public void testSwapInWhenThresholdIsLessThanSwapSize() { @Test public void testQueueCountsUpdatedWhenIncompleteSwapFile() { for (int i = 1; i <= 20000; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); } assertEquals(20000, queue.size().getObjectCount()); @@ -502,7 +489,7 @@ public void testQueueCountsUpdatedWhenIncompleteSwapFile() { @Test(timeout = 120000) public void testDropSwappedFlowFiles() { for (int i = 1; i <= 30000; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); } assertEquals(2, swapManager.swappedOut.size()); @@ -524,7 +511,7 @@ public void testDropSwappedFlowFiles() { @Test(timeout = 5000) public void testListFlowFilesOnlyActiveQueue() throws InterruptedException { for (int i = 0; i < 9999; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); } final ListFlowFileStatus status = queue.listFlowFiles(UUID.randomUUID().toString(), 10000); @@ -544,7 +531,7 @@ public void testListFlowFilesOnlyActiveQueue() throws InterruptedException { @Test(timeout = 5000) public void testListFlowFilesResultsLimited() throws InterruptedException { for (int i = 0; i < 30050; i++) { - queue.put(new TestFlowFile()); + queue.put(new MockFlowFileRecord()); } final ListFlowFileStatus status = queue.listFlowFiles(UUID.randomUUID().toString(), 100); @@ -565,7 +552,7 @@ public void testListFlowFilesResultsLimitedCollection() throws InterruptedExcept Collection tff = new ArrayList<>(); //Swap Size is 10000 records, so 30000 is equal to 3 swap files. 
for (int i = 0; i < 30000; i++) { - tff.add(new TestFlowFile()); + tff.add(new MockFlowFileRecord()); } queue.putAll(tff); @@ -588,7 +575,7 @@ public void testListFlowFilesResultsLimitedCollection() throws InterruptedExcept public void testOOMEFollowedBySuccessfulSwapIn() { final List flowFiles = new ArrayList<>(); for (int i = 0; i < 50000; i++) { - flowFiles.add(new TestFlowFile()); + flowFiles.add(new MockFlowFileRecord()); } queue.putAll(flowFiles); @@ -633,224 +620,13 @@ public void testOOMEFollowedBySuccessfulSwapIn() { queue.acknowledge(flowFiles); assertNull(queue.poll(expiredRecords)); - assertEquals(0, queue.getActiveQueueSize().getObjectCount()); + assertEquals(0, queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getActiveQueueSize().getObjectCount()); assertEquals(0, queue.size().getObjectCount()); assertTrue(swapManager.swappedOut.isEmpty()); } - private class TestSwapManager implements FlowFileSwapManager { - private final Map> swappedOut = new HashMap<>(); - int swapOutCalledCount = 0; - int swapInCalledCount = 0; - - private int incompleteSwapFileRecordsToInclude = -1; - - private int failSwapInAfterN = -1; - private Throwable failSwapInFailure = null; - - private void setSwapInFailure(final Throwable t) { - this.failSwapInFailure = t; - } - - @Override - public void initialize(final SwapManagerInitializationContext initializationContext) { - - } - - public void enableIncompleteSwapFileException(final int flowFilesToInclude) { - incompleteSwapFileRecordsToInclude = flowFilesToInclude; - } - - @Override - public String swapOut(List flowFiles, FlowFileQueue flowFileQueue) throws IOException { - swapOutCalledCount++; - final String location = UUID.randomUUID().toString(); - swappedOut.put(location, new ArrayList<>(flowFiles)); - return location; - } - - private void throwIncompleteIfNecessary(final String swapLocation, final boolean remove) throws IOException { - if (incompleteSwapFileRecordsToInclude > -1) { - final SwapSummary summary = getSwapSummary(swapLocation); - - final List records; - if (remove) { - records = swappedOut.remove(swapLocation); - } else { - records = swappedOut.get(swapLocation); - } - - final List partial = records.subList(0, incompleteSwapFileRecordsToInclude); - final SwapContents partialContents = new StandardSwapContents(summary, partial); - throw new IncompleteSwapFileException(swapLocation, partialContents); - } - - if (swapInCalledCount > failSwapInAfterN && failSwapInAfterN > -1) { - if (failSwapInFailure instanceof RuntimeException) { - throw (RuntimeException) failSwapInFailure; - } - if (failSwapInFailure instanceof Error) { - throw (Error) failSwapInFailure; - } - - throw new RuntimeException(failSwapInFailure); - } - } - - @Override - public SwapContents peek(String swapLocation, final FlowFileQueue flowFileQueue) throws IOException { - throwIncompleteIfNecessary(swapLocation, false); - return new StandardSwapContents(getSwapSummary(swapLocation), swappedOut.get(swapLocation)); - } - - @Override - public SwapContents swapIn(String swapLocation, FlowFileQueue flowFileQueue) throws IOException { - swapInCalledCount++; - throwIncompleteIfNecessary(swapLocation, true); - return new StandardSwapContents(getSwapSummary(swapLocation), swappedOut.remove(swapLocation)); - } - - @Override - public List recoverSwapLocations(FlowFileQueue flowFileQueue) throws IOException { - return new ArrayList<>(swappedOut.keySet()); - } - - @Override - public SwapSummary getSwapSummary(String swapLocation) throws IOException { - final List flowFiles 
= swappedOut.get(swapLocation); - if (flowFiles == null) { - return StandardSwapSummary.EMPTY_SUMMARY; - } - - int count = 0; - long size = 0L; - Long max = null; - final List resourceClaims = new ArrayList<>(); - for (final FlowFileRecord flowFile : flowFiles) { - count++; - size += flowFile.getSize(); - if (max == null || flowFile.getId() > max) { - max = flowFile.getId(); - } - - if (flowFile.getContentClaim() != null) { - resourceClaims.add(flowFile.getContentClaim().getResourceClaim()); - } - } - - return new StandardSwapSummary(new QueueSize(count, size), max, resourceClaims); - } - - @Override - public void purge() { - swappedOut.clear(); - } - } - - - private static class TestFlowFile implements FlowFileRecord { - private static final AtomicLong idGenerator = new AtomicLong(0L); - - private final long id = idGenerator.getAndIncrement(); - private final long entryDate = System.currentTimeMillis(); - private final Map attributes; - private final long size; - - public TestFlowFile() { - this(1L); - } - - public TestFlowFile(final long size) { - this(new HashMap<>(), size); - } - - public TestFlowFile(final Map attributes, final long size) { - this.attributes = attributes; - this.size = size; - - if (!attributes.containsKey(CoreAttributes.UUID.key())) { - attributes.put(CoreAttributes.UUID.key(), createFakeUUID()); - } - } - - private String createFakeUUID(){ - final String s=Long.toHexString(id); - return new StringBuffer("00000000-0000-0000-0000000000000000".substring(0,(35-s.length()))+s).insert(23, '-').toString(); - } - - @Override - public long getId() { - return id; - } - - @Override - public long getEntryDate() { - return entryDate; - } - - @Override - public long getLineageStartDate() { - return entryDate; - } - - @Override - public Long getLastQueueDate() { - return null; - } - - @Override - public boolean isPenalized() { - return false; - } - - @Override - public String getAttribute(String key) { - return attributes.get(key); - } - - @Override - public long getSize() { - return size; - } - - @Override - public Map getAttributes() { - return Collections.unmodifiableMap(attributes); - } - - @Override - public int compareTo(final FlowFile o) { - return Long.compare(id, o.getId()); - } - - @Override - public long getPenaltyExpirationMillis() { - return 0; - } - - @Override - public ContentClaim getContentClaim() { - return null; - } - - @Override - public long getContentClaimOffset() { - return 0; - } - - @Override - public long getLineageStartIndex() { - return 0; - } - - @Override - public long getQueueDateIndex() { - return 0; - } - } - private static class FlowFileSizePrioritizer implements FlowFilePrioritizer { @Override public int compare(final FlowFile o1, final FlowFile o2) { diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/LoadBalancedQueueIT.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/LoadBalancedQueueIT.java new file mode 100644 index 000000000000..17e92377061d --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/LoadBalancedQueueIT.java @@ -0,0 +1,1345 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
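In the updated TestStandardFlowFileQueue above, the old queue.getActiveQueueSize() and queue.getUnacknowledgedQueueSize() accessors are replaced throughout by the longer queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics() chain. If that repetition ever becomes a burden, a small test-local helper could wrap it; a possible sketch (the helper itself is not part of the patch, only the accessor chain it delegates to appears in the test):

import org.apache.nifi.controller.queue.QueueSize;
import org.apache.nifi.controller.queue.StandardFlowFileQueue;

final class QueueSizes {
    private QueueSizes() {}

    // Shorthand for the local-partition active queue size asserted throughout the test.
    static QueueSize activeQueueSize(final StandardFlowFileQueue queue) {
        return queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getActiveQueueSize();
    }

    // Shorthand for the local-partition unacknowledged queue size.
    static QueueSize unacknowledgedQueueSize(final StandardFlowFileQueue queue) {
        return queue.getQueueDiagnostics().getLocalQueuePartitionDiagnostics().getUnacknowledgedQueueSize();
    }
}

An assertion would then read as assertEquals(0, QueueSizes.activeQueueSize(queue).getObjectCount()).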
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered; + +import org.apache.nifi.cluster.coordination.ClusterCoordinator; +import org.apache.nifi.cluster.coordination.ClusterTopologyEventListener; +import org.apache.nifi.cluster.coordination.node.NodeConnectionState; +import org.apache.nifi.cluster.coordination.node.NodeConnectionStatus; +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.connectable.Connection; +import org.apache.nifi.controller.FlowController; +import org.apache.nifi.controller.MockFlowFileRecord; +import org.apache.nifi.controller.MockSwapManager; +import org.apache.nifi.controller.ProcessScheduler; +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.queue.LoadBalancedFlowFileQueue; +import org.apache.nifi.controller.queue.NopConnectionEventListener; +import org.apache.nifi.controller.queue.clustered.client.StandardLoadBalanceFlowFileCodec; +import org.apache.nifi.controller.queue.clustered.client.async.AsyncLoadBalanceClient; +import org.apache.nifi.controller.queue.clustered.client.async.nio.NioAsyncLoadBalanceClientFactory; +import org.apache.nifi.controller.queue.clustered.client.async.nio.NioAsyncLoadBalanceClientRegistry; +import org.apache.nifi.controller.queue.clustered.client.async.nio.NioAsyncLoadBalanceClientTask; +import org.apache.nifi.controller.queue.clustered.partition.FlowFilePartitioner; +import org.apache.nifi.controller.queue.clustered.partition.QueuePartition; +import org.apache.nifi.controller.queue.clustered.partition.RoundRobinPartitioner; +import org.apache.nifi.controller.queue.clustered.server.ConnectionLoadBalanceServer; +import org.apache.nifi.controller.queue.clustered.server.LoadBalanceAuthorizer; +import org.apache.nifi.controller.queue.clustered.server.LoadBalanceProtocol; +import org.apache.nifi.controller.queue.clustered.server.NotAuthorizedException; +import org.apache.nifi.controller.queue.clustered.server.StandardLoadBalanceProtocol; +import org.apache.nifi.controller.repository.ContentNotFoundException; +import org.apache.nifi.controller.repository.ContentRepository; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileRepository; +import org.apache.nifi.controller.repository.RepositoryRecord; +import org.apache.nifi.controller.repository.RepositoryRecordType; +import org.apache.nifi.controller.repository.claim.ContentClaim; +import org.apache.nifi.controller.repository.claim.ResourceClaim; +import org.apache.nifi.controller.repository.claim.ResourceClaimManager; +import org.apache.nifi.controller.repository.claim.StandardResourceClaimManager; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.provenance.ProvenanceRepository; +import org.apache.nifi.security.util.SslContextFactory; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import 
org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import javax.net.ssl.SSLContext; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.KeyManagementException; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.UnrecoverableKeyException; +import java.security.cert.CertificateException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyCollection; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class LoadBalancedQueueIT { + private final LoadBalanceAuthorizer ALWAYS_AUTHORIZED = nodeIds -> {}; + private final LoadBalanceAuthorizer NEVER_AUTHORIZED = nodeIds -> { + throw new NotAuthorizedException("Intentional Unit Test Failure - Not Authorized"); + }; + + private final MockSwapManager flowFileSwapManager = new MockSwapManager(); + private final String queueId = "unit-test"; + private final EventReporter eventReporter = EventReporter.NO_OP; + private final int swapThreshold = 10_000; + + private Set nodeIdentifiers; + private ClusterCoordinator clusterCoordinator; + private NodeIdentifier localNodeId; + private ProcessScheduler processScheduler; + private ResourceClaimManager resourceClaimManager; + private LoadBalancedFlowFileQueue serverQueue; + private FlowController flowController; + + private ProvenanceRepository clientProvRepo; + private ContentRepository clientContentRepo; + private List clientRepoRecords; + private FlowFileRepository clientFlowFileRepo; + private ConcurrentMap clientClaimContents; + + private ProvenanceRepository serverProvRepo; + private List serverRepoRecords; + private FlowFileRepository serverFlowFileRepo; + private ConcurrentMap serverClaimContents; + private ContentRepository serverContentRepo; + + private SSLContext sslContext; + + private final Set clusterEventListeners = Collections.synchronizedSet(new HashSet<>()); + private final AtomicReference compressionReference = new AtomicReference<>(); + + @Before + public void setup() throws IOException, UnrecoverableKeyException, CertificateException, NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + compressionReference.set(LoadBalanceCompression.DO_NOT_COMPRESS); + + nodeIdentifiers = new HashSet<>(); + + clusterCoordinator = mock(ClusterCoordinator.class); + when(clusterCoordinator.getNodeIdentifiers()).thenAnswer(invocation -> new HashSet<>(nodeIdentifiers)); + when(clusterCoordinator.getLocalNodeIdentifier()).thenAnswer(invocation -> localNodeId); + + clusterEventListeners.clear(); + doAnswer(new Answer() { + @Override + public Object answer(final 
InvocationOnMock invocation) { + clusterEventListeners.add(invocation.getArgumentAt(0, ClusterTopologyEventListener.class)); + return null; + } + }).when(clusterCoordinator).registerEventListener(any(ClusterTopologyEventListener.class)); + + processScheduler = mock(ProcessScheduler.class); + clientProvRepo = mock(ProvenanceRepository.class); + resourceClaimManager = new StandardResourceClaimManager(); + final Connection connection = mock(Connection.class); + when(connection.getIdentifier()).thenReturn(queueId); + + serverQueue = mock(LoadBalancedFlowFileQueue.class); + when(serverQueue.isFull()).thenReturn(false); + when(connection.getFlowFileQueue()).thenReturn(serverQueue); + doAnswer(invocation -> compressionReference.get()).when(serverQueue).getLoadBalanceCompression(); + + flowController = mock(FlowController.class); + when(flowController.getConnection(anyString())).thenReturn(connection); + + // Create repos for the server + serverRepoRecords = Collections.synchronizedList(new ArrayList<>()); + serverFlowFileRepo = createFlowFileRepository(serverRepoRecords); + + serverClaimContents = new ConcurrentHashMap<>(); + serverContentRepo = createContentRepository(serverClaimContents); + serverProvRepo = mock(ProvenanceRepository.class); + + clientClaimContents = new ConcurrentHashMap<>(); + clientContentRepo = createContentRepository(clientClaimContents); + clientRepoRecords = Collections.synchronizedList(new ArrayList<>()); + clientFlowFileRepo = createFlowFileRepository(clientRepoRecords); + + final String keystore = "src/test/resources/localhost-ks.jks"; + final String keystorePass = "OI7kMpWzzVNVx/JGhTL/0uO4+PWpGJ46uZ/pfepbkwI"; + final String keyPass = keystorePass; + final String truststore = "src/test/resources/localhost-ts.jks"; + final String truststorePass = "wAOR0nQJ2EXvOP0JZ2EaqA/n7W69ILS4sWAHghmIWCc"; + sslContext = SslContextFactory.createSslContext(keystore, keystorePass.toCharArray(), keyPass.toCharArray(), "JKS", + truststore, truststorePass.toCharArray(), "JKS", + SslContextFactory.ClientAuth.REQUIRED, "TLS"); + } + + + private ContentClaim createContentClaim(final byte[] bytes) { + final ResourceClaim resourceClaim = mock(ResourceClaim.class); + when(resourceClaim.getContainer()).thenReturn("container"); + when(resourceClaim.getSection()).thenReturn("section"); + when(resourceClaim.getId()).thenReturn("identifier"); + + final ContentClaim contentClaim = mock(ContentClaim.class); + when(contentClaim.getResourceClaim()).thenReturn(resourceClaim); + + if (bytes != null) { + clientClaimContents.put(contentClaim, bytes); + } + + return contentClaim; + } + + + private NioAsyncLoadBalanceClientFactory createClientFactory(final SSLContext sslContext) { + final FlowFileContentAccess flowFileContentAccess = flowFile -> clientContentRepo.read(flowFile.getContentClaim()); + return new NioAsyncLoadBalanceClientFactory(sslContext, 30000, flowFileContentAccess, eventReporter, new StandardLoadBalanceFlowFileCodec()); + } + + @Test(timeout = 20_000) + public void testNewNodeAdded() throws IOException, InterruptedException { + localNodeId = new NodeIdentifier("unit-test-local", "localhost", 7090, "localhost", 7090, "localhost", 7090, null, null, null, false, null); + nodeIdentifiers.add(localNodeId); + + // Create the server + final int timeoutMillis = 1000; + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, ALWAYS_AUTHORIZED); + final SSLContext sslContext = null; + + final 
NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); + clientRegistry.start(); + + final NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + final Thread clientThread = new Thread(clientTask); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + + flowFileQueue.setFlowFilePartitioner(new RoundRobinPartitioner()); + + final int serverCount = 5; + final ConnectionLoadBalanceServer[] servers = new ConnectionLoadBalanceServer[serverCount]; + + try { + flowFileQueue.startLoadBalancing(); + clientThread.start(); + + for (int i = 0; i < serverCount; i++) { + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, sslContext, 8, loadBalanceProtocol, eventReporter, timeoutMillis); + servers[i] = server; + server.start(); + + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier nodeId = new NodeIdentifier("unit-test-" + i, "localhost", 8090 + i, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(nodeId); + + + clusterEventListeners.forEach(listener -> listener.onNodeAdded(nodeId)); + + for (int j=0; j < 2; j++) { + final Map attributes = new HashMap<>(); + attributes.put("greeting", "hello"); + + final MockFlowFileRecord flowFile = new MockFlowFileRecord(attributes, 0L); + flowFileQueue.put(flowFile); + } + } + + final int totalFlowFileCount = 6; + + // Wait up to 10 seconds for the server's FlowFile Repository to be updated + final long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10L); + while (serverRepoRecords.size() < totalFlowFileCount && System.currentTimeMillis() < endTime) { + Thread.sleep(10L); + } + + assertFalse("Server's FlowFile Repo was never fully updated", serverRepoRecords.isEmpty()); + + assertEquals(totalFlowFileCount, serverRepoRecords.size()); + + for (final RepositoryRecord serverRecord : serverRepoRecords) { + final FlowFileRecord serverFlowFile = serverRecord.getCurrent(); + assertEquals("hello", serverFlowFile.getAttribute("greeting")); + } + + while (clientRepoRecords.size() < totalFlowFileCount) { + Thread.sleep(10L); + } + + assertEquals(totalFlowFileCount, clientRepoRecords.size()); + + for (final RepositoryRecord clientRecord : clientRepoRecords) { + assertEquals(RepositoryRecordType.DELETE, clientRecord.getType()); + } + } finally { + clientTask.stop(); + + flowFileQueue.stopLoadBalancing(); + + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + Arrays.stream(servers).filter(Objects::nonNull).forEach(ConnectionLoadBalanceServer::stop); + } + } + + @Test(timeout = 60_000) + public void testFailover() throws IOException, InterruptedException { + localNodeId = new NodeIdentifier("unit-test-local", "localhost", 7090, "localhost", 7090, "localhost", 7090, null, null, null, false, null); + 
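testNewNodeAdded above, like the other integration cases in this class, waits for asynchronous delivery by polling a shared repository list inside a while/Thread.sleep loop bounded by a wall-clock deadline. That pattern can be factored into a small helper; a hedged sketch, not part of the patch:

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

final class Awaits {
    private Awaits() {}

    // Polls the condition every 10 ms until it is true or the timeout elapses.
    static boolean await(final BooleanSupplier condition, final long timeout, final TimeUnit unit) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + unit.toMillis(timeout);
        while (System.currentTimeMillis() < deadline) {
            if (condition.getAsBoolean()) {
                return true;
            }
            Thread.sleep(10L);
        }
        return condition.getAsBoolean();
    }
}

A call such as Awaits.await(() -> serverRepoRecords.size() >= totalFlowFileCount, 10, TimeUnit.SECONDS) would then stand in for the explicit deadline loop.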
nodeIdentifiers.add(localNodeId); + + // Create the server + final int timeoutMillis = 1000; + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, ALWAYS_AUTHORIZED); + final SSLContext sslContext = null; + + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, sslContext, 2, loadBalanceProtocol, eventReporter, timeoutMillis); + server.start(); + + try { + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier availableNodeId = new NodeIdentifier("unit-test", "localhost", 8090, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(availableNodeId); + + // Add a Node Identifier pointing to a non-existent server + final NodeIdentifier inaccessibleNodeId = new NodeIdentifier("unit-test-invalid-host-does-not-exist", "invalid-host-does-not-exist", 8090, "invalid-host-does-not-exist", 8090, + "invalid-host-does-not-exist", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(inaccessibleNodeId); + + + final NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); + clientRegistry.start(); + + final NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + + final Thread clientThread = new Thread(clientTask); + clientThread.setDaemon(true); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + flowFileQueue.setFlowFilePartitioner(new RoundRobinPartitioner()); + + try { + final int numFlowFiles = 1200; + for (int i = 0; i < numFlowFiles; i++) { + final ContentClaim contentClaim = createContentClaim("hello".getBytes()); + + final Map attributes = new HashMap<>(); + attributes.put("uuid", UUID.randomUUID().toString()); + attributes.put("greeting", "hello"); + + final MockFlowFileRecord flowFile = new MockFlowFileRecord(attributes, 5L, contentClaim); + flowFileQueue.put(flowFile); + } + + flowFileQueue.startLoadBalancing(); + + clientThread.start(); + + // Sending to one partition should fail. When that happens, half of the FlowFiles should go to the local partition, + // the other half to the other node. 
So the total number of FlowFiles expected is ((numFlowFiles per node) / 3 * 1.5) + final int flowFilesPerNode = numFlowFiles / 3; + final int expectedFlowFileReceiveCount = flowFilesPerNode + flowFilesPerNode / 2; + + // Wait up to 10 seconds for the server's FlowFile Repository to be updated + final long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(30L); + while (serverRepoRecords.size() < expectedFlowFileReceiveCount && System.currentTimeMillis() < endTime) { + Thread.sleep(10L); + } + + assertFalse("Server's FlowFile Repo was never fully updated", serverRepoRecords.isEmpty()); + + assertEquals(expectedFlowFileReceiveCount, serverRepoRecords.size()); + + for (final RepositoryRecord serverRecord : serverRepoRecords) { + final FlowFileRecord serverFlowFile = serverRecord.getCurrent(); + assertEquals("hello", serverFlowFile.getAttribute("greeting")); + + final ContentClaim serverContentClaim = serverFlowFile.getContentClaim(); + final byte[] serverFlowFileContent = serverClaimContents.get(serverContentClaim); + assertArrayEquals("hello".getBytes(), Arrays.copyOfRange(serverFlowFileContent, serverFlowFileContent.length - 5, serverFlowFileContent.length)); + } + + // We expect the client records to be numFlowFiles / 2 because half of the FlowFile will have gone to the other node + // in the cluster and half would still be in the local partition. + while (clientRepoRecords.size() < numFlowFiles / 2) { + Thread.sleep(10L); + } + + assertEquals(numFlowFiles / 2, clientRepoRecords.size()); + + for (final RepositoryRecord clientRecord : clientRepoRecords) { + assertEquals(RepositoryRecordType.DELETE, clientRecord.getType()); + } + } finally { + flowFileQueue.stopLoadBalancing(); + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + } + } finally { + server.stop(); + } + } + + + @Test(timeout = 20_000) + public void testTransferToRemoteNode() throws IOException, InterruptedException { + localNodeId = new NodeIdentifier("unit-test-local", "localhost", 7090, "localhost", 7090, "localhost", 7090, null, null, null, false, null); + nodeIdentifiers.add(localNodeId); + + // Create the server + final int timeoutMillis = 30000; + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, ALWAYS_AUTHORIZED); + final SSLContext sslContext = null; + + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, sslContext, 2, loadBalanceProtocol, eventReporter, timeoutMillis); + server.start(); + + try { + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier remoteNodeId = new NodeIdentifier("unit-test", "localhost", 8090, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(remoteNodeId); + + final NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); + clientRegistry.start(); + + final NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + + final Thread clientThread = new Thread(clientTask); + clientThread.setDaemon(true); + 
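The expected counts in testFailover above follow from round-robin distribution over three node identifiers, with the unreachable node's share rebalanced evenly between the two destinations that remain. Worked through with the test's numbers, as a small calculation that restates the comment in the test:

public class FailoverExpectationSketch {
    public static void main(String[] args) {
        final int numFlowFiles = 1200;
        final int flowFilesPerNode = numFlowFiles / 3;              // 400 per partition under round-robin
        final int rebalancedFromFailedNode = flowFilesPerNode / 2;  // 200 of the failed node's 400 go to the remote node

        final int expectedSentToRemote = flowFilesPerNode + rebalancedFromFailedNode;  // 600, matches expectedFlowFileReceiveCount
        final int expectedKeptInLocalPartition = numFlowFiles - expectedSentToRemote;  // 600, i.e. numFlowFiles / 2

        System.out.println(expectedSentToRemote);         // FlowFiles the remote server should receive
        System.out.println(expectedKeptInLocalPartition); // FlowFiles remaining local; the transferred 600 appear as client DELETE records
    }
}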
clientThread.start(); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + flowFileQueue.setFlowFilePartitioner(new RoundRobinPartitioner()); + + try { + final MockFlowFileRecord firstFlowFile = new MockFlowFileRecord(0L); + flowFileQueue.put(firstFlowFile); + + final Map attributes = new HashMap<>(); + attributes.put("integration", "test"); + attributes.put("unit-test", "false"); + attributes.put("integration-test", "true"); + + final ContentClaim contentClaim = createContentClaim("hello".getBytes()); + final MockFlowFileRecord secondFlowFile = new MockFlowFileRecord(attributes, 5L, contentClaim); + flowFileQueue.put(secondFlowFile); + + flowFileQueue.startLoadBalancing(); + + // Wait up to 10 seconds for the server's FlowFile Repository to be updated + final long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10L); + while (serverRepoRecords.isEmpty() && System.currentTimeMillis() < endTime) { + Thread.sleep(10L); + } + + assertFalse("Server's FlowFile Repo was never updated", serverRepoRecords.isEmpty()); + + assertEquals(1, serverRepoRecords.size()); + + final RepositoryRecord serverRecord = serverRepoRecords.iterator().next(); + final FlowFileRecord serverFlowFile = serverRecord.getCurrent(); + assertEquals("test", serverFlowFile.getAttribute("integration")); + assertEquals("false", serverFlowFile.getAttribute("unit-test")); + assertEquals("true", serverFlowFile.getAttribute("integration-test")); + + final ContentClaim serverContentClaim = serverFlowFile.getContentClaim(); + final byte[] serverFlowFileContent = serverClaimContents.get(serverContentClaim); + assertArrayEquals("hello".getBytes(), serverFlowFileContent); + + while (clientRepoRecords.size() == 0) { + Thread.sleep(10L); + } + + assertEquals(1, clientRepoRecords.size()); + final RepositoryRecord clientRecord = clientRepoRecords.iterator().next(); + assertEquals(RepositoryRecordType.DELETE, clientRecord.getType()); + } finally { + flowFileQueue.stopLoadBalancing(); + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + } + } finally { + server.stop(); + } + } + + + @Test(timeout = 20_000) + public void testContentNotFound() throws IOException, InterruptedException { + localNodeId = new NodeIdentifier("unit-test-local", "localhost", 7090, "localhost", 7090, "localhost", 7090, null, null, null, false, null); + nodeIdentifiers.add(localNodeId); + + // Create the server + final int timeoutMillis = 30000; + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, ALWAYS_AUTHORIZED); + final SSLContext sslContext = null; + + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, sslContext, 2, loadBalanceProtocol, eventReporter, timeoutMillis); + server.start(); + + try { + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier remoteNodeId = new NodeIdentifier("unit-test", "localhost", 8090, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(remoteNodeId); + + final NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); 
+ clientRegistry.start(); + + final NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + + final Thread clientThread = new Thread(clientTask); + clientThread.setDaemon(true); + clientThread.start(); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + flowFileQueue.setFlowFilePartitioner(new RoundRobinPartitioner()); + + try { + final MockFlowFileRecord firstFlowFile = new MockFlowFileRecord(0L); + flowFileQueue.put(firstFlowFile); + + final Map attributes = new HashMap<>(); + attributes.put("integration", "test"); + attributes.put("unit-test", "false"); + attributes.put("integration-test", "true"); + + final ContentClaim contentClaim = createContentClaim("hello".getBytes()); + this.clientClaimContents.remove(contentClaim); // cause ContentNotFoundException + + final MockFlowFileRecord secondFlowFile = new MockFlowFileRecord(attributes, 5L, contentClaim); + flowFileQueue.put(secondFlowFile); + + flowFileQueue.startLoadBalancing(); + + while (clientRepoRecords.size() == 0) { + Thread.sleep(10L); + } + + assertEquals(1, clientRepoRecords.size()); + final RepositoryRecord clientRecord = clientRepoRecords.iterator().next(); + assertEquals(RepositoryRecordType.CONTENTMISSING, clientRecord.getType()); + } finally { + flowFileQueue.stopLoadBalancing(); + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + } + } finally { + server.stop(); + } + } + + + @Test(timeout = 20_000) + public void testTransferToRemoteNodeAttributeCompression() throws IOException, InterruptedException { + localNodeId = new NodeIdentifier("unit-test-local", "localhost", 7090, "localhost", 7090, "localhost", 7090, null, null, null, false, null); + nodeIdentifiers.add(localNodeId); + compressionReference.set(LoadBalanceCompression.COMPRESS_ATTRIBUTES_ONLY); + + // Create the server + final int timeoutMillis = 30000; + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, ALWAYS_AUTHORIZED); + final SSLContext sslContext = null; + + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, sslContext, 2, loadBalanceProtocol, eventReporter, timeoutMillis); + server.start(); + + try { + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier remoteNodeId = new NodeIdentifier("unit-test", "localhost", 8090, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(remoteNodeId); + + final NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); + clientRegistry.start(); + + final NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + 
when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + + final Thread clientThread = new Thread(clientTask); + clientThread.setDaemon(true); + clientThread.start(); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + flowFileQueue.setFlowFilePartitioner(new RoundRobinPartitioner()); + flowFileQueue.setLoadBalanceCompression(LoadBalanceCompression.COMPRESS_ATTRIBUTES_ONLY); + + try { + final MockFlowFileRecord firstFlowFile = new MockFlowFileRecord(0L); + flowFileQueue.put(firstFlowFile); + + final Map attributes = new HashMap<>(); + attributes.put("integration", "test"); + attributes.put("unit-test", "false"); + attributes.put("integration-test", "true"); + + final ContentClaim contentClaim = createContentClaim("hello".getBytes()); + final MockFlowFileRecord secondFlowFile = new MockFlowFileRecord(attributes, 5L, contentClaim); + flowFileQueue.put(secondFlowFile); + + flowFileQueue.startLoadBalancing(); + + // Wait up to 10 seconds for the server's FlowFile Repository to be updated + final long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10L); + while (serverRepoRecords.isEmpty() && System.currentTimeMillis() < endTime) { + Thread.sleep(10L); + } + + assertFalse("Server's FlowFile Repo was never updated", serverRepoRecords.isEmpty()); + + assertEquals(1, serverRepoRecords.size()); + + final RepositoryRecord serverRecord = serverRepoRecords.iterator().next(); + final FlowFileRecord serverFlowFile = serverRecord.getCurrent(); + assertEquals("test", serverFlowFile.getAttribute("integration")); + assertEquals("false", serverFlowFile.getAttribute("unit-test")); + assertEquals("true", serverFlowFile.getAttribute("integration-test")); + + final ContentClaim serverContentClaim = serverFlowFile.getContentClaim(); + final byte[] serverFlowFileContent = serverClaimContents.get(serverContentClaim); + assertArrayEquals("hello".getBytes(), serverFlowFileContent); + + while (clientRepoRecords.size() == 0) { + Thread.sleep(10L); + } + + assertEquals(1, clientRepoRecords.size()); + final RepositoryRecord clientRecord = clientRepoRecords.iterator().next(); + assertEquals(RepositoryRecordType.DELETE, clientRecord.getType()); + } finally { + flowFileQueue.stopLoadBalancing(); + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + } + } finally { + server.stop(); + } + } + + + @Test(timeout = 20_000) + public void testTransferToRemoteNodeContentCompression() throws IOException, InterruptedException { + localNodeId = new NodeIdentifier("unit-test-local", "localhost", 7090, "localhost", 7090, "localhost", 7090, null, null, null, false, null); + nodeIdentifiers.add(localNodeId); + compressionReference.set(LoadBalanceCompression.COMPRESS_ATTRIBUTES_AND_CONTENT); + + // Create the server + final int timeoutMillis = 30000; + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, ALWAYS_AUTHORIZED); + final SSLContext sslContext = null; + + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, 
sslContext, 2, loadBalanceProtocol, eventReporter, timeoutMillis); + server.start(); + + try { + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier remoteNodeId = new NodeIdentifier("unit-test", "localhost", 8090, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(remoteNodeId); + + final NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); + clientRegistry.start(); + + final NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + + final Thread clientThread = new Thread(clientTask); + clientThread.setDaemon(true); + clientThread.start(); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + flowFileQueue.setFlowFilePartitioner(new RoundRobinPartitioner()); + flowFileQueue.setLoadBalanceCompression(LoadBalanceCompression.COMPRESS_ATTRIBUTES_AND_CONTENT); + + try { + final MockFlowFileRecord firstFlowFile = new MockFlowFileRecord(0L); + flowFileQueue.put(firstFlowFile); + + final Map attributes = new HashMap<>(); + attributes.put("integration", "test"); + attributes.put("unit-test", "false"); + attributes.put("integration-test", "true"); + + final ContentClaim contentClaim = createContentClaim("hello".getBytes()); + final MockFlowFileRecord secondFlowFile = new MockFlowFileRecord(attributes, 5L, contentClaim); + flowFileQueue.put(secondFlowFile); + + flowFileQueue.startLoadBalancing(); + + // Wait up to 10 seconds for the server's FlowFile Repository to be updated + final long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10L); + while (serverRepoRecords.isEmpty() && System.currentTimeMillis() < endTime) { + Thread.sleep(10L); + } + + assertFalse("Server's FlowFile Repo was never updated", serverRepoRecords.isEmpty()); + + assertEquals(1, serverRepoRecords.size()); + + final RepositoryRecord serverRecord = serverRepoRecords.iterator().next(); + final FlowFileRecord serverFlowFile = serverRecord.getCurrent(); + assertEquals("test", serverFlowFile.getAttribute("integration")); + assertEquals("false", serverFlowFile.getAttribute("unit-test")); + assertEquals("true", serverFlowFile.getAttribute("integration-test")); + + final ContentClaim serverContentClaim = serverFlowFile.getContentClaim(); + final byte[] serverFlowFileContent = serverClaimContents.get(serverContentClaim); + assertArrayEquals("hello".getBytes(), serverFlowFileContent); + + while (clientRepoRecords.size() == 0) { + Thread.sleep(10L); + } + + assertEquals(1, clientRepoRecords.size()); + final RepositoryRecord clientRecord = clientRepoRecords.iterator().next(); + assertEquals(RepositoryRecordType.DELETE, clientRecord.getType()); + } finally { + flowFileQueue.stopLoadBalancing(); + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + } + } finally { + server.stop(); + } + } + + @Test(timeout = 20_000) + 
public void testWithSSLContext() throws IOException, InterruptedException, UnrecoverableKeyException, CertificateException, NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + localNodeId = new NodeIdentifier("unit-test-local", "localhost", 7090, "localhost", 7090, "localhost", 7090, null, null, null, false, null); + nodeIdentifiers.add(localNodeId); + + // Create the server + final int timeoutMillis = 30000; + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, ALWAYS_AUTHORIZED); + + + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, sslContext, 2, loadBalanceProtocol, eventReporter, timeoutMillis); + server.start(); + + try { + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier remoteNodeId = new NodeIdentifier("unit-test", "localhost", 8090, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(remoteNodeId); + + final NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); + clientRegistry.start(); + + final NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + + final Thread clientThread = new Thread(clientTask); + clientThread.setDaemon(true); + clientThread.start(); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + flowFileQueue.setFlowFilePartitioner(new RoundRobinPartitioner()); + + try { + final MockFlowFileRecord firstFlowFile = new MockFlowFileRecord(0L); + flowFileQueue.put(firstFlowFile); + + final Map attributes = new HashMap<>(); + attributes.put("integration", "test"); + attributes.put("unit-test", "false"); + attributes.put("integration-test", "true"); + + final ContentClaim contentClaim = createContentClaim("hello".getBytes()); + final MockFlowFileRecord secondFlowFile = new MockFlowFileRecord(attributes, 5L, contentClaim); + flowFileQueue.put(secondFlowFile); + + flowFileQueue.startLoadBalancing(); + + // Wait up to 10 seconds for the server's FlowFile Repository to be updated + final long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10L); + while (serverRepoRecords.isEmpty() && System.currentTimeMillis() < endTime) { + Thread.sleep(10L); + } + + assertFalse("Server's FlowFile Repo was never updated", serverRepoRecords.isEmpty()); + + assertEquals(1, serverRepoRecords.size()); + + final RepositoryRecord serverRecord = serverRepoRecords.iterator().next(); + final FlowFileRecord serverFlowFile = serverRecord.getCurrent(); + assertEquals("test", serverFlowFile.getAttribute("integration")); + assertEquals("false", serverFlowFile.getAttribute("unit-test")); + assertEquals("true", serverFlowFile.getAttribute("integration-test")); + + final ContentClaim serverContentClaim = serverFlowFile.getContentClaim(); + 
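+ // serverClaimContents is populated by the mocked server-side ContentRepository as each claim's stream is closed (see createContentRepository below), so the received bytes can be asserted on.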
final byte[] serverFlowFileContent = serverClaimContents.get(serverContentClaim); + assertArrayEquals("hello".getBytes(), serverFlowFileContent); + + while (clientRepoRecords.size() == 0) { + Thread.sleep(10L); + } + + assertEquals(1, clientRepoRecords.size()); + final RepositoryRecord clientRecord = clientRepoRecords.iterator().next(); + assertEquals(RepositoryRecordType.DELETE, clientRecord.getType()); + } finally { + flowFileQueue.stopLoadBalancing(); + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + } + } finally { + server.stop(); + } + } + + + @Test(timeout = 60_000) + public void testReusingClient() throws IOException, InterruptedException, UnrecoverableKeyException, CertificateException, NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + localNodeId = new NodeIdentifier("unit-test-local", "localhost", 7090, "localhost", 7090, "localhost", 7090, null, null, null, false, null); + nodeIdentifiers.add(localNodeId); + + // Create the server + final int timeoutMillis = 30000; + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, ALWAYS_AUTHORIZED); + + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, sslContext, 2, loadBalanceProtocol, eventReporter, timeoutMillis); + server.start(); + + try { + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier remoteNodeId = new NodeIdentifier("unit-test", "localhost", 8090, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(remoteNodeId); + + final NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); + clientRegistry.start(); + + final NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + + final Thread clientThread = new Thread(clientTask); + clientThread.setDaemon(true); + clientThread.start(); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + flowFileQueue.setFlowFilePartitioner(new RoundRobinPartitioner()); + + try { + for (int i = 1; i <= 10; i++) { + final MockFlowFileRecord firstFlowFile = new MockFlowFileRecord(0L); + flowFileQueue.put(firstFlowFile); + + final Map attributes = new HashMap<>(); + attributes.put("integration", "test"); + attributes.put("unit-test", "false"); + attributes.put("integration-test", "true"); + + final ContentClaim contentClaim = createContentClaim("hello".getBytes()); + final MockFlowFileRecord secondFlowFile = new MockFlowFileRecord(attributes, 5L, contentClaim); + flowFileQueue.put(secondFlowFile); + + flowFileQueue.startLoadBalancing(); + + // Wait up to 10 seconds for the server's FlowFile Repository to be updated + final long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10L); + while (serverRepoRecords.size() < i && 
System.currentTimeMillis() < endTime) { + Thread.sleep(10L); + } + + assertEquals(i, serverRepoRecords.size()); + + final RepositoryRecord serverRecord = serverRepoRecords.iterator().next(); + final FlowFileRecord serverFlowFile = serverRecord.getCurrent(); + assertEquals("test", serverFlowFile.getAttribute("integration")); + assertEquals("false", serverFlowFile.getAttribute("unit-test")); + assertEquals("true", serverFlowFile.getAttribute("integration-test")); + + final ContentClaim serverContentClaim = serverFlowFile.getContentClaim(); + final byte[] serverFlowFileContent = serverClaimContents.get(serverContentClaim); + assertArrayEquals("hello".getBytes(), serverFlowFileContent); + + while (clientRepoRecords.size() < i) { + Thread.sleep(10L); + } + + assertEquals(i, clientRepoRecords.size()); + final RepositoryRecord clientRecord = clientRepoRecords.iterator().next(); + assertEquals(RepositoryRecordType.DELETE, clientRecord.getType()); + } + } finally { + flowFileQueue.stopLoadBalancing(); + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + } + } finally { + server.stop(); + } + } + + + @Test(timeout = 20_000) + public void testLargePayload() throws IOException, InterruptedException, UnrecoverableKeyException, CertificateException, NoSuchAlgorithmException, KeyStoreException, KeyManagementException { + localNodeId = new NodeIdentifier("unit-test-local", "localhost", 7090, "localhost", 7090, "localhost", 7090, null, null, null, false, null); + nodeIdentifiers.add(localNodeId); + + // Create the server + final int timeoutMillis = 30000; + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, ALWAYS_AUTHORIZED); + + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, sslContext, 2, loadBalanceProtocol, eventReporter, timeoutMillis); + server.start(); + + try { + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier remoteNodeId = new NodeIdentifier("unit-test", "localhost", 8090, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(remoteNodeId); + + final NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); + clientRegistry.start(); + + final NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + + final Thread clientThread = new Thread(clientTask); + clientThread.setDaemon(true); + clientThread.start(); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + flowFileQueue.setFlowFilePartitioner(new RoundRobinPartitioner()); + + final byte[] payload = new byte[1024 * 1024]; + Arrays.fill(payload, (byte) 'A'); + + try { + final MockFlowFileRecord firstFlowFile = new MockFlowFileRecord(0L); + flowFileQueue.put(firstFlowFile); + + final Map attributes = new 
HashMap<>(); + attributes.put("integration", "test"); + attributes.put("unit-test", "false"); + attributes.put("integration-test", "true"); + + final ContentClaim contentClaim = createContentClaim(payload); + final MockFlowFileRecord secondFlowFile = new MockFlowFileRecord(attributes, payload.length, contentClaim); + flowFileQueue.put(secondFlowFile); + + flowFileQueue.startLoadBalancing(); + + // Wait up to 10 seconds for the server's FlowFile Repository to be updated + final long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10L); + while (serverRepoRecords.isEmpty() && System.currentTimeMillis() < endTime) { + Thread.sleep(10L); + } + + assertFalse("Server's FlowFile Repo was never updated", serverRepoRecords.isEmpty()); + + assertEquals(1, serverRepoRecords.size()); + + final RepositoryRecord serverRecord = serverRepoRecords.iterator().next(); + final FlowFileRecord serverFlowFile = serverRecord.getCurrent(); + assertEquals("test", serverFlowFile.getAttribute("integration")); + assertEquals("false", serverFlowFile.getAttribute("unit-test")); + assertEquals("true", serverFlowFile.getAttribute("integration-test")); + + final ContentClaim serverContentClaim = serverFlowFile.getContentClaim(); + final byte[] serverFlowFileContent = serverClaimContents.get(serverContentClaim); + assertArrayEquals(payload, serverFlowFileContent); + + while (clientRepoRecords.size() == 0) { + Thread.sleep(10L); + } + + assertEquals(1, clientRepoRecords.size()); + final RepositoryRecord clientRecord = clientRepoRecords.iterator().next(); + assertEquals(RepositoryRecordType.DELETE, clientRecord.getType()); + } finally { + flowFileQueue.stopLoadBalancing(); + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + } + } finally { + server.stop(); + } + } + + + @Test(timeout = 60_000) + public void testServerClosesUnexpectedly() throws IOException, InterruptedException { + + doAnswer(new Answer() { + int iterations = 0; + + @Override + public OutputStream answer(final InvocationOnMock invocation) { + if (iterations++ < 5) { + return new OutputStream() { + @Override + public void write(final int b) throws IOException { + throw new IOException("Intentional unit test failure"); + } + }; + } + + final ContentClaim contentClaim = invocation.getArgumentAt(0, ContentClaim.class); + final ByteArrayOutputStream baos = new ByteArrayOutputStream() { + @Override + public void close() throws IOException { + super.close(); + serverClaimContents.put(contentClaim, toByteArray()); + } + }; + + return baos; + } + }).when(serverContentRepo).write(any(ContentClaim.class)); + + // Create the server + final int timeoutMillis = 30000; + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, ALWAYS_AUTHORIZED); + final SSLContext sslContext = null; + + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, sslContext, 2, loadBalanceProtocol, eventReporter, timeoutMillis); + server.start(); + + try { + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier remoteNodeId = new NodeIdentifier("unit-test", "localhost", 8090, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(remoteNodeId); + + final NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); + clientRegistry.start(); + + final 
NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + + final Thread clientThread = new Thread(clientTask); + clientThread.setDaemon(true); + clientThread.start(); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + flowFileQueue.setFlowFilePartitioner(new FlowFilePartitioner() { + @Override + public QueuePartition getPartition(final FlowFileRecord flowFile, final QueuePartition[] partitions, final QueuePartition localPartition) { + for (final QueuePartition partition : partitions) { + if (partition != localPartition) { + return partition; + } + } + + return null; + } + + @Override + public boolean isRebalanceOnClusterResize() { + return true; + } + @Override + public boolean isRebalanceOnFailure() { + return true; + } + }); + + try { + final Map attributes = new HashMap<>(); + attributes.put("integration", "test"); + attributes.put("unit-test", "false"); + attributes.put("integration-test", "true"); + + final ContentClaim contentClaim = createContentClaim("hello".getBytes()); + final MockFlowFileRecord secondFlowFile = new MockFlowFileRecord(attributes, 5L, contentClaim); + flowFileQueue.put(secondFlowFile); + + flowFileQueue.startLoadBalancing(); + + // Wait up to 10 seconds for the server's FlowFile Repository to be updated + final long endTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10L); + while (serverRepoRecords.isEmpty() && System.currentTimeMillis() < endTime) { + Thread.sleep(10L); + } + + assertFalse("Server's FlowFile Repo was never updated", serverRepoRecords.isEmpty()); + + assertEquals(1, serverRepoRecords.size()); + + final RepositoryRecord serverRecord = serverRepoRecords.iterator().next(); + final FlowFileRecord serverFlowFile = serverRecord.getCurrent(); + assertEquals("test", serverFlowFile.getAttribute("integration")); + assertEquals("false", serverFlowFile.getAttribute("unit-test")); + assertEquals("true", serverFlowFile.getAttribute("integration-test")); + + final ContentClaim serverContentClaim = serverFlowFile.getContentClaim(); + final byte[] serverFlowFileContent = serverClaimContents.get(serverContentClaim); + assertArrayEquals("hello".getBytes(), serverFlowFileContent); + + while (clientRepoRecords.size() == 0) { + Thread.sleep(10L); + } + + assertEquals(1, clientRepoRecords.size()); + final RepositoryRecord clientRecord = clientRepoRecords.iterator().next(); + assertEquals(RepositoryRecordType.DELETE, clientRecord.getType()); + } finally { + flowFileQueue.stopLoadBalancing(); + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + } + } finally { + server.stop(); + } + } + + + @Test(timeout = 20_000) + public void testNotAuthorized() throws IOException, InterruptedException { + localNodeId = new NodeIdentifier("unit-test-local", "localhost", 7090, "localhost", 7090, "localhost", 7090, null, null, null, false, null); + nodeIdentifiers.add(localNodeId); + + // Create the server + final int timeoutMillis = 30000; + final 
LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, NEVER_AUTHORIZED); + + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, sslContext, 2, loadBalanceProtocol, eventReporter, timeoutMillis); + server.start(); + + try { + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier remoteNodeId = new NodeIdentifier("unit-test", "localhost", 8090, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(remoteNodeId); + + final NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); + clientRegistry.start(); + + final NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + + final Thread clientThread = new Thread(clientTask); + clientThread.setDaemon(true); + clientThread.start(); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + flowFileQueue.setFlowFilePartitioner(new RoundRobinPartitioner()); + + try { + final MockFlowFileRecord firstFlowFile = new MockFlowFileRecord(0L); + flowFileQueue.put(firstFlowFile); + + final Map attributes = new HashMap<>(); + attributes.put("integration", "test"); + attributes.put("unit-test", "false"); + attributes.put("integration-test", "true"); + + final ContentClaim contentClaim = createContentClaim("hello".getBytes()); + final MockFlowFileRecord secondFlowFile = new MockFlowFileRecord(attributes, 5L, contentClaim); + flowFileQueue.put(secondFlowFile); + + flowFileQueue.startLoadBalancing(); + + Thread.sleep(5000L); + + assertTrue("Server's FlowFile Repo was updated", serverRepoRecords.isEmpty()); + assertTrue(clientRepoRecords.isEmpty()); + + assertEquals(2, flowFileQueue.size().getObjectCount()); + } finally { + flowFileQueue.stopLoadBalancing(); + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + } + } finally { + server.stop(); + } + } + + + @Test(timeout = 35_000) + public void testDestinationNodeQueueFull() throws IOException, InterruptedException { + localNodeId = new NodeIdentifier("unit-test-local", "localhost", 7090, "localhost", 7090, "localhost", 7090, null, null, null, false, null); + nodeIdentifiers.add(localNodeId); + + when(serverQueue.isFull()).thenReturn(true); + + // Create the server + final int timeoutMillis = 30000; + final LoadBalanceProtocol loadBalanceProtocol = new StandardLoadBalanceProtocol(serverFlowFileRepo, serverContentRepo, serverProvRepo, flowController, ALWAYS_AUTHORIZED); + + final ConnectionLoadBalanceServer server = new ConnectionLoadBalanceServer("localhost", 0, sslContext, 2, loadBalanceProtocol, eventReporter, timeoutMillis); + server.start(); + + try { + final int loadBalancePort = server.getPort(); + + // Create the Load Balanced FlowFile Queue + final NodeIdentifier remoteNodeId = new 
NodeIdentifier("unit-test", "localhost", 8090, "localhost", 8090, "localhost", loadBalancePort, null, null, null, false, null); + nodeIdentifiers.add(remoteNodeId); + + final NioAsyncLoadBalanceClientRegistry clientRegistry = new NioAsyncLoadBalanceClientRegistry(createClientFactory(sslContext), 1); + clientRegistry.start(); + + final NodeConnectionStatus connectionStatus = mock(NodeConnectionStatus.class); + when(connectionStatus.getState()).thenReturn(NodeConnectionState.CONNECTED); + when(clusterCoordinator.getConnectionStatus(any(NodeIdentifier.class))).thenReturn(connectionStatus); + final NioAsyncLoadBalanceClientTask clientTask = new NioAsyncLoadBalanceClientTask(clientRegistry, clusterCoordinator, eventReporter); + + final Thread clientThread = new Thread(clientTask); + clientThread.setDaemon(true); + clientThread.start(); + + final SocketLoadBalancedFlowFileQueue flowFileQueue = new SocketLoadBalancedFlowFileQueue(queueId, new NopConnectionEventListener(), processScheduler, clientFlowFileRepo, clientProvRepo, + clientContentRepo, resourceClaimManager, clusterCoordinator, clientRegistry, flowFileSwapManager, swapThreshold, eventReporter); + flowFileQueue.setFlowFilePartitioner(new RoundRobinPartitioner()); + + try { + final MockFlowFileRecord firstFlowFile = new MockFlowFileRecord(0L); + flowFileQueue.put(firstFlowFile); + + final Map attributes = new HashMap<>(); + attributes.put("integration", "test"); + attributes.put("unit-test", "false"); + attributes.put("integration-test", "true"); + + final ContentClaim contentClaim = createContentClaim("hello".getBytes()); + final MockFlowFileRecord secondFlowFile = new MockFlowFileRecord(attributes, 5L, contentClaim); + flowFileQueue.put(secondFlowFile); + + flowFileQueue.startLoadBalancing(); + + Thread.sleep(5000L); + + assertTrue("Server's FlowFile Repo was updated", serverRepoRecords.isEmpty()); + assertTrue(clientRepoRecords.isEmpty()); + + assertEquals(2, flowFileQueue.size().getObjectCount()); + + // Enable data to be transferred + when(serverQueue.isFull()).thenReturn(false); + + while (clientRepoRecords.size() != 1) { + Thread.sleep(10L); + } + + assertEquals(1, serverRepoRecords.size()); + } finally { + flowFileQueue.stopLoadBalancing(); + clientRegistry.getAllClients().forEach(AsyncLoadBalanceClient::stop); + } + } finally { + server.stop(); + } + } + + private FlowFileRepository createFlowFileRepository(final List repoRecords) throws IOException { + final FlowFileRepository flowFileRepo = mock(FlowFileRepository.class); + doAnswer(invocation -> { + final Collection records = invocation.getArgumentAt(0, Collection.class); + repoRecords.addAll(records); + return null; + }).when(flowFileRepo).updateRepository(anyCollection()); + + return flowFileRepo; + } + + + private ContentRepository createContentRepository(final ConcurrentMap claimContents) throws IOException { + final ContentRepository contentRepo = mock(ContentRepository.class); + + Mockito.doAnswer(new Answer() { + @Override + public ContentClaim answer(final InvocationOnMock invocation) { + return createContentClaim(null); + } + }).when(contentRepo).create(Mockito.anyBoolean()); + + + Mockito.doAnswer(new Answer() { + @Override + public OutputStream answer(final InvocationOnMock invocation) { + final ContentClaim contentClaim = invocation.getArgumentAt(0, ContentClaim.class); + + final ByteArrayOutputStream baos = new ByteArrayOutputStream() { + @Override + public void close() throws IOException { + super.close(); + claimContents.put(contentClaim, toByteArray()); + } + }; 
+ + return baos; + } + }).when(contentRepo).write(any(ContentClaim.class)); + + + Mockito.doAnswer(new Answer() { + @Override + public InputStream answer(final InvocationOnMock invocation) { + final ContentClaim contentClaim = invocation.getArgumentAt(0, ContentClaim.class); + if (contentClaim == null) { + return new ByteArrayInputStream(new byte[0]); + } + + final byte[] bytes = claimContents.get(contentClaim); + if (bytes == null) { + throw new ContentNotFoundException(contentClaim); + } + + return new ByteArrayInputStream(bytes); + } + }).when(contentRepo).read(any(ContentClaim.class)); + + return contentRepo; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/MockTransferFailureDestination.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/MockTransferFailureDestination.java new file mode 100644 index 000000000000..dc5c1dbd74df --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/MockTransferFailureDestination.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered; + +import org.apache.nifi.controller.queue.FlowFileQueueContents; +import org.apache.nifi.controller.queue.clustered.partition.FlowFilePartitioner; +import org.apache.nifi.controller.repository.FlowFileRecord; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; + +public class MockTransferFailureDestination implements TransferFailureDestination { + private List flowFilesTransferred = new ArrayList<>(); + private List swapFilesTransferred = new ArrayList<>(); + private final boolean rebalanceOnFailure; + + public MockTransferFailureDestination(final boolean rebalanceOnFailure) { + this.rebalanceOnFailure = rebalanceOnFailure; + } + + @Override + public void putAll(final Collection flowFiles, final FlowFilePartitioner partitionerUsed) { + flowFilesTransferred.addAll(flowFiles); + } + + public List getFlowFilesTransferred() { + return flowFilesTransferred; + } + + @Override + public void putAll(final Function queueContents, final FlowFilePartitioner partitionerUsed) { + final FlowFileQueueContents contents = queueContents.apply("unit-test"); + flowFilesTransferred.addAll(contents.getActiveFlowFiles()); + swapFilesTransferred.addAll(contents.getSwapLocations()); + } + + @Override + public boolean isRebalanceOnFailure(final FlowFilePartitioner partitionerUsed) { + return rebalanceOnFailure; + } + + public List getSwapFilesTransferred() { + return swapFilesTransferred; + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestContentRepositoryFlowFileAccess.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestContentRepositoryFlowFileAccess.java new file mode 100644 index 000000000000..4d3609d67940 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestContentRepositoryFlowFileAccess.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered; + +import org.apache.nifi.controller.repository.ContentNotFoundException; +import org.apache.nifi.controller.repository.ContentRepository; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.claim.ContentClaim; +import org.apache.nifi.controller.repository.claim.ResourceClaim; +import org.apache.nifi.controller.repository.claim.ResourceClaimManager; +import org.apache.nifi.controller.repository.claim.StandardContentClaim; +import org.apache.nifi.controller.repository.claim.StandardResourceClaim; +import org.apache.nifi.controller.repository.claim.StandardResourceClaimManager; +import org.apache.nifi.stream.io.StreamUtils; +import org.junit.Assert; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class TestContentRepositoryFlowFileAccess { + + @Test + public void testInputStreamFromContentRepo() throws IOException { + final ContentRepository contentRepo = mock(ContentRepository.class); + + final ResourceClaimManager claimManager = new StandardResourceClaimManager(); + final ResourceClaim resourceClaim = new StandardResourceClaim(claimManager, "container", "section", "id", false); + final ContentClaim contentClaim = new StandardContentClaim(resourceClaim, 5L); + + final FlowFileRecord flowFile = mock(FlowFileRecord.class); + when(flowFile.getContentClaim()).thenReturn(contentClaim); + when(flowFile.getSize()).thenReturn(5L); + + final InputStream inputStream = new ByteArrayInputStream("hello".getBytes()); + when(contentRepo.read(contentClaim)).thenReturn(inputStream); + + final ContentRepositoryFlowFileAccess flowAccess = new ContentRepositoryFlowFileAccess(contentRepo); + + final InputStream repoStream = flowAccess.read(flowFile); + verify(contentRepo, times(1)).read(contentClaim); + + final byte[] buffer = new byte[5]; + StreamUtils.fillBuffer(repoStream, buffer); + assertEquals(-1, repoStream.read()); + assertArrayEquals("hello".getBytes(), buffer); + } + + + @Test + public void testContentNotFoundPropagated() throws IOException { + final ContentRepository contentRepo = mock(ContentRepository.class); + + final ResourceClaimManager claimManager = new StandardResourceClaimManager(); + final ResourceClaim resourceClaim = new StandardResourceClaim(claimManager, "container", "section", "id", false); + final ContentClaim contentClaim = new StandardContentClaim(resourceClaim, 5L); + + final FlowFileRecord flowFile = mock(FlowFileRecord.class); + when(flowFile.getContentClaim()).thenReturn(contentClaim); + + final ContentNotFoundException cnfe = new ContentNotFoundException(contentClaim); + when(contentRepo.read(contentClaim)).thenThrow(cnfe); + + final ContentRepositoryFlowFileAccess flowAccess = new ContentRepositoryFlowFileAccess(contentRepo); + + try { + flowAccess.read(flowFile); + Assert.fail("Expected ContentNotFoundException but it did not happen"); + } catch (final ContentNotFoundException thrown) { + // expected + thrown.getFlowFile().orElseThrow(() -> new AssertionError("Expected FlowFile to be provided")); + } + } + + @Test + public void testEOFExceptionIfNotEnoughData() throws IOException { + final 
ContentRepository contentRepo = mock(ContentRepository.class); + + final ResourceClaimManager claimManager = new StandardResourceClaimManager(); + final ResourceClaim resourceClaim = new StandardResourceClaim(claimManager, "container", "section", "id", false); + final ContentClaim contentClaim = new StandardContentClaim(resourceClaim, 5L); + + final FlowFileRecord flowFile = mock(FlowFileRecord.class); + when(flowFile.getContentClaim()).thenReturn(contentClaim); + when(flowFile.getSize()).thenReturn(100L); + + final InputStream inputStream = new ByteArrayInputStream("hello".getBytes()); + when(contentRepo.read(contentClaim)).thenReturn(inputStream); + + final ContentRepositoryFlowFileAccess flowAccess = new ContentRepositoryFlowFileAccess(contentRepo); + + final InputStream repoStream = flowAccess.read(flowFile); + verify(contentRepo, times(1)).read(contentClaim); + + final byte[] buffer = new byte[5]; + StreamUtils.fillBuffer(repoStream, buffer); + + try { + repoStream.read(); + Assert.fail("Expected EOFException because not enough bytes were in the InputStream for the FlowFile"); + } catch (final EOFException eof) { + // expected + } + } + +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestNaiveLimitThreshold.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestNaiveLimitThreshold.java new file mode 100644 index 000000000000..e4f0c74a38ea --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestNaiveLimitThreshold.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +public class TestNaiveLimitThreshold { + + @Test + public void testCount() { + final SimpleLimitThreshold threshold = new SimpleLimitThreshold(10, 100L); + for (int i = 0; i < 9; i++) { + threshold.adjust(1, 1L); + assertFalse(threshold.isThresholdMet()); + } + + threshold.adjust(1, 1L); + assertTrue(threshold.isThresholdMet()); + } + + @Test + public void testSize() { + final SimpleLimitThreshold threshold = new SimpleLimitThreshold(10, 100L); + for (int i = 0; i < 9; i++) { + threshold.adjust(0, 10L); + assertFalse(threshold.isThresholdMet()); + } + + threshold.adjust(1, 9L); + assertFalse(threshold.isThresholdMet()); + + threshold.adjust(-1, 1L); + assertTrue(threshold.isThresholdMet()); + + threshold.adjust(0, -1L); + assertFalse(threshold.isThresholdMet()); + + threshold.adjust(-10, 10000L); + assertTrue(threshold.isThresholdMet()); + } + +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestSocketLoadBalancedFlowFileQueue.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestSocketLoadBalancedFlowFileQueue.java new file mode 100644 index 000000000000..971770a31747 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestSocketLoadBalancedFlowFileQueue.java @@ -0,0 +1,514 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered; + +import org.apache.nifi.cluster.coordination.ClusterCoordinator; +import org.apache.nifi.cluster.coordination.ClusterTopologyEventListener; +import org.apache.nifi.cluster.protocol.NodeIdentifier; +import org.apache.nifi.connectable.Connection; +import org.apache.nifi.controller.MockFlowFileRecord; +import org.apache.nifi.controller.MockSwapManager; +import org.apache.nifi.controller.ProcessScheduler; +import org.apache.nifi.controller.queue.NopConnectionEventListener; +import org.apache.nifi.controller.queue.QueueSize; +import org.apache.nifi.controller.queue.clustered.client.async.AsyncLoadBalanceClientRegistry; +import org.apache.nifi.controller.queue.clustered.partition.FlowFilePartitioner; +import org.apache.nifi.controller.queue.clustered.partition.QueuePartition; +import org.apache.nifi.controller.queue.clustered.partition.RoundRobinPartitioner; +import org.apache.nifi.controller.repository.ContentRepository; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileRepository; +import org.apache.nifi.controller.repository.SwapSummary; +import org.apache.nifi.controller.repository.claim.ResourceClaimManager; +import org.apache.nifi.controller.repository.claim.StandardResourceClaimManager; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.provenance.ProvenanceEventRepository; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TestSocketLoadBalancedFlowFileQueue { + + private Connection connection; + private FlowFileRepository flowFileRepo; + private ContentRepository contentRepo; + private ProvenanceEventRepository provRepo; + private ResourceClaimManager claimManager; + private ClusterCoordinator clusterCoordinator; + private MockSwapManager swapManager; + private EventReporter eventReporter; + private SocketLoadBalancedFlowFileQueue queue; + private volatile ClusterTopologyEventListener clusterTopologyEventListener; + + private List nodeIds; + private int nodePort = 4096; + + @Before + public void setup() { + MockFlowFileRecord.resetIdGenerator(); + connection = mock(Connection.class); + when(connection.getIdentifier()).thenReturn("unit-test"); + + flowFileRepo = mock(FlowFileRepository.class); + contentRepo = mock(ContentRepository.class); + provRepo = mock(ProvenanceEventRepository.class); + claimManager = new StandardResourceClaimManager(); + clusterCoordinator = mock(ClusterCoordinator.class); + swapManager = new MockSwapManager(); + eventReporter = EventReporter.NO_OP; + + final NodeIdentifier localNodeIdentifier = createNodeIdentifier(); + + nodeIds = new ArrayList<>(); + nodeIds.add(localNodeIdentifier); + 
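+ // Two more (remote) node identifiers are added next so that the queue sees a three-node cluster, i.e. three partitions.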
nodeIds.add(createNodeIdentifier()); + nodeIds.add(createNodeIdentifier()); + + Mockito.doAnswer(new Answer<Set<NodeIdentifier>>() { + @Override + public Set<NodeIdentifier> answer(InvocationOnMock invocation) throws Throwable { + return new HashSet<>(nodeIds); + } + }).when(clusterCoordinator).getNodeIdentifiers(); + + when(clusterCoordinator.getLocalNodeIdentifier()).thenReturn(localNodeIdentifier); + + doAnswer(new Answer() { + @Override + public Object answer(final InvocationOnMock invocation) throws Throwable { + clusterTopologyEventListener = invocation.getArgumentAt(0, ClusterTopologyEventListener.class); + return null; + } + }).when(clusterCoordinator).registerEventListener(Mockito.any(ClusterTopologyEventListener.class)); + + final ProcessScheduler scheduler = mock(ProcessScheduler.class); + + final AsyncLoadBalanceClientRegistry registry = mock(AsyncLoadBalanceClientRegistry.class); + queue = new SocketLoadBalancedFlowFileQueue("unit-test", new NopConnectionEventListener(), scheduler, flowFileRepo, provRepo, + contentRepo, claimManager, clusterCoordinator, registry, swapManager, 10000, eventReporter); + } + + private NodeIdentifier createNodeIdentifier() { + return new NodeIdentifier(UUID.randomUUID().toString(), "localhost", nodePort++, "localhost", nodePort++, + "localhost", nodePort++, "localhost", nodePort++, nodePort++, true, Collections.emptySet()); + } + + @Test + public void testBinsAccordingToPartitioner() { + final FlowFilePartitioner partitioner = new StaticFlowFilePartitioner(1); + queue.setFlowFilePartitioner(partitioner); + + final QueuePartition desiredPartition = queue.getPartition(1); + for (int i = 0; i < 100; i++) { + final MockFlowFileRecord flowFile = new MockFlowFileRecord(0L); + final QueuePartition partition = queue.putAndGetPartition(flowFile); + assertSame(desiredPartition, partition); + } + } + + @Test + public void testPutAllBinsFlowFilesSeparately() { + // Partition data based on size. FlowFiles with 0 bytes will go to partition 0 (local partition), + // FlowFiles with 1 byte will go to partition 1, and FlowFiles with 2 bytes will go to partition 2. + final FlowFilePartitioner partitioner = new FlowFileSizePartitioner(); + queue.setFlowFilePartitioner(partitioner); + + // Add 3 FlowFiles for each size + final List<FlowFileRecord> flowFiles = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + flowFiles.add(new MockFlowFileRecord(0)); + flowFiles.add(new MockFlowFileRecord(1)); + flowFiles.add(new MockFlowFileRecord(2)); + } + + final Map<QueuePartition, List<FlowFileRecord>> partitionMap = queue.putAllAndGetPartitions(flowFiles); + assertEquals(3, partitionMap.size()); + + // For each partition, get the List of FlowFiles added to it, then verify that there are 3 FlowFiles with that size.
+ for (int i = 0; i < 3; i++) { + final QueuePartition partition = queue.getPartition(i); + final List flowFilesForPartition = partitionMap.get(partition); + assertNotNull(flowFilesForPartition); + assertEquals(3, flowFilesForPartition.size()); + + for (final FlowFileRecord flowFile : flowFilesForPartition) { + assertEquals(i, flowFile.getSize()); + } + } + } + + private int determineRemotePartitionIndex() { + final QueuePartition localPartition = queue.getLocalPartition(); + if (queue.getPartition(0) == localPartition) { + return 1; + } else { + return 0; + } + } + + private int determineLocalPartitionIndex() { + final QueuePartition localPartition = queue.getLocalPartition(); + for (int i=0; i < clusterCoordinator.getNodeIdentifiers().size(); i++) { + if (queue.getPartition(i) == localPartition) { + return i; + } + } + + throw new IllegalStateException("Could not determine local partition index"); + } + + @Test + public void testIsEmptyWhenFlowFileInRemotePartition() { + queue.setFlowFilePartitioner(new StaticFlowFilePartitioner(determineRemotePartitionIndex())); + + assertTrue(queue.isEmpty()); + assertTrue(queue.isActiveQueueEmpty()); + assertEquals(new QueueSize(0, 0L), queue.size()); + + queue.put(new MockFlowFileRecord(0L)); + assertFalse(queue.isEmpty()); + assertTrue(queue.isActiveQueueEmpty()); + assertEquals(new QueueSize(1, 0L), queue.size()); + + assertNull(queue.poll(new HashSet<>())); + assertFalse(queue.isEmpty()); + assertTrue(queue.isActiveQueueEmpty()); + assertEquals(new QueueSize(1, 0L), queue.size()); + } + + @Test + public void testIsEmptyWhenFlowFileInLocalPartition() { + queue.setFlowFilePartitioner(new StaticFlowFilePartitioner(determineLocalPartitionIndex())); + + // Ensure queue is empty + assertTrue(queue.isEmpty()); + assertTrue(queue.isActiveQueueEmpty()); + assertEquals(new QueueSize(0, 0L), queue.size()); + + // add a flowfile + final FlowFileRecord flowFile = new MockFlowFileRecord(0L); + queue.put(flowFile); + assertFalse(queue.isEmpty()); + assertFalse(queue.isActiveQueueEmpty()); + assertEquals(new QueueSize(1, 0L), queue.size()); + + // Ensure that we get the same FlowFile back. This will not decrement + // the queue size, only acknowledging the FlowFile will do that. + assertSame(flowFile, queue.poll(new HashSet<>())); + assertFalse(queue.isEmpty()); + assertTrue(queue.isActiveQueueEmpty()); + assertEquals(new QueueSize(1, 0L), queue.size()); + + // Acknowledging FlowFile should reduce queue size + queue.acknowledge(flowFile); + assertTrue(queue.isEmpty()); + assertTrue(queue.isActiveQueueEmpty()); + assertEquals(new QueueSize(0, 0L), queue.size()); + + // Add FlowFile back in, poll it to ensure that we get it back, and + // then acknowledge as a Collection and ensure the correct sizes. 
+ queue.put(flowFile); + assertFalse(queue.isEmpty()); + assertFalse(queue.isActiveQueueEmpty()); + assertEquals(new QueueSize(1, 0L), queue.size()); + + assertSame(flowFile, queue.poll(new HashSet<>())); + assertFalse(queue.isEmpty()); + assertTrue(queue.isActiveQueueEmpty()); + assertEquals(new QueueSize(1, 0L), queue.size()); + + queue.acknowledge(Collections.singleton(flowFile)); + assertTrue(queue.isEmpty()); + assertTrue(queue.isActiveQueueEmpty()); + assertEquals(new QueueSize(0, 0L), queue.size()); + } + + @Test + public void testGetFlowFile() throws IOException { + queue.setFlowFilePartitioner(new FlowFileSizePartitioner()); + + final Map<String, String> localAttributes = Collections.singletonMap("uuid", "local"); + final MockFlowFileRecord localFlowFile = new MockFlowFileRecord(localAttributes, determineLocalPartitionIndex()); + + final Map<String, String> remoteAttributes = Collections.singletonMap("uuid", "remote"); + final MockFlowFileRecord remoteFlowFile = new MockFlowFileRecord(remoteAttributes, determineRemotePartitionIndex()); + + queue.put(localFlowFile); + queue.put(remoteFlowFile); + + assertSame(localFlowFile, queue.getFlowFile("local")); + assertNull(queue.getFlowFile("remote")); + assertNull(queue.getFlowFile("other")); + } + + + @Test + public void testRecoverSwapFiles() throws IOException { + for (int partitionIndex = 0; partitionIndex < 3; partitionIndex++) { + final String partitionName = queue.getPartition(partitionIndex).getSwapPartitionName(); + + final List<FlowFileRecord> flowFiles = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + flowFiles.add(new MockFlowFileRecord(100L)); + } + + swapManager.swapOut(flowFiles, queue, partitionName); + } + + final List<FlowFileRecord> flowFiles = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + flowFiles.add(new MockFlowFileRecord(100L)); + } + + swapManager.swapOut(flowFiles, queue, "other-partition"); + + final SwapSummary swapSummary = queue.recoverSwappedFlowFiles(); + assertEquals(399L, swapSummary.getMaxFlowFileId().longValue()); + assertEquals(400, swapSummary.getQueueSize().getObjectCount()); + assertEquals(400 * 100L, swapSummary.getQueueSize().getByteCount()); + } + + + @Test(timeout = 10000) + public void testChangeInClusterTopologyTriggersRebalance() throws InterruptedException { + // Create partitioner that sends first 2 FlowFiles to Partition 0, next 2 to Partition 1, and then next 4 to Partition 3. + queue.setFlowFilePartitioner(new StaticSequencePartitioner(new int[] {0, 0, 1, 1, 3, 3, 3, 3}, true)); + + for (int i = 0; i < 4; i++) { + queue.put(new MockFlowFileRecord()); + } + + assertEquals(2, queue.getPartition(0).size().getObjectCount()); + assertEquals(2, queue.getPartition(1).size().getObjectCount()); + assertEquals(0, queue.getPartition(2).size().getObjectCount()); + + final Set<NodeIdentifier> updatedNodeIdentifiers = new HashSet<>(nodeIds); + // Add a Node Identifier with an ID consisting of a bunch of Z's so that the new partition will be Partition Number 3.
+ updatedNodeIdentifiers.add(new NodeIdentifier("ZZZZZZZZZZZZZZ", "localhost", nodePort++, "localhost", nodePort++, + "localhost", nodePort++, "localhost", nodePort++, nodePort++, true, Collections.emptySet())); + + queue.setNodeIdentifiers(updatedNodeIdentifiers, false); + + final int[] expectedPartitionSizes = new int[] {0, 0, 0, 4}; + final int[] partitionSizes = new int[4]; + while (!Arrays.equals(expectedPartitionSizes, partitionSizes)) { + Thread.sleep(10L); + + for (int i = 0; i < 4; i++) { + partitionSizes[i] = queue.getPartition(i).size().getObjectCount(); + } + } + } + + @Test(timeout = 10000) + public void testChangeInClusterTopologyTriggersRebalanceOnlyOnRemovedNodeIfNecessary() throws InterruptedException { + // Create partitioner that sends the first FlowFile to Partition 0, the second to Partition 1, and the next 2 to Partition 2. + queue.setFlowFilePartitioner(new StaticSequencePartitioner(new int[] {0, 1, 2, 2, 0, 1}, false)); + + for (int i = 0; i < 4; i++) { + queue.put(new MockFlowFileRecord()); + } + + assertEquals(1, queue.getPartition(0).size().getObjectCount()); + assertEquals(1, queue.getPartition(1).size().getObjectCount()); + assertEquals(2, queue.getPartition(2).size().getObjectCount()); + + final Set<NodeIdentifier> updatedNodeIdentifiers = new HashSet<>(); + updatedNodeIdentifiers.add(nodeIds.get(0)); + updatedNodeIdentifiers.add(nodeIds.get(1)); + queue.setNodeIdentifiers(updatedNodeIdentifiers, false); + + final int[] expectedPartitionSizes = new int[] {2, 2}; + final int[] partitionSizes = new int[2]; + while (!Arrays.equals(expectedPartitionSizes, partitionSizes)) { + Thread.sleep(10L); + + for (int i = 0; i < 2; i++) { + partitionSizes[i] = queue.getPartition(i).size().getObjectCount(); + } + } + } + + @Test(timeout = 10000) + public void testChangeInPartitionerTriggersRebalance() throws InterruptedException { + // Create partitioner that alternates FlowFiles between Partition 0 and Partition 1. + queue.setFlowFilePartitioner(new StaticSequencePartitioner(new int[] {0, 1, 0, 1}, false)); + + for (int i = 0; i < 4; i++) { + queue.put(new MockFlowFileRecord()); + } + + assertEquals(2, queue.getPartition(0).size().getObjectCount()); + assertEquals(2, queue.getPartition(1).size().getObjectCount()); + assertEquals(0, queue.getPartition(2).size().getObjectCount()); + + queue.setFlowFilePartitioner(new StaticSequencePartitioner(new int[] {0, 1, 2, 2}, true)); + + final int[] expectedPartitionSizes = new int[] {1, 1, 2}; + assertPartitionSizes(expectedPartitionSizes); + } + + @Test(timeout = 100000) + public void testLocalNodeIdentifierSet() throws InterruptedException { + nodeIds.clear(); + + final NodeIdentifier id1 = createNodeIdentifier(); + final NodeIdentifier id2 = createNodeIdentifier(); + final NodeIdentifier id3 = createNodeIdentifier(); + nodeIds.add(id1); + nodeIds.add(id2); + nodeIds.add(id3); + + when(clusterCoordinator.getLocalNodeIdentifier()).thenReturn(null); + + final AsyncLoadBalanceClientRegistry registry = mock(AsyncLoadBalanceClientRegistry.class); + queue = new SocketLoadBalancedFlowFileQueue("unit-test", new NopConnectionEventListener(), mock(ProcessScheduler.class), flowFileRepo, provRepo, + contentRepo, claimManager, clusterCoordinator, registry, swapManager, 10000, eventReporter); + + queue.setFlowFilePartitioner(new RoundRobinPartitioner()); + + // Queue up data without knowing the local node id.
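+ // With no local node identifier available yet, nothing should land on the local partition; the assertions below expect the six FlowFiles to be spread round-robin across the three remote partitions, two apiece.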
+ final Map attributes = new HashMap<>(); + for (int i=0; i < 6; i++) { + attributes.put("i", String.valueOf(i)); + queue.put(new MockFlowFileRecord(attributes, 0)); + } + + for (int i=0; i < 3; i++) { + assertEquals(2, queue.getPartition(i).size().getObjectCount()); + } + + assertEquals(0, queue.getLocalPartition().size().getObjectCount()); + + when(clusterCoordinator.getLocalNodeIdentifier()).thenReturn(id1); + clusterTopologyEventListener.onLocalNodeIdentifierSet(id1); + + assertPartitionSizes(new int[] {2, 2, 2}); + + while (queue.getLocalPartition().size().getObjectCount() != 2) { + Thread.sleep(10L); + } + } + + private void assertPartitionSizes(final int[] expectedSizes) { + final int[] partitionSizes = new int[queue.getPartitionCount()]; + while (!Arrays.equals(expectedSizes, partitionSizes)) { + try { + Thread.sleep(10L); + } catch (InterruptedException e) { + Assert.fail("Interrupted"); + } + + for (int i = 0; i < partitionSizes.length; i++) { + partitionSizes[i] = queue.getPartition(i).size().getObjectCount(); + } + } + } + + + private static class StaticFlowFilePartitioner implements FlowFilePartitioner { + private final int partitionIndex; + + public StaticFlowFilePartitioner(final int partition) { + this.partitionIndex = partition; + } + + @Override + public QueuePartition getPartition(final FlowFileRecord flowFile, final QueuePartition[] partitions, final QueuePartition localPartition) { + return partitions[partitionIndex]; + } + + @Override + public boolean isRebalanceOnClusterResize() { + return false; + } + + @Override + public boolean isRebalanceOnFailure() { + return false; + } + } + + private static class FlowFileSizePartitioner implements FlowFilePartitioner { + @Override + public QueuePartition getPartition(final FlowFileRecord flowFile, final QueuePartition[] partitions, final QueuePartition localPartition) { + return partitions[(int) flowFile.getSize()]; + } + + @Override + public boolean isRebalanceOnClusterResize() { + return false; + } + + @Override + public boolean isRebalanceOnFailure() { + return false; + } + } + + private static class StaticSequencePartitioner implements FlowFilePartitioner { + private final int[] partitionIndices; + private final boolean requireRebalance; + private int index = 0; + + public StaticSequencePartitioner(final int[] partitions, final boolean requireRebalance) { + this.partitionIndices = partitions; + this.requireRebalance = requireRebalance; + } + + @Override + public QueuePartition getPartition(final FlowFileRecord flowFile, final QueuePartition[] partitions, final QueuePartition localPartition) { + final int partitionIndex = partitionIndices[index++]; + return partitions[partitionIndex]; + } + + @Override + public boolean isRebalanceOnClusterResize() { + return requireRebalance; + } + + @Override + public boolean isRebalanceOnFailure() { + return false; + } + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestSwappablePriorityQueue.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestSwappablePriorityQueue.java new file mode 100644 index 000000000000..71ad257e1bb7 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/TestSwappablePriorityQueue.java @@ -0,0 +1,471 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * 
contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.nifi.controller.MockFlowFileRecord; +import org.apache.nifi.controller.MockSwapManager; +import org.apache.nifi.controller.queue.DropFlowFileAction; +import org.apache.nifi.controller.queue.DropFlowFileRequest; +import org.apache.nifi.controller.queue.FlowFileQueue; +import org.apache.nifi.controller.queue.QueueSize; +import org.apache.nifi.controller.queue.SwappablePriorityQueue; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.events.EventReporter; +import org.apache.nifi.flowfile.FlowFilePrioritizer; +import org.apache.nifi.util.MockFlowFile; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +public class TestSwappablePriorityQueue { + + private MockSwapManager swapManager; + private final EventReporter eventReporter = EventReporter.NO_OP; + private final FlowFileQueue flowFileQueue = Mockito.mock(FlowFileQueue.class); + private final DropFlowFileAction dropAction = (flowFiles, requestor) -> { + return new QueueSize(flowFiles.size(), flowFiles.stream().mapToLong(FlowFileRecord::getSize).sum()); + }; + + private SwappablePriorityQueue queue; + + @Before + public void setup() { + swapManager = new MockSwapManager(); + + when(flowFileQueue.getIdentifier()).thenReturn("unit-test"); + queue = new SwappablePriorityQueue(swapManager, 10000, eventReporter, flowFileQueue, dropAction, "local"); + } + + + @Test + public void testPrioritizer() { + final FlowFilePrioritizer prioritizer = (o1, o2) -> Long.compare(o1.getId(), o2.getId()); + queue.setPriorities(Collections.singletonList(prioritizer)); + + for (int i = 0; i < 5000; i++) { + queue.put(new MockFlowFile(i)); + } + + final Set expiredRecords = new HashSet<>(); + for (int i = 0; i < 5000; i++) { + final FlowFileRecord polled = queue.poll(expiredRecords, 500000L); + assertEquals(i, polled.getId()); + } + + // We can add flowfiles in reverse order (highest ID first) and we should still get the same order back when polling + for (int i = 0; i < 5000; i++) { + queue.put(new MockFlowFile(5000 - i)); + } + for (int i = 0; i < 5000; i++) { + final FlowFileRecord polled = queue.poll(expiredRecords, 500000L); + // ID's will start at 1, since the last FlowFile added will have ID of 
5000 - 4999 + assertEquals(i + 1, polled.getId()); + } + + // Add FlowFiles again, then change prioritizer and ensure that the order is updated + for (int i = 0; i < 5000; i++) { + queue.put(new MockFlowFile(i)); + } + + final FlowFilePrioritizer reversePrioritizer = (o1, o2) -> Long.compare(o2.getId(), o1.getId()); + queue.setPriorities(Collections.singletonList(reversePrioritizer)); + + for (int i = 0; i < 5000; i++) { + final FlowFileRecord polled = queue.poll(expiredRecords, 500000L); + // ID's will start at 4999, since the last FlowFile added will have ID of 4999 (i < 5000, not i <= 5000). + assertEquals(5000 - i - 1, polled.getId()); + } + } + + @Test + public void testPollWithOnlyExpiredFlowFile() { + final FlowFileRecord expiredFlowFile = mock(FlowFileRecord.class); + when(expiredFlowFile.getEntryDate()).thenReturn(System.currentTimeMillis() - 5000L); + queue.put(expiredFlowFile); + + final Set expiredRecords = new HashSet<>(); + final FlowFileRecord polled = queue.poll(expiredRecords, 4999); + assertNull(polled); + + assertEquals(1, expiredRecords.size()); + final FlowFileRecord expired = expiredRecords.iterator().next(); + assertSame(expiredFlowFile, expired); + } + + @Test + public void testPollWithExpiredAndUnexpired() { + final SwappablePriorityQueue queue = new SwappablePriorityQueue(swapManager, 100, eventReporter, flowFileQueue, dropAction, "local"); + + final FlowFileRecord expiredFlowFile = mock(FlowFileRecord.class); + when(expiredFlowFile.getEntryDate()).thenReturn(System.currentTimeMillis() - 5000L); + queue.put(expiredFlowFile); + + final FlowFileRecord unexpiredFlowFile = mock(FlowFileRecord.class); + when(unexpiredFlowFile.getEntryDate()).thenReturn(System.currentTimeMillis() + 500000L); + queue.put(unexpiredFlowFile); + + final Set expiredRecords = new HashSet<>(); + final FlowFileRecord polled = queue.poll(expiredRecords, 4999); + assertSame(unexpiredFlowFile, polled); + + assertEquals(1, expiredRecords.size()); + final FlowFileRecord expired = expiredRecords.iterator().next(); + assertSame(expiredFlowFile, expired); + } + + @Test + public void testEmpty() { + assertTrue(queue.isEmpty()); + assertTrue(queue.isActiveQueueEmpty()); + + for (int i = 0; i < 9; i++) { + queue.put(new MockFlowFileRecord()); + assertFalse(queue.isEmpty()); + assertFalse(queue.isActiveQueueEmpty()); + } + + queue.put(new MockFlowFileRecord()); + assertFalse(queue.isEmpty()); + assertFalse(queue.isActiveQueueEmpty()); + + final Set expiredRecords = new HashSet<>(); + final FlowFileRecord polled = queue.poll(expiredRecords, 500000); + assertNotNull(polled); + assertTrue(expiredRecords.isEmpty()); + + assertFalse(queue.isEmpty()); + assertFalse(queue.isActiveQueueEmpty()); + + // queue is still full because FlowFile has not yet been acknowledged. + queue.acknowledge(polled); + + // FlowFile has been acknowledged; queue should no longer be full. 
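+ // Nine of the ten FlowFiles are still queued, so the queue (and its active queue) should remain non-empty.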
+ assertFalse(queue.isEmpty()); + assertFalse(queue.isActiveQueueEmpty()); + } + + @Test + public void testSwapOutOccurs() { + for (int i = 0; i < 10000; i++) { + queue.put(new MockFlowFileRecord()); + assertEquals(0, swapManager.swapOutCalledCount); + assertEquals(i + 1, queue.size().getObjectCount()); + assertEquals(i + 1, queue.size().getByteCount()); + } + + for (int i = 0; i < 9999; i++) { + queue.put(new MockFlowFileRecord()); + assertEquals(0, swapManager.swapOutCalledCount); + assertEquals(i + 10001, queue.size().getObjectCount()); + assertEquals(i + 10001, queue.size().getByteCount()); + } + + queue.put(new MockFlowFileRecord(1000)); + assertEquals(1, swapManager.swapOutCalledCount); + assertEquals(20000, queue.size().getObjectCount()); + assertEquals(20999, queue.size().getByteCount()); + + assertEquals(10000, queue.getQueueDiagnostics().getActiveQueueSize().getObjectCount()); + } + + @Test + public void testLowestPrioritySwappedOutFirst() { + final List prioritizers = new ArrayList<>(); + prioritizers.add((o1, o2) -> Long.compare(o1.getSize(), o2.getSize())); + queue.setPriorities(prioritizers); + + long maxSize = 20000; + for (int i = 1; i <= 20000; i++) { + queue.put(new MockFlowFileRecord(maxSize - i)); + } + + assertEquals(1, swapManager.swapOutCalledCount); + assertEquals(20000, queue.size().getObjectCount()); + + assertEquals(10000, queue.getQueueDiagnostics().getActiveQueueSize().getObjectCount()); + final List flowFiles = queue.poll(Integer.MAX_VALUE, new HashSet(), 500000); + assertEquals(10000, flowFiles.size()); + for (int i = 0; i < 10000; i++) { + assertEquals(i, flowFiles.get(i).getSize()); + } + } + + + @Test + public void testSwapIn() { + for (int i = 1; i <= 20000; i++) { + queue.put(new MockFlowFileRecord()); + } + + assertEquals(1, swapManager.swappedOut.size()); + queue.put(new MockFlowFileRecord()); + assertEquals(1, swapManager.swappedOut.size()); + + final Set exp = new HashSet<>(); + for (int i = 0; i < 9999; i++) { + final FlowFileRecord flowFile = queue.poll(exp, 500000); + assertNotNull(flowFile); + assertEquals(1, queue.getQueueDiagnostics().getUnacknowledgedQueueSize().getObjectCount()); + assertEquals(1, queue.getQueueDiagnostics().getUnacknowledgedQueueSize().getByteCount()); + + queue.acknowledge(Collections.singleton(flowFile)); + assertEquals(0, queue.getQueueDiagnostics().getUnacknowledgedQueueSize().getObjectCount()); + assertEquals(0, queue.getQueueDiagnostics().getUnacknowledgedQueueSize().getByteCount()); + } + + assertEquals(0, swapManager.swapInCalledCount); + assertEquals(1, queue.getQueueDiagnostics().getActiveQueueSize().getObjectCount()); + assertNotNull(queue.poll(exp, 500000)); + + assertEquals(0, swapManager.swapInCalledCount); + assertEquals(0, queue.getQueueDiagnostics().getActiveQueueSize().getObjectCount()); + + assertEquals(1, swapManager.swapOutCalledCount); + + assertNotNull(queue.poll(exp, 500000)); // this should trigger a swap-in of 10,000 records, and then pull 1 off the top. 
+ assertEquals(1, swapManager.swapInCalledCount); + assertEquals(9999, queue.getQueueDiagnostics().getActiveQueueSize().getObjectCount()); + + assertTrue(swapManager.swappedOut.isEmpty()); + + queue.poll(exp, 500000); + } + + @Test + public void testSwapInWhenThresholdIsLessThanSwapSize() { + // create a queue where the swap threshold is less than 10k + queue = new SwappablePriorityQueue(swapManager, 1000, eventReporter, flowFileQueue, dropAction, null); + + for (int i = 1; i <= 20000; i++) { + queue.put(new MockFlowFileRecord()); + } + + assertEquals(1, swapManager.swappedOut.size()); + queue.put(new MockFlowFileRecord()); + assertEquals(1, swapManager.swappedOut.size()); + + final Set exp = new HashSet<>(); + + // At this point there should be: + // 1k flow files in the active queue + // 9,001 flow files in the swap queue + // 10k flow files swapped to disk + + for (int i = 0; i < 999; i++) { // + final FlowFileRecord flowFile = queue.poll(exp, 500000); + assertNotNull(flowFile); + assertEquals(1, queue.getQueueDiagnostics().getUnacknowledgedQueueSize().getObjectCount()); + assertEquals(1, queue.getQueueDiagnostics().getUnacknowledgedQueueSize().getByteCount()); + + queue.acknowledge(Collections.singleton(flowFile)); + assertEquals(0, queue.getQueueDiagnostics().getUnacknowledgedQueueSize().getObjectCount()); + assertEquals(0, queue.getQueueDiagnostics().getUnacknowledgedQueueSize().getByteCount()); + } + + assertEquals(0, swapManager.swapInCalledCount); + assertEquals(1, queue.getQueueDiagnostics().getActiveQueueSize().getObjectCount()); + assertNotNull(queue.poll(exp, 500000)); + + assertEquals(0, swapManager.swapInCalledCount); + assertEquals(0, queue.getQueueDiagnostics().getActiveQueueSize().getObjectCount()); + + assertEquals(1, swapManager.swapOutCalledCount); + + assertNotNull(queue.poll(exp, 500000)); // this should trigger a swap-in of 10,000 records, and then pull 1 off the top. + assertEquals(1, swapManager.swapInCalledCount); + assertEquals(9999, queue.getQueueDiagnostics().getActiveQueueSize().getObjectCount()); + + assertTrue(swapManager.swappedOut.isEmpty()); + + queue.poll(exp, 500000); + } + + @Test + public void testQueueCountsUpdatedWhenIncompleteSwapFile() { + for (int i = 1; i <= 20000; i++) { + queue.put(new MockFlowFileRecord()); + } + + assertEquals(20000, queue.size().getObjectCount()); + assertEquals(20000, queue.size().getByteCount()); + + assertEquals(1, swapManager.swappedOut.size()); + + // when we swap in, cause an IncompleteSwapFileException to be + // thrown and contain only 9,999 of the 10,000 FlowFiles + swapManager.enableIncompleteSwapFileException(9999); + final Set expired = Collections.emptySet(); + FlowFileRecord flowFile; + + for (int i = 0; i < 10000; i++) { + flowFile = queue.poll(expired, 500000); + assertNotNull(flowFile); + queue.acknowledge(Collections.singleton(flowFile)); + } + + // 10,000 FlowFiles on queue - all swapped out + assertEquals(10000, queue.size().getObjectCount()); + assertEquals(10000, queue.size().getByteCount()); + assertEquals(1, swapManager.swappedOut.size()); + assertEquals(0, swapManager.swapInCalledCount); + + // Trigger swap in. 
This will remove 1 FlowFile from queue, leaving 9,999 but + // on swap in, we will get only 9,999 FlowFiles put onto the queue, and the queue size will + // be decremented by 10,000 (because the Swap File's header tells us that there are 10K + // FlowFiles, even though only 9999 are in the swap file) + flowFile = queue.poll(expired, 500000); + assertNotNull(flowFile); + queue.acknowledge(Collections.singleton(flowFile)); + + // size should be 9,998 because we lost 1 on Swap In, and then we pulled one above. + assertEquals(9998, queue.size().getObjectCount()); + assertEquals(9998, queue.size().getByteCount()); + assertEquals(0, swapManager.swappedOut.size()); + assertEquals(1, swapManager.swapInCalledCount); + + for (int i = 0; i < 9998; i++) { + flowFile = queue.poll(expired, 500000); + assertNotNull("Null FlowFile when i = " + i, flowFile); + queue.acknowledge(Collections.singleton(flowFile)); + + final QueueSize queueSize = queue.size(); + assertEquals(9998 - i - 1, queueSize.getObjectCount()); + assertEquals(9998 - i - 1, queueSize.getByteCount()); + } + + final QueueSize queueSize = queue.size(); + assertEquals(0, queueSize.getObjectCount()); + assertEquals(0L, queueSize.getByteCount()); + + flowFile = queue.poll(expired, 500000); + assertNull(flowFile); + } + + @Test(timeout = 120000) + public void testDropSwappedFlowFiles() { + for (int i = 1; i <= 30000; i++) { + queue.put(new MockFlowFileRecord()); + } + + assertEquals(2, swapManager.swappedOut.size()); + final DropFlowFileRequest request = new DropFlowFileRequest("Unit Test"); + + queue.dropFlowFiles(request, "Unit Test"); + + assertEquals(0, queue.size().getObjectCount()); + assertEquals(0, queue.size().getByteCount()); + assertEquals(0, swapManager.swappedOut.size()); + assertEquals(2, swapManager.swapInCalledCount); + } + + + @Test(timeout = 5000) + public void testGetActiveFlowFilesReturnsAllActiveFlowFiles() throws InterruptedException { + for (int i = 0; i < 9999; i++) { + queue.put(new MockFlowFileRecord()); + } + + final List active = queue.getActiveFlowFiles(); + assertNotNull(active); + assertEquals(9999, active.size()); + } + + + @Test(timeout = 5000) + public void testListFlowFilesResultsLimited() throws InterruptedException { + for (int i = 0; i < 30050; i++) { + queue.put(new MockFlowFileRecord()); + } + + final List activeFlowFiles = queue.getActiveFlowFiles(); + assertNotNull(activeFlowFiles); + assertEquals(10000, activeFlowFiles.size()); + } + + + @Test + public void testOOMEFollowedBySuccessfulSwapIn() { + final List flowFiles = new ArrayList<>(); + for (int i = 0; i < 50000; i++) { + flowFiles.add(new MockFlowFileRecord()); + } + + queue.putAll(flowFiles); + + swapManager.failSwapInAfterN = 2; + swapManager.setSwapInFailure(new OutOfMemoryError("Intentional OOME for unit test")); + + final Set expiredRecords = new HashSet<>(); + for (int i = 0; i < 30000; i++) { + final FlowFileRecord polled = queue.poll(expiredRecords, 500000); + assertNotNull(polled); + } + + // verify that unexpected ERROR's are handled in such a way that we keep retrying + for (int i = 0; i < 3; i++) { + try { + queue.poll(expiredRecords, 500000); + Assert.fail("Expected OOME to be thrown"); + } catch (final OutOfMemoryError oome) { + // expected + } + } + + // verify that unexpected Runtime Exceptions are handled in such a way that we keep retrying + swapManager.setSwapInFailure(new NullPointerException("Intentional OOME for unit test")); + + for (int i = 0; i < 3; i++) { + try { + queue.poll(expiredRecords, 500000); + 
Assert.fail("Expected NPE to be thrown"); + } catch (final NullPointerException npe) { + // expected + } + } + + swapManager.failSwapInAfterN = -1; + + for (int i = 0; i < 20000; i++) { + final FlowFileRecord polled = queue.poll(expiredRecords, 500000); + assertNotNull(polled); + } + + queue.acknowledge(flowFiles); + assertNull(queue.poll(expiredRecords, 500000)); + assertEquals(0, queue.getQueueDiagnostics().getActiveQueueSize().getObjectCount()); + assertEquals(0, queue.size().getObjectCount()); + + assertTrue(swapManager.swappedOut.isEmpty()); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/client/async/nio/TestLoadBalanceSession.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/client/async/nio/TestLoadBalanceSession.java new file mode 100644 index 000000000000..efa5d738eab7 --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/client/async/nio/TestLoadBalanceSession.java @@ -0,0 +1,273 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.nifi.controller.queue.clustered.client.async.nio; + +import org.apache.nifi.controller.MockFlowFileRecord; +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.queue.clustered.FlowFileContentAccess; +import org.apache.nifi.controller.queue.clustered.SimpleLimitThreshold; +import org.apache.nifi.controller.queue.clustered.client.StandardLoadBalanceFlowFileCodec; +import org.apache.nifi.controller.queue.clustered.client.async.TransactionFailureCallback; +import org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.Socket; +import java.nio.channels.SocketChannel; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.zip.CRC32; +import java.util.zip.CheckedOutputStream; +import java.util.zip.Checksum; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class TestLoadBalanceSession { + + private final TransactionFailureCallback NOP_FAILURE_CALLBACK = new TransactionFailureCallback() { + @Override + public void onTransactionFailed(final List flowFiles, final Exception cause, final TransactionPhase transactionPhase) { + } + + @Override + public boolean isRebalanceOnFailure() { + return false; + } + }; + + private ByteArrayOutputStream received; + private ServerSocket serverSocket; + private int port; + + @Before + public void setup() throws IOException { + received = new ByteArrayOutputStream(); + + serverSocket = new ServerSocket(0); + port = serverSocket.getLocalPort(); + + final Thread thread = new Thread(new Runnable() { + @Override + public void run() { + try (final Socket socket = serverSocket.accept()) { + final InputStream in = socket.getInputStream(); + int data; + + socket.getOutputStream().write(LoadBalanceProtocolConstants.VERSION_ACCEPTED); + socket.getOutputStream().write(LoadBalanceProtocolConstants.SPACE_AVAILABLE); + socket.getOutputStream().write(LoadBalanceProtocolConstants.CONFIRM_CHECKSUM); + socket.getOutputStream().write(LoadBalanceProtocolConstants.CONFIRM_COMPLETE_TRANSACTION); + + while ((data = in.read()) != -1) { + received.write(data); + } + } catch (IOException e) { + e.printStackTrace(); + Assert.fail(); + } + } + }); + thread.setDaemon(true); + thread.start(); + } + + @After + public void shutdown() throws IOException { + serverSocket.close(); + } + + @Test(timeout = 10000) + public void testSunnyCase() throws InterruptedException, IOException { + final Queue flowFiles = new LinkedList<>(); + final FlowFileRecord flowFile1 = new MockFlowFileRecord(5); + final FlowFileRecord flowFile2 = new MockFlowFileRecord(8); + flowFiles.offer(flowFile1); + flowFiles.offer(flowFile2); + + final Map contentMap = new HashMap<>(); + contentMap.put(flowFile1, new ByteArrayInputStream("hello".getBytes())); + contentMap.put(flowFile2, new ByteArrayInputStream("good-bye".getBytes())); + + final FlowFileContentAccess contentAccess = contentMap::get; + + final 
RegisteredPartition partition = new RegisteredPartition("unit-test-connection", () -> false, + flowFiles::poll, NOP_FAILURE_CALLBACK, (ff) -> {}, () -> LoadBalanceCompression.DO_NOT_COMPRESS, () -> true); + + final SocketChannel socketChannel = SocketChannel.open(new InetSocketAddress("localhost", port)); + + socketChannel.configureBlocking(false); + final PeerChannel peerChannel = new PeerChannel(socketChannel, null, "unit-test"); + final LoadBalanceSession transaction = new LoadBalanceSession(partition, contentAccess, new StandardLoadBalanceFlowFileCodec(), peerChannel, 30000, + new SimpleLimitThreshold(100, 10_000_000)); + + Thread.sleep(100L); + + while (transaction.communicate()) { + } + + assertTrue(transaction.isComplete()); + socketChannel.close(); + + final Checksum expectedChecksum = new CRC32(); + final ByteArrayOutputStream expectedOut = new ByteArrayOutputStream(); + expectedOut.write(1); // Protocol Version + + final DataOutputStream expectedDos = new DataOutputStream(new CheckedOutputStream(expectedOut, expectedChecksum)); + expectedDos.writeUTF("unit-test-connection"); + + expectedDos.write(LoadBalanceProtocolConstants.CHECK_SPACE); + expectedDos.write(LoadBalanceProtocolConstants.MORE_FLOWFILES); + expectedDos.writeInt(68); // metadata length + expectedDos.writeInt(1); // 1 attribute + expectedDos.writeInt(4); // length of attribute + expectedDos.write("uuid".getBytes()); + expectedDos.writeInt(flowFile1.getAttribute("uuid").length()); + expectedDos.write(flowFile1.getAttribute("uuid").getBytes()); + expectedDos.writeLong(flowFile1.getLineageStartDate()); // lineage start date + expectedDos.writeLong(flowFile1.getEntryDate()); // entry date + expectedDos.write(LoadBalanceProtocolConstants.DATA_FRAME_FOLLOWS); + expectedDos.writeShort(5); + expectedDos.write("hello".getBytes()); + expectedDos.write(LoadBalanceProtocolConstants.NO_DATA_FRAME); + + expectedDos.write(LoadBalanceProtocolConstants.MORE_FLOWFILES); + expectedDos.writeInt(68); // metadata length + expectedDos.writeInt(1); // 1 attribute + expectedDos.writeInt(4); // length of attribute + expectedDos.write("uuid".getBytes()); + expectedDos.writeInt(flowFile2.getAttribute("uuid").length()); + expectedDos.write(flowFile2.getAttribute("uuid").getBytes()); + expectedDos.writeLong(flowFile2.getLineageStartDate()); // lineage start date + expectedDos.writeLong(flowFile2.getEntryDate()); // entry date + expectedDos.write(LoadBalanceProtocolConstants.DATA_FRAME_FOLLOWS); + expectedDos.writeShort(8); + expectedDos.write("good-bye".getBytes()); + expectedDos.write(LoadBalanceProtocolConstants.NO_DATA_FRAME); + + expectedDos.write(LoadBalanceProtocolConstants.NO_MORE_FLOWFILES); + expectedDos.writeLong(expectedChecksum.getValue()); + expectedDos.write(LoadBalanceProtocolConstants.COMPLETE_TRANSACTION); + + final byte[] expectedSent = expectedOut.toByteArray(); + + while (received.size() < expectedSent.length) { + Thread.sleep(10L); + } + final byte[] dataSent = received.toByteArray(); + + assertArrayEquals(expectedSent, dataSent); + + assertEquals(Arrays.asList(flowFile1, flowFile2), transaction.getFlowFilesSent()); + } + + + @Test(timeout = 10000) + public void testLargeContent() throws InterruptedException, IOException { + final byte[] content = new byte[66000]; + for (int i=0; i < 66000; i++) { + content[i] = 'A'; + } + + final Queue flowFiles = new LinkedList<>(); + final FlowFileRecord flowFile1 = new MockFlowFileRecord(content.length); + flowFiles.offer(flowFile1); + + final Map contentMap = new HashMap<>(); + 
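+ // Back the FlowFile with the 66,000 bytes of content; being larger than a single data frame, it should be sent as two DATA_FRAME_FOLLOWS frames, as reconstructed in the expected output below.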
contentMap.put(flowFile1, new ByteArrayInputStream(content)); + + final FlowFileContentAccess contentAccess = contentMap::get; + + final RegisteredPartition partition = new RegisteredPartition("unit-test-connection", () -> false, + flowFiles::poll, NOP_FAILURE_CALLBACK, (ff) -> {}, () -> LoadBalanceCompression.DO_NOT_COMPRESS, () -> true); + + final SocketChannel socketChannel = SocketChannel.open(new InetSocketAddress("localhost", port)); + + socketChannel.configureBlocking(false); + final PeerChannel peerChannel = new PeerChannel(socketChannel, null, "unit-test"); + final LoadBalanceSession transaction = new LoadBalanceSession(partition, contentAccess, new StandardLoadBalanceFlowFileCodec(), peerChannel, 30000, + new SimpleLimitThreshold(100, 10_000_000)); + + Thread.sleep(100L); + + while (transaction.communicate()) { + } + + socketChannel.close(); + + final Checksum expectedChecksum = new CRC32(); + final ByteArrayOutputStream expectedOut = new ByteArrayOutputStream(); + expectedOut.write(1); // Protocol Version + + final DataOutputStream expectedDos = new DataOutputStream(new CheckedOutputStream(expectedOut, expectedChecksum)); + + expectedDos.writeUTF("unit-test-connection"); + + expectedDos.write(LoadBalanceProtocolConstants.CHECK_SPACE); + expectedDos.write(LoadBalanceProtocolConstants.MORE_FLOWFILES); + expectedDos.writeInt(68); // metadata length + expectedDos.writeInt(1); // 1 attribute + expectedDos.writeInt(4); // length of attribute + expectedDos.write("uuid".getBytes()); + expectedDos.writeInt(flowFile1.getAttribute("uuid").length()); + expectedDos.write(flowFile1.getAttribute("uuid").getBytes()); + expectedDos.writeLong(flowFile1.getLineageStartDate()); // lineage start date + expectedDos.writeLong(flowFile1.getEntryDate()); // entry date + + // first data frame + expectedDos.write(LoadBalanceProtocolConstants.DATA_FRAME_FOLLOWS); + expectedDos.writeShort(LoadBalanceSession.MAX_DATA_FRAME_SIZE); + expectedDos.write(Arrays.copyOfRange(content, 0, LoadBalanceSession.MAX_DATA_FRAME_SIZE)); + + // second data frame + expectedDos.write(LoadBalanceProtocolConstants.DATA_FRAME_FOLLOWS); + expectedDos.writeShort(content.length - LoadBalanceSession.MAX_DATA_FRAME_SIZE); + expectedDos.write(Arrays.copyOfRange(content, LoadBalanceSession.MAX_DATA_FRAME_SIZE, content.length)); + expectedDos.write(LoadBalanceProtocolConstants.NO_DATA_FRAME); + + expectedDos.write(LoadBalanceProtocolConstants.NO_MORE_FLOWFILES); + expectedDos.writeLong(expectedChecksum.getValue()); + expectedDos.write(LoadBalanceProtocolConstants.COMPLETE_TRANSACTION); + + final byte[] expectedSent = expectedOut.toByteArray(); + + while (received.size() < expectedSent.length) { + Thread.sleep(10L); + } + final byte[] dataSent = received.toByteArray(); + + assertArrayEquals(expectedSent, dataSent); + + assertEquals(Arrays.asList(flowFile1), transaction.getFlowFilesSent()); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/server/TestStandardLoadBalanceProtocol.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/server/TestStandardLoadBalanceProtocol.java new file mode 100644 index 000000000000..d020c12f552c --- /dev/null +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/queue/clustered/server/TestStandardLoadBalanceProtocol.java @@ -0,0 +1,656 @@ +/* + * Licensed to 
the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.nifi.controller.queue.clustered.server; + +import org.apache.nifi.connectable.Connection; +import org.apache.nifi.controller.FlowController; +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.queue.LoadBalancedFlowFileQueue; +import org.apache.nifi.controller.repository.ContentRepository; +import org.apache.nifi.controller.repository.FlowFileRecord; +import org.apache.nifi.controller.repository.FlowFileRepository; +import org.apache.nifi.controller.repository.RepositoryRecord; +import org.apache.nifi.controller.repository.claim.ContentClaim; +import org.apache.nifi.controller.repository.claim.ResourceClaim; +import org.apache.nifi.provenance.ProvenanceEventRecord; +import org.apache.nifi.provenance.ProvenanceEventType; +import org.apache.nifi.provenance.ProvenanceRepository; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.EOFException; +import java.io.IOException; +import java.io.OutputStream; +import java.io.PipedInputStream; +import java.io.PipedOutputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.zip.CRC32; +import java.util.zip.CheckedOutputStream; +import java.util.zip.Checksum; + +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.ABORT_TRANSACTION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.CHECK_SPACE; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.COMPLETE_TRANSACTION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.CONFIRM_CHECKSUM; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.CONFIRM_COMPLETE_TRANSACTION; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.DATA_FRAME_FOLLOWS; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.MORE_FLOWFILES; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.NO_DATA_FRAME; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.NO_MORE_FLOWFILES; +import static 
org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.REJECT_CHECKSUM; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.SKIP_SPACE_CHECK; +import static org.apache.nifi.controller.queue.clustered.protocol.LoadBalanceProtocolConstants.SPACE_AVAILABLE; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyCollection; +import static org.mockito.Matchers.anyList; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.when; + +public class TestStandardLoadBalanceProtocol { + private final LoadBalanceAuthorizer ALWAYS_AUTHORIZED = nodeIds -> {}; + private FlowFileRepository flowFileRepo; + private ContentRepository contentRepo; + private ProvenanceRepository provenanceRepo; + private FlowController flowController; + private LoadBalancedFlowFileQueue flowFileQueue; + + private List flowFileRepoUpdateRecords; + private List provRepoUpdateRecords; + private List flowFileQueuePutRecords; + private List flowFileQueueReceiveRecords; + + private ConcurrentMap claimContents; + + + @Before + public void setup() throws IOException { + flowFileQueuePutRecords = new ArrayList<>(); + flowFileQueueReceiveRecords = new ArrayList<>(); + flowFileRepoUpdateRecords = new ArrayList<>(); + provRepoUpdateRecords = new ArrayList<>(); + + flowFileRepo = Mockito.mock(FlowFileRepository.class); + contentRepo = Mockito.mock(ContentRepository.class); + provenanceRepo = Mockito.mock(ProvenanceRepository.class); + flowController = Mockito.mock(FlowController.class); + claimContents = new ConcurrentHashMap<>(); + + Mockito.doAnswer(new Answer() { + @Override + public ContentClaim answer(final InvocationOnMock invocation) throws Throwable { + final ContentClaim contentClaim = Mockito.mock(ContentClaim.class); + final ResourceClaim resourceClaim = Mockito.mock(ResourceClaim.class); + when(contentClaim.getResourceClaim()).thenReturn(resourceClaim); + return contentClaim; + } + }).when(contentRepo).create(Mockito.anyBoolean()); + + Mockito.doAnswer(new Answer() { + @Override + public OutputStream answer(final InvocationOnMock invocation) throws Throwable { + final ContentClaim contentClaim = invocation.getArgumentAt(0, ContentClaim.class); + + final ByteArrayOutputStream baos = new ByteArrayOutputStream() { + @Override + public void close() throws IOException { + super.close(); + claimContents.put(contentClaim, toByteArray()); + } + }; + + return baos; + } + }).when(contentRepo).write(Mockito.any(ContentClaim.class)); + + final Connection connection = Mockito.mock(Connection.class); + when(flowController.getConnection(Mockito.anyString())).thenReturn(connection); + + flowFileQueue = Mockito.mock(LoadBalancedFlowFileQueue.class); + when(flowFileQueue.getLoadBalanceCompression()).thenReturn(LoadBalanceCompression.DO_NOT_COMPRESS); + when(connection.getFlowFileQueue()).thenReturn(flowFileQueue); + + Mockito.doAnswer(new Answer() { + @Override + public Void answer(final InvocationOnMock invocation) throws Throwable { + flowFileQueuePutRecords.addAll(invocation.getArgumentAt(0, Collection.class)); + return null; + } + }).when(flowFileQueue).putAll(anyCollection()); + + Mockito.doAnswer(new Answer() { + @Override + public Void answer(final InvocationOnMock invocation) throws Throwable { + flowFileQueueReceiveRecords.addAll(invocation.getArgumentAt(0, Collection.class)); + return null; + } + 
}).when(flowFileQueue).receiveFromPeer(anyCollection()); + + Mockito.doAnswer(new Answer() { + @Override + public Void answer(final InvocationOnMock invocation) throws Throwable { + flowFileRepoUpdateRecords.addAll(invocation.getArgumentAt(0, Collection.class)); + return null; + } + }).when(flowFileRepo).updateRepository(anyCollection()); + + Mockito.doAnswer(new Answer() { + @Override + public Void answer(final InvocationOnMock invocation) throws Throwable { + provRepoUpdateRecords.addAll(invocation.getArgumentAt(0, Collection.class)); + return null; + } + }).when(provenanceRepo).registerEvents(anyCollection()); + } + + + @Test + public void testSimpleFlowFileTransaction() throws IOException { + final StandardLoadBalanceProtocol protocol = new StandardLoadBalanceProtocol(flowFileRepo, contentRepo, provenanceRepo, flowController, ALWAYS_AUTHORIZED); + + final PipedInputStream serverInput = new PipedInputStream(); + final PipedOutputStream serverContentSource = new PipedOutputStream(); + serverInput.connect(serverContentSource); + + final ByteArrayOutputStream serverOutput = new ByteArrayOutputStream(); + + // Write connection ID + final Checksum checksum = new CRC32(); + final OutputStream checkedOutput = new CheckedOutputStream(serverContentSource, checksum); + final DataOutputStream dos = new DataOutputStream(checkedOutput); + dos.writeUTF("unit-test-connection-id"); + + final Map attributes = new HashMap<>(); + attributes.put("a", "A"); + attributes.put("uuid", "unit-test-id"); + attributes.put("b", "B"); + + dos.write(CHECK_SPACE); + dos.write(MORE_FLOWFILES); + writeAttributes(attributes, dos); + writeContent("hello".getBytes(), dos); + dos.write(NO_MORE_FLOWFILES); + + dos.writeLong(checksum.getValue()); + dos.write(COMPLETE_TRANSACTION); + + protocol.receiveFlowFiles(serverInput, serverOutput, "Unit Test", 1, "unit.test"); + + final byte[] serverResponse = serverOutput.toByteArray(); + assertEquals(3, serverResponse.length); + assertEquals(SPACE_AVAILABLE, serverResponse[0]); + assertEquals(CONFIRM_CHECKSUM, serverResponse[1]); + assertEquals(CONFIRM_COMPLETE_TRANSACTION, serverResponse[2]); + + assertEquals(1, claimContents.size()); + final byte[] firstFlowFileContent = claimContents.values().iterator().next(); + assertArrayEquals("hello".getBytes(), firstFlowFileContent); + + Mockito.verify(flowFileRepo, times(1)).updateRepository(anyCollection()); + Mockito.verify(provenanceRepo, times(1)).registerEvents(anyList()); + Mockito.verify(flowFileQueue, times(0)).putAll(anyCollection()); + Mockito.verify(flowFileQueue, times(1)).receiveFromPeer(anyCollection()); + } + + @Test + public void testMultipleFlowFiles() throws IOException { + final StandardLoadBalanceProtocol protocol = new StandardLoadBalanceProtocol(flowFileRepo, contentRepo, provenanceRepo, flowController, ALWAYS_AUTHORIZED); + + final PipedInputStream serverInput = new PipedInputStream(); + final PipedOutputStream serverContentSource = new PipedOutputStream(); + serverInput.connect(serverContentSource); + + final ByteArrayOutputStream serverOutput = new ByteArrayOutputStream(); + + // Write connection ID + final Checksum checksum = new CRC32(); + final OutputStream checkedOutput = new CheckedOutputStream(serverContentSource, checksum); + final DataOutputStream dos = new DataOutputStream(checkedOutput); + dos.writeUTF("unit-test-connection-id"); + + final Map attributes = new HashMap<>(); + attributes.put("a", "A"); + attributes.put("uuid", "unit-test-id"); + attributes.put("b", "B"); + + // Send 4 FlowFiles. 
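+ // Each FlowFile is framed as MORE_FLOWFILES followed by its attributes and content; the batch is terminated with NO_MORE_FLOWFILES, the CRC32 checksum, and COMPLETE_TRANSACTION.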
+ dos.write(CHECK_SPACE); + dos.write(MORE_FLOWFILES); + writeAttributes(attributes, dos); + writeContent("hello".getBytes(), dos); + + dos.write(MORE_FLOWFILES); + writeAttributes(Collections.singletonMap("uuid", "unit-test-id-2"), dos); + writeContent(null, dos); + + dos.write(MORE_FLOWFILES); + writeAttributes(Collections.singletonMap("uuid", "unit-test-id-3"), dos); + writeContent("greetings".getBytes(), dos); + + dos.write(MORE_FLOWFILES); + writeAttributes(Collections.singletonMap("uuid", "unit-test-id-4"), dos); + writeContent(new byte[0], dos); + + dos.write(NO_MORE_FLOWFILES); + + dos.writeLong(checksum.getValue()); + dos.write(COMPLETE_TRANSACTION); + + protocol.receiveFlowFiles(serverInput, serverOutput, "Unit Test", 1, "unit.test"); + + final byte[] serverResponse = serverOutput.toByteArray(); + assertEquals(3, serverResponse.length); + assertEquals(SPACE_AVAILABLE, serverResponse[0]); + assertEquals(CONFIRM_CHECKSUM, serverResponse[1]); + assertEquals(CONFIRM_COMPLETE_TRANSACTION, serverResponse[2]); + + assertEquals(1, claimContents.size()); + final byte[] bytes = claimContents.values().iterator().next(); + assertTrue(Arrays.equals("hellogreetings".getBytes(), bytes) || Arrays.equals("greetingshello".getBytes(), bytes)); + + assertEquals(4, flowFileRepoUpdateRecords.size()); + assertEquals(4, provRepoUpdateRecords.size()); + assertEquals(0, flowFileQueuePutRecords.size()); + assertEquals(4, flowFileQueueReceiveRecords.size()); + + assertTrue(provRepoUpdateRecords.stream().allMatch(event -> event.getEventType() == ProvenanceEventType.RECEIVE)); + } + + + @Test + public void testMultipleFlowFilesWithoutCheckingSpace() throws IOException { + final StandardLoadBalanceProtocol protocol = new StandardLoadBalanceProtocol(flowFileRepo, contentRepo, provenanceRepo, flowController, ALWAYS_AUTHORIZED); + + final PipedInputStream serverInput = new PipedInputStream(); + final PipedOutputStream serverContentSource = new PipedOutputStream(); + serverInput.connect(serverContentSource); + + final ByteArrayOutputStream serverOutput = new ByteArrayOutputStream(); + + // Write connection ID + final Checksum checksum = new CRC32(); + final OutputStream checkedOutput = new CheckedOutputStream(serverContentSource, checksum); + final DataOutputStream dos = new DataOutputStream(checkedOutput); + dos.writeUTF("unit-test-connection-id"); + + final Map attributes = new HashMap<>(); + attributes.put("a", "A"); + attributes.put("uuid", "unit-test-id"); + attributes.put("b", "B"); + + // Send 4 FlowFiles. 
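+ // SKIP_SPACE_CHECK means the server is never asked whether space is available, so the expected response below is only two bytes: CONFIRM_CHECKSUM and CONFIRM_COMPLETE_TRANSACTION.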
+ dos.write(SKIP_SPACE_CHECK); + dos.write(MORE_FLOWFILES); + writeAttributes(attributes, dos); + writeContent("hello".getBytes(), dos); + + dos.write(MORE_FLOWFILES); + writeAttributes(Collections.singletonMap("uuid", "unit-test-id-2"), dos); + writeContent(null, dos); + + dos.write(MORE_FLOWFILES); + writeAttributes(Collections.singletonMap("uuid", "unit-test-id-3"), dos); + writeContent("greetings".getBytes(), dos); + + dos.write(MORE_FLOWFILES); + writeAttributes(Collections.singletonMap("uuid", "unit-test-id-4"), dos); + writeContent(new byte[0], dos); + + dos.write(NO_MORE_FLOWFILES); + + dos.writeLong(checksum.getValue()); + dos.write(COMPLETE_TRANSACTION); + + protocol.receiveFlowFiles(serverInput, serverOutput, "Unit Test", 1, "unit.test"); + + final byte[] serverResponse = serverOutput.toByteArray(); + assertEquals(2, serverResponse.length); + assertEquals(CONFIRM_CHECKSUM, serverResponse[0]); + assertEquals(CONFIRM_COMPLETE_TRANSACTION, serverResponse[1]); + + assertEquals(1, claimContents.size()); + final byte[] bytes = claimContents.values().iterator().next(); + assertTrue(Arrays.equals("hellogreetings".getBytes(), bytes) || Arrays.equals("greetingshello".getBytes(), bytes)); + + assertEquals(4, flowFileRepoUpdateRecords.size()); + assertEquals(4, provRepoUpdateRecords.size()); + assertEquals(0, flowFileQueuePutRecords.size()); + assertEquals(4, flowFileQueueReceiveRecords.size()); + + assertTrue(provRepoUpdateRecords.stream().allMatch(event -> event.getEventType() == ProvenanceEventType.RECEIVE)); + } + + @Test + public void testEofExceptionMultipleFlowFiles() throws IOException { + final StandardLoadBalanceProtocol protocol = new StandardLoadBalanceProtocol(flowFileRepo, contentRepo, provenanceRepo, flowController, ALWAYS_AUTHORIZED); + + final PipedInputStream serverInput = new PipedInputStream(); + final PipedOutputStream serverContentSource = new PipedOutputStream(); + serverInput.connect(serverContentSource); + + final ByteArrayOutputStream serverOutput = new ByteArrayOutputStream(); + + // Write connection ID + final Checksum checksum = new CRC32(); + final OutputStream checkedOutput = new CheckedOutputStream(serverContentSource, checksum); + final DataOutputStream dos = new DataOutputStream(checkedOutput); + dos.writeUTF("unit-test-connection-id"); + + final Map attributes = new HashMap<>(); + attributes.put("a", "A"); + attributes.put("uuid", "unit-test-id"); + attributes.put("b", "B"); + + // Send 4 FlowFiles. 
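+ // The stream is closed after the fourth FlowFile, without NO_MORE_FLOWFILES, a checksum, or COMPLETE_TRANSACTION, so the server should hit an EOFException and leave the FlowFile repository, provenance repository, and queue untouched.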
+ dos.write(CHECK_SPACE); + dos.write(MORE_FLOWFILES); + writeAttributes(attributes, dos); + writeContent("hello".getBytes(), dos); + + dos.write(MORE_FLOWFILES); + writeAttributes(Collections.singletonMap("uuid", "unit-test-id-2"), dos); + writeContent(null, dos); + + dos.write(MORE_FLOWFILES); + writeAttributes(Collections.singletonMap("uuid", "unit-test-id-3"), dos); + writeContent("greetings".getBytes(), dos); + + dos.write(MORE_FLOWFILES); + writeAttributes(Collections.singletonMap("uuid", "unit-test-id-4"), dos); + writeContent(new byte[0], dos); + + dos.flush(); + dos.close(); + + try { + protocol.receiveFlowFiles(serverInput, serverOutput, "Unit Test", 1, "unit.test"); + Assert.fail("Expected EOFException but none was thrown"); + } catch (final EOFException eof) { + // expected + } + + final byte[] serverResponse = serverOutput.toByteArray(); + assertEquals(1, serverResponse.length); + assertEquals(SPACE_AVAILABLE, serverResponse[0]); + + assertEquals(1, claimContents.size()); + assertArrayEquals("hellogreetings".getBytes(), claimContents.values().iterator().next()); + + assertEquals(0, flowFileRepoUpdateRecords.size()); + assertEquals(0, provRepoUpdateRecords.size()); + assertEquals(0, flowFileQueuePutRecords.size()); + } + + @Test + public void testBadChecksum() throws IOException { + final StandardLoadBalanceProtocol protocol = new StandardLoadBalanceProtocol(flowFileRepo, contentRepo, provenanceRepo, flowController, ALWAYS_AUTHORIZED); + + final PipedInputStream serverInput = new PipedInputStream(); + final PipedOutputStream serverContentSource = new PipedOutputStream(); + serverInput.connect(serverContentSource); + + final ByteArrayOutputStream serverOutput = new ByteArrayOutputStream(); + + // Write connection ID + final Checksum checksum = new CRC32(); + final OutputStream checkedOutput = new CheckedOutputStream(serverContentSource, checksum); + final DataOutputStream dos = new DataOutputStream(checkedOutput); + dos.writeUTF("unit-test-connection-id"); + + final Map attributes = new HashMap<>(); + attributes.put("uuid", "unit-test-id"); + + dos.write(CHECK_SPACE); + dos.write(MORE_FLOWFILES); + writeAttributes(attributes, dos); + writeContent("hello".getBytes(), dos); + dos.write(NO_MORE_FLOWFILES); + + dos.writeLong(1L); // Write bad checksum. 
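+        // The server is expected to verify the checksum before honoring COMPLETE_TRANSACTION: a
+        // mismatched value should be answered with REJECT_CHECKSUM, the transaction aborted, and the
+        // claimant count on the partially received content claim decremented.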
+ dos.write(COMPLETE_TRANSACTION); + + try { + protocol.receiveFlowFiles(serverInput, serverOutput, "Unit Test", 1, "unit.test"); + Assert.fail("Expected TransactionAbortedException but none was thrown"); + } catch (final TransactionAbortedException e) { + // expected + } + + final byte[] serverResponse = serverOutput.toByteArray(); + assertEquals(2, serverResponse.length); + assertEquals(SPACE_AVAILABLE, serverResponse[0]); + assertEquals(REJECT_CHECKSUM, serverResponse[1]); + + assertEquals(1, claimContents.size()); + final byte[] firstFlowFileContent = claimContents.values().iterator().next(); + assertArrayEquals("hello".getBytes(), firstFlowFileContent); + + Mockito.verify(flowFileRepo, times(0)).updateRepository(anyCollection()); + Mockito.verify(provenanceRepo, times(0)).registerEvents(anyList()); + Mockito.verify(flowFileQueue, times(0)).putAll(anyCollection()); + Mockito.verify(contentRepo, times(1)).decrementClaimantCount(claimContents.keySet().iterator().next()); + } + + @Test + public void testEofWritingContent() throws IOException { + final StandardLoadBalanceProtocol protocol = new StandardLoadBalanceProtocol(flowFileRepo, contentRepo, provenanceRepo, flowController, ALWAYS_AUTHORIZED); + + final PipedInputStream serverInput = new PipedInputStream(); + final PipedOutputStream serverContentSource = new PipedOutputStream(); + serverInput.connect(serverContentSource); + + final ByteArrayOutputStream serverOutput = new ByteArrayOutputStream(); + + // Write connection ID + final Checksum checksum = new CRC32(); + final OutputStream checkedOutput = new CheckedOutputStream(serverContentSource, checksum); + final DataOutputStream dos = new DataOutputStream(checkedOutput); + dos.writeUTF("unit-test-connection-id"); + + final Map attributes = new HashMap<>(); + attributes.put("uuid", "unit-test-id"); + + dos.write(CHECK_SPACE); + dos.write(MORE_FLOWFILES); + writeAttributes(attributes, dos); + + // Indicate 45 byte data frame, then stop after 5 bytes. 
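+        // A data frame is DATA_FRAME_FOLLOWS, a two-byte length, then that many payload bytes.
+        // Advertising 45 bytes but sending only 5 before closing the stream should leave the server
+        // reading mid-frame and surface as an EOFException.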
+ dos.write(DATA_FRAME_FOLLOWS); + dos.writeShort(45); + dos.write("hello".getBytes()); + dos.flush(); + dos.close(); + + try { + protocol.receiveFlowFiles(serverInput, serverOutput, "Unit Test", 1, "unit.test"); + Assert.fail("Expected EOFException but none was thrown"); + } catch (final EOFException e) { + // expected + } + + final byte[] serverResponse = serverOutput.toByteArray(); + assertEquals(1, serverResponse.length); + assertEquals(SPACE_AVAILABLE, serverResponse[0]); + + assertEquals(1, claimContents.size()); + final byte[] firstFlowFileContent = claimContents.values().iterator().next(); + assertArrayEquals(new byte[0], firstFlowFileContent); + + Mockito.verify(flowFileRepo, times(0)).updateRepository(anyCollection()); + Mockito.verify(provenanceRepo, times(0)).registerEvents(anyList()); + Mockito.verify(flowFileQueue, times(0)).putAll(anyCollection()); + Mockito.verify(contentRepo, times(1)).decrementClaimantCount(claimContents.keySet().iterator().next()); + } + + @Test + public void testAbortAfterChecksumConfirmation() throws IOException { + final StandardLoadBalanceProtocol protocol = new StandardLoadBalanceProtocol(flowFileRepo, contentRepo, provenanceRepo, flowController, ALWAYS_AUTHORIZED); + + final PipedInputStream serverInput = new PipedInputStream(); + final PipedOutputStream serverContentSource = new PipedOutputStream(); + serverInput.connect(serverContentSource); + + final ByteArrayOutputStream serverOutput = new ByteArrayOutputStream(); + + // Write connection ID + final Checksum checksum = new CRC32(); + final OutputStream checkedOutput = new CheckedOutputStream(serverContentSource, checksum); + final DataOutputStream dos = new DataOutputStream(checkedOutput); + dos.writeUTF("unit-test-connection-id"); + + final Map attributes = new HashMap<>(); + attributes.put("uuid", "unit-test-id"); + + dos.write(CHECK_SPACE); + dos.write(MORE_FLOWFILES); + writeAttributes(attributes, dos); + writeContent("hello".getBytes(), dos); + dos.write(NO_MORE_FLOWFILES); + + dos.writeLong(checksum.getValue()); + dos.write(ABORT_TRANSACTION); + + try { + protocol.receiveFlowFiles(serverInput, serverOutput, "Unit Test", 1, "unit.test"); + Assert.fail("Expected TransactionAbortedException but none was thrown"); + } catch (final TransactionAbortedException e) { + // expected + } + + final byte[] serverResponse = serverOutput.toByteArray(); + assertEquals(2, serverResponse.length); + assertEquals(SPACE_AVAILABLE, serverResponse[0]); + assertEquals(CONFIRM_CHECKSUM, serverResponse[1]); + + assertEquals(1, claimContents.size()); + final byte[] firstFlowFileContent = claimContents.values().iterator().next(); + assertArrayEquals("hello".getBytes(), firstFlowFileContent); + + Mockito.verify(flowFileRepo, times(0)).updateRepository(anyCollection()); + Mockito.verify(provenanceRepo, times(0)).registerEvents(anyList()); + Mockito.verify(flowFileQueue, times(0)).putAll(anyCollection()); + Mockito.verify(contentRepo, times(1)).decrementClaimantCount(claimContents.keySet().iterator().next()); + } + + @Test + public void testFlowFileNoContent() throws IOException { + final StandardLoadBalanceProtocol protocol = new StandardLoadBalanceProtocol(flowFileRepo, contentRepo, provenanceRepo, flowController, ALWAYS_AUTHORIZED); + + final PipedInputStream serverInput = new PipedInputStream(); + final PipedOutputStream serverContentSource = new PipedOutputStream(); + serverInput.connect(serverContentSource); + + final ByteArrayOutputStream serverOutput = new ByteArrayOutputStream(); + + // Write connection ID + 
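+        // Everything below is written through a CheckedOutputStream, so the CRC32 value written
+        // after NO_MORE_FLOWFILES covers the connection ID and every frame sent.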
final Checksum checksum = new CRC32(); + final OutputStream checkedOutput = new CheckedOutputStream(serverContentSource, checksum); + final DataOutputStream dos = new DataOutputStream(checkedOutput); + dos.writeUTF("unit-test-connection-id"); + + final Map attributes = new HashMap<>(); + attributes.put("uuid", "unit-test-id"); + + dos.write(CHECK_SPACE); + dos.write(MORE_FLOWFILES); + writeAttributes(attributes, dos); + writeContent(null, dos); + dos.write(NO_MORE_FLOWFILES); + + dos.writeLong(checksum.getValue()); + dos.write(COMPLETE_TRANSACTION); + + protocol.receiveFlowFiles(serverInput, serverOutput, "Unit Test", 1, "unit.test"); + + final byte[] serverResponse = serverOutput.toByteArray(); + assertEquals(3, serverResponse.length); + assertEquals(SPACE_AVAILABLE, serverResponse[0]); + assertEquals(CONFIRM_CHECKSUM, serverResponse[1]); + assertEquals(CONFIRM_COMPLETE_TRANSACTION, serverResponse[2]); + + assertEquals(1, claimContents.size()); + assertEquals(0, claimContents.values().iterator().next().length); + + Mockito.verify(flowFileRepo, times(1)).updateRepository(anyCollection()); + Mockito.verify(provenanceRepo, times(1)).registerEvents(anyList()); + Mockito.verify(flowFileQueue, times(0)).putAll(anyCollection()); + Mockito.verify(flowFileQueue, times(1)).receiveFromPeer(anyCollection()); + } + + private void writeAttributes(final Map attributes, final DataOutputStream dos) throws IOException { + try (final ByteArrayOutputStream baos = new ByteArrayOutputStream(); + final DataOutputStream out = new DataOutputStream(baos)) { + out.writeInt(attributes.size()); + + for (final Map.Entry entry : attributes.entrySet()) { + final byte[] key = entry.getKey().getBytes(); + out.writeInt(key.length); + out.write(key); + + final byte[] value = entry.getValue().getBytes(); + out.writeInt(value.length); + out.write(value); + } + + out.writeLong(0L); // lineage start date + out.writeLong(0L); // entry date + + dos.writeInt(baos.size()); + baos.writeTo(dos); + } + + } + + private void writeContent(final byte[] content, final DataOutputStream out) throws IOException { + if (content == null) { + out.write(NO_DATA_FRAME); + return; + } + + int iterations = content.length / 65535; + if (content.length % 65535 > 0) { + iterations++; + } + + for (int i=0; i < iterations; i++) { + final int offset = i * 65536; + final int length = Math.min(content.length - offset, 65535); + + out.write(DATA_FRAME_FOLLOWS); + out.writeShort(length); + out.write(content, offset, length); + } + + out.write(NO_DATA_FRAME); + } +} diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/repository/TestStandardProcessSession.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/repository/TestStandardProcessSession.java index 56670942ed49..efe2bd4380e4 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/repository/TestStandardProcessSession.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/repository/TestStandardProcessSession.java @@ -16,55 +16,13 @@ */ package org.apache.nifi.controller.repository; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; 
-import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.notNull; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.FilterOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.regex.Pattern; - import org.apache.nifi.connectable.Connectable; import org.apache.nifi.connectable.ConnectableType; import org.apache.nifi.connectable.Connection; import org.apache.nifi.controller.ProcessScheduler; -import org.apache.nifi.controller.StandardFlowFileQueue; import org.apache.nifi.controller.queue.FlowFileQueue; +import org.apache.nifi.controller.queue.NopConnectionEventListener; +import org.apache.nifi.controller.queue.StandardFlowFileQueue; import org.apache.nifi.controller.repository.claim.ContentClaim; import org.apache.nifi.controller.repository.claim.ResourceClaim; import org.apache.nifi.controller.repository.claim.ResourceClaimManager; @@ -99,6 +57,49 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.FilterOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.regex.Pattern; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.notNull; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + public 
class TestStandardProcessSession { private StandardProcessSession session; @@ -207,7 +208,7 @@ private FlowFileQueue createFlowFileQueueSpy(Connection connection) { final FlowFileSwapManager swapManager = Mockito.mock(FlowFileSwapManager.class); final ProcessScheduler processScheduler = Mockito.mock(ProcessScheduler.class); - final StandardFlowFileQueue actualQueue = new StandardFlowFileQueue("1", connection, flowFileRepo, provenanceRepo, null, + final StandardFlowFileQueue actualQueue = new StandardFlowFileQueue("1", new NopConnectionEventListener(), flowFileRepo, provenanceRepo, null, processScheduler, swapManager, null, 10000, 0L, "0 B"); return Mockito.spy(actualQueue); } @@ -1515,7 +1516,7 @@ public void testCommitFailureRequeuesFlowFiles() { final FlowFile originalFlowFile = session.get(); assertTrue(flowFileQueue.isActiveQueueEmpty()); - assertEquals(1, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount()); + assertTrue(flowFileQueue.isUnacknowledgedFlowFile()); final FlowFile modified = session.write(originalFlowFile, new OutputStreamCallback() { @Override @@ -1538,7 +1539,7 @@ public void process(OutputStream out) throws IOException { assertFalse(flowFileQueue.isActiveQueueEmpty()); assertEquals(1, flowFileQueue.size().getObjectCount()); - assertEquals(0, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount()); + assertFalse(flowFileQueue.isUnacknowledgedFlowFile()); } @Test @@ -1552,7 +1553,7 @@ public void testRollbackAfterCheckpoint() { final FlowFile originalFlowFile = session.get(); assertTrue(flowFileQueue.isActiveQueueEmpty()); - assertEquals(1, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount()); + assertTrue(flowFileQueue.isUnacknowledgedFlowFile()); final FlowFile modified = session.write(originalFlowFile, new OutputStreamCallback() { @Override @@ -1569,7 +1570,7 @@ public void process(OutputStream out) throws IOException { session.rollback(); assertTrue(flowFileQueue.isActiveQueueEmpty()); assertEquals(0, flowFileQueue.size().getObjectCount()); - assertEquals(0, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount()); + assertFalse(flowFileQueue.isUnacknowledgedFlowFile()); session.rollback(); @@ -1578,7 +1579,7 @@ public void process(OutputStream out) throws IOException { final FlowFile originalRound2 = session.get(); assertTrue(flowFileQueue.isActiveQueueEmpty()); - assertEquals(1, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount()); + assertTrue(flowFileQueue.isUnacknowledgedFlowFile()); final FlowFile modifiedRound2 = session.write(originalRound2, new OutputStreamCallback() { @Override @@ -1591,13 +1592,13 @@ public void process(OutputStream out) throws IOException { session.checkpoint(); assertTrue(flowFileQueue.isActiveQueueEmpty()); - assertEquals(1, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount()); + assertTrue(flowFileQueue.isUnacknowledgedFlowFile()); session.commit(); // FlowFile transferred back to queue assertEquals(1, flowFileQueue.size().getObjectCount()); - assertEquals(0, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount()); + assertFalse(flowFileQueue.isUnacknowledgedFlowFile()); assertFalse(flowFileQueue.isActiveQueueEmpty()); } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/repository/TestWriteAheadFlowFileRepository.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/repository/TestWriteAheadFlowFileRepository.java index 671c58d76913..a3ee5c16b3d8 
100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/repository/TestWriteAheadFlowFileRepository.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/repository/TestWriteAheadFlowFileRepository.java @@ -16,35 +16,20 @@ */ package org.apache.nifi.controller.repository; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.when; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.TimeUnit; import org.apache.nifi.connectable.Connectable; import org.apache.nifi.connectable.Connection; -import org.apache.nifi.controller.StandardFlowFileQueue; import org.apache.nifi.controller.queue.DropFlowFileStatus; import org.apache.nifi.controller.queue.FlowFileQueue; +import org.apache.nifi.controller.queue.FlowFileQueueSize; import org.apache.nifi.controller.queue.ListFlowFileStatus; +import org.apache.nifi.controller.queue.LoadBalanceCompression; +import org.apache.nifi.controller.queue.LoadBalanceStrategy; +import org.apache.nifi.controller.queue.NopConnectionEventListener; +import org.apache.nifi.controller.queue.QueueDiagnostics; import org.apache.nifi.controller.queue.QueueSize; +import org.apache.nifi.controller.queue.StandardFlowFileQueue; +import org.apache.nifi.controller.queue.StandardLocalQueuePartitionDiagnostics; +import org.apache.nifi.controller.queue.StandardQueueDiagnostics; import org.apache.nifi.controller.repository.claim.ContentClaim; import org.apache.nifi.controller.repository.claim.ResourceClaim; import org.apache.nifi.controller.repository.claim.ResourceClaimManager; @@ -69,6 +54,28 @@ import org.wali.MinimalLockingWriteAheadLog; import org.wali.WriteAheadRepository; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.when; + @SuppressWarnings("deprecation") public class TestWriteAheadFlowFileRepository { @@ -92,6 +99,28 @@ public void clearRepo() throws IOException { @Ignore("Intended only for local performance testing before/after making changes") public void testUpdatePerformance() throws IOException, InterruptedException { final FlowFileQueue queue = new FlowFileQueue() { + private LoadBalanceCompression compression = LoadBalanceCompression.DO_NOT_COMPRESS; + + @Override + public void startLoadBalancing() { + } + + @Override + public void stopLoadBalancing() { + } + + @Override + public void 
offloadQueue() { + } + + @Override + public void resetOffloadedQueue() { + } + + @Override + public boolean isActivelyLoadBalancing() { + return false; + } @Override public String getIdentifier() { @@ -112,11 +141,6 @@ public SwapSummary recoverSwappedFlowFiles() { public void purgeSwapFiles() { } - @Override - public int getSwapFileCount() { - return 0; - } - @Override public void setPriorities(List newPriorities) { } @@ -154,21 +178,6 @@ public boolean isActiveQueueEmpty() { return false; } - @Override - public QueueSize getUnacknowledgedQueueSize() { - return null; - } - - @Override - public QueueSize getActiveQueueSize() { - return size(); - } - - @Override - public QueueSize getSwapQueueSize() { - return null; - } - @Override public void acknowledge(FlowFileRecord flowFile) { } @@ -178,12 +187,7 @@ public void acknowledge(Collection flowFiles) { } @Override - public boolean isAllActiveFlowFilesPenalized() { - return false; - } - - @Override - public boolean isAnyActiveFlowFilePenalized() { + public boolean isUnacknowledgedFlowFile() { return false; } @@ -210,11 +214,6 @@ public List poll(int maxResults, Set expiredReco return null; } - @Override - public long drainQueue(Queue sourceQueue, List destination, int maxResults, Set expiredRecords) { - return 0; - } - @Override public List poll(FlowFileFilter filter, Set expiredRecords) { return null; @@ -272,6 +271,44 @@ public FlowFileRecord getFlowFile(String flowFileUuid) throws IOException { @Override public void verifyCanList() throws IllegalStateException { } + + @Override + public QueueDiagnostics getQueueDiagnostics() { + final FlowFileQueueSize size = new FlowFileQueueSize(size().getObjectCount(), size().getByteCount(), 0, 0, 0, 0, 0); + return new StandardQueueDiagnostics(new StandardLocalQueuePartitionDiagnostics(size, false, false), Collections.emptyList()); + } + + @Override + public void lock() { + } + + @Override + public void unlock() { + } + + @Override + public void setLoadBalanceStrategy(final LoadBalanceStrategy strategy, final String partitioningAttribute) { + } + + @Override + public LoadBalanceStrategy getLoadBalanceStrategy() { + return null; + } + + @Override + public void setLoadBalanceCompression(final LoadBalanceCompression compression) { + this.compression = compression; + } + + @Override + public LoadBalanceCompression getLoadBalanceCompression() { + return compression; + } + + @Override + public String getPartitioningAttribute() { + return null; + } }; @@ -370,7 +407,7 @@ public void testResourceClaimsIncremented() throws IOException { when(connection.getDestination()).thenReturn(Mockito.mock(Connectable.class)); final FlowFileSwapManager swapMgr = new MockFlowFileSwapManager(); - final FlowFileQueue queue = new StandardFlowFileQueue("1234", connection, null, null, claimManager, null, swapMgr, null, 10000, 0L, "0 B"); + final FlowFileQueue queue = new StandardFlowFileQueue("1234", new NopConnectionEventListener(), null, null, claimManager, null, swapMgr, null, 10000, 0L, "0 B"); when(connection.getFlowFileQueue()).thenReturn(queue); queueProvider.addConnection(connection); @@ -414,7 +451,7 @@ public void testResourceClaimsIncremented() throws IOException { records.add(rec2); repo.updateRepository(records); - final String swapLocation = swapMgr.swapOut(Collections.singletonList(flowFile2), queue); + final String swapLocation = swapMgr.swapOut(Collections.singletonList(flowFile2), queue, null); repo.swapFlowFilesOut(Collections.singletonList(flowFile2), queue, swapLocation); } @@ -546,7 +583,7 @@ public void 
initialize(SwapManagerInitializationContext initializationContext) { } @Override - public String swapOut(List flowFiles, FlowFileQueue flowFileQueue) throws IOException { + public String swapOut(List flowFiles, FlowFileQueue flowFileQueue, final String partitionName) throws IOException { Map> swapMap = swappedRecords.get(flowFileQueue); if (swapMap == null) { swapMap = new HashMap<>(); @@ -583,7 +620,7 @@ public SwapContents swapIn(String swapLocation, FlowFileQueue flowFileQueue) thr } @Override - public List recoverSwapLocations(FlowFileQueue flowFileQueue) throws IOException { + public List recoverSwapLocations(FlowFileQueue flowFileQueue, final String partitionName) throws IOException { Map> swapMap = swappedRecords.get(flowFileQueue); if (swapMap == null) { return null; @@ -631,5 +668,14 @@ public void purge() { this.swappedRecords.clear(); } + @Override + public Set getSwappedPartitionNames(FlowFileQueue queue) throws IOException { + return Collections.emptySet(); + } + + @Override + public String changePartitionName(String swapLocation, String newPartitionName) throws IOException { + return swapLocation; + } } } diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/scheduling/TestProcessorLifecycle.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/scheduling/TestProcessorLifecycle.java index 5940c8a5a802..b9f4320b5e02 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/scheduling/TestProcessorLifecycle.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/scheduling/TestProcessorLifecycle.java @@ -493,11 +493,11 @@ public void validateProcessorCanBeStoppedWhenOnTriggerThrowsException() throws E testProcNode.performValidation(); ps.startProcessor(testProcNode, true); - assertCondition(() -> ScheduledState.RUNNING == testProcNode.getScheduledState(), SHORT_DELAY_TOLERANCE); + assertCondition(() -> ScheduledState.RUNNING == testProcNode.getScheduledState(), LONG_DELAY_TOLERANCE); ps.disableProcessor(testProcNode); - assertCondition(() -> ScheduledState.RUNNING == testProcNode.getScheduledState(), SHORT_DELAY_TOLERANCE); + assertCondition(() -> ScheduledState.RUNNING == testProcNode.getScheduledState(), LONG_DELAY_TOLERANCE); ps.stopProcessor(testProcNode); - assertCondition(() -> ScheduledState.STOPPED == testProcNode.getScheduledState(), SHORT_DELAY_TOLERANCE); + assertCondition(() -> ScheduledState.STOPPED == testProcNode.getScheduledState(), LONG_DELAY_TOLERANCE); } /** diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/scheduling/TestStandardProcessScheduler.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/scheduling/TestStandardProcessScheduler.java index 2f1d0cde3aab..8e1d154a9e5f 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/scheduling/TestStandardProcessScheduler.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/controller/scheduling/TestStandardProcessScheduler.java @@ -169,6 +169,7 @@ public void after() throws Exception { * run. This unit test is intended to verify that we have this resolved. 
*/ @Test + @Ignore("This test appears to be buggy") public void testReportingTaskDoesntKeepRunningAfterStop() throws InterruptedException, InitializationException { taskNode.performValidation(); scheduler.schedule(taskNode); @@ -232,7 +233,7 @@ public void testDisableControllerServiceWithProcessorTryingToStartUsingIt() thro assertTrue(service.getState() == ControllerServiceState.DISABLED); } - private class TestReportingTask extends AbstractReportingTask { + public class TestReportingTask extends AbstractReportingTask { private final AtomicBoolean failOnScheduled = new AtomicBoolean(true); private final AtomicInteger onScheduleAttempts = new AtomicInteger(0); @@ -253,7 +254,7 @@ public void onTrigger(final ReportingContext context) { } } - private static class ServiceReferencingProcessor extends AbstractProcessor { + public static class ServiceReferencingProcessor extends AbstractProcessor { static final PropertyDescriptor SERVICE_DESC = new PropertyDescriptor.Builder() .name("service") diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/util/TestFlowDifferenceFilters.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/util/TestFlowDifferenceFilters.java index 9cc8b480e25d..330dd33e0d6b 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/util/TestFlowDifferenceFilters.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/java/org/apache/nifi/util/TestFlowDifferenceFilters.java @@ -17,6 +17,8 @@ package org.apache.nifi.util; import org.apache.nifi.registry.flow.ComponentType; +import org.apache.nifi.registry.flow.VersionedFlowCoordinates; +import org.apache.nifi.registry.flow.VersionedProcessGroup; import org.apache.nifi.registry.flow.VersionedProcessor; import org.apache.nifi.registry.flow.VersionedRemoteGroupPort; import org.apache.nifi.registry.flow.diff.DifferenceType; @@ -73,4 +75,52 @@ public void testFilterAddedRemotePortsWithNonRemoteInputPort() { // predicate should return true because we do want to include changes for adding a non-port Assert.assertTrue(FlowDifferenceFilters.FILTER_ADDED_REMOVED_REMOTE_PORTS.test(flowDifference)); } + + @Test + public void testFilterIgnorableVersionedCoordinateDifferencesWithIgnorableDifference() { + VersionedFlowCoordinates coordinatesA = new VersionedFlowCoordinates(); + coordinatesA.setRegistryUrl("http://localhost:18080"); + + VersionedProcessGroup processGroupA = new VersionedProcessGroup(); + processGroupA.setVersionedFlowCoordinates(coordinatesA); + + VersionedFlowCoordinates coordinatesB = new VersionedFlowCoordinates(); + coordinatesB.setRegistryUrl("http://localhost:18080/"); + + VersionedProcessGroup processGroupB = new VersionedProcessGroup(); + processGroupB.setVersionedFlowCoordinates(coordinatesB); + + StandardFlowDifference flowDifference = new StandardFlowDifference( + DifferenceType.VERSIONED_FLOW_COORDINATES_CHANGED, + processGroupA, processGroupB, + coordinatesA.getRegistryUrl(), coordinatesB.getRegistryUrl(), + ""); + + Assert.assertFalse(FlowDifferenceFilters.FILTER_IGNORABLE_VERSIONED_FLOW_COORDINATE_CHANGES.test(flowDifference)); + } + + @Test + public void testFilterIgnorableVersionedCoordinateDifferencesWithNonIgnorableDifference() { + VersionedFlowCoordinates coordinatesA = new VersionedFlowCoordinates(); + coordinatesA.setRegistryUrl("http://localhost:18080"); + + VersionedProcessGroup 
processGroupA = new VersionedProcessGroup(); + processGroupA.setVersionedFlowCoordinates(coordinatesA); + + VersionedFlowCoordinates coordinatesB = new VersionedFlowCoordinates(); + coordinatesB.setRegistryUrl("http://localhost:18080"); + + VersionedProcessGroup processGroupB = new VersionedProcessGroup(); + processGroupB.setVersionedFlowCoordinates(coordinatesB); + + StandardFlowDifference flowDifference = new StandardFlowDifference( + DifferenceType.VERSIONED_FLOW_COORDINATES_CHANGED, + processGroupA, processGroupB, + coordinatesA.getRegistryUrl(), coordinatesB.getRegistryUrl(), + ""); + + Assert.assertTrue(FlowDifferenceFilters.FILTER_IGNORABLE_VERSIONED_FLOW_COORDINATE_CHANGES.test(flowDifference)); + } + } + diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/resources/localhost-ks.jks b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/resources/localhost-ks.jks new file mode 100755 index 0000000000000000000000000000000000000000..6db775d765c5f59506fcd8953bd0d248b9a40ba5 GIT binary patch literal 3076 zcmb`|c{tQt9{}+A&7v8MEnC7^itNAPVsN>3BGOnAS!0k%3RyCPq3PN}Ba9FgA_{RQ zgOcnK5!X(XrF(~LX|cR>yYF+K?(^LDdEYXP+oIlR@^F6z)T^0lafwBki zZ6JdJ2mvaAzLD$&pWi{zXAlSi04eYtkcWd)i35NGico$4KtSLW_!2JaHILDZ8|}fY z(p^FPLF0KZY))#XMN{m(=v6J#r}jI;)0lixZQb7bs4Aai>Nm zBZcfQZ?G0MvYV{8M1o(+NIu_c$&G?HP5|$>BR;{P30@Ylmj{AB@nnNP6t+S$KR6ib z-CTWu+judpSkPMMC6e`7#^4q=?qE-DPaTEJr=iiGp=?3qdadPDo9R2;K&$ROd!{?H zzEELQnU7VGsaLvWeaSJ`wSaXUwxMw(F{%1WbmOEg}bqFCiyBxW9?G==JK@!Q5%R zxXWG1$farxX>#^5QRA^m9m7)pQJ1Idb-7dZlf4<}YGC3JLhS-G|AOp~L;3?`M}etU zm*F;rWk1{Ejf3TQuNG~~iZ#B~&dQ({20@wC5&_b2>q^fc?ol+yy5nsdqYE;3Uk2-B zNs-#DpcO{u=oEVO0DnpT^{dgFLHVlUSTm@s@1c#Ai=Sh)r4P6LP!N&s6|Ry}qLU$& zI`}$0@=NGt4fAl=Wnbn&3}ohHz~0TA`ferCOD~d-$=Wq~s;j!RIgQb*2Yq_Yb!udN z&#|a5vH7vdGBs`?(N@y3;is#M_*oLMD!1)zMmT<_RoKQ`4*I;i>2HA(C+o$cWq;rHvB^5eg$NGw^H1>| zKg9L+w^Or68}a&^^kb(bAd>^x^gRgT0G)vp_R>{XR26W$NT$fj-p5P2h7k?eKwr5>z zeB|&6Kbg)UkL2R6Uq_b0Xz-*qZ)JJW+}hl0qzeo9Q<5yzMRNNWwmKXJ&DoxiA4mD3t94c89GKy81BbO@p zNAJ%uT~|cj& zS)6Y*X~65YPjIZ(;+X^8@kNC>LM5?o0K5n!9^^r+?jjuzqLkfYhf*r4Om1qZ4&3=d)kJv%;DuEpY)&?+h1O`;P9@%=IWg zeCm}ctK>vk0+~ji|+NyPaW??VB8`SBzcE+Af1gy zUOFvR2@-nv8fGEm$rT*gi^+0(<9xd;_1*&_&2-}V_S(Q#-pz8(4)qlGgigFoT~op! 
zcgy{6EfV3ZstM{qATV!`0&@o`Q1e0<6o5iGzctEwb`AtYbD$KdmeegY0B|87pez;* za&f>M;auEs+oR@3u>v69A0dnzMLd7Ti|BtoBovesL1S^Cwg!mR#%gM)yNICGv7olP z&cDO|?GOqe^Zn)Q1`VSCybzEAa6%~n0HIWbn9PNb$Jh{ao_Jcnl)(KOdMOmDv$ACW zIB@io$9klRhkBXZ4g1JVkS}0)+}`iRveLXa?}y{)E>)|7MBVB4<*O?sl|;w+bS1Ky z)XR>$d5b^CGh9Qp^MO5x`D0raE(!|A3Yxb;KHhZwWTDvqZcaektOt~IIl=x3*$KP=um^|3 zps-IM>W_Fo9124IJo+$P7!>3{vVThiVjm3p*9>5gd7=U}x^T}4i_|@tY23&F zFr`nkv{0iXV%9$<4xecaP6Y_by^R4&R@w)y3wa7soszeQ7jaXr_0!FvV(upsBn=)~ zTpwbIME3_U;?+NsDm9#i^)pp4wWr5VJJ4AOP400pK|XJNKq{X3b@4IeFKjk!Y)9GH zu-{=5w)sC{!~Pu`*5&_$E$7wOE9hHksZNEwy{|yF$J;}0AKO(!8j@|cw(R)~-9c5=w#tuT2gtORu>;+`$hKEA z_Cs=2!XeTTPQoM8V}3nBT-d|PQgzO-JlUP+OqYyE-}TicyKPIxnPnNv@nJO=s9%kJ zFCls;O|?g`y@?eH>k76WrG0raIy1Y1YQ4RE@2z+~YU4#&UX_@n;iVkvuk;SBWaR!U z^F105d-!<0U#mswv&hPpf-Kz0neeo?RlWkUDU7tJ_mA9n`l1JBtia8>5~XP;p8A4t zid6N(Wun{Yd!*`;{-4-lK+@l-0J|XfpHvVajLqjei2vy9ze4;5`9Ev>2Un_BBZ4ng zUa(xD0Ey_JzLh6?rR@#YkSJHUUsc*}(4-*=fTF|Wi4DqjN*wNQo|RWwD$zTIu@lNo z0yLvuLKvzeg=Ya9p`Z7g3aOwd!UeL~>|_*YtZU{tcEucXv%3UTZ|7so zwgRr^Y448J*KS6xuny`!(9#e9L<7z}vXIJMe5+P?ELGIFyr7$_U^8gR2Qhq7?h z&tTBR$i>ve$jC6`;_p`8`Tf0N3;649tW5p>pu^cbVAGZdoX_%Wn=c&R$lwz1I{&ht z(FU!wog5rHSNvw29{)ro_S-SN;@#c8-@gY7IK&y~=^b7BtyHpZ@}XOjr_OR5z4##b zs@evzV#SIuu^abqrJu>;HdNJ7*c!n+NBQsJT~QM&Q$K%v8Suwrd-6LakK5PuqnOe+ z1)oi;x!SLHM&#Juw$&M&7|&ey+N%3A20%B`577iv#Gmr)G_*lePMBI)te48Ep!(Hpb)5k*Qu4fG7q5=)%LDI@B5(Z)o*cE{FtFnk0 zh_G>JvoW%=vNJQmS&Yb01WX^mC}L#LYp*CMTU+SyzKOAuH}CxWegBsq+wILz70r^q zq5ogrU58pmCf@SqoZUJB+H7$T53btap*{12nBg6ry+V^6&!&}f9`E6MZdhSE=e2I_ zyoD8E@^C+mG;1IidT{+TQ9`aQFKj`_b^JGXqTeE(M3 zN-?x*N?+8olYcoIW)^(<9klk;`!8oyA87R!@Sc1$<+7j6e;I+@B0u~lc6Tzspj*?i+UcBhZ?c;?()q8-Q4+kz0p(3 FxB&l(STg_s literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/resources/logback-test.xml b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/resources/logback-test.xml index 560363c45ccd..f6cb8c54655a 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/resources/logback-test.xml +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-framework-core/src/test/resources/logback-test.xml @@ -20,15 +20,15 @@ %-4r [%t] %-5p %c - %m%n - + ./target/log %date %level [%thread] %logger{40} %msg%n - - + + diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-repository-models/src/main/java/org/apache/nifi/controller/repository/StandardRepositoryRecord.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-repository-models/src/main/java/org/apache/nifi/controller/repository/StandardRepositoryRecord.java index 6f045e50e6b1..c960902960eb 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-repository-models/src/main/java/org/apache/nifi/controller/repository/StandardRepositoryRecord.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-repository-models/src/main/java/org/apache/nifi/controller/repository/StandardRepositoryRecord.java @@ -16,30 +16,31 @@ */ package org.apache.nifi.controller.repository; +import org.apache.nifi.controller.queue.FlowFileQueue; +import org.apache.nifi.controller.repository.claim.ContentClaim; +import org.apache.nifi.processor.Relationship; + import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import org.apache.nifi.controller.queue.FlowFileQueue; -import org.apache.nifi.controller.repository.claim.ContentClaim; -import org.apache.nifi.processor.Relationship; - 
public class StandardRepositoryRecord implements RepositoryRecord { - private RepositoryRecordType type = null; + private RepositoryRecordType type; private FlowFileRecord workingFlowFileRecord = null; private Relationship transferRelationship = null; private FlowFileQueue destination = null; private final FlowFileRecord originalFlowFileRecord; private final FlowFileQueue originalQueue; private String swapLocation; - private final Map updatedAttributes = new HashMap<>(); private final Map originalAttributes; + private Map updatedAttributes = null; private List transientClaims; private final long startNanos = System.nanoTime(); + /** * Creates a new record which has no original claim or flow file - it is entirely new * @@ -66,7 +67,7 @@ public StandardRepositoryRecord(final FlowFileQueue originalQueue, final FlowFil this.originalFlowFileRecord = originalFlowFileRecord; this.type = RepositoryRecordType.SWAP_OUT; this.swapLocation = swapLocation; - this.originalAttributes = originalFlowFileRecord == null ? Collections.emptyMap() : originalFlowFileRecord.getAttributes(); + this.originalAttributes = originalFlowFileRecord == null ? Collections.emptyMap() : originalFlowFileRecord.getAttributes(); } @Override @@ -113,30 +114,48 @@ public void setWorking(final FlowFileRecord flowFile) { workingFlowFileRecord = flowFile; } + private Map initializeUpdatedAttributes() { + if (updatedAttributes == null) { + updatedAttributes = new HashMap<>(); + } + + return updatedAttributes; + } + public void setWorking(final FlowFileRecord flowFile, final String attributeKey, final String attributeValue) { workingFlowFileRecord = flowFile; + // In the case that the type is CREATE, we know that all attributes are updated attributes, so no need to store them. + if (type == RepositoryRecordType.CREATE) { + return; + } + // If setting attribute to same value as original, don't add to updated attributes final String currentValue = originalAttributes.get(attributeKey); if (currentValue == null || !currentValue.equals(attributeValue)) { - updatedAttributes.put(attributeKey, attributeValue); + initializeUpdatedAttributes().put(attributeKey, attributeValue); } } public void setWorking(final FlowFileRecord flowFile, final Map updatedAttribs) { workingFlowFileRecord = flowFile; + // In the case that the type is CREATE, we know that all attributes are updated attributes, so no need to store them. + if (type == RepositoryRecordType.CREATE) { + return; + } + for (final Map.Entry entry : updatedAttribs.entrySet()) { final String currentValue = originalAttributes.get(entry.getKey()); if (currentValue == null || !currentValue.equals(entry.getValue())) { - updatedAttributes.put(entry.getKey(), entry.getValue()); + initializeUpdatedAttributes().put(entry.getKey(), entry.getValue()); } } } @Override public boolean isAttributesChanged() { - return !updatedAttributes.isEmpty(); + return type == RepositoryRecordType.CREATE || (updatedAttributes != null && !updatedAttributes.isEmpty()); } public void markForAbort() { @@ -196,7 +215,11 @@ Map getOriginalAttributes() { } Map getUpdatedAttributes() { - return updatedAttributes; + if (type == RepositoryRecordType.CREATE) { + return getCurrent().getAttributes(); + } + + return updatedAttributes == null ? 
Collections.emptyMap() : updatedAttributes; } @Override diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-repository-models/src/main/java/org/apache/nifi/controller/repository/claim/StandardResourceClaim.java b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-repository-models/src/main/java/org/apache/nifi/controller/repository/claim/StandardResourceClaim.java index 7e87199e6b4d..890895636a65 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-repository-models/src/main/java/org/apache/nifi/controller/repository/claim/StandardResourceClaim.java +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-repository-models/src/main/java/org/apache/nifi/controller/repository/claim/StandardResourceClaim.java @@ -17,7 +17,7 @@ package org.apache.nifi.controller.repository.claim; public class StandardResourceClaim implements ResourceClaim, Comparable { - private final StandardResourceClaimManager claimManager; + private final ResourceClaimManager claimManager; private final String id; private final String container; private final String section; @@ -25,7 +25,7 @@ public class StandardResourceClaim implements ResourceClaim, Comparable5 mins + + + 6342 + 4 + 8 + 30 sec + 15 secs diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-resources/src/main/resources/conf/authorizers.xml b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-resources/src/main/resources/conf/authorizers.xml index b57239a25e8e..d6d3c45901f0 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-resources/src/main/resources/conf/authorizers.xml +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-resources/src/main/resources/conf/authorizers.xml @@ -241,6 +241,8 @@ - Node Group - The name of a group containing NiFi cluster nodes. The typical use for this is when nodes are dynamically added/removed from the cluster. + + NOTE: The group must exist before starting NiFi. 
--> file-access-policy-provider diff --git a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-resources/src/main/resources/conf/logback.xml b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-resources/src/main/resources/conf/logback.xml index 6ab98ab00ce0..cf3af8854b9f 100644 --- a/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-resources/src/main/resources/conf/logback.xml +++ b/nifi-nar-bundles/nifi-framework-bundle/nifi-framework/nifi-resources/src/main/resources/conf/logback.xml @@ -117,6 +117,9 @@ + + + c3p0 @@ -288,7 +288,7 @@ javax.mail mail 1.4.7 - + org.apache.httpcomponents httpclient @@ -302,12 +302,12 @@ org.bouncycastle bcprov-jdk15on - 1.59 + 1.60 org.bouncycastle bcpkix-jdk15on - 1.59 + 1.60 com.google.guava @@ -521,7 +521,7 @@ spring-core ${spring.version} - commons-logging @@ -547,7 +547,7 @@ org.apache.commons commons-collections4 - 4.1 + 4.2 org.jasypt @@ -594,13 +594,19 @@ org.apache.commons commons-compress - 1.16.1 + 1.18 commons-net commons-net 3.6 + + + io.netty + netty + 3.7.1.Final + diff --git a/nifi-nar-bundles/nifi-gcp-bundle/nifi-gcp-services-api/pom.xml b/nifi-nar-bundles/nifi-gcp-bundle/nifi-gcp-services-api/pom.xml index 8f8979dc3d67..5515e17363dd 100644 --- a/nifi-nar-bundles/nifi-gcp-bundle/nifi-gcp-services-api/pom.xml +++ b/nifi-nar-bundles/nifi-gcp-bundle/nifi-gcp-services-api/pom.xml @@ -33,7 +33,6 @@ com.google.auth google-auth-library-oauth2-http - 0.6.0 com.google.code.findbugs @@ -49,7 +48,7 @@ com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 diff --git a/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/pom.xml b/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/pom.xml index 1ce4f004f9e6..e6d9d1e4edd9 100644 --- a/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/pom.xml +++ b/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/pom.xml @@ -38,7 +38,7 @@ language governing permissions and limitations under the License. 
--> org.apache.commons commons-lang3 - 3.7 + 3.8.1 commons-io diff --git a/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/java/org/apache/nifi/processors/grpc/ITListenGRPC.java b/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/java/org/apache/nifi/processors/grpc/ITListenGRPC.java index 28a73b90a558..16929a4179ef 100644 --- a/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/java/org/apache/nifi/processors/grpc/ITListenGRPC.java +++ b/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/java/org/apache/nifi/processors/grpc/ITListenGRPC.java @@ -50,21 +50,21 @@ public class ITListenGRPC { private static final String HOST = "localhost"; - private static final String CERT_DN = "CN=localhost, OU=Apache NiFi, O=Apache, L=Santa Monica, ST=CA, C=US"; + private static final String CERT_DN = "CN=localhost, OU=NIFI"; private static final String SOURCE_SYSTEM_UUID = "FAKE_UUID"; private static Map getTruststoreProperties() { final Map props = new HashMap<>(); - props.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - props.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); + props.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/truststore.jks"); + props.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "passwordpassword"); props.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); return props; } private static Map getKeystoreProperties() { final Map properties = new HashMap<>(); - properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); + properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/keystore.jks"); + properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "passwordpassword"); properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); return properties; } diff --git a/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/java/org/apache/nifi/processors/grpc/TestInvokeGRPC.java b/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/java/org/apache/nifi/processors/grpc/TestInvokeGRPC.java index 3b0c41ad8544..fc0f09b82a2a 100644 --- a/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/java/org/apache/nifi/processors/grpc/TestInvokeGRPC.java +++ b/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/java/org/apache/nifi/processors/grpc/TestInvokeGRPC.java @@ -482,16 +482,16 @@ public void testSecureOneWaySsl() throws Exception { private static Map getTruststoreProperties() { final Map props = new HashMap<>(); - props.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - props.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); + props.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/truststore.jks"); + props.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "passwordpassword"); props.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); return props; } private static Map getKeystoreProperties() { final Map properties = new HashMap<>(); - properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); + properties.put(StandardSSLContextService.KEYSTORE.getName(), 
"src/test/resources/keystore.jks"); + properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "passwordpassword"); properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); return properties; } diff --git a/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/resources/keystore.jks b/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/resources/keystore.jks new file mode 100644 index 0000000000000000000000000000000000000000..246fe888efbb981188c9dcca109b5d2c52f2b435 GIT binary patch literal 3088 zcmb`|c{tSF9suz9%_a@TzDvU}%JBY1_7`DFP1&+#U$ZAATQ9;`LzV{F%6MfdJ87|v zE2AhhmLf7Oyo_p0MWJw2_jzyc^W6K~`^Wpod7kq<=RDu@$M<}`XY<|WI|u{<-5$XA zvE!7#kH1Qw_qpu_?~N4xO$Y=51D%~@XpW4zpN#`sJCe8iw3r=g#nrL- zc}e<${}VaCYCr4gf{ezT(R=D z-nlpW#qF(FTF*1$2O#WiubmF;Hrxmy6*igNRlfI9l~@uN!qhs~FEEGiMBlUNgZzPX(j)?E&4g~kNMFchr_X$68d zLt}BE%T`bPS@41`N-iwN`9RK`z@zznQIT?}DVA}T$?N6wuhG{w2?-ZIV8Z4AapH~> zM=w+HO5Fp%YPU&g%=71;K{*z_W9CqOV7L|boJ98Vakn zj}6I7pIDmnvrHDGH%#ao7SjJ1`5HddZo{Zee{!RGW`+H+BrM;}xmtv)l}OxJAZh8- zwT`Bb9j|;Yw_=HDi5fgOoK;}HBaAL{SGgCDyYgC&a%im9w+!Rh4OO{I+3JX2BP;2q`9{!Jli?d3(E_2u<*?$NuwN zAx~K%jw`&zdYQkM#CBOGN@Z)7Q<2%226`=Tiu8=+T0z;V zY7d2carE2qJ_kfpxZ~T^YKhz^<)mJ)c!*1P+p8$2xI%eB(zQC(9^*_&h|gtE3?{fC_^JA?rpHl7SEY8Zz?j!0+^9w##-C~jZ9C0k?>zlY?s;}AZqkO zbydnL6y`L;WU);zfw*aVg|DuDb_E^dVtr1h)_rk&z2RQwX?`5pO;@!2(%#Elv zUe?<*C&k;IN?al00~>7(D+{R{ic4NlN$k7d`Kle9?n&9GU&+1B-px6N;$i0b^mLdH z$(AH9DJAw4%+ES<-~96R++imu41fUT@mIn4Vo+wg1TuVZQMj;q`m}DIxpU)D>FgW# zCt@$))iSz4*>BtOaB)yHPFQ#tq@;HhsC0~}gtS`Jb~GUw{o7yR_5m~iY{B6$C~Otv z{uT?tp&;Z(Y6Z9`D2&{({aqpuTrlXLGvG&Rfp4kF|9${JO@A)o_WRi`ApkLd%?d`^ zwVRHvkb+>ylrv{t<84w-ewm=TU#?Km9_vmc&9~O%6%D7U^XiNP=!ZoKGTfS%Z}Nw) zH{x9_j=9q}jQmOY)74O+sC%~#z>AO)@0IaZlNPG}wW`Bg=)g&(tXNyq_!yt+OrN=~ zI($%TXQXiyc7nf@_hT*hgCy0khUrx^=WzesaX3$8g63@cB7^&p{McFHy1vdY==?h9 zr$i9-K5UdfLvmB==xlIx+U1VV&1Bu2@vaB97}@x`Igcs&i$SY;of|i20_+_wEhLNk z+=yMjh0Gcl{PF_zdC}bg6b%M;P1(`c(n3>q$pDAw=cgoWJ+UFRdnX}(x-;8$N7{Br z_{kkw;BRtA_^UgDhJ(-XdZNP~%U{gf7FCwHzJh|Rf_+h9s``swy%q}DLj3H6C zm&5jubG)kdY)^pIY2viya-LOlTTWE{Q#6J~$`{9ISBVxD5%5QXmr|5=0xtV6gXHrVgG0f?qugFkf{fs5ABY4YFOs`kuIZEzZ*rUX}v9X zN><>P7`!CrHo!+=rw-Pd*t`9=BZ6as?F1UlV0w}EQMEuBMHopBcmt14|g&sQmlf^%aznq zUn5%e{qm(@k9XT@{BRcw#{5+cunF?~P=f$r+me1V`5(#sPm|wG|95zQ?aSX?xh;m} z?PMiEdK%D&u3H6GI^rhnY*xAGv<;Iqj>{WtWW@EZletfeN9l*o^*{aEh^`J6S`PPx zb!lA+afuH^Y<;QMQN8n<1{6v(4GYP(NR)PaHRS}EU0EA{9Fv&6>jAFDwB;aj&{_Ji z%^1H5<93JiC5io0Rj&h9D-N^eGlyk6Ijv?(^|B%=o%YPcg*(0Cu1 literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/resources/localhost-ks.jks b/nifi-nar-bundles/nifi-grpc-bundle/nifi-grpc-processors/src/test/resources/localhost-ks.jks deleted file mode 100755 index df36197d92ab8e9870f42666d74c47646fd56f26..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3512 zcmchZXH*l&7RQqiLLfv+Xd3T`p&Iir~-L#Y=#ud?{E1Ldil!ycf^TKF)2%4dD_ z&CRl2juv-w;hX`>r;n!ME0*eJZQo{liYbqFr%&s4KWji3-S|{|Q#z3Bi!_n-HQIvn z)_yhbW3OmoReGZ$;mdsOy)j4ml{e?MpM3BXEZ&%y>@=UK++W7rcU+QDvQxbsYBP>C ziqWW_4}oMI2<5S^ml11u$vzs(Bz1QY%@RE`7dI!_J9pQZTH;ai+~*FZ-!&0FO}AsY zOxhC^^;ctKwcW!%@WtyMsu@6xd3zdv(I!8(v5$IseOUHF#yBeb=(KkBD?D*{)a_{6 zy11;ZtH1s5w8!+ewZvnrkKmE%X*#>Ul%b`b!V6_&L1)$_<6^i6k7Bh$Cbm8X7HN40 zS#G)q)jhM1yqIk|ug4$}yr>lNM^7CDi=S{rQqn53pE8J!Vk=?&Q_pATc&ICwBQ zS(^FTsqy1f=9leGJUj=gReI>!b5N4p{xQ7Yh?)gcpugwPJJKnkHLG#|+$oVkg4yV1aO1A$e7 zaQjo^Q#=uo%^bn4wLVp1-Lpy>m3Om-GmM2@#_FNth9W;Io4*MtEVVL^kgC7SFA-we z#qVjp#>O>$RucpY72eI-)`&+06CPE;lJYi4}@3m`# zJ_AU}qlHP&l8^Sxdy9$-4gOUb4UL4637oYGzAr%oZTy>dW-CT`%o3B(duSJ1(e{$Y 
zM<9UyvWx;+833RQMN{a4(G-wlHXR5E0)ZV>5?#@72%}__LDViB2!zoC&;$$&%?P2h z0z(iWD~mq^C<3ITh2caaj#n5E%ofhx0nUQPL~nPTGlqqB22Ex{K(u_Eac+1F2b%p@ zfFWRi2!bZ=dhQr@H0!ZShxiYx(fr(S%o#KWt$@YIDPiPok3$Sr4*fIyhqIvoh5uR( z+G9aS0kQzl6d)6b0t5omn(X@$hGj=yE`{&~S2Gtia5Gn?EL_(yG|G+K@=fp0D^(rz zxT1R64#p$fx05POs#deg9+l!c8gwhEor|BbmTA)uRlj-gz6)6_cB&4*Tc-M`bK9>c z*H4msFu-a#7iT^GkUgZvxqIcr(X*;=?XWBEh_4N)!@=`Ah5M!kt4cNNSPATwH?AXC zdENd&XqoAr2Dq}BQ6Gnc3D~XB-xhZWLe^fld)&QlbH&rFP$(?%sxBMiB_=cw?r7CH@9Dd8TnkYHTi)yt>lPMf~Qh{TVz-%zd}mpoX@Lx z7dHOF@cCta&Y}DYj>8M>y0uqvg+{1>9qQK_{DUz^17>%6baZre>Zg9-*JTh{JeEgE(Xc$3KCdGsnB0X~&288Q1yu50`xi`1$u zxw%0F{zoTzg?QpaXg#S%Pc}TD&G9sE#r*FN1sL2ia!PT<-siU_xsUiWo{_zcpd9U!Ni)~G zLi}%abS2t*$1jmQ&rh~)%FTUKeNh{2;~_;7Z1a$&S<~zN0o(9-C8gCXFPUtQaEi(Ok}L|C$~05J}GOTeZ2`>N!9w z|5?&Yv(xUn4w}Md-)+>Xm-idnwqK!l-ep)3M#!opq&#uM)v4O^f$5XSSy^-7P*&lV zi*Bv9WLRzp8QFh_Sp$75|b~$}d%! zADHN!cN?}Zq;Pfp`_&u3UsSsuum4tHmJnSKKJnFdCJT}j<9dY@Y9;CdG*Uh6JugW| zjszU%k%LnRdK;+FkhCS;r3tV3Qu-?q>U@4Gz20FckyBYJ$a2l5D|g6nnw|8he9Zuw zE>xvKu;5sW8RFB^dtl3__u=TrP;92~^c`S>V6o8(>LDq#2#WbkDhztv-Y+KRxxc_( z9-Ig8g=a}sc!GElV)j`DAZZobG^EycOweBae{tMx(CCHt3QRem*{+4B%V0XzUy$!_ zUZ;}$4v!kJ?fiOsh zU6?00F|Q<1!8boIGdazbS85=;kbaqV>qY`p(FtRc*H!<=v7&I|*F*PwV zGR!y-b78_&{p;J_RLYcZ=UKH^oM-d2R~63QK8sqv6wbQ1c%Aj-tT=16Xl@Dp3*V;e zHf*;EU2s!d?EmGAwL4$*KMm76>RxSI^Y_{r+12XOyBVZ5SkF88wdmZUBCW>1mjpsy z^o8A>D^$57@$5Uk|7*7VJjNZDDg1En^sD7BzpeZg;PKvK$44Vgqc3^M$IC50#>}kV z5b(o}W%EH!_vB=5`RI47^%}8dvO6oHmz@0=8J8WnQn7ZTq?gtGwUN*b>^*j51vd`gXZ}Tj=d=! zZ5Q2p8)B?EgtP6!|DK($dm-WAwXXk9U+SN8m>b$H+55Tn^-f3Qi)|}kFy(38X`WLz zb3tscaO}@TH^6nkgPpdLY>Z2bWWLj})^PvwNNvp0VmYkR- zC$rcPs*X#TBPg{vHL)l;!%)D052TJ;m^~!5xFj<#9cRWgF)#(@MiUbYLkm#GG%*0? zP$-w~?rCD&0nCOvupnUsa^#sB8yWuA2RF)=3TX!2_nM>k=E?JKg4=^^-n%dyma}iz z7O0l#8tb4G_&d_JH{#d+qhEI!d^66fYKrV z-7ByE+kQ;?hjsY#V=I=4^0WMI{&xAO++ilu5aB4XiALW_Kd;kHysq{BlM=J!+>0KJ z$C*SKrY8jSiz;)U*)(Zq)1ucc+#e!jzJi?g{o#VvYqM?do!+xL#%xFU&dMq4cmJ|_ z)$}vmhufCDDLpVUyl>Z)NdISr>;jDqTRg=Im0$R1hzV~$&uP?iV%b9*v8wKnnqG|u zi`U6%Z(de9F>i4__b)}$q>sOosu)$Q&n)@4Z$;pQ&K1q~A4WZ$&o-$$Ev}(DR5gXs z$NJxSPc7!gRtAte7ABjPohmimJL!w=83*1S57xEkb0m5`p0y|T%0y91?Xr*$k!KcN z@qQxIFmK}r4~|)iTkLXzMLu+2k#TdI871R(!(9`2~U67z}vXIJMe5+P?ELGIFyr7$_U^8gR2Qhq7?`|G6{iVN=9$4_qR+voW1o`bX1kq_?@ zm7?Yu-O|hoDsa9N@MUVyrd<=O*InmV-0wL}LeXNm*vwgmI&)i2xoutU%O^gq+-#-h zgcU!vCxtetZirTB`cPrNOwe`BjfgY1S)#*|1p91Qo|habzwWRw{qs>-^K}Z`hO4)3 zSIUzY)UVZ%QmoRgXzEd9>P=X}&%!9YU~gQOux6db!Fj1Vv2}&_t)uD|R&4O>6_I{d zsCSIx%+#5m7tCg~+3?%uOvdTe*F$Rt zebbjM$oo^i)iz_A4D$trP*WymMh3>k^#-*De832h{z<6{wH5fM3WFu8l*4Bb6fbc?q7ui3Ma?czxTd62X+i-dt#19k;q{i-Zt1|n=6 z+H8!htnAE8a26wS6amu*Fp3x%;$M|c<$Br5aA-Zh`^uo~Wl`!k!$N+rAKm}gt?NVV zYn6)fmHTq0T%UdUsC(tf>wh-zugDkvQkKrV&wNr*Y-2X-|KIB@^>$R5GBG*(lon0! 
zZ;^?AbUT{i$=9D(FVwb7kUOEi+Gn#X+u0bI3n5nobT4k-BQr_T{`T_6SwEI~yi@Kw zFTeN0R^RvQOEeC9N z9oJ^^ymM}2j4=Bzl|5_bntl+HlHs5ImG#RU$C=4?=Kr%5`)8e9xTbMSRd_+e)hDMV z{$6UT4g6+eRVVNyVx3Chngx@VtPkMaA?X>r^jGNWKqZspMPe?;zuNh^J${uwG4xsu E0JdaNg#Z8m literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/PutHDFS.java b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/PutHDFS.java index 91fd204ce144..6e71331feeef 100644 --- a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/PutHDFS.java +++ b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/java/org/apache/nifi/processors/hadoop/PutHDFS.java @@ -49,6 +49,9 @@ import org.apache.nifi.processor.util.StandardValidators; import org.apache.nifi.stream.io.StreamUtils; import org.apache.nifi.util.StopWatch; +import org.ietf.jgss.GSSException; + +import com.google.common.base.Throwables; import java.io.BufferedInputStream; import java.io.FileNotFoundException; @@ -60,8 +63,11 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; +import java.util.stream.Stream; /** * This processor copies FlowFiles to HDFS. @@ -373,6 +379,17 @@ public void process(InputStream in) throws IOException { session.transfer(putFlowFile, REL_SUCCESS); + } catch (final IOException e) { + Optional causeOptional = findCause(e, GSSException.class, gsse -> GSSException.NO_CRED == gsse.getMajor()); + if (causeOptional.isPresent()) { + getLogger().warn("An error occurred while connecting to HDFS. " + + "Rolling back session, and penalizing flow file {}", + new Object[] {putFlowFile.getAttribute(CoreAttributes.UUID.key()), causeOptional.get()}); + session.rollback(true); + } else { + getLogger().error("Failed to access HDFS due to {}", new Object[]{e}); + session.transfer(putFlowFile, REL_FAILURE); + } } catch (final Throwable t) { if (tempDotCopyFile != null) { try { @@ -391,6 +408,22 @@ public void process(InputStream in) throws IOException { }); } + + /** + * Returns an optional with the first throwable in the causal chain that is assignable to the provided cause type, + * and satisfies the provided cause predicate, {@link Optional#empty()} otherwise. + * @param t The throwable to inspect for the cause. 
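+     * @param expectedCauseType The type of cause to look for in the causal chain.
+     * @param causePredicate A further condition that a cause of the expected type must satisfy.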
+ * @return + */ + private Optional findCause(Throwable t, Class expectedCauseType, Predicate causePredicate) { + Stream causalChain = Throwables.getCausalChain(t).stream(); + return causalChain + .filter(expectedCauseType::isInstance) + .map(expectedCauseType::cast) + .filter(causePredicate) + .findFirst(); + } + protected void changeOwner(final ProcessContext context, final FileSystem hdfs, final Path name, final FlowFile flowFile) { try { // Change owner and group of file if configured to do so diff --git a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/PutHDFSTest.java b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/PutHDFSTest.java index 46b377d1e245..2334730af4b6 100644 --- a/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/PutHDFSTest.java +++ b/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/test/java/org/apache/nifi/processors/hadoop/PutHDFSTest.java @@ -30,19 +30,21 @@ import org.apache.nifi.flowfile.attributes.CoreAttributes; import org.apache.nifi.hadoop.KerberosProperties; import org.apache.nifi.processor.ProcessContext; +import org.apache.nifi.processor.ProcessSession; import org.apache.nifi.processor.Relationship; import org.apache.nifi.processor.exception.ProcessException; import org.apache.nifi.provenance.ProvenanceEventRecord; import org.apache.nifi.provenance.ProvenanceEventType; import org.apache.nifi.util.MockFlowFile; import org.apache.nifi.util.MockProcessContext; -import org.apache.nifi.util.NiFiProperties; import org.apache.nifi.util.TestRunner; import org.apache.nifi.util.TestRunners; +import org.ietf.jgss.GSSException; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import javax.security.sasl.SaslException; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileInputStream; @@ -57,20 +59,16 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class PutHDFSTest { - private NiFiProperties mockNiFiProperties; private KerberosProperties kerberosProperties; private FileSystem mockFileSystem; @Before public void setup() { - mockNiFiProperties = mock(NiFiProperties.class); - when(mockNiFiProperties.getKerberosConfigurationFile()).thenReturn(null); kerberosProperties = new KerberosProperties(null); mockFileSystem = new MockFileSystem(); } @@ -191,14 +189,12 @@ public void testValidators() { @Test public void testPutFile() throws IOException { - // Refer to comment in the BeforeClass method for an explanation - PutHDFS proc = new TestablePutHDFS(kerberosProperties, mockFileSystem); TestRunner runner = TestRunners.newTestRunner(proc); runner.setProperty(PutHDFS.DIRECTORY, "target/test-classes"); runner.setProperty(PutHDFS.CONFLICT_RESOLUTION, "replace"); - try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1");) { - Map attributes = new HashMap(); + try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1")) { + Map attributes = new HashMap<>(); attributes.put(CoreAttributes.FILENAME.key(), "randombytes-1"); runner.enqueue(fis, attributes); runner.run(); @@ -225,15 +221,13 @@ public void testPutFile() throws IOException { @Test public void 
testPutFileWithCompression() throws IOException { - // Refer to comment in the BeforeClass method for an explanation - PutHDFS proc = new TestablePutHDFS(kerberosProperties, mockFileSystem); TestRunner runner = TestRunners.newTestRunner(proc); runner.setProperty(PutHDFS.DIRECTORY, "target/test-classes"); runner.setProperty(PutHDFS.CONFLICT_RESOLUTION, "replace"); runner.setProperty(PutHDFS.COMPRESSION_CODEC, "GZIP"); - try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1");) { - Map attributes = new HashMap(); + try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1")) { + Map attributes = new HashMap<>(); attributes.put(CoreAttributes.FILENAME.key(), "randombytes-1"); runner.enqueue(fis, attributes); runner.run(); @@ -252,31 +246,60 @@ public void testPutFileWithCompression() throws IOException { } @Test - public void testPutFileWithException() throws IOException { - // Refer to comment in the BeforeClass method for an explanation + public void testPutFileWithGSSException() throws IOException { + FileSystem noCredentialsFileSystem = new MockFileSystem() { + @Override + public FileStatus getFileStatus(Path path) throws IOException { + throw new IOException("ioe", new SaslException("sasle", new GSSException(13))); + } + }; + TestRunner runner = TestRunners.newTestRunner(new TestablePutHDFS(kerberosProperties, noCredentialsFileSystem)); + runner.setProperty(PutHDFS.DIRECTORY, "target/test-classes"); + runner.setProperty(PutHDFS.CONFLICT_RESOLUTION, "replace"); + try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1")) { + Map attributes = new HashMap<>(); + attributes.put(CoreAttributes.FILENAME.key(), "randombytes-1"); + runner.enqueue(fis, attributes); + runner.run(); + } + + // assert no flowfiles transferred to outgoing relationships + runner.assertTransferCount(PutHDFS.REL_SUCCESS, 0); + runner.assertTransferCount(PutHDFS.REL_FAILURE, 0); + // assert the input flowfile was penalized + List penalizedFlowFiles = runner.getPenalizedFlowFiles(); + assertEquals(1, penalizedFlowFiles.size()); + assertEquals("randombytes-1", penalizedFlowFiles.iterator().next().getAttribute(CoreAttributes.FILENAME.key())); + // assert the processor's queue is not empty + assertFalse(runner.isQueueEmpty()); + assertEquals(1, runner.getQueueSize().getObjectCount()); + // assert the input file is back on the queue + ProcessSession session = runner.getProcessSessionFactory().createSession(); + FlowFile queuedFlowFile = session.get(); + assertNotNull(queuedFlowFile); + assertEquals("randombytes-1", queuedFlowFile.getAttribute(CoreAttributes.FILENAME.key())); + session.rollback(); + } + + @Test + public void testPutFileWithProcessException() throws IOException { String dirName = "target/testPutFileWrongPermissions"; File file = new File(dirName); file.mkdirs(); Path p = new Path(dirName).makeQualified(mockFileSystem.getUri(), mockFileSystem.getWorkingDirectory()); - final KerberosProperties testKerberosProperties = kerberosProperties; - TestRunner runner = TestRunners.newTestRunner(new PutHDFS() { + TestRunner runner = TestRunners.newTestRunner(new TestablePutHDFS(kerberosProperties, mockFileSystem) { @Override protected void changeOwner(ProcessContext context, FileSystem hdfs, Path name, FlowFile flowFile) { throw new ProcessException("Forcing Exception to get thrown in order to verify proper handling"); } - - @Override - protected KerberosProperties getKerberosProperties(File kerberosConfigFile) { - return 
testKerberosProperties; - } }); runner.setProperty(PutHDFS.DIRECTORY, dirName); runner.setProperty(PutHDFS.CONFLICT_RESOLUTION, "replace"); - try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1");) { - Map attributes = new HashMap(); + try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1")) { + Map attributes = new HashMap<>(); attributes.put(CoreAttributes.FILENAME.key(), "randombytes-1"); runner.enqueue(fis, attributes); runner.run(); @@ -292,13 +315,11 @@ protected KerberosProperties getKerberosProperties(File kerberosConfigFile) { @Test public void testPutFileWhenDirectoryUsesValidELFunction() throws IOException { - // Refer to comment in the BeforeClass method for an explanation - PutHDFS proc = new TestablePutHDFS(kerberosProperties, mockFileSystem); TestRunner runner = TestRunners.newTestRunner(proc); runner.setProperty(PutHDFS.DIRECTORY, "target/data_${literal('testing'):substring(0,4)}"); runner.setProperty(PutHDFS.CONFLICT_RESOLUTION, "replace"); - try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1");) { + try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1")) { Map attributes = new HashMap<>(); attributes.put(CoreAttributes.FILENAME.key(), "randombytes-1"); runner.enqueue(fis, attributes); @@ -319,8 +340,6 @@ public void testPutFileWhenDirectoryUsesValidELFunction() throws IOException { @Test public void testPutFileWhenDirectoryUsesUnrecognizedEL() throws IOException { - // Refer to comment in the BeforeClass method for an explanation - PutHDFS proc = new TestablePutHDFS(kerberosProperties, mockFileSystem); TestRunner runner = TestRunners.newTestRunner(proc); @@ -329,8 +348,8 @@ public void testPutFileWhenDirectoryUsesUnrecognizedEL() throws IOException { runner.setProperty(PutHDFS.DIRECTORY, "data_${literal('testing'):substring(0,4)%7D"); runner.setProperty(PutHDFS.CONFLICT_RESOLUTION, "replace"); - try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1");) { - Map attributes = new HashMap(); + try (FileInputStream fis = new FileInputStream("src/test/resources/testdata/randombytes-1")) { + Map attributes = new HashMap<>(); attributes.put(CoreAttributes.FILENAME.key(), "randombytes-1"); runner.enqueue(fis, attributes); runner.run(); @@ -340,9 +359,7 @@ public void testPutFileWhenDirectoryUsesUnrecognizedEL() throws IOException { } @Test - public void testPutFileWhenDirectoryUsesInvalidEL() throws IOException { - // Refer to comment in the BeforeClass method for an explanation - + public void testPutFileWhenDirectoryUsesInvalidEL() { PutHDFS proc = new TestablePutHDFS(kerberosProperties, mockFileSystem); TestRunner runner = TestRunners.newTestRunner(proc); // the validator should pick up the invalid EL @@ -356,7 +373,7 @@ private class TestablePutHDFS extends PutHDFS { private KerberosProperties testKerberosProperties; private FileSystem fileSystem; - public TestablePutHDFS(KerberosProperties testKerberosProperties, FileSystem fileSystem) { + TestablePutHDFS(KerberosProperties testKerberosProperties, FileSystem fileSystem) { this.testKerberosProperties = testKerberosProperties; this.fileSystem = fileSystem; } @@ -367,7 +384,7 @@ protected KerberosProperties getKerberosProperties(File kerberosConfigFile) { } @Override - protected FileSystem getFileSystem(Configuration config) throws IOException { + protected FileSystem getFileSystem(Configuration config) { return fileSystem; } @@ -386,24 +403,24 
@@ public URI getUri() { } @Override - public FSDataInputStream open(final Path f, final int bufferSize) throws IOException { + public FSDataInputStream open(final Path f, final int bufferSize) { return null; } @Override public FSDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite, final int bufferSize, final short replication, - final long blockSize, final Progressable progress) throws IOException { + final long blockSize, final Progressable progress) { pathToStatus.put(f, newFile(f)); return new FSDataOutputStream(new ByteArrayOutputStream(), new Statistics("")); } @Override - public FSDataOutputStream append(final Path f, final int bufferSize, final Progressable progress) throws IOException { + public FSDataOutputStream append(final Path f, final int bufferSize, final Progressable progress) { return null; } @Override - public boolean rename(final Path src, final Path dst) throws IOException { + public boolean rename(final Path src, final Path dst) { if (pathToStatus.containsKey(src)) { pathToStatus.put(dst, pathToStatus.remove(src)); } else { @@ -413,7 +430,7 @@ public boolean rename(final Path src, final Path dst) throws IOException { } @Override - public boolean delete(final Path f, final boolean recursive) throws IOException { + public boolean delete(final Path f, final boolean recursive) { if (pathToStatus.containsKey(f)) { pathToStatus.remove(f); } else { @@ -423,7 +440,7 @@ public boolean delete(final Path f, final boolean recursive) throws IOException } @Override - public FileStatus[] listStatus(final Path f) throws FileNotFoundException, IOException { + public FileStatus[] listStatus(final Path f) { return null; } @@ -438,12 +455,12 @@ public Path getWorkingDirectory() { } @Override - public boolean mkdirs(final Path f, final FsPermission permission) throws IOException { + public boolean mkdirs(final Path f, final FsPermission permission) { return false; } @Override - public boolean mkdirs(Path f) throws IOException { + public boolean mkdirs(Path f) { pathToStatus.put(f, newDir(f)); return true; } @@ -456,7 +473,7 @@ public FileStatus getFileStatus(final Path f) throws IOException { } @Override - public boolean exists(Path f) throws IOException { + public boolean exists(Path f) { return pathToStatus.containsKey(f); } diff --git a/nifi-nar-bundles/nifi-hadoop-libraries-bundle/pom.xml b/nifi-nar-bundles/nifi-hadoop-libraries-bundle/pom.xml index 77716eedee4b..26e3750df672 100644 --- a/nifi-nar-bundles/nifi-hadoop-libraries-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-hadoop-libraries-bundle/pom.xml @@ -26,4 +26,14 @@ nifi-hadoop-libraries-nar + + + + + io.netty + netty + 3.7.1.Final + + + diff --git a/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/pom.xml b/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/pom.xml index cf6e09d21cb4..c2866fa51706 100644 --- a/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/pom.xml +++ b/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/pom.xml @@ -24,7 +24,7 @@ Support for interacting with HBase - 2.9.5 + 2.9.7 @@ -55,12 +55,12 @@ org.apache.nifi nifi-record - ${project.version} + 1.8.0-SNAPSHOT org.apache.commons commons-lang3 - 3.4 + 3.8.1 com.fasterxml.jackson.core @@ -93,7 +93,12 @@ org.apache.nifi nifi-record-path - ${project.version} + 1.8.0-SNAPSHOT + + + org.apache.commons + commons-text + 1.4 diff --git a/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonFullRowSerializer.java 
b/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonFullRowSerializer.java index 837f14d220a4..a51840035739 100644 --- a/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonFullRowSerializer.java +++ b/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonFullRowSerializer.java @@ -16,7 +16,7 @@ */ package org.apache.nifi.hbase.io; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.nifi.hbase.scan.ResultCell; import org.apache.nifi.hbase.util.RowSerializerUtil; diff --git a/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonQualifierAndValueRowSerializer.java b/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonQualifierAndValueRowSerializer.java index 0eb18ffbf8b9..ee05f04b4fd9 100644 --- a/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonQualifierAndValueRowSerializer.java +++ b/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonQualifierAndValueRowSerializer.java @@ -16,7 +16,7 @@ */ package org.apache.nifi.hbase.io; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.nifi.hbase.scan.ResultCell; import org.apache.nifi.hbase.util.RowSerializerUtil; diff --git a/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonRowSerializer.java b/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonRowSerializer.java index 0ea0804b8007..fc903e43ba74 100644 --- a/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonRowSerializer.java +++ b/nifi-nar-bundles/nifi-hbase-bundle/nifi-hbase-processors/src/main/java/org/apache/nifi/hbase/io/JsonRowSerializer.java @@ -16,7 +16,7 @@ */ package org.apache.nifi.hbase.io; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.nifi.hbase.scan.ResultCell; import java.io.IOException; diff --git a/nifi-nar-bundles/nifi-hive-bundle/nifi-hive-processors/pom.xml b/nifi-nar-bundles/nifi-hive-bundle/nifi-hive-processors/pom.xml index fda010ee060f..30e641b216c5 100644 --- a/nifi-nar-bundles/nifi-hive-bundle/nifi-hive-processors/pom.xml +++ b/nifi-nar-bundles/nifi-hive-bundle/nifi-hive-processors/pom.xml @@ -106,6 +106,11 @@ findbugs-annotations 1.3.9-1 + + org.apache.commons + commons-text + 1.4 + org.apache.nifi nifi-mock diff --git a/nifi-nar-bundles/nifi-hive-bundle/nifi-hive-processors/src/main/java/org/apache/nifi/util/hive/HiveJdbcCommon.java b/nifi-nar-bundles/nifi-hive-bundle/nifi-hive-processors/src/main/java/org/apache/nifi/util/hive/HiveJdbcCommon.java index ff06495fee97..6e28f94a67c9 100644 --- a/nifi-nar-bundles/nifi-hive-bundle/nifi-hive-processors/src/main/java/org/apache/nifi/util/hive/HiveJdbcCommon.java +++ b/nifi-nar-bundles/nifi-hive-bundle/nifi-hive-processors/src/main/java/org/apache/nifi/util/hive/HiveJdbcCommon.java @@ -24,7 +24,7 @@ import org.apache.avro.generic.GenericDatumWriter; import org.apache.avro.generic.GenericRecord; import org.apache.avro.io.DatumWriter; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import 
org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/nifi-nar-bundles/nifi-hive-bundle/nifi-hive3-processors/src/main/java/org/apache/nifi/util/hive/HiveJdbcCommon.java b/nifi-nar-bundles/nifi-hive-bundle/nifi-hive3-processors/src/main/java/org/apache/nifi/util/hive/HiveJdbcCommon.java index ff06495fee97..6e28f94a67c9 100644 --- a/nifi-nar-bundles/nifi-hive-bundle/nifi-hive3-processors/src/main/java/org/apache/nifi/util/hive/HiveJdbcCommon.java +++ b/nifi-nar-bundles/nifi-hive-bundle/nifi-hive3-processors/src/main/java/org/apache/nifi/util/hive/HiveJdbcCommon.java @@ -24,7 +24,7 @@ import org.apache.avro.generic.GenericDatumWriter; import org.apache.avro.generic.GenericRecord; import org.apache.avro.io.DatumWriter; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/nifi-nar-bundles/nifi-hive-bundle/nifi-hive3-processors/src/test/java/org/apache/nifi/processors/orc/PutORCTest.java b/nifi-nar-bundles/nifi-hive-bundle/nifi-hive3-processors/src/test/java/org/apache/nifi/processors/orc/PutORCTest.java index 552544fe3b6b..3cc6fba05114 100644 --- a/nifi-nar-bundles/nifi-hive-bundle/nifi-hive3-processors/src/test/java/org/apache/nifi/processors/orc/PutORCTest.java +++ b/nifi-nar-bundles/nifi-hive-bundle/nifi-hive3-processors/src/test/java/org/apache/nifi/processors/orc/PutORCTest.java @@ -66,6 +66,8 @@ import java.nio.charset.StandardCharsets; import java.sql.Date; import java.sql.Timestamp; +import java.text.DateFormat; +import java.text.SimpleDateFormat; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; @@ -74,6 +76,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.TimeZone; import java.util.function.BiFunction; import static org.junit.Assert.assertEquals; @@ -247,7 +250,9 @@ public void testWriteORCWithAvroLogicalTypes() throws IOException, Initializatio assertEquals((int) currUser, ((IntWritable) x.get(0)).get()); assertEquals(timeMillis, ((IntWritable) x.get(1)).get()); assertEquals(timestampMillis, ((TimestampWritableV2) x.get(2)).getTimestamp().toSqlTimestamp()); - assertEquals(dt.toLocalDate().toEpochDay(), ((DateWritableV2) x.get(3)).get().toEpochDay()); + final DateFormat noTimeOfDayDateFormat = new SimpleDateFormat("yyyy-MM-dd"); + noTimeOfDayDateFormat.setTimeZone(TimeZone.getTimeZone("gmt")); + assertEquals(noTimeOfDayDateFormat.format(dt), ((DateWritableV2) x.get(3)).get().toString()); assertEquals(dec, ((DoubleWritable) x.get(4)).get(), Double.MIN_VALUE); return null; } diff --git a/nifi-nar-bundles/nifi-hive-bundle/pom.xml b/nifi-nar-bundles/nifi-hive-bundle/pom.xml index c67a26775541..eca835ab4681 100644 --- a/nifi-nar-bundles/nifi-hive-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-hive-bundle/pom.xml @@ -35,6 +35,17 @@ nifi-hive3-nar + + + + + io.netty + netty + 3.7.1.Final + + + + 1.2.1 2.6.2 diff --git a/nifi-nar-bundles/nifi-hl7-bundle/nifi-hl7-processors/pom.xml b/nifi-nar-bundles/nifi-hl7-bundle/nifi-hl7-processors/pom.xml index 2b43a50420a7..fb53d5ce4580 100644 --- a/nifi-nar-bundles/nifi-hl7-bundle/nifi-hl7-processors/pom.xml +++ b/nifi-nar-bundles/nifi-hl7-bundle/nifi-hl7-processors/pom.xml @@ -52,7 +52,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.apache.nifi @@ -66,47 +66,47 @@ ca.uhn.hapi hapi-base - 2.2 + 2.3 
ca.uhn.hapi hapi-structures-v21 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v22 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v23 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v231 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v24 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v25 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v251 - 2.2 + 2.3 ca.uhn.hapi hapi-structures-v26 - 2.2 + 2.3 org.apache.nifi diff --git a/nifi-nar-bundles/nifi-html-bundle/nifi-html-processors/pom.xml b/nifi-nar-bundles/nifi-html-bundle/nifi-html-processors/pom.xml index 06bdf8844a20..25c1143b3c8c 100644 --- a/nifi-nar-bundles/nifi-html-bundle/nifi-html-processors/pom.xml +++ b/nifi-nar-bundles/nifi-html-bundle/nifi-html-processors/pom.xml @@ -34,7 +34,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.apache.nifi diff --git a/nifi-nar-bundles/nifi-ignite-bundle/nifi-ignite-processors/pom.xml b/nifi-nar-bundles/nifi-ignite-bundle/nifi-ignite-processors/pom.xml index 0dc78808f427..029346044abe 100644 --- a/nifi-nar-bundles/nifi-ignite-bundle/nifi-ignite-processors/pom.xml +++ b/nifi-nar-bundles/nifi-ignite-bundle/nifi-ignite-processors/pom.xml @@ -24,7 +24,6 @@ nifi-ignite-processors jar - org.apache.ignite @@ -45,7 +44,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.apache.ignite @@ -81,7 +80,7 @@ com.google.guava guava - 18.0 + 26.0-jre test diff --git a/nifi-nar-bundles/nifi-ignite-bundle/pom.xml b/nifi-nar-bundles/nifi-ignite-bundle/pom.xml index a4ac4bb63efb..eabeb4d3496f 100644 --- a/nifi-nar-bundles/nifi-ignite-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-ignite-bundle/pom.xml @@ -29,9 +29,18 @@ nifi-ignite-processors nifi-ignite-nar - + + 4.3.19.RELEASE + + + org.springframework + spring-framework-bom + ${spring.version} + pom + import + org.apache.nifi nifi-ignite-processors diff --git a/nifi-nar-bundles/nifi-influxdb-bundle/nifi-influxdb-processors/pom.xml b/nifi-nar-bundles/nifi-influxdb-bundle/nifi-influxdb-processors/pom.xml index aad47b903204..9e94c1ac3a04 100644 --- a/nifi-nar-bundles/nifi-influxdb-bundle/nifi-influxdb-processors/pom.xml +++ b/nifi-nar-bundles/nifi-influxdb-bundle/nifi-influxdb-processors/pom.xml @@ -34,7 +34,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.apache.nifi diff --git a/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/pom.xml b/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/pom.xml index 9b711ebfa56f..7ed13ca7d261 100644 --- a/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/pom.xml +++ b/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/pom.xml @@ -41,7 +41,7 @@ org.springframework spring-jms - 4.2.4.RELEASE + 4.3.19.RELEASE commons-logging @@ -51,7 +51,7 @@ org.apache.activemq activemq-client - 5.15.3 + 5.15.6 @@ -64,7 +64,7 @@ org.apache.activemq activemq-broker - 5.15.3 + 5.15.6 test diff --git a/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/JMSConsumer.java b/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/JMSConsumer.java index 809227779daf..a2c73b4e3bba 100644 --- a/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/JMSConsumer.java +++ b/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/JMSConsumer.java @@ -85,10 +85,6 @@ public void consume(final String destinationName, final boolean durable, final b this.jmsTemplate.execute(new SessionCallback() { @Override public Void doInJms(final Session session) throws JMSException { - // We need to call recover to ensure that in in 
the event of - // abrupt end or exception the current session will stop message - // delivery and restarts with the oldest unacknowledged message - session.recover(); final MessageConsumer msgConsumer = createMessageConsumer(session, destinationName, durable, shared, subscriberName); try { @@ -126,6 +122,12 @@ public Void doInJms(final Session session) throws JMSException { // and if CLIENT_ACKNOWLEDGE is set. consumerCallback.accept(response); acknowledge(message, session); + } catch (Exception e) { + // We need to call recover to ensure that in the event of + // abrupt end or exception the current session will stop message + // delivery and restart with the oldest unacknowledged message + session.recover(); + throw e; } finally { JmsUtils.closeMessageConsumer(msgConsumer); } diff --git a/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/JMSPublisher.java b/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/JMSPublisher.java index 9912c81b7417..392157fdea20 100644 --- a/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/JMSPublisher.java +++ b/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/JMSPublisher.java @@ -16,8 +16,11 @@ */ package org.apache.nifi.jms.processors; +import java.util.Arrays; +import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; +import java.util.stream.Collectors; import javax.jms.BytesMessage; import javax.jms.Destination; @@ -78,11 +81,14 @@ public Message createMessage(Session session) throws JMSException { void setMessageHeaderAndProperties(final Message message, final Map flowFileAttributes) throws JMSException { if (flowFileAttributes != null && !flowFileAttributes.isEmpty()) { - for (Entry entry : flowFileAttributes.entrySet()) { + + Map flowFileAttributesToSend = flowFileAttributes.entrySet().stream() + .filter(entry -> !entry.getKey().contains("-") && !entry.getKey().contains(".")) // '-' and '.' are illegal chars in JMS property names + .collect(Collectors.toMap(Entry::getKey, Entry::getValue)); + + for (Entry entry : flowFileAttributesToSend.entrySet()) { try { - if (!entry.getKey().startsWith(JmsHeaders.PREFIX) && !entry.getKey().contains("-") && !entry.getKey().contains(".")) {// '-' and '.' 
are illegal char in JMS prop names - message.setStringProperty(entry.getKey(), entry.getValue()); - } else if (entry.getKey().equals(JmsHeaders.DELIVERY_MODE)) { + if (entry.getKey().equals(JmsHeaders.DELIVERY_MODE)) { message.setJMSDeliveryMode(Integer.parseInt(entry.getValue())); } else if (entry.getKey().equals(JmsHeaders.EXPIRATION)) { message.setJMSExpiration(Integer.parseInt(entry.getValue())); @@ -110,6 +116,11 @@ void setMessageHeaderAndProperties(final Message message, final Map { + message.setBooleanProperty(name, Boolean.parseBoolean(value)); + } ), + BYTE( (message, name, value) -> { + message.setByteProperty(name, Byte.parseByte(value)); + } ), + SHORT( (message, name, value) -> { + message.setShortProperty(name, Short.parseShort(value)); + } ), + INTEGER( (message, name, value) -> { + message.setIntProperty(name, Integer.parseInt(value)); + } ), + LONG( (message, name, value) -> { + message.setLongProperty(name, Long.parseLong(value)); + } ), + FLOAT( (message, name, value) -> { + message.setFloatProperty(name, Float.parseFloat(value)); + } ), + DOUBLE( (message, name, value) -> { + message.setDoubleProperty(name, Double.parseDouble(value)); + } ), + STRING( (message, name, value) -> { + message.setStringProperty(name, value); + } ); + + private final JmsPropertySetter setter; + JmsPropertySetterEnum(JmsPropertySetter setter) { + this.setter = setter; + } + + public void setProperty(Message message, String name, String value) throws JMSException, NumberFormatException { + setter.setProperty(message, name, value); + } + } + + /** + * This map helps us avoid using JmsPropertySetterEnum.valueOf and dealing with IllegalArgumentException on failed lookup. + */ + public static Map propertySetterMap = new HashMap<>(); + static { + Arrays.stream(JmsPropertySetterEnum.values()).forEach(e -> propertySetterMap.put(e.name().toLowerCase(), e)); + } } diff --git a/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/PublishJMS.java b/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/PublishJMS.java index f58b9cfaf795..3afa0f016396 100644 --- a/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/PublishJMS.java +++ b/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/main/java/org/apache/nifi/jms/processors/PublishJMS.java @@ -72,7 +72,11 @@ @ReadsAttribute(attribute = JmsHeaders.TYPE, description = "This attribute becomes the JMSType message header. Must be an integer."), @ReadsAttribute(attribute = JmsHeaders.REPLY_TO, description = "This attribute becomes the JMSReplyTo message header. Must be an integer."), @ReadsAttribute(attribute = JmsHeaders.DESTINATION, description = "This attribute becomes the JMSDestination message header. Must be an integer."), - @ReadsAttribute(attribute = "other attributes", description = "All other attributes that do not start with " + JmsHeaders.PREFIX + " are added as message properties.") + @ReadsAttribute(attribute = "other attributes", description = "All other attributes that do not start with " + JmsHeaders.PREFIX + " are added as message properties."), + @ReadsAttribute(attribute = "other attributes .type", description = "When an attribute will be added as a message property, a second attribute of the same name but with an extra" + + " `.type` at the end will cause the message property to be sent using that strong type. 
For example, attribute `delay` with value `12000` and another attribute" + + " `delay.type` with value `integer` will cause a JMS message property `delay` to be sent as an Integer rather than a String. Supported types are boolean, byte," + + " short, integer, long, float, double, and string (which is the default).") }) @SeeAlso(value = { ConsumeJMS.class, JMSConnectionFactoryProvider.class }) @SystemResourceConsideration(resource = SystemResource.MEMORY) diff --git a/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/test/java/org/apache/nifi/jms/processors/PublishJMSIT.java b/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/test/java/org/apache/nifi/jms/processors/PublishJMSIT.java index a365ad5b755d..fa0bd7a6b916 100644 --- a/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/test/java/org/apache/nifi/jms/processors/PublishJMSIT.java +++ b/nifi-nar-bundles/nifi-jms-bundle/nifi-jms-processors/src/test/java/org/apache/nifi/jms/processors/PublishJMSIT.java @@ -34,6 +34,7 @@ import java.util.Map; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; @@ -180,4 +181,76 @@ public void validatePublishTextMessage() throws Exception { runner.run(1, true, false); // Run once just so that we can trigger the shutdown of the Connection Factory } + + @Test(timeout = 10000) + public void validatePublishPropertyTypes() throws Exception { + ActiveMQConnectionFactory cf = new ActiveMQConnectionFactory("vm://localhost?broker.persistent=false"); + + final String destinationName = "validatePublishPropertyTypes"; + PublishJMS pubProc = new PublishJMS(); + TestRunner runner = TestRunners.newTestRunner(pubProc); + JMSConnectionFactoryProviderDefinition cs = mock(JMSConnectionFactoryProviderDefinition.class); + when(cs.getIdentifier()).thenReturn("cfProvider"); + when(cs.getConnectionFactory()).thenReturn(cf); + + runner.addControllerService("cfProvider", cs); + runner.enableControllerService(cs); + + runner.setProperty(PublishJMS.CF_SERVICE, "cfProvider"); + runner.setProperty(PublishJMS.DESTINATION, destinationName); + + Map attributes = new HashMap<>(); + attributes.put("foo", "foo"); + attributes.put("myboolean", "true"); + attributes.put("myboolean.type", "boolean"); + attributes.put("mybyte", "127"); + attributes.put("mybyte.type", "byte"); + attributes.put("myshort", "16384"); + attributes.put("myshort.type", "short"); + attributes.put("myinteger", "1544000"); + attributes.put("myinteger.type", "INTEGER"); // test upper case + attributes.put("mylong", "9876543210"); + attributes.put("mylong.type", "long"); + attributes.put("myfloat", "3.14"); + attributes.put("myfloat.type", "float"); + attributes.put("mydouble", "3.14159265359"); + attributes.put("mydouble.type", "double"); + attributes.put("badtype", "3.14"); + attributes.put("badtype.type", "pi"); // pi not recognized as a type, so send as String + attributes.put("badint", "3.14"); // value is not an integer + attributes.put("badint.type", "integer"); + + runner.enqueue("Hey dude!".getBytes(), attributes); + runner.run(1, false); // Run once but don't shut down because we want the Connection Factory left intact so that we can use it. 
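+ // The assertions below verify that each attribute with a matching ".type" attribute arrived as the
+ // corresponding strongly typed JMS property, that an unrecognized type name ("pi") falls back to a plain
+ // String property, and that a value which cannot be parsed as its declared type is dropped entirely.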
+ + final MockFlowFile successFF = runner.getFlowFilesForRelationship(PublishJMS.REL_SUCCESS).get(0); + assertNotNull(successFF); + + JmsTemplate jmst = new JmsTemplate(cf); + BytesMessage message = (BytesMessage) jmst.receive(destinationName); + + byte[] messageBytes = MessageBodyToBytesConverter.toBytes(message); + assertEquals("Hey dude!", new String(messageBytes)); + assertEquals(true, message.getObjectProperty("foo") instanceof String); + assertEquals("foo", message.getStringProperty("foo")); + assertEquals(true, message.getObjectProperty("myboolean") instanceof Boolean); + assertEquals(true, message.getBooleanProperty("myboolean")); + assertEquals(true, message.getObjectProperty("mybyte") instanceof Byte); + assertEquals(127, message.getByteProperty("mybyte")); + assertEquals(true, message.getObjectProperty("myshort") instanceof Short); + assertEquals(16384, message.getShortProperty("myshort")); + assertEquals(true, message.getObjectProperty("myinteger") instanceof Integer); + assertEquals(1544000, message.getIntProperty("myinteger")); + assertEquals(true, message.getObjectProperty("mylong") instanceof Long); + assertEquals(9876543210L, message.getLongProperty("mylong")); + assertEquals(true, message.getObjectProperty("myfloat") instanceof Float); + assertEquals(3.14F, message.getFloatProperty("myfloat"), 0.001F); + assertEquals(true, message.getObjectProperty("mydouble") instanceof Double); + assertEquals(3.14159265359D, message.getDoubleProperty("mydouble"), 0.00000000001D); + assertEquals(true, message.getObjectProperty("badtype") instanceof String); + assertEquals("3.14", message.getStringProperty("badtype")); + assertFalse(message.propertyExists("badint")); + + runner.run(1, true, false); // Run once just so that we can trigger the shutdown of the Connection Factory + } } diff --git a/nifi-nar-bundles/nifi-jolt-record-bundle/pom.xml b/nifi-nar-bundles/nifi-jolt-record-bundle/pom.xml index 3a7c4fae363a..b1c0c8d6f4d4 100644 --- a/nifi-nar-bundles/nifi-jolt-record-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-jolt-record-bundle/pom.xml @@ -33,7 +33,7 @@ - 2.9.5 + 2.9.7 2.2.0 0.1.1 2.26 diff --git a/nifi-nar-bundles/nifi-kafka-bundle/pom.xml b/nifi-nar-bundles/nifi-kafka-bundle/pom.xml index 04790c899501..8728fe1f51a5 100644 --- a/nifi-nar-bundles/nifi-kafka-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-kafka-bundle/pom.xml @@ -77,6 +77,12 @@ nifi-kafka-2-0-processors 1.8.0-SNAPSHOT + + + io.netty + netty + 3.7.1.Final + diff --git a/nifi-nar-bundles/nifi-kerberos-iaa-providers-bundle/nifi-kerberos-iaa-providers/pom.xml b/nifi-nar-bundles/nifi-kerberos-iaa-providers-bundle/nifi-kerberos-iaa-providers/pom.xml index 008b0d8d7b96..1c2a0efa2444 100644 --- a/nifi-nar-bundles/nifi-kerberos-iaa-providers-bundle/nifi-kerberos-iaa-providers/pom.xml +++ b/nifi-nar-bundles/nifi-kerberos-iaa-providers-bundle/nifi-kerberos-iaa-providers/pom.xml @@ -23,8 +23,7 @@ nifi-kerberos-iaa-providers jar - 4.3.10.RELEASE - 4.2.4.RELEASE + 4.2.8.RELEASE @@ -53,22 +52,19 @@ org.springframework spring-beans - ${spring.version} org.springframework spring-context - ${spring.version} org.springframework spring-tx - ${spring.version} org.apache.commons commons-lang3 - 3.7 + 3.8.1 nifi-kerberos-iaa-providers diff --git a/nifi-nar-bundles/nifi-kerberos-iaa-providers-bundle/pom.xml b/nifi-nar-bundles/nifi-kerberos-iaa-providers-bundle/pom.xml index 12d2d332beef..7e9578d37e4b 100644 --- a/nifi-nar-bundles/nifi-kerberos-iaa-providers-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-kerberos-iaa-providers-bundle/pom.xml @@ -26,8 
+26,18 @@ nifi-kerberos-iaa-providers nifi-kerberos-iaa-providers-nar + + 4.3.19.RELEASE + + + org.springframework + spring-framework-bom + ${spring.version} + pom + import + org.apache.nifi nifi-kerberos-iaa-providers diff --git a/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-nar/pom.xml b/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-nar/pom.xml index cb954908e3f3..a10f5a9204c8 100644 --- a/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-nar/pom.xml +++ b/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-nar/pom.xml @@ -97,7 +97,7 @@ commons-lang - commons-lang3 + org.apache.commons commons-lang3 diff --git a/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/pom.xml b/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/pom.xml index 9f87b3688291..0ff664a953c3 100644 --- a/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/pom.xml +++ b/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/pom.xml @@ -60,7 +60,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 @@ -280,6 +280,11 @@ nifi-hadoop-utils 1.8.0-SNAPSHOT + + org.apache.commons + commons-text + 1.4 + diff --git a/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/src/main/java/org/apache/nifi/processors/kite/ConvertCSVToAvro.java b/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/src/main/java/org/apache/nifi/processors/kite/ConvertCSVToAvro.java index 3646680c37e2..bacef3b257f1 100644 --- a/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/src/main/java/org/apache/nifi/processors/kite/ConvertCSVToAvro.java +++ b/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/src/main/java/org/apache/nifi/processors/kite/ConvertCSVToAvro.java @@ -29,7 +29,7 @@ import org.apache.avro.Schema; import org.apache.avro.file.DataFileWriter; import org.apache.avro.generic.GenericData.Record; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.nifi.annotation.behavior.InputRequirement; import org.apache.nifi.annotation.behavior.InputRequirement.Requirement; import org.apache.nifi.annotation.documentation.CapabilityDescription; diff --git a/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/src/main/java/org/apache/nifi/processors/kite/InferAvroSchema.java b/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/src/main/java/org/apache/nifi/processors/kite/InferAvroSchema.java index 4344ce099e04..69545dd4f38a 100644 --- a/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/src/main/java/org/apache/nifi/processors/kite/InferAvroSchema.java +++ b/nifi-nar-bundles/nifi-kite-bundle/nifi-kite-processors/src/main/java/org/apache/nifi/processors/kite/InferAvroSchema.java @@ -20,7 +20,7 @@ import org.apache.avro.Schema; import org.apache.commons.lang.StringUtils; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.nifi.annotation.behavior.InputRequirement; import org.apache.nifi.annotation.behavior.ReadsAttribute; import org.apache.nifi.annotation.behavior.ReadsAttributes; diff --git a/nifi-nar-bundles/nifi-kite-bundle/pom.xml b/nifi-nar-bundles/nifi-kite-bundle/pom.xml index e566c2381f68..9911b8596030 100644 --- a/nifi-nar-bundles/nifi-kite-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-kite-bundle/pom.xml @@ -38,6 +38,12 @@ nifi-kite-processors 1.8.0-SNAPSHOT + + + io.netty + netty + 3.6.9.Final + diff --git a/nifi-nar-bundles/nifi-ldap-iaa-providers-bundle/nifi-ldap-iaa-providers/pom.xml b/nifi-nar-bundles/nifi-ldap-iaa-providers-bundle/nifi-ldap-iaa-providers/pom.xml index e6f7c3a43804..167a8f716f72 
100644 --- a/nifi-nar-bundles/nifi-ldap-iaa-providers-bundle/nifi-ldap-iaa-providers/pom.xml +++ b/nifi-nar-bundles/nifi-ldap-iaa-providers-bundle/nifi-ldap-iaa-providers/pom.xml @@ -23,8 +23,7 @@ nifi-ldap-iaa-providers jar - 4.3.10.RELEASE - 4.2.4.RELEASE + 4.2.8.RELEASE @@ -75,22 +74,14 @@ org.springframework spring-beans - ${spring.version} org.springframework spring-context - ${spring.version} - - org.springframework - spring-tx - ${spring.version} - - org.apache.commons - commons-lang3 - 3.7 + org.springframework + spring-tx org.apache.directory.server diff --git a/nifi-nar-bundles/nifi-ldap-iaa-providers-bundle/pom.xml b/nifi-nar-bundles/nifi-ldap-iaa-providers-bundle/pom.xml index 5905cbb0a9c3..c2dbc2f142b1 100644 --- a/nifi-nar-bundles/nifi-ldap-iaa-providers-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-ldap-iaa-providers-bundle/pom.xml @@ -26,8 +26,18 @@ nifi-ldap-iaa-providers nifi-ldap-iaa-providers-nar + + 4.3.19.RELEASE + + + org.springframework + spring-framework-bom + ${spring.version} + pom + import + org.apache.nifi nifi-ldap-iaa-providers diff --git a/nifi-nar-bundles/nifi-media-bundle/nifi-media-processors/pom.xml b/nifi-nar-bundles/nifi-media-bundle/nifi-media-processors/pom.xml index 8fd828edfe53..7166397bf0e5 100644 --- a/nifi-nar-bundles/nifi-media-bundle/nifi-media-processors/pom.xml +++ b/nifi-nar-bundles/nifi-media-bundle/nifi-media-processors/pom.xml @@ -49,7 +49,7 @@ org.apache.tika tika-parsers - 1.17 + 1.19 com.fasterxml.jackson.core @@ -66,12 +66,6 @@ - - - org.apache.commons - commons-compress - 1.16.1 - diff --git a/nifi-nar-bundles/nifi-mongodb-bundle/nifi-mongodb-services/pom.xml b/nifi-nar-bundles/nifi-mongodb-bundle/nifi-mongodb-services/pom.xml index 9bfe27f40a23..f4dfa8f679a8 100644 --- a/nifi-nar-bundles/nifi-mongodb-bundle/nifi-mongodb-services/pom.xml +++ b/nifi-nar-bundles/nifi-mongodb-bundle/nifi-mongodb-services/pom.xml @@ -55,7 +55,7 @@ com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 org.mongodb diff --git a/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/common/MqttTestUtils.java b/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/common/MqttTestUtils.java index 5373a9f61fff..1aa844b6ccdc 100644 --- a/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/common/MqttTestUtils.java +++ b/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/common/MqttTestUtils.java @@ -26,11 +26,11 @@ public class MqttTestUtils { public static Map createSslProperties() { final Map map = new HashMap<>(); - map.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - map.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); + map.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/keystore.jks"); + map.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "passwordpassword"); map.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); - map.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - map.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); + map.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/truststore.jks"); + map.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "passwordpassword"); map.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); return map; } 
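The MQTT SSL integration tests below consume the map returned by MqttTestUtils.createSslProperties() to configure an SSLContextService on the test runner. A minimal sketch of that wiring, assuming the nifi-mock TestRunner API and the ConsumeMQTT processor from this bundle; the "SSL Context Service" property name used here is an assumption for illustration, not taken from this diff:

import java.util.Map;

import org.apache.nifi.processors.mqtt.ConsumeMQTT;
import org.apache.nifi.reporting.InitializationException;
import org.apache.nifi.ssl.SSLContextService;
import org.apache.nifi.ssl.StandardSSLContextService;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;

public class MqttSslWiringSketch {

    public static TestRunner newSslRunner(final Map<String, String> sslProperties) throws InitializationException {
        final TestRunner runner = TestRunners.newTestRunner(ConsumeMQTT.class);

        // Register the SSL context service using the keystore.jks/truststore.jks paths and
        // "passwordpassword" passwords configured in MqttTestUtils.createSslProperties().
        final SSLContextService sslService = new StandardSSLContextService();
        runner.addControllerService("ssl-context", sslService, sslProperties);
        runner.enableControllerService(sslService);

        // Illustrative property name; the real descriptor is defined on the processor itself.
        runner.setProperty("SSL Context Service", "ssl-context");
        return runner;
    }
}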
diff --git a/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/integration/TestConsumeMqttSSL.java b/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/integration/TestConsumeMqttSSL.java index 693c2a9cb348..ccb0eb7e5b52 100644 --- a/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/integration/TestConsumeMqttSSL.java +++ b/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/integration/TestConsumeMqttSSL.java @@ -59,9 +59,9 @@ private void startServer() throws IOException { configProps.put(BrokerConstants.WEB_SOCKET_PORT_PROPERTY_NAME, "1884"); configProps.put(BrokerConstants.SSL_PORT_PROPERTY_NAME, "8883"); - configProps.put(BrokerConstants.JKS_PATH_PROPERTY_NAME, "src/test/resources/localhost-ks.jks"); - configProps.put(BrokerConstants.KEY_STORE_PASSWORD_PROPERTY_NAME, "localtest"); - configProps.put(BrokerConstants.KEY_MANAGER_PASSWORD_PROPERTY_NAME, "localtest"); + configProps.put(BrokerConstants.JKS_PATH_PROPERTY_NAME, "src/test/resources/keystore.jks"); + configProps.put(BrokerConstants.KEY_STORE_PASSWORD_PROPERTY_NAME, "passwordpassword"); + configProps.put(BrokerConstants.KEY_MANAGER_PASSWORD_PROPERTY_NAME, "passwordpassword"); configProps.setProperty(PERSISTENT_STORE_PROPERTY_NAME,"./target/moquette_store.mapdb"); IConfig server_config = new MemoryConfig(configProps); MQTT_server.startServer(server_config); diff --git a/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/integration/TestPublishMqttSSL.java b/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/integration/TestPublishMqttSSL.java index 6270d7a8b880..4d455598b396 100644 --- a/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/integration/TestPublishMqttSSL.java +++ b/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/java/org/apache/nifi/processors/mqtt/integration/TestPublishMqttSSL.java @@ -47,9 +47,9 @@ private void startServer() throws IOException { configProps.put(BrokerConstants.WEB_SOCKET_PORT_PROPERTY_NAME, "1884"); configProps.put(BrokerConstants.SSL_PORT_PROPERTY_NAME, "8883"); - configProps.put(BrokerConstants.JKS_PATH_PROPERTY_NAME, "src/test/resources/localhost-ks.jks"); - configProps.put(BrokerConstants.KEY_STORE_PASSWORD_PROPERTY_NAME, "localtest"); - configProps.put(BrokerConstants.KEY_MANAGER_PASSWORD_PROPERTY_NAME, "localtest"); + configProps.put(BrokerConstants.JKS_PATH_PROPERTY_NAME, "src/test/resources/keystore.jks"); + configProps.put(BrokerConstants.KEY_STORE_PASSWORD_PROPERTY_NAME, "passwordpassword"); + configProps.put(BrokerConstants.KEY_MANAGER_PASSWORD_PROPERTY_NAME, "passwordpassword"); configProps.setProperty(PERSISTENT_STORE_PROPERTY_NAME,"./target/moquette_store.mapdb"); IConfig server_config = new MemoryConfig(configProps); MQTT_server.startServer(server_config); diff --git a/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/resources/keystore.jks b/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/resources/keystore.jks new file mode 100644 index 0000000000000000000000000000000000000000..246fe888efbb981188c9dcca109b5d2c52f2b435 GIT binary patch literal 3088 zcmb`|c{tSF9suz9%_a@TzDvU}%JBY1_7`DFP1&+#U$ZAATQ9;`LzV{F%6MfdJ87|v zE2AhhmLf7Oyo_p0MWJw2_jzyc^W6K~`^Wpod7kq<=RDu@$M<}`XY<|WI|u{<-5$XA 
zvE!7#kH1Qw_qpu_?~N4xO$Y=51D%~@XpW4zpN#`sJCe8iw3r=g#nrL- zc}e<${}VaCYCr4gf{ezT(R=D z-nlpW#qF(FTF*1$2O#WiubmF;Hrxmy6*igNRlfI9l~@uN!qhs~FEEGiMBlUNgZzPX(j)?E&4g~kNMFchr_X$68d zLt}BE%T`bPS@41`N-iwN`9RK`z@zznQIT?}DVA}T$?N6wuhG{w2?-ZIV8Z4AapH~> zM=w+HO5Fp%YPU&g%=71;K{*z_W9CqOV7L|boJ98Vakn zj}6I7pIDmnvrHDGH%#ao7SjJ1`5HddZo{Zee{!RGW`+H+BrM;}xmtv)l}OxJAZh8- zwT`Bb9j|;Yw_=HDi5fgOoK;}HBaAL{SGgCDyYgC&a%im9w+!Rh4OO{I+3JX2BP;2q`9{!Jli?d3(E_2u<*?$NuwN zAx~K%jw`&zdYQkM#CBOGN@Z)7Q<2%226`=Tiu8=+T0z;V zY7d2carE2qJ_kfpxZ~T^YKhz^<)mJ)c!*1P+p8$2xI%eB(zQC(9^*_&h|gtE3?{fC_^JA?rpHl7SEY8Zz?j!0+^9w##-C~jZ9C0k?>zlY?s;}AZqkO zbydnL6y`L;WU);zfw*aVg|DuDb_E^dVtr1h)_rk&z2RQwX?`5pO;@!2(%#Elv zUe?<*C&k;IN?al00~>7(D+{R{ic4NlN$k7d`Kle9?n&9GU&+1B-px6N;$i0b^mLdH z$(AH9DJAw4%+ES<-~96R++imu41fUT@mIn4Vo+wg1TuVZQMj;q`m}DIxpU)D>FgW# zCt@$))iSz4*>BtOaB)yHPFQ#tq@;HhsC0~}gtS`Jb~GUw{o7yR_5m~iY{B6$C~Otv z{uT?tp&;Z(Y6Z9`D2&{({aqpuTrlXLGvG&Rfp4kF|9${JO@A)o_WRi`ApkLd%?d`^ zwVRHvkb+>ylrv{t<84w-ewm=TU#?Km9_vmc&9~O%6%D7U^XiNP=!ZoKGTfS%Z}Nw) zH{x9_j=9q}jQmOY)74O+sC%~#z>AO)@0IaZlNPG}wW`Bg=)g&(tXNyq_!yt+OrN=~ zI($%TXQXiyc7nf@_hT*hgCy0khUrx^=WzesaX3$8g63@cB7^&p{McFHy1vdY==?h9 zr$i9-K5UdfLvmB==xlIx+U1VV&1Bu2@vaB97}@x`Igcs&i$SY;of|i20_+_wEhLNk z+=yMjh0Gcl{PF_zdC}bg6b%M;P1(`c(n3>q$pDAw=cgoWJ+UFRdnX}(x-;8$N7{Br z_{kkw;BRtA_^UgDhJ(-XdZNP~%U{gf7FCwHzJh|Rf_+h9s``swy%q}DLj3H6C zm&5jubG)kdY)^pIY2viya-LOlTTWE{Q#6J~$`{9ISBVxD5%5QXmr|5=0xtV6gXHrVgG0f?qugFkf{fs5ABY4YFOs`kuIZEzZ*rUX}v9X zN><>P7`!CrHo!+=rw-Pd*t`9=BZ6as?F1UlV0w}EQMEuBMHopBcmt14|g&sQmlf^%aznq zUn5%e{qm(@k9XT@{BRcw#{5+cunF?~P=f$r+me1V`5(#sPm|wG|95zQ?aSX?xh;m} z?PMiEdK%D&u3H6GI^rhnY*xAGv<;Iqj>{WtWW@EZletfeN9l*o^*{aEh^`J6S`PPx zb!lA+afuH^Y<;QMQN8n<1{6v(4GYP(NR)PaHRS}EU0EA{9Fv&6>jAFDwB;aj&{_Ji z%^1H5<93JiC5io0Rj&h9D-N^eGlyk6Ijv?(^|B%=o%YPcg*(0Cu1 literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/resources/localhost-ks.jks b/nifi-nar-bundles/nifi-mqtt-bundle/nifi-mqtt-processors/src/test/resources/localhost-ks.jks deleted file mode 100755 index df36197d92ab8e9870f42666d74c47646fd56f26..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3512 zcmchZXH*l&7RQqiLLfv+Xd3T`p&Iir~-L#Y=#ud?{E1Ldil!ycf^TKF)2%4dD_ z&CRl2juv-w;hX`>r;n!ME0*eJZQo{liYbqFr%&s4KWji3-S|{|Q#z3Bi!_n-HQIvn z)_yhbW3OmoReGZ$;mdsOy)j4ml{e?MpM3BXEZ&%y>@=UK++W7rcU+QDvQxbsYBP>C ziqWW_4}oMI2<5S^ml11u$vzs(Bz1QY%@RE`7dI!_J9pQZTH;ai+~*FZ-!&0FO}AsY zOxhC^^;ctKwcW!%@WtyMsu@6xd3zdv(I!8(v5$IseOUHF#yBeb=(KkBD?D*{)a_{6 zy11;ZtH1s5w8!+ewZvnrkKmE%X*#>Ul%b`b!V6_&L1)$_<6^i6k7Bh$Cbm8X7HN40 zS#G)q)jhM1yqIk|ug4$}yr>lNM^7CDi=S{rQqn53pE8J!Vk=?&Q_pATc&ICwBQ zS(^FTsqy1f=9leGJUj=gReI>!b5N4p{xQ7Yh?)gcpugwPJJKnkHLG#|+$oVkg4yV1aO1A$e7 zaQjo^Q#=uo%^bn4wLVp1-Lpy>m3Om-GmM2@#_FNth9W;Io4*MtEVVL^kgC7SFA-we z#qVjp#>O>$RucpY72eI-)`&+06CPE;lJYi4}@3m`# zJ_AU}qlHP&l8^Sxdy9$-4gOUb4UL4637oYGzAr%oZTy>dW-CT`%o3B(duSJ1(e{$Y zM<9UyvWx;+833RQMN{a4(G-wlHXR5E0)ZV>5?#@72%}__LDViB2!zoC&;$$&%?P2h z0z(iWD~mq^C<3ITh2caaj#n5E%ofhx0nUQPL~nPTGlqqB22Ex{K(u_Eac+1F2b%p@ zfFWRi2!bZ=dhQr@H0!ZShxiYx(fr(S%o#KWt$@YIDPiPok3$Sr4*fIyhqIvoh5uR( z+G9aS0kQzl6d)6b0t5omn(X@$hGj=yE`{&~S2Gtia5Gn?EL_(yG|G+K@=fp0D^(rz zxT1R64#p$fx05POs#deg9+l!c8gwhEor|BbmTA)uRlj-gz6)6_cB&4*Tc-M`bK9>c z*H4msFu-a#7iT^GkUgZvxqIcr(X*;=?XWBEh_4N)!@=`Ah5M!kt4cNNSPATwH?AXC zdENd&XqoAr2Dq}BQ6Gnc3D~XB-xhZWLe^fld)&QlbH&rFP$(?%sxBMiB_=cw?r7CH@9Dd8TnkYHTi)yt>lPMf~Qh{TVz-%zd}mpoX@Lx z7dHOF@cCta&Y}DYj>8M>y0uqvg+{1>9qQK_{DUz^17>%6baZre>Zg9-*JTh{JeEgE(Xc$3KCdGsnB0X~&288Q1yu50`xi`1$u zxw%0F{zoTzg?QpaXg#S%Pc}TD&G9sE#r*FN1sL2ia!PT<-siU_xsUiWo{_zcpd9U!Ni)~G 
zLi}%abS2t*$1jmQ&rh~)%FTUKeNh{2;~_;7Z1a$&S<~zN0o(9-C8gCXFPUtQaEi(Ok}L|C$~05J}GOTeZ2`>N!9w z|5?&Yv(xUn4w}Md-)+>Xm-idnwqK!l-ep)3M#!opq&#uM)v4O^f$5XSSy^-7P*&lV zi*Bv9WLRzp8QFh_Sp$75|b~$}d%! zADHN!cN?}Zq;Pfp`_&u3UsSsuum4tHmJnSKKJnFdCJT}j<9dY@Y9;CdG*Uh6JugW| zjszU%k%LnRdK;+FkhCS;r3tV3Qu-?q>U@4Gz20FckyBYJ$a2l5D|g6nnw|8he9Zuw zE>xvKu;5sW8RFB^dtl3__u=TrP;92~^c`S>V6o8(>LDq#2#WbkDhztv-Y+KRxxc_( z9-Ig8g=a}sc!GElV)j`DAZZobG^EycOweBae{tMx(CCHt3QRem*{+4B%V0XzUy$!_ zUZ;}$4v!kJ?fiOsh zU6?00F|Q<1!8boIGdazbS85=;kbaqV>qY`p(FtRc*H!<=v7&I|*F*PwV zGR!y-b78_&{p;J_RLYcZ=UKH^oM-d2R~63QK8sqv6wbQ1c%Aj-tT=16Xl@Dp3*V;e zHf*;EU2s!d?EmGAwL4$*KMm76>RxSI^Y_{r+12XOyBVZ5SkF88wdmZUBCW>1mjpsy z^o8A>D^$57@$5Uk|7*7VJjNZDDg1En^sD7BzpeZg;PKvK$44Vgqc3^M$IC50#>}kV z5b(o}W%EH!_vB=5`RI47^%}8dvO6oHmz@0=8J8WnQn7ZTq?gtGwUN*b>^*j51vd`gXZ}Tj=d=! zZ5Q2p8)B?EgtP6!|DK($dm-WAwXXk9U+SN8m>b$H+55Tn^-f3Qi)|}kFy(38X`WLz zb3tscaO}@TH^6nkgPpdLY>Z2bWWLj})^PvwNNvp0VmYkR- zC$rcPs*X#TBPg{vHL)l;!%)D052TJ;m^~!5xFj<#9cRWgF)#(@MiUbYLkm#GG%*0? zP$-w~?rCD&0nCOvupnUsa^#sB8yWuA2RF)=3TX!2_nM>k=E?JKg4=^^-n%dyma}iz z7O0l#8tb4G_&d_JH{#d+qhEI!d^66fYKrV z-7ByE+kQ;?hjsY#V=I=4^0WMI{&xAO++ilu5aB4XiALW_Kd;kHysq{BlM=J!+>0KJ z$C*SKrY8jSiz;)U*)(Zq)1ucc+#e!jzJi?g{o#VvYqM?do!+xL#%xFU&dMq4cmJ|_ z)$}vmhufCDDLpVUyl>Z)NdISr>;jDqTRg=Im0$R1hzV~$&uP?iV%b9*v8wKnnqG|u zi`U6%Z(de9F>i4__b)}$q>sOosu)$Q&n)@4Z$;pQ&K1q~A4WZ$&o-$$Ev}(DR5gXs z$NJxSPc7!gRtAte7ABjPohmimJL!w=83*1S57xEkb0m5`p0y|T%0y91?Xr*$k!KcN z@qQxIFmK}r4~|)iTkLXzMLu+2k#TdI871R(!(9`2~U67z}vXIJMe5+P?ELGIFyr7$_U^8gR2Qhq7?`|G6{iVN=9$4_qR+voW1o`bX1kq_?@ zm7?Yu-O|hoDsa9N@MUVyrd<=O*InmV-0wL}LeXNm*vwgmI&)i2xoutU%O^gq+-#-h zgcU!vCxtetZirTB`cPrNOwe`BjfgY1S)#*|1p91Qo|habzwWRw{qs>-^K}Z`hO4)3 zSIUzY)UVZ%QmoRgXzEd9>P=X}&%!9YU~gQOux6db!Fj1Vv2}&_t)uD|R&4O>6_I{d zsCSIx%+#5m7tCg~+3?%uOvdTe*F$Rt zebbjM$oo^i)iz_A4D$trP*WymMh3>k^#-*De832h{z<6{wH5fM3WFu8l*4Bb6fbc?q7ui3Ma?czxTd62X+i-dt#19k;q{i-Zt1|n=6 z+H8!htnAE8a26wS6amu*Fp3x%;$M|c<$Br5aA-Zh`^uo~Wl`!k!$N+rAKm}gt?NVV zYn6)fmHTq0T%UdUsC(tf>wh-zugDkvQkKrV&wNr*Y-2X-|KIB@^>$R5GBG*(lon0! zZ;^?AbUT{i$=9D(FVwb7kUOEi+Gn#X+u0bI3n5nobT4k-BQr_T{`T_6SwEI~yi@Kw zFTeN0R^RvQOEeC9N z9oJ^^ymM}2j4=Bzl|5_bntl+HlHs5ImG#RU$C=4?=Kr%5`)8e9xTbMSRd_+e)hDMV z{$6UT4g6+eRVVNyVx3Chngx@VtPkMaA?X>r^jGNWKqZspMPe?;zuNh^J${uwG4xsu E0JdaNg#Z8m literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-network-bundle/nifi-network-processors/pom.xml b/nifi-nar-bundles/nifi-network-bundle/nifi-network-processors/pom.xml index 3491cd989efe..9996ea9904ca 100644 --- a/nifi-nar-bundles/nifi-network-bundle/nifi-network-processors/pom.xml +++ b/nifi-nar-bundles/nifi-network-bundle/nifi-network-processors/pom.xml @@ -44,7 +44,7 @@ com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 org.apache.nifi diff --git a/nifi-nar-bundles/nifi-network-bundle/nifi-network-utils/pom.xml b/nifi-nar-bundles/nifi-network-bundle/nifi-network-utils/pom.xml index 0a72a1c0d5b4..55f0277d086d 100644 --- a/nifi-nar-bundles/nifi-network-bundle/nifi-network-utils/pom.xml +++ b/nifi-nar-bundles/nifi-network-bundle/nifi-network-utils/pom.xml @@ -14,30 +14,30 @@ limitations under the License. 
--> - - nifi-network-bundle - org.apache.nifi - 1.8.0-SNAPSHOT - - 4.0.0 - nifi-network-utils - jar - - - com.fasterxml.jackson.core - jackson-databind - 2.9.5 - - - org.slf4j - slf4j-simple - test - - - junit - junit - test - - + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + + nifi-network-bundle + org.apache.nifi + 1.8.0-SNAPSHOT + + 4.0.0 + nifi-network-utils + jar + + + com.fasterxml.jackson.core + jackson-databind + 2.9.7 + + + org.slf4j + slf4j-simple + test + + + junit + junit + test + + \ No newline at end of file diff --git a/nifi-nar-bundles/nifi-parquet-bundle/nifi-parquet-processors/pom.xml b/nifi-nar-bundles/nifi-parquet-bundle/nifi-parquet-processors/pom.xml index 57eed83f90ac..c8aa0edb0607 100644 --- a/nifi-nar-bundles/nifi-parquet-bundle/nifi-parquet-processors/pom.xml +++ b/nifi-nar-bundles/nifi-parquet-bundle/nifi-parquet-processors/pom.xml @@ -82,7 +82,6 @@ test - diff --git a/nifi-nar-bundles/nifi-parquet-bundle/pom.xml b/nifi-nar-bundles/nifi-parquet-bundle/pom.xml index 0ed219a59d12..ad80dc480073 100644 --- a/nifi-nar-bundles/nifi-parquet-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-parquet-bundle/pom.xml @@ -31,5 +31,14 @@ nifi-parquet-processors nifi-parquet-nar - + + + + + io.netty + netty + 3.6.9.Final + + + diff --git a/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/pom.xml b/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/pom.xml index 3de45c9dbfd5..ccc689207051 100644 --- a/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/pom.xml +++ b/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/pom.xml @@ -17,7 +17,7 @@ 4.0.0 - 3.17 + 4.0.0 diff --git a/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/src/main/java/org/apache/nifi/processors/poi/ConvertExcelToCSVProcessor.java b/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/src/main/java/org/apache/nifi/processors/poi/ConvertExcelToCSVProcessor.java index 7a762f4b718f..51abc27f955a 100644 --- a/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/src/main/java/org/apache/nifi/processors/poi/ConvertExcelToCSVProcessor.java +++ b/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/src/main/java/org/apache/nifi/processors/poi/ConvertExcelToCSVProcessor.java @@ -55,7 +55,7 @@ import org.apache.poi.ss.usermodel.DataFormatter; import org.apache.poi.ss.util.CellAddress; import org.apache.poi.ss.util.CellReference; -import org.apache.poi.util.SAXHelper; +import org.apache.poi.ooxml.util.SAXHelper; import org.apache.poi.xssf.eventusermodel.ReadOnlySharedStringsTable; import org.apache.poi.xssf.eventusermodel.XSSFReader; import org.apache.poi.xssf.eventusermodel.XSSFSheetXMLHandler; diff --git a/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/src/test/java/org/apache/nifi/processors/poi/ConvertExcelToCSVProcessorTest.java b/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/src/test/java/org/apache/nifi/processors/poi/ConvertExcelToCSVProcessorTest.java index 9999ac568839..afcb28a44f65 100644 --- a/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/src/test/java/org/apache/nifi/processors/poi/ConvertExcelToCSVProcessorTest.java +++ b/nifi-nar-bundles/nifi-poi-bundle/nifi-poi-processors/src/test/java/org/apache/nifi/processors/poi/ConvertExcelToCSVProcessorTest.java @@ -20,6 +20,8 @@ import static org.junit.Assert.assertTrue; import java.io.File; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -122,13 +124,14 @@ public void testQuoting() 
throws Exception { Long rowsSheet = new Long(ff.getAttribute(ConvertExcelToCSVProcessor.ROW_NUM)); assertTrue(rowsSheet == 9); + LocalDateTime localDt = LocalDateTime.of(2017, 1, 1, 12, 0, 0); ff.assertContentEquals("Numbers,Timestamps,Money\n" + - "1234.456,1/1/17,$ 123.45\n" + - "1234.46,12:00:00 PM,£ 123.45\n" + - "1234.5,\"Sunday, January 01, 2017\",¥ 123.45\n" + - "\"1,234.46\",1/1/17 12:00,\"$ 1,023.45\"\n" + - "\"1,234.4560\",12:00 PM,\"£ 1,023.45\"\n" + - "9.88E+08,2017/01/01/ 12:00,\"¥ 1,023.45\"\n" + + "1234.456," + DateTimeFormatter.ofPattern("d/M/yy").format(localDt) + ",$ 123.45\n" + + "1234.46," + DateTimeFormatter.ofPattern("hh:mm:ss a").format(localDt) + ",£ 123.45\n" + + "1234.5,\"" + DateTimeFormatter.ofPattern("EEEE, MMMM dd, yyyy").format(localDt) + "\",¥ 123.45\n" + + "\"1,234.46\"," + DateTimeFormatter.ofPattern("d/M/yy HH:mm").format(localDt) + ",\"$ 1,023.45\"\n" + + "\"1,234.4560\"," + DateTimeFormatter.ofPattern("hh:mm a").format(localDt) + ",\"£ 1,023.45\"\n" + + "9.88E+08," + DateTimeFormatter.ofPattern("yyyy/MM/dd/ HH:mm").format(localDt) + ",\"¥ 1,023.45\"\n" + "9.877E+08,,\n" + "9.8765E+08,,\n"); } @@ -150,11 +153,12 @@ public void testSkipRows() throws Exception { Long rowsSheet = new Long(ff.getAttribute(ConvertExcelToCSVProcessor.ROW_NUM)); assertEquals("Row count does match expected value.", "7", rowsSheet.toString()); - ff.assertContentEquals("1234.46,12:00:00 PM,£ 123.45\n" + - "1234.5,Sunday\\, January 01\\, 2017,¥ 123.45\n" + - "1\\,234.46,1/1/17 12:00,$ 1\\,023.45\n" + - "1\\,234.4560,12:00 PM,£ 1\\,023.45\n" + - "9.88E+08,2017/01/01/ 12:00,¥ 1\\,023.45\n" + + LocalDateTime localDt = LocalDateTime.of(2017, 1, 1, 12, 0, 0); + ff.assertContentEquals("1234.46," + DateTimeFormatter.ofPattern("hh:mm:ss a").format(localDt) + ",£ 123.45\n" + + "1234.5," + DateTimeFormatter.ofPattern("EEEE\\, MMMM dd\\, yyyy").format(localDt) + ",¥ 123.45\n" + + "1\\,234.46," + DateTimeFormatter.ofPattern("d/M/yy HH:mm").format(localDt) + ",$ 1\\,023.45\n" + + "1\\,234.4560," + DateTimeFormatter.ofPattern("hh:mm a").format(localDt) + ",£ 1\\,023.45\n" + + "9.88E+08," + DateTimeFormatter.ofPattern("yyyy/MM/dd/ HH:mm").format(localDt) + ",¥ 1\\,023.45\n" + "9.877E+08,,\n" + "9.8765E+08,,\n"); } @@ -178,11 +182,12 @@ public void testSkipRowsWithEL() throws Exception { Long rowsSheet = new Long(ff.getAttribute(ConvertExcelToCSVProcessor.ROW_NUM)); assertEquals("Row count does match expected value.", "7", rowsSheet.toString()); - ff.assertContentEquals("1234.46,12:00:00 PM,£ 123.45\n" + - "1234.5,Sunday\\, January 01\\, 2017,¥ 123.45\n" + - "1\\,234.46,1/1/17 12:00,$ 1\\,023.45\n" + - "1\\,234.4560,12:00 PM,£ 1\\,023.45\n" + - "9.88E+08,2017/01/01/ 12:00,¥ 1\\,023.45\n" + + LocalDateTime localDt = LocalDateTime.of(2017, 1, 1, 12, 0, 0); + ff.assertContentEquals("1234.46," + DateTimeFormatter.ofPattern("hh:mm:ss a").format(localDt) + ",£ 123.45\n" + + "1234.5," + DateTimeFormatter.ofPattern("EEEE\\, MMMM dd\\, yyyy").format(localDt) + ",¥ 123.45\n" + + "1\\,234.46," + DateTimeFormatter.ofPattern("d/M/yy HH:mm").format(localDt) + ",$ 1\\,023.45\n" + + "1\\,234.4560," + DateTimeFormatter.ofPattern("hh:mm a").format(localDt) + ",£ 1\\,023.45\n" + + "9.88E+08," + DateTimeFormatter.ofPattern("yyyy/MM/dd/ HH:mm").format(localDt) + ",¥ 1\\,023.45\n" + "9.877E+08,,\n" + "9.8765E+08,,\n"); } @@ -263,13 +268,14 @@ public void testCustomDelimiters() throws Exception { Long rowsSheet = new Long(ff.getAttribute(ConvertExcelToCSVProcessor.ROW_NUM)); assertTrue(rowsSheet == 9); + LocalDateTime 
localDt = LocalDateTime.of(2017, 1, 1, 12, 0, 0); ff.assertContentEquals("Numbers|Timestamps|Money\r\n" + - "1234.456|1/1/17|$ 123.45\r\n" + - "1234.46|12:00:00 PM|£ 123.45\r\n" + - "1234.5|Sunday, January 01, 2017|¥ 123.45\r\n" + - "1,234.46|1/1/17 12:00|$ 1,023.45\r\n" + - "1,234.4560|12:00 PM|£ 1,023.45\r\n" + - "9.88E+08|2017/01/01/ 12:00|¥ 1,023.45\r\n" + + "1234.456|" + DateTimeFormatter.ofPattern("d/M/yy").format(localDt) + "|$ 123.45\r\n" + + "1234.46|" + DateTimeFormatter.ofPattern("hh:mm:ss a").format(localDt) + "|£ 123.45\r\n" + + "1234.5|" + DateTimeFormatter.ofPattern("EEEE, MMMM dd, yyyy").format(localDt) + "|¥ 123.45\r\n" + + "1,234.46|" + DateTimeFormatter.ofPattern("d/M/yy HH:mm").format(localDt) + "|$ 1,023.45\r\n" + + "1,234.4560|" + DateTimeFormatter.ofPattern("hh:mm a").format(localDt) + "|£ 1,023.45\r\n" + + "9.88E+08|" + DateTimeFormatter.ofPattern("yyyy/MM/dd/ HH:mm").format(localDt) + "|¥ 1,023.45\r\n" + "9.877E+08||\r\n" + "9.8765E+08||\r\n"); } diff --git a/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/groovy/org/apache/nifi/provenance/EncryptedWriteAheadProvenanceRepositoryTest.groovy b/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/groovy/org/apache/nifi/provenance/EncryptedWriteAheadProvenanceRepositoryTest.groovy index 582a80518862..ce1ebc7d8938 100644 --- a/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/groovy/org/apache/nifi/provenance/EncryptedWriteAheadProvenanceRepositoryTest.groovy +++ b/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/groovy/org/apache/nifi/provenance/EncryptedWriteAheadProvenanceRepositoryTest.groovy @@ -29,6 +29,7 @@ import org.junit.Before import org.junit.BeforeClass import org.junit.ClassRule import org.junit.Test +import org.junit.Ignore import org.junit.rules.TemporaryFolder import org.junit.runner.RunWith import org.junit.runners.JUnit4 @@ -249,6 +250,7 @@ class EncryptedWriteAheadProvenanceRepositoryTest { } @Test + @Ignore("test is unstable. 
NIFI-5624 to improve it") void testShouldRegisterAndGetEvent() { // Arrange diff --git a/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/java/org/apache/nifi/provenance/TestPersistentProvenanceRepository.java b/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/java/org/apache/nifi/provenance/ITestPersistentProvenanceRepository.java similarity index 99% rename from nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/java/org/apache/nifi/provenance/TestPersistentProvenanceRepository.java rename to nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/java/org/apache/nifi/provenance/ITestPersistentProvenanceRepository.java index f031710e58f5..db87c97384d7 100644 --- a/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/java/org/apache/nifi/provenance/TestPersistentProvenanceRepository.java +++ b/nifi-nar-bundles/nifi-provenance-repository-bundle/nifi-persistent-provenance-repository/src/test/java/org/apache/nifi/provenance/ITestPersistentProvenanceRepository.java @@ -102,7 +102,7 @@ import static org.junit.Assume.assumeFalse; import static org.mockito.Mockito.mock; -public class TestPersistentProvenanceRepository { +public class ITestPersistentProvenanceRepository { @Rule public TestName name = new TestName(); diff --git a/nifi-nar-bundles/nifi-provenance-repository-bundle/pom.xml b/nifi-nar-bundles/nifi-provenance-repository-bundle/pom.xml index 39e41ffb3382..517622b00aeb 100644 --- a/nifi-nar-bundles/nifi-provenance-repository-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-provenance-repository-bundle/pom.xml @@ -59,12 +59,12 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.bouncycastle bcprov-jdk15on - 1.59 + 1.60 diff --git a/nifi-nar-bundles/nifi-redis-bundle/nifi-redis-extensions/pom.xml b/nifi-nar-bundles/nifi-redis-bundle/nifi-redis-extensions/pom.xml index 045dc4d8a007..d7e8b07d13b6 100644 --- a/nifi-nar-bundles/nifi-redis-bundle/nifi-redis-extensions/pom.xml +++ b/nifi-nar-bundles/nifi-redis-bundle/nifi-redis-extensions/pom.xml @@ -54,7 +54,7 @@ com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 org.apache.nifi diff --git a/nifi-nar-bundles/nifi-redis-bundle/pom.xml b/nifi-nar-bundles/nifi-redis-bundle/pom.xml index c9f25bfce3f1..449f5feea517 100644 --- a/nifi-nar-bundles/nifi-redis-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-redis-bundle/pom.xml @@ -27,7 +27,7 @@ pom - 2.0.8.RELEASE + 2.1.0.RELEASE diff --git a/nifi-nar-bundles/nifi-registry-bundle/nifi-registry-service/pom.xml b/nifi-nar-bundles/nifi-registry-bundle/nifi-registry-service/pom.xml index 7138820fe0f8..57515ce0b77d 100644 --- a/nifi-nar-bundles/nifi-registry-bundle/nifi-registry-service/pom.xml +++ b/nifi-nar-bundles/nifi-registry-bundle/nifi-registry-service/pom.xml @@ -48,7 +48,7 @@ language governing permissions and limitations under the License. 
--> org.apache.commons commons-lang3 - 3.7 + 3.8.1 commons-io diff --git a/nifi-nar-bundles/nifi-rethinkdb-bundle/nifi-rethinkdb-processors/pom.xml b/nifi-nar-bundles/nifi-rethinkdb-bundle/nifi-rethinkdb-processors/pom.xml index 330b78e0549f..e451fa06e841 100644 --- a/nifi-nar-bundles/nifi-rethinkdb-bundle/nifi-rethinkdb-processors/pom.xml +++ b/nifi-nar-bundles/nifi-rethinkdb-bundle/nifi-rethinkdb-processors/pom.xml @@ -49,7 +49,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 com.google.code.gson diff --git a/nifi-nar-bundles/nifi-riemann-bundle/nifi-riemann-processors/pom.xml b/nifi-nar-bundles/nifi-riemann-bundle/nifi-riemann-processors/pom.xml index 3502816c4ffe..b3a72f726263 100644 --- a/nifi-nar-bundles/nifi-riemann-bundle/nifi-riemann-processors/pom.xml +++ b/nifi-nar-bundles/nifi-riemann-bundle/nifi-riemann-processors/pom.xml @@ -44,7 +44,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.apache.nifi diff --git a/nifi-nar-bundles/nifi-riemann-bundle/pom.xml b/nifi-nar-bundles/nifi-riemann-bundle/pom.xml index 986325a100d1..42532f709aad 100644 --- a/nifi-nar-bundles/nifi-riemann-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-riemann-bundle/pom.xml @@ -37,13 +37,19 @@ com.aphyr riemann-java-client - 0.4.0 + 0.4.1 org.apache.nifi nifi-riemann-processors 1.8.0-SNAPSHOT + + + io.netty + netty + 3.6.9.Final + \ No newline at end of file diff --git a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-nar/src/main/resources/META-INF/NOTICE b/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-nar/src/main/resources/META-INF/NOTICE index 3e3622be6599..7495cb6a2604 100644 --- a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-nar/src/main/resources/META-INF/NOTICE +++ b/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-nar/src/main/resources/META-INF/NOTICE @@ -73,6 +73,20 @@ The following binary components are provided under the Apache Software License v The original software and related information is available at http://www.jcraft.com/jsch/. + (ASLv2) Apache Xerces Java + The following NOTICE information applies: + Copyright 1999-2018 The Apache Software Foundation + + This product includes software developed at + The Apache Software Foundation (http://www.apache.org/). + + Portions of this software were originally based on the following: + - software copyright (c) 1999, IBM Corporation., http://www.ibm.com. + - software copyright (c) 1999, Sun Microsystems., http://www.sun.com. + - voluntary contributions made by Paul Eng on behalf of the + Apache Software Foundation that were originally developed at iClick, Inc., + software copyright (c) 1999. 
+ ****************** Eclipse Public License v1.0 ****************** diff --git a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/pom.xml b/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/pom.xml index af104b2b2990..91ed1ffd9542 100644 --- a/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/pom.xml +++ b/nifi-nar-bundles/nifi-scripting-bundle/nifi-scripting-processors/pom.xml @@ -69,7 +69,7 @@ org.jruby jruby-complete - 9.1.9.0 + 9.1.17.0 org.clojure @@ -79,7 +79,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 commons-io @@ -96,6 +96,12 @@ metrics-core 2.2.0 + + + xerces + xercesImpl + 2.12.0 + org.apache.nifi nifi-mock diff --git a/nifi-nar-bundles/nifi-scripting-bundle/pom.xml b/nifi-nar-bundles/nifi-scripting-bundle/pom.xml index d2d11221167b..f006e64993f9 100644 --- a/nifi-nar-bundles/nifi-scripting-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-scripting-bundle/pom.xml @@ -64,7 +64,7 @@ org.codehaus.groovy groovy-all - 2.4.5 + 2.4.15 + io.netty + netty + 3.7.1.Final + @@ -81,6 +87,5 @@ nifi-processor-utils 1.8.0-SNAPSHOT - \ No newline at end of file diff --git a/nifi-nar-bundles/nifi-splunk-bundle/pom.xml b/nifi-nar-bundles/nifi-splunk-bundle/pom.xml index 0f422d726e45..a792d5b65fc4 100644 --- a/nifi-nar-bundles/nifi-splunk-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-splunk-bundle/pom.xml @@ -51,7 +51,7 @@ com.splunk splunk - 1.6.3.0 + 1.6.4.0 diff --git a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/pom.xml b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/pom.xml index 9b6fb6df917e..e49bf4ad4293 100644 --- a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/pom.xml +++ b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/pom.xml @@ -22,13 +22,13 @@ org.springframework spring-messaging - 4.2.4.RELEASE + 4.3.19.RELEASE provided org.apache.commons commons-lang3 - 3.7 + 3.8.1 commons-io @@ -59,13 +59,13 @@ org.springframework.integration spring-integration-core - 4.2.4.RELEASE + 4.3.17.RELEASE test org.springframework.integration spring-integration-event - 4.2.4.RELEASE + 4.3.17.RELEASE test diff --git a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/main/resources/docs/org.apache.nifi.spring.SpringContextProcessor/additionalDetails.html b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/main/resources/docs/org.apache.nifi.spring.SpringContextProcessor/additionalDetails.html index de2d9e30993e..5c67eeca5e7d 100644 --- a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/main/resources/docs/org.apache.nifi.spring.SpringContextProcessor/additionalDetails.html +++ b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/main/resources/docs/org.apache.nifi.spring.SpringContextProcessor/additionalDetails.html @@ -68,13 +68,13 @@

Description:

├── SI_DEMO-0.0.1-SNAPSHOT.jar ├── aopalliance-1.0.jar ├── commons-logging-1.2.jar - ├── spring-aop-4.2.4.RELEASE.jar - ├── spring-beans-4.2.4.RELEASE.jar - ├── spring-context-4.2.4.RELEASE.jar - ├── spring-core-4.2.4.RELEASE.jar - ├── spring-expression-4.2.4.RELEASE.jar - ├── spring-integration-core-4.2.5.RELEASE.jar - ├── spring-messaging-4.2.4.RELEASE.jar + ├── spring-aop-4.3.19.RELEASE.jar + ├── spring-beans-4.3.19.RELEASE.jar + ├── spring-context-4.3.19.RELEASE.jar + ├── spring-core-4.3.19.RELEASE.jar + ├── spring-expression-4.3.19.RELEASE.jar + ├── spring-integration-core-4.3.17.RELEASE.jar + ├── spring-messaging-4.3.19.RELEASE.jar

diff --git a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/aggregated.xml b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/aggregated.xml index 66c71887b73c..f4c6db44a661 100644 --- a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/aggregated.xml +++ b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/aggregated.xml @@ -10,7 +10,7 @@ + http://www.springframework.org/schema/integration http://www.springframework.org/schema/integration/spring-integration.xsd"> diff --git a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/fromSpringOnly.xml b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/fromSpringOnly.xml index 46675792d065..fa8f743ac92d 100644 --- a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/fromSpringOnly.xml +++ b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/fromSpringOnly.xml @@ -11,8 +11,8 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:int="http://www.springframework.org/schema/integration" xmlns:int-event="http://www.springframework.org/schema/integration/event" xsi:schemaLocation="http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd - http://www.springframework.org/schema/integration/event http://www.springframework.org/schema/integration/event/spring-integration-event-4.2.xsd - http://www.springframework.org/schema/integration http://www.springframework.org/schema/integration/spring-integration-4.2.xsd"> + http://www.springframework.org/schema/integration/event http://www.springframework.org/schema/integration/event/spring-integration-event.xsd + http://www.springframework.org/schema/integration http://www.springframework.org/schema/integration/spring-integration.xsd"> diff --git a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/requestReply.xml b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/requestReply.xml index 4bbc0b582127..d14efd0f51d5 100644 --- a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/requestReply.xml +++ b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/requestReply.xml @@ -10,7 +10,7 @@ + http://www.springframework.org/schema/integration http://www.springframework.org/schema/integration/spring-integration.xsd"> diff --git a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/toSpringOnly.xml b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/toSpringOnly.xml index 2167e3e48c64..fe505e9fe034 100644 --- a/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/toSpringOnly.xml +++ b/nifi-nar-bundles/nifi-spring-bundle/nifi-spring-processors/src/test/resources/toSpringOnly.xml @@ -10,7 +10,7 @@ + http://www.springframework.org/schema/integration http://www.springframework.org/schema/integration/spring-integration.xsd"> diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractExecuteSQL.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractExecuteSQL.java new file mode 100644 index 000000000000..bf46549807aa --- /dev/null +++ 
b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractExecuteSQL.java @@ -0,0 +1,369 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.processors.standard; + +import org.apache.commons.io.IOUtils; +import org.apache.nifi.annotation.lifecycle.OnScheduled; +import org.apache.nifi.components.PropertyDescriptor; +import org.apache.nifi.dbcp.DBCPService; +import org.apache.nifi.expression.ExpressionLanguageScope; +import org.apache.nifi.flowfile.FlowFile; +import org.apache.nifi.flowfile.attributes.CoreAttributes; +import org.apache.nifi.flowfile.attributes.FragmentAttributes; +import org.apache.nifi.logging.ComponentLog; +import org.apache.nifi.processor.AbstractProcessor; +import org.apache.nifi.processor.ProcessContext; +import org.apache.nifi.processor.ProcessSession; +import org.apache.nifi.processor.Relationship; +import org.apache.nifi.processor.exception.ProcessException; +import org.apache.nifi.processor.util.StandardValidators; +import org.apache.nifi.processors.standard.sql.SqlWriter; +import org.apache.nifi.processors.standard.util.JdbcCommon; +import org.apache.nifi.util.StopWatch; + +import java.nio.charset.Charset; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + + +public abstract class AbstractExecuteSQL extends AbstractProcessor { + + public static final String RESULT_ROW_COUNT = "executesql.row.count"; + public static final String RESULT_QUERY_DURATION = "executesql.query.duration"; + public static final String RESULT_QUERY_EXECUTION_TIME = "executesql.query.executiontime"; + public static final String RESULT_QUERY_FETCH_TIME = "executesql.query.fetchtime"; + public static final String RESULTSET_INDEX = "executesql.resultset.index"; + + public static final String FRAGMENT_ID = FragmentAttributes.FRAGMENT_ID.key(); + public static final String FRAGMENT_INDEX = FragmentAttributes.FRAGMENT_INDEX.key(); + public static final String FRAGMENT_COUNT = FragmentAttributes.FRAGMENT_COUNT.key(); + + // Relationships + public static final Relationship REL_SUCCESS = new Relationship.Builder() + .name("success") + .description("Successfully created FlowFile from SQL query result set.") + .build(); + public static final Relationship REL_FAILURE = new Relationship.Builder() + .name("failure") + .description("SQL query execution failed. 
Incoming FlowFile will be penalized and routed to this relationship") + .build(); + protected Set relationships; + + public static final PropertyDescriptor DBCP_SERVICE = new PropertyDescriptor.Builder() + .name("Database Connection Pooling Service") + .description("The Controller Service that is used to obtain connection to database") + .required(true) + .identifiesControllerService(DBCPService.class) + .build(); + + public static final PropertyDescriptor SQL_SELECT_QUERY = new PropertyDescriptor.Builder() + .name("SQL select query") + .description("The SQL select query to execute. The query can be empty, a constant value, or built from attributes " + + "using Expression Language. If this property is specified, it will be used regardless of the content of " + + "incoming flowfiles. If this property is empty, the content of the incoming flow file is expected " + + "to contain a valid SQL select query, to be issued by the processor to the database. Note that Expression " + + "Language is not evaluated for flow file contents.") + .required(false) + .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) + .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES) + .build(); + + public static final PropertyDescriptor QUERY_TIMEOUT = new PropertyDescriptor.Builder() + .name("Max Wait Time") + .description("The maximum amount of time allowed for a running SQL select query " + + " , zero means there is no limit. Max time less than 1 second will be equal to zero.") + .defaultValue("0 seconds") + .required(true) + .addValidator(StandardValidators.TIME_PERIOD_VALIDATOR) + .sensitive(false) + .build(); + + public static final PropertyDescriptor MAX_ROWS_PER_FLOW_FILE = new PropertyDescriptor.Builder() + .name("esql-max-rows") + .displayName("Max Rows Per Flow File") + .description("The maximum number of result rows that will be included in a single FlowFile. This will allow you to break up very large " + + "result sets into multiple FlowFiles. If the value specified is zero, then all rows are returned in a single FlowFile.") + .defaultValue("0") + .required(true) + .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) + .build(); + + public static final PropertyDescriptor OUTPUT_BATCH_SIZE = new PropertyDescriptor.Builder() + .name("esql-output-batch-size") + .displayName("Output Batch Size") + .description("The number of output FlowFiles to queue before committing the process session. When set to zero, the session will be committed when all result set rows " + + "have been processed and the output FlowFiles are ready for transfer to the downstream relationship. For large result sets, this can cause a large burst of FlowFiles " + + "to be transferred at the end of processor execution. If this property is set, then when the specified number of FlowFiles are ready for transfer, then the session will " + + "be committed, thus releasing the FlowFiles to the downstream relationship. 
NOTE: The fragment.count attribute will not be set on FlowFiles when this " + + "property is set.") + .defaultValue("0") + .required(true) + .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) + .build(); + + protected List propDescriptors; + + protected DBCPService dbcpService; + + @Override + public Set getRelationships() { + return relationships; + } + + @Override + protected List getSupportedPropertyDescriptors() { + return propDescriptors; + } + + @OnScheduled + public void setup(ProcessContext context) { + // If the query is not set, then an incoming flow file is needed. Otherwise fail the initialization + if (!context.getProperty(SQL_SELECT_QUERY).isSet() && !context.hasIncomingConnection()) { + final String errorString = "Either the Select Query must be specified or there must be an incoming connection " + + "providing flowfile(s) containing a SQL select query"; + getLogger().error(errorString); + throw new ProcessException(errorString); + } + dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class); + + } + + @Override + public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException { + FlowFile fileToProcess = null; + if (context.hasIncomingConnection()) { + fileToProcess = session.get(); + + // If we have no FlowFile, and all incoming connections are self-loops then we can continue on. + // However, if we have no FlowFile and we have connections coming from other Processors, then + // we know that we should run only if we have a FlowFile. + if (fileToProcess == null && context.hasNonLoopConnection()) { + return; + } + } + + final List resultSetFlowFiles = new ArrayList<>(); + + final ComponentLog logger = getLogger(); + final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue(); + final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE).evaluateAttributeExpressions().asInteger(); + final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions().asInteger(); + final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField; + + SqlWriter sqlWriter = configureSqlWriter(session, context, fileToProcess); + + final String selectQuery; + if (context.getProperty(SQL_SELECT_QUERY).isSet()) { + selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess).getValue(); + } else { + // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query. + // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled. + final StringBuilder queryContents = new StringBuilder(); + session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, Charset.defaultCharset()))); + selectQuery = queryContents.toString(); + } + + int resultCount = 0; + try (final Connection con = dbcpService.getConnection(fileToProcess == null ? 
Collections.emptyMap() : fileToProcess.getAttributes()); + final PreparedStatement st = con.prepareStatement(selectQuery)) { + st.setQueryTimeout(queryTimeout); // timeout in seconds + + if (fileToProcess != null) { + JdbcCommon.setParameters(st, fileToProcess.getAttributes()); + } + logger.debug("Executing query {}", new Object[]{selectQuery}); + + int fragmentIndex = 0; + final String fragmentId = UUID.randomUUID().toString(); + + final StopWatch executionTime = new StopWatch(true); + + boolean hasResults = st.execute(); + + long executionTimeElapsed = executionTime.getElapsed(TimeUnit.MILLISECONDS); + + boolean hasUpdateCount = st.getUpdateCount() != -1; + + while (hasResults || hasUpdateCount) { + //getMoreResults() and execute() return false to indicate that the result of the statement is just a number and not a ResultSet + if (hasResults) { + final AtomicLong nrOfRows = new AtomicLong(0L); + + try { + final ResultSet resultSet = st.getResultSet(); + do { + final StopWatch fetchTime = new StopWatch(true); + + FlowFile resultSetFF; + if (fileToProcess == null) { + resultSetFF = session.create(); + } else { + resultSetFF = session.create(fileToProcess); + resultSetFF = session.putAllAttributes(resultSetFF, fileToProcess.getAttributes()); + } + + try { + resultSetFF = session.write(resultSetFF, out -> { + try { + nrOfRows.set(sqlWriter.writeResultSet(resultSet, out, getLogger(), null)); + } catch (Exception e) { + throw (e instanceof ProcessException) ? (ProcessException) e : new ProcessException(e); + } + }); + + long fetchTimeElapsed = fetchTime.getElapsed(TimeUnit.MILLISECONDS); + + // set attributes + final Map attributesToAdd = new HashMap<>(); + attributesToAdd.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get())); + attributesToAdd.put(RESULT_QUERY_DURATION, String.valueOf(executionTimeElapsed + fetchTimeElapsed)); + attributesToAdd.put(RESULT_QUERY_EXECUTION_TIME, String.valueOf(executionTimeElapsed)); + attributesToAdd.put(RESULT_QUERY_FETCH_TIME, String.valueOf(fetchTimeElapsed)); + attributesToAdd.put(RESULTSET_INDEX, String.valueOf(resultCount)); + attributesToAdd.putAll(sqlWriter.getAttributesToAdd()); + resultSetFF = session.putAllAttributes(resultSetFF, attributesToAdd); + sqlWriter.updateCounters(session); + + // if fragmented ResultSet, determine if we should keep this fragment; set fragment attributes + if (maxRowsPerFlowFile > 0) { + // if row count is zero and this is not the first fragment, drop it instead of committing it. 
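+ // (An empty trailing fragment can occur when the previous iteration filled a FlowFile with exactly Max Rows Per Flow File rows:
+ //  the do-while below then loops once more, finds the ResultSet exhausted, and would otherwise emit a FlowFile containing zero rows.)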
+ if (nrOfRows.get() == 0 && fragmentIndex > 0) { + session.remove(resultSetFF); + break; + } + + resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_ID, fragmentId); + resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_INDEX, String.valueOf(fragmentIndex)); + } + + logger.info("{} contains {} records; transferring to 'success'", + new Object[]{resultSetFF, nrOfRows.get()}); + // Report a FETCH event if there was an incoming flow file, or a RECEIVE event otherwise + if(context.hasIncomingConnection()) { + session.getProvenanceReporter().fetch(resultSetFF, "Retrieved " + nrOfRows.get() + " rows", executionTimeElapsed + fetchTimeElapsed); + } else { + session.getProvenanceReporter().receive(resultSetFF, "Retrieved " + nrOfRows.get() + " rows", executionTimeElapsed + fetchTimeElapsed); + } + resultSetFlowFiles.add(resultSetFF); + + // If we've reached the batch size, send out the flow files + if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) { + session.transfer(resultSetFlowFiles, REL_SUCCESS); + session.commit(); + resultSetFlowFiles.clear(); + } + + fragmentIndex++; + } catch (Exception e) { + // Remove the result set flow file and propagate the exception + session.remove(resultSetFF); + if (e instanceof ProcessException) { + throw (ProcessException) e; + } else { + throw new ProcessException(e); + } + } + } while (maxRowsPerFlowFile > 0 && nrOfRows.get() == maxRowsPerFlowFile); + + // If we are splitting results but not outputting batches, set count on all FlowFiles + if (outputBatchSize == 0 && maxRowsPerFlowFile > 0) { + for (int i = 0; i < resultSetFlowFiles.size(); i++) { + resultSetFlowFiles.set(i, + session.putAttribute(resultSetFlowFiles.get(i), FRAGMENT_COUNT, Integer.toString(fragmentIndex))); + } + } + } catch (final SQLException e) { + throw new ProcessException(e); + } + + resultCount++; + } + + // are there anymore result sets? 
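+ // JDBC note: getMoreResults(Statement.CLOSE_CURRENT_RESULT) closes the current ResultSet and advances to the statement's next result;
+ // the outer loop therefore ends only when there is neither a further ResultSet (hasResults is false) nor a further update count (getUpdateCount() returns -1).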
+ try { + hasResults = st.getMoreResults(Statement.CLOSE_CURRENT_RESULT); + hasUpdateCount = st.getUpdateCount() != -1; + } catch (SQLException ex) { + hasResults = false; + hasUpdateCount = false; + } + } + + // Transfer any remaining files to SUCCESS + session.transfer(resultSetFlowFiles, REL_SUCCESS); + resultSetFlowFiles.clear(); + + //If we had at least one result then it's OK to drop the original file, but if we had no results then + // pass the original flow file down the line to trigger downstream processors + if (fileToProcess != null) { + if (resultCount > 0) { + session.remove(fileToProcess); + } else { + fileToProcess = session.write(fileToProcess, out -> sqlWriter.writeEmptyResultSet(out, getLogger())); + fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, "0"); + fileToProcess = session.putAttribute(fileToProcess, CoreAttributes.MIME_TYPE.key(), sqlWriter.getMimeType()); + session.transfer(fileToProcess, REL_SUCCESS); + } + } else if (resultCount == 0) { + //If we had no inbound FlowFile, no exceptions, and the SQL generated no result sets (Insert/Update/Delete statements only) + // Then generate an empty Output FlowFile + FlowFile resultSetFF = session.create(); + + resultSetFF = session.write(resultSetFF, out -> sqlWriter.writeEmptyResultSet(out, getLogger())); + resultSetFF = session.putAttribute(resultSetFF, RESULT_ROW_COUNT, "0"); + resultSetFF = session.putAttribute(resultSetFF, CoreAttributes.MIME_TYPE.key(), sqlWriter.getMimeType()); + session.transfer(resultSetFF, REL_SUCCESS); + } + } catch (final ProcessException | SQLException e) { + //If we had at least one result then it's OK to drop the original file, but if we had no results then + // pass the original flow file down the line to trigger downstream processors + if (fileToProcess == null) { + // This can happen if any exceptions occur while setting up the connection, statement, etc. + logger.error("Unable to execute SQL select query {} due to {}. No FlowFile to route to failure", + new Object[]{selectQuery, e}); + context.yield(); + } else { + if (context.hasIncomingConnection()) { + logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure", + new Object[]{selectQuery, fileToProcess, e}); + fileToProcess = session.penalize(fileToProcess); + } else { + logger.error("Unable to execute SQL select query {} due to {}; routing to failure", + new Object[]{selectQuery, e}); + context.yield(); + } + session.transfer(fileToProcess, REL_FAILURE); + } + } + } + + protected abstract SqlWriter configureSqlWriter(ProcessSession session, ProcessContext context, FlowFile fileToProcess); +} diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractQueryDatabaseTable.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractQueryDatabaseTable.java new file mode 100644 index 000000000000..06df6c196209 --- /dev/null +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/AbstractQueryDatabaseTable.java @@ -0,0 +1,483 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.processors.standard; + +import org.apache.commons.lang3.StringUtils; +import org.apache.nifi.annotation.lifecycle.OnScheduled; +import org.apache.nifi.annotation.lifecycle.OnStopped; +import org.apache.nifi.components.PropertyDescriptor; +import org.apache.nifi.components.state.Scope; +import org.apache.nifi.components.state.StateManager; +import org.apache.nifi.components.state.StateMap; +import org.apache.nifi.dbcp.DBCPService; +import org.apache.nifi.expression.AttributeExpression; +import org.apache.nifi.expression.ExpressionLanguageScope; +import org.apache.nifi.flowfile.FlowFile; +import org.apache.nifi.flowfile.attributes.FragmentAttributes; +import org.apache.nifi.logging.ComponentLog; +import org.apache.nifi.processor.ProcessContext; +import org.apache.nifi.processor.ProcessSession; +import org.apache.nifi.processor.ProcessSessionFactory; +import org.apache.nifi.processor.Relationship; +import org.apache.nifi.processor.exception.ProcessException; +import org.apache.nifi.processor.util.StandardValidators; +import org.apache.nifi.processors.standard.db.DatabaseAdapter; +import org.apache.nifi.processors.standard.sql.SqlWriter; +import org.apache.nifi.processors.standard.util.JdbcCommon; +import org.apache.nifi.util.StopWatch; + +import java.io.IOException; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.text.ParseException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.IntStream; + + +public abstract class AbstractQueryDatabaseTable extends AbstractDatabaseFetchProcessor { + + public static final String RESULT_TABLENAME = "tablename"; + public static final String RESULT_ROW_COUNT = "querydbtable.row.count"; + + public static final String FRAGMENT_ID = FragmentAttributes.FRAGMENT_ID.key(); + public static final String FRAGMENT_INDEX = FragmentAttributes.FRAGMENT_INDEX.key(); + + public static final PropertyDescriptor FETCH_SIZE = new PropertyDescriptor.Builder() + .name("Fetch Size") + .description("The number of result rows to be fetched from the result set at a time. This is a hint to the database driver and may not be " + + "honored and/or exact. 
If the value specified is zero, then the hint is ignored.") + .defaultValue("0") + .required(true) + .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) + .build(); + + public static final PropertyDescriptor MAX_ROWS_PER_FLOW_FILE = new PropertyDescriptor.Builder() + .name("qdbt-max-rows") + .displayName("Max Rows Per Flow File") + .description("The maximum number of result rows that will be included in a single FlowFile. This will allow you to break up very large " + + "result sets into multiple FlowFiles. If the value specified is zero, then all rows are returned in a single FlowFile.") + .defaultValue("0") + .required(true) + .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) + .build(); + + public static final PropertyDescriptor OUTPUT_BATCH_SIZE = new PropertyDescriptor.Builder() + .name("qdbt-output-batch-size") + .displayName("Output Batch Size") + .description("The number of output FlowFiles to queue before committing the process session. When set to zero, the session will be committed when all result set rows " + + "have been processed and the output FlowFiles are ready for transfer to the downstream relationship. For large result sets, this can cause a large burst of FlowFiles " + + "to be transferred at the end of processor execution. If this property is set, then when the specified number of FlowFiles are ready for transfer, then the session will " + + "be committed, thus releasing the FlowFiles to the downstream relationship. NOTE: The maxvalue.* and fragment.count attributes will not be set on FlowFiles when this " + + "property is set.") + .defaultValue("0") + .required(true) + .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) + .build(); + + public static final PropertyDescriptor MAX_FRAGMENTS = new PropertyDescriptor.Builder() + .name("qdbt-max-frags") + .displayName("Maximum Number of Fragments") + .description("The maximum number of fragments. If the value specified is zero, then all fragments are returned. " + + "This prevents OutOfMemoryError when this processor ingests huge table. 
NOTE: Setting this property can result in data loss, as the incoming results are " + + "not ordered, and fragments may end at arbitrary boundaries where rows are not included in the result set.") + .defaultValue("0") + .required(true) + .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) + .build(); + + @Override + public Set getRelationships() { + return relationships; + } + + @Override + protected List getSupportedPropertyDescriptors() { + return propDescriptors; + } + + @Override + protected PropertyDescriptor getSupportedDynamicPropertyDescriptor(final String propertyDescriptorName) { + return new PropertyDescriptor.Builder() + .name(propertyDescriptorName) + .required(false) + .addValidator(StandardValidators.createAttributeExpressionLanguageValidator(AttributeExpression.ResultType.STRING, true)) + .addValidator(StandardValidators.ATTRIBUTE_KEY_PROPERTY_NAME_VALIDATOR) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) + .dynamic(true) + .build(); + } + + @OnScheduled + public void setup(final ProcessContext context) { + maxValueProperties = getDefaultMaxValueProperties(context, null); + } + + @OnStopped + public void stop() { + // Reset the column type map in case properties change + setupComplete.set(false); + } + + @Override + public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException { + // Fetch the column/table info once + if (!setupComplete.get()) { + super.setup(context); + } + ProcessSession session = sessionFactory.createSession(); + final List resultSetFlowFiles = new ArrayList<>(); + + final ComponentLog logger = getLogger(); + + final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class); + final DatabaseAdapter dbAdapter = dbAdapters.get(context.getProperty(DB_TYPE).getValue()); + final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue(); + final String columnNames = context.getProperty(COLUMN_NAMES).evaluateAttributeExpressions().getValue(); + final String sqlQuery = context.getProperty(SQL_QUERY).evaluateAttributeExpressions().getValue(); + final String maxValueColumnNames = context.getProperty(MAX_VALUE_COLUMN_NAMES).evaluateAttributeExpressions().getValue(); + final String customWhereClause = context.getProperty(WHERE_CLAUSE).evaluateAttributeExpressions().getValue(); + final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger(); + final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE).evaluateAttributeExpressions().asInteger(); + final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions().asInteger(); + final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField; + final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet() + ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions().asInteger() + : 0; + + + SqlWriter sqlWriter = configureSqlWriter(session, context); + + final StateManager stateManager = context.getStateManager(); + final StateMap stateMap; + + try { + stateMap = stateManager.getState(Scope.CLUSTER); + } catch (final IOException ioe) { + getLogger().error("Failed to retrieve observed maximum values from the State Manager. 
Will not perform " + + "query until this is accomplished.", ioe); + context.yield(); + return; + } + // Make a mutable copy of the current state property map. This will be updated by the result row callback, and eventually + // set as the current state map (after the session has been committed) + final Map statePropertyMap = new HashMap<>(stateMap.toMap()); + + //If an initial max value for column(s) has been specified using properties, and this column is not in the state manager, sync them to the state property map + for (final Map.Entry maxProp : maxValueProperties.entrySet()) { + String maxPropKey = maxProp.getKey().toLowerCase(); + String fullyQualifiedMaxPropKey = getStateKey(tableName, maxPropKey, dbAdapter); + if (!statePropertyMap.containsKey(fullyQualifiedMaxPropKey)) { + String newMaxPropValue; + // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme) + // the value has been stored under a key that is only the column name. Fall back to check the column name, + // but store the new initial max value under the fully-qualified key. + if (statePropertyMap.containsKey(maxPropKey)) { + newMaxPropValue = statePropertyMap.get(maxPropKey); + } else { + newMaxPropValue = maxProp.getValue(); + } + statePropertyMap.put(fullyQualifiedMaxPropKey, newMaxPropValue); + + } + } + + List maxValueColumnNameList = StringUtils.isEmpty(maxValueColumnNames) + ? null + : Arrays.asList(maxValueColumnNames.split("\\s*,\\s*")); + final String selectQuery = getQuery(dbAdapter, tableName, sqlQuery, columnNames, maxValueColumnNameList, customWhereClause, statePropertyMap); + final StopWatch stopWatch = new StopWatch(true); + final String fragmentIdentifier = UUID.randomUUID().toString(); + + try (final Connection con = dbcpService.getConnection(Collections.emptyMap()); + final Statement st = con.createStatement()) { + + if (fetchSize != null && fetchSize > 0) { + try { + st.setFetchSize(fetchSize); + } catch (SQLException se) { + // Not all drivers support this, just log the error (at debug level) and move on + logger.debug("Cannot set fetch size to {} due to {}", new Object[]{fetchSize, se.getLocalizedMessage()}, se); + } + } + + String jdbcURL = "DBCPService"; + try { + DatabaseMetaData databaseMetaData = con.getMetaData(); + if (databaseMetaData != null) { + jdbcURL = databaseMetaData.getURL(); + } + } catch (SQLException se) { + // Ignore and use default JDBC URL. 
This shouldn't happen unless the driver doesn't implement getMetaData() properly + } + + final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions().asTimePeriod(TimeUnit.SECONDS).intValue(); + st.setQueryTimeout(queryTimeout); // timeout in seconds + if (logger.isDebugEnabled()) { + logger.debug("Executing query {}", new Object[] { selectQuery }); + } + try (final ResultSet resultSet = st.executeQuery(selectQuery)) { + int fragmentIndex=0; + // Max values will be updated in the state property map by the callback + final MaxValueResultSetRowCollector maxValCollector = new MaxValueResultSetRowCollector(tableName, statePropertyMap, dbAdapter); + + while(true) { + final AtomicLong nrOfRows = new AtomicLong(0L); + + FlowFile fileToProcess = session.create(); + try { + fileToProcess = session.write(fileToProcess, out -> { + try { + nrOfRows.set(sqlWriter.writeResultSet(resultSet, out, getLogger(), maxValCollector)); + } catch (Exception e) { + throw new ProcessException("Error during database query or conversion of records.", e); + } + }); + } catch (ProcessException e) { + // Add flowfile to results before rethrowing so it will be removed from session in outer catch + resultSetFlowFiles.add(fileToProcess); + throw e; + } + + if (nrOfRows.get() > 0) { + // set attributes + final Map attributesToAdd = new HashMap<>(); + attributesToAdd.put(RESULT_ROW_COUNT, String.valueOf(nrOfRows.get())); + attributesToAdd.put(RESULT_TABLENAME, tableName); + + if(maxRowsPerFlowFile > 0) { + attributesToAdd.put(FRAGMENT_ID, fragmentIdentifier); + attributesToAdd.put(FRAGMENT_INDEX, String.valueOf(fragmentIndex)); + } + + attributesToAdd.putAll(sqlWriter.getAttributesToAdd()); + fileToProcess = session.putAllAttributes(fileToProcess, attributesToAdd); + sqlWriter.updateCounters(session); + + logger.info("{} contains {} records; transferring to 'success'", + new Object[]{fileToProcess, nrOfRows.get()}); + + session.getProvenanceReporter().receive(fileToProcess, jdbcURL, stopWatch.getElapsed(TimeUnit.MILLISECONDS)); + resultSetFlowFiles.add(fileToProcess); + // If we've reached the batch size, send out the flow files + if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) { + session.transfer(resultSetFlowFiles, REL_SUCCESS); + session.commit(); + resultSetFlowFiles.clear(); + } + } else { + // If there were no rows returned, don't send the flowfile + session.remove(fileToProcess); + // If no rows and this was first FlowFile, yield + if(fragmentIndex == 0){ + context.yield(); + } + break; + } + + fragmentIndex++; + if (maxFragments > 0 && fragmentIndex >= maxFragments) { + break; + } + + // If we aren't splitting up the data into flow files or fragments, then the result set has been entirely fetched so don't loop back around + if (maxFragments == 0 && maxRowsPerFlowFile == 0) { + break; + } + + // If we are splitting up the data into flow files, don't loop back around if we've gotten all results + if(maxRowsPerFlowFile > 0 && nrOfRows.get() < maxRowsPerFlowFile) { + break; + } + } + + // Apply state changes from the Max Value tracker + maxValCollector.applyStateChanges(); + + // Even though the maximum value and total count are known at this point, to maintain consistent behavior if Output Batch Size is set, do not store the attributes + if (outputBatchSize == 0) { + for (int i = 0; i < resultSetFlowFiles.size(); i++) { + // Add maximum values as attributes + for (Map.Entry entry : statePropertyMap.entrySet()) { + // Get just the column name from the key + 
String key = entry.getKey(); + String colName = key.substring(key.lastIndexOf(NAMESPACE_DELIMITER) + NAMESPACE_DELIMITER.length()); + resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i), "maxvalue." + colName, entry.getValue())); + } + + //set count on all FlowFiles + if (maxRowsPerFlowFile > 0) { + resultSetFlowFiles.set(i, + session.putAttribute(resultSetFlowFiles.get(i), "fragment.count", Integer.toString(fragmentIndex))); + } + } + } + } catch (final SQLException e) { + throw e; + } + + session.transfer(resultSetFlowFiles, REL_SUCCESS); + + } catch (final ProcessException | SQLException e) { + logger.error("Unable to execute SQL select query {} due to {}", new Object[]{selectQuery, e}); + if (!resultSetFlowFiles.isEmpty()) { + session.remove(resultSetFlowFiles); + } + context.yield(); + } finally { + session.commit(); + try { + // Update the state + stateManager.setState(statePropertyMap, Scope.CLUSTER); + } catch (IOException ioe) { + getLogger().error("{} failed to update State Manager, maximum observed values will not be recorded", new Object[]{this, ioe}); + } + } + } + + protected String getQuery(DatabaseAdapter dbAdapter, String tableName, String columnNames, List maxValColumnNames, + String customWhereClause, Map stateMap) { + + return getQuery(dbAdapter, tableName, null, columnNames, maxValColumnNames, customWhereClause, stateMap); + } + + protected String getQuery(DatabaseAdapter dbAdapter, String tableName, String sqlQuery, String columnNames, List maxValColumnNames, + String customWhereClause, Map stateMap) { + if (StringUtils.isEmpty(tableName)) { + throw new IllegalArgumentException("Table name must be specified"); + } + final StringBuilder query; + + if (StringUtils.isEmpty(sqlQuery)) { + query = new StringBuilder(dbAdapter.getSelectStatement(tableName, columnNames, null, null, null, null)); + } else { + query = getWrappedQuery(dbAdapter, sqlQuery, tableName); + } + + List whereClauses = new ArrayList<>(); + // Check state map for last max values + if (stateMap != null && !stateMap.isEmpty() && maxValColumnNames != null) { + IntStream.range(0, maxValColumnNames.size()).forEach((index) -> { + String colName = maxValColumnNames.get(index); + String maxValueKey = getStateKey(tableName, colName, dbAdapter); + String maxValue = stateMap.get(maxValueKey); + if (StringUtils.isEmpty(maxValue)) { + // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme) + // the value has been stored under a key that is only the column name. Fall back to check the column name; either way, when a new + // maximum value is observed, it will be stored under the fully-qualified key from then on. + maxValue = stateMap.get(colName.toLowerCase()); + } + if (!StringUtils.isEmpty(maxValue)) { + Integer type = columnTypeMap.get(maxValueKey); + if (type == null) { + // This shouldn't happen as we are populating columnTypeMap when the processor is scheduled. + throw new IllegalArgumentException("No column type found for: " + colName); + } + // Add a condition for the WHERE clause + whereClauses.add(colName + (index == 0 ? 
" > " : " >= ") + getLiteralByType(type, maxValue, dbAdapter.getName())); + } + }); + } + + if (customWhereClause != null) { + whereClauses.add("(" + customWhereClause + ")"); + } + + if (!whereClauses.isEmpty()) { + query.append(" WHERE "); + query.append(StringUtils.join(whereClauses, " AND ")); + } + + return query.toString(); + } + + public class MaxValueResultSetRowCollector implements JdbcCommon.ResultSetRowCallback { + DatabaseAdapter dbAdapter; + final Map newColMap; + final Map originalState; + String tableName; + + public MaxValueResultSetRowCollector(String tableName, Map stateMap, DatabaseAdapter dbAdapter) { + this.dbAdapter = dbAdapter; + this.originalState = stateMap; + + this.newColMap = new HashMap<>(); + this.newColMap.putAll(stateMap); + + this.tableName = tableName; + } + + @Override + public void processRow(ResultSet resultSet) throws IOException { + if (resultSet == null) { + return; + } + try { + // Iterate over the row, check-and-set max values + final ResultSetMetaData meta = resultSet.getMetaData(); + final int nrOfColumns = meta.getColumnCount(); + if (nrOfColumns > 0) { + for (int i = 1; i <= nrOfColumns; i++) { + String colName = meta.getColumnName(i).toLowerCase(); + String fullyQualifiedMaxValueKey = getStateKey(tableName, colName, dbAdapter); + Integer type = columnTypeMap.get(fullyQualifiedMaxValueKey); + // Skip any columns we're not keeping track of or whose value is null + if (type == null || resultSet.getObject(i) == null) { + continue; + } + String maxValueString = newColMap.get(fullyQualifiedMaxValueKey); + // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme) + // the value has been stored under a key that is only the column name. Fall back to check the column name; either way, when a new + // maximum value is observed, it will be stored under the fully-qualified key from then on. 
+ if (StringUtils.isEmpty(maxValueString)) { + maxValueString = newColMap.get(colName); + } + String newMaxValueString = getMaxValueFromRow(resultSet, i, type, maxValueString, dbAdapter.getName()); + if (newMaxValueString != null) { + newColMap.put(fullyQualifiedMaxValueKey, newMaxValueString); + } + } + } + } catch (ParseException | SQLException e) { + throw new IOException(e); + } + } + + @Override + public void applyStateChanges() { + this.originalState.putAll(this.newColMap); + } + } + + protected abstract SqlWriter configureSqlWriter(ProcessSession session, ProcessContext context); +} diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQL.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQL.java index df82e2ead406..cc6d5088bd96 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQL.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQL.java @@ -16,22 +16,12 @@ */ package org.apache.nifi.processors.standard; -import java.nio.charset.Charset; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import org.apache.commons.io.IOUtils; import org.apache.nifi.annotation.behavior.EventDriven; import org.apache.nifi.annotation.behavior.InputRequirement; import org.apache.nifi.annotation.behavior.InputRequirement.Requirement; @@ -41,23 +31,16 @@ import org.apache.nifi.annotation.behavior.WritesAttributes; import org.apache.nifi.annotation.documentation.CapabilityDescription; import org.apache.nifi.annotation.documentation.Tags; -import org.apache.nifi.annotation.lifecycle.OnScheduled; import org.apache.nifi.components.PropertyDescriptor; -import org.apache.nifi.dbcp.DBCPService; import org.apache.nifi.expression.ExpressionLanguageScope; import org.apache.nifi.flowfile.FlowFile; -import org.apache.nifi.flowfile.attributes.CoreAttributes; -import org.apache.nifi.flowfile.attributes.FragmentAttributes; -import org.apache.nifi.logging.ComponentLog; -import org.apache.nifi.processor.AbstractProcessor; import org.apache.nifi.processor.ProcessContext; import org.apache.nifi.processor.ProcessSession; import org.apache.nifi.processor.Relationship; -import org.apache.nifi.processor.exception.ProcessException; -import org.apache.nifi.processor.util.StandardValidators; -import org.apache.nifi.processors.standard.util.AvroUtil.CodecType; +import org.apache.nifi.processors.standard.sql.DefaultAvroSqlWriter; +import org.apache.nifi.processors.standard.sql.SqlWriter; import org.apache.nifi.processors.standard.util.JdbcCommon; -import org.apache.nifi.util.StopWatch; +import org.apache.nifi.processors.standard.util.AvroUtil.CodecType; import static org.apache.nifi.processors.standard.util.JdbcCommon.DEFAULT_PRECISION; import static org.apache.nifi.processors.standard.util.JdbcCommon.DEFAULT_SCALE; @@ -94,99 +77,24 @@ + "'yyyy-MM-dd HH:mm:ss.SSS' for Timestamp is used.") }) @WritesAttributes({ - @WritesAttribute(attribute="executesql.row.count", description = 
"Contains the number of rows returned in the select query"), - @WritesAttribute(attribute="executesql.query.duration", description = "Combined duration of the query execution time and fetch time in milliseconds"), - @WritesAttribute(attribute="executesql.query.executiontime", description = "Duration of the query execution time in milliseconds"), - @WritesAttribute(attribute="executesql.query.fetchtime", description = "Duration of the result set fetch time in milliseconds"), - @WritesAttribute(attribute="executesql.resultset.index", description = "Assuming multiple result sets are returned, " - + "the zero based index of this result set."), - @WritesAttribute(attribute="fragment.identifier", description="If 'Max Rows Per Flow File' is set then all FlowFiles from the same query result set " - + "will have the same value for the fragment.identifier attribute. This can then be used to correlate the results."), - @WritesAttribute(attribute = "fragment.count", description = "If 'Max Rows Per Flow File' is set then this is the total number of " - + "FlowFiles produced by a single ResultSet. This can be used in conjunction with the " - + "fragment.identifier attribute in order to know how many FlowFiles belonged to the same incoming ResultSet. If Output Batch Size is set, then this " - + "attribute will not be populated."), - @WritesAttribute(attribute="fragment.index", description="If 'Max Rows Per Flow File' is set then the position of this FlowFile in the list of " - + "outgoing FlowFiles that were all derived from the same result set FlowFile. This can be " - + "used in conjunction with the fragment.identifier attribute to know which FlowFiles originated from the same query result set and in what order " + @WritesAttribute(attribute = "executesql.row.count", description = "Contains the number of rows returned in the select query"), + @WritesAttribute(attribute = "executesql.query.duration", description = "Combined duration of the query execution time and fetch time in milliseconds"), + @WritesAttribute(attribute = "executesql.query.executiontime", description = "Duration of the query execution time in milliseconds"), + @WritesAttribute(attribute = "executesql.query.fetchtime", description = "Duration of the result set fetch time in milliseconds"), + @WritesAttribute(attribute = "executesql.resultset.index", description = "Assuming multiple result sets are returned, " + + "the zero based index of this result set."), + @WritesAttribute(attribute = "fragment.identifier", description = "If 'Max Rows Per Flow File' is set then all FlowFiles from the same query result set " + + "will have the same value for the fragment.identifier attribute. This can then be used to correlate the results."), + @WritesAttribute(attribute = "fragment.count", description = "If 'Max Rows Per Flow File' is set then this is the total number of " + + "FlowFiles produced by a single ResultSet. This can be used in conjunction with the " + + "fragment.identifier attribute in order to know how many FlowFiles belonged to the same incoming ResultSet. If Output Batch Size is set, then this " + + "attribute will not be populated."), + @WritesAttribute(attribute = "fragment.index", description = "If 'Max Rows Per Flow File' is set then the position of this FlowFile in the list of " + + "outgoing FlowFiles that were all derived from the same result set FlowFile. 
This can be " + + "used in conjunction with the fragment.identifier attribute to know which FlowFiles originated from the same query result set and in what order " + "FlowFiles were produced") }) -public class ExecuteSQL extends AbstractProcessor { - - public static final String RESULT_ROW_COUNT = "executesql.row.count"; - public static final String RESULT_QUERY_DURATION = "executesql.query.duration"; - public static final String RESULT_QUERY_EXECUTION_TIME = "executesql.query.executiontime"; - public static final String RESULT_QUERY_FETCH_TIME = "executesql.query.fetchtime"; - public static final String RESULTSET_INDEX = "executesql.resultset.index"; - - public static final String FRAGMENT_ID = FragmentAttributes.FRAGMENT_ID.key(); - public static final String FRAGMENT_INDEX = FragmentAttributes.FRAGMENT_INDEX.key(); - public static final String FRAGMENT_COUNT = FragmentAttributes.FRAGMENT_COUNT.key(); - - // Relationships - public static final Relationship REL_SUCCESS = new Relationship.Builder() - .name("success") - .description("Successfully created FlowFile from SQL query result set.") - .build(); - public static final Relationship REL_FAILURE = new Relationship.Builder() - .name("failure") - .description("SQL query execution failed. Incoming FlowFile will be penalized and routed to this relationship") - .build(); - private final Set relationships; - - public static final PropertyDescriptor DBCP_SERVICE = new PropertyDescriptor.Builder() - .name("Database Connection Pooling Service") - .description("The Controller Service that is used to obtain connection to database") - .required(true) - .identifiesControllerService(DBCPService.class) - .build(); - - public static final PropertyDescriptor SQL_SELECT_QUERY = new PropertyDescriptor.Builder() - .name("SQL select query") - .description("The SQL select query to execute. The query can be empty, a constant value, or built from attributes " - + "using Expression Language. If this property is specified, it will be used regardless of the content of " - + "incoming flowfiles. If this property is empty, the content of the incoming flow file is expected " - + "to contain a valid SQL select query, to be issued by the processor to the database. Note that Expression " - + "Language is not evaluated for flow file contents.") - .required(false) - .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) - .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES) - .build(); - - public static final PropertyDescriptor QUERY_TIMEOUT = new PropertyDescriptor.Builder() - .name("Max Wait Time") - .description("The maximum amount of time allowed for a running SQL select query " - + " , zero means there is no limit. Max time less than 1 second will be equal to zero.") - .defaultValue("0 seconds") - .required(true) - .addValidator(StandardValidators.TIME_PERIOD_VALIDATOR) - .sensitive(false) - .build(); - - public static final PropertyDescriptor MAX_ROWS_PER_FLOW_FILE = new PropertyDescriptor.Builder() - .name("esql-max-rows") - .displayName("Max Rows Per Flow File") - .description("The maximum number of result rows that will be included in a single FlowFile. This will allow you to break up very large " - + "result sets into multiple FlowFiles. 
If the value specified is zero, then all rows are returned in a single FlowFile.") - .defaultValue("0") - .required(true) - .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) - .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) - .build(); - - public static final PropertyDescriptor OUTPUT_BATCH_SIZE = new PropertyDescriptor.Builder() - .name("esql-output-batch-size") - .displayName("Output Batch Size") - .description("The number of output FlowFiles to queue before committing the process session. When set to zero, the session will be committed when all result set rows " - + "have been processed and the output FlowFiles are ready for transfer to the downstream relationship. For large result sets, this can cause a large burst of FlowFiles " - + "to be transferred at the end of processor execution. If this property is set, then when the specified number of FlowFiles are ready for transfer, then the session will " - + "be committed, thus releasing the FlowFiles to the downstream relationship. NOTE: The fragment.count attribute will not be set on FlowFiles when this " - + "property is set.") - .defaultValue("0") - .required(true) - .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) - .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) - .build(); +public class ExecuteSQL extends AbstractExecuteSQL { public static final PropertyDescriptor COMPRESSION_FORMAT = new PropertyDescriptor.Builder() .name("compression-format") @@ -198,8 +106,6 @@ public class ExecuteSQL extends AbstractProcessor { .required(true) .build(); - private final List propDescriptors; - public ExecuteSQL() { final Set r = new HashSet<>(); r.add(REL_SUCCESS); @@ -212,248 +118,31 @@ public ExecuteSQL() { pds.add(QUERY_TIMEOUT); pds.add(NORMALIZE_NAMES_FOR_AVRO); pds.add(USE_AVRO_LOGICAL_TYPES); + pds.add(COMPRESSION_FORMAT); pds.add(DEFAULT_PRECISION); pds.add(DEFAULT_SCALE); pds.add(MAX_ROWS_PER_FLOW_FILE); pds.add(OUTPUT_BATCH_SIZE); - pds.add(COMPRESSION_FORMAT); propDescriptors = Collections.unmodifiableList(pds); } @Override - public Set getRelationships() { - return relationships; - } - - @Override - protected List getSupportedPropertyDescriptors() { - return propDescriptors; - } - - @OnScheduled - public void setup(ProcessContext context) { - // If the query is not set, then an incoming flow file is needed. Otherwise fail the initialization - if (!context.getProperty(SQL_SELECT_QUERY).isSet() && !context.hasIncomingConnection()) { - final String errorString = "Either the Select Query must be specified or there must be an incoming connection " - + "providing flowfile(s) containing a SQL select query"; - getLogger().error(errorString); - throw new ProcessException(errorString); - } - } - - @Override - public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException { - FlowFile fileToProcess = null; - if (context.hasIncomingConnection()) { - fileToProcess = session.get(); - - // If we have no FlowFile, and all incoming connections are self-loops then we can continue on. - // However, if we have no FlowFile and we have connections coming from other Processors, then - // we know that we should run only if we have a FlowFile. 
- if (fileToProcess == null && context.hasNonLoopConnection()) { - return; - } - } - - final List resultSetFlowFiles = new ArrayList<>(); - - final ComponentLog logger = getLogger(); - final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class); - final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue(); + protected SqlWriter configureSqlWriter(ProcessSession session, ProcessContext context, FlowFile fileToProcess) { final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean(); final Boolean useAvroLogicalTypes = context.getProperty(USE_AVRO_LOGICAL_TYPES).asBoolean(); final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE).evaluateAttributeExpressions().asInteger(); - final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions().asInteger(); - final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField; final Integer defaultPrecision = context.getProperty(DEFAULT_PRECISION).evaluateAttributeExpressions(fileToProcess).asInteger(); final Integer defaultScale = context.getProperty(DEFAULT_SCALE).evaluateAttributeExpressions(fileToProcess).asInteger(); final String codec = context.getProperty(COMPRESSION_FORMAT).getValue(); - final String selectQuery; - if (context.getProperty(SQL_SELECT_QUERY).isSet()) { - selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess).getValue(); - } else { - // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query. - // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled. - final StringBuilder queryContents = new StringBuilder(); - session.read(fileToProcess, in -> queryContents.append(IOUtils.toString(in, Charset.defaultCharset()))); - selectQuery = queryContents.toString(); - } - - int resultCount=0; - try (final Connection con = dbcpService.getConnection(fileToProcess == null ? 
Collections.emptyMap() : fileToProcess.getAttributes()); - final PreparedStatement st = con.prepareStatement(selectQuery)) { - st.setQueryTimeout(queryTimeout); // timeout in seconds - - if (fileToProcess != null) { - JdbcCommon.setParameters(st, fileToProcess.getAttributes()); - } - logger.debug("Executing query {}", new Object[]{selectQuery}); - - int fragmentIndex=0; - final String fragmentId = UUID.randomUUID().toString(); - - final StopWatch executionTime = new StopWatch(true); - - boolean hasResults = st.execute(); - - long executionTimeElapsed = executionTime.getElapsed(TimeUnit.MILLISECONDS); - - boolean hasUpdateCount = st.getUpdateCount() != -1; - - while(hasResults || hasUpdateCount) { - //getMoreResults() and execute() return false to indicate that the result of the statement is just a number and not a ResultSet - if (hasResults) { - final AtomicLong nrOfRows = new AtomicLong(0L); - - try { - final ResultSet resultSet = st.getResultSet(); - final JdbcCommon.AvroConversionOptions options = JdbcCommon.AvroConversionOptions.builder() - .convertNames(convertNamesForAvro) - .useLogicalTypes(useAvroLogicalTypes) - .defaultPrecision(defaultPrecision) - .defaultScale(defaultScale) - .maxRows(maxRowsPerFlowFile) - .codecFactory(codec) - .build(); - - do { - final StopWatch fetchTime = new StopWatch(true); - - FlowFile resultSetFF; - if (fileToProcess == null) { - resultSetFF = session.create(); - } else { - resultSetFF = session.create(fileToProcess); - resultSetFF = session.putAllAttributes(resultSetFF, fileToProcess.getAttributes()); - } - - try { - resultSetFF = session.write(resultSetFF, out -> { - try { - nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, options, null)); - } catch (SQLException e) { - throw new ProcessException(e); - } - }); - - long fetchTimeElapsed = fetchTime.getElapsed(TimeUnit.MILLISECONDS); - - // set attribute how many rows were selected - resultSetFF = session.putAttribute(resultSetFF, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get())); - resultSetFF = session.putAttribute(resultSetFF, RESULT_QUERY_DURATION, String.valueOf(executionTimeElapsed + fetchTimeElapsed)); - resultSetFF = session.putAttribute(resultSetFF, RESULT_QUERY_EXECUTION_TIME, String.valueOf(executionTimeElapsed)); - resultSetFF = session.putAttribute(resultSetFF, RESULT_QUERY_FETCH_TIME, String.valueOf(fetchTimeElapsed)); - resultSetFF = session.putAttribute(resultSetFF, CoreAttributes.MIME_TYPE.key(), JdbcCommon.MIME_TYPE_AVRO_BINARY); - resultSetFF = session.putAttribute(resultSetFF, RESULTSET_INDEX, String.valueOf(resultCount)); - - // if fragmented ResultSet, determine if we should keep this fragment; set fragment attributes - if (maxRowsPerFlowFile > 0) { - // if row count is zero and this is not the first fragment, drop it instead of committing it. 
- if (nrOfRows.get() == 0 && fragmentIndex > 0) { - session.remove(resultSetFF); - break; - } - - resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_ID, fragmentId); - resultSetFF = session.putAttribute(resultSetFF, FRAGMENT_INDEX, String.valueOf(fragmentIndex)); - } - - logger.info("{} contains {} Avro records; transferring to 'success'", - new Object[]{resultSetFF, nrOfRows.get()}); - session.getProvenanceReporter().modifyContent(resultSetFF, "Retrieved " + nrOfRows.get() + " rows", executionTimeElapsed + fetchTimeElapsed); - resultSetFlowFiles.add(resultSetFF); - - // If we've reached the batch size, send out the flow files - if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) { - session.transfer(resultSetFlowFiles, REL_SUCCESS); - session.commit(); - resultSetFlowFiles.clear(); - } - - fragmentIndex++; - } catch (Exception e) { - // Remove the result set flow file and propagate the exception - session.remove(resultSetFF); - if (e instanceof ProcessException) { - throw (ProcessException) e; - } else { - throw new ProcessException(e); - } - } - } while (maxRowsPerFlowFile > 0 && nrOfRows.get() == maxRowsPerFlowFile); - - // If we are splitting results but not outputting batches, set count on all FlowFiles - if (outputBatchSize == 0 && maxRowsPerFlowFile > 0) { - for (int i = 0; i < resultSetFlowFiles.size(); i++) { - resultSetFlowFiles.set(i, - session.putAttribute(resultSetFlowFiles.get(i), FRAGMENT_COUNT, Integer.toString(fragmentIndex))); - } - } - } catch (final SQLException e) { - throw new ProcessException(e); - } - - resultCount++; - } - - // are there anymore result sets? - try{ - hasResults = st.getMoreResults(Statement.CLOSE_CURRENT_RESULT); - hasUpdateCount = st.getUpdateCount() != -1; - } catch(SQLException ex){ - hasResults = false; - hasUpdateCount = false; - } - } - - // Transfer any remaining files to SUCCESS - session.transfer(resultSetFlowFiles, REL_SUCCESS); - resultSetFlowFiles.clear(); - - //If we had at least one result then it's OK to drop the original file, but if we had no results then - // pass the original flow file down the line to trigger downstream processors - if(fileToProcess != null){ - if(resultCount > 0){ - session.remove(fileToProcess); - } else { - fileToProcess = session.write(fileToProcess, JdbcCommon::createEmptyAvroStream); - - fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, "0"); - fileToProcess = session.putAttribute(fileToProcess, CoreAttributes.MIME_TYPE.key(), JdbcCommon.MIME_TYPE_AVRO_BINARY); - session.transfer(fileToProcess, REL_SUCCESS); - } - } else if(resultCount == 0){ - //If we had no inbound FlowFile, no exceptions, and the SQL generated no result sets (Insert/Update/Delete statements only) - // Then generate an empty Output FlowFile - FlowFile resultSetFF = session.create(); - - resultSetFF = session.write(resultSetFF, out -> JdbcCommon.createEmptyAvroStream(out)); - - resultSetFF = session.putAttribute(resultSetFF, RESULT_ROW_COUNT, "0"); - resultSetFF = session.putAttribute(resultSetFF, CoreAttributes.MIME_TYPE.key(), JdbcCommon.MIME_TYPE_AVRO_BINARY); - session.transfer(resultSetFF, REL_SUCCESS); - } - } catch (final ProcessException | SQLException e) { - //If we had at least one result then it's OK to drop the original file, but if we had no results then - // pass the original flow file down the line to trigger downstream processors - if (fileToProcess == null) { - // This can happen if any exceptions occur while setting up the connection, statement, etc. 
- logger.error("Unable to execute SQL select query {} due to {}. No FlowFile to route to failure", - new Object[]{selectQuery, e}); - context.yield(); - } else { - if (context.hasIncomingConnection()) { - logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure", - new Object[]{selectQuery, fileToProcess, e}); - fileToProcess = session.penalize(fileToProcess); - } else { - logger.error("Unable to execute SQL select query {} due to {}; routing to failure", - new Object[]{selectQuery, e}); - context.yield(); - } - session.transfer(fileToProcess, REL_FAILURE); - } - } + final JdbcCommon.AvroConversionOptions options = JdbcCommon.AvroConversionOptions.builder() + .convertNames(convertNamesForAvro) + .useLogicalTypes(useAvroLogicalTypes) + .defaultPrecision(defaultPrecision) + .defaultScale(defaultScale) + .maxRows(maxRowsPerFlowFile) + .codecFactory(codec) + .build(); + return new DefaultAvroSqlWriter(options); } } diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQLRecord.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQLRecord.java new file mode 100644 index 000000000000..31d0ec855b29 --- /dev/null +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ExecuteSQLRecord.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.processors.standard; + +import org.apache.nifi.annotation.behavior.EventDriven; +import org.apache.nifi.annotation.behavior.InputRequirement; +import org.apache.nifi.annotation.behavior.InputRequirement.Requirement; +import org.apache.nifi.annotation.behavior.ReadsAttribute; +import org.apache.nifi.annotation.behavior.ReadsAttributes; +import org.apache.nifi.annotation.behavior.WritesAttribute; +import org.apache.nifi.annotation.behavior.WritesAttributes; +import org.apache.nifi.annotation.documentation.CapabilityDescription; +import org.apache.nifi.annotation.documentation.Tags; +import org.apache.nifi.components.PropertyDescriptor; +import org.apache.nifi.flowfile.FlowFile; +import org.apache.nifi.processor.ProcessContext; +import org.apache.nifi.processor.ProcessSession; +import org.apache.nifi.processor.Relationship; +import org.apache.nifi.processors.standard.sql.RecordSqlWriter; +import org.apache.nifi.processors.standard.sql.SqlWriter; +import org.apache.nifi.processors.standard.util.JdbcCommon; +import org.apache.nifi.serialization.RecordSetWriterFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.nifi.processors.standard.util.JdbcCommon.USE_AVRO_LOGICAL_TYPES; + +@EventDriven +@InputRequirement(Requirement.INPUT_ALLOWED) +@Tags({"sql", "select", "jdbc", "query", "database", "record"}) +@CapabilityDescription("Executes provided SQL select query. Query result will be converted to the format specified by a Record Writer. " + + "Streaming is used so arbitrarily large result sets are supported. This processor can be scheduled to run on " + + "a timer, or cron expression, using the standard scheduling methods, or it can be triggered by an incoming FlowFile. " + + "If it is triggered by an incoming FlowFile, then attributes of that FlowFile will be available when evaluating the " + + "select query, and the query may use the ? to escape parameters. In this case, the parameters to use must exist as FlowFile attributes " + + "with the naming convention sql.args.N.type and sql.args.N.value, where N is a positive integer. The sql.args.N.type is expected to be " + + "a number indicating the JDBC Type. The content of the FlowFile is expected to be in UTF-8 format. " + + "FlowFile attribute 'executesql.row.count' indicates how many rows were selected.") +@ReadsAttributes({ + @ReadsAttribute(attribute = "sql.args.N.type", description = "Incoming FlowFiles are expected to be parametrized SQL statements. The type of each Parameter is specified as an integer " + + "that represents the JDBC Type of the parameter."), + @ReadsAttribute(attribute = "sql.args.N.value", description = "Incoming FlowFiles are expected to be parametrized SQL statements. The value of the Parameters are specified as " + + "sql.args.1.value, sql.args.2.value, sql.args.3.value, and so on. The type of the sql.args.1.value Parameter is specified by the sql.args.1.type attribute."), + @ReadsAttribute(attribute = "sql.args.N.format", description = "This attribute is always optional, but default options may not always work for your data. " + + "Incoming FlowFiles are expected to be parametrized SQL statements. In some cases " + + "a format option needs to be specified, currently this is only applicable for binary data types, dates, times and timestamps. Binary Data Types (defaults to 'ascii') - " + + "ascii: each string character in your attribute value represents a single byte. 
This is the format provided by Avro Processors. " + + "base64: the string is a Base64 encoded string that can be decoded to bytes. " + + "hex: the string is hex encoded with all letters in upper case and no '0x' at the beginning. " + + "Dates/Times/Timestamps - " + + "Date, Time and Timestamp formats all support both custom formats or named format ('yyyy-MM-dd','ISO_OFFSET_DATE_TIME') " + + "as specified according to java.time.format.DateTimeFormatter. " + + "If not specified, a long value input is expected to be an unix epoch (milli seconds from 1970/1/1), or a string value in " + + "'yyyy-MM-dd' format for Date, 'HH:mm:ss.SSS' for Time (some database engines e.g. Derby or MySQL do not support milliseconds and will truncate milliseconds), " + + "'yyyy-MM-dd HH:mm:ss.SSS' for Timestamp is used.") +}) +@WritesAttributes({ + @WritesAttribute(attribute = "executesql.row.count", description = "Contains the number of rows returned in the select query"), + @WritesAttribute(attribute = "executesql.query.duration", description = "Combined duration of the query execution time and fetch time in milliseconds"), + @WritesAttribute(attribute = "executesql.query.executiontime", description = "Duration of the query execution time in milliseconds"), + @WritesAttribute(attribute = "executesql.query.fetchtime", description = "Duration of the result set fetch time in milliseconds"), + @WritesAttribute(attribute = "executesql.resultset.index", description = "Assuming multiple result sets are returned, " + + "the zero based index of this result set."), + @WritesAttribute(attribute = "fragment.identifier", description = "If 'Max Rows Per Flow File' is set then all FlowFiles from the same query result set " + + "will have the same value for the fragment.identifier attribute. This can then be used to correlate the results."), + @WritesAttribute(attribute = "fragment.count", description = "If 'Max Rows Per Flow File' is set then this is the total number of " + + "FlowFiles produced by a single ResultSet. This can be used in conjunction with the " + + "fragment.identifier attribute in order to know how many FlowFiles belonged to the same incoming ResultSet. If Output Batch Size is set, then this " + + "attribute will not be populated."), + @WritesAttribute(attribute = "fragment.index", description = "If 'Max Rows Per Flow File' is set then the position of this FlowFile in the list of " + + "outgoing FlowFiles that were all derived from the same result set FlowFile. This can be " + + "used in conjunction with the fragment.identifier attribute to know which FlowFiles originated from the same query result set and in what order " + + "FlowFiles were produced"), + @WritesAttribute(attribute = "mime.type", description = "Sets the mime.type attribute to the MIME Type specified by the Record Writer."), + @WritesAttribute(attribute = "record.count", description = "The number of records output by the Record Writer.") +}) +public class ExecuteSQLRecord extends AbstractExecuteSQL { + + + public static final PropertyDescriptor RECORD_WRITER_FACTORY = new PropertyDescriptor.Builder() + .name("esqlrecord-record-writer") + .displayName("Record Writer") + .description("Specifies the Controller Service to use for writing results to a FlowFile. The Record Writer may use Inherit Schema to emulate the inferred schema behavior, i.e. 
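Editor's note: the sql.args.N.type / sql.args.N.value / sql.args.N.format convention described in the ExecuteSQLRecord capability text above is easiest to see with concrete attribute values. A hedged, self-contained illustration follows; the query, values, and format are hypothetical and only show the naming pattern, not the processor's own code.

import java.sql.Types;
import java.util.HashMap;
import java.util.Map;

// Illustration of the sql.args.N.* FlowFile attribute convention for a parameterized statement such as
//   SELECT * FROM orders WHERE customer_id = ? AND placed_on > ?
public class SqlArgsAttributeExample {
    public static void main(String[] args) {
        Map<String, String> attributes = new HashMap<>();
        attributes.put("sql.args.1.type", String.valueOf(Types.INTEGER));   // JDBC type code 4
        attributes.put("sql.args.1.value", "42");
        attributes.put("sql.args.2.type", String.valueOf(Types.TIMESTAMP)); // JDBC type code 93
        attributes.put("sql.args.2.value", "2018-10-01 00:00:00.000");
        attributes.put("sql.args.2.format", "yyyy-MM-dd HH:mm:ss.SSS");     // optional, per the description above
        attributes.forEach((key, value) -> System.out.println(key + " = " + value));
    }
}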
" + + "an explicit schema need not be defined in the writer, and will be supplied by the same logic used to infer the schema from the column types.") + .identifiesControllerService(RecordSetWriterFactory.class) + .required(true) + .build(); + + public static final PropertyDescriptor NORMALIZE_NAMES = new PropertyDescriptor.Builder() + .name("esqlrecord-normalize") + .displayName("Normalize Table/Column Names") + .description("Whether to change characters in column names. For example, colons and periods will be changed to underscores.") + .allowableValues("true", "false") + .defaultValue("false") + .required(true) + .build(); + + public ExecuteSQLRecord() { + final Set r = new HashSet<>(); + r.add(REL_SUCCESS); + r.add(REL_FAILURE); + relationships = Collections.unmodifiableSet(r); + + final List pds = new ArrayList<>(); + pds.add(DBCP_SERVICE); + pds.add(SQL_SELECT_QUERY); + pds.add(QUERY_TIMEOUT); + pds.add(RECORD_WRITER_FACTORY); + pds.add(NORMALIZE_NAMES); + pds.add(USE_AVRO_LOGICAL_TYPES); + pds.add(MAX_ROWS_PER_FLOW_FILE); + pds.add(OUTPUT_BATCH_SIZE); + propDescriptors = Collections.unmodifiableList(pds); + } + + @Override + protected SqlWriter configureSqlWriter(ProcessSession session, ProcessContext context, FlowFile fileToProcess) { + final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE).evaluateAttributeExpressions().asInteger(); + final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES).asBoolean(); + final Boolean useAvroLogicalTypes = context.getProperty(USE_AVRO_LOGICAL_TYPES).asBoolean(); + final JdbcCommon.AvroConversionOptions options = JdbcCommon.AvroConversionOptions.builder() + .convertNames(convertNamesForAvro) + .useLogicalTypes(useAvroLogicalTypes) + .build(); + final RecordSetWriterFactory recordSetWriterFactory = context.getProperty(RECORD_WRITER_FACTORY).asControllerService(RecordSetWriterFactory.class); + + return new RecordSqlWriter(recordSetWriterFactory, options, maxRowsPerFlowFile, fileToProcess == null ? Collections.emptyMap() : fileToProcess.getAttributes()); + } +} diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/GenerateFlowFile.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/GenerateFlowFile.java index ba3291da9dfe..1d7ed350108a 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/GenerateFlowFile.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/GenerateFlowFile.java @@ -56,7 +56,7 @@ @Tags({"test", "random", "generate"}) @InputRequirement(Requirement.INPUT_FORBIDDEN) @CapabilityDescription("This processor creates FlowFiles with random data or custom content. GenerateFlowFile is useful" + - "for load testing, configuration, and simulation.") + " for load testing, configuration, and simulation.") @DynamicProperty(name = "Generated FlowFile attribute name", value = "Generated FlowFile attribute value", expressionLanguageScope = ExpressionLanguageScope.VARIABLE_REGISTRY, description = "Specifies an attribute on generated FlowFiles defined by the Dynamic Property's key and value." 
+ diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ListenHTTP.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ListenHTTP.java index 209e6d190272..5ea9f3afee6e 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ListenHTTP.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/ListenHTTP.java @@ -134,6 +134,25 @@ public class ListenHTTP extends AbstractSessionFactoryProcessor { .defaultValue(String.valueOf(HttpServletResponse.SC_OK)) .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) .build(); + public static final PropertyDescriptor MULTIPART_REQUEST_MAX_SIZE = new PropertyDescriptor.Builder() + .name("multipart-request-max-size") + .displayName("Multipart Request Max Size") + .description("The max size of the request. Only applies for requests with Content-Type: multipart/form-data, " + + "and is used to prevent denial of service type of attacks, to prevent filling up the heap or disk space") + .required(true) + .addValidator(StandardValidators.DATA_SIZE_VALIDATOR) + .defaultValue("1 MB") + .build(); + public static final PropertyDescriptor MULTIPART_READ_BUFFER_SIZE = new PropertyDescriptor.Builder() + .name("multipart-read-buffer-size") + .displayName("Multipart Read Buffer Size") + .description("The threshold size, at which the contents of an incoming file would be written to disk. " + + "Only applies for requests with Content-Type: multipart/form-data. " + + "It is used to prevent denial of service type of attacks, to prevent filling up the heap or disk space.") + .required(true) + .addValidator(StandardValidators.DATA_SIZE_VALIDATOR) + .defaultValue("512 KB") + .build(); public static final String CONTEXT_ATTRIBUTE_PROCESSOR = "processor"; public static final String CONTEXT_ATTRIBUTE_LOGGER = "logger"; @@ -145,6 +164,8 @@ public class ListenHTTP extends AbstractSessionFactoryProcessor { public static final String CONTEXT_ATTRIBUTE_STREAM_THROTTLER = "streamThrottler"; public static final String CONTEXT_ATTRIBUTE_BASE_PATH = "basePath"; public static final String CONTEXT_ATTRIBUTE_RETURN_CODE = "returnCode"; + public static final String CONTEXT_ATTRIBUTE_MULTIPART_REQUEST_MAX_SIZE = "multipartRequestMaxSize"; + public static final String CONTEXT_ATTRIBUTE_MULTIPART_READ_BUFFER_SIZE = "multipartReadBufferSize"; private volatile Server server = null; private final ConcurrentMap flowFileMap = new ConcurrentHashMap<>(); @@ -166,6 +187,8 @@ protected void init(final ProcessorInitializationContext context) { descriptors.add(MAX_UNCONFIRMED_TIME); descriptors.add(HEADERS_AS_ATTRIBUTES_REGEX); descriptors.add(RETURN_CODE); + descriptors.add(MULTIPART_REQUEST_MAX_SIZE); + descriptors.add(MULTIPART_READ_BUFFER_SIZE); this.properties = Collections.unmodifiableList(descriptors); } @@ -214,6 +237,8 @@ private void createHttpServerFromService(final ProcessContext context) throws Ex final Double maxBytesPerSecond = context.getProperty(MAX_DATA_RATE).asDataSize(DataUnit.B); final StreamThrottler streamThrottler = (maxBytesPerSecond == null) ? 
null : new LeakyBucketStreamThrottler(maxBytesPerSecond.intValue()); final int returnCode = context.getProperty(RETURN_CODE).asInteger(); + long requestMaxSize = context.getProperty(MULTIPART_REQUEST_MAX_SIZE).asDataSize(DataUnit.B).longValue(); + int readBufferSize = context.getProperty(MULTIPART_READ_BUFFER_SIZE).asDataSize(DataUnit.B).intValue(); throttlerRef.set(streamThrottler); final boolean needClientAuth = sslContextService != null && sslContextService.getTrustStoreFile() != null; @@ -295,7 +320,9 @@ private void createHttpServerFromService(final ProcessContext context) throws Ex contextHandler.setAttribute(CONTEXT_ATTRIBUTE_AUTHORITY_PATTERN, Pattern.compile(context.getProperty(AUTHORIZED_DN_PATTERN).getValue())); contextHandler.setAttribute(CONTEXT_ATTRIBUTE_STREAM_THROTTLER, streamThrottler); contextHandler.setAttribute(CONTEXT_ATTRIBUTE_BASE_PATH, basePath); - contextHandler.setAttribute(CONTEXT_ATTRIBUTE_RETURN_CODE,returnCode); + contextHandler.setAttribute(CONTEXT_ATTRIBUTE_RETURN_CODE, returnCode); + contextHandler.setAttribute(CONTEXT_ATTRIBUTE_MULTIPART_REQUEST_MAX_SIZE, requestMaxSize); + contextHandler.setAttribute(CONTEXT_ATTRIBUTE_MULTIPART_READ_BUFFER_SIZE, readBufferSize); if (context.getProperty(HEADERS_AS_ATTRIBUTES_REGEX).isSet()) { contextHandler.setAttribute(CONTEXT_ATTRIBUTE_HEADER_PATTERN, Pattern.compile(context.getProperty(HEADERS_AS_ATTRIBUTES_REGEX).getValue())); diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/PutEmail.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/PutEmail.java index 8fdcc14e8ecf..b7b8232802ad 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/PutEmail.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/PutEmail.java @@ -165,28 +165,32 @@ public class PutEmail extends AbstractProcessor { .build(); public static final PropertyDescriptor FROM = new PropertyDescriptor.Builder() .name("From") - .description("Specifies the Email address to use as the sender") + .description("Specifies the Email address to use as the sender. " + + "Comma separated sequence of addresses following RFC822 syntax.") .required(true) .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES) .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) .build(); public static final PropertyDescriptor TO = new PropertyDescriptor.Builder() .name("To") - .description("The recipients to include in the To-Line of the email") + .description("The recipients to include in the To-Line of the email. " + + "Comma separated sequence of addresses following RFC822 syntax.") .required(false) .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES) .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) .build(); public static final PropertyDescriptor CC = new PropertyDescriptor.Builder() .name("CC") - .description("The recipients to include in the CC-Line of the email") + .description("The recipients to include in the CC-Line of the email. 
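Editor's note on the ListenHTTP change above: the two new properties are read as byte counts and placed on the servlet context, which matches the standard servlet multipart configuration model. A hedged sketch of that mapping, assuming a javax.servlet MultipartConfigElement is built downstream; the exact wiring inside ListenHTTP's servlet is not shown in this excerpt, and only the use of java.io.tmpdir follows the commit message.

import javax.servlet.MultipartConfigElement;

// Hedged sketch: mapping the two ListenHTTP settings onto a standard servlet multipart config.
public class MultipartConfigSketch {
    static MultipartConfigElement fromProcessorSettings(long requestMaxSize, int readBufferSize) {
        return new MultipartConfigElement(
                System.getProperty("java.io.tmpdir"), // location for parts spilled to disk
                requestMaxSize,                        // max size of a single uploaded part
                requestMaxSize,                        // max size of the whole multipart request
                readBufferSize);                       // in-memory threshold before writing to disk
    }
}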
" + + "Comma separated sequence of addresses following RFC822 syntax.") .required(false) .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES) .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) .build(); public static final PropertyDescriptor BCC = new PropertyDescriptor.Builder() .name("BCC") - .description("The recipients to include in the BCC-Line of the email") + .description("The recipients to include in the BCC-Line of the email. " + + "Comma separated sequence of addresses following RFC822 syntax.") .required(false) .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES) .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTable.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTable.java index 1923e2c1ba9e..71348ef956c6 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTable.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTable.java @@ -16,7 +16,6 @@ */ package org.apache.nifi.processors.standard; -import org.apache.commons.lang3.StringUtils; import org.apache.nifi.annotation.behavior.DynamicProperty; import org.apache.nifi.annotation.behavior.InputRequirement; import org.apache.nifi.annotation.behavior.InputRequirement.Requirement; @@ -27,49 +26,21 @@ import org.apache.nifi.annotation.documentation.CapabilityDescription; import org.apache.nifi.annotation.documentation.SeeAlso; import org.apache.nifi.annotation.documentation.Tags; -import org.apache.nifi.annotation.lifecycle.OnScheduled; -import org.apache.nifi.annotation.lifecycle.OnStopped; import org.apache.nifi.components.PropertyDescriptor; import org.apache.nifi.components.state.Scope; -import org.apache.nifi.components.state.StateManager; -import org.apache.nifi.components.state.StateMap; -import org.apache.nifi.dbcp.DBCPService; -import org.apache.nifi.expression.AttributeExpression; import org.apache.nifi.expression.ExpressionLanguageScope; -import org.apache.nifi.flowfile.FlowFile; -import org.apache.nifi.flowfile.attributes.CoreAttributes; -import org.apache.nifi.flowfile.attributes.FragmentAttributes; -import org.apache.nifi.logging.ComponentLog; import org.apache.nifi.processor.ProcessContext; import org.apache.nifi.processor.ProcessSession; -import org.apache.nifi.processor.ProcessSessionFactory; import org.apache.nifi.processor.Relationship; -import org.apache.nifi.processor.exception.ProcessException; -import org.apache.nifi.processor.util.StandardValidators; -import org.apache.nifi.processors.standard.db.DatabaseAdapter; +import org.apache.nifi.processors.standard.sql.DefaultAvroSqlWriter; +import org.apache.nifi.processors.standard.sql.SqlWriter; import org.apache.nifi.processors.standard.util.JdbcCommon; -import org.apache.nifi.util.StopWatch; -import java.io.IOException; -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.SQLException; -import java.sql.Statement; -import java.text.ParseException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; import 
java.util.Set; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.IntStream; import static org.apache.nifi.processors.standard.util.JdbcCommon.DEFAULT_PRECISION; import static org.apache.nifi.processors.standard.util.JdbcCommon.DEFAULT_SCALE; @@ -112,60 +83,7 @@ @DynamicProperty(name = "initial.maxvalue.", value = "Initial maximum value for the specified column", expressionLanguageScope = ExpressionLanguageScope.VARIABLE_REGISTRY, description = "Specifies an initial max value for max value column(s). Properties should " + "be added in the format `initial.maxvalue.`. This value is only used the first time the table is accessed (when a Maximum Value Column is specified).") -public class QueryDatabaseTable extends AbstractDatabaseFetchProcessor { - - public static final String RESULT_TABLENAME = "tablename"; - public static final String RESULT_ROW_COUNT = "querydbtable.row.count"; - - public static final String FRAGMENT_ID = FragmentAttributes.FRAGMENT_ID.key(); - public static final String FRAGMENT_INDEX = FragmentAttributes.FRAGMENT_INDEX.key(); - - public static final PropertyDescriptor FETCH_SIZE = new PropertyDescriptor.Builder() - .name("Fetch Size") - .description("The number of result rows to be fetched from the result set at a time. This is a hint to the database driver and may not be " - + "honored and/or exact. If the value specified is zero, then the hint is ignored.") - .defaultValue("0") - .required(true) - .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) - .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) - .build(); - - public static final PropertyDescriptor MAX_ROWS_PER_FLOW_FILE = new PropertyDescriptor.Builder() - .name("qdbt-max-rows") - .displayName("Max Rows Per Flow File") - .description("The maximum number of result rows that will be included in a single FlowFile. This will allow you to break up very large " - + "result sets into multiple FlowFiles. If the value specified is zero, then all rows are returned in a single FlowFile.") - .defaultValue("0") - .required(true) - .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) - .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) - .build(); - - public static final PropertyDescriptor OUTPUT_BATCH_SIZE = new PropertyDescriptor.Builder() - .name("qdbt-output-batch-size") - .displayName("Output Batch Size") - .description("The number of output FlowFiles to queue before committing the process session. When set to zero, the session will be committed when all result set rows " - + "have been processed and the output FlowFiles are ready for transfer to the downstream relationship. For large result sets, this can cause a large burst of FlowFiles " - + "to be transferred at the end of processor execution. If this property is set, then when the specified number of FlowFiles are ready for transfer, then the session will " - + "be committed, thus releasing the FlowFiles to the downstream relationship. 
NOTE: The maxvalue.* and fragment.count attributes will not be set on FlowFiles when this " - + "property is set.") - .defaultValue("0") - .required(true) - .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) - .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) - .build(); - - public static final PropertyDescriptor MAX_FRAGMENTS = new PropertyDescriptor.Builder() - .name("qdbt-max-frags") - .displayName("Maximum Number of Fragments") - .description("The maximum number of fragments. If the value specified is zero, then all fragments are returned. " + - "This prevents OutOfMemoryError when this processor ingests huge table. NOTE: Setting this property can result in data loss, as the incoming results are " - + "not ordered, and fragments may end at arbitrary boundaries where rows are not included in the result set.") - .defaultValue("0") - .required(true) - .addValidator(StandardValidators.NON_NEGATIVE_INTEGER_VALIDATOR) - .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) - .build(); +public class QueryDatabaseTable extends AbstractQueryDatabaseTable { public QueryDatabaseTable() { final Set r = new HashSet<>(); @@ -197,365 +115,22 @@ public QueryDatabaseTable() { } @Override - public Set getRelationships() { - return relationships; - } - - @Override - protected List getSupportedPropertyDescriptors() { - return propDescriptors; - } - - @Override - protected PropertyDescriptor getSupportedDynamicPropertyDescriptor(final String propertyDescriptorName) { - return new PropertyDescriptor.Builder() - .name(propertyDescriptorName) - .required(false) - .addValidator(StandardValidators.createAttributeExpressionLanguageValidator(AttributeExpression.ResultType.STRING, true)) - .addValidator(StandardValidators.ATTRIBUTE_KEY_PROPERTY_NAME_VALIDATOR) - .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) - .dynamic(true) - .build(); - } - - @OnScheduled - public void setup(final ProcessContext context) { - maxValueProperties = getDefaultMaxValueProperties(context, null); - } - - @OnStopped - public void stop() { - // Reset the column type map in case properties change - setupComplete.set(false); - } - - @Override - public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException { - // Fetch the column/table info once - if (!setupComplete.get()) { - super.setup(context); - } - ProcessSession session = sessionFactory.createSession(); - final List resultSetFlowFiles = new ArrayList<>(); - - final ComponentLog logger = getLogger(); - - final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class); - final DatabaseAdapter dbAdapter = dbAdapters.get(context.getProperty(DB_TYPE).getValue()); + protected SqlWriter configureSqlWriter(ProcessSession session, ProcessContext context) { final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue(); - final String columnNames = context.getProperty(COLUMN_NAMES).evaluateAttributeExpressions().getValue(); - final String sqlQuery = context.getProperty(SQL_QUERY).evaluateAttributeExpressions().getValue(); - final String maxValueColumnNames = context.getProperty(MAX_VALUE_COLUMN_NAMES).evaluateAttributeExpressions().getValue(); - final String customWhereClause = context.getProperty(WHERE_CLAUSE).evaluateAttributeExpressions().getValue(); - final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger(); + final boolean 
convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean(); + final Boolean useAvroLogicalTypes = context.getProperty(USE_AVRO_LOGICAL_TYPES).asBoolean(); final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE).evaluateAttributeExpressions().asInteger(); - final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions().asInteger(); - final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField; - final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet() - ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions().asInteger() - : 0; + final Integer defaultPrecision = context.getProperty(DEFAULT_PRECISION).evaluateAttributeExpressions().asInteger(); + final Integer defaultScale = context.getProperty(DEFAULT_SCALE).evaluateAttributeExpressions().asInteger(); + final JdbcCommon.AvroConversionOptions options = JdbcCommon.AvroConversionOptions.builder() .recordName(tableName) + .convertNames(convertNamesForAvro) + .useLogicalTypes(useAvroLogicalTypes) + .defaultPrecision(defaultPrecision) + .defaultScale(defaultScale) .maxRows(maxRowsPerFlowFile) - .convertNames(context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean()) - .useLogicalTypes(context.getProperty(USE_AVRO_LOGICAL_TYPES).asBoolean()) - .defaultPrecision(context.getProperty(DEFAULT_PRECISION).evaluateAttributeExpressions().asInteger()) - .defaultScale(context.getProperty(DEFAULT_SCALE).evaluateAttributeExpressions().asInteger()) .build(); - - final StateManager stateManager = context.getStateManager(); - final StateMap stateMap; - - try { - stateMap = stateManager.getState(Scope.CLUSTER); - } catch (final IOException ioe) { - getLogger().error("Failed to retrieve observed maximum values from the State Manager. Will not perform " - + "query until this is accomplished.", ioe); - context.yield(); - return; - } - // Make a mutable copy of the current state property map. This will be updated by the result row callback, and eventually - // set as the current state map (after the session has been committed) - final Map statePropertyMap = new HashMap<>(stateMap.toMap()); - - //If an initial max value for column(s) has been specified using properties, and this column is not in the state manager, sync them to the state property map - for (final Map.Entry maxProp : maxValueProperties.entrySet()) { - String maxPropKey = maxProp.getKey().toLowerCase(); - String fullyQualifiedMaxPropKey = getStateKey(tableName, maxPropKey, dbAdapter); - if (!statePropertyMap.containsKey(fullyQualifiedMaxPropKey)) { - String newMaxPropValue; - // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme) - // the value has been stored under a key that is only the column name. Fall back to check the column name, - // but store the new initial max value under the fully-qualified key. - if (statePropertyMap.containsKey(maxPropKey)) { - newMaxPropValue = statePropertyMap.get(maxPropKey); - } else { - newMaxPropValue = maxProp.getValue(); - } - statePropertyMap.put(fullyQualifiedMaxPropKey, newMaxPropValue); - - } - } - - List maxValueColumnNameList = StringUtils.isEmpty(maxValueColumnNames) - ? 
null - : Arrays.asList(maxValueColumnNames.split("\\s*,\\s*")); - final String selectQuery = getQuery(dbAdapter, tableName, sqlQuery, columnNames, maxValueColumnNameList, customWhereClause, statePropertyMap); - final StopWatch stopWatch = new StopWatch(true); - final String fragmentIdentifier = UUID.randomUUID().toString(); - - try (final Connection con = dbcpService.getConnection(Collections.emptyMap()); - final Statement st = con.createStatement()) { - - if (fetchSize != null && fetchSize > 0) { - try { - st.setFetchSize(fetchSize); - } catch (SQLException se) { - // Not all drivers support this, just log the error (at debug level) and move on - logger.debug("Cannot set fetch size to {} due to {}", new Object[]{fetchSize, se.getLocalizedMessage()}, se); - } - } - - String jdbcURL = "DBCPService"; - try { - DatabaseMetaData databaseMetaData = con.getMetaData(); - if (databaseMetaData != null) { - jdbcURL = databaseMetaData.getURL(); - } - } catch (SQLException se) { - // Ignore and use default JDBC URL. This shouldn't happen unless the driver doesn't implement getMetaData() properly - } - - final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions().asTimePeriod(TimeUnit.SECONDS).intValue(); - st.setQueryTimeout(queryTimeout); // timeout in seconds - if (logger.isDebugEnabled()) { - logger.debug("Executing query {}", new Object[] { selectQuery }); - } - try (final ResultSet resultSet = st.executeQuery(selectQuery)) { - int fragmentIndex=0; - // Max values will be updated in the state property map by the callback - final MaxValueResultSetRowCollector maxValCollector = new MaxValueResultSetRowCollector(tableName, statePropertyMap, dbAdapter); - - while(true) { - final AtomicLong nrOfRows = new AtomicLong(0L); - - FlowFile fileToProcess = session.create(); - try { - fileToProcess = session.write(fileToProcess, out -> { - try { - nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, options, maxValCollector)); - } catch (SQLException | RuntimeException e) { - throw new ProcessException("Error during database query or conversion of records to Avro.", e); - } - }); - } catch (ProcessException e) { - // Add flowfile to results before rethrowing so it will be removed from session in outer catch - resultSetFlowFiles.add(fileToProcess); - throw e; - } - - if (nrOfRows.get() > 0) { - // set attribute how many rows were selected - fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get())); - fileToProcess = session.putAttribute(fileToProcess, RESULT_TABLENAME, tableName); - fileToProcess = session.putAttribute(fileToProcess, CoreAttributes.MIME_TYPE.key(), JdbcCommon.MIME_TYPE_AVRO_BINARY); - if(maxRowsPerFlowFile > 0) { - fileToProcess = session.putAttribute(fileToProcess, FRAGMENT_ID, fragmentIdentifier); - fileToProcess = session.putAttribute(fileToProcess, FRAGMENT_INDEX, String.valueOf(fragmentIndex)); - } - - logger.info("{} contains {} Avro records; transferring to 'success'", - new Object[]{fileToProcess, nrOfRows.get()}); - - session.getProvenanceReporter().receive(fileToProcess, jdbcURL, stopWatch.getElapsed(TimeUnit.MILLISECONDS)); - resultSetFlowFiles.add(fileToProcess); - // If we've reached the batch size, send out the flow files - if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) { - session.transfer(resultSetFlowFiles, REL_SUCCESS); - session.commit(); - resultSetFlowFiles.clear(); - } - } else { - // If there were no rows returned, don't send the flowfile - 
session.remove(fileToProcess); - // If no rows and this was first FlowFile, yield - if(fragmentIndex == 0){ - context.yield(); - } - break; - } - - fragmentIndex++; - if (maxFragments > 0 && fragmentIndex >= maxFragments) { - break; - } - - // If we aren't splitting up the data into flow files or fragments, then the result set has been entirely fetched so don't loop back around - if (maxFragments == 0 && maxRowsPerFlowFile == 0) { - break; - } - - // If we are splitting up the data into flow files, don't loop back around if we've gotten all results - if(maxRowsPerFlowFile > 0 && nrOfRows.get() < maxRowsPerFlowFile) { - break; - } - } - - // Apply state changes from the Max Value tracker - maxValCollector.applyStateChanges(); - - // Even though the maximum value and total count are known at this point, to maintain consistent behavior if Output Batch Size is set, do not store the attributes - if (outputBatchSize == 0) { - for (int i = 0; i < resultSetFlowFiles.size(); i++) { - // Add maximum values as attributes - for (Map.Entry entry : statePropertyMap.entrySet()) { - // Get just the column name from the key - String key = entry.getKey(); - String colName = key.substring(key.lastIndexOf(NAMESPACE_DELIMITER) + NAMESPACE_DELIMITER.length()); - resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i), "maxvalue." + colName, entry.getValue())); - } - - //set count on all FlowFiles - if (maxRowsPerFlowFile > 0) { - resultSetFlowFiles.set(i, - session.putAttribute(resultSetFlowFiles.get(i), "fragment.count", Integer.toString(fragmentIndex))); - } - } - } - } catch (final SQLException e) { - throw e; - } - - session.transfer(resultSetFlowFiles, REL_SUCCESS); - - } catch (final ProcessException | SQLException e) { - logger.error("Unable to execute SQL select query {} due to {}", new Object[]{selectQuery, e}); - if (!resultSetFlowFiles.isEmpty()) { - session.remove(resultSetFlowFiles); - } - context.yield(); - } finally { - session.commit(); - try { - // Update the state - stateManager.setState(statePropertyMap, Scope.CLUSTER); - } catch (IOException ioe) { - getLogger().error("{} failed to update State Manager, maximum observed values will not be recorded", new Object[]{this, ioe}); - } - } - } - - protected String getQuery(DatabaseAdapter dbAdapter, String tableName, String columnNames, List maxValColumnNames, - String customWhereClause, Map stateMap) { - - return getQuery(dbAdapter, tableName, null, columnNames, maxValColumnNames, customWhereClause, stateMap); - } - - protected String getQuery(DatabaseAdapter dbAdapter, String tableName, String sqlQuery, String columnNames, List maxValColumnNames, - String customWhereClause, Map stateMap) { - if (StringUtils.isEmpty(tableName)) { - throw new IllegalArgumentException("Table name must be specified"); - } - final StringBuilder query; - - if (StringUtils.isEmpty(sqlQuery)) { - query = new StringBuilder(dbAdapter.getSelectStatement(tableName, columnNames, null, null, null, null)); - } else { - query = getWrappedQuery(dbAdapter, sqlQuery, tableName); - } - - List whereClauses = new ArrayList<>(); - // Check state map for last max values - if (stateMap != null && !stateMap.isEmpty() && maxValColumnNames != null) { - IntStream.range(0, maxValColumnNames.size()).forEach((index) -> { - String colName = maxValColumnNames.get(index); - String maxValueKey = getStateKey(tableName, colName, dbAdapter); - String maxValue = stateMap.get(maxValueKey); - if (StringUtils.isEmpty(maxValue)) { - // If we can't find the value at the fully-qualified key 
name, it is possible (under a previous scheme) - // the value has been stored under a key that is only the column name. Fall back to check the column name; either way, when a new - // maximum value is observed, it will be stored under the fully-qualified key from then on. - maxValue = stateMap.get(colName.toLowerCase()); - } - if (!StringUtils.isEmpty(maxValue)) { - Integer type = columnTypeMap.get(maxValueKey); - if (type == null) { - // This shouldn't happen as we are populating columnTypeMap when the processor is scheduled. - throw new IllegalArgumentException("No column type found for: " + colName); - } - // Add a condition for the WHERE clause - whereClauses.add(colName + (index == 0 ? " > " : " >= ") + getLiteralByType(type, maxValue, dbAdapter.getName())); - } - }); - } - - if (customWhereClause != null) { - whereClauses.add("(" + customWhereClause + ")"); - } - - if (!whereClauses.isEmpty()) { - query.append(" WHERE "); - query.append(StringUtils.join(whereClauses, " AND ")); - } - - return query.toString(); - } - - protected class MaxValueResultSetRowCollector implements JdbcCommon.ResultSetRowCallback { - DatabaseAdapter dbAdapter; - final Map newColMap; - final Map originalState; - String tableName; - - public MaxValueResultSetRowCollector(String tableName, Map stateMap, DatabaseAdapter dbAdapter) { - this.dbAdapter = dbAdapter; - this.originalState = stateMap; - - this.newColMap = new HashMap<>(); - this.newColMap.putAll(stateMap); - - this.tableName = tableName; - } - - @Override - public void processRow(ResultSet resultSet) throws IOException { - if (resultSet == null) { - return; - } - try { - // Iterate over the row, check-and-set max values - final ResultSetMetaData meta = resultSet.getMetaData(); - final int nrOfColumns = meta.getColumnCount(); - if (nrOfColumns > 0) { - for (int i = 1; i <= nrOfColumns; i++) { - String colName = meta.getColumnName(i).toLowerCase(); - String fullyQualifiedMaxValueKey = getStateKey(tableName, colName, dbAdapter); - Integer type = columnTypeMap.get(fullyQualifiedMaxValueKey); - // Skip any columns we're not keeping track of or whose value is null - if (type == null || resultSet.getObject(i) == null) { - continue; - } - String maxValueString = newColMap.get(fullyQualifiedMaxValueKey); - // If we can't find the value at the fully-qualified key name, it is possible (under a previous scheme) - // the value has been stored under a key that is only the column name. Fall back to check the column name; either way, when a new - // maximum value is observed, it will be stored under the fully-qualified key from then on. 
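A quick illustration of the incremental WHERE clause built above: the first Maximum-value Column is compared with a strict ">", any additional columns use ">=", and each last-seen value is looked up first under its fully-qualified state key and then under the bare column name for backward compatibility. The standalone sketch below mirrors that logic outside the processor so the behavior is easy to see in isolation; the class name, the "@!@" key delimiter and the sample table and values are illustrative assumptions, and the literal is left unquoted for brevity (the real code formats it per column type via getLiteralByType).

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Standalone illustration only, not part of the patch. Mirrors the incremental
    // WHERE-clause construction shown above under the stated assumptions.
    class IncrementalWhereClauseSketch {

        // Assumed delimiter used to build the fully-qualified state key (table + delimiter + column).
        private static final String NAMESPACE_DELIMITER = "@!@";

        static String buildWhereClause(String tableName, List<String> maxValueColumns, Map<String, String> stateMap) {
            final List<String> clauses = new ArrayList<>();
            for (int index = 0; index < maxValueColumns.size(); index++) {
                final String colName = maxValueColumns.get(index).toLowerCase();
                String maxValue = stateMap.get(tableName.toLowerCase() + NAMESPACE_DELIMITER + colName);
                if (maxValue == null) {
                    // Fall back to the legacy key (bare column name), as the processor does.
                    maxValue = stateMap.get(colName);
                }
                if (maxValue != null) {
                    // The first max-value column uses ">", subsequent columns use ">=".
                    clauses.add(colName + (index == 0 ? " > " : " >= ") + maxValue);
                }
            }
            return clauses.isEmpty() ? "" : " WHERE " + String.join(" AND ", clauses);
        }

        public static void main(String[] args) {
            final Map<String, String> state = new HashMap<>();
            state.put("users" + NAMESPACE_DELIMITER + "id", "1000");
            // Prints: " WHERE id > 1000"
            System.out.println(buildWhereClause("users", Arrays.asList("id"), state));
        }
    }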
- if (StringUtils.isEmpty(maxValueString)) { - maxValueString = newColMap.get(colName); - } - String newMaxValueString = getMaxValueFromRow(resultSet, i, type, maxValueString, dbAdapter.getName()); - if (newMaxValueString != null) { - newColMap.put(fullyQualifiedMaxValueKey, newMaxValueString); - } - } - } - } catch (ParseException | SQLException e) { - throw new IOException(e); - } - } - - @Override - public void applyStateChanges() { - this.originalState.putAll(this.newColMap); - } + return new DefaultAvroSqlWriter(options); } } diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecord.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecord.java new file mode 100644 index 000000000000..ea89256c5731 --- /dev/null +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecord.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.processors.standard; + +import org.apache.nifi.annotation.behavior.DynamicProperty; +import org.apache.nifi.annotation.behavior.InputRequirement; +import org.apache.nifi.annotation.behavior.InputRequirement.Requirement; +import org.apache.nifi.annotation.behavior.Stateful; +import org.apache.nifi.annotation.behavior.TriggerSerially; +import org.apache.nifi.annotation.behavior.WritesAttribute; +import org.apache.nifi.annotation.behavior.WritesAttributes; +import org.apache.nifi.annotation.documentation.CapabilityDescription; +import org.apache.nifi.annotation.documentation.SeeAlso; +import org.apache.nifi.annotation.documentation.Tags; +import org.apache.nifi.components.PropertyDescriptor; +import org.apache.nifi.components.state.Scope; +import org.apache.nifi.expression.ExpressionLanguageScope; +import org.apache.nifi.processor.ProcessContext; +import org.apache.nifi.processor.ProcessSession; +import org.apache.nifi.processor.Relationship; +import org.apache.nifi.processors.standard.sql.RecordSqlWriter; +import org.apache.nifi.processors.standard.sql.SqlWriter; +import org.apache.nifi.processors.standard.util.JdbcCommon; +import org.apache.nifi.serialization.RecordSetWriterFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.apache.nifi.processors.standard.util.JdbcCommon.USE_AVRO_LOGICAL_TYPES; + + +@TriggerSerially +@InputRequirement(Requirement.INPUT_FORBIDDEN) +@Tags({"sql", "select", "jdbc", "query", "database", "record"}) +@SeeAlso({GenerateTableFetch.class, ExecuteSQL.class}) +@CapabilityDescription("Generates a SQL select query, or uses a provided statement, and executes it to fetch all rows whose values in the specified " + + "Maximum Value column(s) are larger than the " + + "previously-seen maxima. Query result will be converted to the format specified by the record writer. Expression Language is supported for several properties, but no incoming " + + "connections are permitted. The Variable Registry may be used to provide values for any property containing Expression Language. If it is desired to " + + "leverage flow file attributes to perform these queries, the GenerateTableFetch and/or ExecuteSQL processors can be used for this purpose. " + + "Streaming is used so arbitrarily large result sets are supported. This processor can be scheduled to run on " + + "a timer or cron expression, using the standard scheduling methods. This processor is intended to be run on the Primary Node only. FlowFile attribute " + + "'querydbtable.row.count' indicates how many rows were selected.") +@Stateful(scopes = Scope.CLUSTER, description = "After performing a query on the specified table, the maximum values for " + + "the specified column(s) will be retained for use in future executions of the query. This allows the Processor " + + "to fetch only those records that have max values greater than the retained values. This can be used for " + + "incremental fetching, fetching of newly added rows, etc. 
To clear the maximum values, clear the state of the processor " + + "per the State Management documentation") +@WritesAttributes({ + @WritesAttribute(attribute = "tablename", description="Name of the table being queried"), + @WritesAttribute(attribute = "querydbtable.row.count", description="The number of rows selected by the query"), + @WritesAttribute(attribute="fragment.identifier", description="If 'Max Rows Per Flow File' is set then all FlowFiles from the same query result set " + + "will have the same value for the fragment.identifier attribute. This can then be used to correlate the results."), + @WritesAttribute(attribute = "fragment.count", description = "If 'Max Rows Per Flow File' is set then this is the total number of " + + "FlowFiles produced by a single ResultSet. This can be used in conjunction with the " + + "fragment.identifier attribute in order to know how many FlowFiles belonged to the same incoming ResultSet. If Output Batch Size is set, then this " + + "attribute will not be populated."), + @WritesAttribute(attribute="fragment.index", description="If 'Max Rows Per Flow File' is set then the position of this FlowFile in the list of " + + "outgoing FlowFiles that were all derived from the same result set FlowFile. This can be " + + "used in conjunction with the fragment.identifier attribute to know which FlowFiles originated from the same query result set and in what order " + + "FlowFiles were produced"), + @WritesAttribute(attribute = "maxvalue.*", description = "Each attribute contains the observed maximum value of a specified 'Maximum-value Column'. The " + + "suffix of the attribute is the name of the column. If Output Batch Size is set, then this attribute will not be populated."), + @WritesAttribute(attribute = "mime.type", description = "Sets the mime.type attribute to the MIME Type specified by the Record Writer."), + @WritesAttribute(attribute = "record.count", description = "The number of records output by the Record Writer.") +}) +@DynamicProperty(name = "initial.maxvalue.", value = "Initial maximum value for the specified column", + expressionLanguageScope = ExpressionLanguageScope.VARIABLE_REGISTRY, description = "Specifies an initial max value for max value column(s). Properties should " + + "be added in the format `initial.maxvalue.`. This value is only used the first time the table is accessed (when a Maximum Value Column is specified).") +public class QueryDatabaseTableRecord extends AbstractQueryDatabaseTable { + + public static final PropertyDescriptor RECORD_WRITER_FACTORY = new PropertyDescriptor.Builder() + .name("qdbtr-record-writer") + .displayName("Record Writer") + .description("Specifies the Controller Service to use for writing results to a FlowFile. The Record Writer may use Inherit Schema to emulate the inferred schema behavior, i.e. " + + "an explicit schema need not be defined in the writer, and will be supplied by the same logic used to infer the schema from the column types.") + .identifiesControllerService(RecordSetWriterFactory.class) + .required(true) + .build(); + + public static final PropertyDescriptor NORMALIZE_NAMES = new PropertyDescriptor.Builder() + .name("qdbtr-normalize") + .displayName("Normalize Table/Column Names") + .description("Whether to change characters in column names when creating the output schema. 
For example, colons and periods will be changed to underscores.") + .allowableValues("true", "false") + .defaultValue("false") + .required(true) + .build(); + + public QueryDatabaseTableRecord() { + final Set r = new HashSet<>(); + r.add(REL_SUCCESS); + relationships = Collections.unmodifiableSet(r); + + final List pds = new ArrayList<>(); + pds.add(DBCP_SERVICE); + pds.add(DB_TYPE); + pds.add(new PropertyDescriptor.Builder() + .fromPropertyDescriptor(TABLE_NAME) + .description("The name of the database table to be queried. When a custom query is used, this property is used to alias the query and appears as an attribute on the FlowFile.") + .build()); + pds.add(COLUMN_NAMES); + pds.add(WHERE_CLAUSE); + pds.add(SQL_QUERY); + pds.add(RECORD_WRITER_FACTORY); + pds.add(MAX_VALUE_COLUMN_NAMES); + pds.add(QUERY_TIMEOUT); + pds.add(FETCH_SIZE); + pds.add(MAX_ROWS_PER_FLOW_FILE); + pds.add(OUTPUT_BATCH_SIZE); + pds.add(MAX_FRAGMENTS); + pds.add(NORMALIZE_NAMES); + pds.add(USE_AVRO_LOGICAL_TYPES); + + propDescriptors = Collections.unmodifiableList(pds); + } + + @Override + protected SqlWriter configureSqlWriter(ProcessSession session, ProcessContext context) { + final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE).evaluateAttributeExpressions().asInteger(); + final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES).asBoolean(); + final Boolean useAvroLogicalTypes = context.getProperty(USE_AVRO_LOGICAL_TYPES).asBoolean(); + final JdbcCommon.AvroConversionOptions options = JdbcCommon.AvroConversionOptions.builder() + .convertNames(convertNamesForAvro) + .useLogicalTypes(useAvroLogicalTypes) + .build(); + final RecordSetWriterFactory recordSetWriterFactory = context.getProperty(RECORD_WRITER_FACTORY).asControllerService(RecordSetWriterFactory.class); + + return new RecordSqlWriter(recordSetWriterFactory, options, maxRowsPerFlowFile, Collections.emptyMap()); + } +} diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/SplitText.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/SplitText.java index 5ed4d9e7d809..d16bc4be5fb0 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/SplitText.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/SplitText.java @@ -16,31 +16,14 @@ */ package org.apache.nifi.processors.standard; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; - import org.apache.commons.io.IOUtils; import org.apache.nifi.annotation.behavior.EventDriven; -import org.apache.nifi.annotation.behavior.SystemResourceConsideration; import org.apache.nifi.annotation.behavior.InputRequirement; import org.apache.nifi.annotation.behavior.InputRequirement.Requirement; import org.apache.nifi.annotation.behavior.SideEffectFree; import org.apache.nifi.annotation.behavior.SupportsBatching; import org.apache.nifi.annotation.behavior.SystemResource; 
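For context on how the new QueryDatabaseTableRecord processor above is typically exercised, the sketch below wires it into NiFi's mock framework with an in-memory Derby connection and a mock record writer. It is only a sketch under stated assumptions: SimpleDbcpService, the Derby URL, the TEST_TABLE name and the MockRecordWriter constructor arguments are illustrative, and a real test (such as the QueryDatabaseTableRecordTest added later in this patch) also creates and populates the table before triggering the processor.

    import org.apache.nifi.controller.AbstractControllerService;
    import org.apache.nifi.dbcp.DBCPService;
    import org.apache.nifi.processor.exception.ProcessException;
    import org.apache.nifi.processors.standard.QueryDatabaseTableRecord;
    import org.apache.nifi.serialization.record.MockRecordWriter;
    import org.apache.nifi.util.TestRunner;
    import org.apache.nifi.util.TestRunners;

    import java.sql.Connection;
    import java.sql.DriverManager;

    // Illustrative harness only; assumes an embedded Derby driver is on the classpath
    // and that TEST_TABLE has been created and populated beforehand.
    public class QueryDatabaseTableRecordSketch {

        // Minimal DBCPService handing out connections to an in-memory Derby database.
        static class SimpleDbcpService extends AbstractControllerService implements DBCPService {
            @Override
            public Connection getConnection() throws ProcessException {
                try {
                    return DriverManager.getConnection("jdbc:derby:memory:qdbtRecord;create=true");
                } catch (final Exception e) {
                    throw new ProcessException("Unable to open Derby connection", e);
                }
            }
        }

        public static void main(String[] args) throws Exception {
            final TestRunner runner = TestRunners.newTestRunner(QueryDatabaseTableRecord.class);

            final DBCPService dbcp = new SimpleDbcpService();
            runner.addControllerService("dbcp", dbcp);
            runner.enableControllerService(dbcp);

            // MockRecordWriter ships with nifi-mock; the (header, quoteValues) arguments are assumptions.
            final MockRecordWriter writer = new MockRecordWriter(null, true);
            runner.addControllerService("writer", writer);
            runner.enableControllerService(writer);

            runner.setProperty(QueryDatabaseTableRecord.DBCP_SERVICE, "dbcp");
            runner.setProperty(QueryDatabaseTableRecord.RECORD_WRITER_FACTORY, "writer");
            runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_TABLE");
            runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "id");

            // Each run fetches only rows whose "id" exceeds the maximum recorded in processor state.
            runner.run();
            runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS)
                  .forEach(ff -> System.out.println(ff.getAttribute("record.count")));
        }
    }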
+import org.apache.nifi.annotation.behavior.SystemResourceConsideration; import org.apache.nifi.annotation.behavior.WritesAttribute; import org.apache.nifi.annotation.behavior.WritesAttributes; import org.apache.nifi.annotation.documentation.CapabilityDescription; @@ -65,6 +48,25 @@ import org.apache.nifi.stream.io.util.TextLineDemarcator; import org.apache.nifi.stream.io.util.TextLineDemarcator.OffsetInfo; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + @EventDriven @SideEffectFree @SupportsBatching @@ -158,19 +160,17 @@ public class SplitText extends AbstractProcessor { private static final Set relationships; static { - properties = Collections.unmodifiableList(Arrays.asList(new PropertyDescriptor[]{ - LINE_SPLIT_COUNT, - FRAGMENT_MAX_SIZE, - HEADER_LINE_COUNT, - HEADER_MARKER, - REMOVE_TRAILING_NEWLINES - })); - - relationships = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(new Relationship[]{ - REL_ORIGINAL, - REL_SPLITS, - REL_FAILURE - }))); + properties = Collections.unmodifiableList(Arrays.asList( + LINE_SPLIT_COUNT, + FRAGMENT_MAX_SIZE, + HEADER_LINE_COUNT, + HEADER_MARKER, + REMOVE_TRAILING_NEWLINES)); + + relationships = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( + REL_ORIGINAL, + REL_SPLITS, + REL_FAILURE))); } private volatile boolean removeTrailingNewLines; @@ -259,9 +259,11 @@ public void process(InputStream in) throws IOException { processSession.transfer(sourceFlowFile, REL_FAILURE); } else { final String fragmentId = UUID.randomUUID().toString(); - List splitFlowFiles = this.generateSplitFlowFiles(fragmentId, sourceFlowFile, headerSplitInfoRef.get(), computedSplitsInfo, processSession); + final List splitFlowFiles = this.generateSplitFlowFiles(fragmentId, sourceFlowFile, headerSplitInfoRef.get(), computedSplitsInfo, processSession); + final FlowFile originalFlowFile = FragmentAttributes.copyAttributesToOriginal(processSession, sourceFlowFile, fragmentId, splitFlowFiles.size()); processSession.transfer(originalFlowFile, REL_ORIGINAL); + if (!splitFlowFiles.isEmpty()) { processSession.transfer(splitFlowFiles, REL_SPLITS); } @@ -291,6 +293,7 @@ protected List getSupportedPropertyDescriptors() { */ private List generateSplitFlowFiles(String fragmentId, FlowFile sourceFlowFile, SplitInfo splitInfo, List computedSplitsInfo, ProcessSession processSession){ List splitFlowFiles = new ArrayList<>(); + FlowFile headerFlowFile = null; long headerCrlfLength = 0; if (splitInfo != null) { @@ -305,7 +308,11 @@ private List generateSplitFlowFiles(String fragmentId, FlowFile source fragmentId, fragmentIndex++, sourceFlowFile.getAttribute(CoreAttributes.FILENAME.key())); splitFlowFiles.add(splitFlowFile); } else { - for (SplitInfo computedSplitInfo : computedSplitsInfo) { + final Iterator itr = computedSplitsInfo.iterator(); + while (itr.hasNext()) { + final SplitInfo computedSplitInfo = itr.next(); + itr.remove(); + long length = this.removeTrailingNewLines ? 
computedSplitInfo.trimmedLength : computedSplitInfo.length; boolean proceedWithClone = headerFlowFile != null || length > 0; if (proceedWithClone) { @@ -326,16 +333,24 @@ private List generateSplitFlowFiles(String fragmentId, FlowFile source splitFlowFiles.add(splitFlowFile); } } + // Update fragment.count with real split count (i.e. don't count files for which there was no clone) - for (FlowFile splitFlowFile : splitFlowFiles) { - splitFlowFile = processSession.putAttribute(splitFlowFile, FRAGMENT_COUNT, String.valueOf(fragmentIndex - 1)); // -1 because the index starts at 1 (see above) + final String fragmentCount = String.valueOf(fragmentIndex - 1); // -1 because the index starts at 1 (see above) + + final ListIterator<FlowFile> flowFileItr = splitFlowFiles.listIterator(); + while (flowFileItr.hasNext()) { + FlowFile splitFlowFile = flowFileItr.next(); + + final FlowFile updated = processSession.putAttribute(splitFlowFile, FRAGMENT_COUNT, fragmentCount); + flowFileItr.set(updated); } } - getLogger().info("Split " + sourceFlowFile + " into " + splitFlowFiles.size() + " flow files" + (headerFlowFile != null ? " containing headers." : ".")); + getLogger().info("Split {} into {} FlowFiles{}", new Object[] {sourceFlowFile, splitFlowFiles.size(), headerFlowFile == null ? "." : " containing headers."}); if (headerFlowFile != null) { processSession.remove(headerFlowFile); } + return splitFlowFiles; } diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/SplitXml.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/SplitXml.java index bc031fef087b..889c5672a372 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/SplitXml.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/SplitXml.java @@ -36,7 +36,7 @@ import java.util.concurrent.atomic.AtomicInteger; import javax.xml.parsers.ParserConfigurationException; import javax.xml.parsers.SAXParserFactory; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.nifi.annotation.behavior.EventDriven; import org.apache.nifi.annotation.behavior.SystemResourceConsideration; import org.apache.nifi.annotation.behavior.InputRequirement; diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/servlets/ListenHTTPServlet.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/servlets/ListenHTTPServlet.java index 85de6f89d253..07ccd69cf043 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/servlets/ListenHTTPServlet.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/servlets/ListenHTTPServlet.java @@ -16,14 +16,47 @@ */ package org.apache.nifi.processors.standard.servlets; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.nifi.flowfile.FlowFile; +import org.apache.nifi.flowfile.attributes.CoreAttributes; +import org.apache.nifi.logging.ComponentLog; +import org.apache.nifi.processor.ProcessContext; +import
org.apache.nifi.processor.ProcessSession; +import org.apache.nifi.processor.ProcessSessionFactory; +import org.apache.nifi.processor.io.OutputStreamCallback; +import org.apache.nifi.processors.standard.ListenHTTP; +import org.apache.nifi.processors.standard.ListenHTTP.FlowFileEntryTimeWrapper; +import org.apache.nifi.stream.io.StreamThrottler; +import org.apache.nifi.stream.io.StreamUtils; +import org.apache.nifi.util.FlowFileUnpackager; +import org.apache.nifi.util.FlowFileUnpackagerV1; +import org.apache.nifi.util.FlowFileUnpackagerV2; +import org.apache.nifi.util.FlowFileUnpackagerV3; +import org.eclipse.jetty.server.Request; + +import javax.servlet.MultipartConfigElement; +import javax.servlet.ServletConfig; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.Part; +import javax.ws.rs.Path; +import javax.ws.rs.core.MediaType; import java.io.BufferedOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.io.UnsupportedEncodingException; import java.security.cert.X509Certificate; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; @@ -34,31 +67,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; import java.util.zip.GZIPInputStream; -import javax.servlet.ServletConfig; -import javax.servlet.ServletContext; -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.ws.rs.Path; -import javax.ws.rs.core.MediaType; -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.nifi.flowfile.FlowFile; -import org.apache.nifi.flowfile.attributes.CoreAttributes; -import org.apache.nifi.logging.ComponentLog; -import org.apache.nifi.processor.ProcessContext; -import org.apache.nifi.processor.ProcessSession; -import org.apache.nifi.processor.ProcessSessionFactory; -import org.apache.nifi.processor.io.OutputStreamCallback; -import org.apache.nifi.processors.standard.ListenHTTP; -import org.apache.nifi.processors.standard.ListenHTTP.FlowFileEntryTimeWrapper; -import org.apache.nifi.stream.io.StreamThrottler; -import org.apache.nifi.util.FlowFileUnpackager; -import org.apache.nifi.util.FlowFileUnpackagerV1; -import org.apache.nifi.util.FlowFileUnpackagerV2; -import org.apache.nifi.util.FlowFileUnpackagerV3; - @Path("") public class ListenHTTPServlet extends HttpServlet { @@ -94,6 +102,8 @@ public class ListenHTTPServlet extends HttpServlet { private StreamThrottler streamThrottler; private String basePath; private int returnCode; + private long multipartRequestMaxSize; + private int multipartReadBufferSize; @SuppressWarnings("unchecked") @Override @@ -108,6 +118,8 @@ public void init(final ServletConfig config) throws ServletException { this.streamThrottler = (StreamThrottler) context.getAttribute(ListenHTTP.CONTEXT_ATTRIBUTE_STREAM_THROTTLER); this.basePath = (String) context.getAttribute(ListenHTTP.CONTEXT_ATTRIBUTE_BASE_PATH); this.returnCode = (int) context.getAttribute(ListenHTTP.CONTEXT_ATTRIBUTE_RETURN_CODE); + this.multipartRequestMaxSize = (long) context.getAttribute(ListenHTTP.CONTEXT_ATTRIBUTE_MULTIPART_REQUEST_MAX_SIZE); + 
this.multipartReadBufferSize = (int) context.getAttribute(ListenHTTP.CONTEXT_ATTRIBUTE_MULTIPART_READ_BUFFER_SIZE); } @Override @@ -133,8 +145,6 @@ protected void doPost(final HttpServletRequest request, final HttpServletRespons } while (sessionFactory == null); final ProcessSession session = sessionFactory.createSession(); - FlowFile flowFile = null; - String holdUuid = null; String foundSubject = null; try { final long n = filesReceived.getAndIncrement() % FILES_BEFORE_CHECKING_DESTINATION_SPACE; @@ -191,134 +201,211 @@ protected void doPost(final HttpServletRequest request, final HttpServletRespons logger.debug("Received request from " + request.getRemoteHost() + ", createHold=" + createHold + ", content-type=" + contentType + ", gzip=" + contentGzipped); } - final AtomicBoolean hasMoreData = new AtomicBoolean(false); - final FlowFileUnpackager unpackager; - if (APPLICATION_FLOW_FILE_V3.equals(contentType)) { - unpackager = new FlowFileUnpackagerV3(); - } else if (APPLICATION_FLOW_FILE_V2.equals(contentType)) { - unpackager = new FlowFileUnpackagerV2(); - } else if (APPLICATION_FLOW_FILE_V1.equals(contentType)) { - unpackager = new FlowFileUnpackagerV1(); + Set flowFileSet; + if (!Strings.isNullOrEmpty(request.getContentType()) && request.getContentType().contains("multipart/form-data")) { + flowFileSet = handleMultipartRequest(request, session, foundSubject); } else { - unpackager = null; + flowFileSet = handleRequest(request, session, foundSubject, destinationIsLegacyNiFi, contentType, in); } + proceedFlow(request, response, session, foundSubject, createHold, flowFileSet); + } catch (final Throwable t) { + handleException(request, response, session, foundSubject, t); + } + } - final Set flowFileSet = new HashSet<>(); + private void handleException(final HttpServletRequest request, final HttpServletResponse response, + final ProcessSession session, String foundSubject, final Throwable t) throws IOException { + session.rollback(); + logger.error("Unable to receive file from Remote Host: [{}] SubjectDN [{}] due to {}", new Object[]{request.getRemoteHost(), foundSubject, t}); + response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, t.toString()); + } - do { - final long startNanos = System.nanoTime(); - final Map attributes = new HashMap<>(); - flowFile = session.create(); - flowFile = session.write(flowFile, new OutputStreamCallback() { - @Override - public void process(final OutputStream rawOut) throws IOException { - try (final BufferedOutputStream bos = new BufferedOutputStream(rawOut, 65536)) { - if (unpackager == null) { - IOUtils.copy(in, bos); - hasMoreData.set(false); - } else { - attributes.putAll(unpackager.unpackageFlowFile(in, bos)); - - if (destinationIsLegacyNiFi) { - if (attributes.containsKey("nf.file.name")) { - // for backward compatibility with old nifi... 
- attributes.put(CoreAttributes.FILENAME.key(), attributes.remove("nf.file.name")); - } - - if (attributes.containsKey("nf.file.path")) { - attributes.put(CoreAttributes.PATH.key(), attributes.remove("nf.file.path")); - } + private Set handleMultipartRequest(HttpServletRequest request, ProcessSession session, String foundSubject) throws IOException, IllegalStateException, ServletException { + Set flowFileSet = new HashSet<>(); + String tempDir = System.getProperty("java.io.tmpdir"); + request.setAttribute(Request.__MULTIPART_CONFIG_ELEMENT, new MultipartConfigElement(tempDir, multipartRequestMaxSize, multipartRequestMaxSize, multipartReadBufferSize)); + List requestParts = ImmutableList.copyOf(request.getParts()); + for (int i = 0; i < requestParts.size(); i++) { + Part part = requestParts.get(i); + FlowFile flowFile = session.create(); + try (OutputStream flowFileOutputStream = session.write(flowFile)) { + StreamUtils.copy(part.getInputStream(), flowFileOutputStream); + } + flowFile = saveRequestDetailsAsAttributes(request, session, foundSubject, flowFile); + flowFile = savePartDetailsAsAttributes(session, part, flowFile, i, requestParts.size()); + flowFileSet.add(flowFile); + } + return flowFileSet; + } + + private FlowFile savePartDetailsAsAttributes(final ProcessSession session, final Part part, final FlowFile flowFile, final int sequenceNumber, final int allPartsCount) { + final Map attributes = new HashMap<>(); + for (String headerName : part.getHeaderNames()) { + final String headerValue = part.getHeader(headerName); + putAttribute(attributes, "http.headers.multipart." + headerName, headerValue); + } + putAttribute(attributes, "http.multipart.size", part.getSize()); + putAttribute(attributes, "http.multipart.content.type", part.getContentType()); + putAttribute(attributes, "http.multipart.name", part.getName()); + putAttribute(attributes, "http.multipart.filename", part.getSubmittedFileName()); + putAttribute(attributes, "http.multipart.fragments.sequence.number", sequenceNumber + 1); + putAttribute(attributes, "http.multipart.fragments.total.number", allPartsCount); + return session.putAllAttributes(flowFile, attributes); + } + + private Set handleRequest(final HttpServletRequest request, final ProcessSession session, + String foundSubject, final boolean destinationIsLegacyNiFi, final String contentType, final InputStream in) { + FlowFile flowFile = null; + String holdUuid = null; + final AtomicBoolean hasMoreData = new AtomicBoolean(false); + final FlowFileUnpackager unpackager; + if (APPLICATION_FLOW_FILE_V3.equals(contentType)) { + unpackager = new FlowFileUnpackagerV3(); + } else if (APPLICATION_FLOW_FILE_V2.equals(contentType)) { + unpackager = new FlowFileUnpackagerV2(); + } else if (APPLICATION_FLOW_FILE_V1.equals(contentType)) { + unpackager = new FlowFileUnpackagerV1(); + } else { + unpackager = null; + } + + final Set flowFileSet = new HashSet<>(); + + do { + final long startNanos = System.nanoTime(); + final Map attributes = new HashMap<>(); + flowFile = session.create(); + flowFile = session.write(flowFile, new OutputStreamCallback() { + @Override + public void process(final OutputStream rawOut) throws IOException { + try (final BufferedOutputStream bos = new BufferedOutputStream(rawOut, 65536)) { + if (unpackager == null) { + IOUtils.copy(in, bos); + hasMoreData.set(false); + } else { + attributes.putAll(unpackager.unpackageFlowFile(in, bos)); + + if (destinationIsLegacyNiFi) { + if (attributes.containsKey("nf.file.name")) { + // for backward compatibility with old 
nifi... + attributes.put(CoreAttributes.FILENAME.key(), attributes.remove("nf.file.name")); } - hasMoreData.set(unpackager.hasMoreData()); + if (attributes.containsKey("nf.file.path")) { + attributes.put(CoreAttributes.PATH.key(), attributes.remove("nf.file.path")); + } } + + hasMoreData.set(unpackager.hasMoreData()); } } - }); + } + }); - final long transferNanos = System.nanoTime() - startNanos; - final long transferMillis = TimeUnit.MILLISECONDS.convert(transferNanos, TimeUnit.NANOSECONDS); + final long transferNanos = System.nanoTime() - startNanos; + final long transferMillis = TimeUnit.MILLISECONDS.convert(transferNanos, TimeUnit.NANOSECONDS); - // put metadata on flowfile - final String nameVal = request.getHeader(CoreAttributes.FILENAME.key()); - if (StringUtils.isNotBlank(nameVal)) { - attributes.put(CoreAttributes.FILENAME.key(), nameVal); - } + // put metadata on flowfile + final String nameVal = request.getHeader(CoreAttributes.FILENAME.key()); + if (StringUtils.isNotBlank(nameVal)) { + attributes.put(CoreAttributes.FILENAME.key(), nameVal); + } - // put arbitrary headers on flow file - for (Enumeration headerEnum = request.getHeaderNames(); - headerEnum.hasMoreElements();) { - String headerName = headerEnum.nextElement(); - if (headerPattern != null && headerPattern.matcher(headerName).matches()) { - String headerValue = request.getHeader(headerName); - attributes.put(headerName, headerValue); - } - } + String sourceSystemFlowFileIdentifier = attributes.get(CoreAttributes.UUID.key()); + if (sourceSystemFlowFileIdentifier != null) { + sourceSystemFlowFileIdentifier = "urn:nifi:" + sourceSystemFlowFileIdentifier; - String sourceSystemFlowFileIdentifier = attributes.get(CoreAttributes.UUID.key()); - if (sourceSystemFlowFileIdentifier != null) { - sourceSystemFlowFileIdentifier = "urn:nifi:" + sourceSystemFlowFileIdentifier; + // If we receveied a UUID, we want to give the FlowFile a new UUID and register the sending system's + // identifier as the SourceSystemFlowFileIdentifier field in the Provenance RECEIVE event + attributes.put(CoreAttributes.UUID.key(), UUID.randomUUID().toString()); + } - // If we receveied a UUID, we want to give the FlowFile a new UUID and register the sending system's - // identifier as the SourceSystemFlowFileIdentifier field in the Provenance RECEIVE event - attributes.put(CoreAttributes.UUID.key(), UUID.randomUUID().toString()); - } + flowFile = session.putAllAttributes(flowFile, attributes); + flowFile = saveRequestDetailsAsAttributes(request, session, foundSubject, flowFile); + session.getProvenanceReporter().receive(flowFile, request.getRequestURL().toString(), sourceSystemFlowFileIdentifier, "Remote DN=" + foundSubject, transferMillis); + flowFileSet.add(flowFile); - flowFile = session.putAllAttributes(flowFile, attributes); - session.getProvenanceReporter().receive(flowFile, request.getRequestURL().toString(), sourceSystemFlowFileIdentifier, "Remote DN=" + foundSubject, transferMillis); - flowFile = session.putAttribute(flowFile, "restlistener.remote.source.host", request.getRemoteHost()); - flowFile = session.putAttribute(flowFile, "restlistener.request.uri", request.getRequestURI()); - flowFile = session.putAttribute(flowFile, "restlistener.remote.user.dn", foundSubject); - flowFileSet.add(flowFile); + if (holdUuid == null) { + holdUuid = flowFile.getAttribute(CoreAttributes.UUID.key()); + } + } while (hasMoreData.get()); + return flowFileSet; + } - if (holdUuid == null) { - holdUuid = flowFile.getAttribute(CoreAttributes.UUID.key()); - } - } 
while (hasMoreData.get()); + protected FlowFile saveRequestDetailsAsAttributes(final HttpServletRequest request, final ProcessSession session, + String foundSubject, FlowFile flowFile) { + Map attributes = new HashMap<>(); + addMatchingRequestHeaders(request, attributes); + flowFile = session.putAllAttributes(flowFile, attributes); + flowFile = session.putAttribute(flowFile, "restlistener.remote.source.host", request.getRemoteHost()); + flowFile = session.putAttribute(flowFile, "restlistener.request.uri", request.getRequestURI()); + flowFile = session.putAttribute(flowFile, "restlistener.remote.user.dn", foundSubject); + return flowFile; + } - if (createHold) { - String uuid = (holdUuid == null) ? UUID.randomUUID().toString() : holdUuid; + private void addMatchingRequestHeaders(final HttpServletRequest request, final Map attributes) { + // put arbitrary headers on flow file + for (Enumeration headerEnum = request.getHeaderNames(); + headerEnum.hasMoreElements(); ) { + String headerName = headerEnum.nextElement(); + if (headerPattern != null && headerPattern.matcher(headerName).matches()) { + String headerValue = request.getHeader(headerName); + attributes.put(headerName, headerValue); + } + } + } - if (flowFileMap.containsKey(uuid)) { - uuid = UUID.randomUUID().toString(); - } + protected void proceedFlow(final HttpServletRequest request, final HttpServletResponse response, + final ProcessSession session, String foundSubject, final boolean createHold, + final Set flowFileSet) throws IOException, UnsupportedEncodingException { + if (createHold) { + String uuid = UUID.randomUUID().toString(); - final FlowFileEntryTimeWrapper wrapper = new FlowFileEntryTimeWrapper(session, flowFileSet, System.currentTimeMillis(), request.getRemoteHost()); - FlowFileEntryTimeWrapper previousWrapper; - do { - previousWrapper = flowFileMap.putIfAbsent(uuid, wrapper); - if (previousWrapper != null) { - uuid = UUID.randomUUID().toString(); - } - } while (previousWrapper != null); - - response.setStatus(HttpServletResponse.SC_SEE_OTHER); - final String ackUri = "/" + basePath + "/holds/" + uuid; - response.addHeader(LOCATION_HEADER_NAME, ackUri); - response.addHeader(LOCATION_URI_INTENT_NAME, LOCATION_URI_INTENT_VALUE); - response.getOutputStream().write(ackUri.getBytes("UTF-8")); - if (logger.isDebugEnabled()) { - logger.debug("Ingested {} from Remote Host: [{}] Port [{}] SubjectDN [{}]; placed hold on these {} files with ID {}", - new Object[]{flowFileSet, request.getRemoteHost(), request.getRemotePort(), foundSubject, flowFileSet.size(), uuid}); + if (flowFileMap.containsKey(uuid)) { + uuid = UUID.randomUUID().toString(); + } + + final FlowFileEntryTimeWrapper wrapper = new FlowFileEntryTimeWrapper(session, flowFileSet, System.currentTimeMillis(), request.getRemoteHost()); + FlowFileEntryTimeWrapper previousWrapper; + do { + previousWrapper = flowFileMap.putIfAbsent(uuid, wrapper); + if (previousWrapper != null) { + uuid = UUID.randomUUID().toString(); } - } else { - response.setStatus(this.returnCode); - logger.info("Received from Remote Host: [{}] Port [{}] SubjectDN [{}]; transferring to 'success' {}", - new Object[]{request.getRemoteHost(), request.getRemotePort(), foundSubject, flowFile}); + } while (previousWrapper != null); - session.transfer(flowFileSet, ListenHTTP.RELATIONSHIP_SUCCESS); - session.commit(); - } - } catch (final Throwable t) { - session.rollback(); - if (flowFile == null) { - logger.error("Unable to receive file from Remote Host: [{}] SubjectDN [{}] due to {}", - new 
Object[]{request.getRemoteHost(), foundSubject, t}); - } else { - logger.error("Unable to receive file {} from Remote Host: [{}] SubjectDN [{}] due to {}", - new Object[]{flowFile, request.getRemoteHost(), foundSubject, t}); + response.setStatus(HttpServletResponse.SC_SEE_OTHER); + final String ackUri = "/" + basePath + "/holds/" + uuid; + response.addHeader(LOCATION_HEADER_NAME, ackUri); + response.addHeader(LOCATION_URI_INTENT_NAME, LOCATION_URI_INTENT_VALUE); + response.getOutputStream().write(ackUri.getBytes("UTF-8")); + if (logger.isDebugEnabled()) { + logger.debug("Ingested {} from Remote Host: [{}] Port [{}] SubjectDN [{}]; placed hold on these {} files with ID {}", + new Object[]{flowFileSet, request.getRemoteHost(), request.getRemotePort(), foundSubject, flowFileSet.size(), uuid}); } - response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, t.toString()); + } else { + response.setStatus(this.returnCode); + logger.info("Received from Remote Host: [{}] Port [{}] SubjectDN [{}]; transferring to 'success'", + new Object[]{request.getRemoteHost(), request.getRemotePort(), foundSubject}); + + session.transfer(flowFileSet, ListenHTTP.RELATIONSHIP_SUCCESS); + session.commit(); } } + + private void putAttribute(final Map map, final String key, final Object value) { + if (value == null) { + return; + } + + putAttribute(map, key, value.toString()); + } + + private void putAttribute(final Map map, final String key, final String value) { + if (value == null) { + return; + } + + map.put(key, value); + } } diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/DefaultAvroSqlWriter.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/DefaultAvroSqlWriter.java new file mode 100644 index 000000000000..574aca7c9431 --- /dev/null +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/DefaultAvroSqlWriter.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.processors.standard.sql; + +import org.apache.nifi.flowfile.attributes.CoreAttributes; +import org.apache.nifi.logging.ComponentLog; +import org.apache.nifi.processor.exception.ProcessException; +import org.apache.nifi.processors.standard.AbstractQueryDatabaseTable; +import org.apache.nifi.processors.standard.util.JdbcCommon; + +import java.io.IOException; +import java.io.OutputStream; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; + +public class DefaultAvroSqlWriter implements SqlWriter { + + private final JdbcCommon.AvroConversionOptions options; + + private final Map attributesToAdd = new HashMap() {{ + put(CoreAttributes.MIME_TYPE.key(), JdbcCommon.MIME_TYPE_AVRO_BINARY); + }}; + + public DefaultAvroSqlWriter(JdbcCommon.AvroConversionOptions options) { + this.options = options; + } + + @Override + public long writeResultSet(ResultSet resultSet, OutputStream outputStream, ComponentLog logger, AbstractQueryDatabaseTable.MaxValueResultSetRowCollector callback) throws Exception { + try { + return JdbcCommon.convertToAvroStream(resultSet, outputStream, options, callback); + } catch (SQLException e) { + throw new ProcessException(e); + } + } + + @Override + public Map getAttributesToAdd() { + return attributesToAdd; + } + + @Override + public void writeEmptyResultSet(OutputStream outputStream, ComponentLog logger) throws IOException { + JdbcCommon.createEmptyAvroStream(outputStream); + } + + @Override + public String getMimeType() { + return JdbcCommon.MIME_TYPE_AVRO_BINARY; + } +} \ No newline at end of file diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/RecordSqlWriter.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/RecordSqlWriter.java new file mode 100644 index 000000000000..c1a76b4ea1b0 --- /dev/null +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/RecordSqlWriter.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.processors.standard.sql; + +import org.apache.avro.Schema; +import org.apache.nifi.avro.AvroTypeUtil; +import org.apache.nifi.flowfile.attributes.CoreAttributes; +import org.apache.nifi.logging.ComponentLog; +import org.apache.nifi.processor.ProcessSession; +import org.apache.nifi.processor.exception.ProcessException; +import org.apache.nifi.processors.standard.AbstractQueryDatabaseTable; +import org.apache.nifi.processors.standard.util.JdbcCommon; +import org.apache.nifi.schema.access.SchemaNotFoundException; +import org.apache.nifi.serialization.RecordSetWriter; +import org.apache.nifi.serialization.RecordSetWriterFactory; +import org.apache.nifi.serialization.WriteResult; +import org.apache.nifi.serialization.record.Record; +import org.apache.nifi.serialization.record.RecordSchema; +import org.apache.nifi.serialization.record.RecordSet; +import org.apache.nifi.serialization.record.ResultSetRecordSet; + +import java.io.IOException; +import java.io.OutputStream; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +public class RecordSqlWriter implements SqlWriter { + + private final RecordSetWriterFactory recordSetWriterFactory; + private final AtomicReference writeResultRef; + private final JdbcCommon.AvroConversionOptions options; + private final int maxRowsPerFlowFile; + private final Map originalAttributes; + private ResultSetRecordSet fullRecordSet; + private RecordSchema writeSchema; + private String mimeType; + + public RecordSqlWriter(RecordSetWriterFactory recordSetWriterFactory, JdbcCommon.AvroConversionOptions options, int maxRowsPerFlowFile, Map originalAttributes) { + this.recordSetWriterFactory = recordSetWriterFactory; + this.writeResultRef = new AtomicReference<>(); + this.maxRowsPerFlowFile = maxRowsPerFlowFile; + this.options = options; + this.originalAttributes = originalAttributes; + } + + @Override + public long writeResultSet(ResultSet resultSet, OutputStream outputStream, ComponentLog logger, AbstractQueryDatabaseTable.MaxValueResultSetRowCollector callback) throws Exception { + final RecordSet recordSet; + try { + if (fullRecordSet == null) { + final Schema avroSchema = JdbcCommon.createSchema(resultSet, options); + final RecordSchema recordAvroSchema = AvroTypeUtil.createSchema(avroSchema); + fullRecordSet = new ResultSetRecordSetWithCallback(resultSet, recordAvroSchema, callback); + writeSchema = recordSetWriterFactory.getSchema(originalAttributes, fullRecordSet.getSchema()); + } + recordSet = (maxRowsPerFlowFile > 0) ? 
fullRecordSet.limit(maxRowsPerFlowFile) : fullRecordSet; + + } catch (final SQLException | SchemaNotFoundException | IOException e) { + throw new ProcessException(e); + } + try (final RecordSetWriter resultSetWriter = recordSetWriterFactory.createWriter(logger, writeSchema, outputStream)) { + writeResultRef.set(resultSetWriter.write(recordSet)); + if (mimeType == null) { + mimeType = resultSetWriter.getMimeType(); + } + return writeResultRef.get().getRecordCount(); + } catch (final Exception e) { + throw new IOException(e); + } + } + + @Override + public Map getAttributesToAdd() { + Map attributesToAdd = new HashMap<>(); + attributesToAdd.put(CoreAttributes.MIME_TYPE.key(), mimeType); + + // Add any attributes from the record writer (if present) + final WriteResult result = writeResultRef.get(); + if (result != null) { + if (result.getAttributes() != null) { + attributesToAdd.putAll(result.getAttributes()); + } + + attributesToAdd.put("record.count", String.valueOf(result.getRecordCount())); + } + return attributesToAdd; + } + + @Override + public void updateCounters(ProcessSession session) { + final WriteResult result = writeResultRef.get(); + if (result != null) { + session.adjustCounter("Records Written", result.getRecordCount(), false); + } + } + + @Override + public void writeEmptyResultSet(OutputStream outputStream, ComponentLog logger) throws IOException { + try (final RecordSetWriter resultSetWriter = recordSetWriterFactory.createWriter(logger, writeSchema, outputStream)) { + mimeType = resultSetWriter.getMimeType(); + resultSetWriter.beginRecordSet(); + resultSetWriter.finishRecordSet(); + } catch (final Exception e) { + throw new IOException(e); + } + } + + @Override + public String getMimeType() { + return mimeType; + } + + private static class ResultSetRecordSetWithCallback extends ResultSetRecordSet { + + private final AbstractQueryDatabaseTable.MaxValueResultSetRowCollector callback; + + ResultSetRecordSetWithCallback(ResultSet rs, RecordSchema readerSchema, AbstractQueryDatabaseTable.MaxValueResultSetRowCollector callback) throws SQLException { + super(rs, readerSchema); + this.callback = callback; + } + + @Override + public Record next() throws IOException { + try { + if (hasMoreRows()) { + ResultSet rs = getResultSet(); + final Record record = createRecord(rs); + if (callback != null) { + callback.processRow(rs); + } + setMoreRows(rs.next()); + return record; + } else { + return null; + } + } catch (final SQLException e) { + throw new IOException("Could not obtain next record from ResultSet", e); + } + } + } +} diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/SqlWriter.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/SqlWriter.java new file mode 100644 index 000000000000..08fc3fdd9cc9 --- /dev/null +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/sql/SqlWriter.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.processors.standard.sql; + +import org.apache.nifi.logging.ComponentLog; +import org.apache.nifi.processor.ProcessSession; +import org.apache.nifi.processors.standard.AbstractQueryDatabaseTable; + +import java.io.IOException; +import java.io.OutputStream; +import java.sql.ResultSet; +import java.util.Collections; +import java.util.Map; + +/** + * The SqlWriter interface provides a standard way for processors such as ExecuteSQL, ExecuteSQLRecord, QueryDatabaseTable, and QueryDatabaseTableRecord + * to write SQL result sets out to a flow file in whichever manner is appropriate. For example, ExecuteSQL writes the result set as Avro but ExecuteSQLRecord + * uses the Record API to write the result set out as prescribed by the selected RecordSetWriter. + */ +public interface SqlWriter { + + /** + * Writes the given result set out to the given output stream, possibly applying a callback as each row is processed. + * @param resultSet the ResultSet to be written + * @param outputStream the OutputStream to write the result set to + * @param logger a common logger that can be used to log messages during write + * @param callback a MaxValueResultSetRowCollector that may be called as each row in the ResultSet is processed + * @return the number of rows written to the output stream + * @throws Exception if any errors occur during the writing of the result set to the output stream + */ + long writeResultSet(ResultSet resultSet, OutputStream outputStream, ComponentLog logger, AbstractQueryDatabaseTable.MaxValueResultSetRowCollector callback) throws Exception; + + /** + * Returns a map of attribute key/value pairs to be added to any outgoing flow file(s). The default implementation is to return an empty map. + * @return a map of attribute key/value pairs + */ + default Map getAttributesToAdd() { + return Collections.emptyMap(); + } + + /** + * Updates any session counters as a result of processing result sets. The default implementation is empty, no counters will be updated. + * @param session the session upon which to update counters + */ + default void updateCounters(ProcessSession session) { + } + + /** + * Writes an empty result set to the output stream. In some cases a ResultSet might not have any viable rows, but will throw an error or + * behave unexpectedly if rows are attempted to be retrieved. This method indicates the implementation should write whatever output is + * appropriate for a result set with no rows. + * @param outputStream the OutputStream to write the empty result set to + * @param logger a common logger that can be used to log messages during write + * @throws IOException if any errors occur during the writing of an empty result set to the output stream + */ + void writeEmptyResultSet(OutputStream outputStream, ComponentLog logger) throws IOException; + + /** + * Returns the MIME type of the output format. This can be used in FlowFile attributes or to perform format-specific processing as necessary. + * @return the MIME type string of the output format. 
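To make the SqlWriter contract shown here concrete, below is a minimal hypothetical implementation that streams each row as a comma-separated line. It is a sketch only, not part of the patch: the class name and MIME type are invented, it relies solely on the interface methods shown here (the two default methods are inherited unchanged), row limiting and attribute handling are omitted for brevity, and it invokes the row callback so max-value tracking still works.

    package org.apache.nifi.processors.standard.sql; // hypothetical placement alongside the interface

    import org.apache.nifi.logging.ComponentLog;
    import org.apache.nifi.processors.standard.AbstractQueryDatabaseTable;

    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;
    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;

    // Hypothetical SqlWriter that emits one comma-separated line per row; illustrates the contract only.
    public class SimpleCsvSqlWriter implements SqlWriter {

        @Override
        public long writeResultSet(ResultSet resultSet, OutputStream outputStream, ComponentLog logger,
                                   AbstractQueryDatabaseTable.MaxValueResultSetRowCollector callback) throws Exception {
            final ResultSetMetaData meta = resultSet.getMetaData();
            long rows = 0;
            while (resultSet.next()) {
                final StringBuilder line = new StringBuilder();
                for (int i = 1; i <= meta.getColumnCount(); i++) {
                    if (i > 1) {
                        line.append(',');
                    }
                    line.append(resultSet.getString(i));
                }
                line.append('\n');
                outputStream.write(line.toString().getBytes(StandardCharsets.UTF_8));
                if (callback != null) {
                    callback.processRow(resultSet); // let the processor track its max-value columns
                }
                rows++;
            }
            return rows;
        }

        @Override
        public void writeEmptyResultSet(OutputStream outputStream, ComponentLog logger) throws IOException {
            // nothing to write for an empty CSV result set
        }

        @Override
        public String getMimeType() {
            return "text/csv";
        }
    }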
+ */ + String getMimeType(); +} \ No newline at end of file diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/util/JdbcCommon.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/util/JdbcCommon.java index 03761c6dd528..9681e2fd37c9 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/util/JdbcCommon.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/java/org/apache/nifi/processors/standard/util/JdbcCommon.java @@ -91,11 +91,13 @@ import org.apache.avro.SchemaBuilder.NullDefault; import org.apache.avro.SchemaBuilder.UnionAccumulator; import org.apache.avro.file.CodecFactory; +import org.apache.avro.UnresolvedUnionException; import org.apache.avro.file.DataFileWriter; import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericDatumWriter; import org.apache.avro.generic.GenericRecord; import org.apache.avro.io.DatumWriter; +import org.apache.commons.lang.exception.ExceptionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.nifi.avro.AvroTypeUtil; import org.apache.nifi.components.PropertyDescriptor; @@ -449,8 +451,11 @@ public static long convertToAvroStream(final ResultSet rs, final OutputStream ou } else { rec.put(i - 1, value); } + } else if ((value instanceof Long) && meta.getPrecision(i) < MAX_DIGITS_IN_INT) { + int intValue = ((Long)value).intValue(); + rec.put(i-1, intValue); } else { - rec.put(i - 1, value); + rec.put(i-1, value); } } else if (value instanceof Date) { @@ -470,8 +475,22 @@ public static long convertToAvroStream(final ResultSet rs, final OutputStream ou rec.put(i - 1, value.toString()); } } - dataFileWriter.append(rec); - nrOfRows += 1; + try { + dataFileWriter.append(rec); + nrOfRows += 1; + } catch (DataFileWriter.AppendWriteException awe) { + Throwable rootCause = ExceptionUtils.getRootCause(awe); + if(rootCause instanceof UnresolvedUnionException) { + UnresolvedUnionException uue = (UnresolvedUnionException) rootCause; + throw new RuntimeException( + "Unable to resolve union for value " + uue.getUnresolvedDatum() + + " with type " + uue.getUnresolvedDatum().getClass().getCanonicalName() + + " while appending record " + rec, + awe); + } else { + throw awe; + } + } if (options.maxRows > 0 && nrOfRows == options.maxRows) break; diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/META-INF/services/org.apache.nifi.processor.Processor b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/META-INF/services/org.apache.nifi.processor.Processor index d21b7f4704b1..bfe1403d603f 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/META-INF/services/org.apache.nifi.processor.Processor +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/META-INF/services/org.apache.nifi.processor.Processor @@ -35,6 +35,7 @@ org.apache.nifi.processors.standard.EvaluateXPath org.apache.nifi.processors.standard.EvaluateXQuery org.apache.nifi.processors.standard.ExecuteProcess org.apache.nifi.processors.standard.ExecuteSQL +org.apache.nifi.processors.standard.ExecuteSQLRecord org.apache.nifi.processors.standard.ExecuteStreamCommand org.apache.nifi.processors.standard.ExtractGrok org.apache.nifi.processors.standard.ExtractText @@ -96,6 +97,7 @@ 
org.apache.nifi.processors.standard.PutSyslog org.apache.nifi.processors.standard.PutTCP org.apache.nifi.processors.standard.PutUDP org.apache.nifi.processors.standard.QueryDatabaseTable +org.apache.nifi.processors.standard.QueryDatabaseTableRecord org.apache.nifi.processors.standard.QueryRecord org.apache.nifi.processors.standard.ReplaceText org.apache.nifi.processors.standard.ReplaceTextWithMapping diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/groovy/org/apache/nifi/processors/standard/TestGetHTTPGroovy.groovy b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/groovy/org/apache/nifi/processors/standard/TestGetHTTPGroovy.groovy index a1d7db6c79c4..4cf8964edd95 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/groovy/org/apache/nifi/processors/standard/TestGetHTTPGroovy.groovy +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/groovy/org/apache/nifi/processors/standard/TestGetHTTPGroovy.groovy @@ -80,12 +80,12 @@ class TestGetHTTPGroovy extends GroovyTestCase { private static final String TLS_1_URL = "https://nifi.apache.org/" private static final String TLS_1_1_URL = "https://nifi.apache.org/" - private static final String KEYSTORE_PATH = "src/test/resources/localhost-ks.jks" - private static final String TRUSTSTORE_PATH = "src/test/resources/localhost-ts.jks" + private static final String KEYSTORE_PATH = "src/test/resources/keystore.jks" + private static final String TRUSTSTORE_PATH = "src/test/resources/truststore.jks" private static final String CACERTS_PATH = "/Library/Java/JavaVirtualMachines/jdk1.8.0_101.jdk/Contents/Home/jre/lib/security/cacerts" - private static final String KEYSTORE_PASSWORD = "localtest" - private static final String TRUSTSTORE_PASSWORD = "localtest" + private static final String KEYSTORE_PASSWORD = "passwordpassword" + private static final String TRUSTSTORE_PASSWORD = "passwordpassword" private static final String CACERTS_PASSWORD = "changeit" private static Server server diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/groovy/org/apache/nifi/processors/standard/TestPostHTTPGroovy.groovy b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/groovy/org/apache/nifi/processors/standard/TestPostHTTPGroovy.groovy index d592b9923a49..e13482db9269 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/groovy/org/apache/nifi/processors/standard/TestPostHTTPGroovy.groovy +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/groovy/org/apache/nifi/processors/standard/TestPostHTTPGroovy.groovy @@ -74,11 +74,11 @@ class TestPostHTTPGroovy extends GroovyTestCase { private static final String HTTPS_URL = "https://${DEFAULT_HOSTNAME}:${DEFAULT_TLS_PORT}" private static final String POST_URL = "${HTTPS_URL}/PostHandler.groovy" - private static final String KEYSTORE_PATH = "src/test/resources/localhost-ks.jks" - private static final String TRUSTSTORE_PATH = "src/test/resources/localhost-ts.jks" + private static final String KEYSTORE_PATH = "src/test/resources/keystore.jks" + private static final String TRUSTSTORE_PATH = "src/test/resources/truststore.jks" - private static final String KEYSTORE_PASSWORD = "localtest" - private static final String TRUSTSTORE_PASSWORD = "localtest" + private static final String KEYSTORE_PASSWORD = "passwordpassword" + private static final String TRUSTSTORE_PASSWORD = "passwordpassword" private static Server server 
private static X509TrustManager nullTrustManager diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/ITListenAndPutSyslog.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/ITListenAndPutSyslog.java index 5d0562d262ca..89c87b2a7730 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/ITListenAndPutSyslog.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/ITListenAndPutSyslog.java @@ -104,11 +104,11 @@ public void testTLSListenerNoTLSPut() throws InitializationException, IOExceptio private SSLContextService configureSSLContextService(TestRunner runner) throws InitializationException { final SSLContextService sslContextService = new StandardSSLContextService(); runner.addControllerService("ssl-context", sslContextService); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/localhost-ts.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/truststore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/localhost-ks.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/keystore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, "JKS"); runner.enableControllerService(sslContextService); return sslContextService; diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecordTest.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecordTest.java new file mode 100644 index 000000000000..a1d67c05cbb6 --- /dev/null +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableRecordTest.java @@ -0,0 +1,1332 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.processors.standard; + +import org.apache.nifi.annotation.behavior.Stateful; +import org.apache.nifi.components.state.Scope; +import org.apache.nifi.components.state.StateManager; +import org.apache.nifi.controller.AbstractControllerService; +import org.apache.nifi.dbcp.DBCPService; +import org.apache.nifi.processor.exception.ProcessException; +import org.apache.nifi.processors.standard.db.DatabaseAdapter; +import org.apache.nifi.processors.standard.db.impl.GenericDatabaseAdapter; +import org.apache.nifi.processors.standard.db.impl.MSSQLDatabaseAdapter; +import org.apache.nifi.processors.standard.db.impl.MySQLDatabaseAdapter; +import org.apache.nifi.processors.standard.db.impl.OracleDatabaseAdapter; +import org.apache.nifi.processors.standard.db.impl.PhoenixDatabaseAdapter; +import org.apache.nifi.reporting.InitializationException; +import org.apache.nifi.serialization.record.MockRecordWriter; +import org.apache.nifi.util.MockFlowFile; +import org.apache.nifi.util.TestRunner; +import org.apache.nifi.util.TestRunners; +import org.apache.nifi.util.file.FileUtils; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.SQLNonTransientConnectionException; +import java.sql.Statement; +import java.sql.Types; +import java.text.SimpleDateFormat; +import java.util.Arrays; +import java.util.Calendar; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.TimeZone; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +/** + * Unit tests for the QueryDatabaseTableRecord processor + */ +public class QueryDatabaseTableRecordTest { + + MockQueryDatabaseTableRecord processor; + private TestRunner runner; + private final static String DB_LOCATION = "target/db_qdt"; + private DatabaseAdapter dbAdapter; + private HashMap origDbAdapters; + private final static String TABLE_NAME_KEY = "tableName"; + private final static String MAX_ROWS_KEY = "maxRows"; + + + @BeforeClass + public static void setupBeforeClass() { + System.setProperty("derby.stream.error.file", "target/derby.log"); + + // remove previous test database, if any + final File dbLocation = new File(DB_LOCATION); + try { + FileUtils.deleteFile(dbLocation, true); + } catch (IOException ioe) { + // Do nothing, may not have existed + } + } + + @AfterClass + public static void cleanUpAfterClass() throws Exception { + try { + DriverManager.getConnection("jdbc:derby:" + DB_LOCATION + ";shutdown=true"); + } catch (SQLNonTransientConnectionException e) { + // Do nothing, this is what happens at Derby shutdown + } + // remove previous test database, if any + final File dbLocation = new File(DB_LOCATION); + try { + FileUtils.deleteFile(dbLocation, true); + } catch (IOException ioe) { + // Do nothing, may not have existed + } + } + + + @Before + public void setup() throws InitializationException, IOException { + final DBCPService dbcp = new DBCPServiceSimpleImpl(); + final Map dbcpProperties = new HashMap<>(); + origDbAdapters = new HashMap<>(QueryDatabaseTableRecord.dbAdapters); + dbAdapter = new GenericDatabaseAdapter(); + QueryDatabaseTableRecord.dbAdapters.put(dbAdapter.getName(), dbAdapter); + processor = new 
MockQueryDatabaseTableRecord(); + runner = TestRunners.newTestRunner(processor); + runner.addControllerService("dbcp", dbcp, dbcpProperties); + runner.enableControllerService(dbcp); + runner.setProperty(QueryDatabaseTableRecord.DBCP_SERVICE, "dbcp"); + runner.setProperty(QueryDatabaseTableRecord.DB_TYPE, dbAdapter.getName()); + runner.getStateManager().clear(Scope.CLUSTER); + MockRecordWriter recordWriter = new MockRecordWriter(null, true, -1); + runner.addControllerService("writer", recordWriter); + runner.setProperty(QueryDatabaseTableRecord.RECORD_WRITER_FACTORY, "writer"); + runner.enableControllerService(recordWriter); + } + + @After + public void teardown() throws IOException { + runner.getStateManager().clear(Scope.CLUSTER); + runner = null; + QueryDatabaseTableRecord.dbAdapters.clear(); + QueryDatabaseTableRecord.dbAdapters.putAll(origDbAdapters); + } + + @Test + public void testGetQuery() throws Exception { + String query = processor.getQuery(dbAdapter, "myTable", null, null, null, null); + assertEquals("SELECT * FROM myTable", query); + query = processor.getQuery(dbAdapter, "myTable", "col1,col2", null, null, null); + assertEquals("SELECT col1,col2 FROM myTable", query); + + query = processor.getQuery(dbAdapter, "myTable", null, Collections.singletonList("id"), null, null); + assertEquals("SELECT * FROM myTable", query); + + Map maxValues = new HashMap<>(); + maxValues.put("id", "509"); + StateManager stateManager = runner.getStateManager(); + stateManager.setState(maxValues, Scope.CLUSTER); + processor.putColumnType(AbstractDatabaseFetchProcessor.getStateKey("mytable", "id", dbAdapter), Types.INTEGER); + query = processor.getQuery(dbAdapter, "myTable", null, Collections.singletonList("id"), null, stateManager.getState(Scope.CLUSTER).toMap()); + assertEquals("SELECT * FROM myTable WHERE id > 509", query); + + maxValues.put("date_created", "2016-03-07 12:34:56"); + stateManager.setState(maxValues, Scope.CLUSTER); + processor.putColumnType(AbstractDatabaseFetchProcessor.getStateKey("mytable", "date_created", dbAdapter), Types.TIMESTAMP); + query = processor.getQuery(dbAdapter, "myTable", null, Arrays.asList("id", "DATE_CREATED"), null, stateManager.getState(Scope.CLUSTER).toMap()); + assertEquals("SELECT * FROM myTable WHERE id > 509 AND DATE_CREATED >= '2016-03-07 12:34:56'", query); + + // Double quotes can be used to escape column and table names with most ANSI compatible database engines. + maxValues.put("mytable@!@date-created", "2016-03-07 12:34:56"); + stateManager.setState(maxValues, Scope.CLUSTER); + processor.putColumnType(AbstractDatabaseFetchProcessor.getStateKey("\"myTable\"", "\"DATE-CREATED\"", dbAdapter), Types.TIMESTAMP); + query = processor.getQuery(dbAdapter, "\"myTable\"", null, Arrays.asList("id", "\"DATE-CREATED\""), null, stateManager.getState(Scope.CLUSTER).toMap()); + assertEquals("SELECT * FROM \"myTable\" WHERE id > 509 AND \"DATE-CREATED\" >= '2016-03-07 12:34:56'", query); + + // Back-ticks can be used to escape MySQL column and table names. + dbAdapter = new MySQLDatabaseAdapter(); + processor.putColumnType(AbstractDatabaseFetchProcessor.getStateKey("`myTable`", "`DATE-CREATED`", dbAdapter), Types.TIMESTAMP); + query = processor.getQuery(dbAdapter, "`myTable`", null, Arrays.asList("id", "`DATE-CREATED`"), null, stateManager.getState(Scope.CLUSTER).toMap()); + assertEquals("SELECT * FROM `myTable` WHERE id > 509 AND `DATE-CREATED` >= '2016-03-07 12:34:56'", query); + + // Square brackets can be used to escape Microsoft SQL Server column and table names. 
+ dbAdapter = new MSSQLDatabaseAdapter(); + processor.putColumnType(AbstractDatabaseFetchProcessor.getStateKey("[myTable]", "[DATE-CREATED]", dbAdapter), Types.TIMESTAMP); + query = processor.getQuery(dbAdapter, "[myTable]", null, Arrays.asList("id", "[DATE-CREATED]"), null, stateManager.getState(Scope.CLUSTER).toMap()); + assertEquals("SELECT * FROM [myTable] WHERE id > 509 AND [DATE-CREATED] >= '2016-03-07 12:34:56'", query); + + // Test Oracle strategy + dbAdapter = new OracleDatabaseAdapter(); + query = processor.getQuery(dbAdapter, "myTable", null, Arrays.asList("id", "DATE_CREATED"), "type = \"CUSTOMER\"", stateManager.getState(Scope.CLUSTER).toMap()); + assertEquals("SELECT * FROM myTable WHERE id > 509 AND DATE_CREATED >= timestamp '2016-03-07 12:34:56' AND (type = \"CUSTOMER\")", query); + + // Test time. + processor.putColumnType("mytable" + AbstractDatabaseFetchProcessor.NAMESPACE_DELIMITER + "time_created", Types.TIME); + maxValues.clear(); + maxValues.put("id", "509"); + maxValues.put("time_created", "12:34:57"); + maxValues.put("date_created", "2016-03-07 12:34:56"); + stateManager = runner.getStateManager(); + stateManager.clear(Scope.CLUSTER); + stateManager.setState(maxValues, Scope.CLUSTER); + query = processor.getQuery(dbAdapter, "myTable", null, Arrays.asList("id", "DATE_CREATED", "TIME_CREATED"), "type = \"CUSTOMER\"", stateManager.getState(Scope.CLUSTER).toMap()); + assertEquals("SELECT * FROM myTable WHERE id > 509 AND DATE_CREATED >= timestamp '2016-03-07 12:34:56' AND TIME_CREATED >= timestamp '12:34:57' AND (type = \"CUSTOMER\")", query); + dbAdapter = new GenericDatabaseAdapter(); + query = processor.getQuery(dbAdapter, "myTable", null, Arrays.asList("id", "DATE_CREATED", "TIME_CREATED"), "type = \"CUSTOMER\"", stateManager.getState(Scope.CLUSTER).toMap()); + assertEquals("SELECT * FROM myTable WHERE id > 509 AND DATE_CREATED >= '2016-03-07 12:34:56' AND TIME_CREATED >= '12:34:57' AND (type = \"CUSTOMER\")", query); + } + + @Test + public void testGetQueryUsingPhoenixAdapter() throws Exception { + Map maxValues = new HashMap<>(); + StateManager stateManager = runner.getStateManager(); + processor.putColumnType("mytable" + AbstractDatabaseFetchProcessor.NAMESPACE_DELIMITER + "id", Types.INTEGER); + processor.putColumnType("mytable" + AbstractDatabaseFetchProcessor.NAMESPACE_DELIMITER + "time_created", Types.TIME); + processor.putColumnType("mytable" + AbstractDatabaseFetchProcessor.NAMESPACE_DELIMITER + "date_created", Types.TIMESTAMP); + + maxValues.put("id", "509"); + maxValues.put("time_created", "12:34:57"); + maxValues.put("date_created", "2016-03-07 12:34:56"); + stateManager.setState(maxValues, Scope.CLUSTER); + + dbAdapter = new PhoenixDatabaseAdapter(); + String query = processor.getQuery(dbAdapter, "myTable", null, Arrays.asList("id", "DATE_CREATED", "TIME_CREATED"), "type = \"CUSTOMER\"", stateManager.getState(Scope.CLUSTER).toMap()); + assertEquals("SELECT * FROM myTable WHERE id > 509 AND DATE_CREATED >= timestamp '2016-03-07 12:34:56' AND TIME_CREATED >= time '12:34:57' AND (type = \"CUSTOMER\")", query); + // Cover the other path + dbAdapter = new GenericDatabaseAdapter(); + query = processor.getQuery(dbAdapter, "myTable", null, Arrays.asList("id", "DATE_CREATED", "TIME_CREATED"), "type = \"CUSTOMER\"", stateManager.getState(Scope.CLUSTER).toMap()); + assertEquals("SELECT * FROM myTable WHERE id > 509 AND DATE_CREATED >= '2016-03-07 12:34:56' AND TIME_CREATED >= '12:34:57' AND (type = \"CUSTOMER\")", query); + } + + @Test(expected = 
IllegalArgumentException.class) + public void testGetQueryNoTable() { + processor.getQuery(dbAdapter, null, null, null, null, null); + } + + @Test + public void testAddedRows() throws SQLException, IOException { + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (0, 'Joe Smith', 1.0, '1962-09-23 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (1, 'Carrie Jones', 5.0, '2000-01-01 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (2, NULL, 2.0, '2010-01-01 00:00:00')"); + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_QUERY_DB_TABLE"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "ID"); + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "2"); + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 2); + + MockFlowFile flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + assertEquals("TEST_QUERY_DB_TABLE", flowFile.getAttribute(QueryDatabaseTableRecord.RESULT_TABLENAME)); + assertEquals(flowFile.getAttribute("maxvalue.id"), "2"); + runner.setProperty(QueryDatabaseTableRecord.FETCH_SIZE, "2"); + flowFile.assertAttributeEquals("record.count", "2"); + + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(1); + assertEquals(flowFile.getAttribute("maxvalue.id"), "2"); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Run again, this time no flowfiles/rows should be transferred + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + //Remove Max Rows Per Flow File + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "0"); + + // Add a new row with a higher ID and run, one flowfile with one new row should be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (3, 'Mary West', 15.0, '2000-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + assertEquals(flowFile.getAttribute("maxvalue.id"), "3"); + flowFile.assertAttributeEquals("record.count", "1"); + + // Sanity check - run again, this time no flowfiles/rows should be transferred + runner.clearTransferState(); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + // Add timestamp as a max value column name + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "id, created_on"); + + // Add a new row with a higher ID and run, one flow file will be transferred because no max value for the timestamp has been stored + 
stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (4, 'Marty Johnson', 15.0, '2011-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + assertEquals(flowFile.getAttribute("maxvalue.id"), "4"); + assertEquals(flowFile.getAttribute("maxvalue.created_on"), "2011-01-01 03:23:34.234"); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Add a new row with a higher ID but lower timestamp and run, no flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (5, 'NO NAME', 15.0, '2001-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + // Add a new row with a higher ID and run, one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (6, 'Mr. NiFi', 1.0, '2012-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Set name as the max value column name (and clear the state), all rows should be returned since the max value for name has not been set + runner.getStateManager().clear(Scope.CLUSTER); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "name"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "7"); + runner.clearTransferState(); + + // Add a new row with a "higher" name than the max but lower than "NULL" (to test that null values are skipped), one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (7, 'NULK', 1.0, '2012-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Set scale as the max value column name (and clear the state), all rows should be returned since the max value for name has not been set + runner.getStateManager().clear(Scope.CLUSTER); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "scale"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "8"); + runner.clearTransferState(); + + // Add a new row with a higher value for scale than the max, one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (8, 'NULK', 100.0, '2012-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", 
"1"); + runner.clearTransferState(); + + // Set scale as the max value column name (and clear the state), all rows should be returned since the max value for name has not been set + runner.getStateManager().clear(Scope.CLUSTER); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "bignum"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "9"); + runner.clearTransferState(); + + // Add a new row with a higher value for scale than the max, one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on, bignum) VALUES (9, 'Alice Bob', 100.0, '2012-01-01 03:23:34.234', 1)"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + } + + @Test + public void testAddedRowsTwoTables() throws SQLException { + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE2"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (0, 'Joe Smith', 1.0, '1962-09-23 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (1, 'Carrie Jones', 5.0, '2000-01-01 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (2, NULL, 2.0, '2010-01-01 00:00:00')"); + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_QUERY_DB_TABLE"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "ID"); + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "2"); + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 2); + + MockFlowFile flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + assertEquals("TEST_QUERY_DB_TABLE", flowFile.getAttribute(QueryDatabaseTableRecord.RESULT_TABLENAME)); + assertEquals(flowFile.getAttribute("maxvalue.id"), "2"); + flowFile.assertAttributeEquals("record.count", "2"); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(1); + assertEquals(flowFile.getAttribute("maxvalue.id"), "2"); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Populate a second table and set + stmt.execute("create table TEST_QUERY_DB_TABLE2 (id integer not null, name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + stmt.execute("insert into TEST_QUERY_DB_TABLE2 (id, name, 
scale, created_on) VALUES (0, 'Joe Smith', 1.0, '1962-09-23 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE2 (id, name, scale, created_on) VALUES (1, 'Carrie Jones', 5.0, '2000-01-01 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE2 (id, name, scale, created_on) VALUES (2, NULL, 2.0, '2010-01-01 00:00:00')"); + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_QUERY_DB_TABLE2"); + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "0"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + assertEquals("TEST_QUERY_DB_TABLE2", flowFile.getAttribute(QueryDatabaseTableRecord.RESULT_TABLENAME)); + assertEquals(flowFile.getAttribute("maxvalue.id"), "2"); + flowFile.assertAttributeEquals("record.count", "3"); + runner.clearTransferState(); + + // Add a new row with a higher ID and run, one flowfile with one new row should be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE2 (id, name, scale, created_on) VALUES (3, 'Mary West', 15.0, '2000-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + assertEquals(flowFile.getAttribute("maxvalue.id"), "3"); + flowFile.assertAttributeEquals("record.count", "1"); + + // Sanity check - run again, this time no flowfiles/rows should be transferred + runner.clearTransferState(); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + } + + @Test + public void testMultiplePartitions() throws SQLException { + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, bucket integer not null)"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, bucket) VALUES (0, 0)"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, bucket) VALUES (1, 0)"); + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_QUERY_DB_TABLE"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "ID, BUCKET"); + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + assertEquals("2", + runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0).getAttribute(QueryDatabaseTableRecord.RESULT_ROW_COUNT) + ); + runner.clearTransferState(); + + // Add a new row in the same bucket + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, bucket) VALUES (2, 0)"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + assertEquals("1", + runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0).getAttribute(QueryDatabaseTableRecord.RESULT_ROW_COUNT) + ); + runner.clearTransferState(); + + // Add a new row in a new bucket + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, bucket) VALUES (3, 1)"); + runner.run(); + 
runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + assertEquals("1", + runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0).getAttribute(QueryDatabaseTableRecord.RESULT_ROW_COUNT) + ); + runner.clearTransferState(); + + // Add a new row in an old bucket, it should not be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, bucket) VALUES (4, 0)"); + runner.run(); + runner.assertTransferCount(QueryDatabaseTableRecord.REL_SUCCESS, 0); + + // Add a new row in the second bucket, only the new row should be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, bucket) VALUES (5, 1)"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + assertEquals("1", + runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0).getAttribute(QueryDatabaseTableRecord.RESULT_ROW_COUNT) + ); + runner.clearTransferState(); + } + + @Test + public void testTimestampNanos() throws SQLException { + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (1, 'Carrie Jones', 5.0, '2000-01-01 03:23:34.000123456')"); + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_QUERY_DB_TABLE"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "created_on"); + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + MockFlowFile flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Run again, this time no flowfiles/rows should be transferred + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + // Add a new row with a lower timestamp (but same millisecond value), no flow file should be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (3, 'Mary West', 15.0, '2000-01-01 03:23:34.000')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + // Add a new row with a higher timestamp, one flow file should be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (3, 'Mary West', 15.0, '2000-01-01 03:23:34.0003')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Run again, this time no flowfiles/rows should be transferred + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + } + + @Test + public void testWithNullIntColumn() throws 
SQLException { + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_NULL_INT"); + } catch (final SQLException sqle) { + // Ignore, usually due to Derby not having DROP TABLE IF EXISTS + } + + stmt.execute("create table TEST_NULL_INT (id integer not null, val1 integer, val2 integer, constraint my_pk primary key (id))"); + + stmt.execute("insert into TEST_NULL_INT (id, val1, val2) VALUES (0, NULL, 1)"); + stmt.execute("insert into TEST_NULL_INT (id, val1, val2) VALUES (1, 1, 1)"); + + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_NULL_INT"); + runner.run(); + + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0).assertAttributeEquals(QueryDatabaseTableRecord.RESULT_ROW_COUNT, "2"); + } + + @Test + public void testWithRuntimeException() throws SQLException { + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_NULL_INT"); + } catch (final SQLException sqle) { + // Ignore, usually due to Derby not having DROP TABLE IF EXISTS + } + + stmt.execute("create table TEST_NULL_INT (id integer not null, val1 integer, val2 integer, constraint my_pk primary key (id))"); + + stmt.execute("insert into TEST_NULL_INT (id, val1, val2) VALUES (0, NULL, 1)"); + stmt.execute("insert into TEST_NULL_INT (id, val1, val2) VALUES (1, 1, 1)"); + + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_NULL_INT"); + runner.setProperty(AbstractDatabaseFetchProcessor.MAX_VALUE_COLUMN_NAMES, "id"); + + QueryDatabaseTableRecord.dbAdapters.put(dbAdapter.getName(), new GenericDatabaseAdapter() { + @Override + public String getName() { + throw new RuntimeException("test"); + } + }); + runner.run(); + + assertTrue(runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).isEmpty()); + } + + @Test + public void testWithSqlException() throws SQLException { + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_NO_ROWS"); + } catch (final SQLException sqle) { + // Ignore, usually due to Derby not having DROP TABLE IF EXISTS + } + + stmt.execute("create table TEST_NO_ROWS (id integer)"); + + runner.setIncomingConnection(false); + // Try a valid SQL statement that will generate an error (val1 does not exist, e.g.) 
+ runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_NO_ROWS"); + runner.setProperty(QueryDatabaseTableRecord.COLUMN_NAMES, "val1"); + runner.run(); + + assertTrue(runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).isEmpty()); + } + + @Test + public void testOutputBatchSize() throws SQLException { + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + MockFlowFile mff; + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + int rowCount = 0; + // Create larger row set + for (int batch = 0; batch < 100; batch++) { + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (" + rowCount + ", 'Joe Smith', 1.0, '1962-09-23 03:23:34.234')"); + rowCount++; + } + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_QUERY_DB_TABLE"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "ID"); + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "${" + MAX_ROWS_KEY + "}"); + runner.setVariable(MAX_ROWS_KEY, "7"); + runner.setProperty(QueryDatabaseTableRecord.OUTPUT_BATCH_SIZE, "${outputBatchSize}"); + runner.setVariable("outputBatchSize", "4"); + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 15); + + // Ensure all but the last file have 7 records each + for (int ff = 0; ff < 14; ff++) { + mff = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(ff); + mff.assertAttributeEquals("record.count", "7"); + + mff.assertAttributeExists("fragment.identifier"); + assertEquals(Integer.toString(ff), mff.getAttribute("fragment.index")); + // No fragment.count set for flow files sent when Output Batch Size is set + assertNull(mff.getAttribute("fragment.count")); + } + + // Last file should have 2 records + mff = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(14); + mff.assertAttributeEquals("record.count", "2"); + mff.assertAttributeExists("fragment.identifier"); + assertEquals(Integer.toString(14), mff.getAttribute("fragment.index")); + // No fragment.count set for flow files sent when Output Batch Size is set + assertNull(mff.getAttribute("fragment.count")); + } + + @Test + public void testMaxRowsPerFlowFile() throws IOException, SQLException { + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + MockFlowFile mff; + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + int rowCount = 0; + //create larger row set + for (int batch = 0; batch < 100; batch++) { + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (" + rowCount + ", 'Joe Smith', 1.0, '1962-09-23 03:23:34.234')"); + 
rowCount++; + } + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_QUERY_DB_TABLE"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "ID"); + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "${" + MAX_ROWS_KEY + "}"); + runner.setVariable(MAX_ROWS_KEY, "9"); + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 12); + + //ensure all but the last file have 9 records each + for (int ff = 0; ff < 11; ff++) { + mff = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(ff); + mff.assertAttributeEquals("record.count", "9"); + + mff.assertAttributeExists("fragment.identifier"); + assertEquals(Integer.toString(ff), mff.getAttribute("fragment.index")); + assertEquals("12", mff.getAttribute("fragment.count")); + } + + //last file should have 1 record + mff = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(11); + mff.assertAttributeEquals("record.count", "1"); + mff.assertAttributeExists("fragment.identifier"); + assertEquals(Integer.toString(11), mff.getAttribute("fragment.index")); + assertEquals("12", mff.getAttribute("fragment.count")); + runner.clearTransferState(); + + // Run again, this time no flowfiles/rows should be transferred + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + // Run again, this time should be a single partial flow file + for (int batch = 0; batch < 5; batch++) { + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (" + rowCount + ", 'Joe Smith', 1.0, '1962-09-23 03:23:34.234')"); + rowCount++; + } + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + mff = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + mff.assertAttributeExists("fragment.identifier"); + assertEquals(Integer.toString(0), mff.getAttribute("fragment.index")); + assertEquals("1", mff.getAttribute("fragment.count")); + mff.assertAttributeEquals("record.count", "5"); + runner.clearTransferState(); + + // Run again, this time should be a full batch and a partial + for (int batch = 0; batch < 14; batch++) { + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (" + rowCount + ", 'Joe Smith', 1.0, '1962-09-23 03:23:34.234')"); + rowCount++; + } + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 2); + mff = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + mff.assertAttributeEquals("record.count", "9"); + mff = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(1); + mff.assertAttributeEquals("record.count", "5"); + runner.clearTransferState(); + + // Run again with a cleaned state. 
Should get all rows split into batches + int ffCount = (int) Math.ceil(rowCount / 9D); + runner.getStateManager().clear(Scope.CLUSTER); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, ffCount); + + //ensure all but the last file have 9 records each + for (int ff = 0; ff < ffCount - 1; ff++) { + mff = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(ff); + mff.assertAttributeEquals("record.count", "9"); + } + + mff = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(ffCount - 1); + mff.assertAttributeEquals("record.count", Integer.toString(rowCount % 9)); + runner.clearTransferState(); + } + + @Test + public void testMaxRowsPerFlowFileWithMaxFragments() throws SQLException { + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + MockFlowFile mff; + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + int rowCount = 0; + //create larger row set + for (int batch = 0; batch < 100; batch++) { + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (" + rowCount + ", 'Joe Smith', 1.0, '1962-09-23 03:23:34.234')"); + rowCount++; + } + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_QUERY_DB_TABLE"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "ID"); + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "9"); + Integer maxFragments = 3; + runner.setProperty(QueryDatabaseTableRecord.MAX_FRAGMENTS, maxFragments.toString()); + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, maxFragments); + + for (int i = 0; i < maxFragments; i++) { + mff = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(i); + mff.assertAttributeEquals("record.count", "9"); + + mff.assertAttributeExists("fragment.identifier"); + assertEquals(Integer.toString(i), mff.getAttribute("fragment.index")); + assertEquals(maxFragments.toString(), mff.getAttribute("fragment.count")); + } + + runner.clearTransferState(); + } + + @Test + public void testInitialMaxValue() throws SQLException { + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + + Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC")); + cal.setTimeInMillis(0); + + SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); + dateFormat.setTimeZone(TimeZone.getTimeZone("UTC")); + + int rowCount = 0; + //create larger row set + for (int batch = 0; batch < 10; batch++) { + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, 
created_on) VALUES (" + rowCount + ", 'Joe Smith', 1.0, '" + dateFormat.format(cal.getTime().getTime()) + "')"); + + rowCount++; + cal.add(Calendar.MINUTE, 1); + } + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "${" + TABLE_NAME_KEY + "}"); + runner.setVariable(TABLE_NAME_KEY, "TEST_QUERY_DB_TABLE"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "created_on"); + + cal.setTimeInMillis(0); + cal.add(Calendar.MINUTE, 5); + runner.setProperty("initial.maxvalue.CREATED_ON", dateFormat.format(cal.getTime().getTime())); + // Initial run with no previous state. Should get only last 4 records + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + MockFlowFile flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "4"); + runner.getStateManager().assertStateEquals("test_query_db_table" + AbstractDatabaseFetchProcessor.NAMESPACE_DELIMITER + "created_on", "1970-01-01 00:09:00.0", Scope.CLUSTER); + runner.clearTransferState(); + + // Run again, this time no flowfiles/rows should be transferred + // Validate Max Value doesn't change also + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.getStateManager().assertStateEquals("test_query_db_table" + AbstractDatabaseFetchProcessor.NAMESPACE_DELIMITER + "created_on", "1970-01-01 00:09:00.0", Scope.CLUSTER); + runner.clearTransferState(); + } + + @Test + public void testInitialMaxValueWithEL() throws SQLException { + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + + Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC")); + cal.setTimeInMillis(0); + + SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); + dateFormat.setTimeZone(TimeZone.getTimeZone("UTC")); + + int rowCount = 0; + //create larger row set + for (int batch = 0; batch < 10; batch++) { + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (" + rowCount + ", 'Joe Smith', 1.0, '" + dateFormat.format(cal.getTime().getTime()) + "')"); + + rowCount++; + cal.add(Calendar.MINUTE, 1); + } + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "${" + TABLE_NAME_KEY + "}"); + runner.setVariable(TABLE_NAME_KEY, "TEST_QUERY_DB_TABLE"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "created_on"); + + cal.setTimeInMillis(0); + cal.add(Calendar.MINUTE, 5); + runner.setProperty("initial.maxvalue.CREATED_ON", "${created.on}"); + runner.setVariable("created.on", dateFormat.format(cal.getTime().getTime())); + // Initial run with no previous state. 
Should get only last 4 records + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + MockFlowFile flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "4"); + runner.getStateManager().assertStateEquals("test_query_db_table" + AbstractDatabaseFetchProcessor.NAMESPACE_DELIMITER + "created_on", "1970-01-01 00:09:00.0", Scope.CLUSTER); + runner.clearTransferState(); + + // Run again, this time no flowfiles/rows should be transferred + // Validate Max Value doesn't change also + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.getStateManager().assertStateEquals("test_query_db_table" + AbstractDatabaseFetchProcessor.NAMESPACE_DELIMITER + "created_on", "1970-01-01 00:09:00.0", Scope.CLUSTER); + runner.clearTransferState(); + + // Append a new row, expect 1 flowfile one row + cal.setTimeInMillis(0); + cal.add(Calendar.MINUTE, rowCount); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, name, scale, created_on) VALUES (" + rowCount + ", 'Joe Smith', 1.0, '" + dateFormat.format(cal.getTime().getTime()) + "')"); + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.getStateManager().assertStateEquals("test_query_db_table" + AbstractDatabaseFetchProcessor.NAMESPACE_DELIMITER + "created_on", "1970-01-01 00:10:00.0", Scope.CLUSTER); + runner.clearTransferState(); + } + + @Test + public void testAddedRowsCustomWhereClause() throws SQLException, IOException { + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, type varchar(20), name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (0, 'male', 'Joe Smith', 1.0, '1962-09-23 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (1, 'female', 'Carrie Jones', 5.0, '2000-01-01 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (2, NULL, NULL, 2.0, '2010-01-01 00:00:00')"); + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_QUERY_DB_TABLE"); + runner.setProperty(QueryDatabaseTableRecord.WHERE_CLAUSE, "type = 'male'"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "ID"); + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "2"); + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + + MockFlowFile flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + assertEquals("TEST_QUERY_DB_TABLE", flowFile.getAttribute(QueryDatabaseTableRecord.RESULT_TABLENAME)); + assertEquals(flowFile.getAttribute("maxvalue.id"), "0"); + flowFile.assertAttributeEquals("record.count", "1"); + + 
runner.clearTransferState(); + + // Run again, this time no flowfiles/rows should be transferred + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + //Remove Max Rows Per Flow File + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "0"); + + // Add a new row with a higher ID and run, one flowfile with one new row should be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (3, 'female', 'Mary West', 15.0, '2000-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + // Sanity check - run again, this time no flowfiles/rows should be transferred + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + // Add timestamp as a max value column name + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "id, created_on"); + + // Add a new row with a higher ID and run, one flow file will be transferred because no max value for the timestamp has been stored + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (4, 'male', 'Marty Johnson', 15.0, '2011-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + assertEquals(flowFile.getAttribute("maxvalue.id"), "4"); + assertEquals(flowFile.getAttribute("maxvalue.created_on"), "2011-01-01 03:23:34.234"); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Add a new row with a higher ID but lower timestamp and run, no flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (5, 'male', 'NO NAME', 15.0, '2001-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + // Add a new row with a higher ID and run, one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (6, 'male', 'Mr. 
NiFi', 1.0, '2012-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Set name as the max value column name (and clear the state), all rows should be returned since the max value for name has not been set + runner.getStateManager().clear(Scope.CLUSTER); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "name"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "4"); + runner.clearTransferState(); + + // Add a new row with a "higher" name than the max but lower than "NULL" (to test that null values are skipped), one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (7, 'male', 'NULK', 1.0, '2012-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Set scale as the max value column name (and clear the state), all rows should be returned since the max value for name has not been set + runner.getStateManager().clear(Scope.CLUSTER); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "scale"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "5"); + runner.clearTransferState(); + + // Add a new row with a higher value for scale than the max, one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (8, 'male', 'NULK', 100.0, '2012-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Set scale as the max value column name (and clear the state), all rows should be returned since the max value for name has not been set + runner.getStateManager().clear(Scope.CLUSTER); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "bignum"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "6"); + runner.clearTransferState(); + + // Add a new row with a higher value for scale than the max, one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on, bignum) VALUES (9, 'female', 'Alice Bob', 100.0, '2012-01-01 03:23:34.234', 1)"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + } + + @Test + public void testCustomSQL() throws SQLException, IOException { + + // load test 
data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + try { + stmt.execute("drop table TYPE_LIST"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, type varchar(20), name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (0, 'male', 'Joe Smith', 1.0, '1962-09-23 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (1, 'female', 'Carrie Jones', 5.0, '2000-01-01 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (2, NULL, NULL, 2.0, '2010-01-01 00:00:00')"); + + stmt.execute("create table TYPE_LIST (type_id integer not null, type varchar(20), descr varchar(255))"); + stmt.execute("insert into TYPE_LIST (type_id, type,descr) VALUES (0, 'male', 'Man')"); + stmt.execute("insert into TYPE_LIST (type_id, type,descr) VALUES (1, 'female', 'Woman')"); + stmt.execute("insert into TYPE_LIST (type_id, type,descr) VALUES (2, '', 'Unspecified')"); + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_QUERY_DB_TABLE"); + runner.setProperty(QueryDatabaseTableRecord.SQL_QUERY, + "SELECT id, b.type as gender, b.descr, name, scale, created_on, bignum FROM TEST_QUERY_DB_TABLE a INNER JOIN TYPE_LIST b ON (a.type=b.type)"); + runner.setProperty(QueryDatabaseTableRecord.WHERE_CLAUSE, "gender = 'male'"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "ID"); + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "2"); + + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + + MockFlowFile flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + assertEquals("TEST_QUERY_DB_TABLE", flowFile.getAttribute(QueryDatabaseTableRecord.RESULT_TABLENAME)); + assertEquals(flowFile.getAttribute("maxvalue.id"), "0"); + flowFile.assertAttributeEquals("record.count", "1"); + + runner.clearTransferState(); + + // Run again, this time no flowfiles/rows should be transferred + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + //Remove Max Rows Per Flow File + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "0"); + + // Add a new row with a higher ID and run, one flowfile with one new row should be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (3, 'female', 'Mary West', 15.0, '2000-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + // Sanity check - run again, this time no flowfiles/rows should be transferred + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + // Add timestamp as a max value column name + 
runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "id, created_on"); + + // Add a new row with a higher ID and run, one flow file will be transferred because no max value for the timestamp has been stored + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (4, 'male', 'Marty Johnson', 15.0, '2011-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + assertEquals(flowFile.getAttribute("maxvalue.id"), "4"); + assertEquals(flowFile.getAttribute("maxvalue.created_on"), "2011-01-01 03:23:34.234"); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Add a new row with a higher ID but lower timestamp and run, no flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (5, 'male', 'NO NAME', 15.0, '2001-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + + // Add a new row with a higher ID and run, one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (6, 'male', 'Mr. NiFi', 1.0, '2012-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Set name as the max value column name (and clear the state), all rows should be returned since the max value for name has not been set + runner.getStateManager().clear(Scope.CLUSTER); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "name"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "4"); + runner.clearTransferState(); + + // Add a new row with a "higher" name than the max but lower than "NULL" (to test that null values are skipped), one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (7, 'male', 'NULK', 1.0, '2012-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Set scale as the max value column name (and clear the state), all rows should be returned since the max value for name has not been set + runner.getStateManager().clear(Scope.CLUSTER); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "scale"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "5"); + runner.clearTransferState(); + + // Add a new row with a higher value for scale than the max, one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (8, 
'male', 'NULK', 100.0, '2012-01-01 03:23:34.234')"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "1"); + runner.clearTransferState(); + + // Set scale as the max value column name (and clear the state), all rows should be returned since the max value for name has not been set + runner.getStateManager().clear(Scope.CLUSTER); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "bignum"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 1); + flowFile = runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).get(0); + flowFile.assertAttributeEquals("record.count", "6"); + runner.clearTransferState(); + + // Add a new row with a higher value for scale than the max, one flow file will be transferred + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on, bignum) VALUES (9, 'female', 'Alice Bob', 100.0, '2012-01-01 03:23:34.234', 1)"); + runner.run(); + runner.assertAllFlowFilesTransferred(QueryDatabaseTableRecord.REL_SUCCESS, 0); + runner.clearTransferState(); + } + + @Test(expected = AssertionError.class) + public void testMissingColumn() throws ProcessException, SQLException { + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_QUERY_DB_TABLE"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + try { + stmt.execute("drop table TYPE_LIST"); + } catch (final SQLException sqle) { + // Ignore this error, probably a "table does not exist" since Derby doesn't yet support DROP IF EXISTS [DERBY-4842] + } + + stmt.execute("create table TEST_QUERY_DB_TABLE (id integer not null, type varchar(20), name varchar(100), scale float, created_on timestamp, bignum bigint default 0)"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (0, 'male', 'Joe Smith', 1.0, '1962-09-23 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (1, 'female', 'Carrie Jones', 5.0, '2000-01-01 03:23:34.234')"); + stmt.execute("insert into TEST_QUERY_DB_TABLE (id, type, name, scale, created_on) VALUES (2, NULL, NULL, 2.0, '2010-01-01 00:00:00')"); + + stmt.execute("create table TYPE_LIST (type_id integer not null, type varchar(20), descr varchar(255))"); + stmt.execute("insert into TYPE_LIST (type_id, type,descr) VALUES (0, 'male', 'Man')"); + stmt.execute("insert into TYPE_LIST (type_id, type,descr) VALUES (1, 'female', 'Woman')"); + stmt.execute("insert into TYPE_LIST (type_id, type,descr) VALUES (2, '', 'Unspecified')"); + + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TYPE_LIST"); + runner.setProperty(QueryDatabaseTableRecord.SQL_QUERY, "SELECT b.type, b.descr, name, scale, created_on, bignum FROM TEST_QUERY_DB_TABLE a INNER JOIN TYPE_LIST b ON (a.type=b.type)"); + runner.setProperty(QueryDatabaseTableRecord.WHERE_CLAUSE, "type = 'male'"); + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.MAX_VALUE_COLUMN_NAMES, "ID"); + runner.setProperty(QueryDatabaseTableRecord.MAX_ROWS_PER_FLOW_FILE, "2"); + + runner.run(); + } + + 
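+ // Verifies that no FlowFiles are transferred and the stored state is left untouched when the database adapter fails part-way through a run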
@Test + public void testWithExceptionAfterSomeRowsProcessed() throws SQLException { + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_NULL_INT"); + } catch (final SQLException sqle) { + // Ignore, usually due to Derby not having DROP TABLE IF EXISTS + } + + stmt.execute("create table TEST_NULL_INT (id integer not null, val1 integer, val2 integer, constraint my_pk primary key (id))"); + + stmt.execute("insert into TEST_NULL_INT (id, val1, val2) VALUES (1, NULL, 1)"); + stmt.execute("insert into TEST_NULL_INT (id, val1, val2) VALUES (2, 1, 1)"); + + runner.setIncomingConnection(false); + runner.setProperty(QueryDatabaseTableRecord.TABLE_NAME, "TEST_NULL_INT"); + runner.setProperty(AbstractDatabaseFetchProcessor.MAX_VALUE_COLUMN_NAMES, "id"); + + // Override adapter with one that fails after the first row is processed + QueryDatabaseTableRecord.dbAdapters.put(dbAdapter.getName(), new GenericDatabaseAdapter() { + boolean fail = false; + + @Override + public String getName() { + if (!fail) { + fail = true; + return super.getName(); + } + throw new RuntimeException("test"); + } + }); + runner.run(); + assertTrue(runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).isEmpty()); + // State should not have been updated + runner.getStateManager().assertStateNotSet("test_null_int@!@id", Scope.CLUSTER); + + // Restore original (working) adapter and run again + QueryDatabaseTableRecord.dbAdapters.put(dbAdapter.getName(), dbAdapter); + runner.run(); + assertFalse(runner.getFlowFilesForRelationship(QueryDatabaseTableRecord.REL_SUCCESS).isEmpty()); + runner.getStateManager().assertStateEquals("test_null_int@!@id", "2", Scope.CLUSTER); + } + + /** + * Simple implementation only for QueryDatabaseTableRecord processor testing. 
+ */ + private class DBCPServiceSimpleImpl extends AbstractControllerService implements DBCPService { + + @Override + public String getIdentifier() { + return "dbcp"; + } + + @Override + public Connection getConnection() throws ProcessException { + try { + Class.forName("org.apache.derby.jdbc.EmbeddedDriver"); + return DriverManager.getConnection("jdbc:derby:" + DB_LOCATION + ";create=true"); + } catch (final Exception e) { + throw new ProcessException("getConnection failed: " + e); + } + } + } + + @Stateful(scopes = Scope.CLUSTER, description = "Mock for QueryDatabaseTableRecord processor") + private static class MockQueryDatabaseTableRecord extends QueryDatabaseTableRecord { + void putColumnType(String colName, Integer colType) { + columnTypeMap.put(colName, colType); + } + } +} \ No newline at end of file diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableTest.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableTest.java index 1624c6d26f82..8b51fe2415ee 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableTest.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/QueryDatabaseTableTest.java @@ -38,13 +38,13 @@ import org.apache.nifi.util.TestRunner; import org.apache.nifi.util.TestRunners; import org.apache.nifi.util.file.FileUtils; -import org.fusesource.hawtbuf.ByteArrayInputStream; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; +import java.io.ByteArrayInputStream; import java.io.File; import java.io.IOException; import java.io.InputStream; diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQL.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQL.java index 33633f23fd56..63de91ad8997 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQL.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQL.java @@ -22,6 +22,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import java.io.ByteArrayInputStream; import java.io.File; import java.io.IOException; import java.io.InputStream; @@ -51,7 +52,6 @@ import org.apache.nifi.util.MockFlowFile; import org.apache.nifi.util.TestRunner; import org.apache.nifi.util.TestRunners; -import org.fusesource.hawtbuf.ByteArrayInputStream; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -351,6 +351,48 @@ public void testInsertStatementCreatesFlowFile() throws SQLException { runner.getFlowFilesForRelationship(ExecuteSQL.REL_SUCCESS).get(0).assertAttributeEquals(ExecuteSQL.RESULT_ROW_COUNT, "0"); } + @Test + public void testNoRowsStatementCreatesEmptyFlowFile() throws Exception { + // remove previous test database, if any + final File dbLocation = new File(DB_LOCATION); + dbLocation.delete(); + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = 
con.createStatement(); + + try { + stmt.execute("drop table TEST_NULL_INT"); + } catch (final SQLException sqle) { + } + + stmt.execute("create table TEST_NULL_INT (id integer not null, val1 integer, val2 integer, constraint my_pk primary key (id))"); + + runner.setIncomingConnection(true); + runner.setProperty(ExecuteSQL.SQL_SELECT_QUERY, "select * from TEST_NULL_INT"); + runner.enqueue("Hello".getBytes()); + runner.run(); + + runner.assertAllFlowFilesTransferred(ExecuteSQL.REL_SUCCESS, 1); + MockFlowFile firstFlowFile = runner.getFlowFilesForRelationship(ExecuteSQL.REL_SUCCESS).get(0); + firstFlowFile.assertAttributeEquals(ExecuteSQL.RESULT_ROW_COUNT, "0"); + final InputStream in = new ByteArrayInputStream(firstFlowFile.toByteArray()); + final DatumReader datumReader = new GenericDatumReader<>(); + try (DataFileStream dataFileReader = new DataFileStream<>(in, datumReader)) { + GenericRecord record = null; + long recordsFromStream = 0; + while (dataFileReader.hasNext()) { + // Reuse record object by passing it to next(). This saves us from + // allocating and garbage collecting many objects for files with + // many items. + record = dataFileReader.next(record); + recordsFromStream += 1; + } + + assertEquals(0, recordsFromStream); + } + } + @Test public void testWithDuplicateColumns() throws SQLException { // remove previous test database, if any diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQLRecord.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQLRecord.java new file mode 100644 index 000000000000..04c4c00c5e1e --- /dev/null +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestExecuteSQLRecord.java @@ -0,0 +1,376 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.processors.standard; + +import org.apache.nifi.controller.AbstractControllerService; +import org.apache.nifi.dbcp.DBCPService; +import org.apache.nifi.flowfile.attributes.CoreAttributes; +import org.apache.nifi.flowfile.attributes.FragmentAttributes; +import org.apache.nifi.processor.exception.ProcessException; +import org.apache.nifi.processors.standard.util.TestJdbcHugeStream; +import org.apache.nifi.provenance.ProvenanceEventType; +import org.apache.nifi.reporting.InitializationException; +import org.apache.nifi.serialization.record.MockRecordWriter; +import org.apache.nifi.util.MockFlowFile; +import org.apache.nifi.util.TestRunner; +import org.apache.nifi.util.TestRunners; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; + +public class TestExecuteSQLRecord { + + private static final Logger LOGGER; + + static { + System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "info"); + System.setProperty("org.slf4j.simpleLogger.showDateTime", "true"); + System.setProperty("org.slf4j.simpleLogger.log.nifi.io.nio", "debug"); + System.setProperty("org.slf4j.simpleLogger.log.nifi.processors.standard.ExecuteSQLRecord", "debug"); + System.setProperty("org.slf4j.simpleLogger.log.nifi.processors.standard.TestExecuteSQLRecord", "debug"); + LOGGER = LoggerFactory.getLogger(TestExecuteSQLRecord.class); + } + + final static String DB_LOCATION = "target/db"; + + final static String QUERY_WITH_EL = "select " + + " PER.ID as PersonId, PER.NAME as PersonName, PER.CODE as PersonCode" + + ", PRD.ID as ProductId,PRD.NAME as ProductName,PRD.CODE as ProductCode" + + ", REL.ID as RelId, REL.NAME as RelName, REL.CODE as RelCode" + + ", ROW_NUMBER() OVER () as rownr " + + " from persons PER, products PRD, relationships REL" + + " where PER.ID = ${person.id}"; + + final static String QUERY_WITHOUT_EL = "select " + + " PER.ID as PersonId, PER.NAME as PersonName, PER.CODE as PersonCode" + + ", PRD.ID as ProductId,PRD.NAME as ProductName,PRD.CODE as ProductCode" + + ", REL.ID as RelId, REL.NAME as RelName, REL.CODE as RelCode" + + ", ROW_NUMBER() OVER () as rownr " + + " from persons PER, products PRD, relationships REL" + + " where PER.ID = 10"; + + final static String QUERY_WITHOUT_EL_WITH_PARAMS = "select " + + " PER.ID as PersonId, PER.NAME as PersonName, PER.CODE as PersonCode" + + ", PRD.ID as ProductId,PRD.NAME as ProductName,PRD.CODE as ProductCode" + + ", REL.ID as RelId, REL.NAME as RelName, REL.CODE as RelCode" + + ", ROW_NUMBER() OVER () as rownr " + + " from persons PER, products PRD, relationships REL" + + " where PER.ID < ? 
AND REL.ID < ?"; + + + @BeforeClass + public static void setupClass() { + System.setProperty("derby.stream.error.file", "target/derby.log"); + } + + private TestRunner runner; + + @Before + public void setup() throws InitializationException { + final DBCPService dbcp = new DBCPServiceSimpleImpl(); + final Map dbcpProperties = new HashMap<>(); + + runner = TestRunners.newTestRunner(ExecuteSQLRecord.class); + runner.addControllerService("dbcp", dbcp, dbcpProperties); + runner.enableControllerService(dbcp); + runner.setProperty(AbstractExecuteSQL.DBCP_SERVICE, "dbcp"); + } + + @Test + public void testIncomingConnectionWithNoFlowFile() throws InitializationException { + runner.setIncomingConnection(true); + runner.setProperty(AbstractExecuteSQL.SQL_SELECT_QUERY, "SELECT * FROM persons"); + MockRecordWriter recordWriter = new MockRecordWriter(null, true, -1); + runner.addControllerService("writer", recordWriter); + runner.setProperty(ExecuteSQLRecord.RECORD_WRITER_FACTORY, "writer"); + runner.enableControllerService(recordWriter); + runner.run(); + runner.assertTransferCount(AbstractExecuteSQL.REL_SUCCESS, 0); + runner.assertTransferCount(AbstractExecuteSQL.REL_FAILURE, 0); + } + + @Test + public void testIncomingConnectionWithNoFlowFileAndNoQuery() throws InitializationException { + runner.setIncomingConnection(true); + MockRecordWriter recordWriter = new MockRecordWriter(null, true, -1); + runner.addControllerService("writer", recordWriter); + runner.setProperty(ExecuteSQLRecord.RECORD_WRITER_FACTORY, "writer"); + runner.enableControllerService(recordWriter); + runner.run(); + runner.assertTransferCount(AbstractExecuteSQL.REL_SUCCESS, 0); + runner.assertTransferCount(AbstractExecuteSQL.REL_FAILURE, 0); + } + + @Test(expected = AssertionError.class) + public void testNoIncomingConnectionAndNoQuery() throws InitializationException { + runner.setIncomingConnection(false); + runner.run(); + } + + @Test + public void testNoIncomingConnection() throws ClassNotFoundException, SQLException, InitializationException, IOException { + runner.setIncomingConnection(false); + invokeOnTriggerRecords(null, QUERY_WITHOUT_EL, false, null, true); + assertEquals(ProvenanceEventType.RECEIVE, runner.getProvenanceEvents().get(0).getEventType()); + } + + @Test + public void testSelectQueryInFlowFile() throws InitializationException, ClassNotFoundException, SQLException, IOException { + invokeOnTriggerRecords(null, QUERY_WITHOUT_EL, true, null, false); + assertEquals(ProvenanceEventType.FORK, runner.getProvenanceEvents().get(0).getEventType()); + assertEquals(ProvenanceEventType.FETCH, runner.getProvenanceEvents().get(1).getEventType()); + } + + @Test + public void testMaxRowsPerFlowFile() throws Exception { + // remove previous test database, if any + final File dbLocation = new File(DB_LOCATION); + dbLocation.delete(); + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_NULL_INT"); + } catch (final SQLException sqle) { + } + + stmt.execute("create table TEST_NULL_INT (id integer not null, val1 integer, val2 integer, constraint my_pk primary key (id))"); + + for (int i = 0; i < 1000; i++) { + stmt.execute("insert into TEST_NULL_INT (id, val1, val2) VALUES (" + i + ", 1, 1)"); + } + + runner.setIncomingConnection(false); + runner.setProperty(AbstractExecuteSQL.MAX_ROWS_PER_FLOW_FILE, "5"); + runner.setProperty(AbstractExecuteSQL.OUTPUT_BATCH_SIZE, "0"); + 
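+ // With 1000 rows inserted and Max Rows Per Flow File set to 5, the result set should be split into 200 FlowFiles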
runner.setProperty(AbstractExecuteSQL.SQL_SELECT_QUERY, "SELECT * FROM TEST_NULL_INT"); + MockRecordWriter recordWriter = new MockRecordWriter(null, true, -1); + runner.addControllerService("writer", recordWriter); + runner.setProperty(ExecuteSQLRecord.RECORD_WRITER_FACTORY, "writer"); + runner.enableControllerService(recordWriter); + runner.run(); + + runner.assertAllFlowFilesTransferred(AbstractExecuteSQL.REL_SUCCESS, 200); + runner.assertTransferCount(AbstractExecuteSQL.REL_FAILURE, 0); + runner.assertAllFlowFilesContainAttribute(AbstractExecuteSQL.REL_SUCCESS, FragmentAttributes.FRAGMENT_INDEX.key()); + runner.assertAllFlowFilesContainAttribute(AbstractExecuteSQL.REL_SUCCESS, FragmentAttributes.FRAGMENT_ID.key()); + runner.assertAllFlowFilesContainAttribute(AbstractExecuteSQL.REL_SUCCESS, FragmentAttributes.FRAGMENT_COUNT.key()); + + MockFlowFile firstFlowFile = runner.getFlowFilesForRelationship(AbstractExecuteSQL.REL_SUCCESS).get(0); + + firstFlowFile.assertAttributeEquals(AbstractExecuteSQL.RESULT_ROW_COUNT, "5"); + firstFlowFile.assertAttributeEquals("record.count", "5"); + firstFlowFile.assertAttributeEquals(CoreAttributes.MIME_TYPE.key(), "text/plain"); // MockRecordWriter has text/plain MIME type + firstFlowFile.assertAttributeEquals(FragmentAttributes.FRAGMENT_INDEX.key(), "0"); + firstFlowFile.assertAttributeEquals(AbstractExecuteSQL.RESULTSET_INDEX, "0"); + + MockFlowFile lastFlowFile = runner.getFlowFilesForRelationship(AbstractExecuteSQL.REL_SUCCESS).get(199); + + lastFlowFile.assertAttributeEquals(AbstractExecuteSQL.RESULT_ROW_COUNT, "5"); + lastFlowFile.assertAttributeEquals("record.count", "5"); + lastFlowFile.assertAttributeEquals(CoreAttributes.MIME_TYPE.key(), "text/plain"); // MockRecordWriter has text/plain MIME type + lastFlowFile.assertAttributeEquals(FragmentAttributes.FRAGMENT_INDEX.key(), "199"); + lastFlowFile.assertAttributeEquals(AbstractExecuteSQL.RESULTSET_INDEX, "0"); + } + + @Test + public void testInsertStatementCreatesFlowFile() throws Exception { + // remove previous test database, if any + final File dbLocation = new File(DB_LOCATION); + dbLocation.delete(); + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_NULL_INT"); + } catch (final SQLException sqle) { + } + + stmt.execute("create table TEST_NULL_INT (id integer not null, val1 integer, val2 integer, constraint my_pk primary key (id))"); + + runner.setIncomingConnection(false); + runner.setProperty(AbstractExecuteSQL.SQL_SELECT_QUERY, "insert into TEST_NULL_INT (id, val1, val2) VALUES (0, NULL, 1)"); + MockRecordWriter recordWriter = new MockRecordWriter(null, true, -1); + runner.addControllerService("writer", recordWriter); + runner.setProperty(ExecuteSQLRecord.RECORD_WRITER_FACTORY, "writer"); + runner.enableControllerService(recordWriter); + runner.run(); + + runner.assertAllFlowFilesTransferred(AbstractExecuteSQL.REL_SUCCESS, 1); + runner.getFlowFilesForRelationship(AbstractExecuteSQL.REL_SUCCESS).get(0).assertAttributeEquals(AbstractExecuteSQL.RESULT_ROW_COUNT, "0"); + } + + @Test + public void testNoRowsStatementCreatesEmptyFlowFile() throws Exception { + // remove previous test database, if any + final File dbLocation = new File(DB_LOCATION); + dbLocation.delete(); + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); 
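+ // Derby does not support DROP TABLE IF EXISTS, so ignore the error if the table does not exist yet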
+ + try { + stmt.execute("drop table TEST_NULL_INT"); + } catch (final SQLException sqle) { + } + + stmt.execute("create table TEST_NULL_INT (id integer not null, val1 integer, val2 integer, constraint my_pk primary key (id))"); + + runner.setIncomingConnection(true); + runner.setProperty(ExecuteSQL.SQL_SELECT_QUERY, "select * from TEST_NULL_INT"); + MockRecordWriter recordWriter = new MockRecordWriter(null, true, -1); + runner.addControllerService("writer", recordWriter); + runner.setProperty(ExecuteSQLRecord.RECORD_WRITER_FACTORY, "writer"); + runner.enableControllerService(recordWriter); + runner.enqueue("Hello".getBytes()); + runner.run(); + + runner.assertAllFlowFilesTransferred(ExecuteSQL.REL_SUCCESS, 1); + MockFlowFile firstFlowFile = runner.getFlowFilesForRelationship(ExecuteSQL.REL_SUCCESS).get(0); + firstFlowFile.assertAttributeEquals(ExecuteSQL.RESULT_ROW_COUNT, "0"); + firstFlowFile.assertContentEquals(""); + } + + @Test + public void testWithSqlException() throws Exception { + // remove previous test database, if any + final File dbLocation = new File(DB_LOCATION); + dbLocation.delete(); + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + Statement stmt = con.createStatement(); + + try { + stmt.execute("drop table TEST_NO_ROWS"); + } catch (final SQLException sqle) { + } + + stmt.execute("create table TEST_NO_ROWS (id integer)"); + + runner.setIncomingConnection(false); + // Try a valid SQL statement that will generate an error (val1 does not exist, e.g.) + runner.setProperty(AbstractExecuteSQL.SQL_SELECT_QUERY, "SELECT val1 FROM TEST_NO_ROWS"); + MockRecordWriter recordWriter = new MockRecordWriter(null, true, -1); + runner.addControllerService("writer", recordWriter); + runner.setProperty(ExecuteSQLRecord.RECORD_WRITER_FACTORY, "writer"); + runner.enableControllerService(recordWriter); + runner.run(); + + //No incoming flow file containing a query, and an exception causes no outbound flowfile. + // There should be no flow files on either relationship + runner.assertAllFlowFilesTransferred(AbstractExecuteSQL.REL_FAILURE, 0); + runner.assertAllFlowFilesTransferred(AbstractExecuteSQL.REL_SUCCESS, 0); + } + + public void invokeOnTriggerRecords(final Integer queryTimeout, final String query, final boolean incomingFlowFile, final Map attrs, final boolean setQueryProperty) + throws InitializationException, ClassNotFoundException, SQLException, IOException { + + if (queryTimeout != null) { + runner.setProperty(AbstractExecuteSQL.QUERY_TIMEOUT, queryTimeout.toString() + " secs"); + } + + // remove previous test database, if any + final File dbLocation = new File(DB_LOCATION); + dbLocation.delete(); + + // load test data to database + final Connection con = ((DBCPService) runner.getControllerService("dbcp")).getConnection(); + TestJdbcHugeStream.loadTestData2Database(con, 100, 200, 100); + LOGGER.info("test data loaded"); + + // ResultSet size will be 1x200x100 = 20 000 rows + // because of where PER.ID = ${person.id} + final int nrOfRows = 20000; + + MockRecordWriter recordWriter = new MockRecordWriter(null, true, -1); + runner.addControllerService("writer", recordWriter); + runner.setProperty(ExecuteSQLRecord.RECORD_WRITER_FACTORY, "writer"); + runner.enableControllerService(recordWriter); + + if (incomingFlowFile) { + // incoming FlowFile content is not used, but attributes are used + final Map attributes = (attrs == null) ? 
new HashMap<>() : attrs; + attributes.put("person.id", "10"); + if (!setQueryProperty) { + runner.enqueue(query.getBytes(), attributes); + } else { + runner.enqueue("Hello".getBytes(), attributes); + } + } + + if (setQueryProperty) { + runner.setProperty(AbstractExecuteSQL.SQL_SELECT_QUERY, query); + } + + runner.run(); + runner.assertAllFlowFilesTransferred(AbstractExecuteSQL.REL_SUCCESS, 1); + runner.assertAllFlowFilesContainAttribute(AbstractExecuteSQL.REL_SUCCESS, AbstractExecuteSQL.RESULT_QUERY_DURATION); + runner.assertAllFlowFilesContainAttribute(AbstractExecuteSQL.REL_SUCCESS, AbstractExecuteSQL.RESULT_QUERY_EXECUTION_TIME); + runner.assertAllFlowFilesContainAttribute(AbstractExecuteSQL.REL_SUCCESS, AbstractExecuteSQL.RESULT_QUERY_FETCH_TIME); + runner.assertAllFlowFilesContainAttribute(AbstractExecuteSQL.REL_SUCCESS, AbstractExecuteSQL.RESULT_ROW_COUNT); + + final List flowfiles = runner.getFlowFilesForRelationship(AbstractExecuteSQL.REL_SUCCESS); + final long executionTime = Long.parseLong(flowfiles.get(0).getAttribute(AbstractExecuteSQL.RESULT_QUERY_EXECUTION_TIME)); + final long fetchTime = Long.parseLong(flowfiles.get(0).getAttribute(AbstractExecuteSQL.RESULT_QUERY_FETCH_TIME)); + final long durationTime = Long.parseLong(flowfiles.get(0).getAttribute(AbstractExecuteSQL.RESULT_QUERY_DURATION)); + assertEquals(durationTime, fetchTime + executionTime); + } + + + /** + * Simple implementation only for ExecuteSQL processor testing. + */ + class DBCPServiceSimpleImpl extends AbstractControllerService implements DBCPService { + + @Override + public String getIdentifier() { + return "dbcp"; + } + + @Override + public Connection getConnection() throws ProcessException { + try { + Class.forName("org.apache.derby.jdbc.EmbeddedDriver"); + final Connection con = DriverManager.getConnection("jdbc:derby:" + DB_LOCATION + ";create=true"); + return con; + } catch (final Exception e) { + throw new ProcessException("getConnection failed: " + e); + } + } + } + +} diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestGetHTTP.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestGetHTTP.java index e1d76e02428c..6666f19a74ed 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestGetHTTP.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestGetHTTP.java @@ -505,16 +505,16 @@ public final void testCookiePolicy() throws Exception { private static Map getTruststoreProperties() { final Map props = new HashMap<>(); - props.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - props.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); + props.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/truststore.jks"); + props.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "passwordpassword"); props.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); return props; } private static Map getKeystoreProperties() { final Map properties = new HashMap<>(); - properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); + properties.put(StandardSSLContextService.KEYSTORE.getName(), 
"src/test/resources/keystore.jks"); + properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "passwordpassword"); properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); return properties; } diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestHandleHttpRequest.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestHandleHttpRequest.java index 19e147e0d952..8c98a1ccc195 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestHandleHttpRequest.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestHandleHttpRequest.java @@ -52,16 +52,16 @@ public class TestHandleHttpRequest { private static Map getTruststoreProperties() { final Map props = new HashMap<>(); - props.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - props.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); + props.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/truststore.jks"); + props.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "passwordpassword"); props.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); return props; } private static Map getKeystoreProperties() { final Map properties = new HashMap<>(); - properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); + properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/keystore.jks"); + properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "passwordpassword"); properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); return properties; } diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestInvokeHTTP.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestInvokeHTTP.java index 70a183ab3e0a..ca048022b6c6 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestInvokeHTTP.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestInvokeHTTP.java @@ -85,11 +85,11 @@ private static TestServer createServer() throws IOException { public void testSslSetHttpRequest() throws Exception { final Map sslProperties = new HashMap<>(); - sslProperties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - sslProperties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); + sslProperties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/keystore.jks"); + sslProperties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "passwordpassword"); sslProperties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); - sslProperties.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - sslProperties.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); + sslProperties.put(StandardSSLContextService.TRUSTSTORE.getName(), 
"src/test/resources/truststore.jks"); + sslProperties.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "passwordpassword"); sslProperties.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); runner = TestRunners.newTestRunner(InvokeHTTP.class); diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestInvokeHttpSSL.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestInvokeHttpSSL.java index 43380aba4c5a..5ed3a16c14ad 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestInvokeHttpSSL.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestInvokeHttpSSL.java @@ -89,41 +89,49 @@ public void after() { runner.shutdown(); } - protected static TestServer createServer() throws IOException { + static TestServer createServer() throws IOException { return new TestServer(serverSslProperties); } - protected static Map createServerSslProperties(boolean clientAuth) { + static Map createServerSslProperties(boolean clientAuth) { final Map map = new HashMap<>(); // if requesting client auth then we must also provide a truststore if (clientAuth) { map.put(TestServer.NEED_CLIENT_AUTH, Boolean.toString(true)); - map.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - map.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); - map.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); + map.putAll(getTruststoreProperties()); } else { map.put(TestServer.NEED_CLIENT_AUTH, Boolean.toString(false)); } // keystore is always required for the server SSL properties - map.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - map.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); - map.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); + map.putAll(getKeystoreProperties()); return map; } - protected static Map createSslProperties(boolean clientAuth) { + static Map createSslProperties(boolean clientAuth) { final Map map = new HashMap<>(); // if requesting client auth then we must provide a keystore if (clientAuth) { - map.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - map.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); - map.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); + map.putAll(getKeystoreProperties()); } // truststore is always required for the client SSL properties - map.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - map.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); + map.putAll(getTruststoreProperties()); + return map; + } + + private static Map getKeystoreProperties() { + final Map map = new HashMap<>(); + map.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/keystore.jks"); + map.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "passwordpassword"); + map.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); + return map; + } + + private static Map getTruststoreProperties() { + final Map map = new HashMap<>(); + map.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/truststore.jks"); + 
map.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "passwordpassword"); map.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); return map; } diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenHTTP.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenHTTP.java index 799d1b7aad4d..f8e9015238fc 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenHTTP.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenHTTP.java @@ -27,15 +27,31 @@ import org.apache.nifi.util.TestRunner; import org.apache.nifi.util.TestRunners; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import com.google.common.base.Charsets; +import com.google.common.base.Optional; +import com.google.common.collect.Iterables; +import com.google.common.io.Files; + +import okhttp3.MediaType; +import okhttp3.MultipartBody; +import okhttp3.OkHttpClient; +import okhttp3.Request; +import okhttp3.RequestBody; +import okhttp3.Response; + import java.io.DataOutputStream; +import java.io.File; import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; import java.util.ArrayList; import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLContext; @@ -66,7 +82,7 @@ public class TestListenHTTP { public void setup() throws IOException { proc = new ListenHTTP(); runner = TestRunners.newTestRunner(proc); - availablePort = NetworkUtils.availablePort();; + availablePort = NetworkUtils.availablePort(); runner.setVariable(PORT_VARIABLE, Integer.toString(availablePort)); runner.setVariable(BASEPATH_VARIABLE, HTTP_BASE_PATH); @@ -181,8 +197,8 @@ public void testSecureInvalidSSLConfiguration() throws Exception { private int executePOST(String message) throws Exception { final SSLContextService sslContextService = runner.getControllerService(SSL_CONTEXT_SERVICE_IDENTIFIER, SSLContextService.class); final boolean secure = (sslContextService != null); - final String scheme = secure ? "https" : "http"; - final URL url = new URL(scheme + "://localhost:" + availablePort + "/" + HTTP_BASE_PATH); + String endpointUrl = buildUrl(secure); + final URL url = new URL(endpointUrl); HttpURLConnection connection; if (secure) { @@ -207,6 +223,10 @@ private int executePOST(String message) throws Exception { return connection.getResponseCode(); } + private String buildUrl(final boolean secure) { + return String.format("%s://localhost:%s/%s", secure ? 
"https" : "http" , availablePort, HTTP_BASE_PATH); + } + private void testPOSTRequestsReceived(int returnCode) throws Exception { final List messages = new ArrayList<>(); messages.add("payload 1"); @@ -225,13 +245,29 @@ private void testPOSTRequestsReceived(int returnCode) throws Exception { mockFlowFiles.get(3).assertContentEquals("payload 2"); } + private void startWebServerAndSendRequests(Runnable sendRequestToWebserver, int numberOfExpectedFlowFiles, int returnCode) throws Exception { + final ProcessSessionFactory processSessionFactory = runner.getProcessSessionFactory(); + final ProcessContext context = runner.getProcessContext(); + proc.createHttpServer(context); + + new Thread(sendRequestToWebserver).start(); + + long responseTimeout = 10000; + + int numTransferred = 0; + long startTime = System.currentTimeMillis(); + while (numTransferred < numberOfExpectedFlowFiles && (System.currentTimeMillis() - startTime < responseTimeout)) { + proc.onTrigger(context, processSessionFactory); + numTransferred = runner.getFlowFilesForRelationship(RELATIONSHIP_SUCCESS).size(); + Thread.sleep(100); + } + + runner.assertTransferCount(ListenHTTP.RELATIONSHIP_SUCCESS, numberOfExpectedFlowFiles); + } + private void startWebServerAndSendMessages(final List messages, int returnCode) throws Exception { - final ProcessSessionFactory processSessionFactory = runner.getProcessSessionFactory(); - final ProcessContext context = runner.getProcessContext(); - proc.createHttpServer(context); - Runnable sendMessagestoWebServer = () -> { try { for (final String message : messages) { @@ -244,30 +280,18 @@ private void startWebServerAndSendMessages(final List messages, int retu fail("Not expecting error here."); } }; - new Thread(sendMessagestoWebServer).start(); - - long responseTimeout = 10000; - - int numTransferred = 0; - long startTime = System.currentTimeMillis(); - while (numTransferred < messages.size() && (System.currentTimeMillis() - startTime < responseTimeout)) { - proc.onTrigger(context, processSessionFactory); - numTransferred = runner.getFlowFilesForRelationship(RELATIONSHIP_SUCCESS).size(); - Thread.sleep(100); - } - - runner.assertTransferCount(ListenHTTP.RELATIONSHIP_SUCCESS, messages.size()); + startWebServerAndSendRequests(sendMessagestoWebServer, messages.size(), returnCode); } private SSLContextService configureProcessorSslContextService() throws InitializationException { final SSLContextService sslContextService = new StandardRestrictedSSLContextService(); runner.addControllerService(SSL_CONTEXT_SERVICE_IDENTIFIER, sslContextService); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/localhost-ts.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/truststore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/localhost-ks.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/keystore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "passwordpassword"); 
runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, "JKS"); runner.setProperty(ListenHTTP.SSL_CONTEXT_SERVICE, SSL_CONTEXT_SERVICE_IDENTIFIER); @@ -277,14 +301,124 @@ private SSLContextService configureProcessorSslContextService() throws Initializ private SSLContextService configureInvalidProcessorSslContextService() throws InitializationException { final SSLContextService sslContextService = new StandardSSLContextService(); runner.addControllerService(SSL_CONTEXT_SERVICE_IDENTIFIER, sslContextService); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/localhost-ts.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/truststore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/localhost-ks.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/keystore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, "JKS"); runner.setProperty(ListenHTTP.SSL_CONTEXT_SERVICE, SSL_CONTEXT_SERVICE_IDENTIFIER); return sslContextService; } + + + @Test(/*timeout=10000*/) + public void testMultipartFormDataRequest() throws Exception { + + runner.setProperty(ListenHTTP.PORT, Integer.toString(availablePort)); + runner.setProperty(ListenHTTP.BASE_PATH, HTTP_BASE_PATH); + runner.setProperty(ListenHTTP.RETURN_CODE, Integer.toString(HttpServletResponse.SC_OK)); + + final SSLContextService sslContextService = runner.getControllerService(SSL_CONTEXT_SERVICE_IDENTIFIER, SSLContextService.class); + final boolean isSecure = (sslContextService != null); + + Runnable sendRequestToWebserver = () -> { + try { + MultipartBody multipartBody = new MultipartBody.Builder().setType(MultipartBody.FORM) + .addFormDataPart("p1", "v1") + .addFormDataPart("p2", "v2") + .addFormDataPart("file1", "my-file-text.txt", RequestBody.create(MediaType.parse("text/plain"), createTextFile("my-file-text.txt", "Hello", "World"))) + .addFormDataPart("file2", "my-file-data.json", RequestBody.create(MediaType.parse("application/json"), createTextFile("my-file-text.txt", "{ \"name\":\"John\", \"age\":30 }"))) + .addFormDataPart("file3", "my-file-binary.bin", RequestBody.create(MediaType.parse("application/octet-stream"), generateRandomBinaryData(100))) + .build(); + + Request request = + new Request.Builder() + .url(buildUrl(isSecure)) + .post(multipartBody) + .build(); + + int timeout = 3000; + OkHttpClient client = new OkHttpClient.Builder() + .readTimeout(timeout, TimeUnit.MILLISECONDS) + .writeTimeout(timeout, TimeUnit.MILLISECONDS) + .build(); + + try (Response response = client.newCall(request).execute()) { + Assert.assertTrue(String.format("Unexpected code: %s, body: %s", response.code(), response.body().string()), response.isSuccessful()); + } + } catch (final Throwable t) { + t.printStackTrace(); + Assert.fail(t.toString()); + } + }; + + + startWebServerAndSendRequests(sendRequestToWebserver, 
5, 200); + + runner.assertAllFlowFilesTransferred(ListenHTTP.RELATIONSHIP_SUCCESS, 5); + List flowFilesForRelationship = runner.getFlowFilesForRelationship(ListenHTTP.RELATIONSHIP_SUCCESS); + // Part fragments are not processed in the order we submitted them. + // We cannot rely on the order we sent them in. + MockFlowFile mff = findFlowFile(flowFilesForRelationship, "http.multipart.name", "p1"); + mff.assertAttributeEquals("http.multipart.name", "p1"); + mff.assertAttributeExists("http.multipart.size"); + mff.assertAttributeEquals("http.multipart.fragments.sequence.number", "1"); + mff.assertAttributeEquals("http.multipart.fragments.total.number", "5"); + mff.assertAttributeExists("http.headers.multipart.content-disposition"); + + mff = findFlowFile(flowFilesForRelationship, "http.multipart.name", "p2"); + mff.assertAttributeEquals("http.multipart.name", "p2"); + mff.assertAttributeExists("http.multipart.size"); + mff.assertAttributeExists("http.multipart.fragments.sequence.number"); + mff.assertAttributeEquals("http.multipart.fragments.total.number", "5"); + mff.assertAttributeExists("http.headers.multipart.content-disposition"); + + mff = findFlowFile(flowFilesForRelationship, "http.multipart.name", "file1"); + mff.assertAttributeEquals("http.multipart.name", "file1"); + mff.assertAttributeEquals("http.multipart.filename", "my-file-text.txt"); + mff.assertAttributeEquals("http.headers.multipart.content-type", "text/plain"); + mff.assertAttributeExists("http.multipart.size"); + mff.assertAttributeExists("http.multipart.fragments.sequence.number"); + mff.assertAttributeEquals("http.multipart.fragments.total.number", "5"); + mff.assertAttributeExists("http.headers.multipart.content-disposition"); + + mff = findFlowFile(flowFilesForRelationship, "http.multipart.name", "file2"); + mff.assertAttributeEquals("http.multipart.name", "file2"); + mff.assertAttributeEquals("http.multipart.filename", "my-file-data.json"); + mff.assertAttributeEquals("http.headers.multipart.content-type", "application/json"); + mff.assertAttributeExists("http.multipart.size"); + mff.assertAttributeExists("http.multipart.fragments.sequence.number"); + mff.assertAttributeEquals("http.multipart.fragments.total.number", "5"); + mff.assertAttributeExists("http.headers.multipart.content-disposition"); + + mff = findFlowFile(flowFilesForRelationship, "http.multipart.name", "file3"); + mff.assertAttributeEquals("http.multipart.name", "file3"); + mff.assertAttributeEquals("http.multipart.filename", "my-file-binary.bin"); + mff.assertAttributeEquals("http.headers.multipart.content-type", "application/octet-stream"); + mff.assertAttributeExists("http.multipart.size"); + mff.assertAttributeExists("http.multipart.fragments.sequence.number"); + mff.assertAttributeEquals("http.multipart.fragments.total.number", "5"); + mff.assertAttributeExists("http.headers.multipart.content-disposition"); + } + + private byte[] generateRandomBinaryData(int i) { + byte[] bytes = new byte[100]; + new Random().nextBytes(bytes); + return bytes; + } + private File createTextFile(String fileName, String... 
lines) throws IOException { + File file = new File(fileName); + file.deleteOnExit(); + for (String string : lines) { + Files.append(string, file, Charsets.UTF_8); + } + return file; + } + protected MockFlowFile findFlowFile(List<MockFlowFile> flowFilesForRelationship, String attributeName, String attributeValue) { + Optional<MockFlowFile> optional = Iterables.tryFind(flowFilesForRelationship, ff -> ff.getAttribute(attributeName).equals(attributeValue)); + Assert.assertTrue(optional.isPresent()); + return optional.get(); + } } diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenRELP.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenRELP.java index 5ff47dcc9c1d..3843183abca4 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenRELP.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenRELP.java @@ -152,11 +152,11 @@ public void testBatching() throws IOException, InterruptedException { public void testTLS() throws InitializationException, IOException, InterruptedException { final SSLContextService sslContextService = new StandardSSLContextService(); runner.addControllerService("ssl-context", sslContextService); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/localhost-ts.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/truststore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/localhost-ks.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/keystore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, "JKS"); runner.enableControllerService(sslContextService); diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenTCP.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenTCP.java index 08127d1e03a1..f4d3aa059400 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenTCP.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenTCP.java @@ -123,11 +123,11 @@ public void testTLSClientAuthRequiredAndClientCertProvided() throws Initializati // Make an SSLContext with a key and trust store to send the test messages final SSLContext clientSslContext = SslContextFactory.createSslContext( - "src/test/resources/localhost-ks.jks", - "localtest".toCharArray(), + "src/test/resources/keystore.jks", + "passwordpassword".toCharArray(), "jks", -
"src/test/resources/localhost-ts.jks", - "localtest".toCharArray(), + "src/test/resources/truststore.jks", + "passwordpassword".toCharArray(), "jks", org.apache.nifi.security.util.SslContextFactory.ClientAuth.valueOf("NONE"), "TLS"); @@ -156,8 +156,8 @@ public void testTLSClientAuthRequiredAndClientCertNotProvided() throws Initializ // Make an SSLContext that only has the trust store, this should not work since the processor has client auth REQUIRED final SSLContext clientSslContext = SslContextFactory.createTrustSslContext( - "src/test/resources/localhost-ts.jks", - "localtest".toCharArray(), + "src/test/resources/truststore.jks", + "passwordpassword".toCharArray(), "jks", "TLS"); @@ -185,8 +185,8 @@ public void testTLSClientAuthNoneAndClientCertNotProvided() throws Initializatio // Make an SSLContext that only has the trust store, this should not work since the processor has client auth REQUIRED final SSLContext clientSslContext = SslContextFactory.createTrustSslContext( - "src/test/resources/localhost-ts.jks", - "localtest".toCharArray(), + "src/test/resources/truststore.jks", + "passwordpassword".toCharArray(), "jks", "TLS"); @@ -261,11 +261,11 @@ protected void runTCP(final List messages, final int expectedTransferred private SSLContextService configureProcessorSslContextService() throws InitializationException { final SSLContextService sslContextService = new StandardRestrictedSSLContextService(); runner.addControllerService("ssl-context", sslContextService); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/localhost-ts.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/truststore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/localhost-ks.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/keystore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, "JKS"); runner.enableControllerService(sslContextService); diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenTCPRecord.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenTCPRecord.java index 7314f9853227..20145b662798 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenTCPRecord.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestListenTCPRecord.java @@ -161,11 +161,11 @@ public void testTLSClientAuthRequiredAndClientCertProvided() throws Initializati // Make an SSLContext with a key and trust store to send the test messages final SSLContext clientSslContext = SslContextFactory.createSslContext( - "src/test/resources/localhost-ks.jks", - "localtest".toCharArray(), + 
"src/test/resources/keystore.jks", + "passwordpassword".toCharArray(), "jks", - "src/test/resources/localhost-ts.jks", - "localtest".toCharArray(), + "src/test/resources/truststore.jks", + "passwordpassword".toCharArray(), "jks", org.apache.nifi.security.util.SslContextFactory.ClientAuth.valueOf("NONE"), "TLS"); @@ -192,8 +192,8 @@ public void testTLSClientAuthRequiredAndClientCertNotProvided() throws Initializ // Make an SSLContext that only has the trust store, this should not work since the processor has client auth REQUIRED final SSLContext clientSslContext = SslContextFactory.createTrustSslContext( - "src/test/resources/localhost-ts.jks", - "localtest".toCharArray(), + "src/test/resources/truststore.jks", + "passwordpassword".toCharArray(), "jks", "TLS"); @@ -209,8 +209,8 @@ public void testTLSClientAuthNoneAndClientCertNotProvided() throws Initializatio // Make an SSLContext that only has the trust store, this should work since the processor has client auth NONE final SSLContext clientSslContext = SslContextFactory.createTrustSslContext( - "src/test/resources/localhost-ts.jks", - "localtest".toCharArray(), + "src/test/resources/truststore.jks", + "passwordpassword".toCharArray(), "jks", "TLS"); @@ -266,11 +266,11 @@ protected void runTCP(final List messages, final int expectedTransferred private SSLContextService configureProcessorSslContextService() throws InitializationException { final SSLContextService sslContextService = new StandardRestrictedSSLContextService(); runner.addControllerService("ssl-context", sslContextService); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/localhost-ts.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/truststore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/localhost-ks.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "localtest"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/keystore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "passwordpassword"); runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, "JKS"); runner.enableControllerService(sslContextService); diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPostHTTP.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPostHTTP.java index ef3448735b65..3b6917963939 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPostHTTP.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPostHTTP.java @@ -44,11 +44,15 @@ import org.junit.Assert; public class TestPostHTTP { - private TestServer server; private TestRunner runner; private CaptureServlet servlet; + private final String KEYSTORE_PATH = "src/test/resources/keystore.jks"; + private final String KEYSTORE_AND_TRUSTSTORE_PASSWORD = 
"passwordpassword"; + private final String TRUSTSTORE_PATH = "src/test/resources/truststore.jks"; + private final String JKS_TYPE = "JKS"; + private void setup(final Map sslProperties) throws Exception { // set up web service ServletHandler handler = new ServletHandler(); @@ -75,16 +79,16 @@ public void cleanup() throws Exception { public void testTruststoreSSLOnly() throws Exception { final Map sslProps = new HashMap<>(); sslProps.put(TestServer.NEED_CLIENT_AUTH, "false"); - sslProps.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - sslProps.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); - sslProps.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); + sslProps.put(StandardSSLContextService.KEYSTORE.getName(), KEYSTORE_PATH); + sslProps.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + sslProps.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), JKS_TYPE); setup(sslProps); final SSLContextService sslContextService = new StandardSSLContextService(); runner.addControllerService("ssl-context", sslContextService); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/localhost-ts.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, TRUSTSTORE_PATH); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, KEYSTORE_AND_TRUSTSTORE_PASSWORD); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, JKS_TYPE); runner.enableControllerService(sslContextService); runner.setProperty(PostHTTP.URL, server.getSecureUrl()); @@ -100,23 +104,23 @@ public void testTruststoreSSLOnly() throws Exception { @Test public void testTwoWaySSL() throws Exception { final Map sslProps = new HashMap<>(); - sslProps.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - sslProps.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); - sslProps.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); - sslProps.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - sslProps.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); - sslProps.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); + sslProps.put(StandardSSLContextService.KEYSTORE.getName(), KEYSTORE_PATH); + sslProps.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + sslProps.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), JKS_TYPE); + sslProps.put(StandardSSLContextService.TRUSTSTORE.getName(), TRUSTSTORE_PATH); + sslProps.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + sslProps.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), JKS_TYPE); sslProps.put(TestServer.NEED_CLIENT_AUTH, "true"); setup(sslProps); final SSLContextService sslContextService = new StandardSSLContextService(); runner.addControllerService("ssl-context", sslContextService); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/localhost-ts.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); - 
runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/localhost-ks.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "localtest"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, "JKS"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, TRUSTSTORE_PATH); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, KEYSTORE_AND_TRUSTSTORE_PASSWORD); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, JKS_TYPE); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, KEYSTORE_PATH); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, KEYSTORE_AND_TRUSTSTORE_PASSWORD); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, JKS_TYPE); runner.enableControllerService(sslContextService); runner.setProperty(PostHTTP.URL, server.getSecureUrl()); @@ -132,20 +136,20 @@ public void testTwoWaySSL() throws Exception { @Test public void testOneWaySSLWhenServerConfiguredForTwoWay() throws Exception { final Map sslProps = new HashMap<>(); - sslProps.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - sslProps.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); - sslProps.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); - sslProps.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - sslProps.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); - sslProps.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); + sslProps.put(StandardSSLContextService.KEYSTORE.getName(), KEYSTORE_PATH); + sslProps.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + sslProps.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), JKS_TYPE); + sslProps.put(StandardSSLContextService.TRUSTSTORE.getName(), TRUSTSTORE_PATH); + sslProps.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + sslProps.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), JKS_TYPE); sslProps.put(TestServer.NEED_CLIENT_AUTH, "true"); setup(sslProps); final SSLContextService sslContextService = new StandardSSLContextService(); runner.addControllerService("ssl-context", sslContextService); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/localhost-ts.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/truststore.jks"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "passwordpassword"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, JKS_TYPE); runner.enableControllerService(sslContextService); runner.setProperty(PostHTTP.URL, server.getSecureUrl()); @@ -202,23 +206,23 @@ public void testSendAsFlowFile() throws Exception { @Test public void testSendAsFlowFileSecure() throws Exception { final Map sslProps = new HashMap<>(); - sslProps.put(StandardSSLContextService.KEYSTORE.getName(), 
"src/test/resources/localhost-ks.jks"); - sslProps.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); - sslProps.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); - sslProps.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - sslProps.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); - sslProps.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); + sslProps.put(StandardSSLContextService.KEYSTORE.getName(), KEYSTORE_PATH); + sslProps.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + sslProps.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), JKS_TYPE); + sslProps.put(StandardSSLContextService.TRUSTSTORE.getName(), TRUSTSTORE_PATH); + sslProps.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + sslProps.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), JKS_TYPE); sslProps.put(TestServer.NEED_CLIENT_AUTH, "true"); setup(sslProps); final SSLContextService sslContextService = new StandardSSLContextService(); runner.addControllerService("ssl-context", sslContextService); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, "src/test/resources/localhost-ts.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); - runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, "src/test/resources/localhost-ks.jks"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, "localtest"); - runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, "JKS"); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE, TRUSTSTORE_PATH); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_PASSWORD, KEYSTORE_AND_TRUSTSTORE_PASSWORD); + runner.setProperty(sslContextService, StandardSSLContextService.TRUSTSTORE_TYPE, JKS_TYPE); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE, KEYSTORE_PATH); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_PASSWORD, KEYSTORE_AND_TRUSTSTORE_PASSWORD); + runner.setProperty(sslContextService, StandardSSLContextService.KEYSTORE_TYPE, JKS_TYPE); runner.enableControllerService(sslContextService); runner.setProperty(PostHTTP.URL, server.getSecureUrl()); diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPutEmail.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPutEmail.java index 7df04ba61d59..97ff77effdde 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPutEmail.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPutEmail.java @@ -264,9 +264,11 @@ public void testOutgoingMessageWithFlowfileContent() throws Exception { // verifies that are set on the outgoing Message correctly runner.setProperty(PutEmail.SMTP_HOSTNAME, "smtp-host"); runner.setProperty(PutEmail.HEADER_XMAILER, "TestingNiFi"); - runner.setProperty(PutEmail.FROM, "test@apache.org"); + runner.setProperty(PutEmail.FROM, "test@apache.org,from@apache.org"); 
runner.setProperty(PutEmail.MESSAGE, "${body}"); - runner.setProperty(PutEmail.TO, "recipient@apache.org"); + runner.setProperty(PutEmail.TO, "recipient@apache.org,another@apache.org"); + runner.setProperty(PutEmail.CC, "recipientcc@apache.org,anothercc@apache.org"); + runner.setProperty(PutEmail.BCC, "recipientbcc@apache.org,anotherbcc@apache.org"); runner.setProperty(PutEmail.CONTENT_AS_MESSAGE, "${sendContent}"); Map attributes = new HashMap(); @@ -283,11 +285,15 @@ public void testOutgoingMessageWithFlowfileContent() throws Exception { assertEquals("Expected a single message to be sent", 1, processor.getMessages().size()); Message message = processor.getMessages().get(0); assertEquals("test@apache.org", message.getFrom()[0].toString()); + assertEquals("from@apache.org", message.getFrom()[1].toString()); assertEquals("X-Mailer Header", "TestingNiFi", message.getHeader("X-Mailer")[0]); assertEquals("Some Text", message.getContent()); assertEquals("recipient@apache.org", message.getRecipients(RecipientType.TO)[0].toString()); - assertNull(message.getRecipients(RecipientType.BCC)); - assertNull(message.getRecipients(RecipientType.CC)); + assertEquals("another@apache.org", message.getRecipients(RecipientType.TO)[1].toString()); + assertEquals("recipientcc@apache.org", message.getRecipients(RecipientType.CC)[0].toString()); + assertEquals("anothercc@apache.org", message.getRecipients(RecipientType.CC)[1].toString()); + assertEquals("recipientbcc@apache.org", message.getRecipients(RecipientType.BCC)[0].toString()); + assertEquals("anotherbcc@apache.org", message.getRecipients(RecipientType.BCC)[1].toString()); } } diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPutTcpSSL.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPutTcpSSL.java index b7589875a555..70e98ca55c5e 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPutTcpSSL.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/TestPutTcpSSL.java @@ -57,11 +57,11 @@ public void configureProperties(String host, int port, String outgoingMessageDel private static Map createSslProperties() { final Map map = new HashMap<>(); - map.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - map.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); + map.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/keystore.jks"); + map.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "passwordpassword"); map.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); - map.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - map.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); + map.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/truststore.jks"); + map.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "passwordpassword"); map.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); return map; } diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/JdbcCommonTestUtils.java 
b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/JdbcCommonTestUtils.java new file mode 100644 index 000000000000..ad571588dc06 --- /dev/null +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/JdbcCommonTestUtils.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.processors.standard.util; + +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +class JdbcCommonTestUtils { + static ResultSet resultSetReturningMetadata(ResultSetMetaData metadata) throws SQLException { + final ResultSet rs = mock(ResultSet.class); + when(rs.getMetaData()).thenReturn(metadata); + + final AtomicInteger counter = new AtomicInteger(1); + Mockito.doAnswer(new Answer() { + @Override + public Boolean answer(InvocationOnMock invocation) throws Throwable { + return counter.getAndDecrement() > 0; + } + }).when(rs).next(); + + return rs; + } + + static InputStream convertResultSetToAvroInputStream(ResultSet rs) throws SQLException, IOException { + final ByteArrayOutputStream baos = new ByteArrayOutputStream(); + + JdbcCommon.convertToAvroStream(rs, baos, false); + + final byte[] serializedBytes = baos.toByteArray(); + + return new ByteArrayInputStream(serializedBytes); + } +} diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/TCPTestServer.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/TCPTestServer.java index 01d492e352b1..a698f398b537 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/TCPTestServer.java +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/TCPTestServer.java @@ -53,8 +53,8 @@ public TCPTestServer(final InetAddress ipAddress, final ArrayBlockingQueue() { - @Override - public Boolean answer(InvocationOnMock invocation) throws Throwable { - return counter.getAndDecrement() > 0; - } - }).when(rs).next(); + final ResultSet rs = resultSetReturningMetadata(metadata); when(rs.getObject(Mockito.anyInt())).thenReturn(bigDecimal); @@ -587,34 +580,75 @@ 
public void testConvertToAvroStreamForShort() throws SQLException, IOException { when(metadata.getColumnName(1)).thenReturn("t_int"); when(metadata.getTableName(1)).thenReturn("table"); - final ResultSet rs = mock(ResultSet.class); - when(rs.getMetaData()).thenReturn(metadata); - - final AtomicInteger counter = new AtomicInteger(1); - Mockito.doAnswer(new Answer() { - @Override - public Boolean answer(InvocationOnMock invocation) throws Throwable { - return counter.getAndDecrement() > 0; - } - }).when(rs).next(); + final ResultSet rs = resultSetReturningMetadata(metadata); final short s = 25; when(rs.getObject(Mockito.anyInt())).thenReturn(s); - final ByteArrayOutputStream baos = new ByteArrayOutputStream(); + final InputStream instream = convertResultSetToAvroInputStream(rs); - JdbcCommon.convertToAvroStream(rs, baos, false); + final DatumReader datumReader = new GenericDatumReader<>(); + try (final DataFileStream dataFileReader = new DataFileStream<>(instream, datumReader)) { + GenericRecord record = null; + while (dataFileReader.hasNext()) { + record = dataFileReader.next(record); + assertEquals(Short.toString(s), record.get("t_int").toString()); + } + } + } - final byte[] serializedBytes = baos.toByteArray(); + @Test + public void testConvertToAvroStreamForUnsignedIntegerWithPrecision1ReturnedAsLong_NIFI5612() throws SQLException, IOException { + final String mockColumnName = "t_int"; + final ResultSetMetaData metadata = mock(ResultSetMetaData.class); + when(metadata.getColumnCount()).thenReturn(1); + when(metadata.getColumnType(1)).thenReturn(Types.INTEGER); + when(metadata.isSigned(1)).thenReturn(false); + when(metadata.getPrecision(1)).thenReturn(1); + when(metadata.getColumnName(1)).thenReturn(mockColumnName); + when(metadata.getTableName(1)).thenReturn("table"); - final InputStream instream = new ByteArrayInputStream(serializedBytes); + final ResultSet rs = resultSetReturningMetadata(metadata); + + final Long ret = 0L; + when(rs.getObject(Mockito.anyInt())).thenReturn(ret); + + final InputStream instream = convertResultSetToAvroInputStream(rs); final DatumReader datumReader = new GenericDatumReader<>(); try (final DataFileStream dataFileReader = new DataFileStream<>(instream, datumReader)) { GenericRecord record = null; while (dataFileReader.hasNext()) { record = dataFileReader.next(record); - assertEquals(Short.toString(s), record.get("t_int").toString()); + assertEquals(Long.toString(ret), record.get(mockColumnName).toString()); + } + } + } + + @Test + public void testConvertToAvroStreamForUnsignedIntegerWithPrecision10() throws SQLException, IOException { + final String mockColumnName = "t_int"; + final ResultSetMetaData metadata = mock(ResultSetMetaData.class); + when(metadata.getColumnCount()).thenReturn(1); + when(metadata.getColumnType(1)).thenReturn(Types.INTEGER); + when(metadata.isSigned(1)).thenReturn(false); + when(metadata.getPrecision(1)).thenReturn(10); + when(metadata.getColumnName(1)).thenReturn(mockColumnName); + when(metadata.getTableName(1)).thenReturn("table"); + + final ResultSet rs = resultSetReturningMetadata(metadata); + + final Long ret = 0L; + when(rs.getObject(Mockito.anyInt())).thenReturn(ret); + + final InputStream instream = convertResultSetToAvroInputStream(rs); + + final DatumReader datumReader = new GenericDatumReader<>(); + try (final DataFileStream dataFileReader = new DataFileStream<>(instream, datumReader)) { + GenericRecord record = null; + while (dataFileReader.hasNext()) { + record = dataFileReader.next(record); + 
assertEquals(Long.toString(ret), record.get(mockColumnName).toString()); } } } diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/TestJdbcCommonConvertToAvro.java b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/TestJdbcCommonConvertToAvro.java new file mode 100644 index 000000000000..eb736e29fca0 --- /dev/null +++ b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/java/org/apache/nifi/processors/standard/util/TestJdbcCommonConvertToAvro.java @@ -0,0 +1,152 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.processors.standard.util; + +import org.apache.avro.file.DataFileStream; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.io.DatumReader; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.mockito.Mockito; + +import java.io.IOException; +import java.io.InputStream; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.stream.IntStream; + +import static java.sql.Types.INTEGER; +import static java.sql.Types.SMALLINT; +import static java.sql.Types.TINYINT; +import static java.sql.Types.BIGINT; +import static org.apache.nifi.processors.standard.util.JdbcCommonTestUtils.convertResultSetToAvroInputStream; +import static org.apache.nifi.processors.standard.util.JdbcCommonTestUtils.resultSetReturningMetadata; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@RunWith(Parameterized.class) +public class TestJdbcCommonConvertToAvro { + + private final static boolean SIGNED = true; + private final static boolean UNSIGNED = false; + + private static int[] range(int start, int end) { + return IntStream.rangeClosed(start, end).toArray(); + } + + @Parameterized.Parameters(name = "{index}: {0}") + public static Collection data() { + Map typeWithPrecisionRange = new HashMap<>(); + typeWithPrecisionRange.put(TINYINT, range(1,3)); + typeWithPrecisionRange.put(SMALLINT, range(1,5)); + typeWithPrecisionRange.put(INTEGER, range(1,9)); + + ArrayList params = new ArrayList<>(); + + typeWithPrecisionRange.forEach( (sqlType, precisions) -> { + for (int precision : precisions) { + params.add(new TestParams(sqlType, precision, SIGNED)); + params.add(new TestParams(sqlType, precision, UNSIGNED)); + } + }); + // remove cases that we know should fail + params.removeIf(param -> + param.sqlType == 
INTEGER + && + param.precision == 9 + && + param.signed == UNSIGNED + ); + + return params; + } + + @Parameterized.Parameter + public TestParams testParams; + + static class TestParams { + int sqlType; + int precision; + boolean signed; + + TestParams(int sqlType, int precision, boolean signed) { + this.sqlType = sqlType; + this.precision = precision; + this.signed = signed; + } + private String humanReadableType() { + switch(sqlType){ + case TINYINT: + return "TINYINT"; + case INTEGER: + return "INTEGER"; + case SMALLINT: + return "SMALLINT"; + case BIGINT: + return "BIGINT"; + default: + return "UNKNOWN - ADD TO LIST"; + } + } + private String humanReadableSigned() { + if(signed) return "SIGNED"; + return "UNSIGNED"; + } + public String toString(){ + return String.format( + "TestParams(SqlType=%s, Precision=%s, Signed=%s)", + humanReadableType(), + precision, + humanReadableSigned()); + } + } + + @Test + public void testConvertToAvroStreamForNumbers() throws SQLException, IOException { + final ResultSetMetaData metadata = mock(ResultSetMetaData.class); + when(metadata.getColumnCount()).thenReturn(1); + when(metadata.getColumnType(1)).thenReturn(testParams.sqlType); + when(metadata.isSigned(1)).thenReturn(testParams.signed); + when(metadata.getPrecision(1)).thenReturn(testParams.precision); + when(metadata.getColumnName(1)).thenReturn("t_int"); + when(metadata.getTableName(1)).thenReturn("table"); + + final ResultSet rs = resultSetReturningMetadata(metadata); + + final int ret = 0; + when(rs.getObject(Mockito.anyInt())).thenReturn(ret); + + final InputStream instream = convertResultSetToAvroInputStream(rs); + + final DatumReader datumReader = new GenericDatumReader<>(); + try (final DataFileStream dataFileReader = new DataFileStream<>(instream, datumReader)) { + GenericRecord record = null; + while (dataFileReader.hasNext()) { + record = dataFileReader.next(record); + assertEquals(Integer.toString(ret), record.get("t_int").toString()); + } + } + } +} diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/resources/keystore.jks b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/resources/keystore.jks new file mode 100644 index 0000000000000000000000000000000000000000..246fe888efbb981188c9dcca109b5d2c52f2b435 GIT binary patch literal 3088 zcmb`|c{tSF9suz9%_a@TzDvU}%JBY1_7`DFP1&+#U$ZAATQ9;`LzV{F%6MfdJ87|v zE2AhhmLf7Oyo_p0MWJw2_jzyc^W6K~`^Wpod7kq<=RDu@$M<}`XY<|WI|u{<-5$XA zvE!7#kH1Qw_qpu_?~N4xO$Y=51D%~@XpW4zpN#`sJCe8iw3r=g#nrL- zc}e<${}VaCYCr4gf{ezT(R=D z-nlpW#qF(FTF*1$2O#WiubmF;Hrxmy6*igNRlfI9l~@uN!qhs~FEEGiMBlUNgZzPX(j)?E&4g~kNMFchr_X$68d zLt}BE%T`bPS@41`N-iwN`9RK`z@zznQIT?}DVA}T$?N6wuhG{w2?-ZIV8Z4AapH~> zM=w+HO5Fp%YPU&g%=71;K{*z_W9CqOV7L|boJ98Vakn zj}6I7pIDmnvrHDGH%#ao7SjJ1`5HddZo{Zee{!RGW`+H+BrM;}xmtv)l}OxJAZh8- zwT`Bb9j|;Yw_=HDi5fgOoK;}HBaAL{SGgCDyYgC&a%im9w+!Rh4OO{I+3JX2BP;2q`9{!Jli?d3(E_2u<*?$NuwN zAx~K%jw`&zdYQkM#CBOGN@Z)7Q<2%226`=Tiu8=+T0z;V zY7d2carE2qJ_kfpxZ~T^YKhz^<)mJ)c!*1P+p8$2xI%eB(zQC(9^*_&h|gtE3?{fC_^JA?rpHl7SEY8Zz?j!0+^9w##-C~jZ9C0k?>zlY?s;}AZqkO zbydnL6y`L;WU);zfw*aVg|DuDb_E^dVtr1h)_rk&z2RQwX?`5pO;@!2(%#Elv zUe?<*C&k;IN?al00~>7(D+{R{ic4NlN$k7d`Kle9?n&9GU&+1B-px6N;$i0b^mLdH z$(AH9DJAw4%+ES<-~96R++imu41fUT@mIn4Vo+wg1TuVZQMj;q`m}DIxpU)D>FgW# zCt@$))iSz4*>BtOaB)yHPFQ#tq@;HhsC0~}gtS`Jb~GUw{o7yR_5m~iY{B6$C~Otv z{uT?tp&;Z(Y6Z9`D2&{({aqpuTrlXLGvG&Rfp4kF|9${JO@A)o_WRi`ApkLd%?d`^ zwVRHvkb+>ylrv{t<84w-ewm=TU#?Km9_vmc&9~O%6%D7U^XiNP=!ZoKGTfS%Z}Nw) zH{x9_j=9q}jQmOY)74O+sC%~#z>AO)@0IaZlNPG}wW`Bg=)g&(tXNyq_!yt+OrN=~ 
zI($%TXQXiyc7nf@_hT*hgCy0khUrx^=WzesaX3$8g63@cB7^&p{McFHy1vdY==?h9 zr$i9-K5UdfLvmB==xlIx+U1VV&1Bu2@vaB97}@x`Igcs&i$SY;of|i20_+_wEhLNk z+=yMjh0Gcl{PF_zdC}bg6b%M;P1(`c(n3>q$pDAw=cgoWJ+UFRdnX}(x-;8$N7{Br z_{kkw;BRtA_^UgDhJ(-XdZNP~%U{gf7FCwHzJh|Rf_+h9s``swy%q}DLj3H6C zm&5jubG)kdY)^pIY2viya-LOlTTWE{Q#6J~$`{9ISBVxD5%5QXmr|5=0xtV6gXHrVgG0f?qugFkf{fs5ABY4YFOs`kuIZEzZ*rUX}v9X zN><>P7`!CrHo!+=rw-Pd*t`9=BZ6as?F1UlV0w}EQMEuBMHopBcmt14|g&sQmlf^%aznq zUn5%e{qm(@k9XT@{BRcw#{5+cunF?~P=f$r+me1V`5(#sPm|wG|95zQ?aSX?xh;m} z?PMiEdK%D&u3H6GI^rhnY*xAGv<;Iqj>{WtWW@EZletfeN9l*o^*{aEh^`J6S`PPx zb!lA+afuH^Y<;QMQN8n<1{6v(4GYP(NR)PaHRS}EU0EA{9Fv&6>jAFDwB;aj&{_Ji z%^1H5<93JiC5io0Rj&h9D-N^eGlyk6Ijv?(^|B%=o%YPcg*(0Cu1 literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/resources/localhost-ks.jks b/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/test/resources/localhost-ks.jks deleted file mode 100755 index df36197d92ab8e9870f42666d74c47646fd56f26..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3512 zcmchZXH*l&7RQqiLLfv+Xd3T`p&Iir~-L#Y=#ud?{E1Ldil!ycf^TKF)2%4dD_ z&CRl2juv-w;hX`>r;n!ME0*eJZQo{liYbqFr%&s4KWji3-S|{|Q#z3Bi!_n-HQIvn z)_yhbW3OmoReGZ$;mdsOy)j4ml{e?MpM3BXEZ&%y>@=UK++W7rcU+QDvQxbsYBP>C ziqWW_4}oMI2<5S^ml11u$vzs(Bz1QY%@RE`7dI!_J9pQZTH;ai+~*FZ-!&0FO}AsY zOxhC^^;ctKwcW!%@WtyMsu@6xd3zdv(I!8(v5$IseOUHF#yBeb=(KkBD?D*{)a_{6 zy11;ZtH1s5w8!+ewZvnrkKmE%X*#>Ul%b`b!V6_&L1)$_<6^i6k7Bh$Cbm8X7HN40 zS#G)q)jhM1yqIk|ug4$}yr>lNM^7CDi=S{rQqn53pE8J!Vk=?&Q_pATc&ICwBQ zS(^FTsqy1f=9leGJUj=gReI>!b5N4p{xQ7Yh?)gcpugwPJJKnkHLG#|+$oVkg4yV1aO1A$e7 zaQjo^Q#=uo%^bn4wLVp1-Lpy>m3Om-GmM2@#_FNth9W;Io4*MtEVVL^kgC7SFA-we z#qVjp#>O>$RucpY72eI-)`&+06CPE;lJYi4}@3m`# zJ_AU}qlHP&l8^Sxdy9$-4gOUb4UL4637oYGzAr%oZTy>dW-CT`%o3B(duSJ1(e{$Y zM<9UyvWx;+833RQMN{a4(G-wlHXR5E0)ZV>5?#@72%}__LDViB2!zoC&;$$&%?P2h z0z(iWD~mq^C<3ITh2caaj#n5E%ofhx0nUQPL~nPTGlqqB22Ex{K(u_Eac+1F2b%p@ zfFWRi2!bZ=dhQr@H0!ZShxiYx(fr(S%o#KWt$@YIDPiPok3$Sr4*fIyhqIvoh5uR( z+G9aS0kQzl6d)6b0t5omn(X@$hGj=yE`{&~S2Gtia5Gn?EL_(yG|G+K@=fp0D^(rz zxT1R64#p$fx05POs#deg9+l!c8gwhEor|BbmTA)uRlj-gz6)6_cB&4*Tc-M`bK9>c z*H4msFu-a#7iT^GkUgZvxqIcr(X*;=?XWBEh_4N)!@=`Ah5M!kt4cNNSPATwH?AXC zdENd&XqoAr2Dq}BQ6Gnc3D~XB-xhZWLe^fld)&QlbH&rFP$(?%sxBMiB_=cw?r7CH@9Dd8TnkYHTi)yt>lPMf~Qh{TVz-%zd}mpoX@Lx z7dHOF@cCta&Y}DYj>8M>y0uqvg+{1>9qQK_{DUz^17>%6baZre>Zg9-*JTh{JeEgE(Xc$3KCdGsnB0X~&288Q1yu50`xi`1$u zxw%0F{zoTzg?QpaXg#S%Pc}TD&G9sE#r*FN1sL2ia!PT<-siU_xsUiWo{_zcpd9U!Ni)~G zLi}%abS2t*$1jmQ&rh~)%FTUKeNh{2;~_;7Z1a$&S<~zN0o(9-C8gCXFPUtQaEi(Ok}L|C$~05J}GOTeZ2`>N!9w z|5?&Yv(xUn4w}Md-)+>Xm-idnwqK!l-ep)3M#!opq&#uM)v4O^f$5XSSy^-7P*&lV zi*Bv9WLRzp8QFh_Sp$75|b~$}d%! zADHN!cN?}Zq;Pfp`_&u3UsSsuum4tHmJnSKKJnFdCJT}j<9dY@Y9;CdG*Uh6JugW| zjszU%k%LnRdK;+FkhCS;r3tV3Qu-?q>U@4Gz20FckyBYJ$a2l5D|g6nnw|8he9Zuw zE>xvKu;5sW8RFB^dtl3__u=TrP;92~^c`S>V6o8(>LDq#2#WbkDhztv-Y+KRxxc_( z9-Ig8g=a}sc!GElV)j`DAZZobG^EycOweBae{tMx(CCHt3QRem*{+4B%V0XzUy$!_ zUZ;}$4v!kJ?fiOsh zU6?00F|Q<1!8boIGdazbS85=;kbaqV>qY`p(FtRc*H!<=v7&I|*F*PwV zGR!y-b78_&{p;J_RLYcZ=UKH^oM-d2R~63QK8sqv6wbQ1c%Aj-tT=16Xl@Dp3*V;e zHf*;EU2s!d?EmGAwL4$*KMm76>RxSI^Y_{r+12XOyBVZ5SkF88wdmZUBCW>1mjpsy z^o8A>D^$57@$5Uk|7*7VJjNZDDg1En^sD7BzpeZg;PKvK$44Vgqc3^M$IC50#>}kV z5b(o}W%EH!_vB=5`RI47^%}8dvO6oHmz@0=8J8WnQn7ZTq?gtGwUN*b>^*j51vd`gXZ}Tj=d=! zZ5Q2p8)B?EgtP6!|DK($dm-WAwXXk9U+SN8m>b$H+55Tn^-f3Qi)|}kFy(38X`WLz zb3tscaO}@TH^6nkgPpdLY>Z2bWWLj})^PvwNNvp0VmYkR- zC$rcPs*X#TBPg{vHL)l;!%)D052TJ;m^~!5xFj<#9cRWgF)#(@MiUbYLkm#GG%*0? 
zP$-w~?rCD&0nCOvupnUsa^#sB8yWuA2RF)=3TX!2_nM>k=E?JKg4=^^-n%dyma}iz z7O0l#8tb4G_&d_JH{#d+qhEI!d^66fYKrV z-7ByE+kQ;?hjsY#V=I=4^0WMI{&xAO++ilu5aB4XiALW_Kd;kHysq{BlM=J!+>0KJ z$C*SKrY8jSiz;)U*)(Zq)1ucc+#e!jzJi?g{o#VvYqM?do!+xL#%xFU&dMq4cmJ|_ z)$}vmhufCDDLpVUyl>Z)NdISr>;jDqTRg=Im0$R1hzV~$&uP?iV%b9*v8wKnnqG|u zi`U6%Z(de9F>i4__b)}$q>sOosu)$Q&n)@4Z$;pQ&K1q~A4WZ$&o-$$Ev}(DR5gXs z$NJxSPc7!gRtAte7ABjPohmimJL!w=83*1S57xEkb0m5`p0y|T%0y91?Xr*$k!KcN z@qQxIFmK}r4~|)iTkLXzMLu+2k#TdI871R(!(9`2~U67z}vXIJMe5+P?ELGIFyr7$_U^8gR2Qhq7?`|G6{iVN=9$4_qR+voW1o`bX1kq_?@ zm7?Yu-O|hoDsa9N@MUVyrd<=O*InmV-0wL}LeXNm*vwgmI&)i2xoutU%O^gq+-#-h zgcU!vCxtetZirTB`cPrNOwe`BjfgY1S)#*|1p91Qo|habzwWRw{qs>-^K}Z`hO4)3 zSIUzY)UVZ%QmoRgXzEd9>P=X}&%!9YU~gQOux6db!Fj1Vv2}&_t)uD|R&4O>6_I{d zsCSIx%+#5m7tCg~+3?%uOvdTe*F$Rt zebbjM$oo^i)iz_A4D$trP*WymMh3>k^#-*De832h{z<6{wH5fM3WFu8l*4Bb6fbc?q7ui3Ma?czxTd62X+i-dt#19k;q{i-Zt1|n=6 z+H8!htnAE8a26wS6amu*Fp3x%;$M|c<$Br5aA-Zh`^uo~Wl`!k!$N+rAKm}gt?NVV zYn6)fmHTq0T%UdUsC(tf>wh-zugDkvQkKrV&wNr*Y-2X-|KIB@^>$R5GBG*(lon0! zZ;^?AbUT{i$=9D(FVwb7kUOEi+Gn#X+u0bI3n5nobT4k-BQr_T{`T_6SwEI~yi@Kw zFTeN0R^RvQOEeC9N z9oJ^^ymM}2j4=Bzl|5_bntl+HlHs5ImG#RU$C=4?=Kr%5`)8e9xTbMSRd_+e)hDMV z{$6UT4g6+eRVVNyVx3Chngx@VtPkMaA?X>r^jGNWKqZspMPe?;zuNh^J${uwG4xsu E0JdaNg#Z8m literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-standard-bundle/pom.xml b/nifi-nar-bundles/nifi-standard-bundle/pom.xml index 0a0952b3b8bf..e06a76687839 100644 --- a/nifi-nar-bundles/nifi-standard-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-standard-bundle/pom.xml @@ -33,9 +33,9 @@ nifi-standard-web-test-utils - 2.9.5 + 2.9.7 2.2.0 - 0.1.0 + 0.1.1 2.26 @@ -170,27 +170,27 @@ org.apache.commons commons-compress - 1.16.1 + 1.18 org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.bouncycastle bcprov-jdk15on - 1.59 + 1.60 org.bouncycastle bcpg-jdk15on - 1.59 + 1.60 org.bouncycastle bcpkix-jdk15on - 1.59 + 1.60 commons-codec @@ -230,7 +230,7 @@ org.tukaani xz - 1.6 + 1.8 net.sf.saxon @@ -250,12 +250,12 @@ org.apache.activemq activemq-client - 5.15.3 + 5.15.6 org.apache.activemq activemq-broker - 5.15.3 + 5.15.6 test @@ -271,7 +271,7 @@ org.apache.tika tika-core - 1.17 + 1.19 com.squareup.okhttp3 @@ -281,7 +281,7 @@ com.burgstaller okhttp-digest - 1.13 + 1.18 jar @@ -297,7 +297,7 @@ org.xerial.snappy snappy-java - 1.1.2 + 1.1.7.2 com.h2database @@ -330,7 +330,7 @@ com.github.wnameless json-flattener - 0.5.0 + 0.6.0 org.apache.bval @@ -373,7 +373,7 @@ org.apache.calcite calcite-core - 1.12.0 + 1.17.0 org.apache.avro diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-distributed-cache-services-bundle/nifi-distributed-cache-server/src/test/resources/keystore.jks b/nifi-nar-bundles/nifi-standard-services/nifi-distributed-cache-services-bundle/nifi-distributed-cache-server/src/test/resources/keystore.jks new file mode 100644 index 0000000000000000000000000000000000000000..246fe888efbb981188c9dcca109b5d2c52f2b435 GIT binary patch literal 3088 zcmb`|c{tSF9suz9%_a@TzDvU}%JBY1_7`DFP1&+#U$ZAATQ9;`LzV{F%6MfdJ87|v zE2AhhmLf7Oyo_p0MWJw2_jzyc^W6K~`^Wpod7kq<=RDu@$M<}`XY<|WI|u{<-5$XA zvE!7#kH1Qw_qpu_?~N4xO$Y=51D%~@XpW4zpN#`sJCe8iw3r=g#nrL- zc}e<${}VaCYCr4gf{ezT(R=D z-nlpW#qF(FTF*1$2O#WiubmF;Hrxmy6*igNRlfI9l~@uN!qhs~FEEGiMBlUNgZzPX(j)?E&4g~kNMFchr_X$68d zLt}BE%T`bPS@41`N-iwN`9RK`z@zznQIT?}DVA}T$?N6wuhG{w2?-ZIV8Z4AapH~> zM=w+HO5Fp%YPU&g%=71;K{*z_W9CqOV7L|boJ98Vakn zj}6I7pIDmnvrHDGH%#ao7SjJ1`5HddZo{Zee{!RGW`+H+BrM;}xmtv)l}OxJAZh8- zwT`Bb9j|;Yw_=HDi5fgOoK;}HBaAL{SGgCDyYgC&a%im9w+!Rh4OO{I+3JX2BP;2q`9{!Jli?d3(E_2u<*?$NuwN zAx~K%jw`&zdYQkM#CBOGN@Z)7Q<2%226`=Tiu8=+T0z;V 
zY7d2carE2qJ_kfpxZ~T^YKhz^<)mJ)c!*1P+p8$2xI%eB(zQC(9^*_&h|gtE3?{fC_^JA?rpHl7SEY8Zz?j!0+^9w##-C~jZ9C0k?>zlY?s;}AZqkO zbydnL6y`L;WU);zfw*aVg|DuDb_E^dVtr1h)_rk&z2RQwX?`5pO;@!2(%#Elv zUe?<*C&k;IN?al00~>7(D+{R{ic4NlN$k7d`Kle9?n&9GU&+1B-px6N;$i0b^mLdH z$(AH9DJAw4%+ES<-~96R++imu41fUT@mIn4Vo+wg1TuVZQMj;q`m}DIxpU)D>FgW# zCt@$))iSz4*>BtOaB)yHPFQ#tq@;HhsC0~}gtS`Jb~GUw{o7yR_5m~iY{B6$C~Otv z{uT?tp&;Z(Y6Z9`D2&{({aqpuTrlXLGvG&Rfp4kF|9${JO@A)o_WRi`ApkLd%?d`^ zwVRHvkb+>ylrv{t<84w-ewm=TU#?Km9_vmc&9~O%6%D7U^XiNP=!ZoKGTfS%Z}Nw) zH{x9_j=9q}jQmOY)74O+sC%~#z>AO)@0IaZlNPG}wW`Bg=)g&(tXNyq_!yt+OrN=~ zI($%TXQXiyc7nf@_hT*hgCy0khUrx^=WzesaX3$8g63@cB7^&p{McFHy1vdY==?h9 zr$i9-K5UdfLvmB==xlIx+U1VV&1Bu2@vaB97}@x`Igcs&i$SY;of|i20_+_wEhLNk z+=yMjh0Gcl{PF_zdC}bg6b%M;P1(`c(n3>q$pDAw=cgoWJ+UFRdnX}(x-;8$N7{Br z_{kkw;BRtA_^UgDhJ(-XdZNP~%U{gf7FCwHzJh|Rf_+h9s``swy%q}DLj3H6C zm&5jubG)kdY)^pIY2viya-LOlTTWE{Q#6J~$`{9ISBVxD5%5QXmr|5=0xtV6gXHrVgG0f?qugFkf{fs5ABY4YFOs`kuIZEzZ*rUX}v9X zN><>P7`!CrHo!+=rw-Pd*t`9=BZ6as?F1UlV0w}EQMEuBMHopBcmt14|g&sQmlf^%aznq zUn5%e{qm(@k9XT@{BRcw#{5+cunF?~P=f$r+me1V`5(#sPm|wG|95zQ?aSX?xh;m} z?PMiEdK%D&u3H6GI^rhnY*xAGv<;Iqj>{WtWW@EZletfeN9l*o^*{aEh^`J6S`PPx zb!lA+afuH^Y<;QMQN8n<1{6v(4GYP(NR)PaHRS}EU0EA{9Fv&6>jAFDwB;aj&{_Ji z%^1H5<93JiC5io0Rj&h9D-N^eGlyk6Ijv?(^|B%=o%YPcg*(0Cu1 literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-distributed-cache-services-bundle/nifi-distributed-cache-server/src/test/resources/localhost-ks.jks b/nifi-nar-bundles/nifi-standard-services/nifi-distributed-cache-services-bundle/nifi-distributed-cache-server/src/test/resources/localhost-ks.jks deleted file mode 100755 index df36197d92ab8e9870f42666d74c47646fd56f26..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3512 zcmchZXH*l&7RQqiLLfv+Xd3T`p&Iir~-L#Y=#ud?{E1Ldil!ycf^TKF)2%4dD_ z&CRl2juv-w;hX`>r;n!ME0*eJZQo{liYbqFr%&s4KWji3-S|{|Q#z3Bi!_n-HQIvn z)_yhbW3OmoReGZ$;mdsOy)j4ml{e?MpM3BXEZ&%y>@=UK++W7rcU+QDvQxbsYBP>C ziqWW_4}oMI2<5S^ml11u$vzs(Bz1QY%@RE`7dI!_J9pQZTH;ai+~*FZ-!&0FO}AsY zOxhC^^;ctKwcW!%@WtyMsu@6xd3zdv(I!8(v5$IseOUHF#yBeb=(KkBD?D*{)a_{6 zy11;ZtH1s5w8!+ewZvnrkKmE%X*#>Ul%b`b!V6_&L1)$_<6^i6k7Bh$Cbm8X7HN40 zS#G)q)jhM1yqIk|ug4$}yr>lNM^7CDi=S{rQqn53pE8J!Vk=?&Q_pATc&ICwBQ zS(^FTsqy1f=9leGJUj=gReI>!b5N4p{xQ7Yh?)gcpugwPJJKnkHLG#|+$oVkg4yV1aO1A$e7 zaQjo^Q#=uo%^bn4wLVp1-Lpy>m3Om-GmM2@#_FNth9W;Io4*MtEVVL^kgC7SFA-we z#qVjp#>O>$RucpY72eI-)`&+06CPE;lJYi4}@3m`# zJ_AU}qlHP&l8^Sxdy9$-4gOUb4UL4637oYGzAr%oZTy>dW-CT`%o3B(duSJ1(e{$Y zM<9UyvWx;+833RQMN{a4(G-wlHXR5E0)ZV>5?#@72%}__LDViB2!zoC&;$$&%?P2h z0z(iWD~mq^C<3ITh2caaj#n5E%ofhx0nUQPL~nPTGlqqB22Ex{K(u_Eac+1F2b%p@ zfFWRi2!bZ=dhQr@H0!ZShxiYx(fr(S%o#KWt$@YIDPiPok3$Sr4*fIyhqIvoh5uR( z+G9aS0kQzl6d)6b0t5omn(X@$hGj=yE`{&~S2Gtia5Gn?EL_(yG|G+K@=fp0D^(rz zxT1R64#p$fx05POs#deg9+l!c8gwhEor|BbmTA)uRlj-gz6)6_cB&4*Tc-M`bK9>c z*H4msFu-a#7iT^GkUgZvxqIcr(X*;=?XWBEh_4N)!@=`Ah5M!kt4cNNSPATwH?AXC zdENd&XqoAr2Dq}BQ6Gnc3D~XB-xhZWLe^fld)&QlbH&rFP$(?%sxBMiB_=cw?r7CH@9Dd8TnkYHTi)yt>lPMf~Qh{TVz-%zd}mpoX@Lx z7dHOF@cCta&Y}DYj>8M>y0uqvg+{1>9qQK_{DUz^17>%6baZre>Zg9-*JTh{JeEgE(Xc$3KCdGsnB0X~&288Q1yu50`xi`1$u zxw%0F{zoTzg?QpaXg#S%Pc}TD&G9sE#r*FN1sL2ia!PT<-siU_xsUiWo{_zcpd9U!Ni)~G zLi}%abS2t*$1jmQ&rh~)%FTUKeNh{2;~_;7Z1a$&S<~zN0o(9-C8gCXFPUtQaEi(Ok}L|C$~05J}GOTeZ2`>N!9w z|5?&Yv(xUn4w}Md-)+>Xm-idnwqK!l-ep)3M#!opq&#uM)v4O^f$5XSSy^-7P*&lV zi*Bv9WLRzp8QFh_Sp$75|b~$}d%! 
zADHN!cN?}Zq;Pfp`_&u3UsSsuum4tHmJnSKKJnFdCJT}j<9dY@Y9;CdG*Uh6JugW| zjszU%k%LnRdK;+FkhCS;r3tV3Qu-?q>U@4Gz20FckyBYJ$a2l5D|g6nnw|8he9Zuw zE>xvKu;5sW8RFB^dtl3__u=TrP;92~^c`S>V6o8(>LDq#2#WbkDhztv-Y+KRxxc_( z9-Ig8g=a}sc!GElV)j`DAZZobG^EycOweBae{tMx(CCHt3QRem*{+4B%V0XzUy$!_ zUZ;}$4v!kJ?fiOsh zU6?00F|Q<1!8boIGdazbS85=;kbaqV>qY`p(FtRc*H!<=v7&I|*F*PwV zGR!y-b78_&{p;J_RLYcZ=UKH^oM-d2R~63QK8sqv6wbQ1c%Aj-tT=16Xl@Dp3*V;e zHf*;EU2s!d?EmGAwL4$*KMm76>RxSI^Y_{r+12XOyBVZ5SkF88wdmZUBCW>1mjpsy z^o8A>D^$57@$5Uk|7*7VJjNZDDg1En^sD7BzpeZg;PKvK$44Vgqc3^M$IC50#>}kV z5b(o}W%EH!_vB=5`RI47^%}8dvO6oHmz@0=8J8WnQn7ZTq?gtGwUN*b>^*j51vd`gXZ}Tj=d=! zZ5Q2p8)B?EgtP6!|DK($dm-WAwXXk9U+SN8m>b$H+55Tn^-f3Qi)|}kFy(38X`WLz zb3tscaO}@TH^6nkgPpdLY>Z2bWWLj})^PvwNNvp0VmYkR- zC$rcPs*X#TBPg{vHL)l;!%)D052TJ;m^~!5xFj<#9cRWgF)#(@MiUbYLkm#GG%*0? zP$-w~?rCD&0nCOvupnUsa^#sB8yWuA2RF)=3TX!2_nM>k=E?JKg4=^^-n%dyma}iz z7O0l#8tb4G_&d_JH{#d+qhEI!d^66fYKrV z-7ByE+kQ;?hjsY#V=I=4^0WMI{&xAO++ilu5aB4XiALW_Kd;kHysq{BlM=J!+>0KJ z$C*SKrY8jSiz;)U*)(Zq)1ucc+#e!jzJi?g{o#VvYqM?do!+xL#%xFU&dMq4cmJ|_ z)$}vmhufCDDLpVUyl>Z)NdISr>;jDqTRg=Im0$R1hzV~$&uP?iV%b9*v8wKnnqG|u zi`U6%Z(de9F>i4__b)}$q>sOosu)$Q&n)@4Z$;pQ&K1q~A4WZ$&o-$$Ev}(DR5gXs z$NJxSPc7!gRtAte7ABjPohmimJL!w=83*1S57xEkb0m5`p0y|T%0y91?Xr*$k!KcN z@qQxIFmK}r4~|)iTkLXzMLu+2k#TdI871R(!(9`2~U67z}vXIJMe5+P?ELGIFyr7$_U^8gR2Qhq7?`|G6{iVN=9$4_qR+voW1o`bX1kq_?@ zm7?Yu-O|hoDsa9N@MUVyrd<=O*InmV-0wL}LeXNm*vwgmI&)i2xoutU%O^gq+-#-h zgcU!vCxtetZirTB`cPrNOwe`BjfgY1S)#*|1p91Qo|habzwWRw{qs>-^K}Z`hO4)3 zSIUzY)UVZ%QmoRgXzEd9>P=X}&%!9YU~gQOux6db!Fj1Vv2}&_t)uD|R&4O>6_I{d zsCSIx%+#5m7tCg~+3?%uOvdTe*F$Rt zebbjM$oo^i)iz_A4D$trP*WymMh3>k^#-*De832h{z<6{wH5fM3WFu8l*4Bb6fbc?q7ui3Ma?czxTd62X+i-dt#19k;q{i-Zt1|n=6 z+H8!htnAE8a26wS6amu*Fp3x%;$M|c<$Br5aA-Zh`^uo~Wl`!k!$N+rAKm}gt?NVV zYn6)fmHTq0T%UdUsC(tf>wh-zugDkvQkKrV&wNr*Y-2X-|KIB@^>$R5GBG*(lon0! 
zZ;^?AbUT{i$=9D(FVwb7kUOEi+Gn#X+u0bI3n5nobT4k-BQr_T{`T_6SwEI~yi@Kw zFTeN0R^RvQOEeC9N z9oJ^^ymM}2j4=Bzl|5_bntl+HlHs5ImG#RU$C=4?=Kr%5`)8e9xTbMSRd_+e)hDMV z{$6UT4g6+eRVVNyVx3Chngx@VtPkMaA?X>r^jGNWKqZspMPe?;zuNh^J${uwG4xsu E0JdaNg#Z8m literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-hbase_1_1_2-client-service-bundle/nifi-hbase_1_1_2-client-service/pom.xml b/nifi-nar-bundles/nifi-standard-services/nifi-hbase_1_1_2-client-service-bundle/nifi-hbase_1_1_2-client-service/pom.xml index 41f29fc038d0..1423ea3cea15 100644 --- a/nifi-nar-bundles/nifi-standard-services/nifi-hbase_1_1_2-client-service-bundle/nifi-hbase_1_1_2-client-service/pom.xml +++ b/nifi-nar-bundles/nifi-standard-services/nifi-hbase_1_1_2-client-service-bundle/nifi-hbase_1_1_2-client-service/pom.xml @@ -93,7 +93,7 @@ org.apache.commons commons-lang3 - 3.4 + 3.8.1 org.slf4j diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-hbase_1_1_2-client-service-bundle/pom.xml b/nifi-nar-bundles/nifi-standard-services/nifi-hbase_1_1_2-client-service-bundle/pom.xml index 0b16b4742a0c..c428f1231c2f 100644 --- a/nifi-nar-bundles/nifi-standard-services/nifi-hbase_1_1_2-client-service-bundle/pom.xml +++ b/nifi-nar-bundles/nifi-standard-services/nifi-hbase_1_1_2-client-service-bundle/pom.xml @@ -82,6 +82,12 @@ hadoop-auth ${hadoop.version} + + + io.netty + netty + 3.6.9.Final + diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-hwx-schema-registry-bundle/nifi-hwx-schema-registry-service/pom.xml b/nifi-nar-bundles/nifi-standard-services/nifi-hwx-schema-registry-bundle/nifi-hwx-schema-registry-service/pom.xml index e923cf1c03f1..aa521ca358a3 100644 --- a/nifi-nar-bundles/nifi-standard-services/nifi-hwx-schema-registry-bundle/nifi-hwx-schema-registry-service/pom.xml +++ b/nifi-nar-bundles/nifi-standard-services/nifi-hwx-schema-registry-bundle/nifi-hwx-schema-registry-service/pom.xml @@ -28,8 +28,8 @@ limitations under the License. 
nifi-hwx-schema-registry-service jar - 0.5.1 - 2.9.5 + 0.5.3 + 2.9.7 diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-lookup-services-bundle/nifi-lookup-services/pom.xml b/nifi-nar-bundles/nifi-standard-services/nifi-lookup-services-bundle/nifi-lookup-services/pom.xml index 7fd63361a4a8..26a88395f614 100644 --- a/nifi-nar-bundles/nifi-standard-services/nifi-lookup-services-bundle/nifi-lookup-services/pom.xml +++ b/nifi-nar-bundles/nifi-standard-services/nifi-lookup-services-bundle/nifi-lookup-services/pom.xml @@ -53,12 +53,12 @@ org.apache.commons commons-configuration2 - 2.1.1 + 2.3 org.apache.commons commons-csv - 1.4 + 1.5 commons-beanutils @@ -143,7 +143,7 @@ com.burgstaller okhttp-digest - 1.13 + 1.18 compile diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/pom.xml b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/pom.xml index 30694b50d9c9..260fabb4f2a0 100755 --- a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/pom.xml +++ b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/pom.xml @@ -59,17 +59,12 @@ com.fasterxml.jackson.core jackson-databind - 2.9.5 - - - org.apache.commons - commons-lang3 - 3.7 + 2.9.7 org.apache.commons commons-csv - 1.4 + 1.5 com.fasterxml.jackson.dataformat @@ -114,6 +109,11 @@ 2.2.1 test + + org.apache.commons + commons-text + 1.4 + diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReader.java b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReader.java index eed37f867f4d..97643aa28e29 100644 --- a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReader.java +++ b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReader.java @@ -17,14 +17,6 @@ package org.apache.nifi.avro; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; - import org.apache.avro.Schema; import org.apache.nifi.annotation.documentation.CapabilityDescription; import org.apache.nifi.annotation.documentation.Tags; @@ -35,12 +27,19 @@ import org.apache.nifi.schema.access.SchemaAccessStrategy; import org.apache.nifi.schema.access.SchemaNotFoundException; import org.apache.nifi.schemaregistry.services.SchemaRegistry; -import org.apache.nifi.serialization.MalformedRecordException; import org.apache.nifi.serialization.RecordReader; import org.apache.nifi.serialization.RecordReaderFactory; import org.apache.nifi.serialization.SchemaRegistryService; import org.apache.nifi.serialization.record.RecordSchema; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + @Tags({"avro", "parse", "record", "row", "reader", "delimited", "comma", "separated", "values"}) @CapabilityDescription("Parses Avro data 
and returns each Avro record as an separate Record object. The Avro data may contain the schema itself, " + "or the schema can be externalized and accessed by one of the methods offered by the 'Schema Access Strategy' property.") @@ -83,7 +82,7 @@ protected SchemaAccessStrategy getSchemaAccessStrategy(String allowableValue, Sc } @Override - public RecordReader createRecordReader(final Map variables, final InputStream in, final ComponentLog logger) throws MalformedRecordException, IOException, SchemaNotFoundException { + public RecordReader createRecordReader(final Map variables, final InputStream in, final ComponentLog logger) throws IOException, SchemaNotFoundException { final String schemaAccessStrategy = getConfigurationContext().getProperty(getSchemaAcessStrategyDescriptor()).getValue(); if (EMBEDDED_AVRO_SCHEMA.getValue().equals(schemaAccessStrategy)) { return new AvroReaderWithEmbeddedSchema(in); diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReaderWithEmbeddedSchema.java b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReaderWithEmbeddedSchema.java index aa61e4cf2722..a5e5ce712e06 100644 --- a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReaderWithEmbeddedSchema.java +++ b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReaderWithEmbeddedSchema.java @@ -17,16 +17,14 @@ package org.apache.nifi.avro; -import java.io.IOException; -import java.io.InputStream; - import org.apache.avro.Schema; import org.apache.avro.file.DataFileStream; -import org.apache.avro.generic.GenericDatumReader; import org.apache.avro.generic.GenericRecord; -import org.apache.nifi.serialization.MalformedRecordException; import org.apache.nifi.serialization.record.RecordSchema; +import java.io.IOException; +import java.io.InputStream; + public class AvroReaderWithEmbeddedSchema extends AvroRecordReader { private final DataFileStream dataFileStream; private final InputStream in; @@ -35,7 +33,7 @@ public class AvroReaderWithEmbeddedSchema extends AvroRecordReader { public AvroReaderWithEmbeddedSchema(final InputStream in) throws IOException { this.in = in; - dataFileStream = new DataFileStream<>(in, new GenericDatumReader()); + dataFileStream = new DataFileStream<>(in, new NonCachingDatumReader<>()); this.avroSchema = dataFileStream.getSchema(); recordSchema = AvroTypeUtil.createSchema(avroSchema); } @@ -56,7 +54,7 @@ protected GenericRecord nextAvroRecord() { } @Override - public RecordSchema getSchema() throws MalformedRecordException { + public RecordSchema getSchema() { return recordSchema; } } diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReaderWithExplicitSchema.java b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReaderWithExplicitSchema.java index ce49443b7b57..9197e478339e 100644 --- 
a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReaderWithExplicitSchema.java +++ b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/AvroReaderWithExplicitSchema.java @@ -17,20 +17,17 @@ package org.apache.nifi.avro; -import java.io.EOFException; -import java.io.IOException; -import java.io.InputStream; - import org.apache.avro.Schema; -import org.apache.avro.generic.GenericDatumReader; import org.apache.avro.generic.GenericRecord; import org.apache.avro.io.BinaryDecoder; import org.apache.avro.io.DatumReader; import org.apache.avro.io.DecoderFactory; -import org.apache.nifi.schema.access.SchemaNotFoundException; -import org.apache.nifi.serialization.MalformedRecordException; import org.apache.nifi.serialization.record.RecordSchema; +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; + public class AvroReaderWithExplicitSchema extends AvroRecordReader { private final InputStream in; private final RecordSchema recordSchema; @@ -38,11 +35,11 @@ public class AvroReaderWithExplicitSchema extends AvroRecordReader { private final BinaryDecoder decoder; private GenericRecord genericRecord; - public AvroReaderWithExplicitSchema(final InputStream in, final RecordSchema recordSchema, final Schema avroSchema) throws IOException, SchemaNotFoundException { + public AvroReaderWithExplicitSchema(final InputStream in, final RecordSchema recordSchema, final Schema avroSchema) { this.in = in; this.recordSchema = recordSchema; - datumReader = new GenericDatumReader(avroSchema); + datumReader = new NonCachingDatumReader<>(avroSchema); decoder = DecoderFactory.get().binaryDecoder(in, null); } @@ -67,7 +64,7 @@ protected GenericRecord nextAvroRecord() throws IOException { } @Override - public RecordSchema getSchema() throws MalformedRecordException { + public RecordSchema getSchema() { return recordSchema; } } diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/NonCachingDatumReader.java b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/NonCachingDatumReader.java new file mode 100644 index 000000000000..fa4ab7d3064f --- /dev/null +++ b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/main/java/org/apache/nifi/avro/NonCachingDatumReader.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.avro; + +import org.apache.avro.Schema; +import org.apache.avro.generic.GenericData; +import org.apache.avro.generic.GenericDatumReader; +import org.apache.avro.io.Decoder; + +import java.io.IOException; + +/** + * Override the GenericDatumReader to provide a much more efficient implementation of #readString. The one that is provided by + * GenericDatumReader performs very poorly in some cases because it uses an IdentityHashMap with the key being the Schema so that + * it can stash away the "stringClass" but that performs far worse than just calling JsonNode#getProp. I.e., {@link #readString(Object, Schema, Decoder)} + * in GenericDatumReader calls #getStringClass, which uses an IdentityHashMap to cache results in order to avoid calling {@link #findStringClass(Schema)}. + * However, {@link #findStringClass(Schema)} is much more efficient than using an IdentityHashMap anyway. Additionally, the performance of {@link #findStringClass(Schema)}} + * can be improved slightly and made more readable. + */ +public class NonCachingDatumReader extends GenericDatumReader { + public NonCachingDatumReader() { + super(); + } + + public NonCachingDatumReader(final Schema schema) { + super(schema); + } + + @Override + protected Object readString(final Object old, final Schema expected, final Decoder in) throws IOException { + final Class stringClass = findStringClass(expected); + if (stringClass == String.class) { + return in.readString(); + } + + if (stringClass == CharSequence.class) { + return readString(old, in); + } + + return newInstanceFromString(stringClass, in.readString()); + } + + protected Class findStringClass(Schema schema) { + final String name = schema.getProp(GenericData.STRING_PROP); + if ("String".equals(name)) { + return String.class; + } + + return CharSequence.class; + } +} diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/test/java/org/apache/nifi/csv/TestCSVRecordReader.java b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/test/java/org/apache/nifi/csv/TestCSVRecordReader.java index a4415b7292ce..5095767afbde 100644 --- a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/test/java/org/apache/nifi/csv/TestCSVRecordReader.java +++ b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/test/java/org/apache/nifi/csv/TestCSVRecordReader.java @@ -18,7 +18,7 @@ package org.apache.nifi.csv; import org.apache.commons.csv.CSVFormat; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.nifi.logging.ComponentLog; import org.apache.nifi.serialization.MalformedRecordException; import org.apache.nifi.serialization.SimpleRecordSchema; diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/test/java/org/apache/nifi/csv/TestJacksonCSVRecordReader.java b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/test/java/org/apache/nifi/csv/TestJacksonCSVRecordReader.java index 66486eef1a16..d83cbfbb22a1 100644 --- 
a/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/test/java/org/apache/nifi/csv/TestJacksonCSVRecordReader.java +++ b/nifi-nar-bundles/nifi-standard-services/nifi-record-serialization-services-bundle/nifi-record-serialization-services/src/test/java/org/apache/nifi/csv/TestJacksonCSVRecordReader.java @@ -18,7 +18,7 @@ package org.apache.nifi.csv; import org.apache.commons.csv.CSVFormat; -import org.apache.commons.lang3.StringEscapeUtils; +import org.apache.commons.text.StringEscapeUtils; import org.apache.nifi.logging.ComponentLog; import org.apache.nifi.serialization.MalformedRecordException; import org.apache.nifi.serialization.SimpleRecordSchema; diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/groovy/org/apache/nifi/ssl/StandardSSLContextServiceTest.groovy b/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/groovy/org/apache/nifi/ssl/StandardSSLContextServiceTest.groovy index 6d2f7b20dee7..19c35682d394 100644 --- a/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/groovy/org/apache/nifi/ssl/StandardSSLContextServiceTest.groovy +++ b/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/groovy/org/apache/nifi/ssl/StandardSSLContextServiceTest.groovy @@ -46,12 +46,12 @@ import static groovy.test.GroovyAssert.shouldFail class StandardSSLContextServiceTest { private static final Logger logger = LoggerFactory.getLogger(StandardSSLContextServiceTest.class) - private static final String KEYSTORE_PATH = "src/test/resources/localhost-ks.jks" - private static final String TRUSTSTORE_PATH = "src/test/resources/localhost-ts.jks" - private static final String TRUSTSTORE_PATH_WITH_EL = "\${someAttribute}/localhost-ts.jks" + private static final String KEYSTORE_PATH = "src/test/resources/keystore.jks" + private static final String TRUSTSTORE_PATH = "src/test/resources/truststore.jks" + private static final String TRUSTSTORE_PATH_WITH_EL = "\${someAttribute}/truststore.jks" - private static final String KEYSTORE_PASSWORD = "localtest" - private static final String TRUSTSTORE_PASSWORD = "localtest" + private static final String KEYSTORE_PASSWORD = "passwordpassword" + private static final String TRUSTSTORE_PASSWORD = "passwordpassword" private static final String KEYSTORE_TYPE = "JKS" private static final String TRUSTSTORE_TYPE = "JKS" diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/java/org/apache/nifi/ssl/SSLContextServiceTest.java b/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/java/org/apache/nifi/ssl/SSLContextServiceTest.java index 6cddc7df57f2..b98824c11f29 100644 --- a/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/java/org/apache/nifi/ssl/SSLContextServiceTest.java +++ b/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/java/org/apache/nifi/ssl/SSLContextServiceTest.java @@ -54,6 +54,12 @@ public class SSLContextServiceTest { private static final Logger logger = LoggerFactory.getLogger(SSLContextServiceTest.class); + private final String KEYSTORE_PATH = "src/test/resources/keystore.jks"; + private final String KEYSTORE_AND_TRUSTSTORE_PASSWORD = "passwordpassword"; + private final String JKS_TYPE = "JKS"; + private 
final String TRUSTSTORE_PATH = "src/test/resources/truststore.jks"; + private final String DIFFERENT_PASS_KEYSTORE_PATH = "src/test/resources/keystore-different-password.jks"; + private final String DIFFERENT_KEYSTORE_PASSWORD = "differentpassword"; @Rule public TemporaryFolder tmp = new TemporaryFolder(new File("src/test/resources")); @@ -72,8 +78,8 @@ public void testBad2() throws InitializationException { final TestRunner runner = TestRunners.newTestRunner(TestProcessor.class); final SSLContextService service = new StandardSSLContextService(); final Map properties = new HashMap<>(); - properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); + properties.put(StandardSSLContextService.KEYSTORE.getName(), KEYSTORE_PATH); + properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); runner.addControllerService("test-bad2", service, properties); runner.assertNotValid(service); } @@ -83,10 +89,10 @@ public void testBad3() throws InitializationException { final TestRunner runner = TestRunners.newTestRunner(TestProcessor.class); final SSLContextService service = new StandardSSLContextService(); final Map properties = new HashMap<>(); - properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); - properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); - properties.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); + properties.put(StandardSSLContextService.KEYSTORE.getName(), KEYSTORE_PATH); + properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), JKS_TYPE); + properties.put(StandardSSLContextService.TRUSTSTORE.getName(), TRUSTSTORE_PATH); runner.addControllerService("test-bad3", service, properties); runner.assertNotValid(service); } @@ -96,12 +102,12 @@ public void testBad4() throws InitializationException { final TestRunner runner = TestRunners.newTestRunner(TestProcessor.class); final SSLContextService service = new StandardSSLContextService(); final Map properties = new HashMap<>(); - properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); + properties.put(StandardSSLContextService.KEYSTORE.getName(), KEYSTORE_PATH); properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "wrongpassword"); properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "PKCS12"); - properties.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); + properties.put(StandardSSLContextService.TRUSTSTORE.getName(), TRUSTSTORE_PATH); properties.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "wrongpassword"); - properties.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); + properties.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), JKS_TYPE); runner.addControllerService("test-bad4", service, properties); runner.assertNotValid(service); @@ -113,11 +119,11 @@ public void testBad5() throws InitializationException { final SSLContextService service = new StandardSSLContextService(); final Map properties = new HashMap<>(); properties.put(StandardSSLContextService.KEYSTORE.getName(), 
"src/test/resources/DOES-NOT-EXIST.jks"); - properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); + properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "PKCS12"); - properties.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - properties.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); - properties.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); + properties.put(StandardSSLContextService.TRUSTSTORE.getName(), TRUSTSTORE_PATH); + properties.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + properties.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), JKS_TYPE); runner.addControllerService("test-bad5", service, properties); runner.assertNotValid(service); } @@ -127,12 +133,12 @@ public void testGood() throws InitializationException { final TestRunner runner = TestRunners.newTestRunner(TestProcessor.class); SSLContextService service = new StandardSSLContextService(); runner.addControllerService("test-good1", service); - runner.setProperty(service, StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - runner.setProperty(service, StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); - runner.setProperty(service, StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); - runner.setProperty(service, StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); - runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); + runner.setProperty(service, StandardSSLContextService.KEYSTORE.getName(), KEYSTORE_PATH); + runner.setProperty(service, StandardSSLContextService.KEYSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + runner.setProperty(service, StandardSSLContextService.KEYSTORE_TYPE.getName(), JKS_TYPE); + runner.setProperty(service, StandardSSLContextService.TRUSTSTORE.getName(), TRUSTSTORE_PATH); + runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_TYPE.getName(), JKS_TYPE); runner.enableControllerService(service); runner.setProperty("SSL Context Svc ID", "test-good1"); @@ -150,12 +156,12 @@ public void testWithChanges() throws InitializationException { final TestRunner runner = TestRunners.newTestRunner(TestProcessor.class); SSLContextService service = new StandardSSLContextService(); runner.addControllerService("test-good1", service); - runner.setProperty(service, StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - runner.setProperty(service, StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); - runner.setProperty(service, StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); - runner.setProperty(service, StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); - runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); + runner.setProperty(service, StandardSSLContextService.KEYSTORE.getName(), KEYSTORE_PATH); + runner.setProperty(service, 
StandardSSLContextService.KEYSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + runner.setProperty(service, StandardSSLContextService.KEYSTORE_TYPE.getName(), JKS_TYPE); + runner.setProperty(service, StandardSSLContextService.TRUSTSTORE.getName(), TRUSTSTORE_PATH); + runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_TYPE.getName(), JKS_TYPE); runner.enableControllerService(service); runner.setProperty("SSL Context Svc ID", "test-good1"); @@ -165,11 +171,11 @@ public void testWithChanges() throws InitializationException { runner.setProperty(service, StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/DOES-NOT-EXIST.jks"); runner.assertNotValid(service); - runner.setProperty(service, StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); + runner.setProperty(service, StandardSSLContextService.KEYSTORE.getName(), KEYSTORE_PATH); runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "badpassword"); runner.assertNotValid(service); - runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); + runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); runner.enableControllerService(service); runner.assertValid(service); } @@ -179,8 +185,8 @@ public void testValidationResultsCacheShouldExpire() throws InitializationExcept // Arrange // Copy the keystore and truststore to a tmp directory so the originals are not modified - File originalKeystore = new File("src/test/resources/localhost-ks.jks"); - File originalTruststore = new File("src/test/resources/localhost-ts.jks"); + File originalKeystore = new File(KEYSTORE_PATH); + File originalTruststore = new File(TRUSTSTORE_PATH); File tmpKeystore = tmp.newFile("keystore-tmp.jks"); File tmpTruststore = tmp.newFile("truststore-tmp.jks"); @@ -193,11 +199,11 @@ public void testValidationResultsCacheShouldExpire() throws InitializationExcept final String serviceIdentifier = "test-should-expire"; runner.addControllerService(serviceIdentifier, service); runner.setProperty(service, StandardSSLContextService.KEYSTORE.getName(), tmpKeystore.getAbsolutePath()); - runner.setProperty(service, StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); - runner.setProperty(service, StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); + runner.setProperty(service, StandardSSLContextService.KEYSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + runner.setProperty(service, StandardSSLContextService.KEYSTORE_TYPE.getName(), JKS_TYPE); runner.setProperty(service, StandardSSLContextService.TRUSTSTORE.getName(), tmpTruststore.getAbsolutePath()); - runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); - runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); + runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + runner.setProperty(service, StandardSSLContextService.TRUSTSTORE_TYPE.getName(), JKS_TYPE); runner.enableControllerService(service); runner.setProperty("SSL Context Svc ID", serviceIdentifier); @@ -241,9 +247,9 @@ public void testGoodTrustOnly() { TestRunner runner = TestRunners.newTestRunner(TestProcessor.class); SSLContextService service = new StandardSSLContextService(); 
HashMap properties = new HashMap<>(); - properties.put(StandardSSLContextService.TRUSTSTORE.getName(), "src/test/resources/localhost-ts.jks"); - properties.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), "localtest"); - properties.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), "JKS"); + properties.put(StandardSSLContextService.TRUSTSTORE.getName(), TRUSTSTORE_PATH); + properties.put(StandardSSLContextService.TRUSTSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + properties.put(StandardSSLContextService.TRUSTSTORE_TYPE.getName(), JKS_TYPE); runner.addControllerService("test-good2", service, properties); runner.enableControllerService(service); @@ -262,9 +268,9 @@ public void testGoodKeyOnly() { TestRunner runner = TestRunners.newTestRunner(TestProcessor.class); SSLContextService service = new StandardSSLContextService(); HashMap properties = new HashMap<>(); - properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/localhost-ks.jks"); - properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "localtest"); - properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); + properties.put(StandardSSLContextService.KEYSTORE.getName(), KEYSTORE_PATH); + properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), KEYSTORE_AND_TRUSTSTORE_PASSWORD); + properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), JKS_TYPE); runner.addControllerService("test-good3", service, properties); runner.enableControllerService(service); @@ -280,16 +286,21 @@ public void testGoodKeyOnly() { } } + /** + * This test asserts that the keystore password and key password are different. This is only + * true because they were explicitly set that way. Normal keystores that do not have passwords + * set on individual keys will fail this test. + */ @Test public void testDifferentKeyPassword() { try { final TestRunner runner = TestRunners.newTestRunner(TestProcessor.class); final SSLContextService service = new StandardSSLContextService(); final Map properties = new HashMap<>(); - properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/diffpass-ks.jks"); - properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "storepassword"); + properties.put(StandardSSLContextService.KEYSTORE.getName(), DIFFERENT_PASS_KEYSTORE_PATH); + properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), DIFFERENT_KEYSTORE_PASSWORD); properties.put(StandardSSLContextService.KEY_PASSWORD.getName(), "keypassword"); - properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); + properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), JKS_TYPE); runner.addControllerService("test-diff-keys", service, properties); runner.enableControllerService(service); @@ -305,17 +316,23 @@ public void testDifferentKeyPassword() { } } + /** + * This test asserts that the keystore password and key password are different. This is only + * true because they were explicitly set that way. Normal keystores that do not have passwords + * set on individual keys will fail this test. 
+ */ @Test - public void testDifferentKeyPasswordWithoutSpecifyingPassword() { + public void testDifferentKeyPasswordWithoutSpecifyingKeyPassword() { try { final TestRunner runner = TestRunners.newTestRunner(TestProcessor.class); final SSLContextService service = new StandardSSLContextService(); final Map properties = new HashMap<>(); - properties.put(StandardSSLContextService.KEYSTORE.getName(), "src/test/resources/diffpass-ks.jks"); - properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), "storepassword"); - properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), "JKS"); + properties.put(StandardSSLContextService.KEYSTORE.getName(), DIFFERENT_PASS_KEYSTORE_PATH); + properties.put(StandardSSLContextService.KEYSTORE_PASSWORD.getName(), DIFFERENT_KEYSTORE_PASSWORD); + properties.put(StandardSSLContextService.KEYSTORE_TYPE.getName(), JKS_TYPE); runner.addControllerService("test-diff-keys", service, properties); + // Assert the service is not valid due to an internal "cannot recover key" because the key password is missing runner.assertNotValid(service); } catch (Exception e) { System.out.println(e);
diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/diffpass-ks.jks b/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/diffpass-ks.jks deleted file mode 100644 index c4bd59c6554ba69f722bb36e7c3bcccbe4f15ea0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2246 [binary keystore data omitted]
diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/keystore.jks b/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/keystore.jks new file mode 100644 index 0000000000000000000000000000000000000000..246fe888efbb981188c9dcca109b5d2c52f2b435 GIT binary patch literal 3088 [binary keystore data omitted]
diff --git a/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/localhost-ks.jks b/nifi-nar-bundles/nifi-standard-services/nifi-ssl-context-bundle/nifi-ssl-context-service/src/test/resources/localhost-ks.jks deleted file mode 100755 index df36197d92ab8e9870f42666d74c47646fd56f26..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 3512 [binary keystore data omitted]
diff --git a/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/pom.xml b/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/pom.xml index 70e8833e078b..4ce65663d4a4 100644 --- a/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/pom.xml +++ b/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/pom.xml @@ -35,7 +35,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 org.apache.nifi
diff --git a/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/src/main/java/org/apache/nifi/processors/attributes/UpdateAttribute.java b/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/src/main/java/org/apache/nifi/processors/attributes/UpdateAttribute.java index 29e903fea2a9..cee8d2227ea5 100644 --- a/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/src/main/java/org/apache/nifi/processors/attributes/UpdateAttribute.java +++ b/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/src/main/java/org/apache/nifi/processors/attributes/UpdateAttribute.java @@ -16,23 +16,6 @@ */ package org.apache.nifi.processors.attributes; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicReference; -import java.util.regex.Pattern; - import org.apache.commons.lang3.StringUtils; import org.apache.nifi.annotation.behavior.DynamicProperty; import org.apache.nifi.annotation.behavior.EventDriven; @@ -45,7 +28,6 @@ import org.apache.nifi.annotation.documentation.CapabilityDescription; import org.apache.nifi.annotation.documentation.Tags; import org.apache.nifi.annotation.lifecycle.OnScheduled; -import org.apache.nifi.components.AllowableValue; import org.apache.nifi.components.PropertyDescriptor; import org.apache.nifi.components.PropertyValue; import
org.apache.nifi.components.ValidationContext; @@ -75,6 +57,24 @@ import org.apache.nifi.update.attributes.Rule; import org.apache.nifi.update.attributes.serde.CriteriaSerDe; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicReference; +import java.util.regex.Pattern; + @EventDriven @SideEffectFree @SupportsBatching @@ -97,24 +97,27 @@ public class UpdateAttribute extends AbstractProcessor implements Searchable { private final static Set statelessRelationshipSet; private final static Set statefulRelationshipSet; + private final Map canonicalValueLookup = new LinkedHashMap() { + @Override + protected boolean removeEldestEntry(final Map.Entry eldest) { + return size() > 100; + } + }; + // relationships public static final Relationship REL_SUCCESS = new Relationship.Builder() .description("All successful FlowFiles are routed to this relationship").name("success").build(); - public static final Relationship REL_FAILURE = new Relationship.Builder() - .description("All flowfiles that cannot be updated are routed to this relationship").name("failure").autoTerminateDefault(true).build(); public static final Relationship REL_FAILED_SET_STATE = new Relationship.Builder() .description("A failure to set the state after adding the attributes to the FlowFile will route the FlowFile here.").name("set state fail").build(); static { Set tempStatelessSet = new HashSet<>(); tempStatelessSet.add(REL_SUCCESS); - tempStatelessSet.add(REL_FAILURE); statelessRelationshipSet = Collections.unmodifiableSet(tempStatelessSet); Set tempStatefulSet = new HashSet<>(); tempStatefulSet.add(REL_SUCCESS); - tempStatefulSet.add(REL_FAILURE); tempStatefulSet.add(REL_FAILED_SET_STATE); statefulRelationshipSet = Collections.unmodifiableSet(tempStatefulSet); @@ -149,21 +152,6 @@ public ValidationResult validate(String subject, String input, ValidationContext } }; - public static final AllowableValue FAIL_STOP = new AllowableValue("penalize", "Penalize", "Penalize FlowFiles." + - "This is based on the original behavior of the processor to allow for a smooth transition."); - public static final AllowableValue FAIL_ROUTE = new AllowableValue("route", "Route to Failure Relationship", - "If chosen, failed FlowFiles will be routed to the failure relationship."); - public static final PropertyDescriptor FAILURE_BEHAVIOR = new PropertyDescriptor.Builder() - .name("update-attribute-failure-behavior") - .displayName("Failure Behavior") - .description("Control how to handle errors in Expression Language evaluation. The default behavior is to stop evaluation. 
It can be " + - "changed by the user to route to a failure relationship instead.") - .allowableValues(FAIL_STOP, FAIL_ROUTE) - .defaultValue(FAIL_STOP.getValue()) - .required(true) - .build(); - - // static properties public static final String DELETE_ATTRIBUTES_EXPRESSION_NAME = "Delete Attributes Expression"; public static final PropertyDescriptor DELETE_ATTRIBUTES = new PropertyDescriptor.Builder() @@ -217,7 +205,6 @@ protected List getSupportedPropertyDescriptors() { descriptors.add(DELETE_ATTRIBUTES); descriptors.add(STORE_STATE); descriptors.add(STATEFUL_VARIABLES_INIT_VALUE); - descriptors.add(FAILURE_BEHAVIOR); return Collections.unmodifiableList(descriptors); } @@ -465,51 +452,39 @@ public void onTrigger(final ProcessContext context, final ProcessSession session Map defaultActions = this.defaultActions; List flowFilesToTransfer = new LinkedList<>(); - boolean routeToFailure = context.getProperty(FAILURE_BEHAVIOR).getValue().equals(FAIL_ROUTE.getValue()); - try { - // if there is update criteria specified, evaluate it - if (criteria != null && evaluateCriteria(session, context, criteria, incomingFlowFile, matchedRules, stateInitialAttributes)) { - // apply the actions for each rule and transfer the flowfile - for (final Map.Entry> entry : matchedRules.entrySet()) { - FlowFile match = entry.getKey(); - final List rules = entry.getValue(); - boolean updateWorking = incomingFlowFile.equals(match); - - // execute each matching rule(s) - match = executeActions(session, context, rules, defaultActions, match, stateInitialAttributes, stateWorkingAttributes); - - if (updateWorking) { - incomingFlowFile = match; - } + // if there is update criteria specified, evaluate it + if (criteria != null && evaluateCriteria(session, context, criteria, incomingFlowFile, matchedRules, stateInitialAttributes)) { + // apply the actions for each rule and transfer the flowfile + for (final Map.Entry> entry : matchedRules.entrySet()) { + FlowFile match = entry.getKey(); + final List rules = entry.getValue(); + boolean updateWorking = incomingFlowFile.equals(match); - if (debugEnabled) { - logger.debug("Updated attributes for {}; transferring to '{}'", new Object[]{match, REL_SUCCESS.getName()}); - } + // execute each matching rule(s) + match = executeActions(session, context, rules, defaultActions, match, stateInitialAttributes, stateWorkingAttributes); - // add the match to the list to transfer - flowFilesToTransfer.add(match); + if (updateWorking) { + incomingFlowFile = match; } - } else { - // Either we're running without any rules or the FlowFile didn't match any - incomingFlowFile = executeActions(session, context, null, defaultActions, incomingFlowFile, stateInitialAttributes, stateWorkingAttributes); if (debugEnabled) { - logger.debug("Updated attributes for {}; transferring to '{}'", new Object[]{incomingFlowFile, REL_SUCCESS.getName()}); + logger.debug("Updated attributes for {}; transferring to '{}'", new Object[]{match, REL_SUCCESS.getName()}); } - // add the flowfile to the list to transfer - flowFilesToTransfer.add(incomingFlowFile); + // add the match to the list to transfer + flowFilesToTransfer.add(match); } - } catch (ProcessException pe) { - if (routeToFailure) { - session.transfer(incomingFlowFile, REL_FAILURE); - getLogger().error("Failed to update flowfile attribute(s).", pe); - return; - } else { - throw pe; + } else { + // Either we're running without any rules or the FlowFile didn't match any + incomingFlowFile = executeActions(session, context, null, defaultActions, incomingFlowFile, 
stateInitialAttributes, stateWorkingAttributes); + + if (debugEnabled) { + logger.debug("Updated attributes for {}; transferring to '{}'", new Object[]{incomingFlowFile, REL_SUCCESS.getName()}); } - } + // add the flowfile to the list to transfer + flowFilesToTransfer.add(incomingFlowFile); + } if (stateInitialAttributes != null) { try { @@ -619,6 +594,30 @@ private boolean evaluateCondition(final ProcessContext context, final Condition } } + /** + * This method caches a 'canonical' value for a given attribute value. When this processor is used to update an attribute or add a new + * attribute, if Expression Language is used, we may well end up with a new String object for each attribute for each FlowFile. As a result, + * we will store a different String object for the attribute value of every FlowFile, meaning that we have to keep a lot of String objects + * in heap. By using this 'canonical lookup', we are able to keep only a single String object on the heap. + * + * For example, if we have a property named "abc" and the value is "${abc}${xyz}", and we send through 1,000 FlowFiles with attributes abc="abc" + * and xyz="xyz", then we would end up with 1,000 String objects with a value of "abcxyz". By using this canonical representation, we are able to + * instead hold a single String whose value is "abcxyz" instead of holding 1,000 String objects in heap (1,000 String objects may still be created + * when calling PropertyValue.evaluateAttributeExpressions, but this way those values are garbage collected). + * + * @param attributeValue the value whose canonical value should be returned + * @return the canonical representation of the given attribute value + */ + private synchronized String getCanonicalRepresentation(final String attributeValue) { + final String canonical = this.canonicalValueLookup.get(attributeValue); + if (canonical != null) { + return canonical; + } + + this.canonicalValueLookup.put(attributeValue, attributeValue); + return attributeValue; + } + // Executes the specified action on the specified flowfile.
private FlowFile executeActions(final ProcessSession session, final ProcessContext context, final List rules, final Map defaultActions, final FlowFile flowfile, final Map stateInitialAttributes, final Map stateWorkingAttributes) { @@ -688,7 +687,8 @@ private FlowFile executeActions(final ProcessSession session, final ProcessConte if (notDeleted || setStatefulAttribute) { try { - final String newAttributeValue = getPropertyValue(action.getValue(), context).evaluateAttributeExpressions(flowfile, null, null, stateInitialAttributes).getValue(); + String newAttributeValue = getPropertyValue(action.getValue(), context).evaluateAttributeExpressions(flowfile, null, null, stateInitialAttributes).getValue(); + newAttributeValue = getCanonicalRepresentation(newAttributeValue); // log if appropriate if (debugEnabled) { @@ -746,8 +746,7 @@ private Map getDefaultActions(final Map defaultActions = new HashMap<>(); for (final Map.Entry entry : properties.entrySet()) { - if(entry.getKey() != STORE_STATE && entry.getKey() != STATEFUL_VARIABLES_INIT_VALUE - && entry.getKey() != FAILURE_BEHAVIOR) { + if(entry.getKey() != STORE_STATE && entry.getKey() != STATEFUL_VARIABLES_INIT_VALUE) { final Action action = new Action(); action.setAttribute(entry.getKey().getName()); action.setValue(entry.getValue()); diff --git a/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/src/test/java/org/apache/nifi/update/attributes/TestUpdateAttribute.java b/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/src/test/java/org/apache/nifi/update/attributes/TestUpdateAttribute.java index 35b5536b51c1..50938e673c9d 100644 --- a/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/src/test/java/org/apache/nifi/update/attributes/TestUpdateAttribute.java +++ b/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-processor/src/test/java/org/apache/nifi/update/attributes/TestUpdateAttribute.java @@ -1005,33 +1005,4 @@ public void testDataIsTooShort() { } } - @Test - public void testInvalidExpressionLanguage() { - final TestRunner runner = TestRunners.newTestRunner(new UpdateAttribute()); - runner.setVariable("test", "Squirrel!!1!"); - runner.setProperty("bad_attr", "${test:toDate('yyyy-MM-dd')}"); - runner.setProperty(UpdateAttribute.FAILURE_BEHAVIOR, UpdateAttribute.FAIL_ROUTE); - runner.assertValid(); - - runner.enqueue("Test"); - runner.run(); - - runner.assertTransferCount(UpdateAttribute.REL_SUCCESS, 0); - runner.assertTransferCount(UpdateAttribute.REL_FAILURE, 1); - - runner.clearTransferState(); - - Throwable ex = null; - try { - runner.setProperty(UpdateAttribute.FAILURE_BEHAVIOR, UpdateAttribute.FAIL_STOP); - runner.enqueue("Test"); - runner.run(); - } catch (Throwable t) { - ex = t; - } finally { - Assert.assertNotNull(ex); - Assert.assertTrue(ex.getCause() instanceof ProcessException); - runner.assertQueueNotEmpty(); - } - } } diff --git a/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-ui/pom.xml b/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-ui/pom.xml index bcad29d55843..309bf3a31846 100644 --- a/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-ui/pom.xml +++ b/nifi-nar-bundles/nifi-update-attribute-bundle/nifi-update-attribute-ui/pom.xml @@ -25,7 +25,7 @@ true true - 2.9.5 + 2.9.7 2.26 @@ -137,7 +137,7 @@ org.apache.commons commons-lang3 - 3.7 + 3.8.1 diff --git a/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/pom.xml 
b/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/pom.xml index d806e3e779d3..cad0864a2889 100644 --- a/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/pom.xml +++ b/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/pom.xml @@ -55,11 +55,6 @@ org.apache.rat apache-rat-plugin - - - src/test/resources/certs/localhost.crt - - diff --git a/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/example/WebSocketClientExample.java b/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/example/WebSocketClientExample.java index 5ef73bfb5dd2..f8c9b3a6fe4d 100644 --- a/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/example/WebSocketClientExample.java +++ b/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/example/WebSocketClientExample.java @@ -47,11 +47,11 @@ public void test() { final CountDownLatch replyLatch = new CountDownLatch(1); final SslContextFactory sslContextFactory = new SslContextFactory(); - sslContextFactory.setKeyStorePath("src/test/resources/certs/localhost-ks.jks"); - sslContextFactory.setKeyStorePassword("localtest"); + sslContextFactory.setKeyStorePath("src/test/resources/certs/keystore.jks"); + sslContextFactory.setKeyStorePassword("passwordpassword"); sslContextFactory.setKeyStoreType("JKS"); - sslContextFactory.setTrustStorePath("src/test/resources/certs/localhost-ks.jks"); - sslContextFactory.setTrustStorePassword("localtest"); + sslContextFactory.setTrustStorePath("src/test/resources/certs/truststore.jks"); + sslContextFactory.setTrustStorePassword("passwordpassword"); sslContextFactory.setTrustStoreType("JKS"); WebSocketClient client = new WebSocketClient(sslContextFactory); diff --git a/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/example/WebSocketServerExample.java b/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/example/WebSocketServerExample.java index eddecd5aab3c..e7a3ab6943d4 100644 --- a/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/example/WebSocketServerExample.java +++ b/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/example/WebSocketServerExample.java @@ -153,8 +153,8 @@ public static void setup() throws Exception { httpConnector.setPort(50010); final SslContextFactory sslContextFactory = new SslContextFactory(); - sslContextFactory.setKeyStorePath("src/test/resources/certs/localhost-ks.jks"); - sslContextFactory.setKeyStorePassword("localtest"); + sslContextFactory.setKeyStorePath("src/test/resources/certs/keystore.jks"); + sslContextFactory.setKeyStorePassword("passwordpassword"); sslContextFactory.setKeyStoreType("JKS"); final HttpConfiguration https = new HttpConfiguration(); diff --git a/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/jetty/ITJettyWebSocketSecureCommunication.java b/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/jetty/ITJettyWebSocketSecureCommunication.java index 249af7a2bd4c..3f1c48240600 100644 --- 
a/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/jetty/ITJettyWebSocketSecureCommunication.java +++ b/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/java/org/apache/nifi/websocket/jetty/ITJettyWebSocketSecureCommunication.java @@ -29,11 +29,11 @@ public class ITJettyWebSocketSecureCommunication extends ITJettyWebSocketCommuni public ITJettyWebSocketSecureCommunication() { try { - sslTestContext.setCustomValue(StandardSSLContextService.KEYSTORE, "src/test/resources/certs/localhost-ks.jks"); - sslTestContext.setCustomValue(StandardSSLContextService.KEYSTORE_PASSWORD, "localtest"); + sslTestContext.setCustomValue(StandardSSLContextService.KEYSTORE, "src/test/resources/certs/keystore.jks"); + sslTestContext.setCustomValue(StandardSSLContextService.KEYSTORE_PASSWORD, "passwordpassword"); sslTestContext.setCustomValue(StandardSSLContextService.KEYSTORE_TYPE, "JKS"); - sslTestContext.setCustomValue(StandardSSLContextService.TRUSTSTORE, "src/test/resources/certs/localhost-ks.jks"); - sslTestContext.setCustomValue(StandardSSLContextService.TRUSTSTORE_PASSWORD, "localtest"); + sslTestContext.setCustomValue(StandardSSLContextService.TRUSTSTORE, "src/test/resources/certs/truststore.jks"); + sslTestContext.setCustomValue(StandardSSLContextService.TRUSTSTORE_PASSWORD, "passwordpassword"); sslTestContext.setCustomValue(StandardSSLContextService.TRUSTSTORE_TYPE, "JKS"); sslContextService.initialize(sslTestContext.getInitializationContext()); diff --git a/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/resources/certs/keystore.jks b/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/resources/certs/keystore.jks new file mode 100644 index 0000000000000000000000000000000000000000..246fe888efbb981188c9dcca109b5d2c52f2b435 GIT binary patch literal 3088 zcmb`|c{tSF9suz9%_a@TzDvU}%JBY1_7`DFP1&+#U$ZAATQ9;`LzV{F%6MfdJ87|v zE2AhhmLf7Oyo_p0MWJw2_jzyc^W6K~`^Wpod7kq<=RDu@$M<}`XY<|WI|u{<-5$XA zvE!7#kH1Qw_qpu_?~N4xO$Y=51D%~@XpW4zpN#`sJCe8iw3r=g#nrL- zc}e<${}VaCYCr4gf{ezT(R=D z-nlpW#qF(FTF*1$2O#WiubmF;Hrxmy6*igNRlfI9l~@uN!qhs~FEEGiMBlUNgZzPX(j)?E&4g~kNMFchr_X$68d zLt}BE%T`bPS@41`N-iwN`9RK`z@zznQIT?}DVA}T$?N6wuhG{w2?-ZIV8Z4AapH~> zM=w+HO5Fp%YPU&g%=71;K{*z_W9CqOV7L|boJ98Vakn zj}6I7pIDmnvrHDGH%#ao7SjJ1`5HddZo{Zee{!RGW`+H+BrM;}xmtv)l}OxJAZh8- zwT`Bb9j|;Yw_=HDi5fgOoK;}HBaAL{SGgCDyYgC&a%im9w+!Rh4OO{I+3JX2BP;2q`9{!Jli?d3(E_2u<*?$NuwN zAx~K%jw`&zdYQkM#CBOGN@Z)7Q<2%226`=Tiu8=+T0z;V zY7d2carE2qJ_kfpxZ~T^YKhz^<)mJ)c!*1P+p8$2xI%eB(zQC(9^*_&h|gtE3?{fC_^JA?rpHl7SEY8Zz?j!0+^9w##-C~jZ9C0k?>zlY?s;}AZqkO zbydnL6y`L;WU);zfw*aVg|DuDb_E^dVtr1h)_rk&z2RQwX?`5pO;@!2(%#Elv zUe?<*C&k;IN?al00~>7(D+{R{ic4NlN$k7d`Kle9?n&9GU&+1B-px6N;$i0b^mLdH z$(AH9DJAw4%+ES<-~96R++imu41fUT@mIn4Vo+wg1TuVZQMj;q`m}DIxpU)D>FgW# zCt@$))iSz4*>BtOaB)yHPFQ#tq@;HhsC0~}gtS`Jb~GUw{o7yR_5m~iY{B6$C~Otv z{uT?tp&;Z(Y6Z9`D2&{({aqpuTrlXLGvG&Rfp4kF|9${JO@A)o_WRi`ApkLd%?d`^ zwVRHvkb+>ylrv{t<84w-ewm=TU#?Km9_vmc&9~O%6%D7U^XiNP=!ZoKGTfS%Z}Nw) zH{x9_j=9q}jQmOY)74O+sC%~#z>AO)@0IaZlNPG}wW`Bg=)g&(tXNyq_!yt+OrN=~ zI($%TXQXiyc7nf@_hT*hgCy0khUrx^=WzesaX3$8g63@cB7^&p{McFHy1vdY==?h9 zr$i9-K5UdfLvmB==xlIx+U1VV&1Bu2@vaB97}@x`Igcs&i$SY;of|i20_+_wEhLNk z+=yMjh0Gcl{PF_zdC}bg6b%M;P1(`c(n3>q$pDAw=cgoWJ+UFRdnX}(x-;8$N7{Br z_{kkw;BRtA_^UgDhJ(-XdZNP~%U{gf7FCwHzJh|Rf_+h9s``swy%q}DLj3H6C zm&5jubG)kdY)^pIY2viya-LOlTTWE{Q#6J~$`{9ISBVxD5%5QXmr|5=0xtV6gXHrVgG0f?qugFkf{fs5ABY4YFOs`kuIZEzZ*rUX}v9X 
zN><>P7`!CrHo!+=rw-Pd*t`9=BZ6as?F1UlV0w}EQMEuBMHopBcmt14|g&sQmlf^%aznq zUn5%e{qm(@k9XT@{BRcw#{5+cunF?~P=f$r+me1V`5(#sPm|wG|95zQ?aSX?xh;m} z?PMiEdK%D&u3H6GI^rhnY*xAGv<;Iqj>{WtWW@EZletfeN9l*o^*{aEh^`J6S`PPx zb!lA+afuH^Y<;QMQN8n<1{6v(4GYP(NR)PaHRS}EU0EA{9Fv&6>jAFDwB;aj&{_Ji z%^1H5<93JiC5io0Rj&h9D-N^eGlyk6Ijv?(^|B%=o%YPcg*(0Cu1 literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/resources/certs/localhost-ks.jks b/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/resources/certs/localhost-ks.jks deleted file mode 100755 index 119b50f736900fc4b4f0319feb5427206234a745..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2246 zcmchY`9IVP7sqGDm}rb`6seKrS{r<4C~K1?OQ9xPW!E6XFodSUH8UdH*td+bb<0|V z65+9hC_BlPvRuX**;dpCTbEPBd0S1{P*VGau(QwOm)@k8zwW* z1HfX?vv2Wg1!KZse@0k{q4gs!HF7X)eUVOGvc#SuWxdOOxuQ);!UKfq#{XPMvQ?*tv@l~xdH zq)+rwD}w3^KC~RWr(K=jL!An7LM73%Tj4saouZamE3NU1=1xh3f+&rF_fk4w z(|Y!XGJO;F{fITgVCFt=O&P+2x}`3eA^bclZ605`e5bC@{<-Q5+kWt(sbhxrYNr7@ zd|OeulFtj^o&dfRK~tAW?xQPkBsLb7U0SxDN&&fvc7`S zsUR=Dj{LRM*b?$>%%~gU2|JG3pC%|JK9?-PdLy>u;@X;wBOpun-Ery zNHft=IVm4j`36F~=Cft5 za7DAA<{NEAf4=MQEkw$l-kjb16?+OUT&v?Xeo ztj5I|5W_zlOEBW-n!+r^yBmHppgp?k5c%ZRkm`Bn1Uh$5&LH=dJ9%itb8p7Bgw^`4 zuZ<(;HM41}_z3GKxrdkIvhPhFTtf*%)={W({0@)N zsqd(Zxp=m*fZ{u=!PAy%eo|*v(Uge6(GWXr8PsfhvP<8*1#Rc!t$0xl))@RMDZ_}@ zRUIT_fIzky^~;%f*0mWIMGeXDXRX#yEFMvHb(Er+q1L{zt#Wqg!S&tyQ&Uf~Ke{xG zO=o}F>~Kfq@#sJ0cy#ZUrL>KO_n9+t;3v6L9eaj56!qtDXI?XfoF6}Q8C=Q5=VIT% zt9^pDnzHipey&W3?sz1Id6b^iyZ3->G5lMLT;w7#+;;_5QBY`=0 z1|wx?HxQwaDZ%A(jNIFWnS`NI%t>>-92yRnk*<+iP;@9isbD)AoMn1^l_cHv#Nw#f z-H0X;N=uF3bNQ}0O7V`XVnyg!IW2KPmrc9F$(fVLXx?^52w98WsFp~8AgV|@M57n5b^8R2mJa`N_z`a z-eqGBd^?L83-tldne`2%;Yot6wtH_P4zT!_L3Rs9dej<|*u$}pt7xtI`!;f4Q8@`{ zfk0fr0G*2h&>_ybTo5n>0(I6-@&FJpUZqIW$ZZ4|4Ce*`Bn%wjg>l(HVMvIL)qe#K z5@M*2IRqeoWeyHE17Jl!!OfbcH>N5cONBrgz+ zi5Ou(@E7Gm{_j=;5~2vqDF6eU#A4JmG3s`|BnGPn{73$uyP<=n|7pkmv*4nG1wa5D zd=NqhgF%^%4!wy(s>4-Fw~tX?@)g&M@>QlU+%hyX&yU#f&+039sx#o_q$6h!7gO!s z%21}h)V0YSIdOPzz_YAoK4I2YrPU<(bja@ecc>EiqqQJ$md?YD%^ThMH z@wO8Wgl|{7cCGO5l|KL026rMNKi6jrQP^8W-|?oan$>GqMeK$zp`D_&=5Rllh2OzJ zE1P&t2n6_X`xWjm1;?AHnp^gdy4J2bd0#4$3Qan4UVD-d8Kv!;aHFr>63P6jK)SO=_f?glm0@7>h^HnHNFQDk<9ahP9-Iepw#XxKO0zat63|W z_BVuZfkEH`Dd0GO+MgtfA9@%nVwhS^66$vrcWU26Fvvwz zykI@+jvLQiGtsVj2I7pBVZBXyxR)x7LMXU)MNUl){~9Cf`+78QenEjpmw~M!t)cP0 z{#?m-^>w9WZ*&IRcH%FGm9{rv&Ga)ilj-f7GLNm8Hd?PxN_poBD&B@pyfAxqMdHxl z;|dYZ`8K1PYlI?mbpICPbG7^ZQD;*=A~z78qo>bsGmY#sVNe}(c#z(>%e!3}%Dpa4 z?_>66*{a=o4{TzOFusN^*66GCMHbCNe3iz-vwu&^Zx%Q51MlT7BAil&OOH&9%dQuD z28P*BH9-pdE1SPWT`L}P4SbU%;=T7O3kf}%t8{nnw|tL;DS;EKcC@B?Z3HgamB|U= S*}ln-o;`x{l#gLW2mS?OFW+qd diff --git a/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/resources/certs/localhost-ts.jks b/nifi-nar-bundles/nifi-websocket-bundle/nifi-websocket-services-jetty/src/test/resources/certs/localhost-ts.jks deleted file mode 100755 index 7824378a3266565cff3d71b7c508ab2d60e06e34..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1816 zcmezO_TO6u1_mZL=FG`YPRz;3FD_wVU$4v!kJ?fiOsh zU6?00F|Q<1!8boIGdazbS85=;kbaqV>qY`p(FtRc*H!<=v7&I|*F*PwV zGR!y-b78_&{p;J_RLYcZ=UKH^oM-d2R~63QK8sqv6wbQ1c%Aj-tT=16Xl@Dp3*V;e zHf*;EU2s!d?EmGAwL4$*KMm76>RxSI^Y_{r+12XOyBVZ5SkF88wdmZUBCW>1mjpsy z^o8A>D^$57@$5Uk|7*7VJjNZDDg1En^sD7BzpeZg;PKvK$44Vgqc3^M$IC50#>}kV z5b(o}W%EH!_vB=5`RI47^%}8dvO6oHmz@0=8J8WnQn7ZTq?gtGwUN*b>^*j51vd`gXZ}Tj=d=! 
zZ5Q2p8)B?EgtP6!|DK($dm-WAwXXk9U+SN8m>b$H+55Tn^-f3Qi)|}kFy(38X`WLz zb3tscaO}@TH^6nkgPpdLY>Z2bWWLj})^PvwNNvp0VmYkR- zC$rcPs*X#TBPg{vHL)l;!%)D052TJ;m^~!5xFj<#9cRWgF)#(@MiUbYLkm#GG%*0? zP$-w~?rCD&0nCOvupnUsa^#sB8yWuA2RF)=3TX!2_nM>k=E?JKg4=^^-n%dyma}iz z7O0l#8tb4G_&d_JH{#d+qhEI!d^66fYKrV z-7ByE+kQ;?hjsY#V=I=4^0WMI{&xAO++ilu5aB4XiALW_Kd;kHysq{BlM=J!+>0KJ z$C*SKrY8jSiz;)U*)(Zq)1ucc+#e!jzJi?g{o#VvYqM?do!+xL#%xFU&dMq4cmJ|_ z)$}vmhufCDDLpVUyl>Z)NdISr>;jDqTRg=Im0$R1hzV~$&uP?iV%b9*v8wKnnqG|u zi`U6%Z(de9F>i4__b)}$q>sOosu)$Q&n)@4Z$;pQ&K1q~A4WZ$&o-$$Ev}(DR5gXs z$NJxSPc7!gRtAte7ABjPohmimJL!w=83*1S57xEkb0m5`p0y|T%0y91?Xr*$k!KcN z@qQxIFmK}r4~|)iTkLXzMLu+2k#TdI871R(@5qwPB{BO^B}gF%iVw*e;`b0`a&FjHtS zjKd+!G`N@en8TrK}26E!OhGqta2Bt=a#ukPqQ6R3Nk+A`kOJ~Bg2dXF&8FW)xW;&O{GlvcAiyx#d$VQdsX4=?X#%$ zOW~}Gi`Q9S&Wf{Ei{_Tlzwm9kZo_t)&;>WO#r|K;SiAE@|I;wNqwdwVHGi+2kzK7W zy_-R5gY~?VUyIHyD$;8Fdr2VVMPJz6xk8mY63@Ql`@d$(&tu$inZghEPQP0I^4rQ^ z0Uqxyc6>C_H2RX4e7xN9W6aFz4FNywST-NzbWcv^mXChNTCWjXCcCpDddb<3k#X69 zD-~-eN_tt{Pj2flU#7z_CrsY`rq|2%s5^6W@9z=oW(dxCV0Eg?{L7l0KTP5oO^slYI*H?VQ$x!(9`2~U67z}vXIJMe5+P?ELGIFyr7$_U^8gR2Qhq7?`|G6{iVN=9$4_qR+voW1o`bX1kq_?@ zm7?Yu-O|hoDsa9N@MUVyrd<=O*InmV-0wL}LeXNm*vwgmI&)i2xoutU%O^gq+-#-h zgcU!vCxtetZirTB`cPrNOwe`BjfgY1S)#*|1p91Qo|habzwWRw{qs>-^K}Z`hO4)3 zSIUzY)UVZ%QmoRgXzEd9>P=X}&%!9YU~gQOux6db!Fj1Vv2}&_t)uD|R&4O>6_I{d zsCSIx%+#5m7tCg~+3?%uOvdTe*F$Rt zebbjM$oo^i)iz_A4D$trP*WymMh3>k^#-*De832h{z<6{wH5fM3WFu8l*4Bb6fbc?q7ui3Ma?czxTd62X+i-dt#19k;q{i-Zt1|n=6 z+H8!htnAE8a26wS6amu*Fp3x%;$M|c<$Br5aA-Zh`^uo~Wl`!k!$N+rAKm}gt?NVV zYn6)fmHTq0T%UdUsC(tf>wh-zugDkvQkKrV&wNr*Y-2X-|KIB@^>$R5GBG*(lon0! zZ;^?AbUT{i$=9D(FVwb7kUOEi+Gn#X+u0bI3n5nobT4k-BQr_T{`T_6SwEI~yi@Kw zFTeN0R^RvQOEeC9N z9oJ^^ymM}2j4=Bzl|5_bntl+HlHs5ImG#RU$C=4?=Kr%5`)8e9xTbMSRd_+e)hDMV z{$6UT4g6+eRVVNyVx3Chngx@VtPkMaA?X>r^jGNWKqZspMPe?;zuNh^J${uwG4xsu E0JdaNg#Z8m literal 0 HcmV?d00001 diff --git a/nifi-nar-bundles/nifi-windows-event-log-bundle/nifi-windows-event-log-processors/pom.xml b/nifi-nar-bundles/nifi-windows-event-log-bundle/nifi-windows-event-log-processors/pom.xml index a504a3bd211b..4b7d063628da 100644 --- a/nifi-nar-bundles/nifi-windows-event-log-bundle/nifi-windows-event-log-processors/pom.xml +++ b/nifi-nar-bundles/nifi-windows-event-log-bundle/nifi-windows-event-log-processors/pom.xml @@ -21,8 +21,8 @@ language governing permissions and limitations under the License. 
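A short aside on the WebSocket test changes above: they replace the shared localhost-ks.jks / "localtest" pair with separate keystore.jks and truststore.jks files protected by "passwordpassword". As a minimal sketch (not part of this patch) of what that configuration amounts to, the same pair can be loaded into a plain SSLContext using only JDK JSSE classes; the file paths and password are taken from the hunks above, everything else is an illustrative assumption.

import java.io.FileInputStream;
import java.security.KeyStore;

import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;

class TestCertSketch {
    // Loads the test keystore/truststore pair referenced above into a plain SSLContext.
    static SSLContext build() throws Exception {
        final char[] password = "passwordpassword".toCharArray();

        final KeyStore keyStore = KeyStore.getInstance("JKS");
        try (FileInputStream in = new FileInputStream("src/test/resources/certs/keystore.jks")) {
            keyStore.load(in, password);
        }
        final KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
        kmf.init(keyStore, password);

        final KeyStore trustStore = KeyStore.getInstance("JKS");
        try (FileInputStream in = new FileInputStream("src/test/resources/certs/truststore.jks")) {
            trustStore.load(in, password);
        }
        final TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(trustStore);

        final SSLContext sslContext = SSLContext.getInstance("TLS");
        sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
        return sslContext;
    }
}

Jetty's SslContextFactory in the updated examples performs the equivalent wiring internally from the setKeyStorePath/setTrustStorePath calls shown in the hunks.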
--> jar - 4.2.2 - 3.20.0-GA + 4.5.2 + 3.23.1-GA diff --git a/nifi-nar-bundles/nifi-windows-event-log-bundle/nifi-windows-event-log-processors/src/main/java/org/apache/nifi/processors/windows/event/log/ConsumeWindowsEventLog.java b/nifi-nar-bundles/nifi-windows-event-log-bundle/nifi-windows-event-log-processors/src/main/java/org/apache/nifi/processors/windows/event/log/ConsumeWindowsEventLog.java index 87aa07f19129..7dc423692147 100644 --- a/nifi-nar-bundles/nifi-windows-event-log-bundle/nifi-windows-event-log-processors/src/main/java/org/apache/nifi/processors/windows/event/log/ConsumeWindowsEventLog.java +++ b/nifi-nar-bundles/nifi-windows-event-log-bundle/nifi-windows-event-log-processors/src/main/java/org/apache/nifi/processors/windows/event/log/ConsumeWindowsEventLog.java @@ -20,6 +20,7 @@ import com.sun.jna.platform.win32.Kernel32; import com.sun.jna.platform.win32.Kernel32Util; import com.sun.jna.platform.win32.WinNT; + import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; @@ -32,6 +33,8 @@ import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; + import org.apache.nifi.annotation.behavior.InputRequirement; import org.apache.nifi.annotation.behavior.TriggerSerially; import org.apache.nifi.annotation.behavior.WritesAttribute; @@ -43,6 +46,7 @@ import org.apache.nifi.components.PropertyDescriptor; import org.apache.nifi.components.ValidationContext; import org.apache.nifi.components.ValidationResult; +import org.apache.nifi.expression.ExpressionLanguageScope; import org.apache.nifi.flowfile.FlowFile; import org.apache.nifi.flowfile.attributes.CoreAttributes; import org.apache.nifi.processor.AbstractSessionFactoryProcessor; @@ -77,6 +81,7 @@ public class ConsumeWindowsEventLog extends AbstractSessionFactoryProcessor { .defaultValue(DEFAULT_CHANNEL) .description("The Windows Event Log Channel to listen to.") .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) .build(); public static final PropertyDescriptor QUERY = new PropertyDescriptor.Builder() @@ -86,6 +91,7 @@ public class ConsumeWindowsEventLog extends AbstractSessionFactoryProcessor { .defaultValue(DEFAULT_XPATH) .description("XPath Query to filter events. (See https://msdn.microsoft.com/en-us/library/windows/desktop/dd996910(v=vs.85).aspx for examples.)") .addValidator(StandardValidators.NON_EMPTY_VALIDATOR) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) .build(); public static final PropertyDescriptor MAX_BUFFER_SIZE = new PropertyDescriptor.Builder() @@ -108,7 +114,21 @@ public class ConsumeWindowsEventLog extends AbstractSessionFactoryProcessor { .addValidator(StandardValidators.POSITIVE_INTEGER_VALIDATOR) .build(); - public static final List PROPERTY_DESCRIPTORS = Collections.unmodifiableList(Arrays.asList(CHANNEL, QUERY, MAX_BUFFER_SIZE, MAX_EVENT_QUEUE_SIZE)); + public static final PropertyDescriptor INACTIVE_DURATION_TO_RECONNECT = new PropertyDescriptor.Builder() + .name("inactiveDurationToReconnect") + .displayName("Inactive duration to reconnect") + .description("If no new event logs are processed for the specified time period," + + " this processor will try reconnecting to recover from a state where any further messages cannot be consumed." + + " Such situation can happen if Windows Event Log service is restarted, or ERROR_EVT_QUERY_RESULT_STALE (15011) is returned." 
+ + " Setting no duration, e.g. '0 ms' disables auto-reconnection.") + .required(true) + .expressionLanguageSupported(ExpressionLanguageScope.VARIABLE_REGISTRY) + .defaultValue("10 mins") + .addValidator(StandardValidators.createTimePeriodValidator(0, TimeUnit.MILLISECONDS, Long.MAX_VALUE, TimeUnit.MILLISECONDS)) + .build(); + + public static final List PROPERTY_DESCRIPTORS = Collections.unmodifiableList( + Arrays.asList(CHANNEL, QUERY, MAX_BUFFER_SIZE, MAX_EVENT_QUEUE_SIZE, INACTIVE_DURATION_TO_RECONNECT)); public static final Relationship REL_SUCCESS = new Relationship.Builder() .name("success") @@ -134,6 +154,9 @@ public class ConsumeWindowsEventLog extends AbstractSessionFactoryProcessor { private ProcessSessionFactory sessionFactory; private String provenanceUri; + private long inactiveDurationToReconnect = 0; + private long lastActivityTimestamp = 0; + /** * Framework constructor */ @@ -182,12 +205,20 @@ private Kernel32 loadKernel32() { * * @param context the process context */ - private String subscribe(ProcessContext context) throws URISyntaxException { - String channel = context.getProperty(CHANNEL).getValue(); - String query = context.getProperty(QUERY).getValue(); + private String subscribe(ProcessContext context) { + final String channel = context.getProperty(CHANNEL).evaluateAttributeExpressions().getValue(); + final String query = context.getProperty(QUERY).evaluateAttributeExpressions().getValue(); renderedXMLs = new LinkedBlockingQueue<>(context.getProperty(MAX_EVENT_QUEUE_SIZE).asInteger()); - provenanceUri = new URI("winlog", name, "/" + channel, query, null).toASCIIString(); + + try { + provenanceUri = new URI("winlog", name, "/" + channel, query, null).toASCIIString(); + } catch (URISyntaxException e) { + getLogger().debug("Failed to construct detailed provenanceUri from channel={}, query={}, use simpler one.", new Object[]{channel, query}); + provenanceUri = String.format("winlog://%s/%s", name, channel); + } + + inactiveDurationToReconnect = context.getProperty(INACTIVE_DURATION_TO_RECONNECT).evaluateAttributeExpressions().asTimePeriod(TimeUnit.MILLISECONDS); evtSubscribeCallback = new EventSubscribeXmlRenderingCallback(getLogger(), s -> { try { @@ -199,9 +230,12 @@ private String subscribe(ProcessContext context) throws URISyntaxException { subscriptionHandle = wEvtApi.EvtSubscribe(null, null, channel, query, null, null, evtSubscribeCallback, WEvtApi.EvtSubscribeFlags.SUBSCRIBE_TO_FUTURE | WEvtApi.EvtSubscribeFlags.EVT_SUBSCRIBE_STRICT); + if (!isSubscribed()) { return UNABLE_TO_SUBSCRIBE + errorLookup.getLastError(); } + + lastActivityTimestamp = System.currentTimeMillis(); return null; } @@ -210,7 +244,7 @@ private boolean isSubscribed() { } @OnScheduled - public void onScheduled(ProcessContext context) throws AlreadySubscribedException, URISyntaxException { + public void onScheduled(ProcessContext context) throws AlreadySubscribedException { if (isSubscribed()) { throw new AlreadySubscribedException(PROCESSOR_ALREADY_SUBSCRIBED); } @@ -225,11 +259,8 @@ public void onScheduled(ProcessContext context) throws AlreadySubscribedExceptio */ @OnStopped public void stop() { - if (isSubscribed()) { - wEvtApi.EvtClose(subscriptionHandle); - } - subscriptionHandle = null; - evtSubscribeCallback = null; + unsubscribe(); + if (!renderedXMLs.isEmpty()) { if (sessionFactory != null) { getLogger().info("Finishing processing leftover events"); @@ -246,29 +277,49 @@ public void stop() { renderedXMLs = null; } + private void unsubscribe() { + if (isSubscribed()) { + 
wEvtApi.EvtClose(subscriptionHandle); + } + subscriptionHandle = null; + evtSubscribeCallback = null; + } + @Override public void onTrigger(ProcessContext context, ProcessSessionFactory sessionFactory) throws ProcessException { this.sessionFactory = sessionFactory; + if (!isSubscribed()) { - String errorMessage; - try { - errorMessage = subscribe(context); - } catch (URISyntaxException e) { - getLogger().error(e.getMessage(), e); - context.yield(); - return; - } + String errorMessage = subscribe(context); if (errorMessage != null) { context.yield(); getLogger().error(errorMessage); return; } } - processQueue(sessionFactory.createSession()); + + final int flowFileCount = processQueue(sessionFactory.createSession()); + + final long now = System.currentTimeMillis(); + if (flowFileCount > 0) { + lastActivityTimestamp = now; + + } else if (inactiveDurationToReconnect > 0) { + if ((now - lastActivityTimestamp) > inactiveDurationToReconnect) { + getLogger().info("Exceeds configured 'inactive duration to reconnect' {} ms. Unsubscribe to reconnect..", new Object[]{inactiveDurationToReconnect}); + unsubscribe(); + } + } } - private void processQueue(ProcessSession session) { + /** + * Create FlowFiles from received logs. + * @return the number of created FlowFiles + */ + private int processQueue(ProcessSession session) { String xml; + int flowFileCount = 0; + while ((xml = renderedXMLs.peek()) != null) { FlowFile flowFile = session.create(); byte[] xmlBytes = xml.getBytes(StandardCharsets.UTF_8); @@ -277,6 +328,7 @@ private void processQueue(ProcessSession session) { session.getProvenanceReporter().receive(flowFile, provenanceUri); session.transfer(flowFile, REL_SUCCESS); session.commit(); + flowFileCount++; if (!renderedXMLs.remove(xml) && getLogger().isWarnEnabled()) { getLogger().warn(new StringBuilder("Event ") .append(xml) @@ -286,6 +338,7 @@ private void processQueue(ProcessSession session) { .toString()); } } + return flowFileCount; } @Override diff --git a/nifi-toolkit/nifi-toolkit-admin/pom.xml b/nifi-toolkit/nifi-toolkit-admin/pom.xml index a6f46851018f..e7c9ca27dc74 100644 --- a/nifi-toolkit/nifi-toolkit-admin/pom.xml +++ b/nifi-toolkit/nifi-toolkit-admin/pom.xml @@ -24,7 +24,7 @@ language governing permissions and limitations under the License. --> commons-cli commons-cli - 1.3.1 + 1.4 com.google.guava @@ -39,7 +39,7 @@ language governing permissions and limitations under the License. --> com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 org.apache.nifi @@ -118,7 +118,7 @@ language governing permissions and limitations under the License. 
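The ConsumeWindowsEventLog change above drives its auto-reconnect purely off a timestamp: processQueue now reports how many FlowFiles it produced, and if nothing has been produced for longer than the configured 'Inactive duration to reconnect', the processor unsubscribes so the next trigger re-subscribes. A minimal, self-contained sketch of that timing logic follows; the names loosely mirror the patch, but the real Windows event subscription is replaced by a boolean, so this is illustrative only, not the committed code.

import java.util.concurrent.TimeUnit;

class InactivityReconnectSketch {
    private final long inactiveDurationToReconnectMs = TimeUnit.MINUTES.toMillis(10); // default "10 mins"
    private long lastActivityTimestamp = System.currentTimeMillis();
    private boolean subscribed = true;

    void onTrigger(final int flowFileCount) {
        final long now = System.currentTimeMillis();
        if (flowFileCount > 0) {
            // Events arrived, so the subscription is healthy; reset the inactivity clock.
            lastActivityTimestamp = now;
        } else if (inactiveDurationToReconnectMs > 0
                && (now - lastActivityTimestamp) > inactiveDurationToReconnectMs) {
            // No events within the configured window (e.g. the Windows Event Log service
            // restarted, or the subscription went stale): drop it so the next trigger
            // subscribes again from scratch.
            subscribed = false;
        }
    }

    boolean isSubscribed() {
        return subscribed;
    }
}

The '10 mins' default and the rule that '0 ms' disables the behaviour both come straight from the new property descriptor above.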
--> org.apache.commons commons-compress - 1.16.1 + 1.18 diff --git a/nifi-toolkit/nifi-toolkit-cli/pom.xml b/nifi-toolkit/nifi-toolkit-cli/pom.xml index 5728eebade86..8fb718ae532c 100644 --- a/nifi-toolkit/nifi-toolkit-cli/pom.xml +++ b/nifi-toolkit/nifi-toolkit-cli/pom.xml @@ -48,7 +48,7 @@ commons-cli commons-cli - 1.3.1 + 1.4 org.apache.nifi @@ -69,7 +69,7 @@ com.fasterxml.jackson.core jackson-databind - 2.9.5 + 2.9.7 org.jline diff --git a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/client/nifi/ControllerClient.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/client/nifi/ControllerClient.java index 22821ee3845e..6cb32264957b 100644 --- a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/client/nifi/ControllerClient.java +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/client/nifi/ControllerClient.java @@ -16,6 +16,8 @@ */ package org.apache.nifi.toolkit.cli.impl.client.nifi; +import org.apache.nifi.web.api.entity.ClusterEntity; +import org.apache.nifi.web.api.entity.NodeEntity; import org.apache.nifi.web.api.entity.RegistryClientEntity; import org.apache.nifi.web.api.entity.RegistryClientsEntity; @@ -34,4 +36,16 @@ public interface ControllerClient { RegistryClientEntity updateRegistryClient(RegistryClientEntity registryClientEntity) throws NiFiClientException, IOException; + NodeEntity connectNode(String nodeId, NodeEntity nodeEntity) throws NiFiClientException, IOException; + + NodeEntity deleteNode(String nodeId) throws NiFiClientException, IOException; + + NodeEntity disconnectNode(String nodeId, NodeEntity nodeEntity) throws NiFiClientException, IOException; + + NodeEntity getNode(String nodeId) throws NiFiClientException, IOException; + + ClusterEntity getNodes() throws NiFiClientException, IOException; + + NodeEntity offloadNode(String nodeId, NodeEntity nodeEntity) throws NiFiClientException, IOException; + } diff --git a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/client/nifi/impl/JerseyControllerClient.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/client/nifi/impl/JerseyControllerClient.java index 9c9ffc49d339..a16279046891 100644 --- a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/client/nifi/impl/JerseyControllerClient.java +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/client/nifi/impl/JerseyControllerClient.java @@ -19,6 +19,8 @@ import org.apache.commons.lang3.StringUtils; import org.apache.nifi.toolkit.cli.impl.client.nifi.ControllerClient; import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClientException; +import org.apache.nifi.web.api.entity.ClusterEntity; +import org.apache.nifi.web.api.entity.NodeEntity; import org.apache.nifi.web.api.entity.RegistryClientEntity; import org.apache.nifi.web.api.entity.RegistryClientsEntity; @@ -104,4 +106,89 @@ public RegistryClientEntity updateRegistryClient(final RegistryClientEntity regi }); } + @Override + public NodeEntity deleteNode(final String nodeId) throws NiFiClientException, IOException { + if (StringUtils.isBlank(nodeId)) { + throw new IllegalArgumentException("Node ID cannot be null or empty"); + } + + return executeAction("Error deleting node", () -> { + final WebTarget target = controllerTarget.path("cluster/nodes/" + nodeId); + + return getRequestBuilder(target).delete(NodeEntity.class); + }); + } + + @Override + public NodeEntity connectNode(final 
String nodeId, final NodeEntity nodeEntity) throws NiFiClientException, IOException { + if (StringUtils.isBlank(nodeId)) { + throw new IllegalArgumentException("Node ID cannot be null or empty"); + } + + if (nodeEntity == null) { + throw new IllegalArgumentException("Node entity cannot be null"); + } + + return executeAction("Error connecting node", () -> { + final WebTarget target = controllerTarget.path("cluster/nodes/" + nodeId); + + return getRequestBuilder(target).put(Entity.entity(nodeEntity, MediaType.APPLICATION_JSON), NodeEntity.class); + }); + } + + @Override + public NodeEntity offloadNode(final String nodeId, final NodeEntity nodeEntity) throws NiFiClientException, IOException { + if (StringUtils.isBlank(nodeId)) { + throw new IllegalArgumentException("Node ID cannot be null or empty"); + } + + if (nodeEntity == null) { + throw new IllegalArgumentException("Node entity cannot be null"); + } + + return executeAction("Error offloading node", () -> { + final WebTarget target = controllerTarget.path("cluster/nodes/" + nodeId); + + return getRequestBuilder(target).put(Entity.entity(nodeEntity, MediaType.APPLICATION_JSON), NodeEntity.class); + }); + } + + @Override + public NodeEntity disconnectNode(final String nodeId, final NodeEntity nodeEntity) throws NiFiClientException, IOException { + if (StringUtils.isBlank(nodeId)) { + throw new IllegalArgumentException("Node ID cannot be null or empty"); + } + + if (nodeEntity == null) { + throw new IllegalArgumentException("Node entity cannot be null"); + } + + return executeAction("Error disconnecting node", () -> { + final WebTarget target = controllerTarget.path("cluster/nodes/" + nodeId); + + return getRequestBuilder(target).put(Entity.entity(nodeEntity, MediaType.APPLICATION_JSON), NodeEntity.class); + }); + } + + @Override + public NodeEntity getNode(String nodeId) throws NiFiClientException, IOException { + if (StringUtils.isBlank(nodeId)) { + throw new IllegalArgumentException("Node ID cannot be null or empty"); + } + + return executeAction("Error retrieving node status", () -> { + final WebTarget target = controllerTarget.path("cluster/nodes/" + nodeId); + + return getRequestBuilder(target).get(NodeEntity.class); + }); + } + + @Override + public ClusterEntity getNodes() throws NiFiClientException, IOException { + return executeAction("Error retrieving node status", () -> { + final WebTarget target = controllerTarget.path("cluster"); + + return getRequestBuilder(target).get(ClusterEntity.class); + }); + } } diff --git a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/CommandOption.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/CommandOption.java index ad15036f3b9f..171e6cf85eb8 100644 --- a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/CommandOption.java +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/CommandOption.java @@ -49,6 +49,9 @@ public enum CommandOption { SRC_FLOW_ID("sf", "sourceFlowIdentifier", "A flow identifier from the source registry", true), SRC_FLOW_VERSION("sfv", "sourceFlowVersion", "A version of a flow from the source registry", true), + // NiFi - Nodes + NIFI_NODE_ID("nnid", "nifiNodeId", "The ID of a node in the NiFi cluster", true), + // NiFi - Registries REGISTRY_CLIENT_ID("rcid", "registryClientId", "The id of a registry client", true), REGISTRY_CLIENT_NAME("rcn", "registryClientName", "The name of the registry client", true), diff --git 
a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/NiFiCommandGroup.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/NiFiCommandGroup.java index 00a38a222c2f..298b709fee1b 100644 --- a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/NiFiCommandGroup.java +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/NiFiCommandGroup.java @@ -21,6 +21,12 @@ import org.apache.nifi.toolkit.cli.impl.command.nifi.flow.ClusterSummary; import org.apache.nifi.toolkit.cli.impl.command.nifi.flow.CurrentUser; import org.apache.nifi.toolkit.cli.impl.command.nifi.flow.GetRootId; +import org.apache.nifi.toolkit.cli.impl.command.nifi.nodes.ConnectNode; +import org.apache.nifi.toolkit.cli.impl.command.nifi.nodes.OffloadNode; +import org.apache.nifi.toolkit.cli.impl.command.nifi.nodes.DeleteNode; +import org.apache.nifi.toolkit.cli.impl.command.nifi.nodes.DisconnectNode; +import org.apache.nifi.toolkit.cli.impl.command.nifi.nodes.GetNode; +import org.apache.nifi.toolkit.cli.impl.command.nifi.nodes.GetNodes; import org.apache.nifi.toolkit.cli.impl.command.nifi.pg.PGChangeVersion; import org.apache.nifi.toolkit.cli.impl.command.nifi.pg.PGDisableControllerServices; import org.apache.nifi.toolkit.cli.impl.command.nifi.pg.PGEnableControllerServices; @@ -58,7 +64,13 @@ protected List createCommands() { final List commands = new ArrayList<>(); commands.add(new CurrentUser()); commands.add(new ClusterSummary()); + commands.add(new ConnectNode()); + commands.add(new DeleteNode()); + commands.add(new DisconnectNode()); commands.add(new GetRootId()); + commands.add(new GetNode()); + commands.add(new GetNodes()); + commands.add(new OffloadNode()); commands.add(new ListRegistryClients()); commands.add(new CreateRegistryClient()); commands.add(new UpdateRegistryClient()); diff --git a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/ConnectNode.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/ConnectNode.java new file mode 100644 index 000000000000..8ec006692bae --- /dev/null +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/ConnectNode.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.toolkit.cli.impl.command.nifi.nodes; + +import org.apache.commons.cli.MissingOptionException; +import org.apache.nifi.toolkit.cli.api.CommandException; +import org.apache.nifi.toolkit.cli.api.Context; +import org.apache.nifi.toolkit.cli.impl.client.nifi.ControllerClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClientException; +import org.apache.nifi.toolkit.cli.impl.command.CommandOption; +import org.apache.nifi.toolkit.cli.impl.command.nifi.AbstractNiFiCommand; +import org.apache.nifi.toolkit.cli.impl.result.NodeResult; +import org.apache.nifi.web.api.dto.NodeDTO; +import org.apache.nifi.web.api.entity.NodeEntity; + +import java.io.IOException; +import java.util.Properties; + +/** + * Command for connecting a node to the NiFi cluster. + */ +public class ConnectNode extends AbstractNiFiCommand { + + public ConnectNode() { + super("connect-node", NodeResult.class); + } + + @Override + public String getDescription() { + return "Connects a node to the NiFi cluster."; + } + + @Override + protected void doInitialize(Context context) { + addOption(CommandOption.NIFI_NODE_ID.createOption()); + } + + @Override + public NodeResult doExecute(NiFiClient client, Properties properties) throws NiFiClientException, IOException, MissingOptionException, CommandException { + final String nodeId = getRequiredArg(properties, CommandOption.NIFI_NODE_ID); + final ControllerClient controllerClient = client.getControllerClient(); + + NodeDTO nodeDto = new NodeDTO(); + nodeDto.setNodeId(nodeId); + // TODO There are no constants for the CONNECT node statuses + nodeDto.setStatus("CONNECTING"); + NodeEntity nodeEntity = new NodeEntity(); + nodeEntity.setNode(nodeDto); + NodeEntity nodeEntityResult = controllerClient.connectNode(nodeId, nodeEntity); + return new NodeResult(getResultType(properties), nodeEntityResult); + } +} diff --git a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/DeleteNode.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/DeleteNode.java new file mode 100644 index 000000000000..280e625fd94c --- /dev/null +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/DeleteNode.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.toolkit.cli.impl.command.nifi.nodes; + +import org.apache.commons.cli.MissingOptionException; +import org.apache.nifi.toolkit.cli.api.Context; +import org.apache.nifi.toolkit.cli.impl.client.nifi.ControllerClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClientException; +import org.apache.nifi.toolkit.cli.impl.command.CommandOption; +import org.apache.nifi.toolkit.cli.impl.command.nifi.AbstractNiFiCommand; +import org.apache.nifi.toolkit.cli.impl.result.OkResult; + +import java.io.IOException; +import java.util.Properties; + +/** + * Command for deleting a node from the NiFi cluster. + */ +public class DeleteNode extends AbstractNiFiCommand { + + public DeleteNode() { + super("delete-node", OkResult.class); + } + + @Override + public String getDescription() { + return "Deletes a node from the NiFi cluster."; + } + + @Override + protected void doInitialize(Context context) { + addOption(CommandOption.NIFI_NODE_ID.createOption()); + } + + @Override + public OkResult doExecute(NiFiClient client, Properties properties) throws NiFiClientException, IOException, MissingOptionException { + final String nodeId = getRequiredArg(properties, CommandOption.NIFI_NODE_ID); + final ControllerClient controllerClient = client.getControllerClient(); + + controllerClient.deleteNode(nodeId); + return new OkResult(getContext().isInteractive()); + } +} diff --git a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/DisconnectNode.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/DisconnectNode.java new file mode 100644 index 000000000000..65a7e72efd01 --- /dev/null +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/DisconnectNode.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.toolkit.cli.impl.command.nifi.nodes; + +import org.apache.commons.cli.MissingOptionException; +import org.apache.nifi.toolkit.cli.api.CommandException; +import org.apache.nifi.toolkit.cli.api.Context; +import org.apache.nifi.toolkit.cli.impl.client.nifi.ControllerClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClientException; +import org.apache.nifi.toolkit.cli.impl.command.CommandOption; +import org.apache.nifi.toolkit.cli.impl.command.nifi.AbstractNiFiCommand; +import org.apache.nifi.toolkit.cli.impl.result.NodeResult; +import org.apache.nifi.web.api.dto.NodeDTO; +import org.apache.nifi.web.api.entity.NodeEntity; + +import java.io.IOException; +import java.util.Properties; + +/** + * Command for disconnecting a node from the NiFi cluster. + */ +public class DisconnectNode extends AbstractNiFiCommand { + + public DisconnectNode() { + super("disconnect-node", NodeResult.class); + } + + @Override + public String getDescription() { + return "Disconnects a node from the NiFi cluster."; + } + + @Override + protected void doInitialize(Context context) { + addOption(CommandOption.NIFI_NODE_ID.createOption()); + } + + @Override + public NodeResult doExecute(NiFiClient client, Properties properties) throws NiFiClientException, IOException, MissingOptionException, CommandException { + final String nodeId = getRequiredArg(properties, CommandOption.NIFI_NODE_ID); + final ControllerClient controllerClient = client.getControllerClient(); + + NodeDTO nodeDto = new NodeDTO(); + nodeDto.setNodeId(nodeId); + // TODO There are no constants for the DISCONNECT node status + nodeDto.setStatus("DISCONNECTING"); + NodeEntity nodeEntity = new NodeEntity(); + nodeEntity.setNode(nodeDto); + NodeEntity nodeEntityResult = controllerClient.disconnectNode(nodeId, nodeEntity); + return new NodeResult(getResultType(properties), nodeEntityResult); + } +} diff --git a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/GetNode.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/GetNode.java new file mode 100644 index 000000000000..54687bdb124c --- /dev/null +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/GetNode.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.toolkit.cli.impl.command.nifi.nodes; + +import org.apache.commons.cli.MissingOptionException; +import org.apache.nifi.toolkit.cli.api.Context; +import org.apache.nifi.toolkit.cli.impl.client.nifi.ControllerClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClientException; +import org.apache.nifi.toolkit.cli.impl.command.CommandOption; +import org.apache.nifi.toolkit.cli.impl.command.nifi.AbstractNiFiCommand; +import org.apache.nifi.toolkit.cli.impl.result.NodeResult; +import org.apache.nifi.web.api.entity.NodeEntity; + +import java.io.IOException; +import java.util.Properties; + +/** + * Command for retrieving the status of a node in the NiFi cluster. + */ +public class GetNode extends AbstractNiFiCommand { + + public GetNode() { + super("get-node", NodeResult.class); + } + + @Override + public String getDescription() { + return "Retrieves the status for a node in the NiFi cluster."; + } + + @Override + protected void doInitialize(Context context) { + addOption(CommandOption.NIFI_NODE_ID.createOption()); + } + + @Override + public NodeResult doExecute(NiFiClient client, Properties properties) throws NiFiClientException, IOException, MissingOptionException { + final String nodeId = getRequiredArg(properties, CommandOption.NIFI_NODE_ID); + final ControllerClient controllerClient = client.getControllerClient(); + + NodeEntity nodeEntityResult = controllerClient.getNode(nodeId); + return new NodeResult(getResultType(properties), nodeEntityResult); + } +} diff --git a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/GetNodes.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/GetNodes.java new file mode 100644 index 000000000000..368fb4ddab4c --- /dev/null +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/GetNodes.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.toolkit.cli.impl.command.nifi.nodes; + +import org.apache.commons.cli.MissingOptionException; +import org.apache.nifi.toolkit.cli.api.CommandException; +import org.apache.nifi.toolkit.cli.impl.client.nifi.ControllerClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClientException; +import org.apache.nifi.toolkit.cli.impl.command.nifi.AbstractNiFiCommand; +import org.apache.nifi.toolkit.cli.impl.result.NodesResult; +import org.apache.nifi.web.api.entity.ClusterEntity; + +import java.io.IOException; +import java.util.Properties; + +/** + * Command for retrieving the status of the nodes from the NiFi cluster. + */ +public class GetNodes extends AbstractNiFiCommand { + + public GetNodes() { + super("get-nodes", NodesResult.class); + } + + @Override + public String getDescription() { + return "Retrieves statuses for the nodes of the NiFi cluster."; + } + + @Override + public NodesResult doExecute(NiFiClient client, Properties properties) throws NiFiClientException, IOException, MissingOptionException, CommandException { + final ControllerClient controllerClient = client.getControllerClient(); + + ClusterEntity clusterEntityResult = controllerClient.getNodes(); + return new NodesResult(getResultType(properties), clusterEntityResult); + } +} diff --git a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/OffloadNode.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/OffloadNode.java new file mode 100644 index 000000000000..aa759b1c7dcf --- /dev/null +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/command/nifi/nodes/OffloadNode.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.nifi.toolkit.cli.impl.command.nifi.nodes; + +import org.apache.commons.cli.MissingOptionException; +import org.apache.nifi.toolkit.cli.api.CommandException; +import org.apache.nifi.toolkit.cli.api.Context; +import org.apache.nifi.toolkit.cli.impl.client.nifi.ControllerClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClient; +import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClientException; +import org.apache.nifi.toolkit.cli.impl.command.CommandOption; +import org.apache.nifi.toolkit.cli.impl.command.nifi.AbstractNiFiCommand; +import org.apache.nifi.toolkit.cli.impl.result.NodeResult; +import org.apache.nifi.web.api.dto.NodeDTO; +import org.apache.nifi.web.api.entity.NodeEntity; + +import java.io.IOException; +import java.util.Properties; + +/** + * Command for offloading a node of the NiFi cluster. 
+ */ +public class OffloadNode extends AbstractNiFiCommand { + + public OffloadNode() { + super("offload-node", NodeResult.class); + } + + @Override + public String getDescription() { + return "Offloads a node of the NiFi cluster."; + } + + @Override + protected void doInitialize(Context context) { + addOption(CommandOption.NIFI_NODE_ID.createOption()); + } + + @Override + public NodeResult doExecute(NiFiClient client, Properties properties) throws NiFiClientException, IOException, MissingOptionException, CommandException { + final String nodeId = getRequiredArg(properties, CommandOption.NIFI_NODE_ID); + final ControllerClient controllerClient = client.getControllerClient(); + + NodeDTO nodeDto = new NodeDTO(); + nodeDto.setNodeId(nodeId); + // TODO There are no constants for the OFFLOAD node statuses + nodeDto.setStatus("OFFLOADING"); + NodeEntity nodeEntity = new NodeEntity(); + nodeEntity.setNode(nodeDto); + NodeEntity nodeEntityResult = controllerClient.offloadNode(nodeId, nodeEntity); + return new NodeResult(getResultType(properties), nodeEntityResult); + } +} diff --git a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/result/NodeResult.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/result/NodeResult.java new file mode 100644 index 000000000000..3e1efdf46eea --- /dev/null +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/result/NodeResult.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
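The connect-node, disconnect-node and offload-node commands above all share one shape: build a NodeDTO carrying the node id and the desired status string, wrap it in a NodeEntity, and PUT it through the new ControllerClient methods. The following is a hypothetical helper showing that shape, not code from this patch; the configured ControllerClient and node id are assumed to come from an existing CLI session.

import java.io.IOException;

import org.apache.nifi.toolkit.cli.impl.client.nifi.ControllerClient;
import org.apache.nifi.toolkit.cli.impl.client.nifi.NiFiClientException;
import org.apache.nifi.web.api.dto.NodeDTO;
import org.apache.nifi.web.api.entity.NodeEntity;

class NodeOffloadSketch {
    // Requests that the given cluster node be offloaded; connect-node and disconnect-node
    // follow the same pattern with "CONNECTING" and "DISCONNECTING" respectively.
    static NodeEntity offload(final ControllerClient controllerClient, final String nodeId)
            throws NiFiClientException, IOException {
        final NodeDTO nodeDto = new NodeDTO();
        nodeDto.setNodeId(nodeId);
        nodeDto.setStatus("OFFLOADING"); // no status constants exist yet, per the TODOs above
        final NodeEntity nodeEntity = new NodeEntity();
        nodeEntity.setNode(nodeDto);
        return controllerClient.offloadNode(nodeId, nodeEntity);
    }
}

In the toolkit CLI these operations surface under the nifi command group as connect-node, disconnect-node, offload-node, delete-node, get-node and get-nodes, with the target node selected via the new -nnid/--nifiNodeId option.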
+ */ +package org.apache.nifi.toolkit.cli.impl.result; + +import org.apache.commons.lang3.Validate; +import org.apache.nifi.toolkit.cli.api.ResultType; +import org.apache.nifi.web.api.dto.NodeDTO; +import org.apache.nifi.web.api.entity.NodeEntity; + +import java.io.IOException; +import java.io.PrintStream; + +public class NodeResult extends AbstractWritableResult { + + private final NodeEntity nodeEntity; + + public NodeResult(ResultType resultType, NodeEntity nodeEntity) { + super(resultType); + this.nodeEntity = nodeEntity; + Validate.notNull(nodeEntity); + } + + @Override + public NodeEntity getResult() { + return nodeEntity; + } + + @Override + protected void writeSimpleResult(PrintStream output) throws IOException { + NodeDTO nodeDTO = nodeEntity.getNode(); + output.printf("Node ID: %s\nNode Address: %s\nAPI Port: %s\nNode Status: %s", + nodeDTO.getNodeId(), nodeDTO.getAddress(), nodeDTO.getApiPort(), nodeDTO.getStatus()); + } +} diff --git a/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/result/NodesResult.java b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/result/NodesResult.java new file mode 100644 index 000000000000..daab27fd82e6 --- /dev/null +++ b/nifi-toolkit/nifi-toolkit-cli/src/main/java/org/apache/nifi/toolkit/cli/impl/result/NodesResult.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.nifi.toolkit.cli.impl.result; + +import org.apache.commons.lang3.Validate; +import org.apache.nifi.toolkit.cli.api.ResultType; +import org.apache.nifi.toolkit.cli.impl.result.writer.DynamicTableWriter; +import org.apache.nifi.toolkit.cli.impl.result.writer.Table; +import org.apache.nifi.toolkit.cli.impl.result.writer.TableWriter; +import org.apache.nifi.web.api.dto.NodeDTO; +import org.apache.nifi.web.api.entity.ClusterEntity; +import org.glassfish.jersey.internal.guava.Lists; + +import java.io.IOException; +import java.io.PrintStream; +import java.util.List; + +public class NodesResult extends AbstractWritableResult { + + private final ClusterEntity clusterEntity; + + public NodesResult(ResultType resultType, ClusterEntity clusterEntity) { + super(resultType); + this.clusterEntity = clusterEntity; + Validate.notNull(clusterEntity); + } + + @Override + public ClusterEntity getResult() { + return clusterEntity; + } + + @Override + protected void writeSimpleResult(PrintStream output) throws IOException { + final Table table = new Table.Builder() + .column("#", 3, 3, false) + .column("Node ID", 36, 36, false) + .column("Node Address", 36, 36, true) + .column("API Port", 8, 8, false) + .column("Node Status", 13, 13, false) + .build(); + + List nodes = Lists.newArrayList(clusterEntity.getCluster().getNodes()); + for (int i = 0; i < nodes.size(); ++i) { + NodeDTO nodeDTO = nodes.get(i); + table.addRow(String.valueOf(i), nodeDTO.getNodeId(), nodeDTO.getAddress(), String.valueOf(nodeDTO.getApiPort()), nodeDTO.getStatus()); + } + + final TableWriter tableWriter = new DynamicTableWriter(); + tableWriter.write(table, output); + } +} diff --git a/nifi-toolkit/nifi-toolkit-cli/src/test/java/org/apache/nifi/toolkit/cli/impl/result/TestVersionedFlowSnapshotMetadataResult.java b/nifi-toolkit/nifi-toolkit-cli/src/test/java/org/apache/nifi/toolkit/cli/impl/result/TestVersionedFlowSnapshotMetadataResult.java index 7f6f6d6bf81e..c36409663932 100644 --- a/nifi-toolkit/nifi-toolkit-cli/src/test/java/org/apache/nifi/toolkit/cli/impl/result/TestVersionedFlowSnapshotMetadataResult.java +++ b/nifi-toolkit/nifi-toolkit-cli/src/test/java/org/apache/nifi/toolkit/cli/impl/result/TestVersionedFlowSnapshotMetadataResult.java @@ -69,13 +69,13 @@ public void testWriteSimpleVersionedFlowSnapshotResult() throws ParseException, //System.out.println(resultOut); // can't get the time zone to line up on travis, so ignore this for now - final String expected = "\n" + - "Ver Date Author Message \n" + - "--- -------------------------- ------ ---------------------------------------- \n" ;//+ + final String expectedPattern = "^\\n" + + "Ver +Date + Author + Message +\\n" + + "-+ +-+ +-+ +-+ +\\n" + //"1 Wed, Feb 14 2018 12:00 EST user1 This is a long comment, longer than t... 
\n" + //"2 Wed, Feb 14 2018 12:30 EST user2 This is v2 \n" + - //"\n"; + "(.|\\n)+$"; - Assert.assertTrue(resultOut.startsWith(expected)); + Assert.assertTrue(resultOut.matches(expectedPattern)); } } diff --git a/nifi-toolkit/nifi-toolkit-encrypt-config/pom.xml b/nifi-toolkit/nifi-toolkit-encrypt-config/pom.xml index 764fcbff125b..a2925182a5b1 100644 --- a/nifi-toolkit/nifi-toolkit-encrypt-config/pom.xml +++ b/nifi-toolkit/nifi-toolkit-encrypt-config/pom.xml @@ -54,7 +54,7 @@ commons-cli commons-cli - 1.3.1 + 1.4 com.github.stefanbirkner @@ -77,7 +77,7 @@ org.apache.commons commons-configuration2 - 2.0 + 2.3 commons-beanutils diff --git a/nifi-toolkit/nifi-toolkit-s2s/pom.xml b/nifi-toolkit/nifi-toolkit-s2s/pom.xml index b38baae52358..3a9c4ae0729f 100644 --- a/nifi-toolkit/nifi-toolkit-s2s/pom.xml +++ b/nifi-toolkit/nifi-toolkit-s2s/pom.xml @@ -24,7 +24,7 @@ Site-to-site cli - 2.9.5 + 2.9.7 @@ -36,7 +36,7 @@ commons-cli commons-cli - 1.3.1 + 1.4 commons-io diff --git a/nifi-toolkit/nifi-toolkit-tls/pom.xml b/nifi-toolkit/nifi-toolkit-tls/pom.xml index f2b813f373c9..445e46c10ffc 100644 --- a/nifi-toolkit/nifi-toolkit-tls/pom.xml +++ b/nifi-toolkit/nifi-toolkit-tls/pom.xml @@ -24,7 +24,7 @@ Tooling to make tls configuration easier 2.26 - 2.9.5 + 2.9.7 @@ -52,17 +52,17 @@ org.bouncycastle bcpkix-jdk15on - 1.59 + 1.60 org.bouncycastle bcprov-jdk15on - 1.59 + 1.60 commons-cli commons-cli - 1.3.1 + 1.4 commons-io diff --git a/nifi-toolkit/nifi-toolkit-tls/src/test/java/org/apache/nifi/toolkit/tls/status/TlsToolkitGetStatusCommandLineTest.java b/nifi-toolkit/nifi-toolkit-tls/src/test/java/org/apache/nifi/toolkit/tls/status/TlsToolkitGetStatusCommandLineTest.java index 008a9af21503..65a3852ff440 100644 --- a/nifi-toolkit/nifi-toolkit-tls/src/test/java/org/apache/nifi/toolkit/tls/status/TlsToolkitGetStatusCommandLineTest.java +++ b/nifi-toolkit/nifi-toolkit-tls/src/test/java/org/apache/nifi/toolkit/tls/status/TlsToolkitGetStatusCommandLineTest.java @@ -30,6 +30,10 @@ public class TlsToolkitGetStatusCommandLineTest { + private final String TRUSTSTORE_PATH = "src/test/resources/localhost/truststore.jks"; + private final String TRUSTSTORE_PASSWORD = "passwordpassword"; + private final String JKS_TYPE = "JKS"; + private TlsToolkitGetStatusCommandLine commandLine; @Before @@ -53,9 +57,9 @@ public void testSuccess() { final String urlStr = "https://localhost:8443/test"; commandLine.parse( "-u", urlStr, - "-ts", "src/test/resources/localhost/truststore.jks", - "-tst", "JKS", - "-tsp", "t7rmn1fg8np2ck1sduqdd85opv"); + "-ts", TRUSTSTORE_PATH, + "-tst", JKS_TYPE, + "-tsp", TRUSTSTORE_PASSWORD); final GetStatusConfig config = commandLine.createConfig(); Assert.assertNotNull(config); @@ -75,9 +79,9 @@ public void testSuccess() { public void testMissingUrl() { try { commandLine.parse( - "-ts", "src/test/resources/localhost/truststore.jks", - "-tst", "JKS", - "-tsp", "t7rmn1fg8np2ck1sduqdd85opv"); + "-ts", TRUSTSTORE_PATH, + "-tst", JKS_TYPE, + "-tsp", TRUSTSTORE_PASSWORD); fail("Expected invalid args"); } catch (CommandLineParseException e) { @@ -92,8 +96,8 @@ public void testTruststoreDoesNotExist() { commandLine.parse( "-u", urlStr, "-ts", "does/not/exist/truststore.jks", - "-tst", "JKS", - "-tsp", "t7rmn1fg8np2ck1sduqdd85opv"); + "-tst", JKS_TYPE, + "-tsp", TRUSTSTORE_PASSWORD); fail("Expected invalid args"); } catch (CommandLineParseException e) { @@ -107,9 +111,9 @@ public void testInvalidTruststoreType() { final String urlStr = "https://localhost:8443/test"; commandLine.parse( "-u", urlStr, - "-ts", 
"src/test/resources/localhost/truststore.jks", + "-ts", TRUSTSTORE_PATH, "-tst", "INVALID", - "-tsp", "t7rmn1fg8np2ck1sduqdd85opv"); + "-tsp", TRUSTSTORE_PASSWORD); fail("Expected invalid args"); } catch (CommandLineParseException e) { diff --git a/nifi-toolkit/nifi-toolkit-tls/src/test/resources/localhost/truststore.jks b/nifi-toolkit/nifi-toolkit-tls/src/test/resources/localhost/truststore.jks index 8d0b4de75564f61df06be7d1f07c0304bce1ce74..87f4be1cb74419252a6dd4a8cb8ed6063e7ade2f 100644 GIT binary patch delta 773 zcmbQw-p@Wku0Hi|yTCsN)(AaQ14{-5W?zFQW>14ACc_2HOpHuST#P_@enAExFyLk5 z)N1o+`_9YA$j!=Nplrx%z|F=S%EHaV;^*n+X((hM0OB(9aOUJEC+1}27nc~wiSrs- z7+4w^8d@5dnHolk^BNffxdu?Kfif|sOuV32-`!@|Ut{;7!tUKKMSMqg1vEB!kTp! z2j`{g#MTwww~ne?Sh2ygS48?Uk~Z&2)RDZ?Fi?E zm#U8py_)ZTzH~V!)no6)$=~S+ltqh0O^SiGM%3cLodG$hV%LKU->Z^S=yRw~)k+~3ZML_rB_B}F_ zB<*i6f1LGWsmD9z`o8n>dp~UTeZNj#RCjO0{gxLy(m!on@RVT=&jOw<(dW^z((1b- z45iG_(l6p(8U_Ra0@{%ox;*rhae$=Zrf|GafK{4IT%u*3G|X>~`}M;(Iy+kVDPZ{4x= z^{j(S?p~dcdG_|HV*mfIc)xg`TC}6h zGl_SPC%JEYC}L2ole)+u@za8vR|3{6ng+D)lw7e*skd6rv}0wedfUBvu1{`v4z2iK z`aJB7QCQjY?bGg^eq3i7nq#o!QFG;Po1|^-%Rd#YUa4?cG<_{=-XW$J?@&CUmT5?J=h^eo2J*ltkX2@pFc51H!KPrc2a|DqjG=()+=M%er_Sy* zE!nA)=Dp#eyO_fEZ3?><*l)dlXa0&r)wwg%@*GdE{9xbKCfaAY<@GnS6UQ4*B-G67 z+Z22ANM`GgS=Xa?uCI6_sraqfIH#5CwZdepV-~Ev`ic9dZhh&Z`Nr>&_bXFr7vC6h zR=Z9&4dJ8r7cadTa#G?)uG#oI^o#zMlk4lAzi^!)QM@EmbN)op&NJSO zb@P>*cA2+1vHCM7ax6`9G&#moGzdE(&bx?xi%W&titF8bmkMwRwP(sde;L#*Efy8~SzB commons-cli commons-cli - 1.3.1 + 1.4 com.google.guava diff --git a/nifi-toolkit/pom.xml b/nifi-toolkit/pom.xml index 12eae22b0df6..dee8b96bed5a 100644 --- a/nifi-toolkit/pom.xml +++ b/nifi-toolkit/pom.xml @@ -47,7 +47,14 @@ + + + io.netty + netty + 3.7.1.Final + + diff --git a/pom.xml b/pom.xml index 80662ee60ece..b86cada10379 100644 --- a/pom.xml +++ b/pom.xml @@ -342,6 +342,9 @@ **/Test*.class **/*Spec.class + + **/*ITSpec.class + true -Xmx1G -Djava.net.preferIPv4Stack=true @@ -433,7 +436,7 @@ com.puppycrawl.tools checkstyle - 8.5 + 8.12