From 375253743b8be96af01580ef0fdb706baf3040f3 Mon Sep 17 00:00:00 2001 From: Krzysztof Pado Date: Wed, 21 Dec 2016 20:22:25 +0100 Subject: [PATCH] Fix #35: Major clean up --- .scalafmt.conf | 6 + build.sbt | 36 +- project/Dependencies.scala | 5 +- project/plugins.sbt | 2 + .../src/main/resources/reference.conf | 10 + .../io/rdbc/pgsql/scodec/ColValueCodec.scala | 49 + .../rdbc/pgsql/scodec/ParamValuesCodec.scala | 71 ++ .../io/rdbc/pgsql/scodec/PgMapCodec.scala | 32 +- .../scodec/ReturnFieldFormatsCodec.scala | 20 +- .../io/rdbc/pgsql/scodec/ScodecDecoder.scala | 31 +- .../pgsql/scodec/ScodecDecoderFactory.scala | 6 +- .../io/rdbc/pgsql/scodec/ScodecEncoder.scala | 39 + .../pgsql/scodec/ScodecEncoderFactory.scala | 6 +- .../rdbc/pgsql/scodec/TerminatedCodec.scala | 4 +- .../pgsql/scodec/msg/backend/package.scala | 198 ++++ .../pgsql/scodec/msg/frontend/package.scala | 163 +++ .../io/rdbc/pgsql/scodec/msg/package.scala | 14 +- .../scala/io/rdbc/pgsql/scodec/package.scala | 105 ++ .../rdbc/pgsql/scodec/types/CommonCodec.scala | 6 +- .../pgsql/scodec/types/ScodecPgBool.scala | 5 +- .../pgsql/scodec/types/ScodecPgBytea.scala | 2 +- .../pgsql/scodec/types/ScodecPgChar.scala | 2 +- .../pgsql/scodec/types/ScodecPgDate.scala | 8 +- .../pgsql/scodec/types/ScodecPgDecimal.scala | 170 ++++ .../pgsql/scodec/types/ScodecPgFloat4.scala | 2 +- .../pgsql/scodec/types/ScodecPgFloat8.scala | 4 +- .../pgsql/scodec/types/ScodecPgInt2.scala | 2 +- .../pgsql/scodec/types/ScodecPgInt4.scala | 6 +- .../pgsql/scodec/types/ScodecPgInt8.scala | 2 +- .../pgsql/scodec/types/ScodecPgText.scala | 2 +- .../pgsql/scodec/types/ScodecPgTime.scala | 12 +- .../scodec/types/ScodecPgTimestamp.scala | 14 +- .../scodec/types/ScodecPgTimestampTz.scala | 16 +- .../pgsql/scodec/types/ScodecPgType.scala | 51 + .../scodec/types/ScodecPgTypesProvider.scala | 43 + .../pgsql/scodec/types/ScodecPgUuid.scala | 2 +- .../pgsql/scodec/types/ScodecPgVarchar.scala | 2 +- .../pgsql/scodec/types/ScodecStringLike.scala | 
13 +- .../io/rdbc/pgsql/scodec/types/package.scala | 16 +- .../src/main/resources/reference.conf | 31 + .../pgsql/core/AbstractPgConnection.scala | 474 +++++++++ .../io/rdbc/pgsql/core/ChannelWriter.scala | 2 +- .../io/rdbc/pgsql/core/PgAnyStatement.scala | 108 -- .../io/rdbc/pgsql/core/PgConnection.scala | 451 -------- ...ription.scala => PgConnectionConfig.scala} | 15 +- .../rdbc/pgsql/core/PgNativeStatement.scala | 34 - .../io/rdbc/pgsql/core/PgResultStream.scala | 45 - .../main/scala/io/rdbc/pgsql/core/PgRow.scala | 62 -- .../io/rdbc/pgsql/core/SessionParams.scala | 8 +- .../rdbc/pgsql/core/auth/Authenticator.scala | 10 +- .../auth/UsernamePasswordAuthenticator.scala | 16 +- .../io/rdbc/pgsql/core/codec/Decoded.scala | 4 +- .../io/rdbc/pgsql/core/codec/Decoder.scala | 9 +- .../pgsql/core/codec/DecoderFactory.scala | 4 +- .../io/rdbc/pgsql/core/codec/Encoder.scala | 6 +- .../pgsql/core/codec/EncoderFactory.scala | 4 +- .../exception/PgAuthFailureException.scala | 6 +- .../core/exception/PgChannelException.scala | 3 +- .../PgConstraintViolationException.scala | 6 +- .../core/exception/PgDecodeException.scala | 3 +- .../PgDriverInternalErrorException.scala | 3 +- .../core/exception/PgEncodeException.scala | 22 + .../exception/PgInvalidQueryException.scala | 6 +- .../PgProtocolViolationException.scala | 7 +- .../exception/PgStatusDataException.scala | 21 +- .../PgSubscriptionRejectedException.scala | 3 +- .../core/exception/PgTimeoutException.scala | 6 +- .../exception/PgUnauthorizedException.scala | 6 +- .../exception/PgUncategorizedException.scala | 10 +- .../PgUncategorizedStatusDataException.scala} | 13 +- .../PgUnsupportedCharsetException.scala} | 16 +- .../core/exception/PgUnsupportedType.scala | 7 +- .../rdbc/pgsql/core/fsm/Authenticating.scala | 58 -- .../core/fsm/DeallocatingStatement.scala | 39 - .../io/rdbc/pgsql/core/fsm/Initializing.scala | 49 - .../rdbc/pgsql/core/fsm/SimpleQuerying.scala | 79 -- .../scala/io/rdbc/pgsql/core/fsm/State.scala | 126 --- 
.../core/fsm/extendedquery/BeginningTx.scala | 89 -- .../pgsql/core/fsm/extendedquery/Failed.scala | 86 -- .../extendedquery/WaitingForDescribe.scala | 141 --- .../core/fsm/extendedquery/package.scala | 50 - .../writeonly/ExecutingWriteOnly.scala | 53 - .../core/internal/FatalErrorHandler.scala | 21 + .../pgsql/core/internal/PgAnyStatement.scala | 105 ++ .../pgsql/core/{ => internal}/PgCharset.scala | 28 +- .../core/internal/PgNativeStatement.scala | 60 ++ .../PgParametrizedStatement.scala | 36 +- .../pgsql/core/internal/PgResultStream.scala | 56 + .../io/rdbc/pgsql/core/internal/PgRow.scala | 75 ++ .../core/{ => internal}/PgRowPublisher.scala | 244 ++--- .../core/internal/PgSessionFsmManager.scala | 144 +++ .../internal/PgStatementDeallocator.scala | 25 + .../core/internal/PgStatementExecutor.scala | 35 + .../{ => internal}/PreparedStmtCache.scala | 15 +- .../core/internal/WriteFailureHandler.scala | 21 + .../core/internal/fsm/Authenticating.scala | 74 ++ .../{ => internal}/fsm/ConnectionClosed.scala | 8 +- .../internal/fsm/DeallocatingStatement.scala | 36 + .../internal/fsm/DefaultErrorHandling.scala | 22 + .../pgsql/core/internal/fsm/EmptyState.scala | 23 + .../fsm}/ExecutingBatch.scala | 29 +- .../internal/fsm/ExecutingWriteOnly.scala | 56 + .../pgsql/core/{ => internal}/fsm/Idle.scala | 9 +- .../core/internal/fsm/Initializing.scala | 49 + .../internal/fsm/NonFatalErrorsAreFatal.scala | 32 + .../core/internal/fsm/SimpleQuerying.scala | 40 + .../{ => internal}/fsm/StartingRequest.scala | 6 +- .../rdbc/pgsql/core/internal/fsm/State.scala | 245 +++++ .../pgsql/core/internal/fsm/StateAction.scala | 55 + .../{ => internal}/fsm/Uninitialized.scala | 6 +- .../core/internal/fsm/WaitingForReady.scala | 34 + .../core/internal/fsm/WarningCollection.scala | 40 + .../fsm/streaming/AfterDescData.scala} | 19 +- .../fsm/streaming/StrmBeginningTx.scala | 92 ++ .../streaming/StrmPendingClosePortal.scala} | 38 +- .../fsm/streaming/StrmPendingCommit.scala} | 35 +- 
.../fsm/streaming/StrmPullingRows.scala} | 63 +- .../fsm/streaming/StrmQueryFailed.scala | 86 ++ .../streaming/StrmWaitingAfterClose.scala} | 33 +- .../streaming/StrmWaitingAfterCommit.scala} | 28 +- .../streaming/StrmWaitingAfterRollback.scala} | 46 +- .../streaming/StrmWaitingForDescribe.scala | 134 +++ .../scheduler/ScheduledTask.scala} | 6 +- .../scheduler/TaskScheduler.scala | 6 +- .../internal/scheduler/TimeoutHandler.scala | 25 + .../pgsql/core/messages/frontend/Bind.scala | 43 - .../pgsql/core/messages/frontend/Close.scala | 20 - .../messages/frontend/StartupMessage.scala | 24 - .../scala/io/rdbc/pgsql/core/package.scala | 27 +- .../Field.scala => pgstruct/ColDesc.scala} | 14 +- .../rdbc/pgsql/core/pgstruct/ColFormat.scala | 24 + .../Parse.scala => pgstruct/ColValue.scala} | 10 +- .../rdbc/pgsql/core/pgstruct/DataType.scala | 24 + .../io/rdbc/pgsql/core/pgstruct/Oid.scala | 23 + .../rdbc/pgsql/core/pgstruct/ParamValue.scala | 29 + .../core/pgstruct/ReturnColFormats.scala | 26 + .../rdbc/pgsql/core/pgstruct/StatusData.scala | 38 + .../TxStatus.scala} | 15 +- .../{ => pgstruct}/messages/PgMessage.scala | 2 +- .../messages/backend/BackendKeyData.scala} | 10 +- .../messages/backend/BindComplete.scala | 4 +- .../messages/backend/CloseComplete.scala | 4 +- .../messages/backend/CommandComplete.scala | 4 +- .../messages/backend/DataRow.scala | 6 +- .../messages/backend/EmptyQueryResponse.scala | 4 +- .../messages/backend/MsgHeader.scala} | 4 +- .../messages/backend/NoData.scala | 2 +- .../backend/ParameterDescription.scala | 6 +- .../messages/backend/ParameterStatus.scala | 22 + .../messages/backend/ParseComplete.scala | 4 +- .../messages/backend/PgBackendMessage.scala | 4 +- .../messages/backend/PortalSuspended.scala | 2 +- .../messages/backend/ReadyForQuery.scala | 11 +- .../messages/backend/RowDescription.scala | 7 +- .../messages/backend/StatusMessage.scala | 70 +- .../backend/UnknownBackendMessage.scala | 10 +- .../backend/auth/AuthBackendMessage.scala | 4 +- 
.../messages/backend/auth/AuthOk.scala | 4 +- .../messages/backend/auth/AuthRequest.scala | 21 + .../backend/auth/AuthRequestMd5.scala | 8 +- .../pgstruct/messages/frontend/Bind.scala | 25 + .../messages/frontend/CancelRequest.scala | 6 +- .../messages/frontend/ClosePortal.scala | 19 + .../messages/frontend/CloseStatement.scala | 19 + .../pgstruct/messages/frontend/Describe.scala | 20 + .../messages/frontend/Execute.scala | 6 +- .../messages/frontend/Flush.scala | 2 +- .../pgstruct/messages/frontend/Parse.scala | 24 + .../messages/frontend/PasswordMessage.scala | 13 +- .../messages/frontend/PgFrontendMessage.scala | 6 +- .../messages/frontend/Query.scala | 4 +- .../pgstruct/messages/frontend/Startup.scala | 23 + .../messages/frontend/Sync.scala | 2 +- .../messages/frontend/Terminate.scala | 4 +- .../pgstruct/messages/frontend/package.scala | 24 + .../io/rdbc/pgsql/core/types/PgBool.scala | 3 +- .../io/rdbc/pgsql/core/types/PgBytea.scala | 3 +- .../io/rdbc/pgsql/core/types/PgChar.scala | 3 +- .../io/rdbc/pgsql/core/types/PgDate.scala | 3 +- .../io/rdbc/pgsql/core/types/PgDecimal.scala | 3 +- .../io/rdbc/pgsql/core/types/PgFloat4.scala | 3 +- .../io/rdbc/pgsql/core/types/PgFloat8.scala | 3 +- .../io/rdbc/pgsql/core/types/PgInt2.scala | 3 +- .../io/rdbc/pgsql/core/types/PgInt4.scala | 3 +- .../io/rdbc/pgsql/core/types/PgInt8.scala | 3 +- .../io/rdbc/pgsql/core/types/PgText.scala | 3 +- .../io/rdbc/pgsql/core/types/PgTime.scala | 3 +- .../rdbc/pgsql/core/types/PgTimestamp.scala | 3 +- .../rdbc/pgsql/core/types/PgTimestampTz.scala | 3 +- .../io/rdbc/pgsql/core/types/PgType.scala | 8 +- .../pgsql/core/types/PgTypeRegistry.scala | 35 +- .../PgTypesProvider.scala} | 8 +- .../io/rdbc/pgsql/core/types/PgUuid.scala | 3 +- .../io/rdbc/pgsql/core/types/PgVarchar.scala | 3 +- .../BlockLock.scala} | 33 +- .../concurrent/Lock.scala} | 10 +- .../concurrent/SpinLock.scala} | 26 +- .../src/main/resources/logback.xml | 12 + .../scala/io/rdbc/pgsql/playground/Jdbc.scala | 18 +- 
.../scala/io/rdbc/pgsql/playground/Pars.scala | 41 + .../io/rdbc/pgsql/playground/ScodecTest.scala | 5 +- .../io/rdbc/pgsql/playground/tests.scala | 959 ++++++++++-------- .../rdbc/pgsql/scodec/FieldValueCodec.scala | 42 - .../rdbc/pgsql/scodec/ParamValuesCodec.scala | 56 - .../io/rdbc/pgsql/scodec/ScodecEncoder.scala | 45 - .../pgsql/scodec/msg/backend/package.scala | 151 --- .../pgsql/scodec/msg/frontend/package.scala | 113 --- .../scala/io/rdbc/pgsql/scodec/package.scala | 121 --- .../pgsql/scodec/types/ScodecPgDecimal.scala | 150 --- .../pgsql/scodec/types/ScodecPgType.scala | 39 - .../src/main/resources/reference.conf | 19 + .../transport/netty/ChannelOptionValue.scala | 21 + .../EventLoopGroupExecutionContext.scala | 40 - .../netty/EventLoopGroupScheduler.scala | 18 +- .../transport/netty/NettyChannelWriter.scala | 31 +- .../netty/NettyPgConnFactoryConfig.scala | 144 +++ .../transport/netty/NettyPgConnection.scala | 56 +- .../netty/NettyPgConnectionFactory.scala | 321 +++--- ...ledTask.scala => NettyScheduledTask.scala} | 7 +- .../transport/netty/PgMsgDecoderHandler.scala | 27 +- .../transport/netty/PgMsgEncoderHandler.scala | 30 +- .../rdbc/pgsql/transport/netty/package.scala | 28 +- scalastyle-config.xml | 122 +++ 223 files changed, 5412 insertions(+), 3707 deletions(-) create mode 100644 .scalafmt.conf create mode 100644 rdbc-pgsql-codec-scodec/src/main/resources/reference.conf create mode 100644 rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ColValueCodec.scala create mode 100644 rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ParamValuesCodec.scala rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/PgMapCodec.scala (50%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/ReturnFieldFormatsCodec.scala (55%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoder.scala (55%) rename {rdbc-pgsql-scodec => 
rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoderFactory.scala (80%) create mode 100644 rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoder.scala rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoderFactory.scala (80%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/TerminatedCodec.scala (85%) create mode 100644 rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/backend/package.scala create mode 100644 rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/frontend/package.scala rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/msg/package.scala (60%) create mode 100644 rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/package.scala rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/CommonCodec.scala (78%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBool.scala (88%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBytea.scala (89%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgChar.scala (92%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDate.scala (85%) create mode 100644 rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDecimal.scala rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat4.scala (93%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat8.scala (87%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt2.scala (93%) rename {rdbc-pgsql-scodec => 
rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt4.scala (88%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt8.scala (93%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgText.scala (92%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTime.scala (77%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestamp.scala (75%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestampTz.scala (73%) create mode 100644 rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgType.scala create mode 100644 rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTypesProvider.scala rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgUuid.scala (93%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgVarchar.scala (91%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecStringLike.scala (67%) rename {rdbc-pgsql-scodec => rdbc-pgsql-codec-scodec}/src/main/scala/io/rdbc/pgsql/scodec/types/package.scala (68%) create mode 100644 rdbc-pgsql-core/src/main/resources/reference.conf create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/AbstractPgConnection.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgAnyStatement.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgConnection.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{messages/backend/FieldDescription.scala => PgConnectionConfig.scala} (60%) delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgNativeStatement.scala delete mode 100644 
rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgResultStream.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgRow.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgEncodeException.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{messages/frontend/Describe.scala => exception/PgUncategorizedStatusDataException.scala} (68%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{util/Preconditions.scala => exception/PgUnsupportedCharsetException.scala} (59%) delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Authenticating.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/DeallocatingStatement.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Initializing.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/SimpleQuerying.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/State.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/BeginningTx.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/Failed.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForDescribe.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/package.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/writeonly/ExecutingWriteOnly.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/FatalErrorHandler.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgAnyStatement.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => internal}/PgCharset.scala (72%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgNativeStatement.scala rename 
rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => internal}/PgParametrizedStatement.scala (50%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgResultStream.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgRow.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => internal}/PgRowPublisher.scala (56%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgSessionFsmManager.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgStatementDeallocator.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgStatementExecutor.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => internal}/PreparedStmtCache.scala (67%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/WriteFailureHandler.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Authenticating.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => internal}/fsm/ConnectionClosed.scala (80%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/DeallocatingStatement.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/DefaultErrorHandling.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/EmptyState.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{fsm/extendedquery/batch => internal/fsm}/ExecutingBatch.scala (53%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/ExecutingWriteOnly.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => internal}/fsm/Idle.scala (77%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Initializing.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/NonFatalErrorsAreFatal.scala create mode 100644 
rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/SimpleQuerying.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => internal}/fsm/StartingRequest.scala (84%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/State.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/StateAction.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => internal}/fsm/Uninitialized.scala (85%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/WaitingForReady.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/WarningCollection.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{fsm/WaitingForReady.scala => internal/fsm/streaming/AfterDescData.scala} (58%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmBeginningTx.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{fsm/extendedquery/CompletedPendingClosePortal.scala => internal/fsm/streaming/StrmPendingClosePortal.scala} (50%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{fsm/extendedquery/CompletedPendingCommit.scala => internal/fsm/streaming/StrmPendingCommit.scala} (51%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{fsm/extendedquery/PullingRows.scala => internal/fsm/streaming/StrmPullingRows.scala} (52%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmQueryFailed.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{fsm/extendedquery/WaitingForCloseCompletion.scala => internal/fsm/streaming/StrmWaitingAfterClose.scala} (50%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{fsm/extendedquery/WaitingForCommitCompletion.scala => internal/fsm/streaming/StrmWaitingAfterCommit.scala} (56%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{fsm/extendedquery/WaitingForRollbackCompletion.scala => 
internal/fsm/streaming/StrmWaitingAfterRollback.scala} (50%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingForDescribe.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{messages/backend/Header.scala => internal/scheduler/ScheduledTask.scala} (86%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => internal}/scheduler/TaskScheduler.scala (89%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/scheduler/TimeoutHandler.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Bind.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Close.scala delete mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/StartupMessage.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{messages/data/Field.scala => pgstruct/ColDesc.scala} (64%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ColFormat.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{messages/frontend/Parse.scala => pgstruct/ColValue.scala} (73%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/DataType.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/Oid.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ParamValue.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ReturnColFormats.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/StatusData.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{messages/data/DbValFormat.scala => pgstruct/TxStatus.scala} (74%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/PgMessage.scala (93%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{messages/data/DataType.scala => 
pgstruct/messages/backend/BackendKeyData.scala} (71%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/BindComplete.scala (85%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/CloseComplete.scala (85%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/CommandComplete.scala (82%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/DataRow.scala (77%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/EmptyQueryResponse.scala (85%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{messages/backend/BackendKeyData.scala => pgstruct/messages/backend/MsgHeader.scala} (83%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/NoData.scala (92%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/ParameterDescription.scala (76%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ParameterStatus.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/ParseComplete.scala (85%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/PgBackendMessage.scala (85%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/PortalSuspended.scala (92%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/ReadyForQuery.scala (77%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/RowDescription.scala (78%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/StatusMessage.scala (52%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/UnknownBackendMessage.scala (71%) rename 
rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/auth/AuthBackendMessage.scala (83%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/auth/AuthOk.scala (85%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthRequest.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/backend/auth/AuthRequestMd5.scala (76%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Bind.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/frontend/CancelRequest.scala (75%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/ClosePortal.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/CloseStatement.scala create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Describe.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/frontend/Execute.scala (75%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/frontend/Flush.scala (92%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Parse.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/frontend/PasswordMessage.scala (79%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/frontend/PgFrontendMessage.scala (80%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/frontend/Query.scala (83%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Startup.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => pgstruct}/messages/frontend/Sync.scala (92%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{ => 
pgstruct}/messages/frontend/Terminate.scala (85%) create mode 100644 rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/package.scala rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{messages/backend/ParameterStatus.scala => types/PgTypesProvider.scala} (82%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/{Lock.scala => concurrent/BlockLock.scala} (72%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{fsm/EmptyState.scala => util/concurrent/Lock.scala} (82%) rename rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/{scheduler/TimeoutScheduler.scala => util/concurrent/SpinLock.scala} (59%) create mode 100644 rdbc-pgsql-playground/src/main/resources/logback.xml create mode 100644 rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/Pars.scala delete mode 100644 rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/FieldValueCodec.scala delete mode 100644 rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ParamValuesCodec.scala delete mode 100644 rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoder.scala delete mode 100644 rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/backend/package.scala delete mode 100644 rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/frontend/package.scala delete mode 100644 rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/package.scala delete mode 100644 rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDecimal.scala delete mode 100644 rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgType.scala create mode 100644 rdbc-pgsql-transport-netty/src/main/resources/reference.conf create mode 100644 rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/ChannelOptionValue.scala delete mode 100644 rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/EventLoopGroupExecutionContext.scala create mode 100644 
rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnFactoryConfig.scala rename rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/{FutureScheduledTask.scala => NettyScheduledTask.scala} (75%) create mode 100644 scalastyle-config.xml diff --git a/.scalafmt.conf b/.scalafmt.conf new file mode 100644 index 0000000..dd34e27 --- /dev/null +++ b/.scalafmt.conf @@ -0,0 +1,6 @@ +style = default +maxColumn = 120 +importSelectors = binPack +includeCurlyBraceInSelectChains = false +rewrite.rules = [SortImports] +project.git = true diff --git a/build.sbt b/build.sbt index 33b95ef..951b316 100644 --- a/build.sbt +++ b/build.sbt @@ -28,7 +28,10 @@ lazy val commonSettings = Seq( setNextVersion, commitNextVersion, pushChanges - ) + ), + buildInfoKeys := Vector(version, scalaVersion, git.gitHeadCommit, BuildInfoKey.action("buildTime") { + java.time.Instant.now() + }) ) lazy val rdbcPgsql = (project in file(".")) @@ -40,6 +43,7 @@ lazy val rdbcPgsql = (project in file(".")) .aggregate(core, scodec, nettyTransport) lazy val core = (project in file("rdbc-pgsql-core")) + .enablePlugins(BuildInfoPlugin) .settings(commonSettings: _*) .settings( name := "pgsql-core", @@ -47,24 +51,31 @@ lazy val core = (project in file("rdbc-pgsql-core")) Library.rdbcScalaApi, Library.rdbcTypeconv, Library.rdbcImplbase, + Library.rdbcUtil, Library.typesafeConfig, Library.scalaLogging, Library.akkaStream, - Library.sourcecode - ) + Library.sourcecode, + Library.scodecBits + ), + buildInfoPackage := "io.rdbc.pgsql.core" ) -lazy val scodec = (project in file("rdbc-pgsql-scodec")) +lazy val scodec = (project in file("rdbc-pgsql-codec-scodec")) + .enablePlugins(BuildInfoPlugin) .settings(commonSettings: _*) .settings( name := "pgsql-codec-scodec", libraryDependencies ++= Vector( Library.scodecBits, Library.scodecCore - ) - ).dependsOn(core) + ), + buildInfoPackage := "io.rdbc.pgsql.scodec" + ) + .dependsOn(core) lazy val nettyTransport = (project in 
file("rdbc-pgsql-transport-netty")) + .enablePlugins(BuildInfoPlugin) .settings(commonSettings: _*) .settings( name := "pgsql-transport-netty", @@ -72,10 +83,13 @@ lazy val nettyTransport = (project in file("rdbc-pgsql-transport-netty")) Library.nettyHandler, Library.nettyEpoll, Library.rdbcTypeconv, + Library.rdbcUtil, Library.scalaLogging, Library.logback - ) - ).dependsOn(core, scodec) + ), + buildInfoPackage := "io.rdbc.pgsql.transport.netty" + ) + .dependsOn(core, scodec) lazy val playground = (project in file("rdbc-pgsql-playground")) .settings(commonSettings: _*) @@ -84,6 +98,8 @@ lazy val playground = (project in file("rdbc-pgsql-playground")) publishArtifact := false, bintrayReleaseOnPublish := false, libraryDependencies ++= Vector( - "org.postgresql" % "postgresql" % "9.4.1211" + "org.postgresql" % "postgresql" % "9.4.1212", + "org.scala-lang.modules" %% "scala-parser-combinators" % "1.0.5" ) - ).dependsOn(core, scodec, nettyTransport) + ) + .dependsOn(core, scodec, nettyTransport) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index fabd9f0..c3f249b 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -1,7 +1,7 @@ import sbt._ object Version { - val rdbc = "0.0.30" + val rdbc = "0.0.37" val netty = "4.1.6.Final" } @@ -9,8 +9,9 @@ object Library { val rdbcScalaApi = "io.rdbc" %% "rdbc-api-scala" % Version.rdbc val rdbcImplbase = "io.rdbc" %% "rdbc-implbase" % Version.rdbc val rdbcTypeconv = "io.rdbc" %% "rdbc-typeconv" % Version.rdbc + val rdbcUtil = "io.rdbc" %% "rdbc-util" % Version.rdbc val reactiveStreams = "org.reactivestreams" % "reactive-streams" % "1.0.0" - val akkaStream = "com.typesafe.akka" %% "akka-stream" % "2.4.14" + val akkaStream = "com.typesafe.akka" %% "akka-stream" % "2.4.16" val scodecBits = "org.scodec" %% "scodec-bits" % "1.1.2" val scodecCore = "org.scodec" %% "scodec-core" % "1.10.3" val typesafeConfig = "com.typesafe" % "config" % "1.3.1" diff --git a/project/plugins.sbt 
b/project/plugins.sbt index 58b6660..dd3f753 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -5,3 +5,5 @@ addSbtPlugin("me.lessis" % "bintray-sbt" % "0.3.0") addSbtPlugin("de.heikoseeberger" % "sbt-header" % "1.6.0") addSbtPlugin("io.get-coursier" % "sbt-coursier" % "1.0.0-M14") addSbtPlugin("com.github.gseitz" % "sbt-release" % "1.0.3") +addSbtPlugin("org.scalastyle" %% "scalastyle-sbt-plugin" % "0.8.0") +addSbtPlugin("com.eed3si9n" % "sbt-buildinfo" % "0.6.1") diff --git a/rdbc-pgsql-codec-scodec/src/main/resources/reference.conf b/rdbc-pgsql-codec-scodec/src/main/resources/reference.conf new file mode 100644 index 0000000..59e8194 --- /dev/null +++ b/rdbc-pgsql-codec-scodec/src/main/resources/reference.conf @@ -0,0 +1,10 @@ +rdbc.pgsql.codec.defaults { + + msg-decoder-factory = "io.rdbc.pgsql.scodec.ScodecDecoderFactory" + msg-encoder-factory = "io.rdbc.pgsql.scodec.ScodecEncoderFactory" + + pg-types-providers = [ + "io.rdbc.pgsql.scodec.types.ScodecPgTypesProvider" + ] + +} diff --git a/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ColValueCodec.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ColValueCodec.scala new file mode 100644 index 0000000..e12ea6b --- /dev/null +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ColValueCodec.scala @@ -0,0 +1,49 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.scodec + +import _root_.scodec.bits.{BitVector, ByteVector} +import _root_.scodec.codecs._ +import _root_.scodec.{Attempt, Codec, DecodeResult, SizeBound} +import io.rdbc.pgsql.core.pgstruct.ColValue + +private[scodec] object ColValueCodec extends Codec[ColValue] { + + private[this] val nullLength = -1 + + val sizeBound = SizeBound.exact(32) | SizeBound.atLeast(32) + + def decode(bits: BitVector): Attempt[DecodeResult[ColValue]] = { + int32.withContext("col_val_length") //TODO maybe use "conditional" codec, check in other places if it can be used + .decode(bits) + .flatMap(lenResult => { + val len = lenResult.value + if (len == nullLength) { + Attempt.successful(DecodeResult(ColValue.Null, lenResult.remainder)) + } else { + bytes(len).withContext("col_val_bytes") + .as[ColValue.NotNull] + .decode(lenResult.remainder) + } + }) + } + + def encode(value: ColValue): Attempt[BitVector] = value match { + case ColValue.Null => int32.unit(nullLength).encode(Unit) + case ColValue.NotNull(data) => variableSizeBytes(int32, bytes).encode(data) + } +} diff --git a/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ParamValuesCodec.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ParamValuesCodec.scala new file mode 100644 index 0000000..af65b1a --- /dev/null +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ParamValuesCodec.scala @@ -0,0 +1,71 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.scodec + +import java.nio.charset.Charset + +import _root_.scodec.bits.BitVector +import _root_.scodec.codecs._ +import _root_.scodec.{Attempt, Codec, DecodeResult, Err, SizeBound} +import io.rdbc.pgsql.core.pgstruct.ParamValue + +private[scodec] object ParamValuesCodec { + + private val formatCodec = new Codec[ParamValue] { + val sizeBound = SizeBound.exact(16) + + def encode(value: ParamValue): Attempt[BitVector] = { + value match { + case _: ParamValue.Textual | _: ParamValue.Null => int16.encode(0) + case _: ParamValue.Binary => int16.encode(1) + } + } + + def decode(bits: BitVector): Attempt[DecodeResult[ParamValue]] = { + Attempt.failure(Err("decoding not supported")) + } + } + + private def valueCodec(implicit charset: Charset) = new Codec[ParamValue] { + def sizeBound: SizeBound = SizeBound.atLeast(32) + + def encode(value: ParamValue): Attempt[BitVector] = value match { + case _: ParamValue.Null => int32.encode(-1) + case ParamValue.Textual(value, _) => variableSizeBytes(int32, string).encode(value) + case ParamValue.Binary(value, _) => variableSizeBytes(int32, bytes).encode(value) + } + + def decode(bits: BitVector): Attempt[Nothing] = { + Attempt.failure(Err("decoding not supported")) + } + } + + def paramValues(implicit charset: Charset) = new Codec[Vector[ParamValue]] { + + val sizeBound: SizeBound = SizeBound.unknown + + def encode(params: Vector[ParamValue]): Attempt[BitVector] = { + { + vectorOfN(int16, formatCodec) ~ vectorOfN(int16, valueCodec) + }.encode(params, params) + } + + def decode(bits: BitVector): Attempt[Nothing] = { + Attempt.failure(Err("decoding not supported")) + } + } +} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/PgMapCodec.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/PgMapCodec.scala similarity index 50% rename from 
rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/PgMapCodec.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/PgMapCodec.scala index c067409..4640221 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/PgMapCodec.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/PgMapCodec.scala @@ -24,33 +24,15 @@ import scala.collection.mutable.ListBuffer class PgMapCodec[K](codec: Codec[(K, String)]) extends Codec[Map[K, String]] { - private val nul = BitVector.lowByte + private[this] val nul = BitVector.lowByte - def sizeBound = SizeBound.unknown + val sizeBound = SizeBound.unknown - def encode(options: Map[K, String]) = Encoder.encodeSeq(codec)(options.toList).map(_ ++ nul) + def encode(options: Map[K, String]): Attempt[BitVector] = { + Encoder.encodeSeq(codec)(options.toList).map(_ ++ nul) + } def decode(buffer: BitVector): Attempt[DecodeResult[Map[K, String]]] = { - val builder = ListBuffer.empty[(K, String)] - var remaining = buffer - var count = 0 - var error: Option[Err] = None - while (remaining.sizeGreaterThanOrEqual(8) && remaining.slice(0, 8) != nul) { - codec.decode(remaining) match { - case Attempt.Successful(DecodeResult(value, rest)) => - builder += value - count += 1 - remaining = rest - case Attempt.Failure(err) => - error = Some(err.pushContext(count.toString)) - remaining = BitVector.empty - } - } - error match { - case None => constant(nul).withContext("collection terminator").decode(remaining).map(dr => DecodeResult(Map(builder: _*), dr.remainder)) - case Some(err) => Attempt.failure(err) - } + Attempt.failure(Err("decoding not supported")) } - - override def toString = s"pgMap($codec)" -} \ No newline at end of file +} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ReturnFieldFormatsCodec.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ReturnFieldFormatsCodec.scala similarity index 55% rename from 
rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ReturnFieldFormatsCodec.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ReturnFieldFormatsCodec.scala index 083e515..e1c788f 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ReturnFieldFormatsCodec.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ReturnFieldFormatsCodec.scala @@ -18,17 +18,19 @@ package io.rdbc.pgsql.scodec import _root_.scodec.bits.BitVector import _root_.scodec.codecs._ -import _root_.scodec.{Attempt, Codec, DecodeResult, SizeBound} -import io.rdbc.pgsql.core.messages.frontend._ +import _root_.scodec.{Attempt, Codec, Err, SizeBound} +import io.rdbc.pgsql.core.pgstruct.ReturnColFormats -object ReturnFieldFormatsCodec extends Codec[ReturnFieldFormats] { +private[scodec] object ReturnFieldFormatsCodec extends Codec[ReturnColFormats] { def sizeBound: SizeBound = SizeBound.atLeast(1) - def encode(value: ReturnFieldFormats): Attempt[BitVector] = value match { - case NoReturnFields | AllTextual => pgInt16.encode(0) - case AllBinary => (pgInt16 ~ pgInt16).encode(1, 1) - case SpecificFieldFormats(formats) => listOfN(pgInt16, dbValFormat).encode(formats) + def encode(value: ReturnColFormats): Attempt[BitVector] = value match { + case ReturnColFormats.None | ReturnColFormats.AllTextual => int16.encode(0) + case ReturnColFormats.AllBinary => (int16 ~ int16).encode(1, 1) + case ReturnColFormats.Specific(formats) => vectorOfN(int16, colValFormat).encode(formats) } - def decode(bits: BitVector): Attempt[DecodeResult[ReturnFieldFormats]] = ??? 
//TODO -} \ No newline at end of file + def decode(bits: BitVector): Attempt[Nothing] = { + Attempt.failure(Err("decoding not supported")) + } +} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoder.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoder.scala similarity index 55% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoder.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoder.scala index 6f1f3c7..2b0a1e0 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoder.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoder.scala @@ -20,25 +20,30 @@ import java.nio.charset.Charset import _root_.scodec.Attempt.{Failure, Successful} import _root_.scodec.DecodeResult -import _root_.scodec.bits.BitVector +import _root_.scodec.bits.ByteVector import io.rdbc.pgsql.core.codec.{Decoded, Decoder} import io.rdbc.pgsql.core.exception.PgDecodeException -import io.rdbc.pgsql.core.messages.backend.{Header, PgBackendMessage} +import io.rdbc.pgsql.core.pgstruct.messages.backend.{MsgHeader, PgBackendMessage} import io.rdbc.pgsql.scodec.msg.backend._ -object ScodecDecoder extends Decoder { - override def decodeMsg(bytes: Array[Byte])(implicit charset: Charset): Decoded[PgBackendMessage] = { - pgBackendMessage.decode(BitVector.view(bytes)) match { - case Successful(DecodeResult(msg, remainder)) => Decoded(msg, remainder.toByteArray) - case Failure(err) => throw PgDecodeException(err.messageWithContext) - } +class ScodecDecoder(protected val charset: Charset) extends Decoder { + + private[this] val codec = pgBackendMessage(charset) + + def decodeMsg(bytes: ByteVector): Decoded[PgBackendMessage] = { + decode(codec, bytes) + } + + def decodeHeader(bytes: ByteVector): Decoded[MsgHeader] = { + decode(header, bytes) } - override def decodeHeader(bytes: Array[Byte]): Decoded[Header] = { - 
header.decode(BitVector.view(bytes)) match { - case Successful(DecodeResult(msg, remainder)) => Decoded(msg, remainder.toByteArray) - case Failure(err) => throw PgDecodeException(err.messageWithContext) + private def decode[A](decoder: scodec.Decoder[A], bytes: ByteVector): Decoded[A] = { + decoder.decode(bytes.bits) match { //TODO data copying is a major bottleneck. decide + case Successful(DecodeResult(msg, remainder)) => Decoded(msg, remainder.bytes) + case Failure(err) => throw new PgDecodeException( + s"Error occurred while decoding message ${bytes.toHex}: ${err.messageWithContext}" + ) } - //TODO code dupl } } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoderFactory.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoderFactory.scala similarity index 80% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoderFactory.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoderFactory.scala index c0df0b2..65a9350 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoderFactory.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecDecoderFactory.scala @@ -16,8 +16,10 @@ package io.rdbc.pgsql.scodec +import java.nio.charset.Charset + import io.rdbc.pgsql.core.codec.DecoderFactory -object ScodecDecoderFactory extends DecoderFactory { - val decoder = ScodecDecoder +class ScodecDecoderFactory extends DecoderFactory { + def decoder(charset: Charset): ScodecDecoder = new ScodecDecoder(charset) } diff --git a/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoder.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoder.scala new file mode 100644 index 0000000..783b302 --- /dev/null +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoder.scala @@ -0,0 +1,39 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 
2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.scodec + +import java.nio.charset.Charset + +import io.rdbc.pgsql.core.codec.Encoder +import io.rdbc.pgsql.core.exception.PgEncodeException +import io.rdbc.pgsql.core.pgstruct.messages.frontend._ +import io.rdbc.pgsql.scodec.msg.frontend.{pgFrontendMessage, _} +import scodec.Attempt.{Failure, Successful} +import scodec.bits.ByteVector + +class ScodecEncoder(protected val charset: Charset) extends Encoder { + private[this] val codec = pgFrontendMessage(charset) + + def encode(msg: PgFrontendMessage): ByteVector = { + codec.encode(msg) match { + case Successful(bits) => bits.bytes + case Failure(err) => throw new PgEncodeException( + s"Error occurred while encoding message '$msg': ${err.messageWithContext}" + ) + } + } +} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoderFactory.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoderFactory.scala similarity index 80% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoderFactory.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoderFactory.scala index 337d6ba..5e44d09 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoderFactory.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoderFactory.scala @@ -16,8 +16,10 @@ package io.rdbc.pgsql.scodec +import java.nio.charset.Charset + import 
io.rdbc.pgsql.core.codec.EncoderFactory -object ScodecEncoderFactory extends EncoderFactory { - val encoder = ScodecEncoder +class ScodecEncoderFactory extends EncoderFactory { + def encoder(charset: Charset): ScodecEncoder = new ScodecEncoder(charset) } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/TerminatedCodec.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/TerminatedCodec.scala similarity index 85% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/TerminatedCodec.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/TerminatedCodec.scala index bb31b22..e9c5fb0 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/TerminatedCodec.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/TerminatedCodec.scala @@ -19,13 +19,13 @@ package io.rdbc.pgsql.scodec import _root_.scodec.bits.BitVector import _root_.scodec.{Attempt, Codec, DecodeResult, Err, SizeBound} -class TerminatedCodec[A](terminator: BitVector, codec: Codec[A]) extends Codec[A] { +private[scodec] class TerminatedCodec[A](terminator: BitVector, codec: Codec[A]) extends Codec[A] { def sizeBound: SizeBound = SizeBound.unknown def decode(bits: BitVector): Attempt[DecodeResult[A]] = { bits.bytes.indexOfSlice(terminator.bytes) match { - case -1 => Attempt.failure(Err(s"Does not contain a '0x${terminator.toHex}' terminator.")) + case -1 => Attempt.failure(Err(s"does not contain a '${terminator.toHex}' terminator")) case i => codec.decode(bits.take(i * 8L)).map(dr => dr.mapRemainder(_ => bits.drop(i * 8L + 8L))) } } diff --git a/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/backend/package.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/backend/package.scala new file mode 100644 index 0000000..6661f6e --- /dev/null +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/backend/package.scala @@ -0,0 +1,198 @@ +/* + * Copyright 2016 Krzysztof Pado 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.scodec.msg + +import java.nio.charset.Charset + +import _root_.scodec.codecs._ +import _root_.scodec.{Attempt, Codec, DecodeResult, Err, SizeBound} +import io.rdbc.pgsql.core.pgstruct.TxStatus +import io.rdbc.pgsql.core.pgstruct.messages.backend._ +import io.rdbc.pgsql.core.pgstruct.messages.backend.auth.{AuthBackendMessage, AuthOk, AuthRequestMd5} +import io.rdbc.pgsql.scodec._ +import scodec.bits.BitVector + +package object backend { + + private[scodec] def pgBackendMessage(implicit charset: Charset): Codec[PgBackendMessage] = { + val msgTypeContext = "msg_type_id" + + def unknownMsgFallback(msgCodec: Codec[PgBackendMessage]) = new Codec[PgBackendMessage] { + def sizeBound: SizeBound = unknown.sizeBound | msgCodec.sizeBound + def encode(msg: PgBackendMessage): Attempt[BitVector] = { + msg match { + case ubm: UnknownBackendMessage => unknown.encode(ubm) + case _ => msgCodec.encode(msg) + } + } + + def decode(b: BitVector): Attempt[DecodeResult[PgBackendMessage]] = { + msgCodec.decode(b).recoverWith { + case err: KnownDiscriminatorType[_]#UnknownDiscriminator + if err.context.last == msgTypeContext => + unknown.decode(b) + } + } + } + + unknownMsgFallback( + discriminated[PgBackendMessage] + .by(msgTypeContext | byte) + .typecase('S', "param_status_msg" | parameterStatus) + .typecase('R', "auth_msg" | auth) + .typecase('Z', "ready_for_query_msg" | readyForQuery) + .typecase('T', 
"row_desc_msg" | rowDescription) + .typecase('D', "data_row_msg" | dataRow) + .typecase('C', "command_complete_msg" | commandComplete) + .typecase('K', "backend_key_data_msg" | backendKeyData) + .typecase('1', "parse_complete_msg" | parseComplete) + .typecase('2', "bind_complete_msg" | bindComplete) + .typecase('3', "close_complete_msg" | closeComplete) + .typecase('I', "empty_query_response_msg" | emptyQueryResponse) + .typecase('E', "error_msg" | error) + .typecase('N', "notice_msg" | notice) + .typecase('s', "portal_suspended_msg" | portalSuspended) + .typecase('n', "no_data_msg" | noData) + .typecase('t', "param_desc_msg" | parameterDescription) + ) + } + + private val noData: Codec[NoData.type] = { + pgSingletonHeadlessMsg(NoData) + } + + private val closeComplete: Codec[CloseComplete.type] = { + pgSingletonHeadlessMsg(CloseComplete) + } + + private val portalSuspended: Codec[PortalSuspended.type] = { + pgSingletonHeadlessMsg(PortalSuspended) + } + + private val parseComplete: Codec[ParseComplete.type] = { + pgSingletonHeadlessMsg(ParseComplete) + } + + private val bindComplete: Codec[BindComplete.type] = { + pgSingletonHeadlessMsg(BindComplete) + } + + private val emptyQueryResponse: Codec[EmptyQueryResponse.type] = { + pgSingletonHeadlessMsg(EmptyQueryResponse) + } + + private def commandComplete(implicit charset: Charset): Codec[CommandComplete] = pgHeadlessMsg { + stringNul.xmap[CommandComplete]( + message => { + if (CommandComplete.RowCountMessages.exists(rowCountMsg => message.startsWith(rowCountMsg))) { + val (constant, rowsStr) = message.splitAt(message.lastIndexOf(" ") + 1) //TODO err handling + CommandComplete(constant, Some(rowsStr.toInt)) + } else { + CommandComplete(message, None) + } + }, { + case CommandComplete(message, None) => message + case CommandComplete(message, Some(rowCount)) => s"$message $rowCount" + } + ) + } + + private def parameterStatus(implicit charset: Charset): Codec[ParameterStatus] = pgHeadlessMsg { + 
pgParam(stringNul).withContext("status_param").xmap( + { + case (key, value) => ParameterStatus(SessionParamKey(key), SessionParamVal(value)) + }, + p => (p.key.value, p.value.value) + ) + } + + private def dataRow(implicit charset: Charset): Codec[DataRow] = pgHeadlessMsg { + vectorOfN( + "colCount" | int16, + "colValues" | colValue + ).withContext("columns") + .as[DataRow] + } + + private val unknown: Codec[UnknownBackendMessage] = { + { + ("head" | byte) :: + ("body" | variableSizeBytes(int32, bytes, 4)) + }.as[UnknownBackendMessage] + } + + private val readyForQuery: Codec[ReadyForQuery] = pgHeadlessMsg { + discriminated + .by("tx_status_id" | byte) + .typecase('I', provide(ReadyForQuery(TxStatus.Idle))) + .typecase('E', provide(ReadyForQuery(TxStatus.Failed))) + .typecase('T', provide(ReadyForQuery(TxStatus.Active))) + .withContext("tx_status") + } + + private val authRequestMd5: Codec[AuthRequestMd5] = { + bytes(4).withContext("md5_salt").as[AuthRequestMd5] + } + + private val auth: Codec[AuthBackendMessage] = pgHeadlessMsg { + discriminated[AuthBackendMessage] + .by("auth_type_id" | int32) + .typecase(0x00, provide(AuthOk)) + .typecase(0x05, authRequestMd5) + .withContext("auth_type") + } + + private def rowDescription(implicit charset: Charset): Codec[RowDescription] = pgHeadlessMsg { + vectorOfN( + "col_count" | int16.withContext("col_count"), + "col_desc" | colDesc + ).withContext("col_descs") + .as[RowDescription] + } + + private val backendKeyData: Codec[BackendKeyData] = pgHeadlessMsg { + { + ("pid" | int32).as[PgPid] :: + ("key" | int32).as[PgKey] + }.as[BackendKeyData] + } + + private val parameterDescription: Codec[ParameterDescription] = pgHeadlessMsg { + vectorOfN( + "param_count" | int16, + "param_type_id" | oid + ).withContext("param_descs") + .as[ParameterDescription] + } + + private def error(implicit charset: Charset): Codec[StatusMessage.Error] = { + status(StatusMessage.error).withContext("error_status") + } + + private def notice(implicit 
charset: Charset): Codec[StatusMessage.Notice] = { + status(StatusMessage.notice).withContext("notice_status") + } + + private def status[A <: StatusMessage](creator: Map[Byte, String] => A)(implicit charset: Charset): Codec[A] = { + pgHeadlessMsg { + pgParamMap("param_key" | byte).exmap[A]( + map => Attempt.successful(creator(map)), + _ => Attempt.failure(Err("encoding not supported")) + ).withContext("status_msg_params") + } + } +} diff --git a/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/frontend/package.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/frontend/package.scala new file mode 100644 index 0000000..e09e41b --- /dev/null +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/frontend/package.scala @@ -0,0 +1,163 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.scodec.msg + +import java.nio.charset.Charset + +import _root_.scodec.codecs._ +import _root_.scodec.{Attempt, Codec, Err, SizeBound} +import io.rdbc.pgsql.core.pgstruct.messages.backend.{PgKey, PgPid} +import io.rdbc.pgsql.core.pgstruct.messages.frontend._ +import io.rdbc.pgsql.scodec.ParamValuesCodec.paramValues +import io.rdbc.pgsql.scodec._ +import scodec.bits.BitVector + +package object frontend { + + private[scodec] def pgFrontendMessage(implicit charset: Charset): scodec.Encoder[PgFrontendMessage] = { + new scodec.Encoder[PgFrontendMessage] { + val sizeBound: SizeBound = SizeBound.unknown + + def encode(msg: PgFrontendMessage): Attempt[BitVector] = { + val codec = msg match { + case _: Startup => startup.upcast[PgFrontendMessage] + case _: Bind => bind.upcast[PgFrontendMessage] + case _: DescribePortal => describePortal.upcast[PgFrontendMessage] + case _: DescribeStatement => describeStatement.upcast[PgFrontendMessage] + case _: Execute => execute.upcast[PgFrontendMessage] + case _: Parse => parse.upcast[PgFrontendMessage] + case _: PasswordMessage => password.upcast[PgFrontendMessage] + case _: Query => query.upcast[PgFrontendMessage] + case _: CancelRequest => cancelRequest.upcast[PgFrontendMessage] + case _: CloseStatement => closeStatement.upcast[PgFrontendMessage] + case _: ClosePortal => closePortal.upcast[PgFrontendMessage] + case Terminate => terminate.upcast[PgFrontendMessage] + case Flush => flush.upcast[PgFrontendMessage] + case Sync => sync.upcast[PgFrontendMessage] + case _ => fail[PgFrontendMessage] { + Err(s"Encoding message of type ${msg.getClass} is not supported") + } + } + codec.encode(msg) + } + } + } + + private def portalName(implicit charset: Charset): Codec[PortalName] = stringNul.as[PortalName] + + private def maybePortalName(implicit charset: Charset): Codec[Option[PortalName]] = { + maybe(portalName, PortalName("")) + } + + private def stmtName(implicit charset: Charset): Codec[StmtName] = 
stringNul.as[StmtName] + + private def maybeStmtName(implicit charset: Charset): Codec[Option[StmtName]] = { + maybe(stmtName, StmtName("")) + } + + private def nativeSql(implicit charset: Charset): Codec[NativeSql] = stringNul.as[NativeSql] + + private def bind(implicit charset: Charset): Codec[Bind] = { + pgHeadedMsg('B') { + { + ("portal" | maybePortalName) :: + ("statement" | maybeStmtName) :: + ("param_values" | paramValues) :: + ("result_columns" | ReturnFieldFormatsCodec) + }.as[Bind] + } + } + + private def describeStatement(implicit charset: Charset): Codec[DescribeStatement] = pgHeadedMsg('D') { + { + ("describeType" | byte.unit('S')) ~> ("optionalName" | maybeStmtName) + }.as[DescribeStatement] + } + + private def describePortal(implicit charset: Charset): Codec[DescribePortal] = pgHeadedMsg('D') { + { + ("describeType" | byte.unit('P')) ~> ("optionalName" | maybePortalName) + }.as[DescribePortal] + } + + private def execute(implicit charset: Charset): Codec[Execute] = pgHeadedMsg('E') { + { + ("portalName" | maybePortalName) :: + ("fetchSize" | maybeInt32) + }.as[Execute] + } + + private val flush: Codec[Flush.type] = pgSingletonHeadedMsg('F', Flush) + + private def parse(implicit charset: Charset): Codec[Parse] = pgHeadedMsg('P') { + { + ("preparedStmt" | maybeStmtName) :: + ("query" | nativeSql) :: + ("paramTypes" | vectorOfN(int16, oid)) + }.as[Parse] + } + + private val password: Codec[PasswordMessage] = pgHeadedMsg('p') { + ("credentials" | bytes).as[PasswordMessage] + } + + private def query(implicit charset: Charset): Codec[Query] = pgHeadedMsg('Q') { + ("query" | nativeSql).as[Query] + } + + private def startup(implicit charset: Charset): Codec[Startup] = { + val ver3_0 = int32.unit(196608) + + pgHeadlessMsg( + { + ("protocol version" | ver3_0) ~> + ("user key" | stringNul.unit("user")) ~> + ("user" | stringNul) :: + ("db key" | stringNul.unit("database")) ~> + ("database" | stringNul) :: + ("options" | pgParamMap(stringNul)) + }.as[Startup] + 
).withToString("StartupMessage") + } + + private val terminate: Codec[Terminate.type] = pgSingletonHeadedMsg('X', Terminate) + + private val sync: Codec[Sync.type] = pgSingletonHeadedMsg('S', Sync) + + private def cancelRequest: Codec[CancelRequest] = { + pgHeadlessMsg( + { + ("cancel code" | int32.unit(80877102)) ~> + ("process ID" | int32).as[PgPid] :: + ("secret key" | int32).as[PgKey] + }.as[CancelRequest] + ) + } + + private def closeStatement(implicit charset: Charset): Codec[CloseStatement] = pgHeadedMsg('C') { + { + byte.unit('S') ~> ("optionalName" | maybeStmtName) + }.as[CloseStatement] + } + + private def closePortal(implicit charset: Charset): Codec[ClosePortal] = pgHeadedMsg('C') { + { + byte.unit('P') ~> ("optionalName" | maybePortalName) + }.as[ClosePortal] + } + +} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/package.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/package.scala similarity index 60% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/package.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/package.scala index bc3543a..2487b73 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/package.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/package.scala @@ -18,23 +18,23 @@ package io.rdbc.pgsql.scodec import _root_.scodec.Codec import _root_.scodec.codecs._ -import io.rdbc.pgsql.core.messages.PgMessage +import io.rdbc.pgsql.core.pgstruct.messages.PgMessage package object msg { - def pgHeadlessMsg[A <: PgMessage](bodyCodec: Codec[A]): Codec[A] = { - variableSizeBytes(pgInt32, bodyCodec, 4) + private[msg] def pgHeadlessMsg[A <: PgMessage](bodyCodec: Codec[A]): Codec[A] = { + variableSizeBytes(int32, bodyCodec, 4) } - def pgHeadedMsg[A <: PgMessage](head: Byte)(bodyCodec: Codec[A]): Codec[A] = { + private[msg] def pgHeadedMsg[A <: PgMessage](head: Byte)(bodyCodec: Codec[A]): Codec[A] = { 
byte.unit(head) ~> pgHeadlessMsg(bodyCodec) } - def pgSingletonHeadedMsg[A <: PgMessage](head: Byte, singleton: A): Codec[A] = { + private[msg] def pgSingletonHeadedMsg[A <: PgMessage](head: Byte, singleton: A): Codec[A] = { byte.unit(head) ~> pgHeadlessMsg(provide(singleton)) } - def pgSingletonMsg[A <: PgMessage](singleton: A): Codec[A] = { - pgInt32.withContext("length").unit(4).xmap(_ => singleton, _ => Unit) + private[msg] def pgSingletonHeadlessMsg[A <: PgMessage](singleton: A): Codec[A] = { + int32.withContext("header_length").unit(4).xmap[A](_ => singleton, _ => Unit) } } diff --git a/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/package.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/package.scala new file mode 100644 index 0000000..c8b3933 --- /dev/null +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/package.scala @@ -0,0 +1,105 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql + +import java.nio.charset.Charset + +import _root_.scodec._ +import _root_.scodec.bits.{BitVector, ByteVector} +import _root_.scodec.codecs._ +import io.rdbc.pgsql.core.pgstruct._ +import io.rdbc.pgsql.core.pgstruct.messages.backend.MsgHeader +import io.rdbc.pgsql.core.pgstruct.messages.frontend.ColName + +package object scodec { + + private[scodec] def terminated[A](terminator: BitVector, codec: Codec[A]): Codec[A] = { + new TerminatedCodec[A](terminator, codec).withToString("terminated") + } + + private[scodec] def stringNul(implicit charset: Charset): Codec[String] = { + terminated(BitVector.lowByte, string(charset)).withToString("stringNul") + } + + private[scodec] def maybeStringNul(implicit charset: Charset): Codec[Option[String]] = { + maybe(stringNul(charset), "") + } + + private[scodec] def maybe[A](codec: Codec[A], noneVal: A): Codec[Option[A]] = { + codec.xmap[Option[A]](v => { + if (v == noneVal) None + else Some(v) + }, { + case Some(v) => v + case None => noneVal + }).withToString("maybe " + codec.toString) + } + + private[scodec] val maybeInt16: Codec[Option[Int]] = maybe(int16, 0) + + private[scodec] val maybeInt32: Codec[Option[Int]] = maybe(int32, 0) + + private[scodec] def colName(implicit charset: Charset): Codec[ColName] = stringNul.as[ColName] + + private[scodec] def pgParam[K](keyCodec: Codec[K])(implicit charset: Charset): Codec[(K, String)] = { + ("key" | keyCodec) ~ ("value" | stringNul) + }.withToString("pgParam") + + private[scodec] def pgParamMap[K](keyCodec: Codec[K])(implicit charset: Charset): Codec[Map[K, String]] = { + new PgMapCodec[K](pgParam(keyCodec)).withContext("pg_params") + } + + private[scodec] def colDesc(implicit charset: Charset): Codec[ColDesc] = { + { + ("name" | colName) :: + ("tableOid" | maybeOid) :: + ("columnAttr" | maybeInt16) :: + ("dataType" | dataType) :: + ("colFormatCode" | colValFormat) + }.as[ColDesc].withToString("ColDesc") + } + + private[scodec] val colValue: 
Codec[ColValue] = ColValueCodec + + private[scodec] val header: Codec[MsgHeader] = { + { + ("header" | ignore(8)) ~> + ("msgLength" | int32) + }.as[MsgHeader] + } + + private[scodec] val colValFormat: Codec[ColFormat] = { + discriminated[ColFormat] + .by(int16) + .subcaseP(0)({ case t@ColFormat.Textual => t })(provide(ColFormat.Textual)) + .subcaseP(1)({ case b@ColFormat.Binary => b })(provide(ColFormat.Binary)) + } + + private[scodec] val oid: Codec[Oid] = uint32.as[Oid].withToString("pgOid") + + private[scodec] val maybeOid: Codec[Option[Oid]] = maybe(oid, Oid(0L)) + + private[scodec] val dataType: Codec[DataType] = { + { + ("oid" | oid) :: + ("size" | int16).as[DataType.Size] :: + ("modifier" | int32).as[DataType.Modifier] + }.as[DataType] + } + + private[scodec] val bytesArr: Codec[Array[Byte]] = bytes.xmap(_.toArray, ByteVector.view) +} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/CommonCodec.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/CommonCodec.scala similarity index 78% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/CommonCodec.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/CommonCodec.scala index 5593ca2..c988ad5 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/CommonCodec.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/CommonCodec.scala @@ -19,9 +19,9 @@ package io.rdbc.pgsql.scodec.types import io.rdbc.pgsql.core.SessionParams import scodec.Codec -trait CommonCodec[T] { +private[types] trait CommonCodec[T] { this: ScodecPgType[T] => def codec(implicit sessionParams: SessionParams): Codec[T] - def decodeCodec(implicit sessionParams: SessionParams): Codec[T] = codec - def encodeCodec(implicit sessionParams: SessionParams): Codec[T] = codec + def decoder(implicit sessionParams: SessionParams): Codec[T] = codec + def encoder(implicit sessionParams: SessionParams): Codec[T] = codec } diff 
--git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBool.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBool.scala similarity index 88% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBool.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBool.scala index ef50e04..d80779b 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBool.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBool.scala @@ -16,12 +16,13 @@ package io.rdbc.pgsql.scodec.types +import _root_.scodec.codecs.byte import io.rdbc.pgsql.core.SessionParams import io.rdbc.pgsql.core.types.PgBool import scodec.Codec -object ScodecPgBool extends PgBool with ScodecPgType[Boolean] with CommonCodec[Boolean] { - def codec(implicit sessionParams: SessionParams): Codec[Boolean] = _root_.scodec.codecs.byte.xmap( +object ScodecPgBool extends ScodecPgType[Boolean] with PgBool with CommonCodec[Boolean] { + def codec(implicit sessionParams: SessionParams): Codec[Boolean] = byte.xmap( _ == 1, if (_) 1 else 0 ) diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBytea.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBytea.scala similarity index 89% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBytea.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBytea.scala index 478716c..a28d7de 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBytea.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgBytea.scala @@ -21,6 +21,6 @@ import io.rdbc.pgsql.core.types.PgBytea import io.rdbc.pgsql.scodec.bytesArr import scodec.Codec -object ScodecPgBytea extends PgBytea with ScodecPgType[Array[Byte]] with CommonCodec[Array[Byte]] { +object ScodecPgBytea 
extends ScodecPgType[Array[Byte]] with PgBytea with CommonCodec[Array[Byte]] { def codec(implicit sessionParams: SessionParams): Codec[Array[Byte]] = bytesArr } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgChar.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgChar.scala similarity index 92% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgChar.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgChar.scala index ed7eae0..229c6a5 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgChar.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgChar.scala @@ -18,4 +18,4 @@ package io.rdbc.pgsql.scodec.types import io.rdbc.pgsql.core.types.PgChar -object ScodecPgChar extends PgChar with ScodecStringLike \ No newline at end of file +object ScodecPgChar extends ScodecStringLike with PgChar diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDate.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDate.scala similarity index 85% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDate.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDate.scala index 8408640..943aaec 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDate.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDate.scala @@ -20,19 +20,19 @@ import java.time._ import java.time.temporal.ChronoUnit.DAYS import _root_.scodec.Codec +import _root_.scodec.codecs.int32 import io.rdbc.pgsql.core.SessionParams import io.rdbc.pgsql.core.types.PgDate -import io.rdbc.pgsql.scodec._ -object ScodecPgDate extends PgDate with ScodecPgType[LocalDate] with CommonCodec[LocalDate] { +object ScodecPgDate extends ScodecPgType[LocalDate] with PgDate with 
CommonCodec[LocalDate] { def codec(implicit sessionParams: SessionParams): Codec[LocalDate] = { - pgInt32.xmap( + int32.xmap( int2LocalDate, localDate2Int ) } - private val PgZero: LocalDate = LocalDate.of(2000, Month.JANUARY, 1) + private[this] val PgZero: LocalDate = LocalDate.of(2000, Month.JANUARY, 1) private def int2LocalDate(i: Int): LocalDate = { PgZero.plus(i, DAYS) diff --git a/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDecimal.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDecimal.scala new file mode 100644 index 0000000..40f15aa --- /dev/null +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDecimal.scala @@ -0,0 +1,170 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.scodec.types + +import io.rdbc.pgsql.core.SessionParams +import io.rdbc.pgsql.core.types.PgDecimal +import io.rdbc.sapi.SqlNumeric +import scodec.Attempt.Failure +import scodec.bits.BitVector +import scodec.codecs._ +import scodec._ + +private[types] object PgDecimalCodec extends Codec[SqlNumeric] { + + private sealed trait PgDecSign + private object PgDecSign { + case object Negative extends PgDecSign + case object Positive extends PgDecSign + case object NaN extends PgDecSign + } + + private case class DecimalParts(integer: String, fraction: String) + private case class Header(digitsCount: Int, firstDigitWeight: Int, sign: PgDecSign, scale: Int) + + private[this] val signCodec: Codec[PgDecSign] = { + discriminated + .by("decimal_sign_id" | uint16) + .typecase(0x4000, provide(PgDecSign.Negative).upcast[PgDecSign]) + .typecase(0x0000, provide(PgDecSign.Positive).upcast[PgDecSign]) + .typecase(0xC000, provide(PgDecSign.NaN).upcast[PgDecSign]) + } + + private[this] val headerCodec: Codec[Header] = { + { + ("digitsCount" | uint16) :: + ("firstDigitWeight" | uint16) :: + ("sign" | signCodec) :: + ("scale" | uint16) + }.as[Header] + } + + private[this] val NaNBits = headerCodec.encode { + Header(digitsCount = 0, firstDigitWeight = 0, sign = PgDecSign.NaN, scale = 0) + } + + private[this] val DigitLength = 4 + + val sizeBound: SizeBound = headerCodec.sizeBound.atLeast + + def decode(bits: BitVector): Attempt[DecodeResult[SqlNumeric]] = { + headerCodec.decode(bits).flatMap { headerResult => + val header = headerResult.value + vectorOfN( + provide(header.digitsCount), + "digit" | int16 + ).decode(headerResult.remainder).map { digitsResult => + header.sign match { + case PgDecSign.NaN => DecodeResult(SqlNumeric.NaN, digitsResult.remainder) + case _ => decodeNonNan(header, digitsResult) + } + } + } + } + + private def decodeNonNan(header: Header, + digitsResult: DecodeResult[Vector[Int]]): DecodeResult[SqlNumeric] = { + val dp = 
decimalPartsFromDigits(digitsResult.value, header.firstDigitWeight) + + val bigDecStr = { + val signStr = if (header.sign == PgDecSign.Negative) "-" else "" + val integerTrimmed = { val t = dropLeadingZeros(dp.integer); if (t.isEmpty) "0" else t } // zero is sent with no digits; must still render "0" + val fractionPadded = dp.fraction.padTo(header.scale, '0') + signStr + integerTrimmed + (if (!fractionPadded.isEmpty) "." + fractionPadded else "") + } + + DecodeResult(SqlNumeric.Val(BigDecimal(bigDecStr)), digitsResult.remainder) + } + + private def decimalPartsFromDigits(digits: Vector[Int], firstDigitWeight: Int): DecimalParts = { + val weight1 = firstDigitWeight + 1 + + val digitsPadded = digits.padTo(weight1, 0) + val digitStrs = digitsPadded.map("%04d".format(_)) + + if (digitStrs.size > weight1) { + DecimalParts( + integer = digitStrs.slice(0, weight1).mkString, + fraction = digitStrs.slice(weight1, digitStrs.size).mkString + ) + } else DecimalParts(integer = digitStrs.mkString, fraction = "") + } + + def encode(value: SqlNumeric): Attempt[BitVector] = { + value match { + case SqlNumeric.Val(bigDec) => encodeBigDec(bigDec) + case SqlNumeric.NaN => NaNBits + case SqlNumeric.NegInfinity | SqlNumeric.PosInfinity => + Failure(Err("Cannot encode infinity as a PostgreSQL decimal")) + } + } + + private def encodeBigDec(bigDec: BigDecimal): Attempt[BitVector] = { + val dp = bigDecToDecimalParts(bigDec) + val mergedNoDot = padInteger(dp.integer + dp.fraction) + + encode( + sign = if (bigDec >= 0) PgDecSign.Positive else PgDecSign.Negative, + weight = (dp.integer.length / DigitLength) - 1, // decoder reads weight + 1 integer groups + scale = bigDec.scale, + digits = mergedNoDot.grouped(DigitLength).map(_.toInt).toVector // one base-10000 digit per 4 decimal chars + ) + } + + private def bigDecToDecimalParts(bigDecimal: BigDecimal): DecimalParts = { + val bigDecStr = bigDecimal.bigDecimal.toPlainString.toList.dropWhile(_ == '-') + val (integer, fractionWithDot) = bigDecStr.span(_ != '.') + + DecimalParts( + integer = padInteger(integer.mkString), + fraction = padFraction(fractionWithDot.drop(1).mkString) //drop(1) is to drop a dot + 
) + } + + private def padInteger(num: String): String = { + val digitCount = math.ceil(num.length.toDouble / DigitLength).toInt + val padNeeded = (digitCount * DigitLength) - num.length + ("0" * padNeeded) + num + } + + private def padFraction(fraction: String): String = { + val zeroTrimmed = dropTrailingZeros(fraction) + val digitCount = math.ceil(zeroTrimmed.length.toDouble / DigitLength).toInt + zeroTrimmed.padTo(digitCount * DigitLength, '0') + } + + private def encode(sign: PgDecSign, weight: Int, scale: Int, digits: Vector[Int]): Attempt[BitVector] = { + (headerCodec ~ vectorOfN(provide(digits.size), "digit" | int16)).encode( + ( + Header(digits.size, weight, sign, scale), + digits + ) + ) + } + + private def dropTrailingZeros(str: String): String = { + str.replaceAll("0+$", "") + } + + private def dropLeadingZeros(str: String): String = { + str.replaceAll("^0+", "") + } +} + +object ScodecPgDecimal extends ScodecPgType[SqlNumeric] with PgDecimal with CommonCodec[SqlNumeric] { + def codec(implicit sessionParams: SessionParams): Codec[SqlNumeric] = PgDecimalCodec +} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat4.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat4.scala similarity index 93% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat4.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat4.scala index c107575..ba8a6a4 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat4.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat4.scala @@ -21,6 +21,6 @@ import io.rdbc.pgsql.core.types.PgFloat4 import scodec.Codec import scodec.codecs.float -object ScodecPgFloat4 extends PgFloat4 with ScodecPgType[Float] with CommonCodec[Float] { +object ScodecPgFloat4 extends ScodecPgType[Float] with PgFloat4 with CommonCodec[Float] { def codec(implicit 
sessionParams: SessionParams): Codec[Float] = float } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat8.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat8.scala similarity index 87% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat8.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat8.scala index db4cdff..085ca9f 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat8.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgFloat8.scala @@ -17,10 +17,10 @@ package io.rdbc.pgsql.scodec.types import io.rdbc.pgsql.core.SessionParams -import io.rdbc.pgsql.core.types.{PgFloat4, PgFloat8} +import io.rdbc.pgsql.core.types.PgFloat8 import scodec.Codec import scodec.codecs.double -object ScodecPgFloat8 extends PgFloat8 with ScodecPgType[Double] with CommonCodec[Double] { +object ScodecPgFloat8 extends ScodecPgType[Double] with PgFloat8 with CommonCodec[Double] { def codec(implicit sessionParams: SessionParams): Codec[Double] = double } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt2.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt2.scala similarity index 93% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt2.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt2.scala index 899a19d..93f905d 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt2.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt2.scala @@ -21,6 +21,6 @@ import io.rdbc.pgsql.core.types.PgInt2 import scodec.Codec import scodec.codecs.short16 -object ScodecPgInt2 extends PgInt2 with ScodecPgType[Short] with CommonCodec[Short] { +object ScodecPgInt2 extends ScodecPgType[Short] with 
PgInt2 with CommonCodec[Short] { def codec(implicit sessionParams: SessionParams): Codec[Short] = short16 } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt4.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt4.scala similarity index 88% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt4.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt4.scala index 17119ab..85de607 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt4.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt4.scala @@ -16,11 +16,11 @@ package io.rdbc.pgsql.scodec.types +import _root_.scodec.codecs.int32 import io.rdbc.pgsql.core.SessionParams import io.rdbc.pgsql.core.types.PgInt4 -import io.rdbc.pgsql.scodec._ import scodec.Codec -object ScodecPgInt4 extends PgInt4 with ScodecPgType[Int] with CommonCodec[Int] { - def codec(implicit sessionParams: SessionParams): Codec[Int] = pgInt32 +object ScodecPgInt4 extends ScodecPgType[Int] with PgInt4 with CommonCodec[Int] { + def codec(implicit sessionParams: SessionParams): Codec[Int] = int32 } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt8.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt8.scala similarity index 93% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt8.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt8.scala index e499d01..d221676 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt8.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgInt8.scala @@ -21,6 +21,6 @@ import io.rdbc.pgsql.core.types.PgInt8 import scodec.Codec import scodec.codecs.long -object ScodecPgInt8 extends PgInt8 with ScodecPgType[Long] with 
CommonCodec[Long] { +object ScodecPgInt8 extends ScodecPgType[Long] with PgInt8 with CommonCodec[Long] { def codec(implicit sessionParams: SessionParams): Codec[Long] = long(64) } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgText.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgText.scala similarity index 92% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgText.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgText.scala index e9b8840..ffd0621 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgText.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgText.scala @@ -18,4 +18,4 @@ package io.rdbc.pgsql.scodec.types import io.rdbc.pgsql.core.types.PgText -object ScodecPgText extends PgText with ScodecStringLike \ No newline at end of file +object ScodecPgText extends ScodecStringLike with PgText diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTime.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTime.scala similarity index 77% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTime.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTime.scala index b2ef2f9..542103d 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTime.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTime.scala @@ -20,27 +20,25 @@ import java.time._ import java.time.temporal.ChronoUnit.MICROS import _root_.scodec.Codec +import _root_.scodec.codecs.int64 import io.rdbc.pgsql.core.SessionParams import io.rdbc.pgsql.core.types.PgTime -import io.rdbc.pgsql.scodec._ -object ScodecPgTime extends PgTime with ScodecPgType[LocalTime] with CommonCodec[LocalTime] { +object ScodecPgTime extends 
ScodecPgType[LocalTime] with PgTime with CommonCodec[LocalTime] { def codec(implicit sessionParams: SessionParams): Codec[LocalTime] = { - pgInt64.xmap( + int64.xmap( long2LocalTime, localTime2Long ) } - private val PgZero: LocalTime = LocalTime.MIDNIGHT + private[this] val PgZero: LocalTime = LocalTime.MIDNIGHT private def long2LocalTime(l: Long): LocalTime = { PgZero.plus(l, MICROS) } private def localTime2Long(ldt: LocalTime): Long = { - val dur = Duration.between(PgZero, ldt) - val micros = (dur.getSeconds * 1000L * 1000L) + (dur.getNano / 1000L) //TODO this repeats - micros + Duration.between(PgZero, ldt).toMicros } } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestamp.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestamp.scala similarity index 75% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestamp.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestamp.scala index b6a905e..afeab4d 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestamp.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestamp.scala @@ -20,19 +20,21 @@ import java.time.temporal.ChronoUnit.MICROS import java.time.{ZonedDateTime, _} import _root_.scodec.Codec +import _root_.scodec.codecs.int64 import io.rdbc.pgsql.core.SessionParams import io.rdbc.pgsql.core.types.PgTimestamp -import io.rdbc.pgsql.scodec._ -object ScodecPgTimestamp extends PgTimestamp with ScodecPgType[LocalDateTime] with CommonCodec[LocalDateTime] { +object ScodecPgTimestamp extends ScodecPgType[LocalDateTime] with PgTimestamp with CommonCodec[LocalDateTime] { def codec(implicit sessionParams: SessionParams): Codec[LocalDateTime] = { - pgInt64.xmap( + int64.xmap( long2LocalDateTime, localDateTime2Long ) } - private val PgZero: ZonedDateTime = LocalDate.of(2000, Month.JANUARY, 
1).atStartOfDay(ZoneId.of("UTC")) + private[this] val PgZero: ZonedDateTime = { + LocalDate.of(2000, Month.JANUARY, 1).atStartOfDay(ZoneId.of("UTC")) + } private def long2LocalDateTime(l: Long): LocalDateTime = { PgZero.plus(l, MICROS).toLocalDateTime @@ -40,8 +42,6 @@ object ScodecPgTimestamp extends PgTimestamp with ScodecPgType[LocalDateTime] wi private def localDateTime2Long(ldt: LocalDateTime): Long = { val zdt = ZonedDateTime.of(ldt, ZoneId.of("UTC")) - val dur = Duration.between(PgZero, zdt) - val micros = (dur.getSeconds * 1000L * 1000L) + (dur.getNano / 1000L) - micros + Duration.between(PgZero, zdt).toMicros } } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestampTz.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestampTz.scala similarity index 73% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestampTz.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestampTz.scala index e1d593f..4222194 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestampTz.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTimestampTz.scala @@ -20,27 +20,29 @@ import java.time._ import java.time.temporal.ChronoUnit.MICROS import _root_.scodec.Codec +import _root_.scodec.codecs.int64 import io.rdbc.pgsql.core.SessionParams import io.rdbc.pgsql.core.types.PgTimestampTz -import io.rdbc.pgsql.scodec._ -object ScodecPgTimestampTz extends PgTimestampTz with ScodecPgType[Instant] with CommonCodec[Instant] { +object ScodecPgTimestampTz extends ScodecPgType[Instant] with PgTimestampTz with CommonCodec[Instant] { def codec(implicit sessionParams: SessionParams): Codec[Instant] = { - pgInt64.xmap( + int64.xmap( long2Instant, instant2Long ) } - private val PgZero: Instant = LocalDate.of(2000, Month.JANUARY, 1).atStartOfDay(ZoneId.of("UTC")).toInstant + private[this] val 
PgZero: Instant = { + LocalDate.of(2000, Month.JANUARY, 1) + .atStartOfDay(ZoneId.of("UTC")) + .toInstant + } private def long2Instant(l: Long): Instant = { PgZero.plus(l, MICROS) } private def instant2Long(inst: Instant): Long = { - val dur = Duration.between(PgZero, inst) - val micros = (dur.getSeconds * 1000L * 1000L) + (dur.getNano / 1000L) - micros + Duration.between(PgZero, inst).toMicros } } diff --git a/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgType.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgType.scala new file mode 100644 index 0000000..893fc92 --- /dev/null +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgType.scala @@ -0,0 +1,51 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.scodec.types + +import io.rdbc.pgsql.core.SessionParams +import io.rdbc.pgsql.core.exception.{PgDecodeException, PgEncodeException} +import io.rdbc.pgsql.core.types.PgType +import scodec.Attempt.{Failure, Successful} +import scodec.bits.ByteVector + +import scala.reflect.ClassTag + +private[types] abstract class ScodecPgType[T: ClassTag] extends PgType[T] { + + def decoder(implicit sessionParams: SessionParams): scodec.Decoder[T] + def encoder(implicit sessionParams: SessionParams): scodec.Encoder[T] + + def toObj(binaryVal: ByteVector)(implicit sessionParams: SessionParams): T = { + decoder.decodeValue(binaryVal.bits) match { + case Successful(value) => value + case Failure(err) => throw new PgDecodeException( + s"Error decoding '${binaryVal.toHex}' of PG type '$name' as '${implicitly[ClassTag[T]]}': " + + err.messageWithContext + ) + } + } + + def toPgBinary(obj: T)(implicit sessionParams: SessionParams): ByteVector = { + encoder.encode(obj) match { + case Successful(value) => value.bytes + case Failure(err) => throw new PgEncodeException( + s"Error encoding '$obj' of type '${obj.getClass}' to PG type '$name': " + + err.messageWithContext + ) + } + } +} diff --git a/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTypesProvider.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTypesProvider.scala new file mode 100644 index 0000000..326abd6 --- /dev/null +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgTypesProvider.scala @@ -0,0 +1,43 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.scodec.types + +import io.rdbc.ImmutSeq +import io.rdbc.pgsql.core.types.{PgType, PgTypesProvider} + +class ScodecPgTypesProvider extends PgTypesProvider { + val types: ImmutSeq[PgType[_]] = { + Vector( + ScodecPgBool, + ScodecPgInt2, + ScodecPgInt4, + ScodecPgInt8, + ScodecPgFloat4, + ScodecPgFloat8, + ScodecPgDecimal, + ScodecPgTime, + ScodecPgDate, + ScodecPgTimestamp, + ScodecPgTimestampTz, + ScodecPgVarchar, + ScodecPgChar, + ScodecPgText, + ScodecPgUuid, + ScodecPgBytea + ) + } +} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgUuid.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgUuid.scala similarity index 93% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgUuid.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgUuid.scala index 36154bb..35b74d1 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgUuid.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgUuid.scala @@ -23,6 +23,6 @@ import io.rdbc.pgsql.core.types.PgUuid import scodec.Codec import scodec.codecs.uuid -object ScodecPgUuid extends PgUuid with ScodecPgType[UUID] with CommonCodec[UUID] { +object ScodecPgUuid extends ScodecPgType[UUID] with PgUuid with CommonCodec[UUID] { def codec(implicit sessionParams: SessionParams): Codec[UUID] = uuid } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgVarchar.scala 
b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgVarchar.scala similarity index 91% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgVarchar.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgVarchar.scala index 29b36bd..73d1184 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgVarchar.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgVarchar.scala @@ -18,4 +18,4 @@ package io.rdbc.pgsql.scodec.types import io.rdbc.pgsql.core.types.PgVarchar -object ScodecPgVarchar extends PgVarchar with ScodecStringLike \ No newline at end of file +object ScodecPgVarchar extends ScodecStringLike with PgVarchar diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecStringLike.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecStringLike.scala similarity index 67% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecStringLike.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecStringLike.scala index b5b3ac2..b48dcc3 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecStringLike.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecStringLike.scala @@ -17,10 +17,15 @@ package io.rdbc.pgsql.scodec.types import _root_.scodec.Codec +import _root_.scodec.codecs.string import io.rdbc.pgsql.core.SessionParams -import io.rdbc.pgsql.scodec._ -trait ScodecStringLike extends ScodecPgType[String] { - def decodeCodec(implicit sessionParams: SessionParams): Codec[String] = pgStringNonTerminated(sessionParams.serverCharset) - def encodeCodec(implicit sessionParams: SessionParams): Codec[String] = pgStringNonTerminated(sessionParams.clientCharset) +private[types] abstract class ScodecStringLike extends ScodecPgType[String] { + def decoder(implicit sessionParams: 
SessionParams): Codec[String] = { + string(sessionParams.serverCharset) + } + + def encoder(implicit sessionParams: SessionParams): Codec[String] = { + string(sessionParams.clientCharset) + } } diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/package.scala b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/package.scala similarity index 68% rename from rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/package.scala rename to rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/package.scala index bb7821f..756d814 100644 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/package.scala +++ b/rdbc-pgsql-codec-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/package.scala @@ -16,16 +16,14 @@ package io.rdbc.pgsql.scodec +import java.time.Duration + import io.rdbc.pgsql.core.types.PgTypeRegistry package object types { - val ScodecBuiltInTypes = PgTypeRegistry( - ScodecPgBool, - ScodecPgInt2, ScodecPgInt4, ScodecPgInt8, - ScodecPgFloat4, ScodecPgFloat8, - ScodecPgDecimal, - ScodecPgTime, ScodecPgDate, ScodecPgTimestamp, ScodecPgTimestampTz, - ScodecPgChar, ScodecPgVarchar, ScodecPgText, - ScodecPgUuid, - ScodecPgBytea) + private[types] implicit class Duration2Micros(underlying: Duration) { + def toMicros: Long = { + (underlying.getSeconds * 1000L * 1000L) + (underlying.getNano / 1000L) + } + } } diff --git a/rdbc-pgsql-core/src/main/resources/reference.conf b/rdbc-pgsql-core/src/main/resources/reference.conf new file mode 100644 index 0000000..ae36737 --- /dev/null +++ b/rdbc-pgsql-core/src/main/resources/reference.conf @@ -0,0 +1,31 @@ +rdbc.pgsql.defaults { + host = "localhost" + port = 5432 + + pg-user = "changeme" + database = "changeme" + + auth { + type = "user-password" + user-password { + user = "changeme" + password = "changeme" + } + } + + max-batch-size = 100 + + lock-factory = "io.rdbc.pgsql.core.util.concurrent.BlockLockFactory" + + type-converters-providers = [ + 
"io.rdbc.typeconv.StandardTypeConvertersProvider" + ] + + write-timeout = 30 seconds + + execution-context = "global" + + # akka { + # + # } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/AbstractPgConnection.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/AbstractPgConnection.scala new file mode 100644 index 0000000..99b3678 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/AbstractPgConnection.scala @@ -0,0 +1,474 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core + +import java.nio.charset.Charset +import java.util.concurrent.atomic.AtomicInteger + +import akka.stream.Materializer +import akka.stream.scaladsl.{Sink, Source} +import io.rdbc.api.exceptions.{ConnectionClosedException, IllegalSessionStateException} +import io.rdbc.implbase.{ConnectionPartialImpl, ReturningInsertImpl} +import io.rdbc.pgsql.core.auth.Authenticator +import io.rdbc.pgsql.core.exception.PgUnsupportedCharsetException +import io.rdbc.pgsql.core.internal._ +import io.rdbc.pgsql.core.internal.fsm.StateAction.{Fatal, Goto, Stay} +import io.rdbc.pgsql.core.internal.fsm._ +import io.rdbc.pgsql.core.internal.fsm.streaming.{StrmBeginningTx, StrmWaitingForDescribe} +import io.rdbc.pgsql.core.internal.scheduler.{TaskScheduler, TimeoutHandler} +import io.rdbc.pgsql.core.pgstruct.messages.backend.{SessionParamKey, _} +import io.rdbc.pgsql.core.pgstruct.messages.frontend._ +import io.rdbc.pgsql.core.pgstruct.{ParamValue, ReturnColFormats, TxStatus} +import io.rdbc.sapi._ +import io.rdbc.util.Logging +import io.rdbc.util.Preconditions._ + +import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ExecutionContext, Future, Promise} + +//TODO make a note in Connection scaladoc that implementations must be thread safe +abstract class AbstractPgConnection(config: PgConnectionConfig, + implicit private[this] val out: ChannelWriter, + implicit protected val ec: ExecutionContext, + scheduler: TaskScheduler, + requestCanceler: RequestCanceler, + implicit private[this] val streamMaterializer: Materializer) + extends Connection + with ConnectionPartialImpl + with WriteFailureHandler + with FatalErrorHandler + with PgStatementExecutor + with PgStatementDeallocator + with Logging { + + private[this] val fsmManager = new PgSessionFsmManager(config.lockFactory, this) + @volatile private[this] var sessionParams = SessionParams.default + @volatile private[this] var stmtCache = PreparedStmtCache.empty + @volatile private[this] var 
maybeBackendKeyData = Option.empty[BackendKeyData] + private[this] val stmtCounter = new AtomicInteger(0) + + override def watchForIdle: Future[this.type] = fsmManager.readyFuture.map(_ => this) + + override def statement(sql: String): Future[AnyStatement] = traced { + argsNotNull() + checkNonEmptyString(sql) + Future.successful { + new PgAnyStatement(this, this, config.pgTypes, sessionParams, PgNativeStatement.parse(RdbcSql(sql))) + } + } + + override def beginTx()(implicit timeout: FiniteDuration): Future[Unit] = traced { + argsNotNull() + simpleQueryIgnoreResult(NativeSql("BEGIN")) + } + + override def commitTx()(implicit timeout: FiniteDuration): Future[Unit] = traced { + argsNotNull() + simpleQueryIgnoreResult(NativeSql("COMMIT")) + } + + override def rollbackTx()(implicit timeout: FiniteDuration): Future[Unit] = traced { + argsNotNull() + simpleQueryIgnoreResult(NativeSql("ROLLBACK")) + } + + override def returningInsert(sql: String): Future[ReturningInsert] = traced { + argsNotNull() + checkNonEmptyString(sql) + returningInsert(sql, "*") + } + + override def validate()(implicit timeout: FiniteDuration): Future[Boolean] = traced { + argsNotNull() + simpleQueryIgnoreResult(NativeSql("")).map(_ => true).recoverWith { + case ex: IllegalSessionStateException => Future.failed(ex) + case _ => Future.successful(false) + } + } + + override def returningInsert(sql: String, keyColumns: String*): Future[ReturningInsert] = traced { + argsNotNull() + checkNonEmptyString(sql) + checkNonEmpty(keyColumns) + val returningSql = sql + " returning " + keyColumns.mkString(",") + statement(returningSql).map { stmt => + new ReturningInsertImpl(stmt) + } + } + + override def release(): Future[Unit] = traced { + //TODO do nothing if already released + fsmManager.ifReady { (_, _) => + logger.debug(s"Releasing connection on client request") + doRelease("Connection released by client") + } + } + + override def forceRelease(): Future[Unit] = traced { + //TODO do nothing if already 
released + logger.info("Forcing a connection release") + doRelease("Connection released by client (forced)") + } + + def init(dbUser: String, dbName: String, authenticator: Authenticator): Future[Unit] = traced { + argsNotNull() + logger.debug(s"Initializing connection") + val initPromise = Promise[BackendKeyData] + fsmManager.triggerTransition(State.authenticating(initPromise, authenticator)(out, ec)) + + out.writeAndFlush(Startup(dbUser, dbName)).recoverWith(writeFailureHandler).flatMap { _ => + initPromise.future.map { returnedBkd => + maybeBackendKeyData = Some(returnedBkd) + () + } + } + } + + protected def handleClientCharsetChange(charset: Charset): Unit + + protected def handleServerCharsetChange(charset: Charset): Unit + + protected final def handleBackendMessage(msg: PgBackendMessage): Unit = traced { + //argsNotNull() + msg match { + case paramStatus: ParameterStatus => handleParamStatusChange(paramStatus) + case _ => + fsmManager.currentState.onMessage(msg) match { + case Stay(afterAcknowledged) => afterAcknowledged.foreach(_.apply()) + case Goto(newState, afterTransitionAction) => fsmManager.triggerTransition(newState, afterTransitionAction) + case Fatal(ex, afterReleaseAction) => + logger.error("Fatal error occurred, connection will be closed", ex) + doRelease(ex).map(_ => afterReleaseAction.foreach(_.apply())) + } + } + } + + protected[core] final def handleFatalError(msg: String, cause: Throwable): Unit = traced { + argsNotNull() + checkNonEmptyString(msg) + logger.error(msg, cause) + doRelease(cause) + } + + private[core] def executeStatementForStream(nativeSql: NativeSql, params: Vector[ParamValue])( + implicit timeout: FiniteDuration): Future[ResultStream] = traced { + fsmManager.ifReady { (reqId, txStatus) => + logger.debug(s"Executing statement '${nativeSql.value}'") + + val msgs@ParseAndBind(parse, _) = newParseAndBind(nativeSql, params) + val streamPromise = Promise[PgResultStream] + val parsePromise = Promise[Unit] + + 
writeInitialExecuteMessages(txStatus, reqId, msgs, streamPromise, parsePromise) + .recoverWith(writeFailureHandler) + .flatMap(_ => updateStmtCacheIfNeeded(parse, parsePromise.future, nativeSql)) + .flatMap(_ => streamPromise.future) + } + } + + private def updateStmtCacheIfNeeded(maybeParse: Option[Parse], + parseFut: Future[Unit], + nativeSql: NativeSql): Future[Unit] = { + maybeParse.flatMap(_.optionalName) match { + case Some(stmtName) => parseFut.map(_ => stmtCache = stmtCache.updated(nativeSql, stmtName)) + case None => unitFuture + } + } + + private def writeInitialExecuteMessages(txStatus: TxStatus, + reqId: RequestId, + messages: ParseAndBind, + streamPromise: Promise[PgResultStream], + parsePromise: Promise[Unit])( + implicit timeout: FiniteDuration): Future[Unit] = { + txStatus match { + case TxStatus.Active | TxStatus.Failed => + fsmManager.triggerTransition( + waitingForDescribeResult(reqId, messages.bind.portal, streamPromise, parsePromise) + ) + messages.parse.foreach(out.write(_)) + out.writeAndFlush(messages.bind, DescribePortal(messages.bind.portal), Sync) + + case TxStatus.Idle => + fsmManager.triggerTransition( + beginningTx(reqId, messages.parse, messages.bind, streamPromise, parsePromise) + ) + out.writeAndFlush(Query(NativeSql("BEGIN"))) + } + } + + private def waitingForDescribeResult(reqId: RequestId, + portalName: Option[PortalName], + streamPromise: Promise[PgResultStream], + parsePromise: Promise[Unit])( + implicit timeout: FiniteDuration): StrmWaitingForDescribe = { + State.Streaming.waitingForDescribe( + txMgmt = false, + portalName = portalName, + streamPromise = streamPromise, + parsePromise = parsePromise, + pgTypes = config.pgTypes, + typeConverters = config.typeConverters, + sessionParams = sessionParams, + timeoutHandler = newTimeoutHandler(reqId, timeout), + lockFactory = config.lockFactory, + fatalErrorNotifier = handleFatalError)(out, ec) + } + + private def beginningTx(reqId: RequestId, + parse: Option[Parse], + bind: Bind, + 
streamPromise: Promise[PgResultStream], + parsePromise: Promise[Unit])( + implicit timeout: FiniteDuration): StrmBeginningTx = { + State.Streaming.beginningTx( + maybeParse = parse, + bind = bind, + streamPromise = streamPromise, + parsePromise = parsePromise, + sessionParams = sessionParams, + timeoutHandler = newTimeoutHandler(reqId, timeout), + typeConverters = config.typeConverters, + pgTypes = config.pgTypes, + lockFactory = config.lockFactory, + fatalErrorNotifier = handleFatalError)(out, ec) + } + + sealed trait StatementStatus + object StatementStatus { + case class NotCachedDoCache(stmtName: StmtName) extends StatementStatus + case object NotCachedDontCache extends StatementStatus + case class Cached(stmtName: StmtName) extends StatementStatus + } + + private def determineStmtStatus(nativeSql: NativeSql): StatementStatus = { + stmtCache.get(nativeSql) match { + case Some(stmtName) => StatementStatus.Cached(stmtName) + case None => + if (shouldCache(nativeSql)) StatementStatus.NotCachedDoCache(nextStmtName()) + else StatementStatus.NotCachedDontCache + } + } + + object ParseAndBind { + def apply(bind: Bind): ParseAndBind = ParseAndBind(None, bind) + def apply(parse: Parse, bind: Bind): ParseAndBind = ParseAndBind(Some(parse), bind) + } + + case class ParseAndBind(parse: Option[Parse], bind: Bind) + + private def newParseAndBind(nativeSql: NativeSql, params: Vector[ParamValue]): ParseAndBind = { + def newParse(maybeStmtName: Option[StmtName]): Parse = { + Parse(maybeStmtName, nativeSql, params.map(_.dataTypeOid)) + } + + def newBind(maybeStmtName: Option[StmtName]): Bind = { + Bind(portal = None, maybeStmtName, params, ReturnColFormats.AllBinary) + } + + determineStmtStatus(nativeSql) match { + case StatementStatus.Cached(stmtName) => ParseAndBind(newBind(Some(stmtName))) + case StatementStatus.NotCachedDoCache(stmtName) => ParseAndBind(newParse(Some(stmtName)), newBind(Some(stmtName))) + case StatementStatus.NotCachedDontCache => 
ParseAndBind(newParse(None), newBind(None)) + } + } + + private def shouldCache(nativeSql: NativeSql): Boolean = traced { + //will be implemented as part of resolving https://github.com/rdbc-io/rdbc-pgsql/issues/42 + true + } + + private[core] def executeStatementForRowsAffected(nativeSql: NativeSql, params: Vector[ParamValue])( + implicit timeout: FiniteDuration): Future[Long] = traced { + fsmManager.ifReady { (reqId, _) => + logger.debug(s"Executing write-only statement '$nativeSql'") + + val ParseAndBind(parse, bind) = newParseAndBind(nativeSql, params) + + val parsePromise = Promise[Unit] + val resultPromise = Promise[Long] + + fsmManager.triggerTransition(State.executingWriteOnly(parsePromise, resultPromise)) + parse.foreach(out.write(_)) + out + .writeAndFlush(bind, Execute(optionalPortalName = bind.portal, optionalFetchSize = None), Sync) + .recoverWith(writeFailureHandler) + .flatMap { _ => + val timeoutTask = newTimeoutHandler(reqId, timeout).scheduleTimeoutTask() + updateStmtCacheIfNeeded(parse, parsePromise.future, nativeSql) + .flatMap(_ => resultPromise.future) + .map { result => + timeoutTask.cancel() + result + } + } + } + } + + private[core] def executeParamsStream(nativeSql: NativeSql, + paramsSource: ParamsSource): Future[Unit] = traced { + fsmManager.ifReady { (_, _) => + sourceWithParseWritten(nativeSql, paramsSource) + .batch(max = config.maxBatchSize, seed = Vector(_))(_ :+ _) + .mapAsyncUnordered(parallelism = 1)(executeBatch) + .runWith(Sink.last) + .map(txStatus => fsmManager.triggerTransition(newState = Idle(txStatus))) + .map(_ => ()) + } + } + + /** Transforms params source to a source which upon materialization sends "Parse" to the backend before + * any of source's elements are processed. 
*/ + private def sourceWithParseWritten(nativeSql: NativeSql, + paramsSource: ParamsSource): ParamsSource = { + paramsSource.prefixAndTail(1).flatMapConcat { case (head, tail) => + val firstParams = head.head + out.write(Parse(None, nativeSql, firstParams.map(_.dataTypeOid))) + Source(head).concat(tail) + } + } + + private def executeBatch(batch: Vector[Vector[ParamValue]]): Future[TxStatus] = { + val execute = Execute(optionalPortalName = None, optionalFetchSize = None) + val batchMsgs = batch.flatMap { params => + Vector(Bind(execute.optionalPortalName, None, params, ReturnColFormats.AllBinary), execute) + } + + val batchPromise = Promise[TxStatus] + fsmManager.triggerTransition(State.executingBatch(batchPromise)) + out + .writeAndFlush(batchMsgs :+ Sync) + .recoverWith(writeFailureHandler) + .flatMap(_ => batchPromise.future) + } + + private[core] def handleWriteError(cause: Throwable): Unit = traced { + handleFatalError("Write error occurred, the connection will be closed", cause) + } + + private def simpleQueryIgnoreResult(sql: NativeSql)(implicit timeout: FiniteDuration): Future[Unit] = traced { + fsmManager.ifReady { (reqId, _) => + val queryPromise = Promise[Unit] + fsmManager.triggerTransition(State.simpleQuerying(queryPromise)) + out + .writeAndFlush(Query(sql)) + .recoverWith(writeFailureHandler) + .map(_ => newTimeoutHandler(reqId, timeout).scheduleTimeoutTask()) + .flatMap { timeoutTask => + queryPromise.future.map { _ => + timeoutTask.cancel() + } + } + } + } + + private def handleCharsetChange(pgCharsetName: String)(consumer: Charset => Unit): Unit = traced { + try { + consumer(PgCharset.toJavaCharset(pgCharsetName)) + } catch { + case ex: PgUnsupportedCharsetException => handleFatalError(ex.getMessage, ex) + } + } + + private def handleParamStatusChange(p: ParameterStatus): Unit = traced { + p match { + case ParameterStatus(SessionParamKey("client_encoding"), SessionParamVal(pgCharsetName)) => + handleCharsetChange(pgCharsetName) { charset => + 
handleClientCharsetChange(charset) + sessionParams = sessionParams.copy(clientCharset = charset) + } + + case ParameterStatus(SessionParamKey("server_encoding"), SessionParamVal(pgCharsetName)) => + handleCharsetChange(pgCharsetName) { charset => + handleServerCharsetChange(charset) + sessionParams = sessionParams.copy(serverCharset = charset) + } + + case _ => () + } + logger.debug(s"Session parameter '${p.key.value}' is now set to '${p.value.value}'") + } + + private def nextStmtName(): StmtName = traced { + StmtName("S" + stmtCounter.incrementAndGet()) + } + + private def doRelease(cause: Throwable): Future[Unit] = traced { + out + .writeAndFlush(Terminate) + .recover { case writeEx => + logger.error("Write error occurred when terminating connection", writeEx) + } + .flatMap { _ => + val connClosedEx = cause match { + case ex: ConnectionClosedException => ex + case ex => new ConnectionClosedException("Connection closed", ex) + } + fsmManager.triggerTransition(ConnectionClosed(connClosedEx)) + out.close().recover { case closeEx => + logger.error("Channel close error occurred when terminating connection", closeEx) + } + } + } + + private def doRelease(cause: String): Future[Unit] = traced { + doRelease(new ConnectionClosedException(cause)) + } + + private def writeFailureHandler[T]: PartialFunction[Throwable, Future[T]] = { + case writeEx => + handleWriteError(writeEx) + Future.failed(writeEx) + } + + private def newTimeoutHandler(reqId: RequestId, timeout: FiniteDuration): TimeoutHandler = traced { + new TimeoutHandler(scheduler, timeout, timeoutAction = { + val shouldCancel = fsmManager.startHandlingTimeout(reqId) + if (shouldCancel) { + logger.debug(s"Timeout occurred for request '$reqId', cancelling it") + maybeBackendKeyData.foreach { bkd => + requestCanceler(bkd).onComplete(_ => fsmManager.finishHandlingTimeout()) + } + } else { + logger.debug(s"Timeout task ran for request '$reqId', but this request is not being executed anymore") + } + }) + } + + 
private[core] def deallocateStatement(nativeSql: NativeSql): Future[Unit] = traced { + fsmManager.ifReady { (_, txStatus) => + stmtCache.get(nativeSql) match { + case Some(stmtName) => deallocateCached(stmtName) + case None => + fsmManager.triggerTransition(Idle(txStatus)) + unitFuture + } + } + } + + private def deallocateCached(stmtName: StmtName): Future[Unit] = traced { + val promise = Promise[Unit] + fsmManager.triggerTransition(new DeallocatingStatement(promise)) + out + .writeAndFlush(CloseStatement(Some(stmtName)), Sync) + .recoverWith { case writeEx => + handleWriteError(writeEx) + Future.failed(writeEx) + } + .flatMap(_ => promise.future) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/ChannelWriter.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/ChannelWriter.scala index d6e48c5..2bc77d7 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/ChannelWriter.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/ChannelWriter.scala @@ -17,7 +17,7 @@ package io.rdbc.pgsql.core import io.rdbc._ -import io.rdbc.pgsql.core.messages.frontend.PgFrontendMessage +import io.rdbc.pgsql.core.pgstruct.messages.frontend.PgFrontendMessage import scala.concurrent.Future diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgAnyStatement.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgAnyStatement.scala deleted file mode 100644 index 6d913da..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgAnyStatement.scala +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.core - -import io.rdbc.ImmutIndexedSeq -import io.rdbc.api.exceptions.{MissingParamValException, NoSuitableConverterFoundException} -import io.rdbc.implbase.BindablePartialImpl -import io.rdbc.pgsql.core.messages.data.Unknown -import io.rdbc.pgsql.core.messages.frontend.{BinaryDbValue, DbValue, NullDbValue} -import io.rdbc.pgsql.core.types.{PgType, PgTypeRegistry} -import io.rdbc.sapi._ -import org.reactivestreams.Publisher - -import scala.concurrent.{ExecutionContext, Future} - -class PgAnyStatement(stmtExecutor: PgStatementExecutor, - stmtDeallocator: PgStatementDeallocator, - pgTypeRegistry: PgTypeRegistry, - sessionParams: SessionParams, - val nativeStmt: PgNativeStatement)(implicit ec: ExecutionContext) - extends AnyStatement - with BindablePartialImpl[AnyParametrizedStatement] { - - def nativeSql: String = nativeStmt.statement - - def bind(params: (String, Any)*): AnyParametrizedStatement = { - val indexedDbValues = convertParamsToSeq(Map(params: _*)) - parametrizedStmt(indexedDbValues) - } - - def bindByIdx(params: Any*): AnyParametrizedStatement = { - val dbValues = params.map(convertParam).toVector - parametrizedStmt(dbValues) - } - - def noParams: AnyParametrizedStatement = parametrizedStmt(Vector.empty) //TODO validate whether there really are no params in the statement - - def streamParams(paramsPublisher: Publisher[Map[String, Any]]): Future[Unit] = { - import akka.stream.scaladsl._ - - val stmtSource = Source.fromPublisher(paramsPublisher).map { paramMap => - //TODO this is very inefficient, only the 
first element should parse native stmt etc - convertParamsToSeq(paramMap) - } - - stmtExecutor.executeParamsStream(nativeSql, stmtSource) - } - - def deallocate(): Future[Unit] = stmtDeallocator.deallocateStatement(nativeSql) - - private def convertParamsToSeq(params: Map[String, Any]): ImmutIndexedSeq[DbValue] = { - val dbValues: Map[String, DbValue] = convertNamedParams(params) - val indexedDbValues = nativeStmt.params.foldLeft(Vector.empty[DbValue]) { (acc, paramName) => - dbValues.get(paramName) match { - case Some(paramValue) => acc :+ paramValue - case None => throw new MissingParamValException(paramName) - } - } - indexedDbValues - } - - private def parametrizedStmt(dbValues: ImmutIndexedSeq[DbValue]): AnyParametrizedStatement = { - new PgParametrizedStatement(stmtExecutor, stmtDeallocator, nativeStmt.statement, dbValues) - } - - private def convertParam(value: Any): DbValue = { - //TODO document in bind null/None/Some support - value match { - case null | None => NullDbValue(Unknown.oid) - case NullParam(cls) => withPgType(cls)(pgType => NullDbValue(pgType.typeOid)) - case NotNullParam(notNullVal) => convertNotNullParam(notNullVal) - case Some(notNullVal) => convertNotNullParam(notNullVal) - case notNullVal => convertNotNullParam(notNullVal) - } - } - - private def withPgType[A, B](cls: Class[A])(block: PgType[A] => B): B = { - pgTypeRegistry.byClass(cls) - .map(block) - .getOrElse(throw NoSuitableConverterFoundException(cls)) - } - - private def convertNotNullParam(value: Any): DbValue = { - withPgType(value.getClass) { pgType => - val binVal = pgType.asInstanceOf[PgType[Any]].toPgBinary(value)(sessionParams) - BinaryDbValue(binVal, pgType.typeOid) - } - } - - private def convertNamedParams(params: Map[String, Any]): Map[String, DbValue] = params.map { nameValue => - val (name, value) = nameValue - (name, convertParam(value)) - } -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgConnection.scala 
b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgConnection.scala deleted file mode 100644 index bfd047b..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgConnection.scala +++ /dev/null @@ -1,451 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.core - -import java.nio.charset.Charset -import java.util.concurrent.atomic.AtomicInteger - -import akka.NotUsed -import akka.stream.Materializer -import akka.stream.scaladsl.{Sink, Source} -import com.typesafe.scalalogging.StrictLogging -import io.rdbc._ -import io.rdbc.api.exceptions.{ConnectionClosedException, IllegalSessionStateException} -import io.rdbc.implbase.{ConnectionPartialImpl, ReturningInsertImpl} -import io.rdbc.pgsql.core.auth.Authenticator -import io.rdbc.pgsql.core.fsm.State.{Fatal, Goto, Stay} -import io.rdbc.pgsql.core.fsm._ -import io.rdbc.pgsql.core.fsm.extendedquery.batch.ExecutingBatch -import io.rdbc.pgsql.core.fsm.extendedquery.writeonly.ExecutingWriteOnly -import io.rdbc.pgsql.core.fsm.extendedquery.{BeginningTx, WaitingForDescribe} -import io.rdbc.pgsql.core.messages.backend._ -import io.rdbc.pgsql.core.messages.frontend._ -import io.rdbc.pgsql.core.scheduler.{TaskScheduler, TimeoutScheduler} -import io.rdbc.pgsql.core.types.PgTypeRegistry -import io.rdbc.pgsql.core.util.SleepLock -import io.rdbc.sapi._ - -import scala.concurrent.duration.FiniteDuration -import 
scala.concurrent.{ExecutionContext, Future, Promise} -import scala.util.control.NonFatal -import scala.util.{Failure, Success} - -trait PgStatementExecutor { - def executeStatementForStream(nativeSql: String, params: ImmutSeq[DbValue])(implicit timeout: FiniteDuration): Future[ResultStream] - def executeStatementForRowsAffected(nativeSql: String, params: ImmutSeq[DbValue])(implicit timeout: FiniteDuration): Future[Long] - def executeParamsStream(nativeSql: String, params: Source[ImmutSeq[DbValue], NotUsed]): Future[Unit] -} - -trait PgStatementDeallocator { - def deallocateStatement(nativeSql: String): Future[Unit] -} - - -//TODO make a note in Connection scaladoc that implementations must be thread safe -abstract class PgConnection(val pgTypeConvRegistry: PgTypeRegistry, - val rdbcTypeConvRegistry: TypeConverterRegistry, - private[pgsql] val out: ChannelWriter, - implicit val ec: ExecutionContext, - private[pgsql] val scheduler: TaskScheduler, - requestCanceler: (BackendKeyData) => Future[Unit], - implicit val streamMaterializer: Materializer) - extends Connection - with ConnectionPartialImpl - with StrictLogging { - - thisConn => - - class FsmManager { - private[this] val lock = new SleepLock //TODO make lock impl configurable - - private[this] var ready = false - private[this] var handlingTimeout = false - private[this] var state: State = Uninitialized - private[this] var readyPromise = Promise[thisConn.type] - private[this] var lastRequestId = Long.MinValue - - def ifReady[A](block: (Long, TxStatus) => Future[A]): Future[A] = { - val action: () => Future[A] = lock.withLock { - if (handlingTimeout) { - () => Future.failed(new IllegalSessionStateException(s"Session is busy, currently cancelling timed out action")) - } else if (ready) { - state match { - case Idle(txStatus) => - ready = false - state = StartingRequest - readyPromise = Promise[thisConn.type] - lastRequestId = lastRequestId + 1L - val localLastRequestId = lastRequestId - () => 
block(localLastRequestId, txStatus) - - case _ => ??? //TODO fatal error bug - } - } else { - state match { - case ConnectionClosed(cause) => () => Future.failed(cause) - case _ => () => Future.failed(new IllegalSessionStateException(s"Session is busy, currently processing query")) - } - } - } - action() - } - - def triggerTransition(newState: State, afterTransition: Option[() => Future[Unit]] = None): Unit = { - val successful = lock.withLock { - state match { - case ConnectionClosed(_) => false - case _ => - newState match { - case Idle(_) => - ready = true - readyPromise.success(thisConn) - - case ConnectionClosed(cause) => - ready = false - if (!readyPromise.isCompleted) { - readyPromise.failure(cause) - } - - case _ => () - } - state = newState - true - } - } - if (successful) { - logger.trace(s"Transitioned to state '$newState'") - //TODO note that afterTransition action can't access state or any transactional data - afterTransition.foreach(_.apply().recover { - case NonFatal(ex) => onFatalError("Fatal error occurred in handling after state transition logic", ex) - }) - } - } - - def onTimeout[A](reqId: Long): Unit = { - val shouldCancel = lock.withLock { - if (!handlingTimeout && !ready && lastRequestId == reqId) { - handlingTimeout = true - true - } else false - } - if (shouldCancel) { - logger.debug(s"Timeout occurred for request '$reqId', cancelling it") - bkd.foreach { bkd => - requestCanceler(bkd).onComplete(_ => lock.withLock(handlingTimeout = false)) - } - } - } - - def currentState: State = lock.withLock(state) - - def readyFuture: Future[thisConn.type] = lock.withLock(readyPromise.future) - - } - - private val fsmManager = new FsmManager - - @volatile private var sessionParams = SessionParams.default - @volatile private var stmtCache = PreparedStmtCache.empty - @volatile private var bkd = Option.empty[BackendKeyData] - private val stmtCounter = new AtomicInteger(0) - - def watchForIdle: Future[this.type] = fsmManager.readyFuture - - def 
statement(sql: String): Future[AnyStatement] = { - Future.successful(new PgAnyStatement(stmtExecutor, stmtDeallocator, pgTypeConvRegistry, sessionParams, PgNativeStatement.parse(sql))) - } - - def beginTx()(implicit timeout: FiniteDuration): Future[Unit] = simpleQueryIgnoreResult("BEGIN") - def commitTx()(implicit timeout: FiniteDuration): Future[Unit] = simpleQueryIgnoreResult("COMMIT") - def rollbackTx()(implicit timeout: FiniteDuration): Future[Unit] = simpleQueryIgnoreResult("ROLLBACK") - def returningInsert(sql: String): Future[ReturningInsert] = returningInsert(sql, "*") - - def validate()(implicit timeout: FiniteDuration): Future[Boolean] = simpleQueryIgnoreResult("").map(_ => true).recoverWith { - case ex: IllegalSessionStateException => Future.failed(ex) - case _ => Future.successful(false) - } - - def returningInsert(sql: String, keyColumns: String*): Future[ReturningInsert] = { - val returningSql = sql + " returning " + keyColumns.mkString(",") - statement(returningSql).map { stmt => - new ReturningInsertImpl(stmt) - } - } - - protected def simpleQueryIgnoreResult(sql: String)(implicit timeout: FiniteDuration): Future[Unit] = fsmManager.ifReady { (reqId, _) => - //TODO timeout - val queryPromise = Promise[Unit] - fsmManager.triggerTransition(new SimpleQuerying.PullingRows(out, queryPromise)) - out.writeAndFlush(Query(sql)).recoverWith(writeFailureHandler) - .flatMap(_ => queryPromise.future) - //TODO common handler for ChannelErrors -> close conn & treat as fatal - //TODO handle TCP disconnects - } - - private[pgsql] def init(dbUser: String, dbName: String, authenticator: Authenticator): Future[Unit] = { - logger.debug(s"Initializing connection") - val initPromise = Promise[BackendKeyData] - fsmManager.triggerTransition(new Authenticating(initPromise, authenticator)(out, ec)) - - out.writeAndFlush(StartupMessage(dbUser, dbName)).recoverWith(writeFailureHandler).flatMap { _ => - initPromise.future.map { returnedBkd => - bkd = Some(returnedBkd) - () - } - 
} - - } - - private def handleCharsetChange(pgCharsetName: String)(consumer: (Charset) => Unit) = { - PgCharset.toJavaCharset(pgCharsetName) match { - case Some(charset) => consumer(charset) - case None => - val msg = s"Unsupported charset '$pgCharsetName'" - logger.error(msg) - doRelease(msg) - } - } - - protected def clientCharsetChanged(charset: Charset): Unit - - protected def serverCharsetChanged(charset: Charset): Unit - - protected def onMessage(msg: PgBackendMessage): Unit = { - logger.trace(s"Received backend message '$msg'") - msg match { - case ParameterStatus("client_encoding", pgCharsetName) => handleCharsetChange(pgCharsetName) { charset => - clientCharsetChanged(charset) - sessionParams = sessionParams.copy(clientCharset = charset) - } - - case ParameterStatus("server_encoding", pgCharsetName) => handleCharsetChange(pgCharsetName) { charset => - serverCharsetChanged(charset) - sessionParams = sessionParams.copy(serverCharset = charset) - } - - case ParameterStatus(name, value) => logger.debug(s"Received parameter '$name' = '$value'") - - case _ => - fsmManager.currentState.onMessage(msg) match { - case Stay => () - case Goto(newState, afterTransitionAction) => fsmManager.triggerTransition(newState, afterTransitionAction) - case Fatal(ex, afterReleaseAction) => - logger.error("Fatal error occurred, connection will be closed", ex) - doRelease(ex).map(_ => afterReleaseAction.foreach(_.apply())) - } - } - } - - private[pgsql] def nextStmtName(): String = "S" + stmtCounter.incrementAndGet() - - def release(): Future[Unit] = fsmManager.ifReady { (_, _) => - logger.debug(s"Releasing connection on client request") - doRelease("Connection released by client") - } - - protected def doRelease(cause: Throwable): Future[Unit] = { - out.writeAndFlush(Terminate).recover { - case writeEx => - logger.error("Write error occurred when terminating connection", writeEx) - () - }.map { _ => - out.close() - //TODO this block needs to happen regardless of the success of 
Terminate write, so can't use map here - val connClosedEx = cause match { - case ex: ConnectionClosedException => ex - case ex => new ConnectionClosedException("Connection closed", ex) - } - fsmManager.triggerTransition(ConnectionClosed(connClosedEx)) - } - } - - private def onWriteError(cause: Throwable): Unit = { - onFatalError("Write error occurred, the connection will be closed", cause) - } - - private def onFatalError(msg: String, cause: Throwable): Unit = { - logger.error(msg, cause) - out.close() - fsmManager.triggerTransition(ConnectionClosed(new ConnectionClosedException("Connection closed", cause))) - } - - protected def doRelease(cause: String): Future[Unit] = { - doRelease(new ConnectionClosedException(cause)) - } - - - def forceRelease(): Future[Unit] = { - logger.info("Forcing a connection release") - doRelease("Connection released by client (forced)") - } - - private val stmtDeallocator = new PgStatementDeallocator { - def deallocateStatement(nativeSql: String): Future[Unit] = fsmManager.ifReady { (_, txStatus) => - stmtCache.get(nativeSql) match { - case Some(stmtName) => - val promise = Promise[Unit] - fsmManager.triggerTransition(new DeallocatingStatement(promise)) - out.writeAndFlush(CloseStatement(Some(stmtName)), Sync).recoverWith(writeFailureHandler) - .flatMap(_ => promise.future) - - case None => - fsmManager.triggerTransition(Idle(txStatus)) - Future.successful(()) - } - } - } - - private val stmtExecutor = new PgStatementExecutor { - def executeStatementForStream(nativeSql: String, params: ImmutSeq[DbValue]) - (implicit timeout: FiniteDuration): Future[ResultStream] = { - //TODO close portal after completion? 
- fsmManager.ifReady { (reqId, txStatus) => - logger.debug(s"Executing statement '$nativeSql'") - val (parse, bind) = parseAndBind(nativeSql, params) - - val streamPromise = Promise[PgResultStream] - val parsePromise = Promise[Unit] - - val timeoutScheduler = TimeoutScheduler { - scheduler.schedule(timeout) { - fsmManager.onTimeout(reqId) - } - } - - val writeFut = txStatus match { - case TxStatus.Active => - fsmManager.triggerTransition(WaitingForDescribe.withoutTxMgmt(bind.portal, streamPromise, parsePromise, sessionParams, - timeoutScheduler, rdbcTypeConvRegistry, pgTypeConvRegistry, onFatalError)(out, ec)) - parse.foreach(out.write(_)) - out.writeAndFlush(bind, Describe(PortalType, bind.portal), Sync) - - case TxStatus.Idle => - fsmManager.triggerTransition(BeginningTx(parse, bind, streamPromise, parsePromise, sessionParams, timeoutScheduler, rdbcTypeConvRegistry, pgTypeConvRegistry, onFatalError)(out, ec)) - out.writeAndFlush(Query("BEGIN")) - - case TxStatus.Failed => Future.successful(()) //TODO oooo - } - - writeFut.recoverWith(writeFailureHandler).flatMap { _ => - parse.flatMap(_.optionalName).foreach { stmtName => - parsePromise.future.foreach { _ => - stmtCache = stmtCache.updated(nativeSql, stmtName) - } - } - streamPromise.future - } - } - } - - protected def parseAndBind(nativeSql: String, params: ImmutSeq[DbValue]): (Option[Parse], Bind) = { - val cachedPreparedStatement = stmtCache.get(nativeSql) - //TODO can't use the same cached prepared statement for other param types. 
nativeSql + paramTypes have to be the cache key - val (stmtName, parse) = if (cachedPreparedStatement.isDefined) { - (cachedPreparedStatement, Option.empty[Parse]) - } else { - val stmtName = if (shouldCache()) Some(nextStmtName()) else None - val parse = Some(Parse(stmtName, nativeSql, params.map(_.dataTypeOid).toVector)) - (stmtName, parse) - } - - //TODO AllTextual TODO toList - (parse, Bind(stmtName.map(_ + "P"), stmtName, params.toList, AllBinary)) - } - - protected def shouldCache(): Boolean = { - //TODO introduce a cache threshold - true - } - - def executeStatementForRowsAffected(nativeSql: String, params: ImmutSeq[DbValue])(implicit timeout: FiniteDuration): Future[Long] = { - fsmManager.ifReady { (reqId, _) => - logger.debug(s"Executing write-only statement '$nativeSql'") - val (parse, bind) = parseAndBind(nativeSql, params) - - val timeoutScheduler = TimeoutScheduler { - scheduler.schedule(timeout) { - fsmManager.onTimeout(reqId) - } - } - timeoutScheduler.scheduleTimeout() - //TODO code dupl - - val promise = Promise[Long] - fsmManager.triggerTransition(new ExecutingWriteOnly(promise)) - - parse.foreach(out.write(_)) - out.writeAndFlush(bind.copy(portal = None), Execute(None, None), Sync).recoverWith(writeFailureHandler).flatMap { _ => - val fut = promise.future - - fut.map { rowsAffected => - parse.flatMap(_.optionalName).foreach { stmtName => - stmtCache = stmtCache.updated(nativeSql, stmtName) - } - rowsAffected - } - } - } - } - - def executeParamsStream(nativeSql: String, paramsSource: Source[ImmutSeq[DbValue], NotUsed]): Future[Unit] = { - fsmManager.ifReady { (_, _) => - val promise = Promise[Unit] - - val stmtName = Option.empty[String] - val portalName = Option.empty[String] - val execute = Execute(portalName, None) - var first = true - //TODO make max batch size configurable - paramsSource.batch(100L, first => Vector(first))((acc, elem) => acc :+ elem).mapAsyncUnordered(1) { batch => - val batchPromise = Promise[TxStatus] - 
fsmManager.triggerTransition(new ExecutingBatch(batchPromise)) - - if (first) { - val parse = Parse(stmtName, nativeSql, batch.head.map(_.dataTypeOid).toVector) //TODO use cached value if available - out.write(parse) - first = false - } - - batch.foreach { oneParamSet => - out.write(Bind(portalName, stmtName, oneParamSet.toList, AllBinary), execute) - } - - out.writeAndFlush(Sync).recoverWith(writeFailureHandler) - .flatMap(_ => batchPromise.future) - - }.runWith(Sink.last).onComplete { - case Success(txStatus) => fsmManager.triggerTransition( - newState = Idle(txStatus), - afterTransition = Some(() => Future.successful(promise.success(()))) - ) - - case Failure(ex) => promise.failure(ex) - } - - promise.future - } - } - } - - private def writeFailureHandler[T]: PartialFunction[Throwable, Future[T]] = { - case writeEx => - logger.error("Write error occurred, connection will be closed", writeEx) - onWriteError(writeEx) - Future.failed(writeEx) - } - -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/FieldDescription.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgConnectionConfig.scala similarity index 60% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/FieldDescription.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgConnectionConfig.scala index a6bed8c..4157a49 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/FieldDescription.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgConnectionConfig.scala @@ -14,12 +14,13 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core -import io.rdbc.pgsql.core.messages.data.{DataType, DbValFormat, Oid} +import io.rdbc.pgsql.core.types.PgTypeRegistry +import io.rdbc.pgsql.core.util.concurrent.LockFactory +import io.rdbc.sapi.TypeConverterRegistry -case class FieldDescription(name: String, - tableOid: Option[Oid], - columnAttr: Option[Int], - dataType: DataType, - fieldFormat: DbValFormat) +case class PgConnectionConfig(pgTypes: PgTypeRegistry, + typeConverters: TypeConverterRegistry, + lockFactory: LockFactory, + maxBatchSize: Long) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgNativeStatement.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgNativeStatement.scala deleted file mode 100644 index 2962b95..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgNativeStatement.scala +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.core - -import io.rdbc._ - -object PgNativeStatement { - def parse(statement: String): PgNativeStatement = { - /* matches ":param1", ":param2" etc. 
and groups a match without an initial colon */ - val paramPattern = """:([^\W]*)""".r - val params = paramPattern.findAllMatchIn(statement).map(_.group(1)).toVector - val nativeStatement = params.zipWithIndex.foldLeft(statement) { (acc, elem) => - val (param, idx) = elem - acc.replaceFirst(":" + param, "\\$" + (idx + 1)) - } - PgNativeStatement(nativeStatement, params) - } -} - -case class PgNativeStatement(statement: String, params: ImmutIndexedSeq[String]) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgResultStream.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgResultStream.scala deleted file mode 100644 index 4a60c94..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgResultStream.scala +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.core - -import io.rdbc.ImmutSeq -import io.rdbc.pgsql.core.messages.backend.{RowDescription, StatusMessage} -import io.rdbc.pgsql.core.types.PgTypeRegistry -import io.rdbc.sapi._ -import org.reactivestreams.Publisher - -import scala.concurrent.{ExecutionContext, Future} - -class PgResultStream(val rows: Publisher[Row], - rowDesc: RowDescription, - val rowsAffected: Future[Long], - warningMsgsFut: Future[ImmutSeq[StatusMessage]], - pgTypeConvRegistry: PgTypeRegistry, - rdbcTypeConvRegistry: TypeConverterRegistry) - (implicit ec: ExecutionContext) extends ResultStream { - - val warnings: Future[ImmutSeq[Warning]] = warningMsgsFut.map { warnMsgs => - warnMsgs.map(warnMsg => Warning(warnMsg.statusData.shortInfo, warnMsg.statusData.sqlState)) - } - - lazy val metadata: RowMetadata = { - val columnsMetadata = rowDesc.fieldDescriptions.map { fdesc => - ColumnMetadata(fdesc.name, fdesc.dataType.oid.code.toString, pgTypeConvRegistry.oid2type.get(fdesc.dataType.oid).map(_.cls)) - } - RowMetadata(columnsMetadata) - } -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgRow.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgRow.scala deleted file mode 100644 index 891c0e5..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgRow.scala +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.core - -import io.rdbc.api.exceptions.MissingColumnException -import io.rdbc.implbase.RowPartialImpl -import io.rdbc.pgsql.core.exception.{PgDriverInternalErrorException, PgUnsupportedType} -import io.rdbc.pgsql.core.messages.backend.RowDescription -import io.rdbc.pgsql.core.messages.data.DbValFormat.{BinaryDbValFormat, TextualDbValFormat} -import io.rdbc.pgsql.core.messages.data.{DataType, FieldValue, NotNullFieldValue, NullFieldValue} -import io.rdbc.pgsql.core.types.PgTypeRegistry -import io.rdbc.sapi.{Row, TypeConverterRegistry} - -class PgRow(rowDesc: RowDescription, - cols: IndexedSeq[FieldValue], - nameMapping: Map[String, Int], - rdbcTypeConvRegistry: TypeConverterRegistry, - pgTypeConvRegistry: PgTypeRegistry, - implicit val sessionParams: SessionParams) - extends Row with RowPartialImpl { - - val typeConverterRegistry = rdbcTypeConvRegistry - - protected def notConverted(name: String): Any = { - nameMapping.get(name) match { - case Some(idx) => notConverted(idx) - case None => throw new MissingColumnException(name) - } - } - - protected def notConverted(idx: Int): Any = { - val fieldVal = cols(idx) - fieldVal match { - case NullFieldValue => null - case NotNullFieldValue(rawFieldVal) => - val fieldDesc = rowDesc.fieldDescriptions(idx) - fieldDesc.fieldFormat match { - case BinaryDbValFormat => binaryToObj(fieldDesc.dataType, rawFieldVal) - case TextualDbValFormat => throw PgDriverInternalErrorException(s"Value '$fieldVal' of field '$fieldDesc' is in textual format, which is unsupported") - } - } - } - - private def binaryToObj(pgType: DataType, binaryVal: Array[Byte]): Any = { - pgTypeConvRegistry.byTypeOid(pgType.oid).map(_.toObj(binaryVal)) - .getOrElse(throw new PgUnsupportedType(pgType)) - } -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/SessionParams.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/SessionParams.scala index 168869c..b5c139e 100644 --- 
a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/SessionParams.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/SessionParams.scala @@ -19,10 +19,12 @@ package io.rdbc.pgsql.core import java.nio.charset.Charset object SessionParams { + private val DefaultCharset = Charset.forName("US-ASCII") + val default = SessionParams( - clientCharset = Charset.forName("US-ASCII"), - serverCharset = Charset.forName("US-ASCII") + clientCharset = DefaultCharset, + serverCharset = DefaultCharset ) } -case class SessionParams(clientCharset: Charset, serverCharset: Charset) +final case class SessionParams(clientCharset: Charset, serverCharset: Charset) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/auth/Authenticator.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/auth/Authenticator.scala index 5c5cc19..9121883 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/auth/Authenticator.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/auth/Authenticator.scala @@ -17,8 +17,8 @@ package io.rdbc.pgsql.core.auth import io.rdbc.ImmutSeq -import io.rdbc.pgsql.core.messages.backend.auth.AuthBackendMessage -import io.rdbc.pgsql.core.messages.frontend.PgFrontendMessage +import io.rdbc.pgsql.core.pgstruct.messages.backend.auth.AuthBackendMessage +import io.rdbc.pgsql.core.pgstruct.messages.frontend.PgFrontendMessage trait Authenticator { def authenticate(authReqMessage: AuthBackendMessage): AuthState @@ -26,10 +26,10 @@ trait Authenticator { } sealed trait AuthState { - def answers: ImmutSeq[PgFrontendMessage] + def responses: ImmutSeq[PgFrontendMessage] } object AuthState { - case class AuthContinue(answers: ImmutSeq[PgFrontendMessage]) extends AuthState - case class AuthComplete(answers: ImmutSeq[PgFrontendMessage]) extends AuthState + final case class AuthContinue(responses: ImmutSeq[PgFrontendMessage]) extends AuthState + final case class AuthComplete(responses: ImmutSeq[PgFrontendMessage]) extends AuthState } diff --git 
a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/auth/UsernamePasswordAuthenticator.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/auth/UsernamePasswordAuthenticator.scala index a3fa877..95d4116 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/auth/UsernamePasswordAuthenticator.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/auth/UsernamePasswordAuthenticator.scala @@ -17,21 +17,25 @@ package io.rdbc.pgsql.core.auth import io.rdbc.pgsql.core.auth.AuthState.AuthComplete -import io.rdbc.pgsql.core.messages.backend.auth.{AuthBackendMessage, AuthRequestMd5} -import io.rdbc.pgsql.core.messages.frontend.PasswordMessage +import io.rdbc.pgsql.core.pgstruct.messages.backend.auth.{AuthBackendMessage, AuthRequestMd5} +import io.rdbc.pgsql.core.pgstruct.messages.frontend.PasswordMessage import scala.collection.immutable class UsernamePasswordAuthenticator(val username: String, val password: String) extends Authenticator { - override def authenticate(authReqMessage: AuthBackendMessage): AuthState = authReqMessage match { + def authenticate(authReqMessage: AuthBackendMessage): AuthState = authReqMessage match { case req: AuthRequestMd5 => AuthComplete(immutable.Seq(PasswordMessage.md5(username, password, req.salt))) //TODO more username password mechanisms } - override def supports(authReqMessage: AuthBackendMessage): Boolean = authReqMessage match { - case _: AuthRequestMd5 => true - case _ => false + def supports(authReqMessage: AuthBackendMessage): Boolean = { + authReqMessage match { + case _: AuthRequestMd5 => true + case _ => false + } } + + override val toString = s"UsernamePasswordAuthenticator(username=$username, password=***)" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Decoded.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Decoded.scala index c7488e4..6348d4c 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Decoded.scala +++ 
b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Decoded.scala @@ -16,4 +16,6 @@ package io.rdbc.pgsql.core.codec -case class Decoded[A](msg: A, remainder: Array[Byte]) +import scodec.bits.ByteVector + +final case class Decoded[A](msg: A, remainder: ByteVector) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Decoder.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Decoder.scala index 88682e1..45fdb01 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Decoder.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Decoder.scala @@ -18,10 +18,13 @@ package io.rdbc.pgsql.core.codec import java.nio.charset.Charset -import io.rdbc.pgsql.core.messages.backend.{Header, PgBackendMessage} +import io.rdbc.pgsql.core.pgstruct.messages.backend.{MsgHeader, PgBackendMessage} +import scodec.bits.ByteVector trait Decoder { - def decodeMsg(bytes: Array[Byte])(implicit charset: Charset): Decoded[PgBackendMessage] + protected def charset: Charset - def decodeHeader(bytes: Array[Byte]): Decoded[Header] + def decodeMsg(bytes: ByteVector): Decoded[PgBackendMessage] + + def decodeHeader(bytes: ByteVector): Decoded[MsgHeader] } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/DecoderFactory.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/DecoderFactory.scala index be75d83..69ce144 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/DecoderFactory.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/DecoderFactory.scala @@ -16,6 +16,8 @@ package io.rdbc.pgsql.core.codec +import java.nio.charset.Charset + trait DecoderFactory { - def decoder: Decoder + def decoder(charset: Charset): Decoder } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Encoder.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Encoder.scala index 0756bab..2e0148f 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Encoder.scala 
+++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/Encoder.scala @@ -18,8 +18,10 @@ package io.rdbc.pgsql.core.codec import java.nio.charset.Charset -import io.rdbc.pgsql.core.messages.frontend.PgFrontendMessage +import io.rdbc.pgsql.core.pgstruct.messages.frontend.PgFrontendMessage +import scodec.bits.ByteVector trait Encoder { - def encode(msg: PgFrontendMessage)(implicit charset: Charset): Array[Byte] + protected def charset: Charset + def encode(msg: PgFrontendMessage): ByteVector } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/EncoderFactory.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/EncoderFactory.scala index b59ec94..7c123ae 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/EncoderFactory.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/codec/EncoderFactory.scala @@ -16,6 +16,8 @@ package io.rdbc.pgsql.core.codec +import java.nio.charset.Charset + trait EncoderFactory { - def encoder: Encoder + def encoder(charset: Charset): Encoder } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgAuthFailureException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgAuthFailureException.scala index 6c19939..b00154c 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgAuthFailureException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgAuthFailureException.scala @@ -17,6 +17,8 @@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.AuthFailureException -import io.rdbc.pgsql.core.messages.backend.StatusData +import io.rdbc.pgsql.core.pgstruct.StatusData -class PgAuthFailureException(val pgStatusData: StatusData) extends AuthFailureException(pgStatusData.shortInfo) with PgStatusDataException +class PgAuthFailureException(val pgStatusData: StatusData) + extends AuthFailureException(pgStatusData.shortInfo) + with PgStatusDataException diff --git 
a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgChannelException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgChannelException.scala index 7a01784..aaf6ebb 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgChannelException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgChannelException.scala @@ -18,4 +18,5 @@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.RdbcException -case class PgChannelException(cause: Throwable) extends RdbcException(cause.getMessage, cause) +class PgChannelException(cause: Throwable) + extends RdbcException(cause.getMessage, cause) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgConstraintViolationException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgConstraintViolationException.scala index 1024cb5..1af6190 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgConstraintViolationException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgConstraintViolationException.scala @@ -17,12 +17,12 @@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.ConstraintViolationException -import io.rdbc.pgsql.core.messages.backend.StatusData +import io.rdbc.pgsql.core.pgstruct.StatusData class PgConstraintViolationException(val pgStatusData: StatusData) extends ConstraintViolationException( schema = pgStatusData.schemaName.getOrElse(""), table = pgStatusData.tableName.getOrElse(""), constraint = pgStatusData.constraintName.getOrElse(""), - msg = pgStatusData.shortInfo - ) with PgStatusDataException + msg = pgStatusData.shortInfo) + with PgStatusDataException diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgDecodeException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgDecodeException.scala index 3cf53d6..b5a2d85 100644 --- 
a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgDecodeException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgDecodeException.scala @@ -18,4 +18,5 @@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.RdbcException -case class PgDecodeException(msg: String) extends RdbcException(msg) +class PgDecodeException(msg: String) + extends RdbcException(msg) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgDriverInternalErrorException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgDriverInternalErrorException.scala index 0a4b6ad..ae5c40d 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgDriverInternalErrorException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgDriverInternalErrorException.scala @@ -18,4 +18,5 @@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.RdbcException -case class PgDriverInternalErrorException(msg: String) extends RdbcException(s"THIS IS A RDBC DRIVER BUG: $msg") +class PgDriverInternalErrorException(msg: String) + extends RdbcException(s"THIS MOST LIKELY IS A BUG OF THE DRIVER: $msg") diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgEncodeException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgEncodeException.scala new file mode 100644 index 0000000..ab7286d --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgEncodeException.scala @@ -0,0 +1,22 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.exception + +import io.rdbc.api.exceptions.RdbcException + +class PgEncodeException(msg: String) + extends RdbcException(msg) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgInvalidQueryException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgInvalidQueryException.scala index 67aafcb..c90460f 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgInvalidQueryException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgInvalidQueryException.scala @@ -17,6 +17,8 @@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.InvalidQueryException -import io.rdbc.pgsql.core.messages.backend.StatusData +import io.rdbc.pgsql.core.pgstruct.StatusData -class PgInvalidQueryException(val pgStatusData: StatusData) extends InvalidQueryException(pgStatusData.shortInfo, pgStatusData.position) with PgStatusDataException +class PgInvalidQueryException(val pgStatusData: StatusData) + extends InvalidQueryException(pgStatusData.shortInfo, pgStatusData.position) + with PgStatusDataException diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgProtocolViolationException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgProtocolViolationException.scala index 584733b..6a2b25a 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgProtocolViolationException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgProtocolViolationException.scala @@ -18,7 +18,8 
@@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.RdbcException -class PgProtocolViolationException(msg: String, cause: Option[Throwable]) extends RdbcException(msg, cause) { - def this(msg: String) = this(msg, None) - def this(msg: String, cause: Throwable) = this(msg, Some(cause)) +class PgProtocolViolationException(msg: String, cause: Option[Throwable]) + extends RdbcException(msg, cause) { + def this(msg: String) = this(msg, None) + def this(msg: String, cause: Throwable) = this(msg, Some(cause)) } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgStatusDataException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgStatusDataException.scala index 87056b4..f88709e 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgStatusDataException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgStatusDataException.scala @@ -17,19 +17,24 @@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.RdbcException -import io.rdbc.pgsql.core.messages.backend.StatusData +import io.rdbc.pgsql.core.pgstruct.StatusData -trait PgStatusDataException { +trait PgStatusDataException extends Throwable { def pgStatusData: StatusData } object PgStatusDataException { def apply(statusData: StatusData): RdbcException with PgStatusDataException = { - if (statusData.sqlState == "42501") new PgUnauthorizedException(statusData) - else if (statusData.sqlState == "57014") new PgTimeoutException(statusData) - else if (statusData.sqlState.startsWith("28")) new PgAuthFailureException(statusData) - else if (statusData.sqlState.startsWith("42")) new PgInvalidQueryException(statusData) - else if (statusData.sqlState.startsWith("23")) new PgConstraintViolationException(statusData) - else new PgUncategorizedException(statusData) + if (statusData.sqlState == "42501") + new PgUnauthorizedException(statusData) + else if (statusData.sqlState == "57014") + new PgTimeoutException(statusData) 
+ else if (statusData.sqlState.startsWith("28")) + new PgAuthFailureException(statusData) + else if (statusData.sqlState.startsWith("42")) + new PgInvalidQueryException(statusData) + else if (statusData.sqlState.startsWith("23")) + new PgConstraintViolationException(statusData) + else new PgUncategorizedStatusDataException(statusData) } } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgSubscriptionRejectedException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgSubscriptionRejectedException.scala index 069951f..308b1f7 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgSubscriptionRejectedException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgSubscriptionRejectedException.scala @@ -18,4 +18,5 @@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.RdbcException -case class PgSubscriptionRejectedException(msg: String) extends RdbcException(msg) +class PgSubscriptionRejectedException(msg: String) + extends RdbcException(msg) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgTimeoutException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgTimeoutException.scala index 3c81006..e15fd8c 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgTimeoutException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgTimeoutException.scala @@ -17,6 +17,8 @@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.TimeoutException -import io.rdbc.pgsql.core.messages.backend.StatusData +import io.rdbc.pgsql.core.pgstruct.StatusData -class PgTimeoutException(val pgStatusData: StatusData) extends TimeoutException(pgStatusData.shortInfo) with PgStatusDataException +class PgTimeoutException(val pgStatusData: StatusData) + extends TimeoutException(pgStatusData.shortInfo) + with PgStatusDataException diff --git 
a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUnauthorizedException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUnauthorizedException.scala index 5224719..fd6cddf 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUnauthorizedException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUnauthorizedException.scala @@ -17,6 +17,8 @@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.UnauthorizedException -import io.rdbc.pgsql.core.messages.backend.StatusData +import io.rdbc.pgsql.core.pgstruct.StatusData -class PgUnauthorizedException(val pgStatusData: StatusData) extends UnauthorizedException(pgStatusData.shortInfo) with PgStatusDataException +class PgUnauthorizedException(val pgStatusData: StatusData) + extends UnauthorizedException(pgStatusData.shortInfo) + with PgStatusDataException diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUncategorizedException.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUncategorizedException.scala index 833a2bd..961332f 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUncategorizedException.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUncategorizedException.scala @@ -16,7 +16,11 @@ package io.rdbc.pgsql.core.exception -import io.rdbc.api.exceptions._ -import io.rdbc.pgsql.core.messages.backend.StatusData +import io.rdbc.api.exceptions.UncategorizedRdbcException -class PgUncategorizedException(val pgStatusData: StatusData) extends UncategorizedRdbcException(pgStatusData.shortInfo) with PgStatusDataException +class PgUncategorizedException(msg: String, cause: Option[Throwable]) + extends UncategorizedRdbcException(msg, cause) { + + def this(msg: String) = this(msg, None) + def this(msg: String, cause: Throwable) = this(msg, Some(cause)) +} diff --git 
a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Describe.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUncategorizedStatusDataException.scala similarity index 68% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Describe.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUncategorizedStatusDataException.scala index d739bf8..7c9cef3 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Describe.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUncategorizedStatusDataException.scala @@ -14,12 +14,11 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.frontend +package io.rdbc.pgsql.core.exception -sealed trait DescribeType +import io.rdbc.api.exceptions._ +import io.rdbc.pgsql.core.pgstruct.StatusData -case object PreparedStatementType extends DescribeType - -case object PortalType extends DescribeType - -case class Describe(descType: DescribeType, optionalName: Option[String]) extends PgFrontendMessage +class PgUncategorizedStatusDataException(val pgStatusData: StatusData) + extends UncategorizedRdbcException(pgStatusData.shortInfo) + with PgStatusDataException diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/Preconditions.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUnsupportedCharsetException.scala similarity index 59% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/Preconditions.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUnsupportedCharsetException.scala index a07572c..953a7fa 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/Preconditions.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUnsupportedCharsetException.scala @@ -14,16 +14,12 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.util +package io.rdbc.pgsql.core.exception -object Preconditions { - - def notNull[A](param: sourcecode.Text[A]): Unit = { - if (param == null) throw new NullPointerException(s"Parameter '${param.source}' cannot be null") - } - - def check[A](param: sourcecode.Text[A], f: A => Boolean, msg: => String): Unit = { - require(f(param.value), s"Requirement failed for parameter '${param.source}': $msg") - } +import io.rdbc.api.exceptions.RdbcException +class PgUnsupportedCharsetException(pgCharsetName: String, cause: Option[Throwable]) + extends RdbcException(s"PostgreSQL charset '$pgCharsetName' is not supported", cause) { + def this(pgCharsetName: String) = this(pgCharsetName, None) + def this(pgCharsetName: String, cause: Throwable) = this(pgCharsetName, Some(cause)) } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUnsupportedType.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUnsupportedType.scala index e664aaf..a38f429 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUnsupportedType.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/exception/PgUnsupportedType.scala @@ -17,6 +17,9 @@ package io.rdbc.pgsql.core.exception import io.rdbc.api.exceptions.RdbcException -import io.rdbc.pgsql.core.messages.data.DataType +import io.rdbc.pgsql.core.pgstruct.DataType -class PgUnsupportedType(dataType: DataType) extends RdbcException(s"Could not find type converted for PostgreSQL type with OID ${dataType.oid}") +class PgUnsupportedType(dataType: DataType) + extends RdbcException( + s"Could not find type converter for PostgreSQL type with OID ${dataType.oid}" + ) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Authenticating.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Authenticating.scala deleted file mode 100644 index fc15ad0..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Authenticating.scala +++ 
/dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.core.fsm - -import io.rdbc.api.exceptions.AuthFailureException -import io.rdbc.pgsql.core.ChannelWriter -import io.rdbc.pgsql.core.auth.AuthState.{AuthComplete, AuthContinue} -import io.rdbc.pgsql.core.auth.Authenticator -import io.rdbc.pgsql.core.messages.backend.BackendKeyData -import io.rdbc.pgsql.core.messages.backend.auth.{AuthOk, AuthRequest} - -import scala.concurrent.{ExecutionContext, Promise} - -class Authenticating(initPromise: Promise[BackendKeyData], authenticator: Authenticator) - (implicit out: ChannelWriter, ec: ExecutionContext) - extends State with NonFatalErrorsAreFatal { - - private var waitingForOk = false - - def msgHandler = { - case authReq: AuthRequest if !waitingForOk => - if (authenticator.supports(authReq)) { - val answersToWrite = authenticator.authenticate(authReq) match { - case AuthContinue(answers) => answers - case AuthComplete(answers) => - waitingForOk = true - answers - } - out.writeAndFlush(answersToWrite) - stay - } else { - val ex = new AuthFailureException(s"Authentication mechanism '${authReq.authMechanismName}' requested by server is not supported by provided authenticator") - fatal(ex) andThenFailPromise initPromise - } - - case AuthOk if waitingForOk => goto(new Initializing(initPromise)) - } - - protected def onFatalError(ex: Throwable): Unit = { - initPromise.failure(ex) 
- } - - val name = "authenticating" -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/DeallocatingStatement.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/DeallocatingStatement.scala deleted file mode 100644 index 221aa03..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/DeallocatingStatement.scala +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.core.fsm - -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.messages.backend.CloseComplete - -import scala.concurrent.Promise - -class DeallocatingStatement(promise: Promise[Unit]) extends State { - protected def msgHandler = { - case CloseComplete => goto(new WaitingForReady(onIdle = promise.success(()), onFailure = (ex) => promise.failure(ex))) - } - - protected def onFatalError(ex: Throwable): Unit = promise.failure(ex) - - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(new WaitingForReady(onIdle = promise.failure(ex), onFailure = { exWhenWaiting => - logger.error("Error occurred when waiting for ready", exWhenWaiting) - promise.failure(ex) - })) //TODO this pattern repeats in many places - } - - val name = "deallocating_statement" -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Initializing.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Initializing.scala deleted file mode 100644 index 
1ebbb9b..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Initializing.scala +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.core.fsm - -import io.rdbc.pgsql.core.ChannelWriter -import io.rdbc.pgsql.core.exception.PgProtocolViolationException -import io.rdbc.pgsql.core.messages.backend.{BackendKeyData, ReadyForQuery} - -import scala.concurrent.{ExecutionContext, Promise} - -class Initializing(initPromise: Promise[BackendKeyData])(implicit out: ChannelWriter, ec: ExecutionContext) - extends State with NonFatalErrorsAreFatal { - - private var backendKeyData: Option[BackendKeyData] = None - - def msgHandler = { - case bkd: BackendKeyData => - backendKeyData = Some(bkd) - stay - - case ReadyForQuery(txStatus) => - backendKeyData match { - case Some(bkd) => goto(Idle(txStatus)) andThenF initPromise.success(bkd) - case None => - val ex = new PgProtocolViolationException("Ready for query received in initializing state without prior backend key data message") - fatal(ex) andThenF initPromise.failure(ex) - } - } - - protected def onFatalError(ex: Throwable): Unit = { - initPromise.failure(ex) - } - - val name = "initializing" -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/SimpleQuerying.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/SimpleQuerying.scala deleted file mode 100644 index 9fd2bf0..0000000 --- 
a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/SimpleQuerying.scala +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.core.fsm - -import io.rdbc.pgsql.core.ChannelWriter -import io.rdbc.pgsql.core.exception.PgStatusDataException -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.messages.backend._ - -import scala.concurrent.Promise - -sealed trait SimpleQuerying extends State { - def subName: String - val name = "simple_querying." 
+ subName -} - -object SimpleQuerying { - class PullingRows(out: ChannelWriter, promise: Promise[Unit]) extends SimpleQuerying { - - def msgHandler = { - case _: RowDescription => stay - case _: DataRow => stay - case CommandComplete(_, _) | EmptyQueryResponse => goto(new SuccessWaitingForReady(promise)) - case StatusMessage.Error(statusData) => goto(new FailureWaitingForReady(PgStatusDataException(statusData), promise)) - } - - override protected def onNonFatalError(ex: Throwable): Outcome = { - goto(new FailureWaitingForReady(ex, promise)) - } - - protected def onFatalError(ex: Throwable): Unit = { - promise.failure(ex) - } - - val subName = "pulling_rows" - } - - class SuccessWaitingForReady(promise: Promise[Unit]) extends SimpleQuerying - with NonFatalErrorsAreFatal { - - def msgHandler = { - case ReadyForQuery(txStatus) => goto(Idle(txStatus)) andThenF promise.success(()) - } - - protected def onFatalError(ex: Throwable): Unit = { - promise.failure(ex) - } - - val subName = "success_waiting_for_ready" - } - - class FailureWaitingForReady(ex: Throwable, promise: Promise[Unit]) extends SimpleQuerying - with NonFatalErrorsAreFatal { - - def msgHandler = { - case ReadyForQuery(txStatus) => goto(Idle(txStatus)) andThenF promise.failure(ex) - } - - protected def onFatalError(ex: Throwable): Unit = { - promise.failure(ex) - } - - val subName = "failure_waiting_for_ready" - } -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/State.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/State.scala deleted file mode 100644 index 5b4a258..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/State.scala +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.core.fsm - -import com.typesafe.scalalogging.StrictLogging -import io.rdbc.pgsql.core.exception.PgStatusDataException -import io.rdbc.pgsql.core.fsm.State._ -import io.rdbc.pgsql.core.messages.backend.{PgBackendMessage, StatusMessage, UnknownBackendMessage} - -import scala.concurrent.{Future, Promise} -import scala.util.control.NonFatal - -object State { - sealed trait Outcome - - case class Goto(next: State, afterTransition: Option[() => Future[Unit]]) extends Outcome { - def andThen(block: => Future[Unit]): Goto = { - Goto(next, Some(() => block)) - } - def andThenF(block: => Unit): Goto = { - andThen(Future.successful(block)) - } - } - - case object Stay extends Outcome - - case class Fatal(ex: Throwable, afterTransition: Option[() => Future[Unit]]) extends Outcome { - def andThen(block: => Future[Unit]): Fatal = { - Fatal(ex, Some(() => block)) - } - - def andThenF(block: => Unit): Fatal = { - andThenF(Future.successful(block)) - } - - def andThenFailPromise[A](promise: Promise[A]): Fatal = { - andThen(Future.successful(promise.failure(ex))) - } - } -} - -trait State extends StrictLogging { - - def onMessage(msg: PgBackendMessage): Outcome = { - try { - val outcome = msg match { - case err: StatusMessage.Error if !err.isFatal => - val ex = PgStatusDataException(err.statusData) - Some(onNonFatalError(ex)) - - case err: StatusMessage.Error => - val ex = PgStatusDataException(err.statusData) - Some(fatal(ex) andThen onFatalErrorF(ex)) - - case any => msgHandler.lift.apply(any) - } - - outcome match { - case None => 
msg match { - case noticeMsg: StatusMessage.Notice => - if (noticeMsg.isWarning) { - logger.warn(s"Warning received: ${noticeMsg.statusData.shortInfo}") - } else { - logger.debug(s"Notice received: ${noticeMsg.statusData.shortInfo}") - } - stay - - case unknownMsg: UnknownBackendMessage => - val msg = s"Unknown message received: '$unknownMsg'" - val ex = new RuntimeException(msg) //TODO internal error - fatal(ex) andThen onFatalErrorF(ex) - - case unhandledMsg => - val msg = s"Unhandled message '$unhandledMsg' in state '$name'" - val ex = new RuntimeException(msg) //TODO internal error - fatal(ex) andThen onFatalErrorF(ex) - } - - case Some(handled) => handled - } - - } catch { - case NonFatal(ex) => fatal(ex) andThen onFatalErrorF(ex) - } - } - - def name: String - - protected def msgHandler: PartialFunction[PgBackendMessage, Outcome] - protected def onFatalError(ex: Throwable): Unit - protected def onNonFatalError(ex: Throwable): Outcome - - protected def onFatalErrorF(ex: Throwable): Future[Unit] = Future.successful(onFatalError(ex)) - protected def stay = Stay - protected def fatal(ex: Throwable) = Fatal(ex, None) - protected def goto(next: State) = Goto(next, None) -} - -trait DefaultErrorHandling extends NonFatalErrorsAreFatal { - this: State => - - protected def onFatalError(ex: Throwable): Unit = () -} - -trait NonFatalErrorsAreFatal { - this: State => - - protected def onNonFatalError(ex: Throwable): Outcome = { - logger.debug(s"State '$name' does not override non-fatal error handler, treating error as fatal") - fatal(ex) andThen onFatalErrorF(ex) - } -} \ No newline at end of file diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/BeginningTx.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/BeginningTx.scala deleted file mode 100644 index 6290ebd..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/BeginningTx.scala +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2016 
Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.core.fsm.extendedquery - -import io.rdbc.pgsql.core.fsm.State -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.messages.backend.{CommandComplete, ReadyForQuery} -import io.rdbc.pgsql.core.messages.frontend._ -import io.rdbc.pgsql.core.scheduler.TimeoutScheduler -import io.rdbc.pgsql.core.types.PgTypeRegistry -import io.rdbc.pgsql.core.{ChannelWriter, FatalErrorNotifier, PgResultStream, SessionParams} -import io.rdbc.sapi.TypeConverterRegistry - -import scala.concurrent.{ExecutionContext, Future, Promise} -import scala.util.control.NonFatal - -object BeginningTx { - def apply(parse: Option[Parse], bind: Bind, streamPromise: Promise[PgResultStream], parsePromise: Promise[Unit], - sessionParams: SessionParams, timeoutScheduler: TimeoutScheduler, - rdbcTypeConvRegistry: TypeConverterRegistry, pgTypeConvRegistry: PgTypeRegistry, - fatalErrorNotifier: FatalErrorNotifier) - (implicit out: ChannelWriter, ec: ExecutionContext): BeginningTx = { - new BeginningTx(parse, bind, streamPromise, parsePromise, sessionParams, timeoutScheduler, rdbcTypeConvRegistry, pgTypeConvRegistry, fatalErrorNotifier) - } -} - -class BeginningTx protected(maybeParse: Option[Parse], - bind: Bind, - streamPromise: Promise[PgResultStream], - parsePromise: Promise[Unit], - sessionParams: SessionParams, - timeoutScheduler: TimeoutScheduler, - rdbcTypeConvRegistry: 
TypeConverterRegistry, - pgTypeConvRegistry: PgTypeRegistry, - fatalErrorNotifier: FatalErrorNotifier) - (implicit out: ChannelWriter, - ec: ExecutionContext) - extends State { - - private var beginComplete = false - - def msgHandler = { - case CommandComplete("BEGIN", _) => - beginComplete = true - stay - - case ReadyForQuery(_) if beginComplete => - maybeParse.foreach(out.write(_)) - goto(WaitingForDescribe.withTxMgmt(bind.portal, streamPromise, parsePromise, sessionParams: SessionParams, timeoutScheduler, rdbcTypeConvRegistry, pgTypeConvRegistry, fatalErrorNotifier)) andThen { - out.writeAndFlush(bind, Describe(PortalType, bind.portal), Sync).recoverWith { - case NonFatal(ex) => - sendFailureToClient(ex) - Future.failed(ex) - } - } - } - - protected def sendFailureToClient(ex: Throwable): Unit = { - streamPromise.failure(ex) - parsePromise.failure(ex) - } - - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(Failed(txMgmt = true, bind.portal) { - sendFailureToClient(ex) - }) - } - - protected def onFatalError(ex: Throwable): Unit = { - sendFailureToClient(ex) - } - - val name = "extended_querying.beginning_tx" - -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/Failed.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/Failed.scala deleted file mode 100644 index 4b742ae..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/Failed.scala +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.core.fsm.extendedquery - -import com.typesafe.scalalogging.StrictLogging -import io.rdbc.pgsql.core.ChannelWriter -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.fsm.{Idle, State, WaitingForReady} -import io.rdbc.pgsql.core.messages.backend.{CloseComplete, ReadyForQuery} -import io.rdbc.pgsql.core.messages.frontend.{ClosePortal, Query, Sync} - -import scala.concurrent.{ExecutionContext, Future} -import scala.util.control.NonFatal - -object Failed { - def apply(txMgmt: Boolean, portalName: Option[String])(onIdle: => Unit)(implicit out: ChannelWriter, ec: ExecutionContext): Failed = { - new Failed(txMgmt, portalName, onIdle) - } -} - -class Failed protected(txMgmt: Boolean, portalName: Option[String], sendFailureCause: => Unit)(implicit out: ChannelWriter, ec: ExecutionContext) - extends State - with StrictLogging { - - var portalClosed = false - - def msgHandler = { - case CloseComplete if !portalClosed => - portalClosed = true - stay - - case ReadyForQuery(txStatus) => - if (txMgmt) { - goto(new WaitingForRollbackCompletion(sendFailureCause)) andThen { - out.writeAndFlush(Query("ROLLBACK")).recoverWith { - case NonFatal(ex) => - sendFailureToClient(ex) - Future.failed(ex) - } - } - } else { - if (!portalClosed) { - out.writeAndFlush(ClosePortal(portalName), Sync).recoverWith { - case NonFatal(ex) => - sendFailureToClient(ex) - Future.failed(ex) - } - stay //TODO should be stay andThen - } else { - goto(Idle(txStatus)) andThenF sendFailureCause - } - } - } - - def sendFailureToClient(ex: Throwable): Unit = { - logger.error("Error occurred when handling failed operation", ex) - sendFailureCause - } - - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(new WaitingForReady(onIdle = sendFailureToClient(ex), onFailure = { exWhenWaiting => - logger.error("Error occurred when waiting for ready", 
exWhenWaiting) - sendFailureToClient(ex) - })) //TODO this pattern repeats - } - - protected def onFatalError(ex: Throwable): Unit = { - sendFailureToClient(ex) - } - - val name = "extended_querying.failed" -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForDescribe.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForDescribe.scala deleted file mode 100644 index 74c9da2..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForDescribe.scala +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.core.fsm.extendedquery - -import io.rdbc.pgsql.core.exception.PgProtocolViolationException -import io.rdbc.pgsql.core.fsm.State -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.messages.backend._ -import io.rdbc.pgsql.core.scheduler.TimeoutScheduler -import io.rdbc.pgsql.core.types.PgTypeRegistry -import io.rdbc.pgsql.core.{ChannelWriter, FatalErrorNotifier, PgResultStream, PgRowPublisher, SessionParams} -import io.rdbc.sapi.TypeConverterRegistry - -import scala.concurrent.{ExecutionContext, Promise} - -object WaitingForDescribe { - def withTxMgmt(portalName: Option[String], - streamPromise: Promise[PgResultStream], - parsePromise: Promise[Unit], - sessionParams: SessionParams, - timeoutScheduler: TimeoutScheduler, - rdbcTypeConvRegistry: TypeConverterRegistry, - pgTypeConvRegistry: PgTypeRegistry, - fatalErrorNotifier: FatalErrorNotifier) - (implicit out: ChannelWriter, - ec: ExecutionContext): WaitingForDescribe = { - new WaitingForDescribe(txMgmt = true, portalName, streamPromise, parsePromise, pgTypeConvRegistry, rdbcTypeConvRegistry, sessionParams, timeoutScheduler, fatalErrorNotifier) - } - - def withoutTxMgmt(portalName: Option[String], - streamPromise: Promise[PgResultStream], - parsePromise: Promise[Unit], - sessionParams: SessionParams, - timeoutScheduler: TimeoutScheduler, - rdbcTypeConvRegistry: TypeConverterRegistry, - pgTypeConvRegistry: PgTypeRegistry, - fatalErrorNotifier: FatalErrorNotifier) - (implicit out: ChannelWriter, - ec: ExecutionContext): WaitingForDescribe = { - new WaitingForDescribe(txMgmt = false, portalName, streamPromise, parsePromise, pgTypeConvRegistry, rdbcTypeConvRegistry, sessionParams, timeoutScheduler, fatalErrorNotifier) - } -} - -class WaitingForDescribe protected(txMgmt: Boolean, - portalName: Option[String], - streamPromise: Promise[PgResultStream], - parsePromise: Promise[Unit], - pgTypeConvRegistry: PgTypeRegistry, - rdbcTypeConvRegistry: TypeConverterRegistry, - 
sessionParams: SessionParams, - timeoutScheduler: TimeoutScheduler, - fatalErrorNotifier: FatalErrorNotifier - )(implicit out: ChannelWriter, ec: ExecutionContext) - extends State { - - private var maybeAfterDescData = Option.empty[AfterDescData] - - def msgHandler = { - case ParseComplete => - parsePromise.success(()) - stay - - case BindComplete => stay - case _: ParameterDescription => stay - case NoData => onRowDescription(RowDescription.empty) - case rowDesc: RowDescription => onRowDescription(rowDesc) - - case _: ReadyForQuery => maybeAfterDescData match { - case None => onNonFatalError(new PgProtocolViolationException("ready for query received without prior row desc")) - case Some(afterDescData@AfterDescData(publisher, _, _)) => - goto(new PullingRows(txMgmt, afterDescData)) andThenF { - publisher.resume() - } - } - } - - private def onRowDescription(rowDesc: RowDescription): Outcome = maybeAfterDescData match { - case Some(_) => onNonFatalError(new PgProtocolViolationException("already received row description")) - case None => - val publisher = new PgRowPublisher(rowDesc, portalName, pgTypeConvRegistry, rdbcTypeConvRegistry, sessionParams, timeoutScheduler, fatalErrorNotifier) - val warningsPromise = Promise[Vector[StatusMessage.Notice]] - val rowsAffectedPromise = Promise[Long] - - maybeAfterDescData = Some(AfterDescData( - publisher = publisher, - warningsPromise = warningsPromise, - rowsAffectedPromise = rowsAffectedPromise - )) - - val stream = new PgResultStream( - publisher, - rowDesc = rowDesc, - rowsAffected = rowsAffectedPromise.future, - warningMsgsFut = warningsPromise.future, - pgTypeConvRegistry = pgTypeConvRegistry, - rdbcTypeConvRegistry = rdbcTypeConvRegistry - ) - streamPromise.success(stream) - stay - } - - def sendFailureToClient(ex: Throwable): Unit = { - maybeAfterDescData match { - case Some(AfterDescData(publisher, warningsPromise, rowsAffectedPromise)) => - publisher.failure(ex) - warningsPromise.failure(ex) - 
rowsAffectedPromise.failure(ex) - parsePromise.failure(ex) - - case None => - streamPromise.failure(ex) - } - } - - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(Failed(txMgmt, portalName) { - sendFailureToClient(ex) - }) - } - - protected def onFatalError(ex: Throwable): Unit = { - sendFailureToClient(ex) - } - - val name = "extended_querying.waiting_for_describe" - -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/package.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/package.scala deleted file mode 100644 index 3cac8f7..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/package.scala +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.core.fsm - -import io.rdbc.pgsql.core.PgRowPublisher -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.messages.backend.{PgBackendMessage, StatusMessage} - -import scala.concurrent.Promise -import scala.util.control.NonFatal - -package object extendedquery { - - trait WarningCollection extends State { - private var _warnings = Vector.empty[StatusMessage.Notice] - - protected def warnings = _warnings - - abstract override def onMessage(msg: PgBackendMessage): Outcome = { - try { - msg match { - case notice: StatusMessage.Notice if notice.isWarning => - _warnings = _warnings :+ notice - stay - case _ => super.onMessage(msg) - } - } catch { - case NonFatal(ex) => fatal(ex) andThenF onFatalError(ex) - } - } - } - - case class AfterDescData(publisher: PgRowPublisher, - warningsPromise: Promise[Vector[StatusMessage.Notice]], - rowsAffectedPromise: Promise[Long]) -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/writeonly/ExecutingWriteOnly.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/writeonly/ExecutingWriteOnly.scala deleted file mode 100644 index c84e03e..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/writeonly/ExecutingWriteOnly.scala +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.core.fsm.extendedquery.writeonly - -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.fsm.{State, WaitingForReady} -import io.rdbc.pgsql.core.messages.backend._ - -import scala.concurrent.Promise - -class ExecutingWriteOnly(promise: Promise[Long]) extends State { - val name = "executing_write_only" - - protected def msgHandler = { - case BindComplete => stay - case ParseComplete => stay - case _: DataRow => stay - case EmptyQueryResponse => finished(0L) - case CommandComplete(_, rowsAffected) => finished(rowsAffected.map(_.toLong).getOrElse(0L)) - } - - private def finished(rowsAffected: Long): Outcome = { - goto(new WaitingForReady( - onIdle = promise.success(rowsAffected), - onFailure = { ex => - promise.failure(ex) - }) - ) - } - - protected def onFatalError(ex: Throwable): Unit = promise.failure(ex) - - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(new WaitingForReady(onIdle = promise.failure(ex), onFailure = { exWhenWaiting => - logger.error("Error occurred when waiting for ready", exWhenWaiting) - promise.failure(ex) - })) //TODO this repeats throughout the project - } -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/FatalErrorHandler.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/FatalErrorHandler.scala new file mode 100644 index 0000000..84902c0 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/FatalErrorHandler.scala @@ -0,0 +1,21 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.internal + +trait FatalErrorHandler { + protected[core] def handleFatalError(msg: String, cause: Throwable): Unit +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgAnyStatement.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgAnyStatement.scala new file mode 100644 index 0000000..88e67bb --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgAnyStatement.scala @@ -0,0 +1,105 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal + +import akka.stream.scaladsl.Source +import io.rdbc.api.exceptions.{MissingParamValException, NoSuitableConverterFoundException} +import io.rdbc.implbase.BindablePartialImpl +import io.rdbc.pgsql.core.SessionParams +import io.rdbc.pgsql.core.pgstruct.{Oid, ParamValue} +import io.rdbc.pgsql.core.types.{PgType, PgTypeRegistry} +import io.rdbc.sapi._ +import io.rdbc.util.Logging +import org.reactivestreams.Publisher + +import scala.concurrent.{ExecutionContext, Future} + +private[core] class PgAnyStatement(stmtExecutor: PgStatementExecutor, + stmtDeallocator: PgStatementDeallocator, + pgTypes: PgTypeRegistry, + sessionParams: SessionParams, + nativeStmt: PgNativeStatement) + (implicit ec: ExecutionContext) + extends AnyStatement + with BindablePartialImpl[AnyParametrizedStatement] + with Logging { + + def bind(params: (String, Any)*): AnyParametrizedStatement = traced { + val pgParamValues = toPgParamValueSeq(Map(params: _*)) + pgParametrizedStatement(pgParamValues) + } + + def bindByIdx(params: Any*): AnyParametrizedStatement = traced { + val pgParamValues = params.map(toPgParamValue).toVector + pgParametrizedStatement(pgParamValues) + } + + def noParams: AnyParametrizedStatement = traced(bindByIdx()) + + def streamParams(paramsPublisher: Publisher[Map[String, Any]]): Future[Unit] = traced { + val pgParamsSource = Source.fromPublisher(paramsPublisher).map { paramMap => + toPgParamValueSeq(paramMap) + } + stmtExecutor.executeParamsStream(nativeStmt.sql, pgParamsSource) + } + + def deallocate(): Future[Unit] = traced { + stmtDeallocator.deallocateStatement(nativeStmt.sql) + } + + private def pgParametrizedStatement(pgParamValues: Vector[ParamValue]): PgParametrizedStatement = traced { + new PgParametrizedStatement( + executor = stmtExecutor, + deallocator = stmtDeallocator, + nativeSql = nativeStmt.sql, + params = pgParamValues + ) + } + + private def toPgParamValueSeq(params: Map[String, Any]): Vector[ParamValue] = traced 
{ + val pgParamsMap = params.mapValues(toPgParamValue) + val indexedPgParams = nativeStmt.params.foldLeft(Vector.empty[ParamValue]) { (acc, paramName) => + acc :+ pgParamsMap.getOrElse(paramName, throw new MissingParamValException(paramName)) + } + indexedPgParams + } + + private def toPgParamValue(value: Any): ParamValue = traced { + //TODO document in bind null/None/Some support + value match { + case null | None => ParamValue.Null(Oid.unknownDataType) + case NullParam(cls) => withPgType(cls)(pgType => ParamValue.Null(pgType.typeOid)) + case NotNullParam(notNullVal) => notNullToPgParamValue(notNullVal) + case Some(notNullVal) => notNullToPgParamValue(notNullVal) + case notNullVal => notNullToPgParamValue(notNullVal) + } + } + + private def notNullToPgParamValue(value: Any): ParamValue = traced { + withPgType(value.getClass) { pgType => + val binVal = pgType.asInstanceOf[PgType[Any]].toPgBinary(value)(sessionParams) + ParamValue.Binary(binVal, pgType.typeOid) + } + } + + private def withPgType[A, B](cls: Class[A])(body: PgType[A] => B): B = { + pgTypes + .typeByClass(cls) + .map(body) + .getOrElse(throw NoSuitableConverterFoundException(cls)) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgCharset.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgCharset.scala similarity index 72% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgCharset.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgCharset.scala index 5a2434a..b500889 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgCharset.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgCharset.scala @@ -14,19 +14,31 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core +package io.rdbc.pgsql.core.internal -import java.nio.charset.Charset +import java.nio.charset.{Charset, UnsupportedCharsetException} -object PgCharset { +import io.rdbc.pgsql.core.exception.PgUnsupportedCharsetException - def toJavaCharset(pgCharset: String): Option[Charset] = mapping.get(pgCharset).map { javaCharsetName => - Charset.forName(javaCharsetName) +private[core] object PgCharset { + + def toJavaCharset(pgCharset: String): Charset = { + try { + Charset.forName(toIanaName(pgCharset)) + } catch { + case ex: UnsupportedCharsetException => + throw new PgUnsupportedCharsetException(pgCharset, ex) + } + } + + private def toIanaName(pgCharset: String): String = { + mapping.getOrElse( + pgCharset, + throw new PgUnsupportedCharsetException(pgCharset) + ) } - /** - * Mapping between PostgreSQL charset name and IANA charset name used by java.nio.Charset - */ + /** Mapping between PostgreSQL charset name and IANA charset name used by java.nio.Charset */ private val mapping = Map( "BIG5" -> "Big5", "EUC_CN" -> "EUC_CN", diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgNativeStatement.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgNativeStatement.scala new file mode 100644 index 0000000..e84769c --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgNativeStatement.scala @@ -0,0 +1,60 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.internal + +import io.rdbc._ +import io.rdbc.pgsql.core.RdbcSql +import io.rdbc.pgsql.core.pgstruct.messages.frontend.NativeSql +import io.rdbc.util.Logging + +import scala.collection.mutable.ListBuffer +import scala.util.matching.Regex + +private[core] object PgNativeStatement extends Logging { + + private val cast = """::[a-zA-Z]\w*""" + private val sqlString = "'.+?'" + private val column = """".+?"""" + private val blockComment = """/\*.*?\*/""" + private val lineComment = "--.*" + private val param = """(:[a-zA-Z]\w*)""" + //TODO support dollar quoting? + //TODO test newlines + + private[this] val pattern = new Regex( + s"$sqlString|$column|$blockComment|$lineComment|$cast|$param" + ) + + def parse(statement: RdbcSql): PgNativeStatement = traced { + val sb = new StringBuilder + val params = ListBuffer.empty[String] + var lastTextIdx = 0 + var lastParamIdx = 0 + pattern.findAllMatchIn(statement.value).filter(_.group(1) != null).foreach { m => + sb.append(statement.value.substring(lastTextIdx, m.start)) + lastParamIdx += 1 + sb.append("$").append(lastParamIdx) + params += m.group(1).drop(1) + lastTextIdx = m.end + } + sb.append(statement.value.substring(lastTextIdx)) + + PgNativeStatement(NativeSql(sb.toString), params.toVector) + } +} + +private[core] case class PgNativeStatement(sql: NativeSql, params: ImmutIndexedSeq[String]) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgParametrizedStatement.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgParametrizedStatement.scala similarity index 50% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgParametrizedStatement.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgParametrizedStatement.scala index bb247ed..c5aae7a 100644 --- 
a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgParametrizedStatement.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgParametrizedStatement.scala @@ -14,27 +14,35 @@ * limitations under the License. */ -package io.rdbc.pgsql.core +package io.rdbc.pgsql.core.internal -import com.typesafe.scalalogging.StrictLogging -import io.rdbc.ImmutSeq import io.rdbc.implbase.AnyParametrizedStatementPartialImpl -import io.rdbc.pgsql.core.messages.frontend._ -import io.rdbc.sapi.{AnyParametrizedStatement, ResultStream} +import io.rdbc.pgsql.core.pgstruct.ParamValue +import io.rdbc.pgsql.core.pgstruct.messages.frontend.NativeSql +import io.rdbc.sapi.{AnyParametrizedStatement, ResultSet, ResultStream} +import io.rdbc.util.Logging import scala.concurrent.duration.FiniteDuration import scala.concurrent.{ExecutionContext, Future} -class PgParametrizedStatement(executor: PgStatementExecutor, - deallocator: PgStatementDeallocator, - nativeSql: String, - params: ImmutSeq[DbValue]) - (implicit val ec: ExecutionContext) +private[core] class PgParametrizedStatement(executor: PgStatementExecutor, + deallocator: PgStatementDeallocator, + nativeSql: NativeSql, + params: Vector[ParamValue]) + (implicit protected val ec: ExecutionContext) extends AnyParametrizedStatement with AnyParametrizedStatementPartialImpl - with StrictLogging { + with Logging { - def deallocate(): Future[Unit] = deallocator.deallocateStatement(nativeSql) - def executeForStream()(implicit timeout: FiniteDuration): Future[ResultStream] = executor.executeStatementForStream(nativeSql, params) - override def executeForRowsAffected()(implicit timeout: FiniteDuration): Future[Long] = executor.executeStatementForRowsAffected(nativeSql, params) + def deallocate(): Future[Unit] = traced { + deallocator.deallocateStatement(nativeSql) + } + + def executeForStream()(implicit timeout: FiniteDuration): Future[ResultStream] = traced { + executor.executeStatementForStream(nativeSql, params) + } + + override 
def executeForRowsAffected()(implicit timeout: FiniteDuration): Future[Long] = traced { + executor.executeStatementForRowsAffected(nativeSql, params) + } } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgResultStream.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgResultStream.scala new file mode 100644 index 0000000..3ad208c --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgResultStream.scala @@ -0,0 +1,56 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal + +import io.rdbc.ImmutSeq +import io.rdbc.pgsql.core.pgstruct.messages.backend.{RowDescription, StatusMessage} +import io.rdbc.pgsql.core.types.PgTypeRegistry +import io.rdbc.sapi._ +import io.rdbc.util.Logging +import org.reactivestreams.Publisher + +import scala.concurrent.{ExecutionContext, Future} + +private[core] class PgResultStream(val rows: Publisher[Row], + rowDesc: RowDescription, + val rowsAffected: Future[Long], + warningMsgsFut: Future[Vector[StatusMessage]], + pgTypes: PgTypeRegistry, + typeConverters: TypeConverterRegistry) + (implicit ec: ExecutionContext) + extends ResultStream + with Logging { + + lazy val warnings: Future[ImmutSeq[Warning]] = traced { + warningMsgsFut.map { warnMsgs => + warnMsgs.map { warnMsg => + Warning(warnMsg.statusData.shortInfo, warnMsg.statusData.sqlState) + } + } + } + + lazy val metadata: RowMetadata = traced { + val columnsMetadata = rowDesc.colDescs.map { colDesc => + ColumnMetadata( + name = colDesc.name.value, + dbTypeId = colDesc.dataType.oid.value.toString, + cls = pgTypes.typeByOid(colDesc.dataType.oid).map(_.cls) + ) + } + RowMetadata(columnsMetadata) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgRow.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgRow.scala new file mode 100644 index 0000000..005051b --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgRow.scala @@ -0,0 +1,75 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.internal + +import io.rdbc.api.exceptions.MissingColumnException +import io.rdbc.implbase.RowPartialImpl +import io.rdbc.pgsql.core.SessionParams +import io.rdbc.pgsql.core.exception.{PgDriverInternalErrorException, PgUnsupportedType} +import io.rdbc.pgsql.core.pgstruct.messages.backend.RowDescription +import io.rdbc.pgsql.core.pgstruct.messages.frontend.ColName +import io.rdbc.pgsql.core.pgstruct.{ColFormat, ColValue, DataType} +import io.rdbc.pgsql.core.types.PgTypeRegistry +import io.rdbc.sapi.{Row, TypeConverterRegistry} +import io.rdbc.util.Logging +import io.rdbc.util.Preconditions._ +import scodec.bits.ByteVector + +private[core] class PgRow(rowDesc: RowDescription, + cols: IndexedSeq[ColValue], + nameMapping: Map[ColName, Int], + protected val typeConverters: TypeConverterRegistry, + pgTypes: PgTypeRegistry, + implicit private[this] val sessionParams: SessionParams) + extends Row + with RowPartialImpl + with Logging { + + protected def any(name: String): Any = traced { + argsNotNull() + checkNonEmptyString(name) + nameMapping.get(ColName(name)) match { + case Some(idx) => any(idx) + case None => throw new MissingColumnException(name) + } + } + + protected def any(idx: Int): Any = traced { + argsNotNull() + check(idx, idx >= 0, "has to be >= 0") + val colVal = cols(idx) + colVal match { + case ColValue.Null => null + case ColValue.NotNull(rawFieldVal) => + val colDesc = rowDesc.colDescs(idx) + colDesc.format match { + case ColFormat.Binary => binaryToObj(colDesc.dataType, rawFieldVal) + case ColFormat.Textual => + throw new PgDriverInternalErrorException( + s"Value '$colVal' of column '$colDesc' is in textual format, which is unsupported" + ) + } + } + } + + private def binaryToObj(pgType: DataType, binaryVal: ByteVector): Any = traced { + pgTypes + .typeByOid(pgType.oid) + .map(_.toObj(binaryVal)) + .getOrElse(throw 
new PgUnsupportedType(pgType)) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgRowPublisher.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgRowPublisher.scala similarity index 56% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgRowPublisher.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgRowPublisher.scala index 4c2a84d..142fd6a 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PgRowPublisher.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgRowPublisher.scala @@ -14,55 +14,129 @@ * limitations under the License. */ -package io.rdbc.pgsql.core +package io.rdbc.pgsql.core.internal import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} import io.rdbc.pgsql.core.exception.PgSubscriptionRejectedException -import io.rdbc.pgsql.core.messages.backend.{DataRow, RowDescription} -import io.rdbc.pgsql.core.messages.frontend.{ClosePortal, Execute, Sync} -import io.rdbc.pgsql.core.scheduler.{ScheduledTask, TimeoutScheduler} +import io.rdbc.pgsql.core.internal.scheduler.{ScheduledTask, TimeoutHandler} +import io.rdbc.pgsql.core.pgstruct.messages.backend.{DataRow, RowDescription} +import io.rdbc.pgsql.core.pgstruct.messages.frontend._ import io.rdbc.pgsql.core.types.PgTypeRegistry -import io.rdbc.pgsql.core.util.SleepLock +import io.rdbc.pgsql.core.util.concurrent.LockFactory +import io.rdbc.pgsql.core.{ChannelWriter, FatalErrorNotifier, SessionParams} import io.rdbc.sapi.{Row, TypeConverterRegistry} import org.reactivestreams.{Publisher, Subscriber, Subscription} import scala.concurrent.ExecutionContext import scala.util.control.NonFatal -import scala.util.{Failure, Success, Try} +import scala.util.{Failure, Success} + +private[core] object PgRowPublisher { + + object DummySubscription extends Subscription { + def cancel(): Unit = () + def request(n: Long): Unit = () + } + + class LockGuardedState(lockFactory: LockFactory) { + private[this] 
val lock = lockFactory.lock + + private[this] var ready = false + private[this] var demand = 0L + private[this] var unboundedDemand = false + + def ifCanQuery(body: (Option[Int]) => Unit): Unit = { + val action = lock.withLock { + if (ready && (demand > 0L || unboundedDemand)) { + ready = false + if (unboundedDemand) { + () => body(None) + } else { + val localDemand = demand + () => body(Some(localDemand.toInt)) //TODO isn't Some(localDemand.toInt) strict? Do I need localDemand? + } + } else () => () + } + action() + } + + def ifCanCancel(body: => Unit): Unit = { + val can = lock.withLock { + if (ready) { + ready = false + true + } else false + } + if (can) { + body + } + } + + def increaseDemand(n: Long): Unit = { + lock.withLock { + try { + demand = Math.addExact(demand, n) + } catch { + case _: ArithmeticException => unboundedDemand = true + } + } + } + + def decrementDemand(): Unit = { + lock.withLock { + if (!unboundedDemand) { + demand = demand - 1L + } + } + } + + def setUnboundedDemand(): Unit = { + lock.withLock { + unboundedDemand = true + } + } + + def setReady(): Unit = { + lock.withLock { + ready = true + } + } + } +} + //TODO in the future tests, use reactive streams TCK to test this publisher & subscription -class PgRowPublisher(rowDesc: RowDescription, - val portalName: Option[String], - pgTypeConvRegistry: PgTypeRegistry, - rdbcTypeConvRegistry: TypeConverterRegistry, - sessionParams: SessionParams, - timeoutScheduler: TimeoutScheduler, - @volatile private var _fatalErrNotifier: FatalErrorNotifier - )(implicit out: ChannelWriter, ec: ExecutionContext) +private[core] class PgRowPublisher(rowDesc: RowDescription, + val portalName: Option[PortalName], + pgTypes: PgTypeRegistry, + typeConverters: TypeConverterRegistry, + sessionParams: SessionParams, + timeoutHandler: TimeoutHandler, + lockFactory: LockFactory, + @volatile private[this] var fatalErrorNotifier: FatalErrorNotifier) + (implicit out: ChannelWriter, ec: ExecutionContext) extends 
Publisher[Row] { - private val subscriber = new AtomicReference(Option.empty[Subscriber[_ >: Row]]) - @volatile private var cancelRequested = false - private val neverExecuted = new AtomicBoolean(true) - @volatile private var timeoutScheduledTask = Option.empty[ScheduledTask] - private val state = new State() + import PgRowPublisher._ - private val nameIdxMapping: Map[String, Int] = { - Map(rowDesc.fieldDescriptions.zipWithIndex.map { - case (fdesc, idx) => fdesc.name -> idx - }: _*) - } + private[this] val subscriber = new AtomicReference(Option.empty[Subscriber[_ >: Row]]) + @volatile private[this] var cancelRequested = false + private[this] val neverExecuted = new AtomicBoolean(true) + @volatile private[this] var timeoutScheduledTask = Option.empty[ScheduledTask] + private[this] val lockGuarded = new LockGuardedState(lockFactory) - object DummySubscription extends Subscription { - def cancel(): Unit = () + import lockGuarded._ - def request(n: Long): Unit = () + private[this] val nameIdxMapping: Map[ColName, Int] = { + Map(rowDesc.colDescs.zipWithIndex.map { + case (cdesc, idx) => cdesc.name -> idx + }: _*) } object RowSubscription extends Subscription { def cancel(): Unit = { - state.ifCanCancel { + ifCanCancel { closePortal() } cancelRequested = true @@ -70,9 +144,9 @@ class PgRowPublisher(rowDesc: RowDescription, def request(n: Long): Unit = { if (n == Long.MaxValue) { - state.setUnboundedDemand() + setUnboundedDemand() } else { - state.increaseDemand(n) + increaseDemand(n) } tryQuerying() } @@ -86,33 +160,38 @@ class PgRowPublisher(rowDesc: RowDescription, s.onSubscribe(RowSubscription) } else { s.onSubscribe(DummySubscription) //spec 1.9 - s.onError(PgSubscriptionRejectedException("This publisher can be subscribed only once, it has already been subscribed by other subscriber.")) + s.onError( + new PgSubscriptionRejectedException( + "This publisher can be subscribed only once, " + + "it has already been subscribed by other subscriber.") + ) } } } - 
private[core] def handleRow(dataRow: DataRow): Unit = { - //this method is always called by the same I/O thread - cancelTimeoutScheduler() + def handleRow(dataRow: DataRow): Unit = { + /* this method is always called by the same I/O thread */ + //TODO should I make it thread-safe anyway? + cancelTimeoutTask() subscriber.get().foreach { s => val pgRow = new PgRow( rowDesc = rowDesc, - cols = dataRow.fieldValues, + cols = dataRow.colValues, nameMapping = nameIdxMapping, - rdbcTypeConvRegistry = rdbcTypeConvRegistry, - pgTypeConvRegistry = pgTypeConvRegistry, + typeConverters = typeConverters, + pgTypes = pgTypes, sessionParams = sessionParams ) - state.decrementDemand() + decrementDemand() s.onNext(pgRow) } } - private[core] def resume(): Unit = { - state.setReady() + def resume(): Unit = { + setReady() if (cancelRequested) { - state.ifCanCancel { + ifCanCancel { closePortal() } } else { @@ -127,22 +206,22 @@ class PgRowPublisher(rowDesc: RowDescription, } } - private[core] def complete(): Unit = { - cancelTimeoutScheduler() + def complete(): Unit = { + cancelTimeoutTask() subscriber.get().foreach(_.onComplete()) } - private[core] def failure(ex: Throwable): Unit = { - cancelTimeoutScheduler() + def failure(ex: Throwable): Unit = { + cancelTimeoutTask() subscriber.get().foreach(_.onError(ex)) } private def tryQuerying(): Unit = { - state.ifCanQuery { demand => + ifCanQuery { demand => out.writeAndFlush(Execute(portalName, demand), Sync).onComplete { case Success(_) => if (neverExecuted.compareAndSet(true, false)) { - timeoutScheduledTask = Some(timeoutScheduler.scheduleTimeout()) + timeoutScheduledTask = Some(timeoutHandler.scheduleTimeoutTask()) } case Failure(NonFatal(ex)) => @@ -151,76 +230,11 @@ class PgRowPublisher(rowDesc: RowDescription, } } - private def cancelTimeoutScheduler(): Unit = { + private def cancelTimeoutTask(): Unit = { timeoutScheduledTask.foreach(_.cancel()) } - class State { - private[this] val lock = new SleepLock - - private[this] var ready = 
false - private[this] var demand = 0L - private[this] var unboundedDemand = false - - def ifCanQuery(block: (Option[Int]) => Unit): Unit = { - val action = lock.withLock { - if (ready && (demand > 0L || unboundedDemand)) { - ready = false - if (unboundedDemand) { - () => block(None) - } else { - val localDemand = demand - () => block(Some(localDemand.toInt)) - } - } else () => () - } - action() - } - - def ifCanCancel(block: => Unit): Unit = { - val can = lock.withLock { - if (ready) { - ready = false - true - } else false - } - if (can) { - block - } - } - - def increaseDemand(n: Long): Unit = { - lock.withLock { - try { - demand = Math.addExact(demand, n) - } catch { - case _: ArithmeticException => unboundedDemand = true - } - } - } - - def decrementDemand(): Unit = { - lock.withLock { - if (!unboundedDemand) { - demand = demand - 1L - } - } - } - - def setUnboundedDemand(): Unit = { - lock.withLock { - unboundedDemand = true - } - } - - def setReady(): Unit = { - lock.withLock { - ready = true - } - } - } - - private[core] def fatalErrNotifier_=(fen: FatalErrorNotifier): Unit = _fatalErrNotifier = fen + private[core] def fatalErrNotifier_=(fen: FatalErrorNotifier): Unit = fatalErrorNotifier = fen - private[core] def fatalErrNotifier: FatalErrorNotifier = _fatalErrNotifier + private[core] def fatalErrNotifier: FatalErrorNotifier = fatalErrorNotifier } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgSessionFsmManager.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgSessionFsmManager.scala new file mode 100644 index 0000000..bf269e3 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgSessionFsmManager.scala @@ -0,0 +1,144 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.internal + +import io.rdbc.api.exceptions.IllegalSessionStateException +import io.rdbc.pgsql.core.exception.PgDriverInternalErrorException +import io.rdbc.pgsql.core.internal.fsm._ +import io.rdbc.pgsql.core.util.concurrent.LockFactory +import io.rdbc.pgsql.core.{ClientRequest, RequestId} +import io.rdbc.util.Logging + +import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.util.control.NonFatal + +private[core] class PgSessionFsmManager(lockFactory: LockFactory, + fatalErrorHandler: FatalErrorHandler + )(implicit val ec: ExecutionContext) + extends Logging { + private[this] val lock = lockFactory.lock + private[this] var ready = false + private[this] var handlingTimeout = false + private[this] var state: State = Uninitialized + private[this] var readyPromise = Promise[Unit] + private[this] var lastRequestId = RequestId(0L) + + /* TODO can't make this traced, compilation fails, investigate */ + def ifReady[A](request: ClientRequest[A]): Future[A] = { + val action: () => Future[A] = lock.withLock { + if (handlingTimeout) { + () => + Future.failed { + new IllegalSessionStateException(s"Session is busy, currently cancelling timed out action") + } + } else if (ready) { + actionWhenReady(request) + } else { + actionWhenNotReady() + } + } + action() + } + + /* TODO can't make this traced, compilation fails, investigate */ + private def actionWhenReady[A](request: ClientRequest[A]): () => Future[A] = { + state match { + case Idle(txStatus) => + val newRequestId = prepareStateForNewRequest() + () => 
request(newRequestId, txStatus) + + case state => + val ex = new PgDriverInternalErrorException(s"Expected connection state to be idle, actual state was $state") + fatalErrorHandler.handleFatalError(ex.getMessage, ex) + () => Future.failed(ex) + } + } + + private def actionWhenNotReady[A](): () => Future[A] = traced { + state match { + case ConnectionClosed(cause) => + () => Future.failed(cause) + case _ => + () => Future.failed(new IllegalSessionStateException(s"Session is busy, currently processing query")) + } + } + + private def prepareStateForNewRequest(): RequestId = traced { + ready = false + state = StartingRequest + readyPromise = Promise[Unit] + lastRequestId = RequestId(lastRequestId.value + 1L) + lastRequestId + } + + def triggerTransition(newState: State, afterTransition: Option[() => Future[Unit]] = None): Boolean = traced { + val successful = lock.withLock { + state match { + case ConnectionClosed(_) => false + case _ => + newState match { + case Idle(_) => + ready = true + readyPromise.success(()) + + case ConnectionClosed(cause) => + ready = false + if (!readyPromise.isCompleted) { + readyPromise.failure(cause) + } + + case _ => () + } + state = newState + true + } + } + if (successful) { + logger.debug(s"Transitioned to state '$newState'") + runAfterTransition(afterTransition) + } + successful + } + + private def runAfterTransition(afterTransition: Option[() => Future[Unit]]): Unit = { + afterTransition.foreach(_.apply() + .recover { + case NonFatal(ex) => + fatalErrorHandler.handleFatalError( + "Fatal error occurred when handling post state transition logic", ex + ) + }) + } + + def startHandlingTimeout(reqId: RequestId): Boolean = traced { + lock.withLock { + if (!handlingTimeout && !ready && lastRequestId == reqId) { + handlingTimeout = true + true + } else false + } + } + + def finishHandlingTimeout(): Unit = traced { + lock.withLock(handlingTimeout = false) + } + + def currentState: State = traced(lock.withLock(state)) + + def readyFuture: 
Future[Unit] = traced(lock.withLock(readyPromise.future)) + +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgStatementDeallocator.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgStatementDeallocator.scala new file mode 100644 index 0000000..dd6dc59 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgStatementDeallocator.scala @@ -0,0 +1,25 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.internal + +import io.rdbc.pgsql.core.pgstruct.messages.frontend.NativeSql + +import scala.concurrent.Future + +private[core] trait PgStatementDeallocator { + private[core] def deallocateStatement(nativeSql: NativeSql): Future[Unit] +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgStatementExecutor.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgStatementExecutor.scala new file mode 100644 index 0000000..a9edb8d --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PgStatementExecutor.scala @@ -0,0 +1,35 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.internal + +import io.rdbc.pgsql.core.ParamsSource +import io.rdbc.pgsql.core.pgstruct.ParamValue +import io.rdbc.pgsql.core.pgstruct.messages.frontend.NativeSql +import io.rdbc.sapi.ResultStream + +import scala.concurrent.Future +import scala.concurrent.duration.FiniteDuration + +private[core] trait PgStatementExecutor { + private[core] def executeStatementForStream(nativeSql: NativeSql, params: Vector[ParamValue]) + (implicit timeout: FiniteDuration): Future[ResultStream] + + private[core] def executeStatementForRowsAffected(nativeSql: NativeSql, params: Vector[ParamValue]) + (implicit timeout: FiniteDuration): Future[Long] + + private[core] def executeParamsStream(nativeSql: NativeSql, params: ParamsSource): Future[Unit] +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PreparedStmtCache.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PreparedStmtCache.scala similarity index 67% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PreparedStmtCache.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PreparedStmtCache.scala index 2dd5ffa..c55b674 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/PreparedStmtCache.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/PreparedStmtCache.scala @@ -14,20 +14,23 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core +package io.rdbc.pgsql.core.internal -object PreparedStmtCache { +import io.rdbc.pgsql.core.pgstruct.messages.frontend.{NativeSql, StmtName} + +private[core] object PreparedStmtCache { val empty = new PreparedStmtCache(Map.empty) } -class PreparedStmtCache(cache: Map[String, String]) { +private[core] class PreparedStmtCache(cache: Map[NativeSql, StmtName]) { + //TODO should I cache using NativeSql or RdbcSql? + //TODO replace with LRU or sth //TODO when element is evicted from the cache CloseStatement needs to be sent to the backend - def get(sql: String): Option[String] = cache.get(sql) + def get(sql: NativeSql): Option[StmtName] = cache.get(sql) - def updated(sql: String, stmtName: String): PreparedStmtCache = { + def updated(sql: NativeSql, stmtName: StmtName): PreparedStmtCache = { new PreparedStmtCache(cache + (sql -> stmtName)) } - } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/WriteFailureHandler.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/WriteFailureHandler.scala new file mode 100644 index 0000000..4cfa8e5 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/WriteFailureHandler.scala @@ -0,0 +1,21 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal + +private[core] trait WriteFailureHandler { + private[core] def handleWriteError(cause: Throwable): Unit +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Authenticating.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Authenticating.scala new file mode 100644 index 0000000..a3bb794 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Authenticating.scala @@ -0,0 +1,74 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal.fsm + +import io.rdbc.api.exceptions.AuthFailureException +import io.rdbc.pgsql.core.auth.AuthState.{AuthComplete, AuthContinue} +import io.rdbc.pgsql.core.auth.{AuthState, Authenticator} +import io.rdbc.pgsql.core.pgstruct.messages.backend.BackendKeyData +import io.rdbc.pgsql.core.pgstruct.messages.backend.auth.{AuthOk, AuthRequest} +import io.rdbc.pgsql.core.{ChannelWriter, PgMsgHandler} + +import scala.concurrent.{ExecutionContext, Promise} + +private[core] +class Authenticating private[fsm](initPromise: Promise[BackendKeyData], + authenticator: Authenticator) + (implicit out: ChannelWriter, + ec: ExecutionContext) + extends State + with NonFatalErrorsAreFatal { + + private[this] var waitingForOk = false + + val msgHandler: PgMsgHandler = { + case authReq: AuthRequest if !waitingForOk => + ifAuthenticatorSupports(authReq) { + val authState = authenticator.authenticate(authReq) + if (isComplete(authState)) { + waitingForOk = true + } + stay andThenF out.writeAndFlush(authState.responses) + } + + case AuthOk if waitingForOk => goto(new Initializing(initPromise)) + } + + private def ifAuthenticatorSupports(authReq: AuthRequest)(body: => StateAction): StateAction = { + if (authenticator.supports(authReq)) { + body + } else { + val ex = new AuthFailureException( + s"Authentication mechanism '${authReq.authMechanismName}' requested by" + + " server is not supported by provided authenticator" + ) + fatal(ex) andThenFailPromise initPromise + } + } + + private def isComplete(authState: AuthState): Boolean = { + authState match { + case _: AuthContinue => false + case _: AuthComplete => true + } + } + + protected def onError(ex: Throwable): Unit = { + initPromise.failure(ex) + } + +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/ConnectionClosed.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/ConnectionClosed.scala similarity index 80% rename from 
rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/ConnectionClosed.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/ConnectionClosed.scala index 509fd77..3698f13 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/ConnectionClosed.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/ConnectionClosed.scala @@ -14,10 +14,10 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.fsm +package io.rdbc.pgsql.core.internal.fsm import io.rdbc.api.exceptions.ConnectionClosedException -case class ConnectionClosed(cause: ConnectionClosedException) extends EmptyState { - val name = "connection_closed" -} +private[core] +final case class ConnectionClosed private[fsm](cause: ConnectionClosedException) + extends EmptyState diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/DeallocatingStatement.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/DeallocatingStatement.scala new file mode 100644 index 0000000..3e08efa --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/DeallocatingStatement.scala @@ -0,0 +1,36 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal.fsm + +import io.rdbc.pgsql.core.PgMsgHandler +import io.rdbc.pgsql.core.pgstruct.messages.backend.CloseComplete + +import scala.concurrent.Promise + +private[core] class DeallocatingStatement(promise: Promise[Unit]) + extends State { + + protected val msgHandler: PgMsgHandler = { + case CloseComplete => goto(State.waitingAfterSuccess(promise)) + } + + protected def onFatalError(ex: Throwable): Unit = promise.failure(ex) + + protected def onNonFatalError(ex: Throwable): StateAction = { + goto(State.waitingAfterFailure(promise, ex)) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/DefaultErrorHandling.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/DefaultErrorHandling.scala new file mode 100644 index 0000000..08b26d1 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/DefaultErrorHandling.scala @@ -0,0 +1,22 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal.fsm + +private[core] trait DefaultErrorHandling extends NonFatalErrorsAreFatal { this: State => + + protected def onError(ex: Throwable): Unit = () +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/EmptyState.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/EmptyState.scala new file mode 100644 index 0000000..78aeb9c --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/EmptyState.scala @@ -0,0 +1,23 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal.fsm + +import io.rdbc.pgsql.core.PgMsgHandler + +private[core] trait EmptyState extends State with DefaultErrorHandling { + protected val msgHandler: PgMsgHandler = PartialFunction.empty +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/batch/ExecutingBatch.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/ExecutingBatch.scala similarity index 53% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/batch/ExecutingBatch.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/ExecutingBatch.scala index 0dd51ea..955b847 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/batch/ExecutingBatch.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/ExecutingBatch.scala @@ -14,34 +14,33 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.fsm.extendedquery.batch +package io.rdbc.pgsql.core.internal.fsm -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.fsm.{State, WaitingForReady} -import io.rdbc.pgsql.core.messages.backend._ +import io.rdbc.pgsql.core.PgMsgHandler +import io.rdbc.pgsql.core.pgstruct.TxStatus +import io.rdbc.pgsql.core.pgstruct.messages.backend._ import scala.concurrent.Promise -class ExecutingBatch(promise: Promise[TxStatus]) extends State { +private[core] +class ExecutingBatch private[fsm](batchPromise: Promise[TxStatus]) + extends State { - protected def msgHandler = { + protected val msgHandler: PgMsgHandler = { case ParseComplete => stay case BindComplete => stay case _: DataRow => stay case EmptyQueryResponse | _: CommandComplete => stay case ReadyForQuery(txStatus) => - promise.success(txStatus) + batchPromise.success(txStatus) stay } - protected def onFatalError(ex: Throwable): Unit = promise.failure(ex) - - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(new WaitingForReady(onIdle = 
promise.failure(ex), onFailure = { exWhenWaiting => - logger.error("Error occurred when waiting for ready", exWhenWaiting) - promise.failure(ex) - })) //TODO this repeats throughout the project + protected def onFatalError(ex: Throwable): Unit = traced { + batchPromise.failure(ex) } - val name = "executing_batch" + protected def onNonFatalError(ex: Throwable): StateAction = traced { + goto(State.waitingAfterFailure(batchPromise, ex)) + } } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/ExecutingWriteOnly.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/ExecutingWriteOnly.scala new file mode 100644 index 0000000..faf9150 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/ExecutingWriteOnly.scala @@ -0,0 +1,56 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal.fsm + +import io.rdbc.pgsql.core.PgMsgHandler +import io.rdbc.pgsql.core.pgstruct.messages.backend._ + +import scala.concurrent.Promise + +private[core] +class ExecutingWriteOnly private[fsm](parsePromise: Promise[Unit], + resultPromise: Promise[Long]) + extends State { + + protected val msgHandler: PgMsgHandler = { + case BindComplete => stay + case ParseComplete => + parsePromise.success(()) + stay + + case _: DataRow => stay + case EmptyQueryResponse => finished(0L) + case CommandComplete(_, rowsAffected) => finished(rowsAffected.map(_.toLong).getOrElse(0L)) + } + + private def finished(rowsAffected: Long): StateAction = traced { + goto(State.waitingAfterSuccess(resultPromise, rowsAffected)) + } + + private def sendFailureToClient(ex: Throwable): Unit = traced { + if (!parsePromise.isCompleted) parsePromise.failure(ex) + resultPromise.failure(ex) + } + + protected def onFatalError(ex: Throwable): Unit = traced { + sendFailureToClient(ex) + } + + protected def onNonFatalError(ex: Throwable): StateAction = traced { + goto(State.waitingAfterFailure(sendFailureToClient(_), ex)) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Idle.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Idle.scala similarity index 77% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Idle.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Idle.scala index 64afb43..0395f0e 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Idle.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Idle.scala @@ -14,10 +14,9 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.fsm +package io.rdbc.pgsql.core.internal.fsm -import io.rdbc.pgsql.core.messages.backend.TxStatus +import io.rdbc.pgsql.core.pgstruct.TxStatus -case class Idle(txStatus: TxStatus) extends EmptyState { - val name = "idle" -} +private[core] +final case class Idle private[fsm](txStatus: TxStatus) extends EmptyState diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Initializing.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Initializing.scala new file mode 100644 index 0000000..9cc4cc0 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Initializing.scala @@ -0,0 +1,49 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal.fsm + +import io.rdbc.pgsql.core.exception.PgProtocolViolationException +import io.rdbc.pgsql.core.pgstruct.messages.backend.{BackendKeyData, ReadyForQuery} +import io.rdbc.pgsql.core.{ChannelWriter, PgMsgHandler} + +import scala.concurrent.{ExecutionContext, Promise} + +class Initializing(initPromise: Promise[BackendKeyData]) + (implicit out: ChannelWriter, ec: ExecutionContext) + extends State + with NonFatalErrorsAreFatal { + + @volatile private[this] var maybeKeyData = Option.empty[BackendKeyData] + + protected val msgHandler: PgMsgHandler = { + case bkd: BackendKeyData => + maybeKeyData = Some(bkd) + stay + + case ReadyForQuery(txStatus) => + maybeKeyData.fold[StateAction] { + val ex = new PgProtocolViolationException( + "Ready for query received in initializing state without prior backend key data message" + ) + fatal(ex) andThenF initPromise.failure(ex) + }(bkd => goto(State.idle(txStatus)) andThenF initPromise.success(bkd)) + } + + protected def onError(ex: Throwable): Unit = traced { + initPromise.failure(ex) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/NonFatalErrorsAreFatal.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/NonFatalErrorsAreFatal.scala new file mode 100644 index 0000000..6cce8c3 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/NonFatalErrorsAreFatal.scala @@ -0,0 +1,32 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.internal.fsm + +private[core] trait NonFatalErrorsAreFatal { + this: State => + + protected def onError(ex: Throwable): Unit + + protected final def onFatalError(ex: Throwable): Unit = traced { + onError(ex) + } + + protected final def onNonFatalError(ex: Throwable): StateAction = { + logger.debug(s"State '$this' does not override non-fatal error handler, treating error as fatal") + fatal(ex) andThen onFatalErrorF(ex) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/SimpleQuerying.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/SimpleQuerying.scala new file mode 100644 index 0000000..b2a4e17 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/SimpleQuerying.scala @@ -0,0 +1,40 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal.fsm + +import io.rdbc.pgsql.core.pgstruct.messages.backend._ +import io.rdbc.pgsql.core.{ChannelWriter, PgMsgHandler} + +import scala.concurrent.Promise + +private[core] +class SimpleQuerying private[fsm](promise: Promise[Unit])(implicit out: ChannelWriter) extends State { + + protected val msgHandler: PgMsgHandler = { + case _: RowDescription => stay + case _: DataRow => stay + case _: CommandComplete | EmptyQueryResponse => goto(State.waitingAfterSuccess(promise)) + } + + protected def onNonFatalError(ex: Throwable): StateAction = traced { + goto(State.waitingAfterFailure(promise, ex)) + } + + protected def onFatalError(ex: Throwable): Unit = traced { + promise.failure(ex) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/StartingRequest.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/StartingRequest.scala similarity index 84% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/StartingRequest.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/StartingRequest.scala index 4b3b700..8fd8079 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/StartingRequest.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/StartingRequest.scala @@ -14,8 +14,6 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.fsm +package io.rdbc.pgsql.core.internal.fsm -case object StartingRequest extends EmptyState { - val name = "starting_request" -} +private[core] object StartingRequest extends EmptyState diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/State.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/State.scala new file mode 100644 index 0000000..30fc183 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/State.scala @@ -0,0 +1,245 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal.fsm + +import io.rdbc.api.exceptions.ConnectionClosedException +import io.rdbc.pgsql.core.auth.Authenticator +import io.rdbc.pgsql.core.exception.{PgDriverInternalErrorException, PgStatusDataException} +import io.rdbc.pgsql.core.internal.fsm.streaming._ +import io.rdbc.pgsql.core.internal.scheduler.TimeoutHandler +import io.rdbc.pgsql.core.internal.{PgResultStream, PgRowPublisher} +import io.rdbc.pgsql.core.pgstruct.TxStatus +import io.rdbc.pgsql.core.pgstruct.messages.backend.{BackendKeyData, PgBackendMessage, StatusMessage, + UnknownBackendMessage} +import io.rdbc.pgsql.core.pgstruct.messages.frontend.{Bind, Parse, PortalName} +import io.rdbc.pgsql.core.types.PgTypeRegistry +import io.rdbc.pgsql.core.util.concurrent.LockFactory +import io.rdbc.pgsql.core.{ChannelWriter, FatalErrorNotifier, PgMsgHandler, SessionParams} +import io.rdbc.sapi.TypeConverterRegistry +import io.rdbc.util.Logging + +import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.util.control.NonFatal + +private[core] trait State extends Logging { + + def onMessage(msg: PgBackendMessage): StateAction = traced { + try { + msg match { + case nonFatalErr: StatusMessage.Error if !nonFatalErr.isFatal => + onNonFatalError(PgStatusDataException(nonFatalErr.statusData)) + + case fatalErr: StatusMessage.Error => + val ex = PgStatusDataException(fatalErr.statusData) + fatal(ex) andThen onFatalErrorF(ex) + + case msg => msgHandler.orElse(fallbackHandler).apply(msg) + } + } catch { + case NonFatal(ex) => fatal(ex) andThen onFatalErrorF(ex) + } + } + + private[this] val fallbackHandler: PgMsgHandler = { + case noticeMsg: StatusMessage.Notice => + if (noticeMsg.isWarning) { + logger.warn(s"Warning received: ${noticeMsg.statusData.shortInfo}") + } else { + logger.debug(s"Notice received: ${noticeMsg.statusData.shortInfo}") + } + stay + + case unknownMsg: UnknownBackendMessage => + val ex = new PgDriverInternalErrorException( + s"Unknown backend 
message received: '$unknownMsg'" + ) + fatal(ex) andThen onFatalErrorF(ex) + + case unhandledMsg => + val ex = new PgDriverInternalErrorException( + s"Unhandled backend message '$unhandledMsg' in state '$this'" + ) + fatal(ex) andThen onFatalErrorF(ex) + } + + protected def msgHandler: PgMsgHandler + protected def onFatalError(ex: Throwable): Unit + protected def onNonFatalError(ex: Throwable): StateAction + + protected def onFatalErrorF(ex: Throwable): Future[Unit] = Future.successful(onFatalError(ex)) + protected def stay = StateAction.Stay(afterAcknowledgment = None) + protected def fatal(ex: Throwable) = StateAction.Fatal(ex, afterRelease = None) + protected def goto(next: State) = StateAction.Goto(next, afterTransition = None) +} + +private[core] object State extends Logging { + + def idle(txStatus: TxStatus): Idle = Idle(txStatus) + + def authenticating(initPromise: Promise[BackendKeyData], + authenticator: Authenticator) + (implicit out: ChannelWriter, + ec: ExecutionContext): Authenticating = { + new Authenticating(initPromise, authenticator) + } + + def connectionClosed(cause: ConnectionClosedException): ConnectionClosed = { + ConnectionClosed(cause) + } + + def deallocatingStatement(promise: Promise[Unit]): DeallocatingStatement = { + new DeallocatingStatement(promise) + } + + def executingBatch(promise: Promise[TxStatus]): ExecutingBatch = { + new ExecutingBatch(promise) + } + + def executingWriteOnly(parsePromise: Promise[Unit], + resultPromise: Promise[Long]): ExecutingWriteOnly = { + new ExecutingWriteOnly(parsePromise, resultPromise) + } + + def initializing(initPromise: Promise[BackendKeyData]) + (implicit out: ChannelWriter, ec: ExecutionContext): Initializing = { + new Initializing(initPromise) + } + + def simpleQuerying(promise: Promise[Unit])(implicit out: ChannelWriter): SimpleQuerying = { + new SimpleQuerying(promise) + } + + val startingRequest = StartingRequest + + val uninitialized = Uninitialized + + def waitingAfterSuccess(promise: 
Promise[Unit]): WaitingForReady = { + waitingAfterSuccess(promise, ()) + } + + def waitingAfterSuccess[A](promise: Promise[A], value: A): WaitingForReady = { + new WaitingForReady( + onIdle = promise.success(value), + onFailure = exWhenWaiting => promise.failure(exWhenWaiting) + ) + } + + def waitingAfterFailure[A](promise: Promise[A], failure: Throwable): WaitingForReady = { + waitingAfterFailure(ex => promise.failure(ex), failure) + } + + def waitingAfterFailure(after: Throwable => Unit, failure: Throwable): WaitingForReady = { + new WaitingForReady( + onIdle = after(failure), + onFailure = { exWhenWaiting => + logger.error("Error occurred when waiting for ready", exWhenWaiting) + after(failure) + } + ) + } + + object Streaming { + + def beginningTx(maybeParse: Option[Parse], + bind: Bind, + streamPromise: Promise[PgResultStream], + parsePromise: Promise[Unit], + sessionParams: SessionParams, + timeoutHandler: TimeoutHandler, + typeConverters: TypeConverterRegistry, + pgTypes: PgTypeRegistry, + lockFactory: LockFactory, + fatalErrorNotifier: FatalErrorNotifier) + (implicit out: ChannelWriter, + ec: ExecutionContext): StrmBeginningTx = { + new StrmBeginningTx( + maybeParse = maybeParse, + bind = bind, + streamPromise = streamPromise, + parsePromise = parsePromise, + sessionParams = sessionParams, + timeoutHandler = timeoutHandler, + typeConverters = typeConverters, + pgTypes = pgTypes, + lockFactory = lockFactory, + fatalErrorNotifier = fatalErrorNotifier + ) + } + + def pendingClosePortal(publisher: PgRowPublisher, onIdle: => Unit) + (implicit out: ChannelWriter, ec: ExecutionContext): StrmPendingClosePortal = { + new StrmPendingClosePortal(publisher, onIdle) + } + + def pendingCommit(publisher: PgRowPublisher) + (implicit out: ChannelWriter, ec: ExecutionContext): StrmPendingCommit = { + new StrmPendingCommit(publisher) + } + + def pullingRows(txMgmt: Boolean, afterDescData: AfterDescData) + (implicit out: ChannelWriter, ec: ExecutionContext): StrmPullingRows = 
{ + new StrmPullingRows(txMgmt, afterDescData) + } + + def queryFailed(txMgmt: Boolean, portalName: Option[PortalName])(sendFailureCause: => Unit) + (implicit out: ChannelWriter, ec: ExecutionContext): StrmQueryFailed = { + new StrmQueryFailed(txMgmt, portalName, sendFailureCause) + } + + def waitingAfterClose(onIdle: => Unit, + publisher: PgRowPublisher) + (implicit out: ChannelWriter, ec: ExecutionContext): StrmWaitingAfterClose = { + new StrmWaitingAfterClose(onIdle, publisher) + } + + def waitingAfterCommit(publisher: PgRowPublisher) + (implicit out: ChannelWriter, ec: ExecutionContext): StrmWaitingAfterCommit = { + new StrmWaitingAfterCommit(publisher) + } + + def waitingAfterRollback(sendFailureCause: => Unit): StrmWaitingAfterRollback = { + new StrmWaitingAfterRollback(sendFailureCause) + } + + def waitingForDescribe(txMgmt: Boolean, + portalName: Option[PortalName], + streamPromise: Promise[PgResultStream], + parsePromise: Promise[Unit], + pgTypes: PgTypeRegistry, + typeConverters: TypeConverterRegistry, + sessionParams: SessionParams, + timeoutHandler: TimeoutHandler, + lockFactory: LockFactory, + fatalErrorNotifier: FatalErrorNotifier) + (implicit out: ChannelWriter, ec: ExecutionContext): StrmWaitingForDescribe = { + new StrmWaitingForDescribe( + txMgmt = txMgmt, + portalName = portalName, + streamPromise = streamPromise, + parsePromise = parsePromise, + pgTypes = pgTypes, + typeConverters = typeConverters, + sessionParams = sessionParams, + timeoutHandler = timeoutHandler, + lockFactory = lockFactory, + fatalErrorNotifier = fatalErrorNotifier + ) + } + + } + +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/StateAction.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/StateAction.scala new file mode 100644 index 0000000..833273f --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/StateAction.scala @@ -0,0 +1,55 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under 
the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.internal.fsm + +import scala.concurrent.{Future, Promise} + +private[core] sealed trait StateAction + +private[core] object StateAction { + case class Goto(next: State, afterTransition: Option[() => Future[Unit]]) extends StateAction { + def andThen(body: => Future[Unit]): Goto = { + Goto(next, Some(() => body)) + } + def andThenF(body: => Unit): Goto = { + andThen(Future.successful(body)) + } + } + + case class Stay(afterAcknowledgment: Option[() => Future[Unit]]) extends StateAction { + def andThen(body: => Future[Unit]): Stay = { + Stay(Some(() => body)) + } + def andThenF(body: => Unit): Stay = { + andThen(Future.successful(body)) + } + } + + case class Fatal(ex: Throwable, afterRelease: Option[() => Future[Unit]]) extends StateAction { + def andThen(body: => Future[Unit]): Fatal = { + Fatal(ex, Some(() => body)) + } + + def andThenF(body: => Unit): Fatal = { + andThen(Future.successful(body)) + } + + def andThenFailPromise[A](promise: Promise[A]): Fatal = { + andThen(Future.successful(promise.failure(ex))) + } + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Uninitialized.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Uninitialized.scala similarity index 85% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Uninitialized.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Uninitialized.scala index 
2418316..fec6b1c 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/Uninitialized.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/Uninitialized.scala @@ -14,8 +14,6 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.fsm +package io.rdbc.pgsql.core.internal.fsm -object Uninitialized extends EmptyState { - val name = "uninitialized" -} +private[core] object Uninitialized extends EmptyState diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/WaitingForReady.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/WaitingForReady.scala new file mode 100644 index 0000000..35cf126 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/WaitingForReady.scala @@ -0,0 +1,34 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal.fsm + +import io.rdbc.pgsql.core.PgMsgHandler +import io.rdbc.pgsql.core.pgstruct.messages.backend.ReadyForQuery + +private[core] +class WaitingForReady private[fsm](onIdle: => Unit, onFailure: Throwable => Unit) + extends State + with NonFatalErrorsAreFatal { + + protected val msgHandler: PgMsgHandler = { + case ReadyForQuery(txStatus) => goto(State.idle(txStatus)) andThenF onIdle + } + + protected def onError(ex: Throwable): Unit = traced { + onFailure(ex) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/WarningCollection.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/WarningCollection.scala new file mode 100644 index 0000000..8b39662 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/WarningCollection.scala @@ -0,0 +1,40 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal.fsm + +import io.rdbc.pgsql.core.pgstruct.messages.backend.{PgBackendMessage, StatusMessage} + +import scala.util.control.NonFatal + +private[core] trait WarningCollection extends State { + @volatile private[this] var _warnings = Vector.empty[StatusMessage.Notice] + + protected def warnings: Vector[StatusMessage.Notice] = _warnings + + abstract override def onMessage(msg: PgBackendMessage): StateAction = { + try { + msg match { + case notice: StatusMessage.Notice if notice.isWarning => + _warnings = _warnings :+ notice + stay + case _ => super.onMessage(msg) + } + } catch { + case NonFatal(ex) => fatal(ex) andThenF onFatalError(ex) + } + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/WaitingForReady.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/AfterDescData.scala similarity index 58% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/WaitingForReady.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/AfterDescData.scala index a66e1f5..dbbead4 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/WaitingForReady.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/AfterDescData.scala @@ -14,18 +14,13 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.fsm +package io.rdbc.pgsql.core.internal.fsm.streaming -import io.rdbc.pgsql.core.messages.backend.ReadyForQuery +import io.rdbc.pgsql.core.internal.PgRowPublisher +import io.rdbc.pgsql.core.pgstruct.messages.backend.StatusMessage -class WaitingForReady(onIdle: => Unit, onFailure: (Throwable) => Unit) - extends State with NonFatalErrorsAreFatal { +import scala.concurrent.Promise - def msgHandler = { - case ReadyForQuery(txStatus) => goto(Idle(txStatus)) andThenF onIdle - } - - protected def onFatalError(ex: Throwable): Unit = onFailure(ex) - - val name = "waiting_for_ready" -} +private[fsm] case class AfterDescData(publisher: PgRowPublisher, + warningsPromise: Promise[Vector[StatusMessage.Notice]], + rowsAffectedPromise: Promise[Long]) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmBeginningTx.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmBeginningTx.scala new file mode 100644 index 0000000..da29fb6 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmBeginningTx.scala @@ -0,0 +1,92 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.internal.fsm.streaming + +import io.rdbc.pgsql.core.internal.PgResultStream +import io.rdbc.pgsql.core.internal.fsm.{State, StateAction} +import io.rdbc.pgsql.core.internal.scheduler.TimeoutHandler +import io.rdbc.pgsql.core.pgstruct.messages.backend.{CommandComplete, ReadyForQuery} +import io.rdbc.pgsql.core.pgstruct.messages.frontend._ +import io.rdbc.pgsql.core.types.PgTypeRegistry +import io.rdbc.pgsql.core.util.concurrent.LockFactory +import io.rdbc.pgsql.core.{ChannelWriter, FatalErrorNotifier, PgMsgHandler, SessionParams} +import io.rdbc.sapi.TypeConverterRegistry + +import scala.concurrent.{ExecutionContext, Future, Promise} +import scala.util.control.NonFatal + +private[core] +class StrmBeginningTx private[fsm](maybeParse: Option[Parse], + bind: Bind, + streamPromise: Promise[PgResultStream], + parsePromise: Promise[Unit], + sessionParams: SessionParams, + timeoutHandler: TimeoutHandler, + typeConverters: TypeConverterRegistry, + pgTypes: PgTypeRegistry, + lockFactory: LockFactory, + fatalErrorNotifier: FatalErrorNotifier) + (implicit out: ChannelWriter, + ec: ExecutionContext) + extends State { + + @volatile private[this] var beginComplete = false + + protected val msgHandler: PgMsgHandler = { + case CommandComplete("BEGIN", _) => + beginComplete = true + stay + + case ReadyForQuery(_) if beginComplete => + maybeParse.foreach(out.write(_)) + goto( + State.Streaming.waitingForDescribe( + txMgmt = true, + portalName = bind.portal, + streamPromise = streamPromise, + parsePromise = parsePromise, + pgTypes = pgTypes, + typeConverters = typeConverters, + sessionParams = sessionParams, + timeoutHandler = timeoutHandler, + lockFactory = lockFactory, + fatalErrorNotifier = fatalErrorNotifier) + ) andThen { + out.writeAndFlush(bind, DescribePortal(bind.portal), Sync) + .recoverWith { case NonFatal(ex) => + sendFailureToClient(ex) + Future.failed(ex) + } + } + } + + private def sendFailureToClient(ex: Throwable): Unit = { + 
streamPromise.failure(ex) + parsePromise.failure(ex) + } + + protected def onNonFatalError(ex: Throwable): StateAction = { + goto(State.Streaming.queryFailed(txMgmt = true, bind.portal) { + sendFailureToClient(ex) + }) + } + + protected def onFatalError(ex: Throwable): Unit = { + sendFailureToClient(ex) + } + +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/CompletedPendingClosePortal.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmPendingClosePortal.scala similarity index 50% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/CompletedPendingClosePortal.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmPendingClosePortal.scala index 1548424..1eb6be7 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/CompletedPendingClosePortal.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmPendingClosePortal.scala @@ -14,46 +14,42 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.fsm.extendedquery +package io.rdbc.pgsql.core.internal.fsm.streaming -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.fsm.{State, WaitingForReady} -import io.rdbc.pgsql.core.messages.backend.ReadyForQuery -import io.rdbc.pgsql.core.messages.frontend.{ClosePortal, Sync} -import io.rdbc.pgsql.core.{ChannelWriter, PgRowPublisher} +import io.rdbc.pgsql.core.internal.PgRowPublisher +import io.rdbc.pgsql.core.internal.fsm.{State, StateAction} +import io.rdbc.pgsql.core.pgstruct.messages.backend.ReadyForQuery +import io.rdbc.pgsql.core.pgstruct.messages.frontend.{ClosePortal, Sync} +import io.rdbc.pgsql.core.{ChannelWriter, PgMsgHandler} import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NonFatal -class CompletedPendingClosePortal(publisher: PgRowPublisher, onIdle: => Unit)(implicit out: ChannelWriter, ec: ExecutionContext) +private[core] +class StrmPendingClosePortal private[fsm](publisher: PgRowPublisher, onIdle: => Unit) + (implicit out: ChannelWriter, ec: ExecutionContext) extends State { - def msgHandler = { + val msgHandler: PgMsgHandler = { case ReadyForQuery(_) => - goto(new WaitingForCloseCompletion(onIdle, publisher)) andThen { - out.writeAndFlush(ClosePortal(publisher.portalName), Sync).recoverWith { - case NonFatal(ex) => + goto(new StrmWaitingAfterClose(onIdle, publisher)) andThen { + out.writeAndFlush(ClosePortal(publisher.portalName), Sync) + .recoverWith { case NonFatal(ex) => sendFailureToClient(ex) Future.failed(ex) - } + } } } - def sendFailureToClient(ex: Throwable): Unit = { + private def sendFailureToClient(ex: Throwable): Unit = { publisher.failure(ex) } - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(new WaitingForReady(onIdle = sendFailureToClient(ex), onFailure = { exWhenWaiting => - logger.error("Error occurred when closing portal", exWhenWaiting) - sendFailureToClient(ex) - })) //TODO this pattern repeats + protected def onNonFatalError(ex: 
Throwable): StateAction = { + goto(State.waitingAfterFailure(sendFailureToClient(_), ex)) } protected def onFatalError(ex: Throwable): Unit = { sendFailureToClient(ex) } - - val name = "extended_querying.pending_close" - } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/CompletedPendingCommit.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmPendingCommit.scala similarity index 51% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/CompletedPendingCommit.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmPendingCommit.scala index 7724843..cfadaf2 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/CompletedPendingCommit.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmPendingCommit.scala @@ -14,25 +14,28 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.fsm.extendedquery +package io.rdbc.pgsql.core.internal.fsm.streaming -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.fsm.{State, WaitingForReady} -import io.rdbc.pgsql.core.messages.backend.{ReadyForQuery, TxStatus} -import io.rdbc.pgsql.core.messages.frontend.Query -import io.rdbc.pgsql.core.{ChannelWriter, PgRowPublisher} +import io.rdbc.pgsql.core.internal.PgRowPublisher +import io.rdbc.pgsql.core.internal.fsm.{State, StateAction} +import io.rdbc.pgsql.core.pgstruct.TxStatus +import io.rdbc.pgsql.core.pgstruct.messages.backend.ReadyForQuery +import io.rdbc.pgsql.core.pgstruct.messages.frontend.{NativeSql, Query} +import io.rdbc.pgsql.core.{ChannelWriter, PgMsgHandler} import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NonFatal -class CompletedPendingCommit(publisher: PgRowPublisher)(implicit out: ChannelWriter, ec: ExecutionContext) +private[core] +class StrmPendingCommit private[fsm](publisher: PgRowPublisher) + (implicit out: ChannelWriter, 
ec: ExecutionContext) extends State { - def msgHandler = { + protected val msgHandler: PgMsgHandler = { case ReadyForQuery(TxStatus.Active) => - goto(new WaitingForCommitCompletion(publisher)) andThen { - out.writeAndFlush(Query("COMMIT")).recoverWith { - case NonFatal(ex) => + goto(new StrmWaitingAfterCommit(publisher)) andThen { + out.writeAndFlush(Query(NativeSql("COMMIT"))).recoverWith { + case NonFatal(ex) => //TODO write is fatal failure sendFailureToClient(ex) Future.failed(ex) } @@ -43,17 +46,11 @@ class CompletedPendingCommit(publisher: PgRowPublisher)(implicit out: ChannelWri publisher.failure(ex) } - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(new WaitingForReady(onIdle = sendFailureToClient(ex), onFailure = { exWhenWaiting => - logger.error("Error occurred when waiting for ready", exWhenWaiting) - sendFailureToClient(ex) - })) //TODO this pattern repeats + protected def onNonFatalError(ex: Throwable): StateAction = { + goto(State.waitingAfterFailure(sendFailureToClient(_), ex)) } protected def onFatalError(ex: Throwable): Unit = { sendFailureToClient(ex) } - - val name = "extended_querying.pending_commit" - } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/PullingRows.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmPullingRows.scala similarity index 52% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/PullingRows.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmPullingRows.scala index da216c1..e85242a 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/PullingRows.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmPullingRows.scala @@ -14,16 +14,17 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.fsm.extendedquery +package io.rdbc.pgsql.core.internal.fsm.streaming -import io.rdbc.pgsql.core.ChannelWriter -import io.rdbc.pgsql.core.fsm.State.{Goto, Outcome} -import io.rdbc.pgsql.core.fsm.{State, WaitingForReady} -import io.rdbc.pgsql.core.messages.backend._ +import io.rdbc.pgsql.core.internal.fsm.{State, StateAction, WaitingForReady, WarningCollection} +import io.rdbc.pgsql.core.pgstruct.messages.backend._ +import io.rdbc.pgsql.core.{ChannelWriter, PgMsgHandler} import scala.concurrent.ExecutionContext -class PullingRows(txMgmt: Boolean, afterDescData: AfterDescData)(implicit out: ChannelWriter, ec: ExecutionContext) +private[core] +class StrmPullingRows private[fsm](txMgmt: Boolean, afterDescData: AfterDescData) + (implicit out: ChannelWriter, ec: ExecutionContext) extends State with WarningCollection { //TODO warnings should be collected in all extended query states @@ -38,9 +39,8 @@ class PullingRows(txMgmt: Boolean, afterDescData: AfterDescData)(implicit out: C private[this] val warningsPromise = afterDescData.warningsPromise private[this] val rowsAffectedPromise = afterDescData.rowsAffectedPromise - def msgHandler = { - case PortalSuspended => - stay + val msgHandler: PgMsgHandler = { + case PortalSuspended => stay case dr: DataRow => publisher.handleRow(dr) @@ -51,26 +51,35 @@ class PullingRows(txMgmt: Boolean, afterDescData: AfterDescData)(implicit out: C stay case EmptyQueryResponse => - rowsAffectedPromise.success(0L) - warningsPromise.success(warnings) - if (txMgmt) goto(new CompletedPendingCommit(publisher)) - else goto(new CompletedPendingClosePortal(publisher, onIdle = publisher.complete())) + completePulling(0L) case CommandComplete(_, rowsAffected) => - rowsAffectedPromise.success(rowsAffected.map(_.toLong).getOrElse(0L)) - warningsPromise.success(warnings) - if (txMgmt) goto(new CompletedPendingCommit(publisher)) - else goto(new CompletedPendingClosePortal(publisher, onIdle = publisher.complete())) + 
completePulling(rowsAffected.map(_.toLong).getOrElse(0L)) + + case CloseComplete => //TODO we use only unnamed portals, closing them is not necessary + if (txMgmt) goto(new StrmPendingCommit(publisher)) + else goto(new WaitingForReady( + onIdle = publisher.complete(), + onFailure = publisher.failure) + ) + } + + private def completePulling(rowsAffected: Long): StateAction.Goto = { + rowsAffectedPromise.success(rowsAffected) + warningsPromise.success(warnings) + if (txMgmt) goto(new StrmPendingCommit(publisher)) + else goto(new StrmPendingClosePortal(publisher, onIdle = publisher.complete())) + } - //TODO portal needs to be closed on error - case CloseComplete => - if (txMgmt) goto(new CompletedPendingCommit(publisher)) - else goto(new WaitingForReady(onIdle = publisher.complete(), onFailure = publisher.failure)) + private def sendFailureToClient(ex: Throwable): Unit = { + publisher.failure(ex) + warningsPromise.failure(ex) + rowsAffectedPromise.failure(ex) } - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(Failed(txMgmt, afterDescData.publisher.portalName) { + protected def onNonFatalError(ex: Throwable): StateAction = { + goto(State.Streaming.queryFailed(txMgmt, afterDescData.publisher.portalName) { sendFailureToClient(ex) }) } @@ -78,12 +87,4 @@ class PullingRows(txMgmt: Boolean, afterDescData: AfterDescData)(implicit out: C protected def onFatalError(ex: Throwable): Unit = { sendFailureToClient(ex) } - - private[this] def sendFailureToClient(ex: Throwable): Unit = { - publisher.failure(ex) - warningsPromise.failure(ex) - rowsAffectedPromise.failure(ex) - } - - val name = "extended_querying.pulling_rows" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmQueryFailed.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmQueryFailed.scala new file mode 100644 index 0000000..f354f9e --- /dev/null +++ 
b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmQueryFailed.scala @@ -0,0 +1,86 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.internal.fsm.streaming + +import com.typesafe.scalalogging.StrictLogging +import io.rdbc.pgsql.core.internal.fsm.{Idle, State, StateAction} +import io.rdbc.pgsql.core.pgstruct.messages.backend.{CloseComplete, ReadyForQuery} +import io.rdbc.pgsql.core.pgstruct.messages.frontend._ +import io.rdbc.pgsql.core.{ChannelWriter, PgMsgHandler} + +import scala.concurrent.{ExecutionContext, Future} +import scala.util.control.NonFatal + +private[core] +class StrmQueryFailed private[fsm](txMgmt: Boolean, + portalName: Option[PortalName], + sendFailureCause: => Unit) + (implicit out: ChannelWriter, + ec: ExecutionContext) + extends State + with StrictLogging { + + @volatile private[this] var portalClosed = false + + protected val msgHandler: PgMsgHandler = { + case CloseComplete if !portalClosed => + portalClosed = true + stay + + case ReadyForQuery(txStatus) => + if (txMgmt) { + rollback() + } else { + if (!portalClosed) { + closePortal() + } else { + goto(Idle(txStatus)) andThenF sendFailureCause + } + } + } + + private def rollback(): StateAction.Goto = { + goto(new StrmWaitingAfterRollback(sendFailureCause)) andThen { + out.writeAndFlush(Query(NativeSql("ROLLBACK"))).recoverWith { //TODO interpolator nsql? 
+ case NonFatal(ex) => + sendFailureToClient(ex) + Future.failed(ex) + } + } + } + + private def closePortal(): StateAction.Stay = { + stay andThenF out.writeAndFlush(ClosePortal(portalName), Sync).recoverWith { + case NonFatal(ex) => + sendFailureToClient(ex) + Future.failed(ex) + } + } + + private def sendFailureToClient(ex: Throwable): Unit = { + logger.error("Error occurred when handling failed operation", ex) + sendFailureCause + } + + protected def onNonFatalError(ex: Throwable): StateAction = { + goto(State.waitingAfterFailure(sendFailureToClient(_), ex)) + } + + protected def onFatalError(ex: Throwable): Unit = { + sendFailureToClient(ex) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForCloseCompletion.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingAfterClose.scala similarity index 50% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForCloseCompletion.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingAfterClose.scala index 519d322..d17671c 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForCloseCompletion.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingAfterClose.scala @@ -14,32 +14,35 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.fsm.extendedquery +package io.rdbc.pgsql.core.internal.fsm.streaming -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.fsm._ -import io.rdbc.pgsql.core.messages.backend.CloseComplete -import io.rdbc.pgsql.core.{ChannelWriter, PgRowPublisher} +import io.rdbc.pgsql.core.internal.PgRowPublisher +import io.rdbc.pgsql.core.internal.fsm._ +import io.rdbc.pgsql.core.pgstruct.messages.backend.CloseComplete +import io.rdbc.pgsql.core.{ChannelWriter, PgMsgHandler} import scala.concurrent.ExecutionContext -class WaitingForCloseCompletion(onIdle: => Unit, publisher: PgRowPublisher)(implicit out: ChannelWriter, ec: ExecutionContext) +private[core] class StrmWaitingAfterClose(onIdle: => Unit, + publisher: PgRowPublisher) + (implicit out: ChannelWriter, ec: ExecutionContext) extends State { - def msgHandler = { + protected val msgHandler: PgMsgHandler = { case CloseComplete => - goto(new WaitingForReady( - onIdle = onIdle, - onFailure = publisher.failure - )) + goto( + new WaitingForReady( + onIdle = onIdle, + onFailure = publisher.failure + )) } - def sendFailureToClient(ex: Throwable): Unit = { + private def sendFailureToClient(ex: Throwable): Unit = { publisher.failure(ex) } - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(Failed(txMgmt = true, publisher.portalName) { + protected def onNonFatalError(ex: Throwable): StateAction = { + goto(State.Streaming.queryFailed(txMgmt = true, publisher.portalName) { sendFailureToClient(ex) }) } @@ -47,6 +50,4 @@ class WaitingForCloseCompletion(onIdle: => Unit, publisher: PgRowPublisher)(impl protected def onFatalError(ex: Throwable): Unit = { sendFailureToClient(ex) } - - val name = "extended_querying.close_complete" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForCommitCompletion.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingAfterCommit.scala similarity index 56% rename from 
rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForCommitCompletion.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingAfterCommit.scala index cc5c853..76f080c 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForCommitCompletion.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingAfterCommit.scala @@ -14,32 +14,34 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.fsm.extendedquery +package io.rdbc.pgsql.core.internal.fsm.streaming -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.fsm._ -import io.rdbc.pgsql.core.messages.backend.CommandComplete -import io.rdbc.pgsql.core.{ChannelWriter, PgRowPublisher} +import io.rdbc.pgsql.core.internal.PgRowPublisher +import io.rdbc.pgsql.core.internal.fsm._ +import io.rdbc.pgsql.core.pgstruct.messages.backend.CommandComplete +import io.rdbc.pgsql.core.{ChannelWriter, PgMsgHandler} import scala.concurrent.ExecutionContext -class WaitingForCommitCompletion(publisher: PgRowPublisher)(implicit out: ChannelWriter, ec: ExecutionContext) +private[core] +class StrmWaitingAfterCommit private[fsm](publisher: PgRowPublisher) + (implicit out: ChannelWriter, ec: ExecutionContext) extends State { - def msgHandler = { + protected val msgHandler: PgMsgHandler = { case CommandComplete("COMMIT", _) => goto(new WaitingForReady( onIdle = publisher.complete(), - onFailure = publisher.failure - )) + onFailure = publisher.failure) + ) } - def sendFailureToClient(ex: Throwable): Unit = { + private def sendFailureToClient(ex: Throwable): Unit = { publisher.failure(ex) } - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(Failed(txMgmt = true, publisher.portalName) { + protected def onNonFatalError(ex: Throwable): StateAction = { + goto(State.Streaming.queryFailed(txMgmt = true, publisher.portalName) { sendFailureToClient(ex) }) } @@ -47,6 +49,4 
@@ class WaitingForCommitCompletion(publisher: PgRowPublisher)(implicit out: Channe protected def onFatalError(ex: Throwable): Unit = { sendFailureToClient(ex) } - - val name = "extended_querying.waiting_for_ready_after_commit" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForRollbackCompletion.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingAfterRollback.scala similarity index 50% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForRollbackCompletion.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingAfterRollback.scala index 040f059..b5af5bd 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/extendedquery/WaitingForRollbackCompletion.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingAfterRollback.scala @@ -14,26 +14,28 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.fsm.extendedquery +package io.rdbc.pgsql.core.internal.fsm.streaming -import com.typesafe.scalalogging.StrictLogging -import io.rdbc.pgsql.core.fsm.State.Outcome -import io.rdbc.pgsql.core.fsm._ -import io.rdbc.pgsql.core.messages.backend.CommandComplete +import io.rdbc.pgsql.core.PgMsgHandler +import io.rdbc.pgsql.core.internal.fsm._ +import io.rdbc.pgsql.core.pgstruct.messages.backend.CommandComplete +import io.rdbc.util.Logging -class WaitingForRollbackCompletion(sendFailureCause: => Unit) +private[core] +class StrmWaitingAfterRollback private[fsm](sendFailureCause: => Unit) extends State - with StrictLogging { + with Logging { - def msgHandler = { + protected val msgHandler: PgMsgHandler = { case CommandComplete("ROLLBACK", _) => - goto(new WaitingForReady( - onIdle = sendFailureCause, - onFailure = { ex => - logger.error("Error occurred when waiting for ready", ex) - sendFailureCause - } - )) + goto( + new WaitingForReady( + onIdle = sendFailureCause, + onFailure = { ex => + logger.error("Error occurred when waiting for ready after the rollback", ex) + sendFailureCause + } + )) } def sendFailureToClient(ex: Throwable): Unit = { @@ -41,19 +43,11 @@ class WaitingForRollbackCompletion(sendFailureCause: => Unit) sendFailureCause } - protected def onNonFatalError(ex: Throwable): Outcome = { - goto(new WaitingForReady( - onIdle = sendFailureToClient(ex), - onFailure = { ex => - logger.error("Error occurred when waiting for ready", ex) - sendFailureToClient(ex) - })) + protected def onNonFatalError(ex: Throwable): StateAction = { + goto(State.waitingAfterFailure(sendFailureToClient(_), ex)) } protected def onFatalError(ex: Throwable): Unit = { sendFailureToClient(ex) } - - val name = "extended_querying.waiting_for_ready_after_commit" - -} \ No newline at end of file +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingForDescribe.scala 
b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingForDescribe.scala new file mode 100644 index 0000000..465f3b9 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/fsm/streaming/StrmWaitingForDescribe.scala @@ -0,0 +1,134 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.internal.fsm.streaming + +import io.rdbc.pgsql.core.exception.PgProtocolViolationException +import io.rdbc.pgsql.core.internal.fsm.{State, StateAction} +import io.rdbc.pgsql.core.internal.scheduler.TimeoutHandler +import io.rdbc.pgsql.core.internal.{PgResultStream, PgRowPublisher} +import io.rdbc.pgsql.core.pgstruct.messages.backend._ +import io.rdbc.pgsql.core.pgstruct.messages.frontend.PortalName +import io.rdbc.pgsql.core.types.PgTypeRegistry +import io.rdbc.pgsql.core.util.concurrent.LockFactory +import io.rdbc.pgsql.core.{ChannelWriter, FatalErrorNotifier, PgMsgHandler, SessionParams} +import io.rdbc.sapi.TypeConverterRegistry + +import scala.concurrent.{ExecutionContext, Promise} + +private[core] +class StrmWaitingForDescribe private[fsm](txMgmt: Boolean, + portalName: Option[PortalName], + streamPromise: Promise[PgResultStream], + parsePromise: Promise[Unit], + pgTypes: PgTypeRegistry, + typeConverters: TypeConverterRegistry, + sessionParams: SessionParams, + timeoutHandler: TimeoutHandler, + lockFactory: LockFactory, + fatalErrorNotifier: 
FatalErrorNotifier) + (implicit out: ChannelWriter, ec: ExecutionContext) + extends State { + + @volatile private[this] var maybeAfterDescData = Option.empty[AfterDescData] + + protected val msgHandler: PgMsgHandler = { + case ParseComplete => + parsePromise.success(()) + stay + + case BindComplete => stay + case _: ParameterDescription => stay + case NoData => completeStreamPromise(RowDescription.empty) + case rowDesc: RowDescription => completeStreamPromise(rowDesc) + + case _: ReadyForQuery => + maybeAfterDescData match { + case None => + val ex = new PgProtocolViolationException( + "Ready for query received without prior row description" + ) + fatal(ex) andThenF sendFailureToClient(ex) + + case Some(afterDescData@AfterDescData(publisher, _, _)) => + goto(new StrmPullingRows(txMgmt, afterDescData)) andThenF publisher.resume() + } + } + + private def completeStreamPromise(rowDesc: RowDescription): StateAction = { + val publisher = createPublisher(rowDesc) + val warningsPromise = Promise[Vector[StatusMessage.Notice]] + val rowsAffectedPromise = Promise[Long] + + val afterDescData = AfterDescData( + publisher = publisher, + warningsPromise = warningsPromise, + rowsAffectedPromise = rowsAffectedPromise + ) + maybeAfterDescData = Some(afterDescData) + + val stream = createStream(afterDescData, rowDesc) + streamPromise.success(stream) + stay + } + + private def createStream(afterDescData: AfterDescData, + rowDesc: RowDescription): PgResultStream = { + new PgResultStream( + rows = afterDescData.publisher, + rowDesc = rowDesc, + rowsAffected = afterDescData.rowsAffectedPromise.future, + warningMsgsFut = afterDescData.warningsPromise.future, + pgTypes = pgTypes, + typeConverters = typeConverters + ) + } + + private def createPublisher(rowDesc: RowDescription): PgRowPublisher = { + new PgRowPublisher( + rowDesc = rowDesc, + portalName = portalName, + pgTypes = pgTypes, + typeConverters = typeConverters, + sessionParams = sessionParams, + timeoutHandler = timeoutHandler, + 
lockFactory = lockFactory, + fatalErrorNotifier = fatalErrorNotifier + ) + } + + private def sendFailureToClient(ex: Throwable): Unit = { + maybeAfterDescData match { + case Some(AfterDescData(publisher, warningsPromise, rowsAffectedPromise)) => + publisher.failure(ex) + warningsPromise.failure(ex) + rowsAffectedPromise.failure(ex) + parsePromise.failure(ex) + + case None => streamPromise.failure(ex) + } + } + + protected def onNonFatalError(ex: Throwable): StateAction = { + goto(State.Streaming.queryFailed(txMgmt, portalName) { + sendFailureToClient(ex) + }) + } + + protected def onFatalError(ex: Throwable): Unit = { + sendFailureToClient(ex) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/Header.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/scheduler/ScheduledTask.scala similarity index 86% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/Header.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/scheduler/ScheduledTask.scala index 0154ae7..0a01728 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/Header.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/scheduler/ScheduledTask.scala @@ -14,6 +14,8 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.internal.scheduler -case class Header(msgLength: Int) +trait ScheduledTask { + def cancel(): Unit +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/scheduler/TaskScheduler.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/scheduler/TaskScheduler.scala similarity index 89% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/scheduler/TaskScheduler.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/scheduler/TaskScheduler.scala index 00d5a35..89567d4 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/scheduler/TaskScheduler.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/scheduler/TaskScheduler.scala @@ -14,14 +14,10 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.scheduler +package io.rdbc.pgsql.core.internal.scheduler import scala.concurrent.duration.FiniteDuration trait TaskScheduler { def schedule(delay: FiniteDuration)(action: => Unit): ScheduledTask } - -trait ScheduledTask { - def cancel(): Unit -} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/scheduler/TimeoutHandler.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/scheduler/TimeoutHandler.scala new file mode 100644 index 0000000..742e716 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/scheduler/TimeoutHandler.scala @@ -0,0 +1,25 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.internal.scheduler + +import scala.concurrent.duration.FiniteDuration + +private[core] class TimeoutHandler(scheduler: TaskScheduler, timeout: FiniteDuration, timeoutAction: => Unit) { + def scheduleTimeoutTask(): ScheduledTask = { + scheduler.schedule(timeout)(timeoutAction) + } +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Bind.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Bind.scala deleted file mode 100644 index c35be15..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Bind.scala +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.core.messages.frontend - -import io.rdbc.pgsql.core.messages.data.{DbValFormat, Oid} - -sealed trait ReturnFieldFormats - -case object NoReturnFields extends ReturnFieldFormats - -case object AllBinary extends ReturnFieldFormats - -case object AllTextual extends ReturnFieldFormats - -case class SpecificFieldFormats(formats: List[DbValFormat]) extends ReturnFieldFormats - - -sealed trait DbValue { - def dataTypeOid: Oid -} - -case class NullDbValue(dataTypeOid: Oid) extends DbValue - -case class TextualDbValue(value: String, dataTypeOid: Oid) extends DbValue - -case class BinaryDbValue(value: Array[Byte], dataTypeOid: Oid) extends DbValue - - -case class Bind(portal: Option[String], preparedStmt: Option[String], params: List[DbValue], returnFieldFormats: ReturnFieldFormats) extends PgFrontendMessage \ No newline at end of file diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Close.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Close.scala deleted file mode 100644 index b658f16..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Close.scala +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.core.messages.frontend - -case class ClosePortal(optionalName: Option[String]) extends PgFrontendMessage -case class CloseStatement(optionalName: Option[String]) extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/StartupMessage.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/StartupMessage.scala deleted file mode 100644 index 0009c07..0000000 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/StartupMessage.scala +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.core.messages.frontend - - -case class StartupMessage( - user: String, - database: String, - options: Map[String, String] = Map() - ) extends PgFrontendMessage \ No newline at end of file diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/package.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/package.scala index 861401b..cca929c 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/package.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/package.scala @@ -16,12 +16,31 @@ package io.rdbc.pgsql +import akka.NotUsed +import akka.stream.scaladsl.Source +import io.rdbc.pgsql.core.internal.fsm.StateAction +import io.rdbc.pgsql.core.pgstruct.{ParamValue, TxStatus} +import io.rdbc.pgsql.core.pgstruct.messages.backend.{BackendKeyData, PgBackendMessage} + +import scala.concurrent.Future + package object core { - implicit class ArrayToHex(array: Array[Byte]) { - def toHex: String = array.map("%02X" format _).mkString - } + type RequestCanceler = BackendKeyData => Future[Unit] + + private[core] type FatalErrorNotifier = (String, Throwable) => Unit + + private[core] case class RequestId(value: Long) extends AnyVal + + private[core] case class RdbcSql(value: String) extends AnyVal + + private[core] case class StmtParamName(value: String) extends AnyVal + + private[core] type ClientRequest[A] = (RequestId, TxStatus) => Future[A] + + private[core] type PgMsgHandler = PartialFunction[PgBackendMessage, StateAction] - type FatalErrorNotifier = (String, Throwable) => Unit + private[core] type ParamsSource = Source[Vector[ParamValue], NotUsed] + private[core] val unitFuture = Future.successful(()) } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/data/Field.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ColDesc.scala similarity index 64% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/data/Field.scala rename to 
rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ColDesc.scala index 7d20749..d088f55 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/data/Field.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ColDesc.scala @@ -14,12 +14,12 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.data +package io.rdbc.pgsql.core.pgstruct -import io.rdbc.pgsql.core.messages.backend.FieldDescription +import io.rdbc.pgsql.core.pgstruct.messages.frontend.ColName -sealed trait FieldValue -case class NotNullFieldValue(data: Array[Byte]) extends FieldValue -case object NullFieldValue extends FieldValue - -case class Field(desc: FieldDescription, value: FieldValue) +final case class ColDesc(name: ColName, + tableOid: Option[Oid], + columnAttr: Option[Int], + dataType: DataType, + format: ColFormat) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ColFormat.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ColFormat.scala new file mode 100644 index 0000000..6e31152 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ColFormat.scala @@ -0,0 +1,24 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.pgstruct + +object ColFormat { + final case object Textual extends ColFormat + final case object Binary extends ColFormat +} + +sealed trait ColFormat diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Parse.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ColValue.scala similarity index 73% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Parse.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ColValue.scala index 90b7f32..9769945 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Parse.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ColValue.scala @@ -14,8 +14,12 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.frontend +package io.rdbc.pgsql.core.pgstruct -import io.rdbc.pgsql.core.messages.data.Oid +import scodec.bits.ByteVector -case class Parse(optionalName: Option[String], query: String, paramTypes: Vector[Oid]) extends PgFrontendMessage +object ColValue { + final case class NotNull(data: ByteVector) extends ColValue + final case object Null extends ColValue +} +sealed trait ColValue diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/DataType.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/DataType.scala new file mode 100644 index 0000000..f52d514 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/DataType.scala @@ -0,0 +1,24 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.pgstruct + +object DataType { + final case class Size(value: Int) extends AnyVal + final case class Modifier(value: Int) extends AnyVal +} + +final case class DataType(oid: Oid, size: DataType.Size, modifier: DataType.Modifier) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/Oid.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/Oid.scala new file mode 100644 index 0000000..1244530 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/Oid.scala @@ -0,0 +1,23 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.pgstruct + +object Oid { + val unknownDataType = Oid(705L) +} + +final case class Oid(value: Long) extends AnyVal diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ParamValue.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ParamValue.scala new file mode 100644 index 0000000..abfb5f4 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ParamValue.scala @@ -0,0 +1,29 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.pgstruct + +import scodec.bits.ByteVector + +sealed trait ParamValue { + def dataTypeOid: Oid +} + +object ParamValue { + case class Null(dataTypeOid: Oid) extends ParamValue + case class Textual(value: String, dataTypeOid: Oid) extends ParamValue + case class Binary(value: ByteVector, dataTypeOid: Oid) extends ParamValue +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ReturnColFormats.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ReturnColFormats.scala new file mode 100644 index 0000000..e95bb34 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/ReturnColFormats.scala @@ -0,0 +1,26 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.pgstruct + +sealed trait ReturnColFormats + +object ReturnColFormats { + case object None extends ReturnColFormats + case object AllBinary extends ReturnColFormats + case object AllTextual extends ReturnColFormats + case class Specific(formats: Vector[ColFormat]) extends ReturnColFormats +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/StatusData.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/StatusData.scala new file mode 100644 index 0000000..7480729 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/StatusData.scala @@ -0,0 +1,38 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.pgstruct + +final case class StatusData(severity: String, + sqlState: String, + message: String, + detail: Option[String], + hint: Option[String], + position: Option[Int], + internalPosition: Option[Int], + internalQuery: Option[String], + where: Option[String], + schemaName: Option[String], + tableName: Option[String], + columnName: Option[String], + dataTypeName: Option[String], + constraintName: Option[String], + file: String, + line: String, + routine: String) { + + def shortInfo: String = s"$severity-$sqlState: $message" +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/data/DbValFormat.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/TxStatus.scala similarity index 74% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/data/DbValFormat.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/TxStatus.scala index e0b0ea0..3548050 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/data/DbValFormat.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/TxStatus.scala @@ -14,14 +14,11 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.data +package io.rdbc.pgsql.core.pgstruct -sealed trait DbValFormat - -object DbValFormat { - case object TextualDbValFormat extends DbValFormat - case object BinaryDbValFormat extends DbValFormat +sealed trait TxStatus +object TxStatus { + final case object Idle extends TxStatus + final case object Active extends TxStatus + final case object Failed extends TxStatus } - - - diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/PgMessage.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/PgMessage.scala similarity index 93% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/PgMessage.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/PgMessage.scala index 6a35696..cbe9e32 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/PgMessage.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/PgMessage.scala @@ -14,6 +14,6 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages +package io.rdbc.pgsql.core.pgstruct.messages trait PgMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/data/DataType.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/BackendKeyData.scala similarity index 71% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/data/DataType.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/BackendKeyData.scala index 430f684..f861a3b 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/data/DataType.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/BackendKeyData.scala @@ -14,11 +14,9 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.data +package io.rdbc.pgsql.core.pgstruct.messages.backend -case class Oid(code: Long) +final case class PgPid(value: Int) extends AnyVal +final case class PgKey(value: Int) extends AnyVal -case class DataType(oid: Oid, size: Int, modifier: Int) - -object Unknown extends DataType(Oid(705L), -2, -1) -object PgInt4 extends DataType(Oid(23L), 4, -1) +final case class BackendKeyData(pid: PgPid, key: PgKey) extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/BindComplete.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/BindComplete.scala similarity index 85% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/BindComplete.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/BindComplete.scala index 9288bc7..5aba5a5 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/BindComplete.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/BindComplete.scala @@ -14,6 +14,6 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend -object BindComplete extends PgBackendMessage +case object BindComplete extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/CloseComplete.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/CloseComplete.scala similarity index 85% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/CloseComplete.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/CloseComplete.scala index bd74b70..5065e7a 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/CloseComplete.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/CloseComplete.scala @@ -14,6 +14,6 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend -object CloseComplete extends PgBackendMessage \ No newline at end of file +case object CloseComplete extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/CommandComplete.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/CommandComplete.scala similarity index 82% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/CommandComplete.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/CommandComplete.scala index aefe3f6..2656dce 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/CommandComplete.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/CommandComplete.scala @@ -14,10 +14,10 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend object CommandComplete { val RowCountMessages = List("SELECT", "INSERT", "DELETE", "UPDATE", "MOVE", "FETCH", "COPY") } -case class CommandComplete(message: String, rowsAffected: Option[Int]) extends PgBackendMessage +final case class CommandComplete(message: String, rowsAffected: Option[Int]) extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/DataRow.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/DataRow.scala similarity index 77% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/DataRow.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/DataRow.scala index 7a38062..792a990 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/DataRow.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/DataRow.scala @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend -import io.rdbc.pgsql.core.messages.data.FieldValue +import io.rdbc.pgsql.core.pgstruct.ColValue -case class DataRow(fieldValues: Vector[FieldValue]) extends PgBackendMessage \ No newline at end of file +final case class DataRow(colValues: Vector[ColValue]) extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/EmptyQueryResponse.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/EmptyQueryResponse.scala similarity index 85% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/EmptyQueryResponse.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/EmptyQueryResponse.scala index 1606c38..56711e1 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/EmptyQueryResponse.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/EmptyQueryResponse.scala @@ -14,6 +14,6 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend -object EmptyQueryResponse extends PgBackendMessage \ No newline at end of file +object EmptyQueryResponse extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/BackendKeyData.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/MsgHeader.scala similarity index 83% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/BackendKeyData.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/MsgHeader.scala index 49bb684..a9c7555 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/BackendKeyData.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/MsgHeader.scala @@ -14,6 +14,6 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend -case class BackendKeyData(pid: Int, key: Int) extends PgBackendMessage +final case class MsgHeader(msgLength: Int) diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/NoData.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/NoData.scala similarity index 92% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/NoData.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/NoData.scala index fd52e37..7644ed4 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/NoData.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/NoData.scala @@ -14,6 +14,6 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend object NoData extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ParameterDescription.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ParameterDescription.scala similarity index 76% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ParameterDescription.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ParameterDescription.scala index ff84ff0..0f9144f 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ParameterDescription.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ParameterDescription.scala @@ -14,8 +14,8 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid -case class ParameterDescription(dataTypeOids: Vector[Oid]) extends PgBackendMessage +final case class ParameterDescription(dataTypeOids: Vector[Oid]) extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ParameterStatus.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ParameterStatus.scala new file mode 100644 index 0000000..a421aa7 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ParameterStatus.scala @@ -0,0 +1,22 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.pgstruct.messages.backend + +case class SessionParamKey(value: String) extends AnyVal +case class SessionParamVal(value: String) extends AnyVal + +final case class ParameterStatus(key: SessionParamKey, value: SessionParamVal) extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ParseComplete.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ParseComplete.scala similarity index 85% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ParseComplete.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ParseComplete.scala index e6049ed..2da268b 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ParseComplete.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ParseComplete.scala @@ -14,6 +14,6 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend -object ParseComplete extends PgBackendMessage +case object ParseComplete extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/PgBackendMessage.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/PgBackendMessage.scala similarity index 85% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/PgBackendMessage.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/PgBackendMessage.scala index 602a3b8..b8aafa7 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/PgBackendMessage.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/PgBackendMessage.scala @@ -14,8 +14,8 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend -import io.rdbc.pgsql.core.messages.PgMessage +import io.rdbc.pgsql.core.pgstruct.messages.PgMessage trait PgBackendMessage extends PgMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/PortalSuspended.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/PortalSuspended.scala similarity index 92% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/PortalSuspended.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/PortalSuspended.scala index a44f65c..edd22dd 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/PortalSuspended.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/PortalSuspended.scala @@ -14,6 +14,6 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend object PortalSuspended extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ReadyForQuery.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ReadyForQuery.scala similarity index 77% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ReadyForQuery.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ReadyForQuery.scala index 8fb5c30..f5fa53a 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ReadyForQuery.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/ReadyForQuery.scala @@ -14,13 +14,8 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend -case class ReadyForQuery(txStatus: TxStatus) extends PgBackendMessage +import io.rdbc.pgsql.core.pgstruct.TxStatus -sealed trait TxStatus -object TxStatus { - case object Idle extends TxStatus - case object Active extends TxStatus - case object Failed extends TxStatus -} \ No newline at end of file +case class ReadyForQuery(txStatus: TxStatus) extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/RowDescription.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/RowDescription.scala similarity index 78% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/RowDescription.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/RowDescription.scala index 875ad20..734f8f4 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/RowDescription.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/RowDescription.scala @@ -14,10 +14,13 @@ 
* limitations under the License. */ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend + +import io.rdbc.pgsql.core.pgstruct.ColDesc + object RowDescription { val empty = RowDescription(Vector.empty) } -case class RowDescription(fieldDescriptions: Vector[FieldDescription]) extends PgBackendMessage \ No newline at end of file +final case class RowDescription(colDescs: Vector[ColDesc]) extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/StatusMessage.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/StatusMessage.scala similarity index 52% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/StatusMessage.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/StatusMessage.scala index 96f8171..4ce6b0d 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/StatusMessage.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/StatusMessage.scala @@ -14,52 +14,10 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend import io.rdbc.pgsql.core.exception.PgProtocolViolationException - -case class StatusData(severity: String, - sqlState: String, - message: String, - detail: Option[String], - hint: Option[String], - position: Option[Int], - internalPosition: Option[Int], - internalQuery: Option[String], - where: Option[String], - schemaName: Option[String], - tableName: Option[String], - columnName: Option[String], - dataTypeName: Option[String], - constraintName: Option[String], - file: String, - line: String, - routine: String) { - - def shortInfo: String = s"$severity-$sqlState: $message" - - override def toString: String = { - s""" - |severity=$severity - |sqlState=$sqlState - |message=$message - |detail=${detail.getOrElse("none")} - |hint=${hint.getOrElse("none")} - |position=${position.map(_.toString).getOrElse("none")} - |internalPosition=${internalPosition.map(_.toString).getOrElse("none")} - |internalQuery=${internalQuery.getOrElse("none")} - |where=${where.getOrElse("none")} - |schemaName=${schemaName.getOrElse("none")} - |tableName=${tableName.getOrElse("none")} - |columnName=${columnName.getOrElse("none")} - |dataTypeName=${dataTypeName.getOrElse("none")} - |constraintName=${constraintName.getOrElse("none")} - |file=$file - |line=$line - |routine=$routine - |""".stripMargin - } -} +import io.rdbc.pgsql.core.pgstruct.StatusData sealed trait StatusMessage extends PgBackendMessage { @@ -78,7 +36,9 @@ object StatusMessage { private def notNullField(key: Byte, fields: Map[Byte, String]): String = { fields.getOrElse(key, - throw new PgProtocolViolationException(s"Mandatory field '$key' was not found in the status data") + throw new PgProtocolViolationException( + s"Mandatory field '$key' was not found in the status data" + ) ) } @@ -86,14 +46,23 @@ object StatusMessage { try { fields.get(key).map(_.toInt) } catch { - case ex: NumberFormatException => throw new 
PgProtocolViolationException(s"Field '$key' could not be parsed as an integer", ex) + case ex: NumberFormatException => + throw new PgProtocolViolationException( + s"Field '$key' could not be parsed as an integer", ex + ) } } private def statusData(fields: Map[Byte, String]): StatusData = { StatusData( - severity = fields.get('V').orElse(fields.get('S')) - .getOrElse(throw new PgProtocolViolationException(s"Neither 'V' nor 'S' severity field was found in the status data")), + severity = fields + .get('V') + .orElse(fields.get('S')) + .getOrElse( + throw new PgProtocolViolationException( + s"Neither 'V' nor 'S' severity field was found in the status data" + ) + ), sqlState = notNullField('C', fields), message = fields.getOrElse('M', "dupa"), detail = fields.get('D'), @@ -113,7 +82,7 @@ object StatusMessage { ) } - case class Error(statusData: StatusData) extends StatusMessage { + final case class Error(statusData: StatusData) extends StatusMessage { def isFatal: Boolean = { if (statusData.sqlState == "57014") { false //query canceled @@ -129,6 +98,5 @@ object StatusMessage { } } - case class Notice(statusData: StatusData) extends StatusMessage + final case class Notice(statusData: StatusData) extends StatusMessage } - diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/UnknownBackendMessage.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/UnknownBackendMessage.scala similarity index 71% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/UnknownBackendMessage.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/UnknownBackendMessage.scala index 91481fe..cbd5be7 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/UnknownBackendMessage.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/UnknownBackendMessage.scala @@ -14,10 +14,10 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.pgstruct.messages.backend -import io.rdbc.pgsql.core._ +import scodec.bits.ByteVector -case class UnknownBackendMessage(head: Byte, body: Array[Byte]) extends PgBackendMessage { - override def toString = s"UnknownPgMessage(head = ${head.toChar}, body = ${body.toHex})" -} \ No newline at end of file +final case class UnknownBackendMessage(head: Byte, body: ByteVector) extends PgBackendMessage { + override val toString = s"UnknownPgMessage(head = ${head.toChar}, body = ${body.toHex})" +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/auth/AuthBackendMessage.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthBackendMessage.scala similarity index 83% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/auth/AuthBackendMessage.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthBackendMessage.scala index 1f76214..58b553a 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/auth/AuthBackendMessage.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthBackendMessage.scala @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend.auth +package io.rdbc.pgsql.core.pgstruct.messages.backend.auth -import io.rdbc.pgsql.core.messages.backend.PgBackendMessage +import io.rdbc.pgsql.core.pgstruct.messages.backend.PgBackendMessage trait AuthBackendMessage extends PgBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/auth/AuthOk.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthOk.scala similarity index 85% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/auth/AuthOk.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthOk.scala index e3a6dc3..a951435 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/auth/AuthOk.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthOk.scala @@ -14,6 +14,6 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.backend.auth +package io.rdbc.pgsql.core.pgstruct.messages.backend.auth -case object AuthOk extends AuthBackendMessage \ No newline at end of file +case object AuthOk extends AuthBackendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthRequest.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthRequest.scala new file mode 100644 index 0000000..c7ba6dd --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthRequest.scala @@ -0,0 +1,21 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.pgstruct.messages.backend.auth + +trait AuthRequest extends AuthBackendMessage { + def authMechanismName: String +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/auth/AuthRequestMd5.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthRequestMd5.scala similarity index 76% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/auth/AuthRequestMd5.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthRequestMd5.scala index adca777..9a81f0b 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/auth/AuthRequestMd5.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/backend/auth/AuthRequestMd5.scala @@ -14,12 +14,10 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.backend.auth +package io.rdbc.pgsql.core.pgstruct.messages.backend.auth -trait AuthRequest extends AuthBackendMessage { - def authMechanismName: String -} +import scodec.bits.ByteVector -case class AuthRequestMd5(salt: Array[Byte]) extends AuthRequest { +final case class AuthRequestMd5(salt: ByteVector) extends AuthRequest { val authMechanismName: String = "md5" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Bind.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Bind.scala new file mode 100644 index 0000000..399f378 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Bind.scala @@ -0,0 +1,25 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.pgstruct.messages.frontend + +import io.rdbc.pgsql.core.pgstruct.{ParamValue, ReturnColFormats} + +case class Bind(portal: Option[PortalName], + preparedStmt: Option[StmtName], + params: Vector[ParamValue], + returnFieldFormats: ReturnColFormats) + extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/CancelRequest.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/CancelRequest.scala similarity index 75% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/CancelRequest.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/CancelRequest.scala index a1429c1..a4dfc11 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/CancelRequest.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/CancelRequest.scala @@ -14,6 +14,8 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.frontend +package io.rdbc.pgsql.core.pgstruct.messages.frontend -case class CancelRequest(pid: Int, key: Int) extends PgFrontendMessage +import io.rdbc.pgsql.core.pgstruct.messages.backend.{PgKey, PgPid} + +case class CancelRequest(pid: PgPid, key: PgKey) extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/ClosePortal.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/ClosePortal.scala new file mode 100644 index 0000000..7af3604 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/ClosePortal.scala @@ -0,0 +1,19 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.core.pgstruct.messages.frontend + +case class ClosePortal(optionalName: Option[PortalName]) extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/CloseStatement.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/CloseStatement.scala new file mode 100644 index 0000000..1864615 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/CloseStatement.scala @@ -0,0 +1,19 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.pgstruct.messages.frontend + +case class CloseStatement(optionalName: Option[StmtName]) extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Describe.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Describe.scala new file mode 100644 index 0000000..d02d412 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Describe.scala @@ -0,0 +1,20 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.pgstruct.messages.frontend + +case class DescribePortal(optionalName: Option[PortalName]) extends PgFrontendMessage +case class DescribeStatement(optionalName: Option[StmtName]) extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Execute.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Execute.scala similarity index 75% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Execute.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Execute.scala index e01caf0..83b7bae 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Execute.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Execute.scala @@ -14,6 +14,8 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.frontend +package io.rdbc.pgsql.core.pgstruct.messages.frontend -case class Execute(optionalPortalName: Option[String], optionalFetchSize: Option[Int]) extends PgFrontendMessage +case class Execute(optionalPortalName: Option[PortalName], + optionalFetchSize: Option[Int]) + extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Flush.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Flush.scala similarity index 92% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Flush.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Flush.scala index 549d084..4bb9063 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Flush.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Flush.scala @@ -14,6 +14,6 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.frontend +package io.rdbc.pgsql.core.pgstruct.messages.frontend case object Flush extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Parse.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Parse.scala new file mode 100644 index 0000000..0b7fc2c --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Parse.scala @@ -0,0 +1,24 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.pgstruct.messages.frontend + +import io.rdbc.pgsql.core.pgstruct.Oid + +case class Parse(optionalName: Option[StmtName], + query: NativeSql, + paramTypes: Vector[Oid]) + extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/PasswordMessage.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/PasswordMessage.scala similarity index 79% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/PasswordMessage.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/PasswordMessage.scala index c704654..8ca9db3 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/PasswordMessage.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/PasswordMessage.scala @@ -14,14 +14,15 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.frontend +package io.rdbc.pgsql.core.pgstruct.messages.frontend import java.security.MessageDigest -object PasswordMessage { +import scodec.bits.ByteVector +object PasswordMessage { - def md5(username: String, password: String, salt: Array[Byte]): PasswordMessage = { + def md5(username: String, password: String, salt: ByteVector): PasswordMessage = { //TODO optimize this happy data copying val md5 = MessageDigest.getInstance("MD5") md5.update(password.getBytes("US-ASCII")) @@ -30,7 +31,7 @@ object PasswordMessage { val digest: Array[Byte] = md5.digest() val hexBytes = bytesToHex(digest).getBytes("US-ASCII") md5.update(hexBytes) - md5.update(salt) + md5.update(salt.toArray) val digest2 = md5.digest() @@ -39,7 +40,7 @@ object PasswordMessage { val md5String = "md5" + hex2 val credentials = Array.concat(md5String.getBytes("US-ASCII"), Array(0.toByte)) //TODO optimize - PasswordMessage(credentials) + PasswordMessage(ByteVector.view(credentials)) } private def bytesToHex(bytes: Array[Byte]): String 
= { @@ -47,4 +48,4 @@ object PasswordMessage { } } -case class PasswordMessage(credentials: Array[Byte]) extends PgFrontendMessage \ No newline at end of file +case class PasswordMessage(credentials: ByteVector) extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/PgFrontendMessage.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/PgFrontendMessage.scala similarity index 80% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/PgFrontendMessage.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/PgFrontendMessage.scala index 87d9ffd..1a09b0c 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/PgFrontendMessage.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/PgFrontendMessage.scala @@ -14,8 +14,8 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.frontend +package io.rdbc.pgsql.core.pgstruct.messages.frontend -import io.rdbc.pgsql.core.messages.PgMessage +import io.rdbc.pgsql.core.pgstruct.messages.PgMessage -trait PgFrontendMessage extends PgMessage \ No newline at end of file +trait PgFrontendMessage extends PgMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Query.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Query.scala similarity index 83% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Query.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Query.scala index 3a39d72..cb91dd7 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Query.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Query.scala @@ -14,6 +14,6 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.frontend +package io.rdbc.pgsql.core.pgstruct.messages.frontend -case class Query(query: String) extends PgFrontendMessage \ No newline at end of file +case class Query(query: NativeSql) extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Startup.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Startup.scala new file mode 100644 index 0000000..0980841 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Startup.scala @@ -0,0 +1,23 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.pgstruct.messages.frontend + +case class Startup( + user: String, + database: String, + options: Map[String, String] = Map() +) extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Sync.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Sync.scala similarity index 92% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Sync.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Sync.scala index 8cc59f3..fa99280 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Sync.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Sync.scala @@ -14,6 +14,6 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.frontend +package io.rdbc.pgsql.core.pgstruct.messages.frontend case object Sync extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Terminate.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Terminate.scala similarity index 85% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Terminate.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Terminate.scala index 3976f88..66cd9e0 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/frontend/Terminate.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/Terminate.scala @@ -14,6 +14,6 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.messages.frontend +package io.rdbc.pgsql.core.pgstruct.messages.frontend -case object Terminate extends PgFrontendMessage \ No newline at end of file +case object Terminate extends PgFrontendMessage diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/package.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/package.scala new file mode 100644 index 0000000..f971188 --- /dev/null +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/pgstruct/messages/frontend/package.scala @@ -0,0 +1,24 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.core.pgstruct.messages + +package object frontend { + case class PortalName(value: String) extends AnyVal + case class StmtName(value: String) extends AnyVal + case class NativeSql(value: String) extends AnyVal + case class ColName(value: String) extends AnyVal +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgBool.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgBool.scala index a98fd27..17cb088 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgBool.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgBool.scala @@ -16,10 +16,11 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgBool extends PgType[Boolean] { val typeOid = Oid(16) val cls = classOf[Boolean] + val name = "bool" override val otherClasses = Vector(classOf[java.lang.Boolean]) } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgBytea.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgBytea.scala index 794c8b0..9291ed2 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgBytea.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgBytea.scala @@ -16,9 +16,10 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgBytea extends PgType[Array[Byte]] { val typeOid = Oid(17) val cls = classOf[Array[Byte]] + val name = "bytea" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgChar.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgChar.scala index f50eb99..467d4be 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgChar.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgChar.scala @@ -16,9 +16,10 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid +import 
io.rdbc.pgsql.core.pgstruct.Oid trait PgChar extends PgType[String] { //TODO both Varchar and Char map to String, this is problematic when binding val typeOid = Oid(1042) val cls = classOf[String] + val name = "char" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgDate.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgDate.scala index ddeed70..5867b61 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgDate.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgDate.scala @@ -18,9 +18,10 @@ package io.rdbc.pgsql.core.types import java.time.LocalDate -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgDate extends PgType[LocalDate] { val typeOid = Oid(1082) val cls = classOf[LocalDate] + val name = "date" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgDecimal.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgDecimal.scala index 9b09b55..6355c6f 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgDecimal.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgDecimal.scala @@ -16,10 +16,11 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid import io.rdbc.sapi.SqlNumeric trait PgDecimal extends PgType[SqlNumeric] { val typeOid = Oid(1700) val cls = classOf[SqlNumeric] + val name = "decimal" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgFloat4.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgFloat4.scala index 047d9c6..3ce1207 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgFloat4.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgFloat4.scala @@ -16,10 +16,11 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgFloat4 extends PgType[Float] { val typeOid 
= Oid(700) val cls = classOf[Float] + val name = "float4" override val otherClasses = Vector(classOf[java.lang.Float]) } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgFloat8.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgFloat8.scala index e649dfe..a2e01a8 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgFloat8.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgFloat8.scala @@ -16,10 +16,11 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgFloat8 extends PgType[Double] { val typeOid = Oid(701) val cls = classOf[Double] + val name = "float8" override val otherClasses = Vector(classOf[java.lang.Double]) } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt2.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt2.scala index 115f453..c27ef86 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt2.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt2.scala @@ -16,10 +16,11 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgInt2 extends PgType[Short] { val typeOid = Oid(21) val cls = classOf[Short] + val name = "int2" override val otherClasses = Vector(classOf[java.lang.Short]) } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt4.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt4.scala index 8182996..f8b9ef0 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt4.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt4.scala @@ -16,10 +16,11 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgInt4 extends PgType[Int] { val typeOid = Oid(23) val cls = classOf[Int] + val name = "int4" override val 
otherClasses = Vector(classOf[Integer]) } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt8.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt8.scala index 56f336d..b2e1105 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt8.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgInt8.scala @@ -16,10 +16,11 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgInt8 extends PgType[Long] { val typeOid = Oid(20) val cls = classOf[Long] + val name = "int8" override val otherClasses = Vector(classOf[java.lang.Long]) } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgText.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgText.scala index c27c80a..7f31712 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgText.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgText.scala @@ -16,9 +16,10 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgText extends PgType[String] { val typeOid = Oid(25) val cls = classOf[String] + val name = "text" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTime.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTime.scala index d72614a..a421644 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTime.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTime.scala @@ -18,9 +18,10 @@ package io.rdbc.pgsql.core.types import java.time.LocalTime -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgTime extends PgType[LocalTime] { val typeOid = Oid(1083) val cls = classOf[LocalTime] + val name = "time" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTimestamp.scala 
b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTimestamp.scala index 5d63fe8..a984ba6 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTimestamp.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTimestamp.scala @@ -21,9 +21,10 @@ import java.time.format.DateTimeFormatterBuilder import java.time.temporal.ChronoField import io.rdbc.pgsql.core.SessionParams -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgTimestamp extends PgType[LocalDateTime] { val typeOid = Oid(1114) val cls = classOf[LocalDateTime] + val name = "timestamp" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTimestampTz.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTimestampTz.scala index bfb498c..525156b 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTimestampTz.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTimestampTz.scala @@ -18,9 +18,10 @@ package io.rdbc.pgsql.core.types import java.time.Instant -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgTimestampTz extends PgType[Instant] { val typeOid = Oid(1184) val cls = classOf[Instant] + val name = "timestamptz" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgType.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgType.scala index 203e9a8..e6b12fc 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgType.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgType.scala @@ -17,14 +17,16 @@ package io.rdbc.pgsql.core.types import io.rdbc.pgsql.core.SessionParams -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid +import scodec.bits.ByteVector trait PgType[T] { def typeOid: Oid def cls: Class[T] def otherClasses: Vector[Class[_]] = Vector.empty + def name: String - def toObj(binaryVal: Array[Byte])(implicit 
sessionParams: SessionParams): T + def toObj(binaryVal: ByteVector)(implicit sessionParams: SessionParams): T - def toPgBinary(obj: T)(implicit sessionParams: SessionParams): Array[Byte] + def toPgBinary(obj: T)(implicit sessionParams: SessionParams): ByteVector } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTypeRegistry.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTypeRegistry.scala index 8426b3f..cf37534 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTypeRegistry.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTypeRegistry.scala @@ -16,33 +16,40 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.ImmutSeq +import io.rdbc.pgsql.core.pgstruct.Oid object PgTypeRegistry { - def apply(types: PgType[_]*): PgTypeRegistry = { - val oid2type = Map( - types.map(t => t.typeOid -> t): _* - ) + def apply(types: ImmutSeq[PgType[_]]): PgTypeRegistry = { + val oidTypePairs = types.map(t => (t.typeOid, t)) + val classTypePairs = types.flatMap { t => + val allClasses = t.cls +: t.otherClasses + allClasses.map(cls => (cls, t)) + } - val cls2type: Map[Class[_], PgType[_]] = Map( - types.flatMap { t => - t.otherClasses.map(oc => oc -> t) :+ (t.cls -> t) - }: _* + new PgTypeRegistry( + oid2type = Map(oidTypePairs: _*), + cls2type = Map(classTypePairs: _*) ) + } - new PgTypeRegistry(oid2type, cls2type) + def apply(types: PgType[_]*): PgTypeRegistry = { + apply(ImmutSeq(types)) } } -class PgTypeRegistry(val oid2type: Map[Oid, PgType[_]], val cls2type: Map[Class[_], PgType[_]]) { +class PgTypeRegistry protected ( + oid2type: Map[Oid, PgType[_]], + cls2type: Map[Class[_], PgType[_]] +) { - def byClass[T](cls: Class[T]): Option[PgType[T]] = { + def typeByClass[T](cls: Class[T]): Option[PgType[T]] = { cls2type.get(cls).map(_.asInstanceOf[PgType[T]]) } - def byTypeOid(oid: Oid): Option[PgType[_]] = { + def typeByOid(oid: Oid): Option[PgType[_]] = { 
oid2type.get(oid) } -} \ No newline at end of file +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ParameterStatus.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTypesProvider.scala similarity index 82% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ParameterStatus.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTypesProvider.scala index 00ef0e2..65897f8 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/messages/backend/ParameterStatus.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgTypesProvider.scala @@ -14,6 +14,10 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.messages.backend +package io.rdbc.pgsql.core.types -case class ParameterStatus(key: String, value: String) extends PgBackendMessage +import io.rdbc.ImmutSeq + +trait PgTypesProvider { + def types: ImmutSeq[PgType[_]] +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgUuid.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgUuid.scala index 09cb29d..de1ff5e 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgUuid.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgUuid.scala @@ -18,9 +18,10 @@ package io.rdbc.pgsql.core.types import java.util.UUID -import io.rdbc.pgsql.core.messages.data.Oid +import io.rdbc.pgsql.core.pgstruct.Oid trait PgUuid extends PgType[UUID] { val typeOid = Oid(2950) val cls = classOf[UUID] + val name = "uuid" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgVarchar.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgVarchar.scala index b445bb2..aa8596d 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgVarchar.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/types/PgVarchar.scala @@ -16,9 +16,10 @@ package io.rdbc.pgsql.core.types -import io.rdbc.pgsql.core.messages.data.Oid 
+import io.rdbc.pgsql.core.pgstruct.Oid trait PgVarchar extends PgType[String] { val typeOid = Oid(1043) val cls = classOf[String] + val name = "varchar" } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/Lock.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/concurrent/BlockLock.scala similarity index 72% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/Lock.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/concurrent/BlockLock.scala index f8723eb..e6354fe 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/Lock.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/concurrent/BlockLock.scala @@ -14,39 +14,30 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.util +package io.rdbc.pgsql.core.util.concurrent import java.util.concurrent.locks.ReentrantLock -import scala.concurrent.blocking -trait Lock { - def withLock[A](body: => A): A -} +import io.rdbc.util.Logging + +import scala.concurrent.blocking -class SpinLock extends Lock { +class BlockLock extends Lock with Logging { private[this] val rl = new ReentrantLock def withLock[A](body: => A): A = { - while (rl.tryLock()) {} + blocking { + rl.lock() + } try { body } finally { rl.unlock() } } -} -class SleepLock extends Lock { - private[this] val rl = new ReentrantLock +} - def withLock[A](body: => A): A = { - blocking { - rl.lock() - try { - body - } finally { - rl.unlock() - } - } - } -} \ No newline at end of file +class BlockLockFactory extends LockFactory { + def lock: BlockLock = new BlockLock +} diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/EmptyState.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/concurrent/Lock.scala similarity index 82% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/EmptyState.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/concurrent/Lock.scala index 4147480..09980f8 100644 --- 
a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/fsm/EmptyState.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/concurrent/Lock.scala @@ -14,8 +14,12 @@ * limitations under the License. */ -package io.rdbc.pgsql.core.fsm +package io.rdbc.pgsql.core.util.concurrent -trait EmptyState extends State with DefaultErrorHandling { - def msgHandler = PartialFunction.empty +trait Lock { + def withLock[A](body: => A): A +} + +trait LockFactory { + def lock: Lock } diff --git a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/scheduler/TimeoutScheduler.scala b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/concurrent/SpinLock.scala similarity index 59% rename from rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/scheduler/TimeoutScheduler.scala rename to rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/concurrent/SpinLock.scala index 6438f00..1618fae 100644 --- a/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/scheduler/TimeoutScheduler.scala +++ b/rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/util/concurrent/SpinLock.scala @@ -14,17 +14,25 @@ * limitations under the License. 
*/ -package io.rdbc.pgsql.core.scheduler +package io.rdbc.pgsql.core.util.concurrent -object TimeoutScheduler { - def apply(f: => ScheduledTask): TimeoutScheduler = { - new TimeoutScheduler() { - def scheduleTimeout(): ScheduledTask = f +import java.util.concurrent.locks.ReentrantLock + +import io.rdbc.util.Logging + +class SpinLock extends Lock with Logging { + private[this] val rl = new ReentrantLock + + def withLock[A](body: => A): A = { + while (rl.tryLock()) {} + try { + body + } finally { + rl.unlock() } } } -trait TimeoutScheduler extends (() => ScheduledTask) { - def scheduleTimeout(): ScheduledTask - def apply(): ScheduledTask = scheduleTimeout() -} \ No newline at end of file +class SpinLockFactory extends LockFactory { + def lock: SpinLock = new SpinLock +} diff --git a/rdbc-pgsql-playground/src/main/resources/logback.xml b/rdbc-pgsql-playground/src/main/resources/logback.xml new file mode 100644 index 0000000..3e6c1da --- /dev/null +++ b/rdbc-pgsql-playground/src/main/resources/logback.xml @@ -0,0 +1,12 @@ + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + \ No newline at end of file diff --git a/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/Jdbc.scala b/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/Jdbc.scala index cd0ae10..f67126d 100644 --- a/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/Jdbc.scala +++ b/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/Jdbc.scala @@ -21,10 +21,8 @@ object Jdbc extends App { conn.close() } - } - object JdbcSelect extends App { val props = new Properties() @@ -32,17 +30,20 @@ object JdbcSelect extends App { props.setProperty("password", "povder") val conn = DriverManager.getConnection("jdbc:postgresql://localhost/povder", props) try { - val stmt = conn.prepareStatement("select * from test") - val rs = stmt.executeQuery() - while (rs.next()) { - println(rs.getInt(1)) + (1 to 100).foreach { i => + val start = System.nanoTime() + 
val stmt = conn.prepareStatement("select x from test") + val rs = stmt.executeQuery() + while (rs.next()) { + rs.getInt("x") + } + val time = System.nanoTime() - start + println(s"$i time = ${time / 1000000.0}ms") } - } finally { conn.close() } - } object JdbcTypeTest extends App { @@ -62,5 +63,4 @@ object JdbcTypeTest extends App { conn.close() } - } diff --git a/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/Pars.scala b/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/Pars.scala new file mode 100644 index 0000000..886a82d --- /dev/null +++ b/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/Pars.scala @@ -0,0 +1,41 @@ +package io.rdbc.pgsql.playground + +import scala.collection.mutable.ListBuffer +import scala.util.matching.Regex + + +object Pars extends App { + + val doubleColonParam = """::[a-zA-Z]\w*""" + val string = "'.+?'" + val column = """".+?"""" + val blockComment = """/\*.*?\*/""" + val lineComment = "--.*" + val param = """(:[a-zA-Z]\w*)""" + + val pattern = new Regex( + s"$string|$column|$blockComment|$lineComment|$doubleColonParam|$param", "param" + ) + + val sql = "'1'::int4 dupa :::dupa \":column\" :penor ':pstring' /*:p1*/ dupa :p2>:p3" + + val sb = new StringBuilder + val params = ListBuffer.empty[String] + var lastTextIdx = 0 + var lastParamIdx = 0 + + pattern.findAllMatchIn(sql).foreach(println) + + pattern.findAllMatchIn(sql).filter(_.group(1) != null).foreach { m => + sb.append(sql.substring(lastTextIdx, m.start)) + lastParamIdx += 1 + sb.append("$").append(lastParamIdx) + lastTextIdx = m.end + } + sb.append(sql.substring(lastTextIdx)) + + println(sql) + println(sb.toString()) + + +} diff --git a/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/ScodecTest.scala b/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/ScodecTest.scala index ccf7b67..3588b9a 100644 --- a/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/ScodecTest.scala +++ 
b/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/ScodecTest.scala @@ -1,8 +1,7 @@ package io.rdbc.pgsql.playground - object ScodecTest extends App { -/* + /* implicit val serverCharset = ServerCharset(Charset.forName("UTF-8")) implicit val clientCharset = ClientCharset(Charset.forName("UTF-8")) @@ -29,5 +28,5 @@ object ScodecTest extends App { ))).require PgBackendMessage.codec.decode(hex"0x440000004a00020000001e1234567890123456789012345678901234567890123456789012345678900000001e123456789012345678901234567890123456789012345678901234567890".bits) -*/ + */ } diff --git a/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/tests.scala b/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/tests.scala index 06a4155..58539e3 100644 --- a/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/tests.scala +++ b/rdbc-pgsql-playground/src/main/scala/io/rdbc/pgsql/playground/tests.scala @@ -16,7 +16,10 @@ package io.rdbc.pgsql.playground +import java.nio.charset.Charset + import akka.stream.scaladsl.{Sink, Source} +import io.rdbc.pgsql.scodec.ScodecDecoderFactory import io.rdbc.pgsql.transport.netty.NettyPgConnectionFactory import io.rdbc.sapi.Interpolators._ import io.rdbc.sapi.Row @@ -32,45 +35,50 @@ object Tst extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") - - fact.connection().flatMap { conn => - println("hai\n\n\n") - //Thread.sleep(20000L) - val start = System.nanoTime() - - val x = -100 - - val rsFut = for { - stmt <- conn.statement(sql"SELECT * FROM test WHERE x > $x") - rs <- stmt.executeForSet() - //TODO when i ctrl-c postgresql during pulling rows nothing happens - } yield (stmt, rs) + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") + fact + .connection() + .flatMap { conn => + println("hai\n\n\n") + //Thread.sleep(20000L) + val start = 
System.nanoTime() - rsFut.flatMap { case (stmt, rs) => - val time = System.nanoTime() - start - println(s"time = ${time / 1000000}") + val x = -100 - rs.foreach { row => - println(s"x = ${row.int("x")}, t = ${row.localDateTime("t")}, s = ${row.str("s")}") + val rsFut = for { + stmt <- conn.statement(sql"SELECT * FROM test WHERE x > $x") + rs <- stmt.executeForSet() + //TODO when i ctrl-c postgresql during pulling rows nothing happens + } yield (stmt, rs) + + rsFut.flatMap { + case (stmt, rs) => + val time = System.nanoTime() - start + println(s"time = ${time / 1000000}") + + rs.foreach { row => + println(s"x = ${row.int("x")}, t = ${row.localDateTime("t")}, s = ${row.str("s")}") + } + println("DONE") + stmt.deallocate() + }.map { _ => + conn.release() } - println("DONE") - stmt.deallocate() - }.map { _ => - conn.release() - } - }.recover { - case ex => - println("ERROR") - ex.printStackTrace() - }.flatMap { _ => - println("hai shutdown") - fact.shutdown() - }.map { _ => - println("SHUT DOWN") - } + } + .recover { + case ex => + println("ERROR") + ex.printStackTrace() + } + .flatMap { _ => + println("hai shutdown") + fact.shutdown() + } + .map { _ => + println("SHUT DOWN") + } } @@ -79,60 +87,67 @@ object InsertTst extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") - fact.connection().flatMap { conn => - println("hai\n\n\n") + fact + .connection() + .flatMap { conn => + println("hai\n\n\n") - val rsFut = for { - stmt <- conn.statement("insert into test(x) (select x from test)") - parametrized <- stmt.noParamsF - count <- parametrized.executeForRowsAffected() - } yield (stmt, count) + val rsFut = for { + stmt <- conn.statement("insert into test(x) (select x from test)") + parametrized <- stmt.noParamsF + count <- 
parametrized.executeForRowsAffected() + } yield (stmt, count) + + rsFut.map { + case (stmt, count) => + println(s"inserted $count") + println("DONE") + conn.release() + fact.shutdown() + } - rsFut.map { case (stmt, count) => - println(s"inserted $count") - println("DONE") - conn.release() - fact.shutdown() } - - }.recover { - case ex => ex.printStackTrace() - } + .recover { + case ex => ex.printStackTrace() + } } - object NoDataTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") - fact.connection().flatMap { conn => - println("hai\n\n\n") + fact + .connection() + .flatMap { conn => + println("hai\n\n\n") - val rsFut = for { - stmt <- conn.statement("create table if not exists dupa (x varchar)") - parametrized <- stmt.noParamsF - rs <- parametrized.executeForSet() - } yield rs + val rsFut = for { + stmt <- conn.statement("create table if not exists dupa (x varchar)") + parametrized <- stmt.noParamsF + rs <- parametrized.executeForSet() + } yield rs - val result = rsFut.map { rs => - rs.foreach { row => - println(s"x = ${row.int("x")}, t = ${row.localDateTime("t")}, s = ${row.str("s")}") + val result = rsFut.map { rs => + rs.foreach { row => + println(s"x = ${row.int("x")}, t = ${row.localDateTime("t")}, s = ${row.str("s")}") + } + println("DONE") } - println("DONE") - } - result.onComplete(_ => conn.release()) - result + result.onComplete(_ => conn.release()) + result - }.recover { - case ex => ex.printStackTrace() - }.onComplete(_ => fact.shutdown()) + } + .recover { + case ex => ex.printStackTrace() + } + .onComplete(_ => fact.shutdown()) } @@ -141,30 +156,34 @@ object EmptyQueryTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = 
NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") - fact.connection().flatMap { conn => - println("hai\n\n\n") + fact + .connection() + .flatMap { conn => + println("hai\n\n\n") - val rsFut = for { - stmt <- conn.statement("") - parametrized <- stmt.noParamsF - rs <- parametrized.executeForSet() - } yield rs + val rsFut = for { + stmt <- conn.statement("") + parametrized <- stmt.noParamsF + rs <- parametrized.executeForSet() + } yield rs - val result = rsFut.map { rs => - rs.foreach { row => - println(s"x = ${row.int("x")}, t = ${row.localDateTime("t")}, s = ${row.str("s")}") + val result = rsFut.map { rs => + rs.foreach { row => + println(s"x = ${row.int("x")}, t = ${row.localDateTime("t")}, s = ${row.str("s")}") + } + println("DONE") } - println("DONE") - } - result.onComplete(_ => conn.release()) - result + result.onComplete(_ => conn.release()) + result - }.recover { - case ex => ex.printStackTrace() - }.onComplete(_ => fact.shutdown()) + } + .recover { + case ex => ex.printStackTrace() + } + .onComplete(_ => fact.shutdown()) } @@ -173,29 +192,33 @@ object BindByIdxTst extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") - fact.connection().flatMap { conn => - println("hai\n\n\n") + fact + .connection() + .flatMap { conn => + println("hai\n\n\n") - val rsFut = for { - stmt <- conn.statement("select * from test where x > :x and x > :y and x > :z") - parametrized <- stmt.bindByIdxF(-100, -200, -300) - rs <- parametrized.executeForSet() - } yield rs + val rsFut = for { + stmt <- conn.statement("select * from test where x > :x and x > :y and x > :z") + parametrized <- stmt.bindByIdxF(-100, -200, -300) + rs <- parametrized.executeForSet() + } 
yield rs - rsFut.map { rs => - rs.foreach { row => - println(s"x = ${row.int("x")}, t = ${row.localDateTime("t")}, s = ${row.str("s")}") + rsFut.map { rs => + rs.foreach { row => + println(s"x = ${row.int("x")}, t = ${row.localDateTime("t")}, s = ${row.str("s")}") + } + println("DONE") + }.map { _ => + conn.release() } - println("DONE") - }.map { _ => - conn.release() - } - }.recover { - case ex => ex.printStackTrace() - }.onComplete(_ => fact.shutdown()) + } + .recover { + case ex => ex.printStackTrace() + } + .onComplete(_ => fact.shutdown()) } @@ -204,45 +227,70 @@ object IdleTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder").connection().map { conn => - println("hai\n\n\n") - + NettyPgConnectionFactory("localhost", 5432, "povder", "povder") + .connection() + .map { conn => + println("hai\n\n\n") - }.recover { - case ex => ex.printStackTrace() - } + } + .recover { + case ex => ex.printStackTrace() + } } +object BeginTwiceTst extends App { + + implicit val ec = ExecutionContext.global + implicit val timeout = FiniteDuration.apply(10, "seconds") + + NettyPgConnectionFactory("localhost", 5432, "povder", "povder") + .connection() + .map { conn => + println("hai\n\n\n") + + for (i <- 1 to 100) { + Await.result(conn.beginTx().flatMap(_ => conn.commitTx()), Duration.Inf) + } + + + } + .recover { + case ex => ex.printStackTrace() + } + +} object TstFloat extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder").connection().flatMap { conn => - println("hai\n\n\n") + NettyPgConnectionFactory("localhost", 5432, "povder", "povder") + .connection() + .flatMap { conn => + println("hai\n\n\n") - val rsFut = for { - stmt <- conn.statement("select * from test3") - parametrized <- stmt.noParamsF - rs <- 
parametrized.executeForSet() - } yield rs + val rsFut = for { + stmt <- conn.statement("select * from test3") + parametrized <- stmt.noParamsF + rs <- parametrized.executeForSet() + } yield rs - rsFut.map { rs => - rs.foreach { row => - println(s"f = ${row.float("f")}, x = ${row.int("x")}, t = ${row.localDateTime("t")}, s = ${row.str("s")}") - } - println("DONE") - }.map(_ => conn.release()) + rsFut.map { rs => + rs.foreach { row => + println(s"f = ${row.float("f")}, x = ${row.int("x")}, t = ${row.localDateTime("t")}, s = ${row.str("s")}") + } + println("DONE") + }.map(_ => conn.release()) - }.recover { - case ex => ex.printStackTrace() - } + } + .recover { + case ex => ex.printStackTrace() + } } - class SlowSubscriber extends Subscriber[Row] { override def onError(t: Throwable): Unit = t.printStackTrace() @@ -269,28 +317,30 @@ class SlowSubscriber extends Subscriber[Row] { override def onNext(t: Row): Unit = println(s"next $t") } - object StreamTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder").connection().flatMap { conn => - println("hai\n\n\n") + NettyPgConnectionFactory("localhost", 5432, "povder", "povder") + .connection() + .flatMap { conn => + println("hai\n\n\n") - val streamFut = conn.statement("select * from test").flatMap { stmt => - stmt.noParamsF.flatMap { parametrized => - parametrized.executeForStream() + val streamFut = conn.statement("select * from test").flatMap { stmt => + stmt.noParamsF.flatMap { parametrized => + parametrized.executeForStream() + } } - } - streamFut.map { stream => - stream.rows.subscribe(new SlowSubscriber()) - println("DONE") - } + streamFut.map { stream => + stream.rows.subscribe(new SlowSubscriber()) + println("DONE") + } - }.recover { - case ex => ex.printStackTrace() - } + } + .recover { + case ex => ex.printStackTrace() + } } object TxTest extends App { @@ -298,25 +348,28 @@ 
object TxTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder").connection().flatMap { conn => - println("hai\n\n\n") + NettyPgConnectionFactory("localhost", 5432, "povder", "povder") + .connection() + .flatMap { conn => + println("hai\n\n\n") + + val rsFut = for { + _ <- conn.beginTx() + stmt <- conn.statement("select * from test where x = :x") + parametrized <- stmt.bindF("x" -> 421) + rs <- parametrized.executeForSet() + _ <- conn.rollbackTx() + } yield rs - val rsFut = for { - _ <- conn.beginTx() - stmt <- conn.statement("select * from test where x = :x") - parametrized <- stmt.bindF("x" -> 421) - rs <- parametrized.executeForSet() - _ <- conn.rollbackTx() - } yield rs + rsFut.map { rs => + println(s"rs = $rs") + println("DONE") + } - rsFut.map { rs => - println(s"rs = $rs") - println("DONE") } - - }.recover { - case ex => ex.printStackTrace() - } + .recover { + case ex => ex.printStackTrace() + } } object ErrTest extends App { @@ -324,23 +377,26 @@ object ErrTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder").connection().flatMap { conn => - println("hai\n\n\n") + NettyPgConnectionFactory("localhost", 5432, "povder", "povder") + .connection() + .flatMap { conn => + println("hai\n\n\n") - val rsFut = for { - stmt <- conn.statement("select * from tes5") - parametrized <- stmt.noParamsF - rs <- parametrized.executeForSet() - } yield rs + val rsFut = for { + stmt <- conn.statement("select * from tes5") + parametrized <- stmt.noParamsF + rs <- parametrized.executeForSet() + } yield rs - rsFut.map { rs => - println(s"rs = $rs") - println("DONE") - } + rsFut.map { rs => + println(s"rs = $rs") + println("DONE") + } - }.recover { - case ex => ex.printStackTrace() - } + } + .recover { + 
case ex => ex.printStackTrace() + } } @@ -349,25 +405,28 @@ object ErrTestTx extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder").connection().flatMap { conn => - println("hai\n\n\n") + NettyPgConnectionFactory("localhost", 5432, "povder", "povder") + .connection() + .flatMap { conn => + println("hai\n\n\n") + + val rsFut = for { + _ <- conn.beginTx() + stmt <- conn.statement("select * from tes5") + parametrized <- stmt.noParamsF + rs <- parametrized.executeForSet() + _ <- conn.commitTx() + } yield rs - val rsFut = for { - _ <- conn.beginTx() - stmt <- conn.statement("select * from tes5") - parametrized <- stmt.noParamsF - rs <- parametrized.executeForSet() - _ <- conn.commitTx() - } yield rs + rsFut.map { rs => + println(s"rs = $rs") + println("DONE") + } - rsFut.map { rs => - println(s"rs = $rs") - println("DONE") } - - }.recover { - case ex => ex.printStackTrace() - } + .recover { + case ex => ex.printStackTrace() + } } @@ -376,37 +435,40 @@ object PerfTst extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val connFact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val connFact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") - connFact.connection().flatMap { conn => - println("hai\n\n\n") + connFact + .connection() + .flatMap { conn => + println("hai\n\n\n") - (1 to 100).foreach { i => + (1 to 100).foreach { i => + val start = System.nanoTime() - val start = System.nanoTime() + val rsFut = for { + stmt <- conn.statement("select x from test") + parametrized <- stmt.noParamsF + rs <- parametrized.executeForSet() + } yield rs - val rsFut = for { - stmt <- conn.statement("select * from test") - parametrized <- stmt.noParamsF - rs <- parametrized.executeForSet() - } yield rs + /*rsFut.map { rs => - rsFut.map { rs => + 
}*/ + Await.ready(rsFut, Duration.Inf) val time = System.nanoTime() - start - println("time = " + (time / 1000000.0) + "ms") + println(s"$i time = " + (time / 1000000.0) + "ms") } - Await.ready(rsFut, Duration.Inf) - } - conn.release() + conn.release() - }.onComplete { - case Success(_) => - connFact.shutdown() - println("ok") + } + .onComplete { + case Success(_) => + connFact.shutdown() + println("ok") - case Failure(ex) => ex.printStackTrace() - } + case Failure(ex) => ex.printStackTrace() + } } @@ -440,30 +502,33 @@ object KeyGenTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") - - fact.connection().flatMap { conn => - println("hai\n\n\n") + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") - val rsFut = for { - stmt <- conn.returningInsert("insert into serial_test(x) values (:x)") - parametrized <- stmt.bindF("x" -> 10) - keys <- parametrized.executeForKeysSet() - } yield keys + fact + .connection() + .flatMap { conn => + println("hai\n\n\n") - rsFut.map { keys => - println("KEYS") - keys.rows.foreach { row => - println(" id = " + row.int("id")) + val rsFut = for { + stmt <- conn.returningInsert("insert into serial_test(x) values (:x)") + parametrized <- stmt.bindF("x" -> 10) + keys <- parametrized.executeForKeysSet() + } yield keys + + rsFut.map { keys => + println("KEYS") + keys.rows.foreach { row => + println(" id = " + row.int("id")) + } + println("DONE") + conn.release() + fact.shutdown() } - println("DONE") - conn.release() - fact.shutdown() - } - }.recover { - case ex => ex.printStackTrace() - } + } + .recover { + case ex => ex.printStackTrace() + } } @@ -472,23 +537,26 @@ object TimeoutTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(5, "seconds") - NettyPgConnectionFactory("localhost", 5432, "povder", "povder", 
"povder").connection().flatMap { conn => - println("hai\n\n\n") + NettyPgConnectionFactory("localhost", 5432, "povder", "povder") + .connection() + .flatMap { conn => + println("hai\n\n\n") - val rsFut = for { - stmt <- conn.statement("select pg_sleep(5)") - parametrized <- stmt.noParamsF - rs <- parametrized.executeForSet() - } yield rs + val rsFut = for { + stmt <- conn.statement("select pg_sleep(5)") + parametrized <- stmt.noParamsF + rs <- parametrized.executeForSet() + } yield rs - rsFut.map { rs => - println(s"rs = $rs") - println("DONE") - } + rsFut.map { rs => + println(s"rs = $rs") + println("DONE") + } - }.recover { - case ex => ex.printStackTrace() - } + } + .recover { + case ex => ex.printStackTrace() + } } @@ -497,42 +565,45 @@ object ConnClosedTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder").connection().flatMap { conn => - println("hai\n\n\n") - - val rsFut = for { - stmt <- conn.statement("select pg_sleep(60)") - parametrized <- stmt.noParamsF - rs <- parametrized.executeForSet() - } yield rs + NettyPgConnectionFactory("localhost", 5432, "povder", "povder") + .connection() + .flatMap { conn => + println("hai\n\n\n") - rsFut.map { rs => - println(s"rs = $rs") - println("DONE") - }.recover { - case ex => println(ex.getMessage) - } - - rsFut.andThen { case _ => - val rsFut2 = for { - stmt <- conn.statement("select 1") + val rsFut = for { + stmt <- conn.statement("select pg_sleep(60)") parametrized <- stmt.noParamsF rs <- parametrized.executeForSet() } yield rs - rsFut2.map { rs => - println("RS2 done") + rsFut.map { rs => + println(s"rs = $rs") + println("DONE") }.recover { - case ex => - println("RS2 failed") - ex.printStackTrace() + case ex => println(ex.getMessage) } - } + rsFut.andThen { + case _ => + val rsFut2 = for { + stmt <- conn.statement("select 1") + parametrized <- stmt.noParamsF + rs <- 
parametrized.executeForSet() + } yield rs + + rsFut2.map { rs => + println("RS2 done") + }.recover { + case ex => + println("RS2 failed") + ex.printStackTrace() + } + } - }.recover { - case ex => ex.printStackTrace() - } + } + .recover { + case ex => ex.printStackTrace() + } } @@ -540,31 +611,33 @@ object IfIdleTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder").connection().map { conn => - println("hai\n\n\n") - - val rsFut = for { - stmt <- conn.statement("select pg_sleep(60)") - parametrized <- stmt.noParamsF - rs <- parametrized.executeForSet() //TODO executeForSet may use a different thread internally - should I allow this? - } yield rs + NettyPgConnectionFactory("localhost", 5432, "povder", "povder") + .connection() + .map { conn => + println("hai\n\n\n") + val rsFut = for { + stmt <- conn.statement("select pg_sleep(60)") + parametrized <- stmt.noParamsF + rs <- parametrized + .executeForSet() //TODO executeForSet may use a different thread internally - should I allow this? + } yield rs - rsFut.onComplete(r => println("fut1 = " + r)) - - val rsFut2 = for {//TODO will this work without synchronization? Client can execute from multiple threads - should I allow this? - stmt <- conn.statement("select pg_sleep(60)") - parametrized <- stmt.noParamsF - rs <- parametrized.executeForSet() //this should fail because of illegal state - } yield rs - + rsFut.onComplete(r => println("fut1 = " + r)) - rsFut2.onComplete(r => println("fut2 = " + r)) + val rsFut2 = + for {//TODO will this work without synchronization? Client can execute from multiple threads - should I allow this? 
+ stmt <- conn.statement("select pg_sleep(60)") + parametrized <- stmt.noParamsF + rs <- parametrized.executeForSet() //this should fail because of illegal state + } yield rs + rsFut2.onComplete(r => println("fut2 = " + r)) - }.recover { - case ex => ex.printStackTrace() - } + } + .recover { + case ex => ex.printStackTrace() + } } object ParallelTest extends App { @@ -572,47 +645,50 @@ object ParallelTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val connFact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val connFact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") - connFact.connection().flatMap { conn => - println("hai\n\n\n") + connFact + .connection() + .flatMap { conn => + println("hai\n\n\n") - var f = Future.successful(()) + var f = Future.successful(()) - (1 to 10).foreach { i => - println(s"starting $i") + (1 to 10).foreach { i => + println(s"starting $i") - val start = System.nanoTime() + val start = System.nanoTime() - val rsFut = for { - stmt <- conn.statement(s"select pg_sleep(${Random.nextInt(2)}) from test") - parametrized <- stmt.noParamsF - rs <- parametrized.executeForSet() - } yield rs + val rsFut = for { + stmt <- conn.statement(s"select pg_sleep(${Random.nextInt(2)}) from test") + parametrized <- stmt.noParamsF + rs <- parametrized.executeForSet() + } yield rs + + val fut = rsFut.map { rs => + val time = System.nanoTime() - start + println(s"$i time = ${time / 1000000.0}ms") + }.recover { + case ex => println(s"$i err ${ex.getMessage}") + } + Thread.sleep(Random.nextInt(700)) + f = fut + } - val fut = rsFut.map { rs => - val time = System.nanoTime() - start - println(s"$i time = ${time / 1000000.0}ms") + f.flatMap { _ => + conn.release() }.recover { - case ex => println(s"$i err ${ex.getMessage}") + case _ => conn.release() } - Thread.sleep(Random.nextInt(700)) - f = fut - } - f.flatMap { _ => - conn.release() - }.recover 
{ - case _ => conn.release() } + .onComplete { + case Success(_) => + connFact.shutdown() + println("ok") - }.onComplete { - case Success(_) => - connFact.shutdown() - println("ok") - - case Failure(ex) => ex.printStackTrace() - } + case Failure(ex) => ex.printStackTrace() + } } @@ -621,27 +697,31 @@ object SmallintTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") - fact.connection().flatMap { conn => - println("hai\n\n\n") + fact + .connection() + .flatMap { conn => + println("hai\n\n\n") - val rsFut = for { - stmt <- conn.insert("insert into test_smallint(x) values (:x)") - parametrized <- stmt.bindF("x" -> Int.MaxValue) - rs <- parametrized.execute() - } yield rs + val rsFut = for { + stmt <- conn.insert("insert into test_smallint(x) values (:x)") + parametrized <- stmt.bindF("x" -> Int.MaxValue) + rs <- parametrized.execute() + } yield rs - val result = rsFut.map { _ => - println("DONE") - } + val result = rsFut.map { _ => + println("DONE") + } - result.onComplete(_ => conn.release()) - result + result.onComplete(_ => conn.release()) + result - }.recover { - case ex => ex.printStackTrace() - }.onComplete(_ => fact.shutdown()) + } + .recover { + case ex => ex.printStackTrace() + } + .onComplete(_ => fact.shutdown()) } @@ -652,33 +732,37 @@ object NullTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") - fact.connection().flatMap { conn => - println("hai\n\n\n") + fact + .connection() + .flatMap { conn => + println("hai\n\n\n") - val intOpt = Option.empty[Int] - val yOpt = Some(10) + val intOpt = 
Option.empty[Int] + val yOpt = Some(10) - val rsFut = for { - stmt <- conn.select("select :x as x, :y as y, :z as z") - parametrized <- stmt.bindF("x" -> intOpt.toSqlParam, "y" -> yOpt.toSqlParam, "z" -> Some(1)) - rs <- parametrized.executeForSet() - } yield rs + val rsFut = for { + stmt <- conn.select("select :x as x, :y as y, :z as z") + parametrized <- stmt.bindF("x" -> intOpt.toSqlParam, "y" -> yOpt.toSqlParam, "z" -> Some(1)) + rs <- parametrized.executeForSet() + } yield rs - val result = rsFut.map { rs => - rs.foreach { row => - println(s"x = ${row.col[String]("x")}, y = ${row.col[Int]("y")}, z = ${row.col[Int]("z")}") + val result = rsFut.map { rs => + rs.foreach { row => + println(s"x = ${row.col[String]("x")}, y = ${row.col[Int]("y")}, z = ${row.col[Int]("z")}") + } + println("DONE") } - println("DONE") - } - result.onComplete(_ => conn.release()) - result + result.onComplete(_ => conn.release()) + result - }.recover { - case ex => ex.printStackTrace() - }.onComplete(_ => fact.shutdown()) + } + .recover { + case ex => ex.printStackTrace() + } + .onComplete(_ => fact.shutdown()) } @@ -687,27 +771,31 @@ object NullTest2 extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") - fact.connection().flatMap { conn => - println("hai\n\n\n") + fact + .connection() + .flatMap { conn => + println("hai\n\n\n") - val rsFut = for { - stmt <- conn.insert("insert into test(x, s) values(:x, 'dupa')") - parametrized <- stmt.bindF("x" -> Some(1)) - rs <- parametrized.execute() - } yield rs + val rsFut = for { + stmt <- conn.insert("insert into test(x, s) values(:x, 'dupa')") + parametrized <- stmt.bindF("x" -> Some(1)) + rs <- parametrized.execute() + } yield rs - val result = rsFut.map { _ => - println("DONE") - } + val result = rsFut.map { _ => + 
println("DONE") + } - result.onComplete(_ => conn.release()) - result + result.onComplete(_ => conn.release()) + result - }.recover { - case ex => ex.printStackTrace() - }.onComplete(_ => fact.shutdown()) + } + .recover { + case ex => ex.printStackTrace() + } + .onComplete(_ => fact.shutdown()) } @@ -753,15 +841,14 @@ object StmTest extends App { println("immutable = " + immutable.single()) } -*/ - + */ object StreamParamsTst extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") import akka.actor.ActorSystem import akka.stream._ @@ -769,7 +856,6 @@ object StreamParamsTst extends App { implicit val system = ActorSystem("tst") implicit val materializer = ActorMaterializer() - fact.connection().foreach { conn => println("hai\n\n\n") @@ -784,20 +870,19 @@ object StreamParamsTst extends App { _ <- stmt.streamParams(params) } yield stmt - val c = stmtFut.map(_ => conn.commitTx()) + val c = stmtFut.flatMap(_ => conn.commitTx()) val fut = c.map { _ => val time = System.nanoTime() - start - println(s"DONE: ${time / 1000000}ms") + println(s"$i DONE: ${time / 1000000}ms") } - Await.ready(fut, Duration.Inf) + Await.result(fut, Duration.Inf) i += 1 } } - } object ManyInsertTest extends App { @@ -805,14 +890,13 @@ object ManyInsertTest extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") fact.connection().foreach { conn => println("hai\n\n\n") var i = 0 - while (i < 100) { var j = 0 val start = System.nanoTime() @@ -839,7 +923,6 @@ object ManyInsertTest extends App { i += 1 } - } } @@ -849,7 +932,7 @@ object MultiConnTst extends App { 
implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") val f = (1 to 10).map(_ => fact.connection()).reduce((f1, f2) => f1.flatMap(_ => f2)) Await.ready(f, Duration.Inf) @@ -861,42 +944,92 @@ object TypeTst extends App { implicit val ec = ExecutionContext.global implicit val timeout = FiniteDuration.apply(10, "seconds") - val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder", "povder") - - fact.connection().flatMap { conn => - println("hai\n\n\n") - //Thread.sleep(20000L) - val start = System.nanoTime() + val fact = NettyPgConnectionFactory("localhost", 5432, "povder", "povder") - val x = -100 + fact + .connection() + .flatMap { conn => + println("hai\n\n\n") + //Thread.sleep(20000L) + val start = System.nanoTime() - val rsFut = for { - stmt <- conn.statement(sql"SELECT x FROM interval_test") - rs <- stmt.executeForSet() - //TODO when i ctrl-c postgresql during pulling rows nothing happens - } yield (stmt, rs) + val x = -100 - rsFut.flatMap { case (stmt, rs) => + val rsFut = for { + stmt <- conn.statement(sql"SELECT x FROM interval_test") + rs <- stmt.executeForSet() + //TODO when i ctrl-c postgresql during pulling rows nothing happens + } yield (stmt, rs) + + rsFut.flatMap { + case (stmt, rs) => + rs.rows.foreach { row => + println("x = " + row.bytes("x")) + } - rs.rows.foreach { row => - println("x = " + row.bytes("x")) + println("DONE") + stmt.deallocate() + }.map { _ => + conn.release() } - println("DONE") - stmt.deallocate() - }.map { _ => - conn.release() } + .recover { + case ex => + println("ERROR") + ex.printStackTrace() + } + .flatMap { _ => + println("hai shutdown") + fact.shutdown() + } + .map { _ => + println("SHUT DOWN") + } + +} +object DataRowTst extends App { + + import scodec.bits._ + import scodec.codecs._ + + val 
decoder = new ScodecDecoderFactory().decoder(Charset.forName("UTF-8")) + + + var drs = new Array[ByteVector](100 * 10000) + var i = 0 + var j = 0 + while (i < 100) { + //println(i) + while (j < 10000) { + val x = int32.encode(j).require.bytes + drs(i * 10000 + j) = (hex"440000000e000100000004" ++ x) + j += 1 + } + j = 0 + i += 1 + } + + println("no czesc") + + var x = 0 + while (x < 100) { + val start = System.nanoTime() - }.recover { - case ex => - println("ERROR") - ex.printStackTrace() - }.flatMap { _ => - println("hai shutdown") - fact.shutdown() - }.map { _ => - println("SHUT DOWN") + i = 0 + j = 0 + while (i < 100) { + while (j < 10000) { + decoder.decodeMsg(drs(i * 10000 + j)) + j += 1 + } + j = 0 + i += 1 + } + x += 1 + val time = System.nanoTime() - start + println(s"$x: time = ${time / 1000000.0}") } + } \ No newline at end of file diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/FieldValueCodec.scala b/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/FieldValueCodec.scala deleted file mode 100644 index ce73af1..0000000 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/FieldValueCodec.scala +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.scodec - -import _root_.scodec.bits.{BitVector, ByteVector} -import _root_.scodec.codecs._ -import _root_.scodec.{Attempt, Codec, DecodeResult, SizeBound} -import io.rdbc.pgsql.core.messages.data.{FieldValue, NotNullFieldValue, NullFieldValue} - -object FieldValueCodec extends Codec[FieldValue] { - def sizeBound: SizeBound = SizeBound.unknown - - def decode(bits: BitVector): Attempt[DecodeResult[FieldValue]] = { - pgInt32.decode(bits).flatMap(dResult => { - val len = dResult.value - if (len == -1) { - Attempt.successful(DecodeResult(NullFieldValue, dResult.remainder)) - } else { - bytesArr(len).as[NotNullFieldValue].decode(dResult.remainder) - } - }) - } - - def encode(value: FieldValue): Attempt[BitVector] = value match { - case NullFieldValue => byte.unit(-1).encode(Unit) - case NotNullFieldValue(data) => variableSizeBytes(pgInt32, bytes).encode(ByteVector.view(data)) - } -} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ParamValuesCodec.scala b/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ParamValuesCodec.scala deleted file mode 100644 index a92debc..0000000 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ParamValuesCodec.scala +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.scodec - -import java.nio.charset.Charset - -import _root_.scodec.bits.{BitVector, ByteVector} -import _root_.scodec.codecs._ -import _root_.scodec.{Attempt, Codec, DecodeResult, SizeBound} -import io.rdbc.pgsql.core.messages.frontend.{BinaryDbValue, DbValue, NullDbValue, TextualDbValue} - -class ParamValuesCodec(implicit charset: Charset) extends Codec[List[DbValue]] { - def sizeBound: SizeBound = SizeBound.unknown - - def encode(params: List[DbValue]): Attempt[BitVector] = { - val lenBits = pgInt16.encode(params.length) - - val paramFormatAttempts = params.map { - case _: NullDbValue | _: TextualDbValue => pgInt16.encode(0) - case _: BinaryDbValue => pgInt16.encode(1) - } - val paramFormatBits = concatAttempts(paramFormatAttempts) - - val paramValueAttempts: List[Attempt[BitVector]] = params.map { - case _: NullDbValue => pgInt32.encode(-1) - case TextualDbValue(value, _) => variableSizeBytes(pgInt32, pgStringNonTerminated).encode(value) //TODO check whether not cstring - case BinaryDbValue(value, _) => variableSizeBytes(pgInt32, bytes).encode(ByteVector.view(value)) - } - - val paramValueBits = concatAttempts(paramValueAttempts) - - concatAttempts(Traversable(lenBits, paramFormatBits, lenBits, paramValueBits)) - } - - def decode(bits: BitVector): Attempt[DecodeResult[List[DbValue]]] = ??? 
//TODO - - private def concatAttempts(attempts: Traversable[Attempt[BitVector]]): Attempt[BitVector] = { - attempts.foldLeft(Attempt.successful(BitVector.empty)) { (accAttempt, attempt) => - accAttempt.flatMap(accBits => attempt.map(bits => accBits ++ bits)) - } - } -} \ No newline at end of file diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoder.scala b/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoder.scala deleted file mode 100644 index 9d7db16..0000000 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/ScodecEncoder.scala +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.scodec - -import java.nio.charset.Charset - -import io.rdbc.pgsql.core.codec.Encoder -import io.rdbc.pgsql.core.messages.frontend._ -import io.rdbc.pgsql.scodec.msg.frontend._ - -object ScodecEncoder extends Encoder { - override def encode(msg: PgFrontendMessage)(implicit charset: Charset): Array[Byte] = { - val codec = msg match { - case m: StartupMessage => startup.upcast[PgFrontendMessage] - case m: Bind => bind.upcast[PgFrontendMessage] - case m: Describe => describe.upcast[PgFrontendMessage] - case m: Execute => execute.upcast[PgFrontendMessage] - case m: Parse => parse.upcast[PgFrontendMessage] - case m: PasswordMessage => password.upcast[PgFrontendMessage] - case m: Query => query.upcast[PgFrontendMessage] - case m: CancelRequest => cancelRequest.upcast[PgFrontendMessage] - case m: CloseStatement => closeStatement.upcast[PgFrontendMessage] - case m: ClosePortal => closePortal.upcast[PgFrontendMessage] - case Terminate => terminate.upcast[PgFrontendMessage] - case Flush => flush.upcast[PgFrontendMessage] - case Sync => sync.upcast[PgFrontendMessage] - } - - codec.encode(msg).require.toByteArray - } -} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/backend/package.scala b/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/backend/package.scala deleted file mode 100644 index 4c61f97..0000000 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/backend/package.scala +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.scodec.msg - -import java.nio.charset.Charset - -import _root_.scodec.Codec -import _root_.scodec.codecs._ -import io.rdbc.pgsql.core.messages.backend._ -import io.rdbc.pgsql.core.messages.backend.auth.{AuthBackendMessage, AuthOk, AuthRequestMd5} -import io.rdbc.pgsql.scodec._ - -package object backend { - - - def pgBackendMessage(implicit charset: Charset): Codec[PgBackendMessage] = { - //TODO creating this discriminator list for each backend msg is a major bottleneck, cache for each charset? - discriminatorFallback( - left = unknown, - right = discriminated[PgBackendMessage].by(byte) - .typecase('S', parameterStatus) - .typecase('R', auth) - .typecase('Z', readyForQuery) - .typecase('T', rowDescription) - .typecase('D', dataRow) - .typecase('C', commandComplete) - .typecase('K', backendKeyData) - .typecase('1', parseComplete) - .typecase('2', bindComplete) - .typecase('3', closeComplete) - .typecase('I', emptyQueryResponse) - .typecase('E', error) - .typecase('N', notice) - .typecase('s', portalSuspended) - .typecase('n', noData) - .typecase('t', parameterDescription) - ).xmapc(_.fold(identity, identity)) { - case u: UnknownBackendMessage => Left(u) - case r => Right(r) - } - } - - val noData: Codec[NoData.type] = pgSingletonMsg(NoData) - - val closeComplete: Codec[CloseComplete.type] = pgSingletonMsg(CloseComplete) - - val portalSuspended: Codec[PortalSuspended.type] = pgSingletonMsg(PortalSuspended) - - val parseComplete: Codec[ParseComplete.type] = pgSingletonMsg(ParseComplete) - - val bindComplete: Codec[BindComplete.type] = pgSingletonMsg(BindComplete) - - val emptyQueryResponse: Codec[EmptyQueryResponse.type] = pgSingletonMsg(EmptyQueryResponse) - - def commandComplete(implicit charset: Charset): Codec[CommandComplete] = pgHeadlessMsg { - pgString.xmap[CommandComplete]( - message => { - if 
(CommandComplete.RowCountMessages.exists(rowCountMsg => message.startsWith(rowCountMsg))) { - val (constant, rowsStr) = message.splitAt(message.lastIndexOf(" ") + 1) - CommandComplete(constant, Some(rowsStr.toInt)) - } else { - CommandComplete(message, None) - } - }, { - case CommandComplete(message, None) => message - case CommandComplete(message, Some(rowCount)) => s"$message $rowCount" - } - ) - } - - def parameterStatus(implicit charset: Charset): Codec[ParameterStatus] = pgHeadlessMsg { - pgParam(pgString).xmap({ - case (key, value) => ParameterStatus(key, value) - }, p => (p.key, p.value)) - } - - def dataRow(implicit charset: Charset): Codec[DataRow] = pgHeadlessMsg { - vectorOfN(pgInt16.withContext("columns"), fieldValue).as[DataRow] - } - - val unknown: Codec[UnknownBackendMessage] = { - { - ("head" | byte) :: - ("body" | variableSizeBytes(pgInt32, bytesArr, 4)) - }.as[UnknownBackendMessage] - } - - val readyForQuery: Codec[ReadyForQuery] = pgHeadlessMsg { - discriminated.by(byte) - .typecase('I', provide(ReadyForQuery(TxStatus.Idle))) - .typecase('E', provide(ReadyForQuery(TxStatus.Failed))) - .typecase('T', provide(ReadyForQuery(TxStatus.Active))) - //TODO error handling - } - - val authRequestMd5: Codec[AuthRequestMd5] = { - bytesArr(4).withContext("md5 salt").as[AuthRequestMd5].withToString("AuthRequestMd5") - } - - val auth: Codec[AuthBackendMessage] = pgHeadlessMsg { - discriminated[AuthBackendMessage].by(pgInt32) - .typecase(0, provide[AuthBackendMessage](AuthOk)) - .typecase(5, authRequestMd5) //TODO more auth mechanisms here, TODO on discrimination fail return meaningful err - } - - def rowDescription(implicit charset: Charset): Codec[RowDescription] = pgHeadlessMsg { - vectorOfN(pgInt16.withContext("fieldCount"), fieldDescription).as[RowDescription] - } - - val backendKeyData: Codec[BackendKeyData] = pgHeadlessMsg { - { - ("pid" | pgInt32) :: - ("key" | pgInt32) - }.as[BackendKeyData] - } - - val parameterDescription: Codec[ParameterDescription] 
= pgHeadlessMsg { - vectorOfN(pgInt16.withContext("paramCount"), oid) - .as[ParameterDescription] - } - - def error(implicit charset: Charset): Codec[StatusMessage.Error] = status(StatusMessage.error) - - def notice(implicit charset: Charset): Codec[StatusMessage.Notice] = status(StatusMessage.notice) - - private def status[A <: StatusMessage](creator: Map[Byte, String] => A)(implicit charset: Charset): Codec[A] = pgHeadlessMsg { - pgParamMap(byte).withContext("fields") - .xmap(creator, - msg => ??? /* { - msg => msg.fields.map { - case (fieldType, fieldVal) => (fieldType.code, fieldVal) - } - } TODO encoding*/ - ) - } - -} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/frontend/package.scala b/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/frontend/package.scala deleted file mode 100644 index 610f8c7..0000000 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/msg/frontend/package.scala +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.scodec.msg - -import java.nio.charset.Charset - -import _root_.scodec.Codec -import _root_.scodec.codecs._ -import io.rdbc.pgsql.core.messages.frontend._ -import io.rdbc.pgsql.scodec.{ParamValuesCodec, ReturnFieldFormatsCodec, _} - -package object frontend { - - def bind(implicit charset: Charset): Codec[Bind] = pgHeadedMsg('B') { - { - ("portal" | pgStringOption) :: - ("statement" | pgStringOption) :: - ("paramValues" | new ParamValuesCodec) :: - ("resultColumns" | ReturnFieldFormatsCodec) - }.as[Bind] - } - - def describe(implicit charset: Charset): Codec[Describe] = pgHeadedMsg('D') { - { - ("describeType" | { - discriminated[DescribeType].by(byte) - .typecase('S', provide(PreparedStatementType)) - .typecase('P', provide(PortalType)) - }) :: ("optionalName" | pgStringOption) - }.as[Describe] - } - - def execute(implicit charset: Charset): Codec[Execute] = pgHeadedMsg('E') { - { - ("portalName" | pgStringOption) :: - ("fetchSize" | pgInt32Option) - }.as[Execute] - } - - val flush: Codec[Flush.type] = pgSingletonHeadedMsg('F', Flush) - - def parse(implicit charset: Charset): Codec[Parse] = pgHeadedMsg('P') { - { - ("preparedStmt" | pgStringOption) :: - ("query" | pgString) :: - ("paramTypes" | vectorOfN(pgInt16, oid)) - }.as[Parse] - } - - val password: Codec[PasswordMessage] = pgHeadedMsg('p') { - { - "credentials" | bytesArr - }.as[PasswordMessage] - }.withToString("PasswordMessage") - - def query(implicit charset: Charset): Codec[Query] = pgHeadedMsg('Q') { - ("query" | pgString).as[Query] - } - - def startup(implicit charset: Charset): Codec[StartupMessage] = { - val ver3_0 = pgInt32.unit(196608) - - pgHeadlessMsg( - { - ("protocol version" | ver3_0) ~> - pgString.unit("user") ~> ("user" | pgString) :: - pgString.unit("database") ~> ("database" | pgString) :: - ("options" | pgParamMap(pgString)) - }.as[StartupMessage] - ).withToString("StartupMessage") - } - - val terminate: Codec[Terminate.type] = pgSingletonHeadedMsg('X', 
Terminate) - - val sync: Codec[Sync.type] = pgSingletonHeadedMsg('S', Sync) - - def cancelRequest: Codec[CancelRequest] = { - pgHeadlessMsg( - { - ("cancel code" | pgInt32.unit(80877102)) ~> - ("process ID" | pgInt32) :: - ("secret key" | pgInt32) - }.as[CancelRequest] - ).withToString("CancelRequest") - } - - def closeStatement(implicit charset: Charset): Codec[CloseStatement] = pgHeadedMsg('C') { - { - byte.unit('S') ~> ("optionalName" | pgStringOption) - }.as[CloseStatement] - } - - def closePortal(implicit charset: Charset): Codec[ClosePortal] = pgHeadedMsg('C') { - { - byte.unit('P') ~> ("optionalName" | pgStringOption) - }.as[ClosePortal] - } - -} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/package.scala b/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/package.scala deleted file mode 100644 index 63b7e77..0000000 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/package.scala +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql - -import java.nio.charset.Charset - -import _root_.scodec._ -import _root_.scodec.bits.{BitVector, ByteVector} -import _root_.scodec.codecs._ -import io.rdbc.pgsql.core.messages.backend.{FieldDescription, Header} -import io.rdbc.pgsql.core.messages.data.DbValFormat.{BinaryDbValFormat, TextualDbValFormat} -import io.rdbc.pgsql.core.messages.data.{DataType, DbValFormat, FieldValue, Oid} - -package object scodec { - - def terminated[A](terminator: BitVector, codec: Codec[A]): Codec[A] = new TerminatedCodec[A](terminator, codec).withToString("terminated") - - def nulTerminated[A](codec: Codec[A]): Codec[A] = terminated(BitVector.lowByte, codec).withToString("nul terminated") - - def pgString(implicit charset: Charset) = nulTerminated(string(charset)).withToString("pgString") - - def pgStringNonTerminated(implicit charset: Charset) = string(charset).withToString("pgStringNonTerminated") - - implicit val pgInt64 = int64.withToString("pgInt64") - implicit val pgInt32 = int32.withToString("pgInt32") - implicit val pgInt16 = int16.withToString("pgInt16") - - implicit def pgStringOption(implicit charset: Charset) = pgString.xmap[Option[String]](s => { - if (s.isEmpty) None - else Some(s) - }, { - case Some(s) => s - case None => "" - }) - - implicit val pgInt16Option = pgInt16.xmap[Option[Int]](i => { - if (i == 0) None - else Some(i) - }, { - case Some(i) => i - case None => 0 - }) - - implicit val pgInt32Option = pgInt32.xmap[Option[Int]](i => { - if (i == 0) None - else Some(i) - }, { - case Some(i) => i - case None => 0 - }) - - def pgParam[K](keyCodec: Codec[K])(implicit charset: Charset): Codec[(K, String)] = { - ("key" | keyCodec) ~ - ("value" | pgString) - }.withToString("pgParam") - - def pgParamMap[K](keyCodec: Codec[K])(implicit charset: Charset): Codec[Map[K, String]] = new PgMapCodec[K](pgParam(keyCodec)) - - - def fieldDescription(implicit charset: Charset): Codec[FieldDescription] = { - { - ("name" | pgString) :: - ("tableOid" | 
oidOption) :: - ("columnAttr" | pgInt16Option) :: - ("dataType" | dataType) :: - ("fieldFormatCode" | dbValFormat) - }.as[FieldDescription].withToString("FieldDescription") - } - - val fieldValue: Codec[FieldValue] = FieldValueCodec - - val header: Codec[Header] = { - { - ("header" | ignore(8)) ~> - ("msgLength" | pgInt32) - }.as[Header] - } - - val dbValFormat: Codec[DbValFormat] = { - discriminated[DbValFormat].by(pgInt16) - .subcaseP(0)({ case t@TextualDbValFormat => t })(provide(TextualDbValFormat)) - .subcaseP(1)({ case b@BinaryDbValFormat => b })(provide(BinaryDbValFormat)) - } - - val oid: Codec[Oid] = uint32.as[Oid].withToString("pgOid") - - val oidOption: Codec[Option[Oid]] = oid.xmap[Option[Oid]](oidVal => { - if (oidVal.code == 0) None - else Some(oidVal) - }, { - case Some(oidVal) => oidVal - case None => Oid(0L) - }) - - val dataType: Codec[DataType] = { - { - ("oid" | oid) :: - ("size" | pgInt16) :: - ("modifier" | pgInt32) - }.as[DataType] - } - - def bytesArr(n: Int): Codec[Array[Byte]] = bytes(n).xmap(_.toArray, ByteVector.view) - - val bytesArr: Codec[Array[Byte]] = bytes.xmap(_.toArray, ByteVector.view) - -} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDecimal.scala b/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDecimal.scala deleted file mode 100644 index 47242ab..0000000 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgDecimal.scala +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package io.rdbc.pgsql.scodec.types - -import io.rdbc.pgsql.core.SessionParams -import io.rdbc.pgsql.core.types.PgDecimal -import io.rdbc.sapi.SqlNumeric -import scodec.Attempt.{Failure, Successful} -import scodec.bits.BitVector -import scodec.codecs._ -import scodec.{Codec, _} -import shapeless.{::, HNil} - -object PgDecimalCodec extends Codec[SqlNumeric] { - - private sealed trait PgDecSign - private object PgDecSign { - case object Negative extends PgDecSign - case object Positive extends PgDecSign - case object NaN extends PgDecSign - } - - private case class DecimalParts(integer: String, fraction: String) - - private val signCodec = uint16.exmap[PgDecSign](//TODO this can be done via discriminator or sth - { - case 0x4000 => Successful(PgDecSign.Negative) - case 0x0000 => Successful(PgDecSign.Positive) - case 0xC000 => Successful(PgDecSign.NaN) - case unknownSignCode => Failure(Err(s"Value '$unknownSignCode' is not a valid decimal sign code")) - }, { - case PgDecSign.Negative => Successful(0x4000) - case PgDecSign.Positive => Successful(0x0000) - case PgDecSign.NaN => Successful(0xC000) - } - ) - - private val headerCodec = { - ("digitsCount" | uint16) :: - ("firstDigitWeight" | uint16) :: - ("sign" | signCodec) :: - ("scale" | uint16) - } - - private val NaNBits = headerCodec.encode(0 :: 0 :: PgDecSign.NaN :: 0 :: HNil) - private val DigitLength = 4 - private val Int16Size = 2 - private val BitsInByte = 8 - - val sizeBound = SizeBound.atLeast(Int16Size * DigitLength * BitsInByte) - - def decode(bits: BitVector): Attempt[DecodeResult[SqlNumeric]] = { - headerCodec.decode(bits).flatMap { headerResult => - val (digitsCount :: firstDigitWeight :: sign :: scale :: HNil) = headerResult.value - vectorOfN(provide(digitsCount), int16).decode(headerResult.remainder).map { digitsResult => - if (sign == PgDecSign.NaN) { - DecodeResult(SqlNumeric.NaN, 
digitsResult.remainder) - } else { - val weight1 = firstDigitWeight + 1 - - /* convert digits to 1000-base and apply a weight adjustment */ - val digits = digitsResult.value.padTo(weight1, 0) - val digitStrs = digits.map(digit => "%04d".format(digit)) - - val dp = if (digitStrs.size > weight1) { - DecimalParts( - integer = digitStrs.slice(0, weight1).mkString, - fraction = digitStrs.slice(weight1, digitStrs.size).mkString - ) - } else DecimalParts(integer = digitStrs.mkString, fraction = "") - - val bigDecStr = { - val signStr = if (sign == PgDecSign.Negative) "-" else "" - val integerTrimmed = dp.integer.dropWhile(_ == '0') - val fractionPadded = dp.fraction.padTo(scale, '0') - signStr + integerTrimmed + (if (!fractionPadded.isEmpty) "." + fractionPadded else "") - } - - DecodeResult(SqlNumeric.Val(BigDecimal(bigDecStr)), digitsResult.remainder) - } - } - } - } - - def encode(value: SqlNumeric): Attempt[BitVector] = { - value match { - case SqlNumeric.NaN => NaNBits - case SqlNumeric.NegInfinity | SqlNumeric.PosInfinity => Failure(Err("Cannot encode infinity as a PostgreSQL decimal")) - case SqlNumeric.Val(bigDec) => - val dp = prepareDecimalParts(bigDec) - val mergedNoDot = padInteger(dp.integer + dp.fraction) - - encode( - sign = if (bigDec >= 0) PgDecSign.Positive else PgDecSign.Negative, - weight = dp.integer.length / DigitLength, - scale = bigDec.scale, - digits = mergedNoDot.grouped(mergedNoDot.length / DigitLength).map(_.toInt).toVector - ) - } - } - - private def prepareDecimalParts(bigDecimal: BigDecimal): DecimalParts = { - val bigDecStr = bigDecimal.bigDecimal.toPlainString.toList.dropWhile(_ == '-') - val (integer, fraction) = bigDecStr.span(_ != '.') - - DecimalParts( - integer = padInteger(integer.mkString), - fraction = padFraction(fraction.drop(1).mkString) //drop is to drop a dot - ) - } - - private def padInteger(num: String): String = { - val digitCount = math.ceil(num.length.toDouble / DigitLength).toInt - val padNeeded = (digitCount * 
DigitLength) - num.length - ("0" * padNeeded) + num - } - - private def padFraction(fraction: String): String = { - val zeroTrimmed = fraction.reverse.dropWhile(_ == '0').reverse - val digitCount = math.ceil(zeroTrimmed.length.toDouble / DigitLength).toInt - zeroTrimmed.padTo(digitCount * DigitLength, '0') - } - - private def encode(sign: PgDecSign, weight: Int, scale: Int, digits: Vector[Int]): Attempt[BitVector] = { - (headerCodec ~ vectorOfN(provide(digits.size), int16)).encode( - ( - digits.size :: weight :: sign :: scale :: HNil, - digits - ) - ) - } -} - -object ScodecPgDecimal extends PgDecimal with ScodecPgType[SqlNumeric] with CommonCodec[SqlNumeric] { - def codec(implicit sessionParams: SessionParams): Codec[SqlNumeric] = PgDecimalCodec -} diff --git a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgType.scala b/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgType.scala deleted file mode 100644 index 4ad668b..0000000 --- a/rdbc-pgsql-scodec/src/main/scala/io/rdbc/pgsql/scodec/types/ScodecPgType.scala +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.scodec.types - -import io.rdbc.pgsql.core.SessionParams -import io.rdbc.pgsql.core.types.PgType -import scodec.Attempt.{Failure, Successful} -import scodec.Codec -import scodec.bits.BitVector - -trait ScodecPgType[T] extends PgType[T] { - - def decodeCodec(implicit sessionParams: SessionParams): Codec[T] - def encodeCodec(implicit sessionParams: SessionParams): Codec[T] - - override def toObj(binaryVal: Array[Byte])(implicit sessionParams: SessionParams): T = decodeCodec.decodeValue(BitVector.view(binaryVal)) match { - case Successful(value) => value - case Failure(err) => throw new RuntimeException(err.messageWithContext) //TODO DecodeException - } - - override def toPgBinary(obj: T)(implicit sessionParams: SessionParams): Array[Byte] = encodeCodec.encode(obj) match { - case Successful(value) => value.toByteArray - case Failure(err) => throw new RuntimeException(err.messageWithContext) //TODO EncodeException - } -} diff --git a/rdbc-pgsql-transport-netty/src/main/resources/reference.conf b/rdbc-pgsql-transport-netty/src/main/resources/reference.conf new file mode 100644 index 0000000..1824f63 --- /dev/null +++ b/rdbc-pgsql-transport-netty/src/main/resources/reference.conf @@ -0,0 +1,19 @@ +rdbc.pgsql.netty.defaults { + + transport { + type = "os-default" + + epoll { + threads = 0 + } + + nio { + threads = 0 + } + } + + channel-options { + SO_KEEPALIVE = true + } + +} diff --git a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/ChannelOptionValue.scala b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/ChannelOptionValue.scala new file mode 100644 index 0000000..0279b3a --- /dev/null +++ b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/ChannelOptionValue.scala @@ -0,0 +1,21 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.rdbc.pgsql.transport.netty + +import io.netty.channel.ChannelOption + +case class ChannelOptionValue[T](option: ChannelOption[T], value: T) diff --git a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/EventLoopGroupExecutionContext.scala b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/EventLoopGroupExecutionContext.scala deleted file mode 100644 index 230a958..0000000 --- a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/EventLoopGroupExecutionContext.scala +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2016 Krzysztof Pado - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.rdbc.pgsql.transport.netty - -import java.util.concurrent.RejectedExecutionException - -import io.netty.channel.EventLoopGroup - -import scala.concurrent.ExecutionContext - -class EventLoopGroupExecutionContext(eventLoopGroup: EventLoopGroup, fallbackEc: ExecutionContext) extends ExecutionContext { - - def execute(runnable: Runnable): Unit = { - if (eventLoopGroup.isShuttingDown) { - fallbackEc.execute(runnable) - } else { - try { - eventLoopGroup.execute(runnable) - } catch { - case _: RejectedExecutionException => fallbackEc.execute(runnable) - } - } - } - - def reportFailure(cause: Throwable): Unit = throw cause -} diff --git a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/EventLoopGroupScheduler.scala b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/EventLoopGroupScheduler.scala index f3618ba..e1e1c02 100644 --- a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/EventLoopGroupScheduler.scala +++ b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/EventLoopGroupScheduler.scala @@ -17,15 +17,21 @@ package io.rdbc.pgsql.transport.netty import io.netty.channel.EventLoopGroup -import io.rdbc.pgsql.core.scheduler.{ScheduledTask, TaskScheduler} +import io.rdbc.pgsql.core.internal.scheduler.{ScheduledTask, TaskScheduler} import scala.concurrent.duration.FiniteDuration -class EventLoopGroupScheduler(eventLoopGroup: EventLoopGroup) extends TaskScheduler { +private[netty] class EventLoopGroupScheduler(eventLoopGroup: EventLoopGroup) extends TaskScheduler { + def schedule(delay: FiniteDuration)(action: => Unit): ScheduledTask = { - val fut = eventLoopGroup.schedule(new Runnable() { - def run() = action - }, delay.length, delay.unit) - new FutureScheduledTask(fut) + val fut = eventLoopGroup.schedule(runnable(action), delay.length, delay.unit) + new NettyScheduledTask(fut) + } + + /* Scala 2.11 compat */ + private def runnable(action: => Unit): Runnable = { + 
new Runnable() { + def run(): Unit = action + } } } diff --git a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyChannelWriter.scala b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyChannelWriter.scala index 99d780b..95a4120 100644 --- a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyChannelWriter.scala +++ b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyChannelWriter.scala @@ -16,30 +16,35 @@ package io.rdbc.pgsql.transport.netty -import com.typesafe.scalalogging.StrictLogging import io.netty.channel.Channel import io.rdbc.pgsql.core.ChannelWriter import io.rdbc.pgsql.core.exception.PgChannelException -import io.rdbc.pgsql.core.messages.frontend.PgFrontendMessage +import io.rdbc.pgsql.core.pgstruct.messages.frontend.PgFrontendMessage +import io.rdbc.util.Logging import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NonFatal -class NettyChannelWriter(ch: Channel)(implicit ec: ExecutionContext) extends ChannelWriter with StrictLogging { +private[netty] class NettyChannelWriter(ch: Channel) + (implicit ec: ExecutionContext) + extends ChannelWriter + with Logging { def write(msgs: PgFrontendMessage*): Future[Unit] = { - msgs.foldLeft(Future.successful(())) { (_, msg) => - logger.trace(s"Writing message '$msg' to channel ${ch.id()}") - ch.write(msg).scalaFut - }.recoverWith { - case NonFatal(ex) => Future.failed(PgChannelException(ex)) - } + msgs + .foldLeft(Future.successful(())) { (_, msg) => + logger.trace(s"Writing message '$msg' to channel ${ch.id()}") + ch.write(msg).scalaFut.map(_ => ()) + } + .recoverWith { + case NonFatal(ex) => Future.failed(new PgChannelException(ex)) + } } def close(): Future[Unit] = { - logger.trace(s"Closing channel ${ch.id()}") - ch.close().scalaFut.recoverWith { - case NonFatal(ex) => Future.failed(PgChannelException(ex)) + logger.debug(s"Closing channel ${ch.id()}") + ch.close().scalaFut.map(_ 
=> ()).recoverWith { + case NonFatal(ex) => Future.failed(new PgChannelException(ex)) } } @@ -48,7 +53,7 @@ class NettyChannelWriter(ch: Channel)(implicit ec: ExecutionContext) extends Cha try { ch.flush() } catch { - case NonFatal(ex) => throw PgChannelException(ex) + case NonFatal(ex) => throw new PgChannelException(ex) } } } diff --git a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnFactoryConfig.scala b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnFactoryConfig.scala new file mode 100644 index 0000000..2635bba --- /dev/null +++ b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnFactoryConfig.scala @@ -0,0 +1,144 @@ +/* + * Copyright 2016 Krzysztof Pado + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.rdbc.pgsql.transport.netty + +import java.net.InetSocketAddress + +import akka.stream.ActorMaterializerSettings +import com.typesafe.config.{Config, ConfigFactory} +import io.netty.channel._ +import io.netty.channel.epoll.{Epoll, EpollEventLoopGroup, EpollSocketChannel} +import io.netty.channel.nio.NioEventLoopGroup +import io.netty.channel.socket.SocketChannel +import io.netty.channel.socket.nio.NioSocketChannel +import io.rdbc.ImmutSeq +import io.rdbc.pgsql.core.auth.{Authenticator, UsernamePasswordAuthenticator} +import io.rdbc.pgsql.core.codec.{DecoderFactory, EncoderFactory} +import io.rdbc.pgsql.core.types.PgTypesProvider +import io.rdbc.pgsql.core.util.concurrent.{BlockLockFactory, LockFactory} +import io.rdbc.pgsql.scodec.types.ScodecPgTypesProvider +import io.rdbc.pgsql.scodec.{ScodecDecoderFactory, ScodecEncoderFactory} +import io.rdbc.sapi.TypeConvertersProvider +import io.rdbc.typeconv.StandardTypeConvertersProvider + +import scala.concurrent.ExecutionContext +import scala.concurrent.duration._ + +object NettyPgConnFactoryConfig { + + def apply(username: String, password: String): NettyPgConnFactoryConfig = { + apply(new UsernamePasswordAuthenticator(username, password), username, username) + } + + def apply(host: String, port: Int, username: String, password: String): NettyPgConnFactoryConfig = { + apply(username, password) + .withHost(host) + .withPort(port) + } + + def apply(authenticator: Authenticator, dbRole: String, dbName: String): NettyPgConnFactoryConfig = { + val tconfig = ConfigFactory.load() + + NettyPgConnFactoryConfig( + address = InetSocketAddress.createUnresolved("localhost", 5432), + dbRole = dbRole, + dbName = dbName, + authenticator = authenticator, + maxBatchSize = 100L, + typeConvertersProviders = Vector(new StandardTypeConvertersProvider), + pgTypesProviders = Vector(new ScodecPgTypesProvider), + msgDecoderFactory = new ScodecDecoderFactory, + msgEncoderFactory = new ScodecEncoderFactory, + writeTimeout = 
30.seconds, + channelFactory = defaultChannelFactory, + eventLoopGroup = defaultEventLoopGroup, + channelOptions = Vector(ChannelOptionValue(ChannelOption.SO_KEEPALIVE, java.lang.Boolean.TRUE)), + actorSystemConfig = tconfig, + actorMaterializerSettings = ActorMaterializerSettings(tconfig.getConfig("akka.stream.materializer")), + lockFactory = new BlockLockFactory, + ec = ExecutionContext.global + ) + } + + private def defaultChannelFactory: ChannelFactory[SocketChannel] = { + if (Epoll.isAvailable) new ReflectiveChannelFactory(classOf[EpollSocketChannel]) + else new ReflectiveChannelFactory(classOf[NioSocketChannel]) + } + + private def defaultEventLoopGroup: EventLoopGroup = { + if (Epoll.isAvailable) new EpollEventLoopGroup() + else new NioEventLoopGroup() + } +} + +case class NettyPgConnFactoryConfig(address: InetSocketAddress, + dbRole: String, + dbName: String, + authenticator: Authenticator, + maxBatchSize: Long, + typeConvertersProviders: ImmutSeq[TypeConvertersProvider], + pgTypesProviders: ImmutSeq[PgTypesProvider], + msgDecoderFactory: DecoderFactory, + msgEncoderFactory: EncoderFactory, + writeTimeout: FiniteDuration, + channelFactory: ChannelFactory[_ <: Channel], + eventLoopGroup: EventLoopGroup, + channelOptions: ImmutSeq[ChannelOptionValue[_]], + actorSystemConfig: Config, + actorMaterializerSettings: ActorMaterializerSettings, + lockFactory: LockFactory, + ec: ExecutionContext) { + + def withHost(host: String): NettyPgConnFactoryConfig = { + copy( + address = InetSocketAddress.createUnresolved(host, address.getPort) + ) + } + + def withPort(port: Int): NettyPgConnFactoryConfig = { + copy( + address = InetSocketAddress.createUnresolved(address.getHostString, port) + ) + } + + def withUserPassAuth(user: String, password: String): NettyPgConnFactoryConfig = { + copy( + authenticator = new UsernamePasswordAuthenticator(user, password), + dbRole = user, + dbName = user + ) + } + + def withChannelOption[A](channelOption: ChannelOption[A], value: A): 
NettyPgConnFactoryConfig = { + copy( + channelOptions = channelOptions.toVector :+ ChannelOptionValue(channelOption, value) + ) + } + + def withTypeConvertersProvider[A](typeConvertersProvider: TypeConvertersProvider): NettyPgConnFactoryConfig = { + copy( + typeConvertersProviders = typeConvertersProviders.toVector :+ typeConvertersProvider + ) + } + + def withPgTypesProvider[A](pgTypesProvider: PgTypesProvider): NettyPgConnFactoryConfig = { + copy( + pgTypesProviders = pgTypesProviders.toVector :+ pgTypesProvider + ) + } + +} diff --git a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnection.scala b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnection.scala index 7e6853c..fff4c75 100644 --- a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnection.scala +++ b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnection.scala @@ -19,44 +19,44 @@ package io.rdbc.pgsql.transport.netty import java.nio.charset.Charset import akka.stream.Materializer -import com.typesafe.scalalogging.StrictLogging import io.netty.channel.{ChannelHandlerContext, SimpleChannelInboundHandler} import io.rdbc.pgsql.core._ -import io.rdbc.pgsql.core.messages.backend._ -import io.rdbc.pgsql.core.scheduler.TaskScheduler -import io.rdbc.pgsql.core.types.PgTypeRegistry -import io.rdbc.sapi._ - -import scala.concurrent.{ExecutionContext, Future} - -class NettyPgConnection(pgTypeRegistry: PgTypeRegistry, - rdbcTypeConvRegistry: TypeConverterRegistry, - out: ChannelWriter, - decoder: PgMsgDecoderHandler, - encoder: PgMsgEncoderHandler, - ec: ExecutionContext, - scheduler: TaskScheduler, - requestCanceler: (BackendKeyData) => Future[Unit], - streamMaterializer: Materializer) - extends PgConnection(pgTypeRegistry, rdbcTypeConvRegistry, out, ec, scheduler, requestCanceler, streamMaterializer) - with StrictLogging { +import 
io.rdbc.pgsql.core.internal.scheduler.TaskScheduler +import io.rdbc.pgsql.core.pgstruct.messages.backend._ + +import scala.concurrent.ExecutionContext + +private[netty] class NettyPgConnection(config: PgConnectionConfig, + out: ChannelWriter, + decoder: PgMsgDecoderHandler, + encoder: PgMsgEncoderHandler, + ec: ExecutionContext, + scheduler: TaskScheduler, + requestCanceler: RequestCanceler, + streamMaterializer: Materializer) + extends AbstractPgConnection( + config = config, + out = out, + ec = ec, + scheduler = scheduler, + requestCanceler = requestCanceler, + streamMaterializer = streamMaterializer) { val handler = new SimpleChannelInboundHandler[PgBackendMessage] { - override def channelRead0(ctx: ChannelHandlerContext, msg: PgBackendMessage): Unit = { - onMessage(msg) + def channelRead0(ctx: ChannelHandlerContext, msg: PgBackendMessage): Unit = { + handleBackendMessage(msg) } override def exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable): Unit = { - logger.error("Unhandled exception occurred", cause) - doRelease(cause) + handleFatalError("Unhandled exception occurred in channel handler", cause) } } - protected def clientCharsetChanged(charset: Charset): Unit = { - encoder.charset = charset + protected def handleClientCharsetChange(charset: Charset): Unit = { + encoder.changeCharset(charset) } - protected def serverCharsetChanged(charset: Charset): Unit = { - decoder.charset = charset + protected def handleServerCharsetChange(charset: Charset): Unit = { + decoder.changeCharset(charset) } -} \ No newline at end of file +} diff --git a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnectionFactory.scala b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnectionFactory.scala index 78d79fb..bc60cdf 100644 --- a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnectionFactory.scala +++ 
b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyPgConnectionFactory.scala @@ -16,193 +16,226 @@ package io.rdbc.pgsql.transport.netty -import java.net.{InetSocketAddress, SocketAddress} import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicBoolean import akka.actor.ActorSystem import akka.stream.ActorMaterializer -import com.typesafe.scalalogging.StrictLogging import io.netty.bootstrap.Bootstrap -import io.netty.channel.ChannelOption.SO_KEEPALIVE import io.netty.channel._ -import io.netty.channel.epoll.{Epoll, EpollEventLoopGroup, EpollSocketChannel} import io.netty.channel.group.DefaultChannelGroup -import io.netty.channel.nio.NioEventLoopGroup -import io.netty.channel.socket.SocketChannel -import io.netty.channel.socket.nio.NioSocketChannel import io.netty.handler.codec.LengthFieldBasedFrameDecoder import io.netty.handler.timeout.WriteTimeoutHandler import io.netty.util.concurrent.GlobalEventExecutor -import io.rdbc.ImmutSeq -import io.rdbc.api.exceptions.{RdbcException, UncategorizedRdbcException} -import io.rdbc.pgsql.core.PgConnection -import io.rdbc.pgsql.core.auth.{Authenticator, UsernamePasswordAuthenticator} -import io.rdbc.pgsql.core.codec.{DecoderFactory, EncoderFactory} -import io.rdbc.pgsql.core.messages.backend.BackendKeyData -import io.rdbc.pgsql.core.messages.frontend.{CancelRequest, Terminate} +import io.rdbc.api.exceptions.RdbcException +import io.rdbc.pgsql.core.exception.{PgDriverInternalErrorException, PgUncategorizedException} +import io.rdbc.pgsql.core.pgstruct.messages.backend.BackendKeyData +import io.rdbc.pgsql.core.pgstruct.messages.frontend.{CancelRequest, Terminate} import io.rdbc.pgsql.core.types.PgTypeRegistry -import io.rdbc.pgsql.scodec.types.ScodecBuiltInTypes -import io.rdbc.pgsql.scodec.{ScodecDecoderFactory, ScodecEncoderFactory} +import io.rdbc.pgsql.core.{AbstractPgConnection, PgConnectionConfig} import io.rdbc.sapi.{ConnectionFactory, TypeConverterRegistry} -import 
io.rdbc.typeconv.BuiltInConverters +import io.rdbc.util.Logging -import scala.concurrent.duration._ -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.Future import scala.util.control.NonFatal -case class ChannelOptionValue[T](option: ChannelOption[T], value: T) +object NettyPgConnectionFactory extends Logging { -object NettyPgConnectionFactory { - def apply(host: String, port: Int, username: String, password: String, database: String): NettyPgConnectionFactory = { - new NettyPgConnectionFactory( - remoteAddr = InetSocketAddress.createUnresolved(host, port), - dbUser = username, - dbName = database, - authenticator = new UsernamePasswordAuthenticator(username, password), - rdbcTypeConvRegistry = BuiltInConverters, - pgTypeConvRegistry = ScodecBuiltInTypes, - msgDecoderFactory = ScodecDecoderFactory, - msgEncoderFactory = ScodecEncoderFactory, - writeTimeout = 10.seconds, - channelFactory = defaultChannelFactory, - eventLoopGroup = defaultEventLoopGroup, - channelOptions = Vector(ChannelOptionValue[java.lang.Boolean](SO_KEEPALIVE, true)), - fallbackExecutionContext = ExecutionContext.global) //TODO + def apply(config: NettyPgConnFactoryConfig): NettyPgConnectionFactory = { + new NettyPgConnectionFactory(config) } - private val defaultChannelFactory: ChannelFactory[SocketChannel] = { - if (Epoll.isAvailable) new ReflectiveChannelFactory(classOf[EpollSocketChannel]) - else new ReflectiveChannelFactory(classOf[NioSocketChannel]) + def apply(host: String, + port: Int, + username: String, + password: String): NettyPgConnectionFactory = { + apply( + NettyPgConnFactoryConfig(host, port, username, password) + ) } - private val defaultEventLoopGroup: EventLoopGroup = { - if (Epoll.isAvailable) new EpollEventLoopGroup() - else new NioEventLoopGroup() - } } -class NettyPgConnectionFactory protected(remoteAddr: SocketAddress, - dbUser: String, - dbName: String, - authenticator: Authenticator, - rdbcTypeConvRegistry: TypeConverterRegistry, - 
pgTypeConvRegistry: PgTypeRegistry, - msgDecoderFactory: DecoderFactory, - msgEncoderFactory: EncoderFactory, - writeTimeout: FiniteDuration, - channelFactory: ChannelFactory[_ <: Channel], - eventLoopGroup: EventLoopGroup, - channelOptions: ImmutSeq[ChannelOptionValue[_]], - fallbackExecutionContext: ExecutionContext) - extends ConnectionFactory with StrictLogging { - - thisFactory => - - val typeConverterRegistry = rdbcTypeConvRegistry - private val scheduler = new EventLoopGroupScheduler(eventLoopGroup) - private implicit val ec = new EventLoopGroupExecutionContext(eventLoopGroup, fallbackExecutionContext) - //TODO are you sure? - private val openChannels = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE) //TODO really global? - - private implicit val actorSystem = ActorSystem("RdbcPgSystem") - //TODO unique system name?, TODO configurable system - private implicit val streamMaterializer = ActorMaterializer() - private val shutDown = new AtomicBoolean(false) - - def connection(): Future[PgConnection] = { +class NettyPgConnectionFactory protected(val config: NettyPgConnFactoryConfig) + extends ConnectionFactory + with Logging { + + private[this] implicit val ec = config.ec + + private[this] val pgTypes = { + PgTypeRegistry(config.pgTypesProviders.flatMap(_.types)) + } + + private[this] val typeConverters = { + TypeConverterRegistry { + config.typeConvertersProviders.flatMap(_.typeConverters) + } + } + + private[this] val scheduler = { + new EventLoopGroupScheduler(config.eventLoopGroup) + } + + private[this] val openChannels = { + new DefaultChannelGroup(GlobalEventExecutor.INSTANCE) //TODO really global? 
+ } + + private[this] val actorSystem = { + ActorSystem("rdbc-pgsql-netty", config.actorSystemConfig) + } + + private[this] val streamMaterializer = { + ActorMaterializer(config.actorMaterializerSettings)(actorSystem) + } + + private[this] val shutDown = new AtomicBoolean(false) + + private class ConnChannelInitializer extends ChannelInitializer[Channel] { + @volatile private var _maybeConn = Option.empty[NettyPgConnection] + + def maybeConn: Option[NettyPgConnection] = _maybeConn + + def initChannel(ch: Channel): Unit = { + val decoderHandler = new PgMsgDecoderHandler(config.msgDecoderFactory) + val encoderHandler = new PgMsgEncoderHandler(config.msgEncoderFactory) + + ch.pipeline().addLast(framingHandler) + ch.pipeline().addLast(decoderHandler) + ch.pipeline().addLast(encoderHandler) + ch.pipeline().addLast(new WriteTimeoutHandler(config.writeTimeout.toSeconds.toInt)) + + val conn = pgConnection(ch, decoderHandler, encoderHandler) + ch.pipeline().addLast(conn.handler) + + _maybeConn = Some(conn) + } + + override def channelActive(ctx: ChannelHandlerContext): Unit = { + openChannels.add(ctx.channel()) + super.channelActive(ctx) + } + } + + def connection(): Future[AbstractPgConnection] = traced { if (!shutDown.get()) { - var conn: NettyPgConnection = null - val bootstrap = new Bootstrap() - .group(eventLoopGroup) - .channelFactory(channelFactory) - .remoteAddress(remoteAddr) - .handler(new ChannelInitializer[Channel] { - def initChannel(ch: Channel): Unit = { - openChannels.add(ch) - val decoderHandler = new PgMsgDecoderHandler(msgDecoderFactory.decoder) - val encoderHandler = new PgMsgEncoderHandler(msgEncoderFactory.encoder) - conn = new NettyPgConnection( - pgTypeConvRegistry, - rdbcTypeConvRegistry, - new NettyChannelWriter(ch), - decoderHandler, encoderHandler, ec, scheduler, thisFactory.abortRequest, streamMaterializer - ) - - ch.pipeline().addLast(framingHandler) - ch.pipeline().addLast(decoderHandler) - ch.pipeline().addLast(encoderHandler) - 
ch.pipeline().addLast(new WriteTimeoutHandler(writeTimeout.toSeconds.toInt)) - ch.pipeline().addLast(conn.handler) - } - }) - - channelOptions.foreach(opt => bootstrap.option(opt.option.asInstanceOf[ChannelOption[Any]], opt.value)) - - val connectionFut = bootstrap.connect().scalaFut.flatMap { _ => - conn.init(dbUser, dbName, authenticator).map(_ => conn) - }.recoverWith { - case ex: RdbcException => Future.failed(ex) - case NonFatal(ex) => Future.failed(new UncategorizedRdbcException(ex.getMessage)) //TODO cause + val initializer = new ConnChannelInitializer + baseBootstrap() + .handler(initializer) + .connect().scalaFut + .flatMap { _ => + val conn = initializer.maybeConn.getOrElse(throw new PgDriverInternalErrorException( + "Channel initializer did not create a connection instance" + )) + conn.init(config.dbRole, config.dbName, config.authenticator).map(_ => conn) + } + .recoverWith { + case ex: RdbcException => Future.failed(ex) + case NonFatal(ex) => + Future.failed(new PgUncategorizedException(ex.getMessage, ex)) + } + } else { + Future.failed(new PgUncategorizedException("The factory is shut down")) + } + } + + def shutdown(): Future[Unit] = traced { + if (shutDown.compareAndSet(false, true)) { + def warn(detail: String): PartialFunction[Throwable, Unit] = { + case NonFatal(ex) => + logger.warn(s"Error occurred during connection factory shutdown: $detail", ex) } - connectionFut.failed.foreach(_ => conn.release()) + for { + _ <- openChannels.writeAndFlush(Terminate).scalaFut + .recover(warn("could not write 'Terminate' message")) + + _ <- openChannels.close().scalaFut + .recover(warn("could not close open channels")) + + _ <- config.eventLoopGroup.shutdownGracefully(0L, 0L, TimeUnit.SECONDS).scalaFut + .recover(warn("could not shutdown event loop group")) - connectionFut + _ <- actorSystem.terminate() + .recover(warn("could not terminate the actor system")) + } yield () } else { - Future.failed(new RuntimeException("The factory is shut down")) //TODO ex type 
+ logger.warn("Shutdown request received for already shut down connection factory") + Future.successful(()) } } - private def abortRequest(bkd: BackendKeyData): Future[Unit] = { - //TODO code dupl - val bootstrap = new Bootstrap() - .group(eventLoopGroup) - .channelFactory(channelFactory) - .remoteAddress(remoteAddr) - .handler(new ChannelInitializer[Channel] { - def initChannel(ch: Channel): Unit = { - ch.pipeline().addLast(new PgMsgEncoderHandler(msgEncoderFactory.encoder)) - } - }) + private def pgConnection(ch: Channel, + decoderHandler: PgMsgDecoderHandler, + encoderHandler: PgMsgEncoderHandler): NettyPgConnection = traced { + val connConfig = PgConnectionConfig( + pgTypes = pgTypes, + typeConverters = typeConverters, + lockFactory = config.lockFactory, + maxBatchSize = config.maxBatchSize + ) + + new NettyPgConnection( + config = connConfig, + out = new NettyChannelWriter(ch), + decoder = decoderHandler, + encoder = encoderHandler, + ec = ec, + scheduler = scheduler, + requestCanceler = abortRequest, + streamMaterializer = streamMaterializer + ) + } - channelOptions.foreach(opt => bootstrap.option(opt.option.asInstanceOf[ChannelOption[Any]], opt.value)) + private def baseBootstrap(): Bootstrap = traced { + val bootstrap = new Bootstrap() + .group(config.eventLoopGroup) + .channelFactory(config.channelFactory) + .remoteAddress(config.address) - val connectFut = bootstrap.connect() - connectFut.scalaFut.flatMap { _ => - connectFut.channel().writeAndFlush(CancelRequest(bkd.pid, bkd.key)).scalaFut.flatMap(_ => - connectFut.channel().close().scalaFut - ) - }.recoverWith { - case NonFatal(ex) => Future.failed(ex) //TODO cause + config.channelOptions.foreach { opt => + bootstrap.option(opt.option.asInstanceOf[ChannelOption[Any]], opt.value) } + + bootstrap + } + + private def abortRequest(bkd: BackendKeyData): Future[Unit] = traced { + baseBootstrap() + .handler { + channelInitializer { ch => + ch.pipeline().addLast(new PgMsgEncoderHandler(config.msgEncoderFactory)) 
+ } + } + .connect().scalaFut + .flatMap { channel => + channel + .writeAndFlush(CancelRequest(bkd.pid, bkd.key)).scalaFut + .flatMap(_ => channel.close().scalaFut) + .map(_ => ()) + } + .recoverWith { + case NonFatal(ex) => Future.failed(new PgUncategorizedException("Could not abort request", ex)) + } } private def framingHandler: LengthFieldBasedFrameDecoder = { + // format: off val lengthFieldLength = 4 new LengthFieldBasedFrameDecoder( - Int.MaxValue, /* max frame length */ - 1, /* length field offset */ - lengthFieldLength, - -1 * lengthFieldLength, /* length adjustment */ - 0 /* initial bytes to strip */ + /* max frame length = */ Int.MaxValue, + /* length field offset = */ 1, + /* length field length = */ lengthFieldLength, + /* length adjustment = */ -1 * lengthFieldLength, + /* initial bytes to strip = */ 0 ) + // format: on } - def shutdown(): Future[Unit] = { - if (shutDown.compareAndSet(false, true)) { - val warn: PartialFunction[Throwable, Unit] = { - case NonFatal(ex) => logger.warn("Error occurred during connection factory shutdown", ex) - } - for { - _ <- openChannels.writeAndFlush(Terminate).scalaFut.recover(warn) - _ <- openChannels.close().scalaFut.recover(warn) - _ <- eventLoopGroup.shutdownGracefully(0L, 0L, TimeUnit.SECONDS).scalaFut.recover(warn) - _ <- actorSystem.terminate().recover(warn) - } yield () - } else { - Future.successful(()) + /* Scala 2.11 compat */ + private def channelInitializer(f: Channel => Unit): ChannelInitializer[Channel] = { + new ChannelInitializer[Channel] { + def initChannel(ch: Channel): Unit = f(ch) } } } diff --git a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/FutureScheduledTask.scala b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyScheduledTask.scala similarity index 75% rename from rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/FutureScheduledTask.scala rename to 
rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyScheduledTask.scala index 03a5bf1..1295548 100644 --- a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/FutureScheduledTask.scala +++ b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/NettyScheduledTask.scala @@ -17,10 +17,11 @@ package io.rdbc.pgsql.transport.netty import io.netty.util.concurrent.ScheduledFuture -import io.rdbc.pgsql.core.scheduler.ScheduledTask +import io.rdbc.pgsql.core.internal.scheduler.ScheduledTask -class FutureScheduledTask(scheduledFuture: ScheduledFuture[_]) extends ScheduledTask { +private[netty] class NettyScheduledTask(scheduledFuture: ScheduledFuture[_]) + extends ScheduledTask { def cancel(): Unit = { - scheduledFuture.cancel(false) + scheduledFuture.cancel(false) // false = don't interrupt if running } } diff --git a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/PgMsgDecoderHandler.scala b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/PgMsgDecoderHandler.scala index 05e72b2..2559813 100644 --- a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/PgMsgDecoderHandler.scala +++ b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/PgMsgDecoderHandler.scala @@ -17,31 +17,34 @@ package io.rdbc.pgsql.transport.netty import java.nio.charset.Charset -import java.util +import java.{util => ju} import com.typesafe.scalalogging.StrictLogging import io.netty.buffer.ByteBuf import io.netty.channel.ChannelHandlerContext import io.netty.handler.codec.ByteToMessageDecoder import io.rdbc.pgsql.core.SessionParams -import io.rdbc.pgsql.core.codec.Decoder +import io.rdbc.pgsql.core.codec.{Decoder, DecoderFactory} +import scodec.bits.ByteVector -protected[netty] class PgMsgDecoderHandler(decoder: Decoder) +private[netty] class PgMsgDecoderHandler(decoderFactory: DecoderFactory) extends ByteToMessageDecoder with StrictLogging { - @volatile 
private var _charset = SessionParams.default.serverCharset - - def charset = _charset - - def charset_=(charset: Charset): Unit = { - logger.debug(s"Server charset changed to '$charset'") - _charset = charset + @volatile private[this] var decoder: Decoder = { + decoderFactory.decoder(SessionParams.default.serverCharset) } - override def decode(ctx: ChannelHandlerContext, in: ByteBuf, out: util.List[AnyRef]): Unit = { + def decode(ctx: ChannelHandlerContext, in: ByteBuf, out: ju.List[AnyRef]): Unit = { val bytes = new Array[Byte](in.readableBytes()) in.readBytes(bytes) - out.add(decoder.decodeMsg(bytes)(charset).msg) + out.add(decoder.decodeMsg(ByteVector.view(bytes)).msg) } + + def changeCharset(charset: Charset): Unit = { + logger.debug(s"Message decoder charset changed to '$charset'") + decoder = decoderFactory.decoder(charset) + } + + //TODO should this override exceptionCaught? } diff --git a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/PgMsgEncoderHandler.scala b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/PgMsgEncoderHandler.scala index 5e45bff..eee8cd6 100644 --- a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/PgMsgEncoderHandler.scala +++ b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/PgMsgEncoderHandler.scala @@ -18,28 +18,30 @@ package io.rdbc.pgsql.transport.netty import java.nio.charset.Charset -import com.typesafe.scalalogging.StrictLogging import io.netty.buffer.ByteBuf import io.netty.channel.ChannelHandlerContext import io.netty.handler.codec.MessageToByteEncoder import io.rdbc.pgsql.core.SessionParams -import io.rdbc.pgsql.core.codec.Encoder -import io.rdbc.pgsql.core.messages.frontend.PgFrontendMessage +import io.rdbc.pgsql.core.codec.{Encoder, EncoderFactory} +import io.rdbc.pgsql.core.pgstruct.messages.frontend.PgFrontendMessage +import io.rdbc.util.Logging -protected[netty] class PgMsgEncoderHandler(encoder: Encoder) - extends 
MessageToByteEncoder[PgFrontendMessage] - with StrictLogging { +private[netty] class PgMsgEncoderHandler(encoderFactory: EncoderFactory) + extends MessageToByteEncoder[PgFrontendMessage] + with Logging { - @volatile private var _charset = SessionParams.default.clientCharset - - def charset = _charset + @volatile private[this] var encoder: Encoder = { + encoderFactory.encoder(SessionParams.default.clientCharset) + } - def charset_=(charset: Charset): Unit = { - logger.debug(s"Client charset changed to '$charset'") - _charset = charset + def encode(ctx: ChannelHandlerContext, msg: PgFrontendMessage, out: ByteBuf): Unit = { + out.writeBytes(encoder.encode(msg).toArray) } - override def encode(ctx: ChannelHandlerContext, msg: PgFrontendMessage, out: ByteBuf): Unit = { - out.writeBytes(encoder.encode(msg)(charset)) + def changeCharset(charset: Charset): Unit = { + logger.debug(s"Message encoder charset changed to '$charset'") + encoder = encoderFactory.encoder(charset) } + + //TODO should this override exceptionCaught? 
} diff --git a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/package.scala b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/package.scala index 88a54b4..bf4bb77 100644 --- a/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/package.scala +++ b/rdbc-pgsql-transport-netty/src/main/scala/io/rdbc/pgsql/transport/netty/package.scala @@ -16,29 +16,43 @@ package io.rdbc.pgsql.transport +import io.netty.channel.{Channel, ChannelFuture} import io.netty.util.concurrent.{GenericFutureListener, Future => NettyFuture} import scala.concurrent.{ExecutionContext, Future, Promise} package object netty { - implicit class NettyFut2Scala[T](nettyFut: NettyFuture[T]) { + private[netty] implicit class NettyFut2Scala[T](nettyFut: NettyFuture[T]) { def scalaFut: Future[T] = { val promise = Promise[T] - nettyFut.addListener(new GenericFutureListener[NettyFuture[T]] { - def operationComplete(future: NettyFuture[T]) = { - if (future.isSuccess) promise.success(future.get()) - else promise.failure(future.cause()) - } + nettyFut.addListener(genericFutureListener { future => + if (future.isSuccess) promise.success(future.get()) + else promise.failure(future.cause()) }) promise.future } + + /* Scala 2.11 compat */ + private def genericFutureListener(callback: NettyFuture[T] => Unit): GenericFutureListener[NettyFuture[T]] = { + new GenericFutureListener[NettyFuture[T]] { + def operationComplete(future: NettyFuture[T]): Unit = { + callback(future) + } + } + } } - implicit class NettyVoidFut2Scala(nettyFut: NettyFuture[Void]) { + private[netty] implicit class NettyVoidFut2Scala(nettyFut: NettyFuture[Void]) { def scalaFut(implicit ec: ExecutionContext): Future[Unit] = { new NettyFut2Scala(nettyFut).scalaFut.map(_ => ()) } } + + private[netty] implicit class NettyChannelFut2Scala(channelFut: ChannelFuture) { + def scalaFut(implicit ec: ExecutionContext): Future[Channel] = { + new NettyFut2Scala(channelFut).scalaFut.map(_ => 
channelFut.channel()) + } + } } diff --git a/scalastyle-config.xml b/scalastyle-config.xml new file mode 100644 index 0000000..fa24dac --- /dev/null +++ b/scalastyle-config.xml @@ -0,0 +1,122 @@ + + rdbc-pgsql style + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + true + + + + + + + + + + COLON, COMMA, RPAREN + + + + + + LPAREN + + + + + + IF, FOR, WHILE, DO, TRY + + + + + + 10 + + + + + + 30 + + + + + + + +