From 5d2be622720baed038d2939c70f2e1876e528f87 Mon Sep 17 00:00:00 2001 From: Miao Zhang Date: Mon, 16 Nov 2020 17:57:03 +0000 Subject: [PATCH] Add support for tunnel multi-port feature --- .github/workflows/ci.yml | 2 +- CMakeLists.txt | 9 +- README.md | 162 ++- ...colGuide.md => V1WebSocketProtocolGuide.md | 4 +- V2WebSocketProtocolGuide.md | 287 ++++ resources/Message.proto | 3 + src/ProxySettings.cpp | 2 +- src/TcpAdapterProxy.cpp | 1166 +++++++++++++---- src/TcpAdapterProxy.h | 182 +-- src/TcpClient.h | 32 + src/TcpConnection.h | 70 + src/TcpServer.h | 39 + src/config/ConfigFile.cpp | 219 ++++ src/config/ConfigFile.h | 23 + src/main.cpp | 182 ++- test/AdapterTests.cpp | 253 +++- test/TestWebsocketServer.cpp | 3 +- 17 files changed, 2183 insertions(+), 455 deletions(-) rename WebsocketProtocolGuide.md => V1WebSocketProtocolGuide.md (99%) create mode 100644 V2WebSocketProtocolGuide.md create mode 100644 src/TcpClient.h create mode 100644 src/TcpConnection.h create mode 100644 src/TcpServer.h create mode 100644 src/config/ConfigFile.cpp create mode 100644 src/config/ConfigFile.h diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c264059..cadb689 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -51,7 +51,7 @@ jobs: run: | mkdir build cd build - cmake .. -DOPENSSL_ROOT_DIR=/usr/local/Cellar/openssl@1.1/1.1.1g/ -DOPENSSL_LIBRARIES=/usr/local/Cellar/openssl@1.1/1.1.1g/lib/ + cmake .. 
-DOPENSSL_ROOT_DIR=/usr/local/Cellar/openssl@1.1/1.1.1h/ -DOPENSSL_LIBRARIES=/usr/local/Cellar/openssl@1.1/1.1.1h/lib/ make ubuntu: runs-on: ubuntu-latest diff --git a/CMakeLists.txt b/CMakeLists.txt index 0ed3289..007d79f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -66,10 +66,10 @@ find_package(Catch2 REQUIRED) ######################################### # Boost dependencies # ######################################### -set_property(GLOBAL PROPERTY Boost_USE_STATIC_LIBS ON) +set_property(GLOBAL PROPERTY Boost_USE_STATIC_LIBS ON) set_property(GLOBAL PROPERTY Boost_USE_DEBUG_RUNTIME OFF) #set_property(GLOBAL PROPERTY Boost_USE_MULTITHREADED ON) -find_package(Boost 1.68.0 REQUIRED COMPONENTS system log log_setup thread program_options date_time) +find_package(Boost 1.68.0 REQUIRED COMPONENTS system log log_setup thread program_options date_time filesystem) include_directories(${Boost_INCLUDE_DIRS}) foreach(BOOST_LIB ${Boost_LIBRARIES}) string(REPLACE ${CMAKE_SHARED_LIBRARY_SUFFIX} ${CMAKE_STATIC_LIBRARY_SUFFIX} BOOST_STATIC_LIB ${BOOST_LIB}) @@ -81,9 +81,10 @@ endforeach() ######################################### file(GLOB ALL_SOURCES ${PROJECT_SOURCE_DIR}/src/*.cpp) +set(UTIL_SOURCE ${PROJECT_SOURCE_DIR}/src/config/ConfigFile.cpp) set(CORE_SOURCES ${PROJECT_SOURCE_DIR}/src/TcpAdapterProxy.cpp ${PROJECT_SOURCE_DIR}/src/ProxySettings.cpp ${PROTO_HDRS} ${PROTO_SRCS}) -set(MAIN_SOURCES ${PROJECT_SOURCE_DIR}/src/main.cpp ${CORE_SOURCES}) -set(TEST_SOURCES ${PROJECT_SOURCE_DIR}/test/AdapterTests.cpp ${CORE_SOURCES} ${PROJECT_SOURCE_DIR}/test/TestWebsocketServer.cpp) +set(MAIN_SOURCES ${PROJECT_SOURCE_DIR}/src/main.cpp ${CORE_SOURCES} ${UTIL_SOURCE}) +set(TEST_SOURCES ${PROJECT_SOURCE_DIR}/test/AdapterTests.cpp ${CORE_SOURCES} ${UTIL_SOURCE} ${PROJECT_SOURCE_DIR}/test/TestWebsocketServer.cpp) add_executable(${AWS_TUNNEL_LOCAL_PROXY_TARGET_NAME} ${MAIN_SOURCES}) add_executable(${AWS_TUNNEL_LOCAL_PROXY_TEST_NAME} ${TEST_SOURCES}) diff --git a/README.md b/README.md 
index 30b84f5..d8a2ec2 100644 --- a/README.md +++ b/README.md @@ -167,33 +167,108 @@ Helpful links: The response of OpenTunnel via the AWS IoT Secure Tunneling management API is acquisition of a pair of client access tokens to use to connect two local proxy clients to the ends of the tunnel. One token is designated for the source local proxy, and the other is for the destination. They must be supplied with the matching local proxy run mode argument, otherwise connecting to the service will fail. Additionally, the region parameter supplied to the local proxy must match the AWS region the tunnel was opened in. In a production configuration, delivery of one or both tokens and launching the local proxy process may be automated. The following sections describe how to run the local proxy on both ends of a tunnel. +### Terms + +V1 local proxy: local proxy uses Sec-WebSocket-Protocol _aws.iot.securetunneling-1.0_ when communicates with AWS IoT Tunneling Service. + +V2 local proxy: local proxy uses Sec-WebSocket-Protocol _aws.iot.securetunneling-2.0_ when communicates with AWS IoT Tunneling Service. + +Source local proxy: local proxy that runs in source mode. + +Destination local proxy: local proxy that runs in destination mode. + + +### Multi-port tunneling feature support +Multi-port tunneling feature allows more than one stream multiplexed on same tunnel. +This feature is only supported with V2 local proxy. If you have some devices that on V1 local proxy, some on V2 local proxy, simply upgrade the local proxy on the source device to V2 local proxy. When V2 local proxy talks to V1 local proxy, the backward compatibility is maintained. For more details, please refer to section [backward compatibility](#backward-compatibility) + +### Service identifier (Service ID) +If you need to use multi-port tunneling feature, service ID is needed to start local proxy. 
A service identifier will be used as the new format to specify the source listening port or destination service when start local proxy. The identifier is like an alias for the source listening port or destination service. For the format requirement of service ID, please refer to AWS public doc [services in DestinationConfig ](https://docs.aws.amazon.com/iot/latest/apireference/API_iot-secure-tunneling_DestinationConfig.html). There is no restriction on how this service ID should be named, as long as it can help uniquely identifying a connection or stream. + +Example 1: _SSH1_ + +You can use the following format: protocol name + connection number. +For example, if two SSH connections needed to be multiplexed over a tunnel , you can choose SSH1 and SSH2 as the service IDs. + +Example 2: _ae5957ef-d6e3-42a5-ba0c-edc667d2b3fb_ + +You can use a UUID to uniquely identify a connection/stream. + +Example 3: _ip-172-31-6-23.us-west-2.compute.internal_ + +You can use remote host name to uniquely identify a stream. + ### Destination service and destination mode local proxy +Destination local proxy is responsible for forwarding application data received from tunnel to destination service. For V1 local proxy, only 1 stream is allowed over the tunnel. With V2 local proxy, more than one streams can be transferred at the same time. For more details, please read section [**Multi-port tunneling feature support**](#multi-port-tunneling-feature-support). -Running the local proxy in destination mode makes it behave like a single TCP client with respect to a listening application that is reachable from the local device. In addition to the client access token, destination mode requires specifying the address and port that must be connected to when an incoming connection request is received over the tunnel. 
This is an example command to run the local proxy in destination mode, on a tunnel created in us-east-1, and forward incoming requests to a locally running application or service on port 3389. +Example 1: ./localproxy -r us-east-1 -d localhost:3389 -t +This is an example command to run the local proxy in destination mode, on a tunnel created in us-east-1, and forward data packets received from the tunnel to a locally running application/service on port 3389. -We recommend starting the destination application or server before starting the destination local proxy to ensure that when the local proxy attempts to connect to the destination port, it will succeed. When the local proxy starts in destination mode, it will first connect to the service, and then begin listening for a new connection request over the tunnel. Upon receiving a request, it will attempt to connect to the configured destination address and port. If successful, it will transmit data between the TCP connection and tunnel bi-directionally. Destination mode can only manage one connection at a time, so if a new connection request is received over the tunnel while a connection is already established, it will close the current TCP connection and establishes a new one. +Example 2: -### Client application and source mode local proxy + ./localproxy -r us-east-1 -d HTTP1=80,SSH1=22 -t +This is an example command to run the local proxy in destination mode, on a tunnel created in us-east-1, and forward: +- data packets belongs to service ID HTTP1 to a locally running application/service on port 80. +- data packets belongs to service ID SSH1 to a locally running application/service on port 22. + +We recommend starting the destination application or server before starting the destination local proxy to ensure that when the local proxy attempts to connect to the destination port, it will succeed. 
When the local proxy starts in destination mode, it will first connect to the service, and then begin listening for a new connection request over the tunnel. Upon receiving a request, it will attempt to connect to the configured destination address and port. If successful, it will transmit data between the TCP connection and tunnel bi-directionally. + +If a new instance of destination local proxy starts, using the same client access token as an existing local proxy that is already connected, the old local proxy will be disconnected. Tunnel connection will be reset and the new tunnel connection will be established with the new instance of the destination local proxy. + +For a multiplexed tunnel, one connection drop or connect will not affect the other connections that share the same tunnel. All connections/streams in a multiplexed tunnel are independent. -Running the local proxy in source mode makes it behave like a single connection TCP server, waiting for a TCP client to connect and then relaying data over that connection through the tunnel. In addition to the client access token, source mode requires choosing an available port for the local proxy to listen to. -This is an example command to run the local proxy in source mode, on a tunnel created in us-east-1, waiting for a connection on port 3389: +### Client application and source mode local proxy +Source local proxy is responsible for relaying application data to the tunnel. For V1 local proxy, only 1 stream is allowed over the tunnel. With V2 local proxy, more than one stream can be transferred at the same time. For more details, please read section [**Multi-port tunneling feature support**](#multi-port-tunneling-feature-support). + +Example 1: ./localproxy -r us-east-1 -s 3389 -t + +This is an example command to run the local proxy in source mode, on a tunnel created in us-east-1, waiting for a connection on port 3389.
+ +Example 2: -When the local proxy starts in source mode, it will first connect to the service, and then begin listening for a new connection on the specified local port and bind address. While the local proxy is running, use the client application (e.g. RemoteDesktopClient, ssh client) to connect to the source local proxy's listening port. After accepting the TCP connection, the local proxy will forward the connection request over the tunnel and immediately transmit data the TCP connection data through the tunnel bidirectionally. Source mode will only accept and manage one connection at a time. If the established TCP connection is terminated for any reason, it will send a disconnect message over the tunnel so the service or server running on the other side can react appropriately. Similarly, if a notification that a disconnect happened on the other side is received by the source local proxy it will close the local TCP connection. Regardless of a local I/O failures, or if a notification of a disconnect comes from the tunnel, after the local TCP connection closes, it will begin listening again on the specified listen port and bind address. + ./localproxy -r us-east-1 -s HTTP1=5555,SSH1=3333 -t + +This is an example command to run the local proxy in source mode, on a tunnel created in us-east-1, + - waiting for a connection on port 5555, for service ID HTTP1. + - waiting for a connection on port 3333, for service ID SSH1. + +When the local proxy starts in source mode, it will first connect to the service, and then begin listening for a new connection on the specified port and bind address. While the local proxy is running, use the client application (e.g. RemoteDesktopClient, ssh client) to connect to the source local proxy's listening port. After accepting the TCP connection, the local proxy will forward the connection request over the tunnel and immediately transmit data the TCP connection data through the tunnel bidirectionally. 
Source mode can manage more than one connection/stream at a time, if V2 local proxy is used. If the established TCP connection is terminated for any reason, it will send a disconnect message over the tunnel so the service or server running on the other side can react appropriately. Similarly, if a notification that a disconnect happened on the other side is received by the source local proxy it will close the local TCP connection. Regardless of local I/O failures, or if a notification of a disconnect comes from the tunnel, after the local TCP connection closes, it will begin listening again on the specified listen port and bind address. * If a new connection request sent over the tunnel results in the remote (destination) side being unable to connect to a destination service, it will send a disconnect message back through the tunnel. The exact timing behavior of this depends on the TCP retry settings of the destination local proxy. +* For a multiplexed tunnel, one connection drop or connect will not affect the other connections that share the same tunnel. All connections/streams in a multiplexed tunnel are independent. ### Stopping the local proxy process -The local proxy process can be stopped using various methods: +The local proxy process can be stopped using various methods: * Sending a SIGTERM signal to the process * Closing a tunnel explicitly via CloseTunnel API. This will result in the local proxy dropping the connection to the service and exiting the process successfully. * A tunnel expires after its lifetime expiry. This will result in the local proxy dropping the connection to the service and exiting the process successfully. +### Backward compatibility +V2 local proxy is able to communicate with V1 local proxy if only one connection needs to be established over the tunnel. This means when you open a tunnel, no more than one service should be passed in the **services** list.
+ +Example 1: + + aws iotsecuretunneling open-tunnel --destination-config thingName=foo,services=SSH1,SSH2 +In this example, two service IDs are used (SSH1 and SSH2). Backward compatibility is NOT supported. + +Example 2: + + aws iotsecuretunneling open-tunnel --destination-config thingName=foo,services=SSH2 + +In this example, one service ID is used (SSH2). Backward compatibility is supported. + +Example 3: + + aws iotsecuretunneling open-tunnel + +In this example, no service ID is used. Backward compatibility is supported. + ### Security Considerations #### Certificate setup @@ -251,10 +326,27 @@ Specifies an explicit endpoint to use to connect to the tunneling service. For s Endpoint region where tunnel exists. You cannot specify this option and **-e/--process-endpoint** together. Either this or **--proxy-endpoint** is required **-s/--source-listen-port [argvalue]** -Directs the local proxy to run in source mode, and listen on the specified port for incoming connections. Either this or **--destination-app** is required +Start local proxy in source mode and sets the mappings between service identifier and listening port. For example: SSH1=5555 or 5555. +* It follows format serviceId1=port1, serviceId2=port2, ... +* If only one port is needed to start local proxy, service identifier is not needed. You can simply pass the port to be used, for example, 5555. +* SSH1=5555 means that local proxy will start listening requests on port 5555 for service ID SSH1. +* The value of service ID and how many service IDs are used needs to match with **services** in open tunnel call. For example: + ```shell script + aws iotsecuretunneling open-tunnel --destination-config thingName=foo,services=SSH1,SSH2 + ``` + Then to start local proxy in source mode, need to use: ```-s SSH1=$port1,SSH2=$port2``` **-d/--destination-app [argvalue]** -Directs the local proxy to run in destination mode, and connect to the specified address which may be specified as _address:port_ or just _port_. 
Address may be specified an IPv4 address or hostname. Either this or **--source-listen-port** is required. +Start local proxy in destination mode and sets the mappings between port and service identifier. For example: SSH1=5555 or 5555. +* It follows format serviceId1=endpoint1, serviceId2=endpoint2, ... +* Endpoint can be IP address:port , port or hostname:port. +* If only one port is needed to start local proxy, service ID is not needed. You can simply pass the port used, for example, 5555. +* An item of the mapping SSH1=5555 means that local proxy will forward data received from the tunnel to TCP port 5555 for service ID SSH1. +* The value of service ID and how many service IDs are used needs to match with **services** in open tunnel call. For example: + ```shell script + aws iotsecuretunneling open-tunnel --destination-config thingName=foo,services=SSH1,SSH2 + ``` + Then to start local proxy in destination mode, need to use: ```-d SSH1=$port1,SSH2=$port2``` **-b/--local-bind-address [argvalue]** Specifies the local bind address (network interface) to use for listening for new connections when running the local proxy in source mode, or the local bind address to use when reaching out to the destination service when running in destination mode @@ -277,6 +369,12 @@ Specifies a file to read command line arguments from. Actual command line argume **-v/--verbose [argvalue]** Specifies the verbosity of the output. Value must be between 0-255, however meaningful values are between 0-6 where 0 = output off, 1 = fatal, 2 = error, 3 = warning, 4 = info [default], 5 = debug, 6 = trace. Any values greater than 6 will be treated the same trace level output. +**-m/--mode [argvalue]** +Specifies the mode local proxy will run. Accepted values are: src, source, dst, destination. + +**--config-dir [argvalue]** +Specifies the configuration directory where service identifier mappings are configured. 
If this parameter is not specified, local proxy will read configuration files from default directory _./config_, under the file path where `localproxy` binary are located. + ### Options set via --config A configuration file can be used to specify any or all of the CLI arguments. If an option is set via a config file and CLI argument, the CLI argument value overrides. Here is an example file named `config.ini`: @@ -296,6 +394,42 @@ To illustrate composition between using a configuration file and actual CLI argu and a local proxy launch command `./localproxy --config config.ini -t foobar` is equivalent to running the local proxy command `./localproxy -c /opt/rootca -r us-west-2 -b ::1 -s 6000 -t foobar` +**NOTE**: Service ID mappings should be configured by using parameter --config-dir, not --config. + +### Options set via --config-dir + +If you want to start local proxy on fixed ports, you can configure these mappings using configuration files. By default, local proxy will read from directory _./config_, under the file path where `localproxy` binary are located. If you need to direct local proxy reads from specific file path, use parameter `--config-dir` to specify the full path of the configuration directory. +You can put multiple files in this directory or organize them into the sub folders. Local proxy will read all the files in this directory and search for the port mapping needed for a tunnel connection. + +**NOTE**: The configuration files will be read once when local proxy starts and will not be read again unless it is restarted. + +#### Sample configuration files on source device +File name: _SSHSource.ini_ + +Content example: + + SSH1=3333 + SSH2=5555 + +This example means: +* Service ID SSH1 is mapped to port 3333. +* Service ID SSH2 is mapped to port 5555. 
+ +#### Sample configuration files on destination device + +Example configuration file on destination device: +File name: _SSHDestination.ini_ + +Content example: + + SSH1=22 + SSH2=10.0.0.1:80 + +This example means: +* Service ID SSH1 is mapped to port 22. +* Service ID SSH2 is mapped to host with IP address 10.0.0.1, port 80. + + ### Options set via environment variables There are a few environment variables that can set configuration options used by the local proxy. Environment variables have lowest priority in specifying options. Config and CLI arguments will always override them @@ -368,3 +502,13 @@ Defines the maximum data size allowed to be carried via a single tunnel message. ### Building local proxy on a windows Follow instructions in [here](windows-localproxy-build.md) to build a local proxy on a windows environment. + +### Limits for multiplexed tunnels +#### Bandwidth limits +If the tunnel multi-port feature is enabled, multiplexed tunnels have the same bandwidth limit as non-multiplexed tunnels. This limit is mentioned in [AWS public doc](https://docs.aws.amazon.com/general/latest/gr/iot_device_management.html) section **AWS IoT Secure Tunneling**, row _Maximum bandwidth per tunnel_. The bandwidth for a multiplexed tunnel is the bandwidth consumed by all active streams that transfer data over the tunnel connection. If you need this limit increased, please reach out to AWS support and ask for a limit increase. + +#### Service ID limits +There are limits on the maximum streams that can be multiplexed on a tunnel connection. This limit is mentioned in [AWS public doc](https://docs.aws.amazon.com/general/latest/gr/iot_device_management.html) section **AWS IoT Secure Tunneling**, row _Maximum services per tunnel_. If you need this limit increased, please reach out to AWS support and ask for a limit increase. 
+ +#### Load balancing in multiplexed streams +If more than one stream is transferred at the same time, local proxy will not load balance between these streams. If you have one stream that is dominating the bandwidth, the other streams sharing the same tunnel connection may see latency of data packet delivery. \ No newline at end of file diff --git a/WebsocketProtocolGuide.md b/V1WebSocketProtocolGuide.md similarity index 99% rename from WebsocketProtocolGuide.md rename to V1WebSocketProtocolGuide.md index 9591b1a..c8b53d5 100644 --- a/WebsocketProtocolGuide.md +++ b/V1WebSocketProtocolGuide.md @@ -1,8 +1,8 @@ -The reference implementation of the local proxy provides features that may require OS facilities not available on all device runtime environments in the industry. This guide provides details about the communication that occurs between the service and client to enable integration without or beyond the local proxy reference implementation choices. +The reference implementation of the local proxy provides features that may require OS facilities not available on all device runtime environments in the industry. This guide provides details about the communication that occurs between the service and client to enable integration without or beyond the local proxy reference implementation choices. This protocol guide is only applicable for v1 local proxy. 
## Core implementation requirements -In order to properly connect with and interpret messages from the AWS IoT Secure Tunneling service, the bare minimum is required: +In order to properly connect with and interpret messages from the AWS IoT Secure Tunneling service, the following is required: **Communications Protocols:** * Websocket protocol ([RFC6455](https://tools.ietf.org/html/rfc6455)) over TCP/IP diff --git a/V2WebSocketProtocolGuide.md b/V2WebSocketProtocolGuide.md new file mode 100644 index 0000000..847f282 --- /dev/null +++ b/V2WebSocketProtocolGuide.md @@ -0,0 +1,287 @@ +The reference implementation of the local proxy provides features that may require OS facilities not available on all device runtime environments in the industry. This guide provides details about the communication that occurs between the service and client to enable integration without or beyond the local proxy reference implementation choices. This protocol guide is only applicable for v2 local proxy. + +## Core implementation requirements + +In order to properly connect with and interpret messages from the AWS IoT Secure Tunneling service, the bare minimum is required: + +**Communications Protocols:** +* Websocket protocol ([RFC6455](https://tools.ietf.org/html/rfc6455)) over TCP/IP +* TLS 1.1+ + +**Data processing** +* ProtocolBuffers library + * Message size requirements are dependent on tunnel peer message sizes + +## Protocol Design + +The AWS IoT Secure Tunneling's usage of WebSocket is in part a subprotocol as defined by [RFC6455](https://tools.ietf.org/html/rfc6455), and there are additional restrictions when communicating with the service called out in this document. The data messages on top of WebSocket use [ProtocolBuffers](https://developers.google.com/protocol-buffers/) with a 2-byte length prefix. The messages themselves carry data and communicate tunnel connectivity information to enable tunnel clients to leverage full duplex communication. 
The protocol is designed to adapt TCP socket operations over a tunnel, but it is not limited to being used only for TCP based client or server applications. It is possible to implement the protocol directly and provide a network library or API to use directly in an application rather than a standalone process. This guide is intended to assist those interested in directly interfacing with the WebSocket layer of AWS IoT Secure Tunneling. This document is not a programming guide so it is expected that you are familiar with the following: + +- AWS IoT Secure Tunneling service and its major concepts. Particularly the local proxy +- HTTP and WebSocket and how to use it in your preferred language and API (connect, send, and receive data) +- ProtocolBuffers and how to use it in your preferred language (generate code, parse messages, create messages) +- Conceptual familiarity with TCP sockets, and ideally API familiarity in your preferred language + +## Connecting to the proxy server and tunnel: WebSocket handshake + +The handshake performed to connect to an AWS IoT Secure Tunneling server is a standard WebSocket protocol handshake with additional requirements on the HTTP request. These requirements ensure proper access to a tunnel given a client access token: + +- The tunneling service only accepts connections secured with TLS 1.1 or higher +- The HTTP path of the upgrade request must be `/tunnel`. Requests made to any other path will result in a 400 HTTP response +- There must be a URL parameter `local-proxy-mode` specifying the tunnel connection (local proxy) mode. The value of this parameter must be `source` or `destination` +- There must be an access token specified in the request either via cookie, or an HTTP request header + - Set the access token via HTTP request header named 'access-token' or via cookie named 'awsiot-tunnel-token' + - Only one token value may be present in the request.
Supplying multiple values for either the access-token header or the cookie, or both combined will cause the handshake to fail. + - Local proxy mode must match the mode of the access token or the handshake will fail. +- The HTTP request size must not exceed 4k bytes in length. Requests larger than this will be rejected +- The 'Sec-WebSocket-Protocol' header must contain at least one valid protocol string based on what is supported by the service + - Valid value: 'aws.iot.securetunneling-2.0' + +An example URI of where to connect is as follows: + +`wss://data.tunneling.iot.us-east-1.amazonaws.com:443` + +The regional endpoint selected must match the region where the OpenTunnel call was made to acquire the client access tokens. + +An example WebSocket handshake request coming from a local proxy: + +``` +GET /tunnel?local-proxy-mode=source HTTP/1.1 +Host: data.tunneling.iot.us-east-1.amazonaws.com +Upgrade: websocket +Connection: upgrade +Sec-WebSocket-Key: 9/h0zvwMEXrg06G+RjnmcA== +Sec-WebSocket-Version: 13 +Sec-WebSocket-Protocol: aws.iot.securetunneling-2.0 +access-token: AQGAAXiVzSmRL1VaJ22G7eRb\_CrPABsAAgABQQAMOTAwNTgyMDkxNTM4AAFUAANDQVQAAQAHYXdzLWttcwBLYXJuOmF3czprbXM6dXMtZWFzdC0xOjcwMTU0NTg5ODcwNzprZXkvMmU4ZTAxMDEtYzE3YS00NjU1LTlhYWQtNjA2N2I2NGVhZWQyALgBAgEAeAJ2EsT4f5oCWm65Y8zRx\_nNaCjcG4FIeNV\_zMyhoOslAVAr521wChjzvogy-2-mxyoAAAB-MHwGCSqGSIb3DQEHBqBvMG0CAQAwaAYJKoZIhvcNAQcBMB4GCWCGSAFlAwQBLjARBAwfBUUjMYI9gDEp0xwCARCAO1VX0NAiSjfU-Ar9PWYaNI5j9v77CxLcucht3tWZd57-Zq3aRQZBM4SQiy-D0Cgv31IfZ8pgWu8asm5FAgAAAAAMAAAQAAAAAAAAAAAAAAAAACniTwIAksExcMygMJ2uHs3\_\_\_\_\_AAAAAQAAAAAAAAAAAAAAAQAAAC9e5K3Isg5gHqO9LYX0geH4hrfthPEUhdrl9ZLksPxcVrk6XC4VugzrmUvEUPuR00J3etgVQZH\_RfxWrVt7Jmg= +User-Agent: localproxy Mac OS 64-bit/boost-1.68.0/openssl-3.0.0/protobuf-3.6.1 +``` + +An example of a handshake request coming from a browser's WebSocket client may specify the following: + +``` +GET /tunnel?local-proxy-mode=source HTTP/1.1 +Host: data.tunneling.iot.us-east-1.amazonaws.com 
+Upgrade: websocket +Connection: upgrade +Sec-WebSocket-Key: 9/h0zvwMEXrg06G+RjnmcA== +Sec-WebSocket-Version: 13 +Sec-WebSocket-Protocol: aws.iot.securetunneling-2.0 +Cookie: awsiot-tunnel-token=AQGAAXiVzSmRL1VaJ22G7eRb\_CrPABsAAgABQQAMOTAwNTgyMDkxNTM4AAFUAANDQVQAAQAHYXdzLWttcwBLYXJuOmF3czprbXM6dXMtZWFzdC0xOjcwMTU0NTg5ODcwNzprZXkvMmU4ZTAxMDEtYzE3YS00NjU1LTlhYWQtNjA2N2I2NGVhZWQyALgBAgEAeAJ2EsT4f5oCWm65Y8zRx\_nNaCjcG4FIeNV\_zMyhoOslAVAr521wChjzvogy-2-mxyoAAAB-MHwGCSqGSIb3DQEHBqBvMG0CAQAwaAYJKoZIhvcNAQcBMB4GCWCGSAFlAwQBLjARBAwfBUUjMYI9gDEp0xwCARCAO1VX0NAiSjfU-Ar9PWYaNI5j9v77CxLcucht3tWZd57-Zq3aRQZBM4SQiy-D0Cgv31IfZ8pgWu8asm5FAgAAAAAMAAAQAAAAAAAAAAAAAAAAACniTwIAksExcMygMJ2uHs3\_\_\_\_\_AAAAAQAAAAAAAAAAAAAAAQAAAC9e5K3Isg5gHqO9LYX0geH4hrfthPEUhdrl9ZLksPxcVrk6XC4VugzrmUvEUPuR00J3etgVQZH\_RfxWrVt7Jmg= +User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 +``` + +On success, an example of a successful handshake response is: + +``` +HTTP/1.1 101 Switching Protocols +Date: Thu, 16 May 2019 20:56:03 GMT +Content-Length: 0 +Connection: upgrade +channel-id: 0ea2b3fffe6adc0e-0000125a-00005adb-c2f218c35b921565-17c807e1 +upgrade: websocket +sec-websocket-accept: akN+XFrGEeDLcMVNKV9HkQCOLaE= +sec-websocket-protocol: aws.iot.securetunneling-2.0 +``` + +The aspects of the response to consider above a standard successful WebSocket handshake response are: + +- The `channel-id` response header is a unique identifier for the WebSocket session with the service. It may be useful when troubleshooting any suspected issues through AWS Support +- The 'sec-websocket-protocol' response header will contain one of the values specified in the request. That the proxy Clients must understand and properly implement the subprotocol returned in this response header to ensure valid communication over the tunnel. + +After a successful WebSocket handshake with the tunneling service, full duplex communication is possible over WebSocket. 
Tunnel communication messages are delivered reliably and in order. + +### Handshake error responses + +* If the handshake HTTP response code is within the 500-599 range, the client should retry using an exponential backoff retry strategy. +* If the handshake HTTP response code is within the 400-499 range, the service is rejecting the client's request, or access to the tunnel is not possible or denied. Do not retry unless the problem is understood and the request changes (i.e. use another region endpoint or different client access token) +* Many handshake error responses will contain the `channel-id` header which may be helpful for AWS Support troubleshooting + +## WebSocket Subprotocol: aws.iot.securetunneling-2.0 + +While connected to the service with this protocol selected, the following restrictions apply or capabilities must be supported by clients. Violations may result in the server closing the connection abnormally, or your WebSocket client interface behaving improperly and crashing: + +- WebSocket frames will not have a payload exceeding 131076 bytes from the service +- The server will not accept WebSocket frames with a payload over 131076 bytes +- WebSocket frames of up to 131076 bytes may be sent to clients + - The peer tunnel clients do not dictate WebSocket frame sizes.
The service may aggregate data and construct frames of different sizes than sent from the tunnel peer
+- The service will respond to WebSocket ping frames with a pong reply containing a copy of the ping frame payload
+    - The local proxy reference implementation uses this to measure server response latency
+    - Clients may need to send ping frames to keep the connection alive
+    - It is not an error for the proxy server to not respond to a ping frame
+- Pong frames sent to the service will not elicit a response
+- Ping/pong frames received by the service are included in bandwidth consumption for traffic rate limiting
+- The server will not normally initiate ping requests to clients, but clients should send a pong reply
+- The proxy server will not send text WebSocket frames. This protocol operates entirely with binary messages. If any text frames are received, clients SHOULD close the WebSocket connection
+- All non-control WebSocket frames sent to the service must be binary
+
+### Protocol behavior model: Tunneling data streams
+
+The core activity during tunneling is sending ProtocolBuffers messages back and forth carrying either data, or messages that manage the connection state (called _control messages_) over the WebSocket connection to the service. This WebSocket connection to the service is synonymous with being connected to the tunnel. The process to support an application data transfer successfully over the tunnel can be divided into three steps.
+
+#### Step 1: Establish tunnel connection and perform validations
+Local proxy will initiate a web socket handshake to connect to the tunnel, using Sec-WebSocket-Protocol _aws.iot.securetunneling-2.0_. The Secure Tunneling service will acknowledge this request after authentication and validation. At this point, we can say the tunnel connection is established.
After this, the Secure Tunneling service will send back control message _SERVICE_IDS_, containing a list of service IDs used in OpenTunnel API call, specified by **services** in [DestinationConfig](https://docs.aws.amazon.com/iot/latest/apireference/API_iot-secure-tunneling_DestinationConfig.html). These service IDs will be used as the source of truth of what service IDs are allowed to start local proxy. Upon receiving these service IDs, local proxy will validate the service IDs provided through either configuration files or command line arguments. A validation failure on service IDs will cause the local proxy to fail to start. Below are two possible cases:
+1. Service IDs received from the AWS IoT Secure Tunneling server do not match service IDs used to start local proxy.
+   For example, in OpenTunnel API call, service IDs SSH1, SSH2 are provided. When local proxy starts, it specifies the service IDs as SSH3, through _-s_ or _-d_ parameter. In this case, since SSH3 does not match SSH1 and SSH2, local proxy will fail to start. Even though there is no enforcement on the naming convention of service ID, the value of service IDs and number of service IDs have to match between OpenTunnel call and local proxy.
+2. Local proxy cannot find the port mapping for all the service IDs.
+   This is more likely to happen in the destination local proxy with invalid configuration files. For example, in OpenTunnel API call, service ID SSH1, SSH2 are provided. However, no port mapping is configured for service ID SSH1, neither through configuration files nor _-d_ parameter. Failing to find a port mapping for SSH1 will cause the local proxy to fail to start, as local proxy does not know where to route the traffic to. For source local proxy, this is unlikely to happen. When source local proxy fails to find port mapping for certain service ID, it will automatically pick an available port to use when it starts.
+ +#### Step 2: Start a stream +Once started successfully, source local proxy will listen for incoming connections on the configured ports. Destination local proxy, on the other hand, will wait for control message _StreamStart_. When client application connecting to a configured listening port, source local proxy will accept the TCP connection and sends a _StreamStart_ message to destination local proxy, for this specific service ID. When preparing to send _StreamStart_ message, source local proxy will also store service ID -> stream ID mapping for book keeping. +If multiple ports are used to start local proxy, each stream will send its own _StreamStart_ message when the TCP connection on the configured port is accepted. A _StreamStart_ message contains _streamID_ and _serviceID_. _serviceID_ helps uniquely identify a service transferred over a tunnel . _streamID_ helps to reset a stream and identify stale data. + +#### Step 3: End to end data transfer over the tunnel + +On receiving a StreamStart, the destination local proxy will update the service ID -> Stream ID mapping and connect to the configured destination service for a service ID. The destination local proxy does not send a reply to the source local proxy on successful connection. Immediately after the source local proxy sends StreamStart and immediately after the destination establishes a valid TCP connection, each side respectively can begin to send and receive incoming messages on the active data stream. When the data stream is closed or disrupted (for the local proxy, this is a TCP close or I/O error on the TCP socket), a StreamReset control message with the currently stored stream ID and its service ID should be sent through the tunnel so the tunnel peer can react appropriately and end the data stream. Control messages associated with a stream should be processed with the same stream ID filter. 
+
+Here are some important things to know for a high-level understanding of tunneling data stream handling:
+
+- The service may use the Service ID to decide how to route traffic between connected tunnel clients.
+    - For example, when local proxy receives a data packet with Service ID SSH1, it will look up the configuration for SSH1 and see which port this service ID is mapped to. If SSH1 is mapped to port 22 on local host, then this data packet will be forwarded to port 22 on local host.
+- The local proxy uses the service ID -> stream ID mapping to check the current active stream ID for a specific service ID.
+- The stream ID validation for a certain stream (service ID) will only be performed on message types _StreamReset_ and _Data_. If a received message fails the stream ID validation, this message is considered to be stale and will be discarded by local proxy.
+- The local proxy, and library clients may use stream ID to determine how to respond to or filter incoming messages
+    - For example: if a source sends a _StreamStart_ with a stream ID of 345 in response to a newly accepted TCP connection, and afterwards receives a _Data_ message marked with stream ID of 565, that data should be ignored. Its origin is tied to a prior connection over the tunnel from the perspective of the tunnel peer that originated it
+    - Another example: if a source local proxy sends a _StreamStart_ with a stream ID of 345 in response to a newly accepted TCP connection, and afterwards receives a _StreamReset_ message marked with stream ID of 565, that message should be ignored. Only a _StreamReset_ with a stream ID of 345 should cause the client to close its local connection
+- Ending a data stream (normally or abnormally) is accomplished by either side sending a _StreamReset_ with the stream ID that is meant to be closed
+- Locally detected network failures are communicated by sending _StreamReset_ over the tunnel using the active stream ID if one is active.
+- If there is a network issue with the WebSocket connection, no control message is necessary to send. However, the active stream should be considered invalid and closed. Reconnect to the tunnel via the service and start a new stream. + + +### Tunneling message frames + +WebSocket binary frames contain a sequence of tunnel frames or messages. Each data message has a **2-byte unsigned short, big endian** data length prefix, followed by sequence of bytes whose length is specified by the data length. These bytes must be parsed into a ProtocolBuffers object that uses the schema shown in this document. Every message received must be processed, and should be processed in order for data stream integrity. If the order of messages is lost or cannot be understood during processing by the client, it should end the data stream with a _StreamReset_. Messages may control the state of the data stream, or it may contain actual stream data. Inspecting the message's type is the first step in processing a message. A single data length + bytes parsed into a ProtocolBuffers message represents an entire tunneling message frame, and the beginning of the next frame's length prefix follows immediately. This is a visual diagram of a single frame: + + |-----------------------------------------------------------------| + | 2-byte data length | N byte ProtocolBuffers message | + |-----------------------------------------------------------------| + +Tunneling message frames are very loosely coupled with WebSocket frames. It is not required that a WebSocket frame contain an entire tunneling message frame. The start and end of a WebSocket frame does not have to be aligned with a tunneling frame and vice versa. A WebSocket frame may contain multiple tunneling frames, or it may contain only a slice of a tunneling frame started in a previous WebSocket frame and will finish in a later WebSocket frame. 
This means that processing the WebSocket data must be done as pure a sequence of bytes that sequentially construct tunneling frames regardless of what the WebSocket fragmentation is. + +Additionally, the WebSocket framing decided by one tunnel peer is not guaranteed to be the same as those received by the other side. For example, the maximum WebSocket frame size in the `aws.iot.securetunneling-2.0` protocol is 131076 bytes, and the service may aggregate data to a point that aggregates multiple messages to this size into a single frame. The tunneling message frames generated by a tunnel peer are maintained by the service and cannot be aggregated or fragmented. This enables known tunnel peers to operate under more restrictive guidelines than what is valid in this protocol guide. One example of this is reducing the maximum payload of a tunneling message to 16kb down from 64kb to enable local proxy implementations to reduce the size of processing buffers. + +### ProtocolBuffers Message Schema + +The data that must be parsed into a ProtocolBuffers message conforms to the following schema: + +``` +syntax = "proto3"; + +package com.amazonaws.iot.securedtunneling; + +option java_outer_classname = "Protobuf"; +option optimize_for = LITE_RUNTIME; + +message Message { + Type type = 1; + int32 streamId = 2; + bool ignorable = 3; + bytes payload = 4; + string serviceId = 5; + repeated string availableServiceIds = 6; + + enum Type { + UNKNOWN = 0; + DATA = 1; + STREAM_START = 2; + STREAM_RESET = 3; + SESSION_RESET = 4; + SERVICE_IDS = 5; + } +} +``` + +Tunneling frames (without the data length prefix) must parse into a _Message_ object and satisfy the following rules: + +- _Type_ field must be set to a non-zero enum value. Due to ProtocolBuffers schema recommendation, the keyword 'required' is not used in the actual schema +- It is invalid for a client connected with mode=destination to send a message with _Type_ = _StreamStart_ over the tunnel. 
+
+- It is invalid for any client to send message types associated with a stream (_StreamStart_, _Data_, _StreamReset_) with a stream ID of 0
+- It is invalid for any client to send _SessionReset_
+- The payload of any message may not contain more than 63kb (64512 bytes) of data.
+- It is invalid to extend the schema with additional fields and send them through the tunnel. The service will close the WebSocket connection if this occurs
+- Avoid negative stream ID numbers for message size efficiency. Stream ID of 0 is invalid
+- It is invalid for any local proxy to send message type _SERVICE_IDS_. It can only be sent from the Secure Tunneling service.
+- Changing the tag numbers of existing fields of ProtocolBuffers will cause backward compatibility issues between V1 and V2 local proxy. For more information, please read [Extending a Protocol Buffer](https://developers.google.com/protocol-buffers/docs/cpptutorial#extending-a-protocol-buffer).
+
+### Backward compatibility
+#### Backward compatibility between V1 and V2 local proxy
+V1 local proxy protocol uses Sec-WebSocket-Protocol _aws.iot.securetunneling-1.0_ when communicating with AWS IoT Tunneling Service.
+V2 local proxy protocol uses Sec-WebSocket-Protocol _aws.iot.securetunneling-2.0_ when communicating with AWS IoT Tunneling Service.
+The communication between V1 and V2 local proxy is supported for a non-multiplexed tunnel.
+- _aws.iot.securetunneling-1.0_ and _aws.iot.securetunneling-2.0_ subprotocols are interoperable.
+- An empty service ID field in a message should be interpreted as service ID field is not present. This is because in protocol buffers _proto3_, there is no way to tell whether a field is set to an empty string or the field is not present at all.
+- Since V1 local proxy doesn't support multiplexing, data transferred using these two subprotocols can not be multiplexed. In that case, V2 local proxy needs to either use a single service ID or not send a service ID at all.
Using V2 local proxy with multiple services to communicate with V1 local proxy is not supported.
+- If V1 local proxy receives a message from V2 local proxy, it will ignore the service ID field.
+- An empty service ID field in a received message should be interpreted as a message sent from V1 local proxy. In that case, V2 local proxy should ignore the service ID field.
+- V2 local proxy is not allowed to alternate between messages with and without service ID field during the lifetime of a tunnel connection. For example, if V2 local proxy sends the first stream start without service ID, then all the subsequent messages should not contain a service ID. And vice versa, if V2 local proxy sends the first stream start with service ID, then all subsequent messages should contain the service ID field.
+#### V2 local proxy falls back to V1 local proxy protocol
+V2 local proxy is allowed to send messages without service ID field either using V1 protocol buffer definition, or simply setting the service ID field to an empty string using a V2 protocol buffer definition. These will be interpreted as messages from V1 local proxy.
+
+
+### Message type reference
+
+#### StreamStart
+
+* _StreamStart_ is the first message sent to start and establish the new and active data stream. For local proxies, this message carries a similar meaning to a TCP SYN packet.
+* When to send
+    * When the source tunnel client wants to initiate a new data stream with the destination, it does this by sending a _StreamStart_ with a temporally unique stream ID and service ID. Stream ID should be chosen in a way that is unlikely to repeat through a tunnel's lifetime. Service ID is determined by which port accepts the TCP connection. For example, if you configure SSH1 to listen on port 5555, SSH2 to listen on port 6666, when the connection from port 5555 is accepted, service ID will be chosen to be SSH1.
+* Behavior on receive: + * Destination local proxy should treat this as a request to initiate a new stream to a configured destination service and establish the given stream ID as the current. + * If the destination mode tunnel client already has an already open/active stream and receives a _StreamStart_, it should consider the current data stream to have closed and immediately start a new active stream with the new stream ID. A _StreamReset_ MAY be sent for the replaced stream ID. + * Source mode tunnel clients SHOULD treat receiving _StreamStart_ as an error and close the active data stream and WebSocket connection. +* Notes + * After the source client sends _StreamStart_, it may immediately send request data and assume the destination will connect. Failure will result in a _StreamReset_ coming back, and success (with data response) results in receiving data on the stream ID +* Example: Message(type=STREAM_START, streamId=1, payload=, serviceId=ssh1, availableServiceIds=, ignorable=) + +#### StreamReset + +* _StreamReset_ messages conveys that the data stream has ended, either in error, or closed intentionally for the tunnel peer. It is also sent to the source tunnel peer if an attempt to establish a new data stream fails on the destination side. +* When to send: + * During a stream's data transmission, if anything happens that makes it impossible to process a data stream's data correctly or in order (I/O error, logic error), a _StreamReset_ should be sent with the active stream ID and valid service ID. + * While attempting to establish a new data stream, if the destination tunnel client fails to establish a local connection, it should send a _StreamReset_ back over the tunnel with the requested stream ID and service ID. 
+* Behavior on receive: + * Both tunnel client modes should respond to a _StreamReset_ message by closing the active data stream or connection when the stream ID matches the current stream + * After closing the current stream, the current stream ID should be unset internally + * The tunnel client SHOULD perform an orderly shutdown of the data stream or connection and flush any local connection buffers before closing + * If the receiver does not have an active stream, it is safe to ignore a _StreamReset_ message +* Notes + * The proxy server may generate _StreamReset_ messages in the following scenarios: + * The tunnel peer is replaced (likely has reconnected) by a new peer bearing a valid access token + * An internal error has disrupted the internal routing for the tunnel +* Example: Message(type=STREAM_RESET, streamId=1, payload=, serviceId=ssh1, availableServiceIds=, ignorable=) + +#### SessionReset + +* _SessionReset_ messages can only originate from Secure Tunneling service if an internal data transmission error is detected +* When to send: + * N/A - tunnel client cannot send this message through the service +* Behavior on receive: + * This message should be handled the same as _StreamReset_ except that it carries no stream ID association so any active stream should be closed +* Notes + * This message type should rarely be observed. + * If the receiver does not have an active stream, it is safe to ignore a _SessionReset_ message +* Example: Message(type=SESSION_RESET, streamId=, payload=, serviceId=, availableServiceIds=, ignorable=) + + +#### Data + +* _Data_ messages carry a payload with a sequence of bytes to write to the active data stream when received by local proxy. When local proxy reads data from its local connection, those bytes should be inserted into the payload of a _Data_ message and sent over a tunnel +* When to send: + * When a tunnel client reads data on the (non-WebSocket) data stream (e.g. 
the TCP connection for the local proxy), it must construct _Data_ messages with the sequence of bytes put into the payload - up to 63kb in size - and set the active stream ID and valid service ID on the message.
+* Behavior on receive:
+    * When a local proxy receives _Data_ messages, it must write the payload data directly to the (non-WebSocket) data stream
+* Example: Message(type=DATA, streamId=1, payload=[byte sequence], serviceId=ssh1, availableServiceIds=, ignorable=)
+
+#### ServiceIDs
+* _ServiceIDs_ messages carry a list of unique service IDs used when opening a tunnel with **services** in [DestinationConfig](https://docs.aws.amazon.com/iot/latest/apireference/API_iot-secure-tunneling_DestinationConfig.html) parameter. It's sent to local proxy for providing the source of truth of what service IDs can be used in local proxy.
+* When to send:
+    * N/A - tunnel client cannot send this message through the service
+* Behavior on receive:
+    * Validate user input. If there is a mismatch between service ID list in OpenTunnel API and local proxy, local proxy will fail to start.
+    * Build or update in-memory ports mapping for book keeping. It will build or update an unordered map, with service ID as the key, and the configured port as the value. In the future, when a data packet is received from the tunnel, local proxy will extract the service ID from the data packet and use this map to determine which port the packet should be forwarded to.
+
+* Example: Message(type=SERVICE_IDS, streamId=, payload=, serviceId=, availableServiceIds=ssh1, ssh2, ignorable=)
+
+
+### Ignorable field
+
+If a message is received and its type is unrecognized, and this field is set to true, it is ok for the tunnel client to ignore the message safely. The tunnel client MAY still treat the unrecognized message as an error out of caution. If this field is unset, it must be considered as false.
diff --git a/resources/Message.proto b/resources/Message.proto index 0ae68af..70ed7e9 100644 --- a/resources/Message.proto +++ b/resources/Message.proto @@ -12,6 +12,8 @@ message Message { int32 streamId = 2; bool ignorable = 3; bytes payload = 4; + string serviceId = 5; + repeated string availableServiceIds = 6; enum Type { UNKNOWN = 0; @@ -19,5 +21,6 @@ message Message { STREAM_START = 2; STREAM_RESET = 3; SESSION_RESET = 4; + SERVICE_IDS = 5; } } diff --git a/src/ProxySettings.cpp b/src/ProxySettings.cpp index c81c8eb..393067c 100644 --- a/src/ProxySettings.cpp +++ b/src/ProxySettings.cpp @@ -48,7 +48,7 @@ namespace aws { namespace iot { namespace securedtunneling { namespace settings bool const DEFAULT_WEB_SOCKET_DATA_ERROR_RETRY = true; char const * const KEY_WEB_SOCKET_SUBPROTOCOL = "tunneling.proxy.websocket.subprotocol"; - std::string const DEFAULT_WEB_SOCKET_SUBPROTOCOL = "aws.iot.securetunneling-1.0"; + std::string const DEFAULT_WEB_SOCKET_SUBPROTOCOL = "aws.iot.securetunneling-2.0"; char const * const KEY_WEB_SOCKET_MAX_FRAME_SIZE = "tunneling.proxy.websocket.max_frame_size"; std::size_t const DEFAULT_WEB_SOCKET_MAX_FRAME_SIZE = DEFAULT_MAX_DATA_FRAME_SIZE * 2; diff --git a/src/TcpAdapterProxy.cpp b/src/TcpAdapterProxy.cpp index 8c7cfb4..9a1d465 100644 --- a/src/TcpAdapterProxy.cpp +++ b/src/TcpAdapterProxy.cpp @@ -24,6 +24,7 @@ #include "TcpAdapterProxy.h" #include "ProxySettings.h" +#include "config/ConfigFile.h" namespace aws { namespace iot { namespace securedtunneling { using boost::asio::io_context; @@ -37,15 +38,22 @@ namespace aws { namespace iot { namespace securedtunneling { using boost::log::trivial::error; using boost::log::trivial::fatal; + using std::uint16_t; + using std::string; + using std::tuple; + char const * const PROXY_MODE_QUERY_PARAM = "local-proxy-mode"; char const * const ACCESS_TOKEN_HEADER = "access-token"; char const * const SOURCE_PROXY_MODE = "source"; char const * const DESTINATION_PROXY_MODE = "destination"; + char const 
* const LOCALHOST_IP = "127.0.0.1"; + std::string const SOURCE_LOCAL_PROXY_PORT_BIND_EXCEPTION = "Source local proxy fails to bind address"; std::uint16_t const DEFAULT_PROXY_SERVER_PORT = 443; - std::set MESSAGE_TYPES_REQUIRING_STREAM_ID { + std::set MESSAGE_TYPES_VALIDATING_STREAM_ID { com::amazonaws::iot::securedtunneling::Message_Type_DATA, - com::amazonaws::iot::securedtunneling::Message_Type_STREAM_RESET }; + com::amazonaws::iot::securedtunneling::Message_Type_STREAM_RESET}; + std::string get_region_endpoint(std::string const ®ion, boost::property_tree::ptree const &settings) { @@ -143,12 +151,8 @@ namespace aws { namespace iot { namespace securedtunneling { tcp_adapter_proxy::tcp_adapter_proxy(ptree const &settings, adapter_proxy_config const &config) : settings{ settings }, adapter_config{ config }, - outgoing_message_buffer{ GET_SETTING(settings, MAX_DATA_FRAME_SIZE) }, incoming_message_buffer{ GET_SETTING(settings, WEB_SOCKET_READ_BUFFER_SIZE) }, - message_parse_buffer{ GET_SETTING(settings, MESSAGE_MAX_SIZE) }, - tcp_write_buffer{ GET_SETTING(settings, TCP_WRITE_BUFFER_SIZE) }, - tcp_read_buffer{ GET_SETTING(settings, TCP_READ_BUFFER_SIZE) }, - web_socket_data_write_buffer{ GET_SETTING(settings, WEB_SOCKET_WRITE_BUFFER_SIZE) } + message_parse_buffer{ GET_SETTING(settings, MESSAGE_MAX_SIZE) } { } tcp_adapter_proxy::~tcp_adapter_proxy() @@ -168,14 +172,13 @@ namespace aws { namespace iot { namespace securedtunneling { } catch (proxy_exception &e) { + if (e.what() == SOURCE_LOCAL_PROXY_PORT_BIND_EXCEPTION) + { + return EXIT_FAILURE; + } if (GET_SETTING(settings, WEB_SOCKET_DATA_ERROR_RETRY)) { BOOST_LOG_SEV(log, error) << "Error from io_ctx::run(): " << e.what(); - if(e.is_local_port_bind_failure() && adapter_config.mode == proxy_mode::SOURCE) { - BOOST_LOG_SEV(log, error) << "Local proxy failed to bind to port " << tac.adapter_config.data_port - << " in source mode. 
Verify the selected port is not already in use and try again."; - return EXIT_FAILURE; - } } else { @@ -186,82 +189,180 @@ namespace aws { namespace iot { namespace securedtunneling { } } - void tcp_adapter_proxy::setup_tcp_socket(tcp_adapter_context &tac) + void tcp_adapter_proxy::initialize_tcp_clients(tcp_adapter_context &tac) + { + BOOST_LOG_SEV(log, trace) << "Initializing tcp clients ..."; + for (auto m: tac.adapter_config.serviceId_to_endpoint_map) + { + string service_id = m.first; + // create new tcp clients if needed + if (tac.serviceId_to_tcp_client_map.find(service_id) == tac.serviceId_to_tcp_client_map.end()) + { + tac.serviceId_to_tcp_client_map[service_id] = tcp_client::create(tac.io_ctx, + GET_SETTING(settings, TCP_WRITE_BUFFER_SIZE), + GET_SETTING(settings, TCP_READ_BUFFER_SIZE), + GET_SETTING(settings, WEB_SOCKET_WRITE_BUFFER_SIZE)); + } + } + } + + void tcp_adapter_proxy::initialize_tcp_servers(tcp_adapter_context &tac) { - BOOST_LOG_SEV(log, trace) << "Setting up tcp socket..."; + BOOST_LOG_SEV(log, trace) << "Initializing tcp servers ..."; + for (auto m: tac.adapter_config.serviceId_to_endpoint_map) + { + string service_id = m.first; + // create new tcp servers if needed + if (tac.serviceId_to_tcp_server_map.find(service_id) == tac.serviceId_to_tcp_server_map.end()) + { + tac.serviceId_to_tcp_server_map[service_id] = tcp_server::create(tac.io_ctx, + GET_SETTING(settings, TCP_WRITE_BUFFER_SIZE), + GET_SETTING(settings, TCP_READ_BUFFER_SIZE), + GET_SETTING(settings, WEB_SOCKET_WRITE_BUFFER_SIZE)); + } + } + } + + void tcp_adapter_proxy::setup_tcp_sockets(tcp_adapter_context &tac) + { + BOOST_LOG_SEV(log, trace) << "Setting up tcp sockets "; + clear_ws_buffers(tac); if (adapter_config.mode == proxy_mode::DESTINATION) { - after_setup_tcp_socket = std::bind(&tcp_adapter_proxy::async_setup_bidirectional_data_transfers, this, std::ref(tac)); - on_recieve_stream_start = std::bind(&tcp_adapter_proxy::async_setup_dest_tcp_socket, this, std::ref(tac)); - 
tac.stream_id = -1; - async_web_socket_read_until_stream_start(tac); + initialize_tcp_clients(tac); + async_setup_destination_tcp_sockets(tac); } else { - after_send_message = std::bind(&tcp_adapter_proxy::async_setup_bidirectional_data_transfers, this, std::ref(tac)); - after_setup_tcp_socket = std::bind(&tcp_adapter_proxy::async_send_stream_start, this, std::ref(tac)); - async_setup_source_tcp_socket(tac); + initialize_tcp_servers(tac); + async_setup_source_tcp_sockets(tac); + } + } + + void tcp_adapter_proxy::setup_tcp_socket(tcp_adapter_context &tac, std::string const & service_id) + { + BOOST_LOG_SEV(log, trace) << "Setting up tcp socket for service id: " << service_id; + tcp_connection::pointer connection = get_tcp_connection(tac, service_id); + if (adapter_config.mode == proxy_mode::DESTINATION) + { + tcp_client::pointer client = tac.serviceId_to_tcp_client_map[service_id]; + client->on_receive_stream_start = std::bind(&tcp_adapter_proxy::async_setup_dest_tcp_socket, this, std::ref(tac), service_id); + client->after_setup_tcp_socket = std::bind(&tcp_adapter_proxy::async_setup_bidirectional_data_transfers, this, std::ref(tac), service_id); + async_web_socket_read_until_stream_start(tac, service_id); + } + else + { + tcp_server::pointer server = tac.serviceId_to_tcp_server_map[service_id]; + server->connection_->after_send_message = std::bind(&tcp_adapter_proxy::async_setup_bidirectional_data_transfers, this, std::ref(tac), service_id); + server->after_setup_tcp_socket = std::bind(&tcp_adapter_proxy::async_send_stream_start, this, std::ref(tac), service_id); + std::shared_ptr retry_config = + std::make_shared(tac.io_ctx, + GET_SETTING(settings, TCP_CONNECTION_RETRY_COUNT), + GET_SETTING(settings, TCP_CONNECTION_RETRY_DELAY_MS), + nullptr); + retry_config->operation = std::bind(&tcp_adapter_proxy::async_setup_source_tcp_socket_retry, this, std::ref(tac), retry_config, service_id); + async_setup_source_tcp_socket_retry(tac, retry_config, service_id); } } void 
tcp_adapter_proxy::setup_web_socket(tcp_adapter_context &tac) { BOOST_LOG_SEV(log, trace) << "Setting up web socket..."; - after_setup_web_socket = std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac)); + after_setup_web_socket = std::bind(&tcp_adapter_proxy::async_wait_for_service_ids, this, std::ref(tac)); async_setup_web_socket(tac); } - void tcp_adapter_proxy::tcp_socket_reset(tcp_adapter_context &tac, std::function then_what) + void tcp_adapter_proxy::tcp_socket_reset_all(tcp_adapter_context &tac, std::function post_reset_operation) + { + for (auto m: tac.adapter_config.serviceId_to_endpoint_map) + { + string service_id = m.first; + tcp_adapter_proxy::tcp_socket_reset(tac, service_id, post_reset_operation); + } + } + + tcp_connection::pointer tcp_adapter_proxy::get_tcp_connection(tcp_adapter_context &tac, string service_id) + { + tcp_connection::pointer connection_ptr; + if (tac.adapter_config.mode == proxy_mode::SOURCE) + { + if (tac.serviceId_to_tcp_server_map.find(service_id) == tac.serviceId_to_tcp_server_map.end()) + { + BOOST_LOG_SEV(log, debug) << "No serviceId_to_tcp_server mapping for service_id: " << service_id; + return connection_ptr; + } + connection_ptr = tac.serviceId_to_tcp_server_map[service_id]->connection_; + } + else if (tac.adapter_config.mode == proxy_mode::DESTINATION) + { + if (tac.serviceId_to_tcp_client_map.find(service_id) == tac.serviceId_to_tcp_client_map.end()) + { + BOOST_LOG_SEV(log, debug) << "No serviceId_to_tcp_client mapping for service_id: " << service_id; + + return connection_ptr; + } + connection_ptr = tac.serviceId_to_tcp_client_map[service_id]->connection_; + } + else + { + throw proxy_exception((boost::format("Unknown mode: %1%") % tac.adapter_config.mode).str()); + } + return connection_ptr; + } + + void tcp_adapter_proxy::tcp_socket_reset(tcp_adapter_context &tac, string service_id, std::function post_reset_operation) { - if (!tac.tcp_socket.is_open()) + tcp_connection::pointer connection = 
get_tcp_connection(tac, service_id); + if (!connection->socket_.is_open()) { BOOST_LOG_SEV(log, debug) << "Ignoring explicit reset because TCP socket is already closed"; return; } - BOOST_LOG_SEV(log, debug) << "Handling explicit reset by closing TCP"; + BOOST_LOG_SEV(log, debug) << "Handling explicit reset by closing TCP for service id: " << service_id; - tac.tcp_socket.shutdown(boost::asio::ip::tcp::socket::shutdown_receive); + connection->socket_.shutdown(boost::asio::ip::tcp::socket::shutdown_receive); std::shared_ptr web_socket_write_buffer_drain_complete = std::make_shared(false); std::shared_ptr tcp_write_buffer_drain_complete = std::make_shared(false); //ignore next tcp read error if a read operation is happening when TCP gets closed - on_tcp_error = - [this](boost::system::error_code const &ec) + connection->on_tcp_error = + [=](boost::system::error_code const &ec) { //We *may* want to confirm that the error code is actually operation canceled or aborted due to TCP close as //any unexpected errors in this situation perhaps signals something else. But also we may want to ignore all errors //anyways given we know we are closing the tcp socket to create a new one anyways - BOOST_LOG_SEV(this->log, trace) << "Received expected TCP socket error and ignoring it. TCP socket read loop has been canceled"; + BOOST_LOG_SEV(this->log, trace) << "Received expected TCP socket error and ignoring it. 
TCP socket read loop has been canceled for service id: " << service_id; }; - on_data_message = std::bind(&tcp_adapter_proxy::ignore_message_and_stop, this, std::ref(tac), std::placeholders::_1); - on_control_message = std::bind(&tcp_adapter_proxy::ignore_message_and_stop, this, std::ref(tac), std::placeholders::_1); - on_web_socket_write_buffer_drain_complete = - [this, web_socket_write_buffer_drain_complete, tcp_write_buffer_drain_complete, then_what]() + connection->on_data_message = std::bind(&tcp_adapter_proxy::ignore_message_and_stop, this, std::ref(tac), std::placeholders::_1); + connection->on_control_message = std::bind(&tcp_adapter_proxy::ignore_message_and_stop, this, std::ref(tac), std::placeholders::_1); + connection->on_web_socket_write_buffer_drain_complete = + [=]() { BOOST_LOG_SEV(this->log, trace) << "Post-reset web socket drain complete"; *web_socket_write_buffer_drain_complete = true; if (*tcp_write_buffer_drain_complete) { BOOST_LOG_SEV(this->log, trace) << "Both socket drains complete."; - then_what(); + post_reset_operation(); } }; - on_tcp_write_buffer_drain_complete = - [this, web_socket_write_buffer_drain_complete, tcp_write_buffer_drain_complete, then_what, &tac]() + connection->on_tcp_write_buffer_drain_complete = + [=, &tac]() { - BOOST_LOG_SEV(this->log, trace) << "Post-reset TCP drain complete. Closing TCP socket"; - BOOST_LOG_SEV(this->log, info) << "Disconnected from: " << tac.tcp_socket.remote_endpoint(); - tac.tcp_socket.close(); + tcp_connection::pointer connection_to_reset = get_tcp_connection(tac, service_id); + BOOST_LOG_SEV(this->log, trace) << "Post-reset TCP drain complete. Closing TCP socket for service id " << service_id; + BOOST_LOG_SEV(this->log, info) << "Disconnected from: " << connection_to_reset->socket().remote_endpoint(); + connection_to_reset->socket_.close(); *tcp_write_buffer_drain_complete = true; if (*web_socket_write_buffer_drain_complete) { BOOST_LOG_SEV(this->log, trace) << "Both socket drains complete. 
Setting up TCP socket again"; - then_what(); + post_reset_operation(); } }; - async_setup_web_socket_write_buffer_drain(tac); - async_tcp_write_buffer_drain(tac); + async_setup_web_socket_write_buffer_drain(tac, service_id); + async_tcp_write_buffer_drain(tac, service_id); } void tcp_adapter_proxy::web_socket_close_and_stop(tcp_adapter_context &tac) @@ -296,27 +397,29 @@ namespace aws { namespace iot { namespace securedtunneling { tac.io_ctx.stop(); } - void tcp_adapter_proxy::tcp_socket_error(tcp_adapter_context &tac, boost::system::error_code const &ec) + void tcp_adapter_proxy::tcp_socket_error(tcp_adapter_context &tac, boost::system::error_code const &ec, string const & service_id) { - BOOST_LOG_SEV(log, debug) << "Handling tcp socket error: " << ec.message(); - - BOOST_LOG_SEV(this->log, info) << "Disconnected from: " << tac.tcp_socket.remote_endpoint(); - tac.tcp_socket.close(); - tcp_write_buffer.consume(tcp_write_buffer.max_size()); + BOOST_LOG_SEV(log, debug) << "Handling tcp socket error for service id: " << service_id << " . 
error message:" << ec.message(); + tcp_connection::pointer connection = get_tcp_connection(tac, service_id); + BOOST_LOG_SEV(this->log, info) << "Disconnected from: " << connection->socket().remote_endpoint(); + connection->socket_.close(); + connection->tcp_write_buffer_.consume(connection->tcp_write_buffer_.max_size()); - on_data_message = std::bind(&tcp_adapter_proxy::ignore_message_and_stop, this, std::ref(tac), std::placeholders::_1); - on_control_message = std::bind(&tcp_adapter_proxy::ignore_message_and_stop, this, std::ref(tac), std::placeholders::_1); + connection->on_data_message = std::bind(&tcp_adapter_proxy::ignore_message_and_stop, this, std::ref(tac), std::placeholders::_1); + connection->on_control_message = std::bind(&tcp_adapter_proxy::ignore_message_and_stop, this, std::ref(tac), std::placeholders::_1); - on_web_socket_write_buffer_drain_complete = [&]() + connection->on_web_socket_write_buffer_drain_complete = [&, service_id]() { - after_send_message = std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac)); - async_send_stream_reset(tac, tac.stream_id); + tcp_connection::pointer socket_connection = get_tcp_connection(tac, service_id); + socket_connection->after_send_message = std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac), service_id); + async_send_stream_reset(tac, service_id); }; - async_setup_web_socket_write_buffer_drain(tac); + async_setup_web_socket_write_buffer_drain(tac, service_id); } void tcp_adapter_proxy::async_send_message(tcp_adapter_context &tac, message const &message) { + boost::beast::flat_buffer outgoing_message_buffer; std::size_t const frame_size = static_cast(message.ByteSizeLong()) + GET_SETTING(settings, DATA_LENGTH_SIZE); void *frame_data = outgoing_message_buffer.prepare(frame_size).data(); @@ -326,76 +429,103 @@ namespace aws { namespace iot { namespace securedtunneling { *reinterpret_cast(frame_data) = boost::endian::native_to_big(data_length); 
message.SerializeToArray(frame_data_msg_offset, static_cast(GET_SETTING(settings, MESSAGE_MAX_SIZE))); outgoing_message_buffer.commit(frame_size); - - tac.is_web_socket_writing = true; - tac.wss->async_write(outgoing_message_buffer.data(), [&](boost::system::error_code const &ec, std::size_t const bytes_sent) - { - tac.is_web_socket_writing = false; - this->outgoing_message_buffer.consume(this->outgoing_message_buffer.max_size()); - if (ec) - { - throw proxy_exception("Error sending web socket message", ec); - } - else - { - BOOST_LOG_SEV(log, trace) << "Sent " << bytes_sent << " bytes over websocket"; - invoke_and_clear_handler(after_send_message); - } - }); + string service_id = message.serviceid(); + async_send_message_to_web_socket(tac, std::make_shared(outgoing_message_buffer), service_id); } - void tcp_adapter_proxy::async_send_stream_start(tcp_adapter_context &tac) + void tcp_adapter_proxy::async_send_stream_start(tcp_adapter_context &tac, string const & service_id) { using namespace com::amazonaws::iot::securedtunneling; - - if (tac.stream_id == -1) + if (!tac.is_service_ids_received) { - tac.stream_id = 1; + std::shared_ptr retry_config = + std::make_shared(tac.io_ctx, + GET_SETTING(settings, TCP_CONNECTION_RETRY_COUNT), + GET_SETTING(settings, TCP_CONNECTION_RETRY_DELAY_MS), + std::bind(&tcp_adapter_proxy::async_send_stream_start, this, std::ref(tac), service_id)); + BOOST_LOG_SEV(log, error) << "No service ids received. Will retry."; + basic_retry_execute(log, retry_config, []() { throw std::runtime_error("Fail all the retries to get service ids before stream start. 
Exit."); }); + return; } - else + std::string src_listening_port = boost::lexical_cast(tac.serviceId_to_tcp_server_map[service_id]->acceptor().local_endpoint().port()); + if (tac.adapter_config.serviceId_to_endpoint_map.find(service_id) == tac.adapter_config.serviceId_to_endpoint_map.end() || + tac.adapter_config.serviceId_to_endpoint_map.at(service_id) != src_listening_port) { - if (tac.stream_id == std::numeric_limits::max()) + throw std::runtime_error((boost::format("Receive incoming connection from non-configured port: %1%") % src_listening_port).str()); + } + + /** + * Initialize stream id to 1. If a mapping exist for a certain service id, it will be overwrite to the value + * from the serviceId_to_streamId_map. + */ + std::int32_t new_stream_id = 1; + + if(tac.serviceId_to_streamId_map.find(service_id) != tac.serviceId_to_streamId_map.end()) + { + std::int32_t old_stream_id = tac.serviceId_to_streamId_map[service_id]; + // Reset old stream id to 0 if it already reaches the max value of current type + if (old_stream_id == std::numeric_limits::max()) { - tac.stream_id = 0; + old_stream_id = 0; } - ++tac.stream_id; + new_stream_id = old_stream_id + 1; } - BOOST_LOG_SEV(log, debug) << "Setting new stream ID to: " << tac.stream_id; + + // Update streamId <-> serviceId mapping for future book keeping + tac.serviceId_to_streamId_map[service_id] = new_stream_id; + + BOOST_LOG_SEV(log, debug) << "Setting new stream ID to: " << new_stream_id << ", service id: " << service_id; + outgoing_message.set_type(Message_Type_STREAM_START); - outgoing_message.set_streamid(tac.stream_id); + outgoing_message.set_serviceid(service_id); + outgoing_message.set_streamid(new_stream_id); outgoing_message.set_ignorable(false); outgoing_message.clear_payload(); - async_send_message(tac, outgoing_message); } - void tcp_adapter_proxy::async_send_stream_reset(tcp_adapter_context &tac, std::int32_t stream_id) + void tcp_adapter_proxy::async_send_stream_reset(tcp_adapter_context &tac, 
std::string const & service_id) { using namespace com::amazonaws::iot::securedtunneling; + BOOST_LOG_SEV(log, trace) << "Reset stream for service id: " << service_id; + if (tac.serviceId_to_streamId_map.find(service_id) == tac.serviceId_to_streamId_map.end()) + { + BOOST_LOG_SEV(log, warning) << "No stream id mapping found for service id " << service_id << " . Skip stream reset."; + return; + } + // NOTE: serviceIds -> streamId mapping will be updated when send/receive stream start, no action needed now. + std::int32_t stream_id = tac.serviceId_to_streamId_map[service_id]; outgoing_message.set_type(Message_Type_STREAM_RESET); + outgoing_message.set_serviceid(service_id); outgoing_message.set_streamid(stream_id); outgoing_message.set_ignorable(false); outgoing_message.clear_payload(); - async_send_message(tac, outgoing_message); } - void tcp_adapter_proxy::async_setup_bidirectional_data_transfers(tcp_adapter_context &tac) + void tcp_adapter_proxy::async_setup_bidirectional_data_transfers(tcp_adapter_context &tac, string const & service_id) { - BOOST_LOG_SEV(log, trace) << "Setting up bi-directional data transfer with stream_id: " << tac.stream_id; - clear_buffers(); - on_tcp_error = nullptr; - on_control_message = std::bind(&tcp_adapter_proxy::handle_control_message_data_transfer, this, std::ref(tac), std::placeholders::_1); - on_data_message = std::bind(&tcp_adapter_proxy::forward_data_message_to_tcp_write, this, std::ref(tac), std::placeholders::_1); + BOOST_LOG_SEV(log, trace) << "Setting up bi-directional data transfer for service id: " << service_id; + // clear tcp_buffers for this stream + tcp_connection::pointer connection = get_tcp_connection(tac, service_id); + if (!connection) + { + BOOST_LOG_SEV(log, trace) << "Null connection pointers, skip"; + return; + } + clear_tcp_connection_buffers(connection); + connection->on_control_message = std::bind(&tcp_adapter_proxy::handle_control_message_data_transfer, this, std::ref(tac), std::placeholders::_1); + 
connection->on_data_message = std::bind(&tcp_adapter_proxy::forward_data_message_to_tcp_write, this, std::ref(tac), std::placeholders::_1); this->async_web_socket_read_loop(tac); - this->async_tcp_socket_read_loop(tac); + this->async_tcp_socket_read_loop(tac, service_id); } - void tcp_adapter_proxy::async_web_socket_read_until_stream_start(tcp_adapter_context &tac) + void tcp_adapter_proxy::async_web_socket_read_until_stream_start(tcp_adapter_context &tac, string const & service_id) { BOOST_LOG_SEV(log, trace) << "Waiting for stream start..."; - on_control_message = std::bind(&tcp_adapter_proxy::async_wait_for_stream_start, this, std::ref(tac), std::placeholders::_1); - on_data_message = std::bind(&tcp_adapter_proxy::ignore_message, this, std::ref(tac), std::placeholders::_1); + tcp_client::pointer client = tac.serviceId_to_tcp_client_map[service_id]; + client->connection_->on_control_message = std::bind(&tcp_adapter_proxy::async_wait_for_stream_start, this, std::ref(tac), std::placeholders::_1); + client->connection_->on_data_message = std::bind(&tcp_adapter_proxy::ignore_message, this, std::ref(tac), std::placeholders::_1); this->async_web_socket_read_loop(tac); } @@ -409,9 +539,10 @@ namespace aws { namespace iot { namespace securedtunneling { long long pong_millis = 0; switch (ws_message_type) { + BOOST_LOG_SEV(log, trace) << "handle_web_socket_control_message, message type: " << static_cast(ws_message_type); case boost::beast::websocket::frame_type::close: - BOOST_LOG_SEV(log, info) << "Web socket close recieved. Code: " << tac.wss->reason().code << "; Reason: " << tac.wss->reason().reason; - tcp_socket_reset(tac, std::bind(&tcp_adapter_proxy::web_socket_close_and_stop, this, std::ref(tac))); + BOOST_LOG_SEV(log, info) << "Web socket close received. 
Code: " << tac.wss->reason().code << "; Reason: " << tac.wss->reason().reason; + tcp_socket_reset_all(tac, std::bind(&tcp_adapter_proxy::web_socket_close_and_stop, this, std::ref(tac))); break; case boost::beast::websocket::frame_type::ping: #ifdef DEBUG @@ -477,7 +608,7 @@ namespace aws { namespace iot { namespace securedtunneling { BOOST_LOG_SEV(log, info) << "Web socket stream already open. Continuing to use existing connection"; if (after_setup_web_socket) { - invoke_and_clear_handler(after_setup_web_socket); + after_setup_web_socket(); } return; } @@ -520,7 +651,8 @@ namespace aws { namespace iot { namespace securedtunneling { BOOST_LOG_SEV(log, info) << "Attempting to establish web socket connection with endpoint wss://" << tac.adapter_config.proxy_host << ":" << tac.adapter_config.proxy_port; //start first async handler which chains into adding the rest BOOST_LOG_SEV(log, trace) << "Resolving proxy host: " << tac.adapter_config.proxy_host; - tac.resolver.async_resolve(tac.adapter_config.proxy_host, boost::lexical_cast(tac.adapter_config.proxy_port), [=, &tac](boost::system::error_code const &ec, tcp::resolver::results_type results) + + tac.wss_resolver.async_resolve(tac.adapter_config.proxy_host, boost::lexical_cast(tac.adapter_config.proxy_port), [=, &tac](boost::system::error_code const &ec, tcp::resolver::results_type results) { if (ec) { @@ -610,7 +742,6 @@ namespace aws { namespace iot { namespace securedtunneling { } BOOST_LOG_SEV(log, debug) << "Web socket subprotocol selected: " << tac.wss_response[boost::beast::http::field::sec_websocket_protocol].to_string(); BOOST_LOG_SEV(log, info) << "Successfully established websocket connection with proxy server: wss://" << tac.adapter_config.proxy_host << ":" << tac.adapter_config.proxy_port; - std::shared_ptr ping_data = std::make_shared(); do_ping_data(tac, *ping_data); std::shared_ptr ping_period = @@ -638,55 +769,65 @@ namespace aws { namespace iot { namespace securedtunneling { }); } - void 
tcp_adapter_proxy::async_tcp_socket_read_loop(tcp_adapter_context & tac) + void tcp_adapter_proxy::async_tcp_socket_read_loop(tcp_adapter_context & tac, string const & service_id) { - if (tac.is_tcp_socket_reading) + BOOST_LOG_SEV(log, trace) << "Begin tcp socket read loop for service id : " << service_id; + tcp_connection::pointer connection = get_tcp_connection(tac, service_id); + if (!connection->socket().is_open()) + { + BOOST_LOG_SEV(log, trace) << "socket for service id : " << service_id << " is not open yet, skip reading"; + return; + } + if (connection->is_tcp_socket_reading_) { #ifdef DEBUG BOOST_LOG_SEV(log, debug) << "Not starting TCP read loop"; #endif } - else if (wss_has_enough_write_buffer_space(tac)) + else if (wss_has_enough_write_buffer_space(connection->web_socket_data_write_buffer_)) { - //max bytes to read not to execeed either the read buffer capacity, or the available space in the web socket write buffer - std::size_t max_bytes_to_read = std::min(web_socket_data_write_buffer.max_size() - web_socket_data_write_buffer.size(), tcp_read_buffer.max_size()); - tac.is_tcp_socket_reading = true; - tac.tcp_socket.async_read_some(tcp_read_buffer.prepare(max_bytes_to_read), - [&](boost::system::error_code const &ec, std::size_t const bytes_read) + //max bytes to read not to exceed either the read buffer capacity, or the available space in the web socket write buffer + std::size_t max_bytes_to_read = std::min(connection->web_socket_data_write_buffer_.max_size() - connection->web_socket_data_write_buffer_.size(), connection->tcp_read_buffer_.max_size()); + connection->is_tcp_socket_reading_ = true; + connection->socket_.async_read_some(connection->tcp_read_buffer_.prepare(max_bytes_to_read), + [&, service_id](boost::system::error_code const &ec, std::size_t const bytes_read) { - tac.is_tcp_socket_reading = false; + BOOST_LOG_SEV(log, trace) << "Reading from tcp socket for service id " << service_id; + tcp_connection::pointer socket_read_connection = 
get_tcp_connection(tac, service_id); + socket_read_connection->is_tcp_socket_reading_ = false; if (ec) { - if (on_tcp_error) + if (socket_read_connection->on_tcp_error) { - on_tcp_error(ec); - on_tcp_error = nullptr; + socket_read_connection->on_tcp_error(ec); + socket_read_connection->on_tcp_error = nullptr; } else { - tcp_socket_error(tac, ec); + tcp_socket_error(tac, ec, service_id); } } else { - tcp_read_buffer.commit(bytes_read); + socket_read_connection->tcp_read_buffer_.commit(bytes_read); #ifdef DEBUG BOOST_LOG_SEV(log, trace) << "TCP socket read " << bytes_read << " bytes"; #endif - std::size_t bytes_copied = boost::asio::buffer_copy(web_socket_data_write_buffer.prepare(bytes_read), tcp_read_buffer.data(), bytes_read); - tcp_read_buffer.consume(bytes_read); - web_socket_data_write_buffer.commit(bytes_copied); + BOOST_LOG_SEV(log, trace) << "TCP socket read " << bytes_read << " bytes"; + std::size_t bytes_copied = boost::asio::buffer_copy(socket_read_connection->web_socket_data_write_buffer_.prepare(bytes_read), socket_read_connection->tcp_read_buffer_.data(), bytes_read); + socket_read_connection->tcp_read_buffer_.consume(bytes_read); + socket_read_connection->web_socket_data_write_buffer_.commit(bytes_copied); - if (wss_has_enough_write_buffer_space(tac)) + if (wss_has_enough_write_buffer_space(socket_read_connection->web_socket_data_write_buffer_)) { - async_tcp_socket_read_loop(tac); + async_tcp_socket_read_loop(tac, service_id); } else { BOOST_LOG_SEV(log, debug) << "No more space in web socket write buffer or tcp socket is closed. 
Stopping tcp read loop"; } - if (web_socket_data_write_buffer.size() > 0) { - async_setup_web_socket_write_buffer_drain(tac); + if (socket_read_connection->web_socket_data_write_buffer_.size() > 0) { + async_setup_web_socket_write_buffer_drain(tac, service_id); } } }); @@ -715,10 +856,23 @@ namespace aws { namespace iot { namespace securedtunneling { return false; } - bool tcp_adapter_proxy::async_wait_for_stream_start(tcp_adapter_context &tac, message const &message) + bool tcp_adapter_proxy::async_wait_for_service_ids(tcp_adapter_context &tac) { using namespace com::amazonaws::iot::securedtunneling; + BOOST_LOG_SEV(log, trace) << "Waiting for service ids..."; + on_web_socket_control_message = std::bind(&tcp_adapter_proxy::handle_control_message_service_ids, this, std::ref(tac), std::placeholders::_1); + on_web_socket_data_message = std::bind(&tcp_adapter_proxy::ignore_message, this, std::ref(tac), std::placeholders::_1); + after_get_service_ids = std::bind(&tcp_adapter_proxy::setup_tcp_sockets, this, std::ref(tac)); + this->async_web_socket_read_loop_for_service_ids(tac); + return true; + } + bool tcp_adapter_proxy::async_wait_for_stream_start(tcp_adapter_context &tac, message const &message) + { + using namespace com::amazonaws::iot::securedtunneling; + BOOST_LOG_SEV(log, trace) << "Wait for control message stream start, receive message type:" << message.type(); + std::int32_t stream_id = static_cast(message.streamid()); + string service_id = message.serviceid(); switch (message.type()) { case Message_Type_SESSION_RESET: @@ -737,16 +891,30 @@ namespace aws { namespace iot { namespace securedtunneling { #ifdef DEBUG BOOST_LOG_SEV(log, debug) << "Stream start recieved"; #endif - tac.stream_id = static_cast(message.streamid()); - if (!tac.stream_id) + stream_id = static_cast(message.streamid()); + if (!stream_id) { throw proxy_exception("No stream ID set for stream start message!"); } - //now that we have stream start, do what's next - 
invoke_and_clear_handler(on_recieve_stream_start); + BOOST_LOG_SEV(log, debug) << "Received service id :" << service_id << " ,stream id: " << message.streamid(); + // v1 message format does not need to validate service id. Set to the one service id stored in memory. + if (tac.adapter_config.is_v1_message_format) + { + service_id = tac.adapter_config.serviceId_to_endpoint_map.cbegin()->first; + } + else if (tac.adapter_config.serviceId_to_endpoint_map.find(service_id) == tac.adapter_config.serviceId_to_endpoint_map.end()) + { + throw proxy_exception((boost::format("Invalid service id received for stream start: %1%") % service_id).str()); + } + + tac.serviceId_to_streamId_map[service_id] = stream_id; + tac.serviceId_to_tcp_client_map[service_id]->on_receive_stream_start(); return false; case Message_Type_DATA: //handling the following cases alleviates clang compiler warnings throw std::logic_error("Data message recieved in control message handler"); + case Message_Type_SERVICE_IDS: + // service ids should already be received at this point, no actions to process again. 
+ return true; case Message_Type_UNKNOWN: case Message_Type_Message_Type_INT_MIN_SENTINEL_DO_NOT_USE_: case Message_Type_Message_Type_INT_MAX_SENTINEL_DO_NOT_USE_: @@ -757,15 +925,194 @@ namespace aws { namespace iot { namespace securedtunneling { if (message.ignorable()) { return true; } - throw std::logic_error((boost::format("Unrecognized message type recieved while waiting for stream start: %1%") % message.type()).str()); + throw std::logic_error((boost::format("Unrecognized message type received while waiting for stream start: %1%") % message.type()).str()); } } + /** + * Upon receiving service ids, validate service ids provided through the configurations through CLI (-s, -d) + * @return if configurations are valid + */ + bool tcp_adapter_proxy::validate_service_ids_from_configuration(tcp_adapter_context &tac, std::unordered_set service_id_list) + { + BOOST_LOG_SEV(log, trace) << "Validating service ids configuration"; + /** + * Configurations are not provided when local proxy starts, no need to check further. + * v1 local proxy format does not need to do this validation since service id won't be used. + */ + if (fall_back_to_v1_message_format(tac.adapter_config.serviceId_to_endpoint_map)) return true; + + if (tac.adapter_config.serviceId_to_endpoint_map.empty()) return true; + + if (tac.adapter_config.serviceId_to_endpoint_map.size() != service_id_list.size()) + { + BOOST_LOG_SEV(log, debug) << "Number of the service ids provided through CLI (-s or -d) does not match with open tunnel call. 
Please provide the same sets of service ids."; + return false; + } + for (auto s: service_id_list) + { + if (tac.adapter_config.serviceId_to_endpoint_map.find(s) == tac.adapter_config.serviceId_to_endpoint_map.end()) + { + BOOST_LOG_SEV(log, debug) << "Service ids provided through open tunnel call " << s << " cannot be found in the CLI parameters (-s or -d).Please provide the same sets of service ids."; + return false; + } + } + return true; + } + + /** + * Extracts service ids from the control message type Message_Type_SERVICE_IDS + */ + bool tcp_adapter_proxy::handle_control_message_service_ids(tcp_adapter_context &tac, message const & message) + { + using namespace com::amazonaws::iot::securedtunneling; + using namespace aws::iot::securedtunneling::config_file; + tac.is_service_ids_received = true; + std::unordered_set service_id_list; + std::unordered_set found_service_ids; + std::unordered_set unfound_service_ids; + // Cannot start the stream before receiving service ids. + if (message.type() == Message_Type_STREAM_START) + { + throw proxy_exception("Receive stream start before receiving service ids. Cannot forward data."); + } + else if (message.type() != Message_Type_SERVICE_IDS) + { + BOOST_LOG_SEV(log, debug) << "Expect:Message_Type_SERVICE_IDS. 
Ignore message type: " << message.type(); + return false; + } + BOOST_LOG_SEV(log, debug) << "Extracting service Ids from control message " << message.type(); + for (int i = 0; i < message.availableserviceids_size(); i++) + { + std::string id = message.availableserviceids(i); + if (service_id_list.find(id) != service_id_list.end()) + { + BOOST_LOG_SEV(log, warning) << "Duplicate service Id received, ignore: "<< id; + continue; + } + service_id_list.insert(id); + } + BOOST_LOG_SEV(log, trace) << "Service id received: "; + for (auto s: service_id_list) + { + BOOST_LOG_SEV(log, trace) << s; + } + if (!tcp_adapter_proxy::validate_service_ids_from_configuration(tac, service_id_list)) + { + throw std::runtime_error("Wrong configurations detected in local proxy. Please starts local proxy with right sets of service ids."); + } + + /** + * Set flag to mark local proxy will communicate using local proxy v1 message format. + * local proxy v1 message format: 1 service id. It can be a empty string when open tunnel with no service in destination config. + */ + if (service_id_list.size() == 1) + { + tac.adapter_config.is_v1_message_format = true; + } + /** + * Build serviceId <-> endpoint mapping if not done yet. + * Case1: Configuration is provided through configuration files. Upon receiving service ids, search through + * the configuration directory and find the service ids provided in those files. + * Case 2: Configuration is NOT provided from both files or CLI. Local proxy need to randomly pick up ports + * to use if running in source mode. + * Case 3: If not enough service ids are found through configuration files, local proxy helps to pick random + * available ports, if starts in source mode. + * If serviceId <-> endpoint mapping already exists, validate the mapping provided through CLI. 
+ */ + + if (tac.adapter_config.serviceId_to_endpoint_map.empty()) + { + BOOST_LOG_SEV(log, trace) << "Build serviceId <-> endpoint mapping upon receiving service ids"; + + // Scan configuration files to find port mappings + if (!tac.adapter_config.config_files.empty()) + { + BOOST_LOG_SEV(log, info) << "Scan configuration files to find the service ids"; + read_service_ids_from_config_files(tac.adapter_config.config_files, service_id_list, tac.adapter_config.serviceId_to_endpoint_map); + + std::transform(tac.adapter_config.serviceId_to_endpoint_map.cbegin(), tac.adapter_config.serviceId_to_endpoint_map.cend(), + std::inserter(found_service_ids, found_service_ids.begin()), + [](const std::pair& key_value) + { return key_value.first; }); + + std::set_difference(service_id_list.begin(), service_id_list.end(), found_service_ids.begin(), found_service_ids.end(), + std::inserter(unfound_service_ids, unfound_service_ids.end())); + + if (!unfound_service_ids.empty()) + { + BOOST_LOG_SEV(log, trace) << "Receive number of service ids: " << service_id_list.size() << + " .But only found " << tac.adapter_config.serviceId_to_endpoint_map.size() << " in configuration files"; + if (tac.adapter_config.mode != proxy_mode::SOURCE) + { + throw std::runtime_error("Not enough the service ids are found in the configuration files. Fail to start."); + } + + BOOST_LOG_SEV(log, trace) << "Not all the service ids are found in the configuration files. 
Local proxy will help to pick up " << unfound_service_ids.size() << " ports."; + // initialize the port to be 0 in the service id <-> endpoint mapping, so that local proxy will help picking available ports when establish tcp connection with client's APP + for (auto service_id :unfound_service_ids) + { + tac.adapter_config.serviceId_to_endpoint_map[service_id] = "0"; + } + tac.adapter_config.on_listen_port_assigned = std::bind(&tcp_adapter_proxy::handle_listen_port_assigned, this, std::placeholders::_1, std::placeholders::_2, std::ref(tac)); + } + } + // If configuration files not provided, initialize the port to be 0 if in source mode. + else + { + if (tac.adapter_config.mode != proxy_mode::SOURCE) + { + throw std::runtime_error("No port mapping exists. Fail to start local proxy in destination mode."); + } + for (auto service_id:service_id_list) + { + tac.adapter_config.serviceId_to_endpoint_map[service_id] = "0"; + } + tac.adapter_config.on_listen_port_assigned = std::bind(&tcp_adapter_proxy::handle_listen_port_assigned, this, std::placeholders::_1, std::placeholders::_2, std::ref(tac)); + } + + // Update in-memory mapping + BOOST_LOG_SEV(log, info) << "Use port mapping:"; + BOOST_LOG_SEV(log, info) << "---------------------------------"; + for (auto m: tac.adapter_config.serviceId_to_endpoint_map) + { + BOOST_LOG_SEV(log, info) << m.first << " = " << m.second; + } + BOOST_LOG_SEV(log, info) << "---------------------------------"; + } + else if (tcp_adapter_proxy::fall_back_to_v1_message_format(tac.adapter_config.serviceId_to_endpoint_map) && service_id_list.size() == 1) + { + // v1 format service id is an empty string in the map + std::string endpoint = tac.adapter_config.serviceId_to_endpoint_map[""]; + std::string service_id = *service_id_list.begin(); + + // Remove empty string map and put new mapping + tac.adapter_config.serviceId_to_endpoint_map.erase(""); + tac.adapter_config.serviceId_to_endpoint_map[service_id] = endpoint; + BOOST_LOG_SEV(log, info) << 
"Updated port mapping for v1 format: "; + for (auto m : tac.adapter_config.serviceId_to_endpoint_map) + { + BOOST_LOG_SEV(log, info) << m.first << " = " << m.second; + } + } + if (after_get_service_ids) + { + after_get_service_ids(); + } + return true; + } + bool tcp_adapter_proxy::handle_control_message_data_transfer(tcp_adapter_context &tac, message const &message) { using namespace com::amazonaws::iot::securedtunneling; BOOST_LOG_SEV(log, trace) << "Handling control message..."; - + std::int32_t stream_id = static_cast(message.streamid()); + string service_id = message.serviceid(); + // v1 message format does not need to validate service id. Set to the one service id stored in memory. + if (tac.adapter_config.is_v1_message_format) + { + service_id = tac.adapter_config.serviceId_to_endpoint_map.cbegin()->first; + } switch (message.type()) { case Message_Type_SESSION_RESET: @@ -773,23 +1120,36 @@ namespace aws { namespace iot { namespace securedtunneling { BOOST_LOG_SEV(log, trace) << "Session reset recieved"; #endif //validation has already been done on stream_id before calling this, so we can just listen - tcp_socket_reset(tac, std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac))); + tcp_socket_reset_all(tac, std::bind(&tcp_adapter_proxy::setup_tcp_sockets, this, std::ref(tac))); return true; //indicates we should stop reading from the web socket after processing this message case Message_Type_STREAM_RESET: #ifdef DEBUG BOOST_LOG_SEV(log, trace) << "Stream reset recieved"; #endif //validation has already been done on stream_id before calling this, so we can just listen - tcp_socket_reset(tac, std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac))); + tcp_socket_reset(tac, service_id, std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac), service_id)); return true; //indicates we should stop reading from the web socket after processing this message - case Message_Type_STREAM_START: //could verify that this is a 
destination mode local proxy. Source mode shouldn't be recieving stream start - BOOST_LOG_SEV(log, warning) << "Stream start recieved during data transfer"; - tcp_socket_reset(tac, std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac))); - tac.stream_id = static_cast(message.streamid()); - if (!tac.stream_id) + case Message_Type_STREAM_START: //could verify that this is a destination mode local proxy. Source mode shouldn't receive stream start + if (!stream_id) { throw proxy_exception("No stream ID set for stream start message!"); } + if (tac.serviceId_to_streamId_map.find(service_id) == tac.serviceId_to_streamId_map.end()) + { + BOOST_LOG_SEV(log, warning) << "Starting new stream for service id: " << service_id; + tac.serviceId_to_streamId_map[service_id] = stream_id; + tac.serviceId_to_tcp_client_map[service_id]->on_receive_stream_start(); + } + else if (tac.serviceId_to_streamId_map.at(service_id) != message.streamid()) + { + BOOST_LOG_SEV(log, warning) << "Stream start received during data transfer for service id :" << service_id << "with new stream id: " << message.streamid(); + BOOST_LOG_SEV(log, warning) << "Reset this stream"; + tcp_socket_reset(tac, service_id, std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac), service_id)); + } + return true; + case Message_Type_SERVICE_IDS: + // service ids should be received and validate before any stream can start. Ignore this control message if receive after stream already start. + BOOST_LOG_SEV(log, info) << "Receive service Ids during data transfer. 
ignore"; return true; case Message_Type_DATA: //handling the following cases alleviates clang compiler warnings throw std::logic_error("Data message recieved in control message handler"); @@ -809,17 +1169,34 @@ namespace aws { namespace iot { namespace securedtunneling { bool tcp_adapter_proxy::forward_data_message_to_tcp_write(tcp_adapter_context &tac, message const &message) { + // Get the endpoint information based on the service id mapping + // Validate if this mapping exists, if not, discard the message + string service_id = message.serviceid(); + /** + * v1 message format does not need to have service id field, so we don't need to do validation on this field. + * Fill the service id with the current one used in the local proxy mapping. + */ + if(tac.adapter_config.is_v1_message_format) + { + service_id = tac.adapter_config.serviceId_to_endpoint_map.cbegin()->first; + } + else if (tac.serviceId_to_streamId_map.find(service_id) == tac.serviceId_to_streamId_map.end()) + { + BOOST_LOG_SEV(log, error) << "Received non-existent service Id, ignore"; + return false; + } + tcp_connection::pointer connection = get_tcp_connection(tac, service_id); //capture write buffer size (we care if it is empty, that means we will need to trigger a drain) - size_t write_buffer_size_before = tcp_write_buffer.size(); - boost::asio::buffer_copy(tcp_write_buffer.prepare(message.payload().size()), boost::asio::buffer(message.payload())); - tcp_write_buffer.commit(message.payload().size()); + size_t write_buffer_size_before = connection->tcp_write_buffer_.size(); + boost::asio::buffer_copy(connection->tcp_write_buffer_.prepare(message.payload().size()), boost::asio::buffer(message.payload())); + connection->tcp_write_buffer_.commit(message.payload().size()); if (write_buffer_size_before == 0) { - async_tcp_write_buffer_drain(tac); + async_tcp_write_buffer_drain(tac, service_id); } - if (tcp_has_enough_write_buffer_space(tac)) + if (tcp_has_enough_write_buffer_space(connection)) { return 
true; } @@ -880,7 +1257,7 @@ namespace aws { namespace iot { namespace securedtunneling { } #ifdef DEBUG //BOOST_LOG_SEV(log, trace) << "Message recieved:\n" << message.DebugString(); //re-add when linked to protobuf instead of protobuf-lite - BOOST_LOG_SEV(log, trace) << "Message parsed successfully"; + BOOST_LOG_SEV(log, trace) << "Message parsed successfully , type :" << incoming_message.type(); #endif if (!is_valid_stream_id(tac, incoming_message)) { @@ -891,13 +1268,36 @@ namespace aws { namespace iot { namespace securedtunneling { } else { + string service_id = incoming_message.serviceid(); + // v1 message format does not need to validate service id. Set to the one service id stored in memory. + if (tac.adapter_config.is_v1_message_format) + { + service_id = tac.adapter_config.serviceId_to_endpoint_map.cbegin()->first; + } + tcp_connection::pointer connection = get_tcp_connection(tac, service_id); + // if per connection handler is available, trigger them. if (incoming_message.type() != Message_Type_DATA) { - continue_reading = on_control_message(incoming_message); + if (connection != nullptr && connection->on_control_message != nullptr) + { + continue_reading = connection->on_control_message(incoming_message); + } + else + { + continue_reading = on_web_socket_control_message(incoming_message); + } } else if (incoming_message.type() == Message_Type_DATA) { - continue_reading = on_data_message(incoming_message); + if (connection != nullptr && connection->on_data_message != nullptr) + { + continue_reading = connection->on_data_message(incoming_message); + } + else + { + continue_reading = on_web_socket_data_message(incoming_message); + } + } } } @@ -923,54 +1323,94 @@ namespace aws { namespace iot { namespace securedtunneling { void tcp_adapter_proxy::async_web_socket_read_loop(tcp_adapter_context &tac) { - if (!on_control_message || !on_data_message) + if (!on_web_socket_control_message || !on_web_socket_data_message) { throw std::logic_error("Cannot run web 
socket read loop without handlers in place for control messages and data messages"); } if (!tcp_has_enough_write_buffer_space(tac)) { + BOOST_LOG_SEV(log, trace) << "Scheduled async web socket read into tcp write buffer and it does not have enough space!"; #ifdef DEBUG BOOST_LOG_SEV(log, trace) << "Scheduled async web socket read into tcp write buffer and it does not have enough space!"; #endif } + else if (tac.is_web_socket_reading) { + BOOST_LOG_SEV(log, debug) << "Starting web socket read loop while web socket is already reading. Ignoring..."; #ifdef DEBUG BOOST_LOG_SEV(log, debug) << "Starting web socket read loop while web socket is already reading. Ignoring..."; #endif } else { + BOOST_LOG_SEV(log, debug) << "Starting web socket read loop continue reading..."; tac.is_web_socket_reading = true; tac.wss->async_read_some(incoming_message_buffer, incoming_message_buffer.max_size() - incoming_message_buffer.size(), std::bind(&tcp_adapter_proxy::on_web_socket_read, this, std::ref(tac), std::placeholders::_1, std::placeholders::_2)); } } - void tcp_adapter_proxy::async_tcp_write_buffer_drain(tcp_adapter_context &tac) + /** + * NOTE: No tcp read or write buffer needs to be initialized before we receive service ids. + * This is because before getting the service ids, we don't know which applications to connect/listen to. No + * tcp connections need to be established at this point. + * @param tac + */ + void tcp_adapter_proxy::async_web_socket_read_loop_for_service_ids(tcp_adapter_context &tac) { + BOOST_LOG_SEV(log, trace) << "async_web_socket_read_loop_for_service_ids"; + + if (!on_web_socket_control_message || !on_web_socket_data_message) + { + throw std::logic_error("Cannot run web socket read loop without handlers in place for control messages and data messages"); + } + if (tac.is_web_socket_reading) + { +#ifdef DEBUG + BOOST_LOG_SEV(log, debug) << "Starting web socket read loop while web socket is already reading. 
Ignoring..."; +#endif + } + else + { + tac.is_web_socket_reading = true; + BOOST_LOG_SEV(log, debug) << "Scheduled next read:"; + tac.wss->async_read_some(incoming_message_buffer, incoming_message_buffer.max_size() - incoming_message_buffer.size(), + std::bind(&tcp_adapter_proxy::on_web_socket_read, this, std::ref(tac), std::placeholders::_1, std::placeholders::_2)); + } + } + + void tcp_adapter_proxy::async_tcp_write_buffer_drain(tcp_adapter_context &tac, string service_id) + { + tcp_connection::pointer connection = get_tcp_connection(tac, service_id); + if (!connection->socket_.is_open()) + { + throw proxy_exception((boost::format("TCP socket is not open service id: %1%") % service_id).str()); + } static std::function write_done; - write_done = [&](boost::system::error_code const &ec, size_t bytes_written) + write_done = [&, service_id](boost::system::error_code const &ec, size_t bytes_written) { - tac.is_tcp_socket_writing = false; + BOOST_LOG_SEV(log, trace) << "write done service id " << service_id; + tcp_connection::pointer socket_write_connection = get_tcp_connection(tac, service_id); + socket_write_connection->is_tcp_socket_writing_ = false; if (ec) { - if (on_tcp_error) + if (socket_write_connection->on_tcp_error) { - on_tcp_error(ec); - on_tcp_error = nullptr; + socket_write_connection->on_tcp_error(ec); + socket_write_connection->on_tcp_error = nullptr; } else { - tcp_socket_error(tac, ec); + tcp_socket_error(tac, ec, service_id); } } else { BOOST_LOG_SEV(log, trace) << "Wrote " << bytes_written << " bytes to tcp socket"; - bool had_space_before = tcp_has_enough_write_buffer_space(tac); - tcp_write_buffer.consume(bytes_written); - bool has_space_after = tcp_has_enough_write_buffer_space(tac); + bool had_space_before = tcp_has_enough_write_buffer_space(socket_write_connection); + socket_write_connection->tcp_write_buffer_.consume(bytes_written); + bool has_space_after = tcp_has_enough_write_buffer_space(socket_write_connection); if (!had_space_before && 
has_space_after) { #ifdef DEBUG @@ -978,98 +1418,172 @@ namespace aws { namespace iot { namespace securedtunneling { #endif async_web_socket_read_loop(tac); } - if (tcp_write_buffer.size() > 0) + if (socket_write_connection->tcp_write_buffer_.size() > 0) { - tac.is_tcp_socket_writing = true; - tac.tcp_socket.async_write_some(tcp_write_buffer.data(), write_done); + socket_write_connection->is_tcp_socket_writing_ = true; + BOOST_LOG_SEV(log, debug) << "Write to tcp socket"; + socket_write_connection->socket_.async_write_some(socket_write_connection->tcp_write_buffer_.data(), write_done); } else { - if (on_tcp_write_buffer_drain_complete) + if (socket_write_connection->on_tcp_write_buffer_drain_complete) { - invoke_and_clear_handler(on_tcp_write_buffer_drain_complete); + invoke_and_clear_handler(socket_write_connection->on_tcp_write_buffer_drain_complete); } + BOOST_LOG_SEV(log, trace) << "TCP write buffer drain complete"; #ifdef DEBUG BOOST_LOG_SEV(log, trace) << "TCP write buffer drain complete"; #endif } + BOOST_LOG_SEV(log, trace) << "Done writing for: " << service_id; } }; - if (tac.is_tcp_socket_writing) + if (connection->is_tcp_socket_writing_) { BOOST_LOG_SEV(log, debug) << "TCP write buffer drain cannot be started while already writing"; } - else if (tcp_write_buffer.size() == 0) + else if (connection->tcp_write_buffer_.size() == 0) { - invoke_and_clear_handler(on_tcp_write_buffer_drain_complete); + invoke_and_clear_handler(connection->on_tcp_write_buffer_drain_complete); } else { - tac.is_tcp_socket_writing = true; - tac.tcp_socket.async_write_some(tcp_write_buffer.data(), write_done); + connection->is_tcp_socket_writing_ = true; + connection->socket_.async_write_some(connection->tcp_write_buffer_.data(), write_done); } } - void tcp_adapter_proxy::async_setup_web_socket_write_buffer_drain(tcp_adapter_context &tac) + void tcp_adapter_proxy::async_setup_web_socket_write_buffer_drain(tcp_adapter_context &tac, std::string const & service_id) { + 
BOOST_LOG_SEV(log, trace) << "Web socket write buffer drain for service id: " << service_id; + boost::beast::flat_buffer outgoing_message_buffer; + tcp_connection::pointer connection = get_tcp_connection(tac, service_id); using namespace com::amazonaws::iot::securedtunneling; - - if (tac.is_web_socket_writing) + if (connection->web_socket_data_write_buffer_.size() > 0) { - //already writing, do nothing - } - else if (web_socket_data_write_buffer.size() > 0) - { //not writing, and buffer isn't empty so schedule one - bool had_buffer_write_space = wss_has_enough_write_buffer_space(tac); + // Get end point from the tcp socket outgoing_message.set_type(Message_Type_DATA); - outgoing_message.set_streamid(tac.stream_id); + if (tac.adapter_config.serviceId_to_endpoint_map.find(service_id) == tac.adapter_config.serviceId_to_endpoint_map.end()) + { + throw proxy_exception((boost::format("Could not forward traffic from invalid service id: %1%") % service_id).str()); + } + else if (tac.serviceId_to_streamId_map.find(service_id) == tac.serviceId_to_streamId_map.end()) + { + throw proxy_exception((boost::format("No streamId exists for the service Id %1%") % service_id).str()); + } + BOOST_LOG_SEV(log, debug) << "Prepare to send data message: service id: " << service_id << " stream id: " << tac.serviceId_to_streamId_map[service_id]; + // Construct outgoing message + outgoing_message.set_serviceid(service_id); + outgoing_message.set_streamid(tac.serviceId_to_streamId_map[service_id]); size_t const send_size = std::min(GET_SETTING(settings, MESSAGE_MAX_PAYLOAD_SIZE), - web_socket_data_write_buffer.size()); - boost::asio::buffer_copy(outgoing_message_buffer.prepare(send_size), web_socket_data_write_buffer.data(), send_size); + connection->web_socket_data_write_buffer_.size()); + boost::asio::buffer_copy(outgoing_message_buffer.prepare(send_size), connection->web_socket_data_write_buffer_.data(), send_size); outgoing_message_buffer.commit(send_size); - 
web_socket_data_write_buffer.consume(send_size); outgoing_message.set_payload(outgoing_message_buffer.data().data(), send_size); + + // Clean up web_socket_data_write_buffer + connection->web_socket_data_write_buffer_.consume(send_size); outgoing_message_buffer.consume(outgoing_message_buffer.max_size()); //after message is sent, continue with the loop - after_send_message = std::bind(&tcp_adapter_proxy::async_setup_web_socket_write_buffer_drain, this, std::ref(tac)); + connection->after_send_message = std::bind(&tcp_adapter_proxy::async_setup_web_socket_write_buffer_drain, this, std::ref(tac), service_id); async_send_message(tac, outgoing_message); //if this write cleared up enough space - if (!had_buffer_write_space && wss_has_enough_write_buffer_space(tac)) + if (wss_has_enough_write_buffer_space(connection->web_socket_data_write_buffer_)) + { + BOOST_LOG_SEV(log, debug) << "Write buffer has enough space, continue tcp read loop for " << service_id ; + async_tcp_socket_read_loop(tac, service_id); + } + else { - async_tcp_socket_read_loop(tac); + BOOST_LOG_SEV(log, debug) << " write DOES NOT cleared up enough space, no tcp read loop" << service_id ; } } else { //not writing, no buffer contents, skip straight to being done draining - invoke_and_clear_handler(on_web_socket_write_buffer_drain_complete); + invoke_and_clear_handler(connection->on_web_socket_write_buffer_drain_complete); } } + void tcp_adapter_proxy::async_setup_source_tcp_sockets(tcp_adapter_context &tac) + { + BOOST_LOG_SEV(log, trace) << "Setting up source tcp sockets"; + for (auto m: tac.adapter_config.serviceId_to_endpoint_map) + { + string service_id = m.first; + setup_tcp_socket(tac, service_id); + } + } - void tcp_adapter_proxy::async_setup_source_tcp_socket(tcp_adapter_context &tac) + void tcp_adapter_proxy::tcp_adapter_proxy::async_setup_destination_tcp_sockets(tcp_adapter_context &tac) { - std::shared_ptr retry_config = - std::make_shared(tac.io_ctx, - GET_SETTING(settings, 
TCP_CONNECTION_RETRY_COUNT), - GET_SETTING(settings, TCP_CONNECTION_RETRY_DELAY_MS), - nullptr); - retry_config->operation = std::bind(&tcp_adapter_proxy::async_setup_source_tcp_socket_retry, this, std::ref(tac), retry_config); - async_setup_source_tcp_socket_retry(tac, retry_config); + for (auto m: tac.adapter_config.serviceId_to_endpoint_map) + { + string service_id = m.first; + setup_tcp_socket(tac, service_id); + } + } + + void tcp_adapter_proxy::async_send_message_to_web_socket(tcp_adapter_context &tac, std::shared_ptr const& data_to_send, std::string const & service_id) + { + BOOST_LOG_SEV(log, trace) << "Sending messages over web socket for service id: " << service_id; + BOOST_LOG_SEV(log, trace) << "Current queue size: " << tac.web_socket_outgoing_message_queue.size(); + // Always add to queue and invoke the send message complete + if (data_to_send != nullptr) + { + BOOST_LOG_SEV(log, trace) << "Put data " << data_to_send->size() << " bytes into the web_socket_outgoing_message_queue for service id: " << service_id; + tcp_connection::pointer socket_connection = get_tcp_connection(tac, service_id); + data_message temp = std::make_pair(data_to_send, socket_connection->after_send_message); + tac.web_socket_outgoing_message_queue.push(temp); + // Are we already writing? 
+ if(tac.web_socket_outgoing_message_queue.size() > 1) + return; + } + + // We are not currently writing, so send this immediately + data_message message_to_send = tac.web_socket_outgoing_message_queue.front(); + tac.wss->async_write(message_to_send.first->data(), [=, &tac](boost::system::error_code const &ec, std::size_t const bytes_sent) + { + if (ec) + { + throw proxy_exception("Error sending web socket message", ec); + } + BOOST_LOG_SEV(log, trace) << "Sent " << bytes_sent << " bytes over websocket for service id: " << service_id; + std::function capture_after_send_message = message_to_send.second; + tac.web_socket_outgoing_message_queue.pop(); + + if(capture_after_send_message) + { + capture_after_send_message(); + } + if(tac.web_socket_outgoing_message_queue.empty()) + { + BOOST_LOG_SEV(log, trace) << "web_socket_outgoing_message_queue is empty, no more messages to send."; + return; + } + async_send_message_to_web_socket(tac, nullptr, service_id); + }); } - void tcp_adapter_proxy::async_setup_source_tcp_socket_retry(tcp_adapter_context &tac, std::shared_ptr retry_config) + void tcp_adapter_proxy::async_setup_source_tcp_socket_retry(tcp_adapter_context &tac, std::shared_ptr retry_config, string service_id) { - tcp_socket_ensure_closed(tac); - tac.acceptor.close(); + tcp_server::pointer server = tac.serviceId_to_tcp_server_map[service_id]; + tcp_socket_ensure_closed(server->connection_->socket()); + server->acceptor_.close(); static boost::asio::socket_base::reuse_address reuse_addr_option(true); tac.bind_address_actual = tac.adapter_config.bind_address.get_value_or(GET_SETTING(settings, DEFAULT_BIND_ADDRESS)); BOOST_LOG_SEV(log, debug) << "Resolving bind address host: " << tac.bind_address_actual; - tac.resolver.async_resolve(tac.bind_address_actual, boost::lexical_cast(tac.adapter_config.data_port), + + std::string endpoint = tac.adapter_config.serviceId_to_endpoint_map[service_id]; + tuple endpoint_to_connect = get_host_and_port(endpoint, 
tac.bind_address_actual); + std::string src_port = std::get<1>(endpoint_to_connect); + std::uint16_t port_to_connect = boost::lexical_cast(src_port); + BOOST_LOG_SEV(log, debug) << "Port to connect " << port_to_connect; + server->resolver_.async_resolve(tac.bind_address_actual, src_port, boost::asio::ip::resolver_base::passive, [=, &tac](boost::system::error_code const &ec, tcp::resolver::results_type results) { @@ -1083,53 +1597,61 @@ namespace aws { namespace iot { namespace securedtunneling { { BOOST_LOG_SEV(log, debug) << "Resolved bind IP: " << results->endpoint().address().to_string(); boost::system::error_code bind_ec; - tac.acceptor.open(results->endpoint().protocol()); - if (tac.adapter_config.data_port) + server->acceptor_.open(results->endpoint().protocol()); + if (port_to_connect) { //if data port is 0 (means pick an empheral port), then don't set this option - tac.acceptor.set_option(reuse_addr_option); + server->acceptor_.set_option(reuse_addr_option); } - tac.acceptor.bind(results->endpoint(), bind_ec); + server->acceptor_.bind(results->endpoint(), bind_ec); if (bind_ec) { BOOST_LOG_SEV(log, error) << (boost::format("Could not bind to address: %1%:%2% -- %3%") % results->endpoint().address().to_string() % results->endpoint().port() % bind_ec.message()).str(); basic_retry_execute(log, retry_config, - [&tac, &ec]() { throw proxy_exception((boost::format("Failed to bind to address %1%:%2%") % tac.bind_address_actual % tac.adapter_config.data_port).str(), ec); }); + []() { throw proxy_exception(SOURCE_LOCAL_PROXY_PORT_BIND_EXCEPTION); }); } else { - tac.local_port = static_cast(tac.acceptor.local_endpoint().port()); - BOOST_LOG_SEV(log, info) << "Listening for new connection on port " << tac.local_port; + std::uint16_t local_port = static_cast(server->acceptor_.local_endpoint().port()); + BOOST_LOG_SEV(log, info) << "Listening for new connection on port " << local_port; boost::system::error_code listen_ec; - tac.acceptor.listen(0, listen_ec); + 
server->acceptor_.listen(0, listen_ec); if (listen_ec) { - BOOST_LOG_SEV(log, error) << (boost::format("Could not listen on bind address: %1% -- %2%") - % results->endpoint().address().to_string() % listen_ec.message()).str(); + BOOST_LOG_SEV(log, error) << (boost::format("Could not listen on bind address: %1%:%2% -- %3%") + % results->endpoint().address().to_string() % local_port % listen_ec.message()).str(); basic_retry_execute(log, retry_config, - [&tac, &listen_ec]() { throw proxy_exception((boost::format("Failed to listen on bind address %1%:%2%") % tac.bind_address_actual % tac.adapter_config.data_port).str(), listen_ec, true); }); + []() { throw proxy_exception(SOURCE_LOCAL_PROXY_PORT_BIND_EXCEPTION); }); } else { - if (tac.adapter_config.data_port == 0 && tac.adapter_config.on_listen_port_assigned) + if (port_to_connect == 0 && tac.adapter_config.on_listen_port_assigned) { - tac.adapter_config.on_listen_port_assigned(tac.local_port); + tac.adapter_config.on_listen_port_assigned(local_port, service_id); } - - tac.acceptor.async_accept( - [=, &tac](boost::system::error_code const &ec, boost::asio::ip::tcp::socket new_socket) + server->acceptor_.async_accept( + [=, &tac](boost::system::error_code const &ec, boost::asio::ip::tcp::socket new_socket) { + if (ec) { BOOST_LOG_SEV(log, error) << (boost::format("Could not listen/accept incoming connection on %1%:%2% -- %3%") - % tac.bind_address_actual % tac.local_port % ec.message()).str(); + % tac.bind_address_actual % local_port % ec.message()).str(); basic_retry_execute(log, retry_config, - [&tac, &ec]() { throw std::runtime_error((boost::format("Failed to accept new connection -- %2%") % tac.adapter_config.data_port % ec.message()).str()); }); + [=, &ec]() { throw std::runtime_error((boost::format("Failed to accept new connection on %1% -- %2%") % local_port % ec.message()).str()); }); } else { - tac.tcp_socket = std::move(new_socket); - BOOST_LOG_SEV(log, info) << "Accepted tcp connection on port " << 
tac.local_port << " from " << tac.tcp_socket.remote_endpoint(); - invoke_and_clear_handler(after_setup_tcp_socket); + BOOST_LOG_SEV(log, debug) << "socket port " << new_socket.local_endpoint().port(); + string endpoint = boost::lexical_cast(new_socket.local_endpoint().port()); + BOOST_LOG_SEV(log, debug) << "endpoint mapping:"; + for (auto m: tac.adapter_config.serviceId_to_endpoint_map) + { + BOOST_LOG_SEV(log, debug) << m.first << " = " << m.second; + } + tcp_server::pointer server = tac.serviceId_to_tcp_server_map[service_id]; + server->connection_->socket() = std::move(new_socket); + BOOST_LOG_SEV(log, info) << "Accepted tcp connection on port " << server->connection_->socket().local_endpoint().port() << " from " << server->connection_->socket().remote_endpoint(); + invoke_and_clear_handler(server->after_setup_tcp_socket); } }); } @@ -1138,64 +1660,79 @@ namespace aws { namespace iot { namespace securedtunneling { }); } - void tcp_adapter_proxy::async_resolve_destination_for_connect(tcp_adapter_context &tac, std::shared_ptr retry_config, boost::system::error_code const &ec, tcp::resolver::results_type results) + void tcp_adapter_proxy::async_resolve_destination_for_connect(tcp_adapter_context &tac, std::shared_ptr retry_config, string const & service_id, boost::system::error_code const &ec, tcp::resolver::results_type results) { + BOOST_LOG_SEV(log, trace) << "Resolve destination to connect for service id: " << service_id; if (ec) { - BOOST_LOG_SEV(log, error) << (boost::format("Could not resolve ip/host {%1}: %2%") % tac.adapter_config.data_host % ec.message()).str(); + string endpoint = tac.adapter_config.serviceId_to_endpoint_map[service_id]; + BOOST_LOG_SEV(log, error) << (boost::format("Could not resolve endpoint %1%. 
Error message: %2%") % endpoint % ec.message()).str(); basic_retry_execute(log, retry_config, - [this, &tac]() + [this, &tac, service_id]() { - this->after_send_message = std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac)); - async_send_stream_reset(tac, tac.stream_id); + tcp_connection::pointer socket_connection = get_tcp_connection(tac, service_id); + socket_connection->after_send_message = std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac), service_id); + async_send_stream_reset(tac, service_id); }); } else { - BOOST_LOG_SEV(log, debug) << "Resolved destination host to IP: " << results->endpoint().address().to_string(); - BOOST_LOG_SEV(log, trace) << "Connecting to " << results->endpoint().address().to_string(); - tac.tcp_socket.async_connect(*results.begin(), + tcp_client::pointer client = tac.serviceId_to_tcp_client_map[service_id]; + std::string dst_host = results->endpoint().address().to_string(); + unsigned short dst_port = results->endpoint().port(); + BOOST_LOG_SEV(log, debug) << "Resolved destination host to IP: " << dst_host << " , connecting ..."; + client->connection_->socket().async_connect(*results.begin(), [=, &tac](boost::system::error_code const &ec) { if (ec) { - BOOST_LOG_SEV(log, error) << (boost::format("Could not connect to destination %1%:%2% -- %3%") % tac.adapter_config.data_host % tac.adapter_config.data_port % ec.message()).str(); + BOOST_LOG_SEV(log, error) << (boost::format("Could not connect to destination %1%:%2% -- %3%") % dst_host % dst_port % ec.message()).str(); basic_retry_execute(log, retry_config, - [this, &tac]() + [this, &tac, service_id]() { - this->after_send_message = std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac)); - async_send_stream_reset(tac, tac.stream_id); + tcp_connection::pointer socket_connection = get_tcp_connection(tac, service_id); + socket_connection->after_send_message = std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac), service_id); + 
async_send_stream_reset(tac, service_id); }); } else { - BOOST_LOG_SEV(log, info) << "Connected to " << tac.adapter_config.data_host << ":" << tac.adapter_config.data_port; - invoke_and_clear_handler(after_setup_tcp_socket); + BOOST_LOG_SEV(log, info) << "Connected to " << dst_host << ", port: " << dst_port; + tcp_client::pointer client = tac.serviceId_to_tcp_client_map[service_id]; + invoke_and_clear_handler(client->after_setup_tcp_socket); } } ); } } - void tcp_adapter_proxy::async_setup_dest_tcp_socket(tcp_adapter_context &tac) + void tcp_adapter_proxy::async_setup_dest_tcp_socket(tcp_adapter_context &tac, string const & service_id) { + BOOST_LOG_SEV(log, trace) << "Setup destination tcp socket for service id" << service_id; std::shared_ptr retry_config = std::make_shared(tac.io_ctx, GET_SETTING(settings, TCP_CONNECTION_RETRY_COUNT), GET_SETTING(settings, TCP_CONNECTION_RETRY_DELAY_MS), nullptr); - retry_config->operation = std::bind(&tcp_adapter_proxy::async_setup_dest_tcp_socket_retry, this, std::ref(tac), retry_config); - async_setup_dest_tcp_socket_retry(tac, retry_config); + retry_config->operation = std::bind(&tcp_adapter_proxy::async_setup_dest_tcp_socket_retry, this, std::ref(tac), retry_config, service_id); + async_setup_dest_tcp_socket_retry(tac, retry_config, service_id); } - void tcp_adapter_proxy::async_setup_dest_tcp_socket_retry(tcp_adapter_context &tac, std::shared_ptr retry_config) + void tcp_adapter_proxy::async_setup_dest_tcp_socket_retry(tcp_adapter_context &tac, std::shared_ptr retry_config, string const & service_id) { - tcp_socket_ensure_closed(tac); - BOOST_LOG_SEV(log, info) << "Attempting to establish tcp socket connection to: " << tac.adapter_config.data_host << ":" << tac.adapter_config.data_port; + tcp_client::pointer client = tac.serviceId_to_tcp_client_map[service_id]; + tcp_socket_ensure_closed(client->connection_->socket()); + if (tac.adapter_config.serviceId_to_endpoint_map.find((service_id)) == 
tac.adapter_config.serviceId_to_endpoint_map.end()) + { + throw std::runtime_error((boost::format("Receive invalid service id %1%") % service_id).str()); + } + std::string endpoint = tac.adapter_config.serviceId_to_endpoint_map[service_id]; + + BOOST_LOG_SEV(log, info) << "Attempting to establish tcp socket connection to: " << endpoint; if (tac.adapter_config.bind_address.has_value()) { BOOST_LOG_SEV(log, debug) << "Resolving local address host: " << tac.adapter_config.bind_address.get(); - tac.resolver.async_resolve(tac.adapter_config.bind_address.get(), boost::lexical_cast("0"), + client->resolver_.async_resolve(tac.adapter_config.bind_address.get(), boost::lexical_cast("0"), boost::asio::ip::resolver_base::passive, [=, &tac](boost::system::error_code const &ec, tcp::resolver::results_type results) { @@ -1203,10 +1740,11 @@ namespace aws { namespace iot { namespace securedtunneling { { BOOST_LOG_SEV(log, error) << (boost::format("Could not resolve bind address: %1% -- %2%") % tac.adapter_config.bind_address.get() % ec.message()).str(); basic_retry_execute(log, retry_config, - [this, &tac]() + [this, &tac, service_id]() { - this->after_send_message = std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac)); - async_send_stream_reset(tac, tac.stream_id); + tcp_connection::pointer socket_connection = get_tcp_connection(tac, service_id); + socket_connection->after_send_message = std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac), service_id); + async_send_stream_reset(tac, service_id); }); } else @@ -1214,85 +1752,187 @@ namespace aws { namespace iot { namespace securedtunneling { BOOST_LOG_SEV(log, debug) << "Resolved bind IP: " << results->endpoint().address().to_string(); boost::system::error_code bind_ec; - tac.tcp_socket.open(results->endpoint().protocol()); - tac.tcp_socket.bind({results->endpoint().address(), 0}, bind_ec); + client->connection_->socket().open(results->endpoint().protocol()); + 
client->connection_->socket().bind({results->endpoint().address(), 0}, bind_ec); if (bind_ec) { BOOST_LOG_SEV(log, error) << (boost::format("Could not bind to address: %1% -- %2%") % results->endpoint().address().to_string() % bind_ec.message()).str(); basic_retry_execute(log, retry_config, - [this, &tac]() + [this, &tac, service_id]() { - this->after_send_message = std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac)); - async_send_stream_reset(tac, tac.stream_id); + tcp_connection::pointer socket_connection = get_tcp_connection(tac, service_id); + socket_connection->after_send_message = std::bind(&tcp_adapter_proxy::setup_tcp_socket, this, std::ref(tac), service_id); + async_send_stream_reset(tac, service_id); }); } else { - tac.local_port = static_cast(tac.tcp_socket.local_endpoint().port()); - BOOST_LOG_SEV(log, trace) << "Resolving destination host: " << tac.adapter_config.data_host; - tac.resolver.async_resolve(tac.adapter_config.data_host, boost::lexical_cast(tac.adapter_config.data_port), - std::bind(&tcp_adapter_proxy::async_resolve_destination_for_connect, this, std::ref(tac), retry_config, std::placeholders::_1, std::placeholders::_2)); + tuple endpoint_to_connect = tcp_adapter_proxy::get_host_and_port(endpoint, tac.adapter_config.bind_address.get()); + std::string dst_host = std::get<0>(endpoint_to_connect); + std::string dst_port = std::get<1>(endpoint_to_connect); + client->resolver_.async_resolve(dst_host, dst_port, + std::bind(&tcp_adapter_proxy::async_resolve_destination_for_connect, this, std::ref(tac), retry_config, service_id, std::placeholders::_1, std::placeholders::_2)); } } }); } else { - BOOST_LOG_SEV(log, trace) << "Resolving destination host: " << tac.adapter_config.data_host; - tac.resolver.async_resolve(tac.adapter_config.data_host, boost::lexical_cast(tac.adapter_config.data_port), - std::bind(&tcp_adapter_proxy::async_resolve_destination_for_connect, this, std::ref(tac), retry_config, std::placeholders::_1, 
std::placeholders::_2)); + tuple endpoint_to_connect = tcp_adapter_proxy::get_host_and_port(endpoint, LOCALHOST_IP); + std::string dst_host = std::get<0>(endpoint_to_connect); + std::string dst_port = std::get<1>(endpoint_to_connect); + BOOST_LOG_SEV(log, trace) << "Resolving destination host: " << dst_host << " port: " << dst_port; + client->resolver_.async_resolve(dst_host, dst_port, + std::bind(&tcp_adapter_proxy::async_resolve_destination_for_connect, this, std::ref(tac), retry_config, service_id, std::placeholders::_1, std::placeholders::_2)); } } - void tcp_adapter_proxy::tcp_socket_ensure_closed(tcp_adapter_context &tac) + void tcp_adapter_proxy::tcp_socket_ensure_closed(tcp::socket & tcp_socket) { boost::system::error_code ec; - if (tac.tcp_socket.is_open()) + if (tcp_socket.is_open()) { BOOST_LOG_SEV(log, debug) << "Previously open connection detected. Closing..."; - auto remote_endpoint = tac.tcp_socket.remote_endpoint(ec); + auto remote_endpoint = tcp_socket.remote_endpoint(ec); if (!ec) { BOOST_LOG_SEV(this->log, info) << "Disconnected from: " << remote_endpoint; } - tac.tcp_socket.close(); + tcp_socket.close(); } } - void tcp_adapter_proxy::clear_buffers() + void tcp_adapter_proxy::clear_ws_buffers(tcp_adapter_context &tac) { - BOOST_LOG_SEV(log, trace) << "Clearing all data buffers"; - outgoing_message_buffer.consume(outgoing_message_buffer.max_size()); + BOOST_LOG_SEV(log, trace) << "Clearing all ws data buffers"; incoming_message_buffer.consume(incoming_message_buffer.max_size()); message_parse_buffer.consume(message_parse_buffer.max_size()); - tcp_write_buffer.consume(tcp_write_buffer.max_size()); - tcp_read_buffer.consume(tcp_read_buffer.max_size()); - web_socket_data_write_buffer.consume(web_socket_data_write_buffer.max_size()); + BOOST_LOG_SEV(log, trace) << "Finished Clearing all ws data buffers"; + } + + void tcp_adapter_proxy::clear_tcp_connection_buffers(tcp_connection::pointer connection) + { + BOOST_LOG_SEV(log, trace) << "Clearing tcp 
connection buffers"; + connection->tcp_read_buffer_.consume(connection->tcp_read_buffer_.max_size()); + connection->tcp_write_buffer_.consume(connection->tcp_write_buffer_.max_size()); + connection->web_socket_data_write_buffer_.consume(connection->web_socket_data_write_buffer_.max_size()); } bool tcp_adapter_proxy::is_valid_stream_id(tcp_adapter_context const& tac, message const &message) { - if (MESSAGE_TYPES_REQUIRING_STREAM_ID.find(message.type()) != MESSAGE_TYPES_REQUIRING_STREAM_ID.end()) + if (MESSAGE_TYPES_VALIDATING_STREAM_ID.find(message.type()) != MESSAGE_TYPES_VALIDATING_STREAM_ID.end()) { + string service_id = message.serviceid(); + // v1 message format does not need to validate service id. Set to the one service id stored in memory. + if (tac.adapter_config.is_v1_message_format) + { + service_id = tac.adapter_config.serviceId_to_endpoint_map.cbegin()->first; + } + else if (tac.serviceId_to_streamId_map.find(service_id) == tac.serviceId_to_streamId_map.end()) + { + BOOST_LOG_SEV(log, warning) << "No stream found for service id: " << service_id << ". 
Ignore stream id: " << message.streamid(); + return false; + } + int32_t stream_id = tac.serviceId_to_streamId_map.at(service_id); if (message.streamid() == 0) { BOOST_LOG_SEV(log, warning) << "Message recieved with streamid not set"; return false; } - return tac.stream_id == message.streamid(); + return stream_id == message.streamid(); } return true; } - bool tcp_adapter_proxy::tcp_has_enough_write_buffer_space(tcp_adapter_context const &tac) + bool tcp_adapter_proxy::tcp_has_enough_write_buffer_space(tcp_connection::pointer connection) { //tcp write buffer needs at least enough space to hold a max data size web socket message //because we can't limit how much data we might recieve next frame - return (tcp_write_buffer.max_size() - tcp_write_buffer.size()) >= GET_SETTING(settings, MESSAGE_MAX_PAYLOAD_SIZE); + return (connection->tcp_write_buffer_.max_size() - connection->tcp_write_buffer_.size()) >= GET_SETTING(settings, MESSAGE_MAX_PAYLOAD_SIZE); } - bool tcp_adapter_proxy::wss_has_enough_write_buffer_space(tcp_adapter_context const &tac) + // Check if all tcp write buffers have space. 
If one of them does not have enough, return false + bool tcp_adapter_proxy::tcp_has_enough_write_buffer_space(tcp_adapter_context const &tac) + { + bool has_enough_space = true; + for (auto m : tac.serviceId_to_tcp_client_map) + { + string service_id = m.first; + tcp_connection::pointer connection = m.second->connection_; + if ( (connection->tcp_write_buffer_.max_size() - connection->tcp_write_buffer_.size()) < GET_SETTING(settings, MESSAGE_MAX_PAYLOAD_SIZE) ) + { + has_enough_space = false; + break; + } + } + return has_enough_space; + } + + bool tcp_adapter_proxy::wss_has_enough_write_buffer_space(boost::beast::multi_buffer const &buffer) { //web socket write buffer only needs non-zero space because we can make TCP read //calls that limit the data recieved - return (web_socket_data_write_buffer.max_size() - web_socket_data_write_buffer.size()) > 0; + + return (buffer.max_size() - buffer.size()) > 0; + } + + /** + * Given a string of endpoint, returns the boost tcp endpoint. + */ + std::tuple tcp_adapter_proxy::get_host_and_port( const std::string & endpoint, const std::string & default_host) + { + std::tuple res; + std::vector split_res; + std::string endpoint_to_process = endpoint; + std::string port; + std::string host; + transform(endpoint_to_process.begin(), endpoint_to_process.end(), endpoint_to_process.begin(), ::tolower); + boost::split(split_res, endpoint_to_process, boost::is_any_of(":")); + + if (split_res.empty()) { + throw std::runtime_error("Must provide at least one port or host name/ip!"); + } + else if (split_res.size() == 1) + { + boost::trim(split_res[0]); + res = std::make_tuple(default_host, split_res[0]); + } + else if (split_res.size() == 2) + { + boost::trim(split_res[0]); + boost::trim(split_res[1]); + res = std::make_tuple(split_res[0], split_res[1]); + } + else + { + // If step in this case, it means host name has delimiter ":" + uint16_t hostname_len = endpoint_to_process.size() - split_res[split_res.size()-1].size(); + host = 
endpoint_to_process.substr(0, hostname_len); + boost::trim(split_res[split_res.size()-1]); + BOOST_LOG_SEV(log, trace) << "host name: " << host; + res = std::make_tuple(host, split_res[split_res.size()-1]); + } + return res; + } + + void tcp_adapter_proxy::handle_listen_port_assigned(const std::uint16_t & port_assigned, const std::string & service_id, tcp_adapter_context &tac) + { + BOOST_LOG_SEV(log, trace) << "Handling source listening port assigned"; + // Update service_id <-> endpoint mapping + tac.adapter_config.serviceId_to_endpoint_map[service_id] = boost::lexical_cast(port_assigned); + + // Output new port mapping to user + BOOST_LOG_TRIVIAL(info) << "Listen port assigned for service id " << service_id << ". New port mapping: "; + BOOST_LOG_TRIVIAL(info) << service_id << " = " << port_assigned; + } + + bool tcp_adapter_proxy::fall_back_to_v1_message_format(std::unordered_map const & serviceId_to_endpoint_map) + { + if (serviceId_to_endpoint_map.size() == 1 && serviceId_to_endpoint_map.begin()->first.empty()) + { + return true; + } + else + { + return false; + } } }}} diff --git a/src/TcpAdapterProxy.h b/src/TcpAdapterProxy.h index 379754a..611d1f2 100644 --- a/src/TcpAdapterProxy.h +++ b/src/TcpAdapterProxy.h @@ -3,8 +3,10 @@ #pragma once #include +#include #include #include +#include #include #include #include @@ -14,18 +16,24 @@ #include #include #include +#include #include #include #include "ProxySettings.h" +#include "TcpConnection.h" +#include "TcpServer.h" +#include "TcpClient.h" #include "Message.pb.h" namespace aws { namespace iot { namespace securedtunneling { + using namespace aws::iot::securedtunneling::connection; enum proxy_mode { UNKNOWN = 0, SOURCE = 1, DESTINATION = 2 }; - + + typedef std::pair, std::function> data_message; extern std::uint16_t const DEFAULT_PROXY_SERVER_PORT; extern std::string get_region_endpoint(std::string const ®ion, boost::property_tree::ptree const &settings); @@ -36,12 +44,22 @@ namespace aws { namespace iot { 
namespace securedtunneling { std::uint16_t proxy_port{ 0 }; std::string access_token { }; proxy_mode mode{ proxy_mode::UNKNOWN }; - std::string data_host { }; - std::uint16_t data_port { 0 }; boost::optional bind_address; boost::optional additional_ssl_verify_path; bool no_ssl_host_verify; - std::function on_listen_port_assigned; + std::function on_listen_port_assigned; + std::vector config_files; + /** + * Store mapping serviceId -> address:port + * The end point will store either source listening or destination service depends on the mode of local proxy. + */ + std::unordered_map serviceId_to_endpoint_map; + /** + * A flag to judge if v2 local proxy needs to fallback to communicate using v1 local proxy message format. + * v1 local proxy format fallback will be enabled when a tunnel is opened with no or 1 service id. + * If this is set to true, it means that v2 local proxy won't validate service id field. + */ + bool is_v1_message_format {false}; }; namespace @@ -66,13 +84,10 @@ namespace aws { namespace iot { namespace securedtunneling { explicit proxy_exception(std::string const & message) : message(message) {} proxy_exception(std::string const & message, boost::system::error_code const & ec) : message{ (boost::format("%1%; Underlying boost::system error: [%2%]") % message%ec.message()).str() }, boost_error_code(boost::make_optional(ec)) {} - proxy_exception(std::string const & message, boost::system::error_code const & ec, bool const & bind_failure) - : proxy_exception(message, ec) { local_port_bind_failure = bind_failure; } proxy_exception(boost::system::error_code const & ec) : message{ (boost::format("Boost::System error: [%1%]") % ec.message()).str() }, boost_error_code(boost::make_optional(ec)) {} virtual char const * what() const noexcept { return message.c_str(); } boost::optional error_code() const { return boost_error_code; } - bool is_local_port_bind_failure() const { return local_port_bind_failure; } proxy_exception(proxy_exception const &) = 
default; ~proxy_exception() = default; @@ -80,9 +95,9 @@ namespace aws { namespace iot { namespace securedtunneling { protected: std::string message; boost::optional boost_error_code; //boost system error code if the cause - bool local_port_bind_failure; }; + //this structure is pretty much *the* program visibility of all //async function handlers so it's likely to get a bit disorganized struct tcp_adapter_context @@ -91,55 +106,52 @@ namespace aws { namespace iot { namespace securedtunneling { io_ctx{ }, ec{ }, adapter_config{ cfg }, - acceptor{ io_ctx }, - resolver{ io_ctx }, ssl_ctx{ boost::asio::ssl::context::sslv23 }, wss{ nullptr }, + wss_resolver{ io_ctx }, wss_response{ }, - tcp_socket{ io_ctx }, stream_id{ -1 }, - local_port{ 0 }, + service_id{ "" }, + serviceId_to_streamId_map{}, + serviceId_to_tcp_server_map{}, + serviceId_to_tcp_client_map{}, bind_address_actual{ }, - is_web_socket_writing{ false }, is_web_socket_reading{ false }, - is_tcp_socket_writing{ false }, - is_tcp_socket_reading{ false } + is_service_ids_received{ false }, + web_socket_outgoing_message_queue{} { } - boost::asio::io_context io_ctx; - boost::system::error_code ec; - adapter_proxy_config const & adapter_config; - - tcp::acceptor acceptor; - tcp::resolver resolver; - boost::asio::ssl::context ssl_ctx; + boost::asio::io_context io_ctx; + boost::system::error_code ec; + adapter_proxy_config adapter_config; + boost::asio::ssl::context ssl_ctx; - shared_ptr wss; + shared_ptr wss; + tcp::resolver wss_resolver; //response of current wss connection upgrade request //we need this somewhere because it can(should) contain //information identifying this websocket connection //instance that we can tag operations/logging with for better //debuggability. 
- boost::beast::websocket::response_type wss_response; + boost::beast::websocket::response_type wss_response; - tcp::socket tcp_socket; //represents the current stream ID to expect data from //care should be taken how(if) this is updated directly - std::int32_t stream_id; - std::uint16_t local_port; - std::string bind_address_actual; + // To be deleted + std::int32_t stream_id; + std::string service_id; + std::unordered_map serviceId_to_streamId_map; + std::unordered_map serviceId_to_tcp_server_map; + std::unordered_map serviceId_to_tcp_client_map; + std::string bind_address_actual; //flag set to true while web socket data is being drained //necessary for better TCP socket recovery rather than destroying //what's in the buffer - bool is_web_socket_writing; //flag neccessary to know on TCP resets whether or not web socket //has a current read (usually should, but may not if - bool is_web_socket_reading; - //used to ensure command resets of TCP don't schedule multiple - //drains on TCP - bool is_tcp_socket_writing; - - bool is_tcp_socket_reading; + bool is_web_socket_reading; + bool is_service_ids_received; + std::queue web_socket_outgoing_message_queue; }; //simple re-usable structure for a basic retry strategy's state @@ -168,39 +180,36 @@ namespace aws { namespace iot { namespace securedtunneling { tcp_adapter_proxy(tcp_adapter_proxy &&) = default; int run_proxy(); - private: - void setup_tcp_socket(tcp_adapter_context &tac); + void setup_tcp_socket(tcp_adapter_context &tac, std::string const & service_id); + void setup_tcp_sockets(tcp_adapter_context &tac); //setup async io flow to connect tcp socket to the adapter config's data host/port - void async_setup_dest_tcp_socket(tcp_adapter_context &tac); - void async_setup_dest_tcp_socket_retry(tcp_adapter_context &tac, std::shared_ptr retry_config); - void async_setup_source_tcp_socket(tcp_adapter_context &tac); - void async_setup_source_tcp_socket_retry(tcp_adapter_context &tac, std::shared_ptr retry_config); - 
std::function after_setup_tcp_socket = nullptr; - + void async_setup_dest_tcp_socket(tcp_adapter_context &tac, std::string const & service_id); + void async_setup_dest_tcp_socket_retry(tcp_adapter_context &tac, std::shared_ptr retry_config, std::string const & service_id); + void async_setup_source_tcp_sockets(tcp_adapter_context &tac); + void async_setup_source_tcp_socket_retry(tcp_adapter_context &tac, std::shared_ptr retry_config, std::string service_id); + void initialize_tcp_clients(tcp_adapter_context &tac); + void initialize_tcp_servers(tcp_adapter_context &tac); void setup_web_socket(tcp_adapter_context &tac); - //setup async web socket, and as soon as connection is up, setup async ping schedule void async_setup_web_socket(tcp_adapter_context &tac); - std::function after_setup_web_socket = nullptr; //Call in order to close and reset the TCP connection. If error code is set //then the reset is intentionally reset via web socket, and retries //occur definitely (regardless of retry configuration) - void tcp_socket_reset(tcp_adapter_context &tac, std::function then_what); + void tcp_socket_reset_all(tcp_adapter_context &tac, std::function post_reset_operation); + void tcp_socket_reset(tcp_adapter_context &tac, std::string service_id, std::function post_reset_operation); + tcp_connection::pointer get_tcp_connection(tcp_adapter_context &tac, std::string service_id); - void tcp_socket_error(tcp_adapter_context &tac, boost::system::error_code const &_ec); + void tcp_socket_error(tcp_adapter_context &tac, boost::system::error_code const &_ec, std::string const & service_id); //sets up a web socket read loop that will read, and ignore most messages until a stream start //is read and then do something with it (likely, connect to configured endpoint) - void async_web_socket_read_until_stream_start(tcp_adapter_context &tac); - std::function on_recieve_stream_start = nullptr; - - - void async_setup_bidirectional_data_transfers(tcp_adapter_context &tac); + void 
async_web_socket_read_until_stream_start(tcp_adapter_context &tac, std::string const & service_id); //setup async web socket repeat loop void async_web_socket_read_loop(tcp_adapter_context &tac); + void async_web_socket_read_loop_for_service_ids(tcp_adapter_context &tac); //handlers for messages during the web socket read loop return false //if the read loop should be stopped after processing the message. @@ -209,8 +218,6 @@ namespace aws { namespace iot { namespace securedtunneling { //followed by data void on_web_socket_read(tcp_adapter_context &tac, boost::system::error_code const &ec, size_t bytes_read); - std::function on_control_message = nullptr; - std::function on_data_message = nullptr; bool ignore_message(tcp_adapter_context &tac, message const &message); bool ignore_message_and_stop(tcp_adapter_context &tac, message const &message); bool forward_data_message_to_tcp_write(tcp_adapter_context &tac, message const &message); @@ -218,25 +225,22 @@ namespace aws { namespace iot { namespace securedtunneling { //invokes after_setup_web_socket_read_until_stream_start() after stream start is encountered bool async_wait_for_stream_start(tcp_adapter_context &tac, message const &message); - - void async_tcp_socket_read_loop(tcp_adapter_context &tac); - //if the below handler is set and an error occurs, call it to handle the tcp error - //rather than invoking the standard tcp error handling path - std::function on_tcp_error = nullptr; + bool async_wait_for_service_ids(tcp_adapter_context &tac); + void async_tcp_socket_read_loop(tcp_adapter_context &tac, std::string const & service_id); //below loop does continuous writes to TCP socket from the TCP adapter //context's tcp_write_buffer. 
After consuming chunks out of the buffer //the behavior will be to check - void async_tcp_write_buffer_drain(tcp_adapter_context &tac); - std::function on_tcp_write_buffer_drain_complete = nullptr; - - void async_setup_web_socket_write_buffer_drain(tcp_adapter_context &tac); - std::function on_web_socket_write_buffer_drain_complete; + void async_tcp_write_buffer_drain(tcp_adapter_context &tac, std::string service_id); + + void async_setup_bidirectional_data_transfers(tcp_adapter_context &tac, std::string const & service_id); + void async_setup_web_socket_write_buffer_drain(tcp_adapter_context &tac, std::string const & service_id); //returns a boolean that indicates if another web socket data read message can be put //onto the tcp write buffer. We have no way of knowing what the next message is and if //it will be too big to process, thus we don't do the read applying back pressure on //the socket. Implicitly, this means that an async_read is not happening on the web socket + bool tcp_has_enough_write_buffer_space(tcp_connection::pointer connection); bool tcp_has_enough_write_buffer_space(tcp_adapter_context const &tac); //returns a boolean that indicates if another tcp socket read's data can be put on the @@ -244,17 +248,15 @@ namespace aws { namespace iot { namespace securedtunneling { //because we can limit the amout of data we pull from a read, even a single byte means we //can perform the read. 
//Not setting up the read applies back pressure on the tcp socket - bool wss_has_enough_write_buffer_space(tcp_adapter_context const &tac); + bool wss_has_enough_write_buffer_space(boost::beast::multi_buffer const &buffer); void handle_web_socket_control_message(tcp_adapter_context &tac, boost::beast::websocket::frame_type kind, boost::beast::string_view payload); bool is_valid_stream_id(tcp_adapter_context const& tac, message const &message); - //same handler used to send any message - std::function after_send_message; void async_send_message(tcp_adapter_context &tac, message const &message); - void async_send_stream_start(tcp_adapter_context &tac); - void async_send_stream_reset(tcp_adapter_context &tac, std::int32_t stream_id); + void async_send_stream_start(tcp_adapter_context &tac, std::string const & service_id); + void async_send_stream_reset(tcp_adapter_context &tac, std::string const & service_id); //handler for successfully sent ping will delay the next one void async_ping_handler_loop(tcp_adapter_context &tac, @@ -263,9 +265,10 @@ namespace aws { namespace iot { namespace securedtunneling { std::shared_ptr ping_timer, boost::system::error_code const &ec); - void clear_buffers(); + void clear_ws_buffers(tcp_adapter_context &tac); + void clear_tcp_connection_buffers(tcp_connection::pointer connection); - void tcp_socket_ensure_closed(tcp_adapter_context &tac); + void tcp_socket_ensure_closed(boost::asio::ip::tcp::socket & tcp_socket); //closes the websocket connection //1 - shutdown the receive side of TCP @@ -274,32 +277,43 @@ namespace aws { namespace iot { namespace securedtunneling { //4 - perform teardown procedure on websocket void web_socket_close_and_stop(tcp_adapter_context &tac); - void async_resolve_destination_for_connect(tcp_adapter_context &tac, std::shared_ptr retry_config, boost::system::error_code const &ec, tcp::resolver::results_type results); + void async_resolve_destination_for_connect(tcp_adapter_context &tac, std::shared_ptr 
retry_config, std::string const & service_id, boost::system::error_code const &ec, tcp::resolver::results_type results); bool process_incoming_websocket_buffer(tcp_adapter_context &tac, boost::beast::multi_buffer &message_buffer); bool parse_protobuf_and_consume_input(boost::beast::multi_buffer &message_buffer, size_t data_length, message &msg); + bool handle_control_message_service_ids(tcp_adapter_context &tac, message const & message); + + void handle_listen_port_assigned(const std::uint16_t & port_assigned, const std::string & service_id, tcp_adapter_context &tac); + + bool validate_service_ids_from_configuration(tcp_adapter_context &tac, std::unordered_set service_id_list); + + std::tuple get_host_and_port( const std::string & endpoint, const std::string & default_host); + + bool fall_back_to_v1_message_format(std::unordered_map const & serviceId_to_endpoint_map); + + void async_send_message_to_web_socket(tcp_adapter_context &tac, std::shared_ptr const& ss, std::string const & service_id); + + void async_setup_destination_tcp_sockets(tcp_adapter_context &tac); + private: logger log; ptree const & settings; - adapter_proxy_config const & adapter_config; + adapter_proxy_config adapter_config; //below messages are re-used by local functions/callbacks as necessary to put the data in the //right object (protobuf) then serialize to a Boost Asio buffer to actually send/recv message outgoing_message; - boost::beast::flat_buffer outgoing_message_buffer; message incoming_message; boost::beast::multi_buffer incoming_message_buffer; boost::beast::flat_buffer message_parse_buffer; - - boost::beast::multi_buffer tcp_write_buffer; - boost::beast::flat_buffer tcp_read_buffer; - - //Buffer sequence storing the raw bytes read from the tcp socket reads - //to send over web socket. The bytes in this buffer represent the raw application - //data not already packaged in protobuf messages. This allows us to - //condense smaller TCP read chunks to bigger web socket writes. 
It also makes - //it impossible to "inject" a non-data message in data sequence order - boost::beast::multi_buffer web_socket_data_write_buffer; + // function object defines what to do after set up web socket + std::function after_setup_web_socket = nullptr; + // function object defines what to do after receiving service id + std::function after_get_service_ids = nullptr; + // function object defines what to do after receiving control message from web socket connection + std::function on_web_socket_control_message = nullptr; + // function object defines what to do after receiving data message from web socket connection + std::function on_web_socket_data_message = nullptr; }; }}} diff --git a/src/TcpClient.h b/src/TcpClient.h new file mode 100644 index 0000000..0b7162a --- /dev/null +++ b/src/TcpClient.h @@ -0,0 +1,32 @@ +// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +#pragma once +#include +#include +#include +#include "Message.pb.h" + +namespace aws { namespace iot { namespace securedtunneling { namespace connection { + class tcp_client + { + public: + typedef boost::shared_ptr pointer; + tcp_client(boost::asio::io_context & io_context, std::size_t write_buf_size, std::size_t read_buf_size, std::size_t ws_write_buf_size) + : resolver_(io_context) + { + connection_ = + tcp_connection::create(io_context, write_buf_size, read_buf_size, ws_write_buf_size); + } + static pointer create(boost::asio::io_context& io_context, std::size_t const & write_buf_size, std::size_t const & read_buf_size, std::size_t const & ws_write_buf_size) + { + return pointer(new tcp_client(io_context, write_buf_size, read_buf_size, ws_write_buf_size)); + } + + tcp_connection::pointer connection_; + tcp::resolver resolver_; + // function object defines what to do after set up a tcp socket + std::function after_setup_tcp_socket = nullptr; + // function object defines what to do receiving control message: stream start + 
std::function on_receive_stream_start = nullptr; + }; +}}}} \ No newline at end of file diff --git a/src/TcpConnection.h b/src/TcpConnection.h new file mode 100644 index 0000000..34a7894 --- /dev/null +++ b/src/TcpConnection.h @@ -0,0 +1,70 @@ +// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +#pragma once +#include +#include +#include +#include +#include "Message.pb.h" + +namespace aws { namespace iot { namespace securedtunneling { namespace connection { + using message = com::amazonaws::iot::securedtunneling::Message; + using boost::asio::ip::tcp; + class tcp_connection + : public boost::enable_shared_from_this + { + public: + typedef boost::shared_ptr pointer; + + static pointer create(boost::asio::io_context& io_context, std::size_t const & write_buf_size, std::size_t const & read_buf_size, std::size_t ws_write_buf_size) + { + return pointer(new tcp_connection(io_context, write_buf_size, read_buf_size, ws_write_buf_size)); + } + + tcp::socket& socket() + { + return socket_; + } + + tcp_connection(boost::asio::io_context & io_context, std::size_t write_buf_size, std::size_t read_buf_size, std::size_t ws_write_buf_size) + : socket_(io_context) + , tcp_write_buffer_(write_buf_size) + , tcp_read_buffer_(read_buf_size) + , web_socket_data_write_buffer_(ws_write_buf_size) + { + } + + tcp::socket socket_; + // A buffer holding data writes to customer's application + boost::beast::multi_buffer tcp_write_buffer_; + // A buffer holding data reads from customer's application + boost::beast::flat_buffer tcp_read_buffer_; + /** + * A buffer holding data that will be sent to secure tunneling server through web socket connection. + * This buffer will only hold data belongs to its own stream in a multiplexed tunnel. + */ + boost::beast::flat_buffer outgoing_message_buffer_; + //Buffer sequence storing the raw bytes read from the tcp socket reads + //to send over web socket. 
The bytes in this buffer represent the raw application + //data not already packaged in protobuf messages. This allows us to + //condense smaller TCP read chunks to bigger web socket writes. It also makes + //it impossible to "inject" a non-data message in data sequence order + boost::beast::multi_buffer web_socket_data_write_buffer_; + // Is this tcp socket currently writing + bool is_tcp_socket_writing_{ false }; + // Is this tcp socket currently reading + bool is_tcp_socket_reading_{ false }; + // function object defines what to do after send a message + std::function after_send_message; + // function object defines what to do upon receiving control message + std::function on_control_message = nullptr; + // function object defines what to do upon receiving data message + std::function on_data_message = nullptr; + // function object defines what to do if there is a tcp error occurred + std::function on_tcp_error = nullptr; + // function object defines what to do when tcp_write_buffer_ drain has completed + std::function on_tcp_write_buffer_drain_complete = nullptr; + // function object defines what to do when web_socket_data_write_buffer_ drain has completed + std::function on_web_socket_write_buffer_drain_complete = nullptr; + }; +}}}} \ No newline at end of file diff --git a/src/TcpServer.h b/src/TcpServer.h new file mode 100644 index 0000000..65c3267 --- /dev/null +++ b/src/TcpServer.h @@ -0,0 +1,39 @@ +// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +#pragma once +#include +#include +#include +#include "TcpConnection.h" + +namespace aws { namespace iot { namespace securedtunneling { namespace connection { + using boost::asio::ip::tcp; + class tcp_server + { + public: + typedef boost::shared_ptr pointer; + tcp_server(boost::asio::io_context & io_context, std::size_t write_buf_size, std::size_t read_buf_size, std::size_t ws_write_buf_size) + : acceptor_(io_context) + , resolver_(io_context) + { + connection_ = + tcp_connection::create(io_context, write_buf_size, read_buf_size, ws_write_buf_size); + } + + static pointer create(boost::asio::io_context& io_context, std::size_t const & write_buf_size, std::size_t const & read_buf_size, std::size_t const & ws_write_buf_size) + { + return pointer(new tcp_server(io_context, write_buf_size, read_buf_size, ws_write_buf_size)); + } + + tcp::acceptor & acceptor() + { + return acceptor_; + } + + tcp::acceptor acceptor_; + tcp::resolver resolver_; + tcp_connection::pointer connection_; + // function object defines what to do after set up a tcp socket + std::function after_setup_tcp_socket = nullptr; + }; +}}}} \ No newline at end of file diff --git a/src/config/ConfigFile.cpp b/src/config/ConfigFile.cpp new file mode 100644 index 0000000..71fbf62 --- /dev/null +++ b/src/config/ConfigFile.cpp @@ -0,0 +1,219 @@ +// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ConfigFile.h" + +using std::uint16_t; +using std::endl; +using std::exception; +using std::get; +using std::string; +using std::tuple; +using std::unordered_set; +using std::vector; +using std::unordered_map; + +namespace filesys = boost::filesystem; +using boost::log::trivial::trace; +using boost::log::trivial::debug; +using boost::log::trivial::info; +using boost::log::trivial::warning; +using boost::log::trivial::error; +using boost::log::trivial::fatal; +using logger = boost::log::sources::severity_logger; + + + +namespace aws { namespace iot { namespace securedtunneling { namespace config_file { + logger log; + /** + * Check if given path is a valid directory + * @param file_dir : directory file path + * @return true: valid configuration. false: invalid configuration + */ + bool is_valid_directory(string const & file_dir) { + bool is_dir = false; + try { + filesys::path path_obj(file_dir); + /** + * Validate if: + * 1. Directory path exists + * 2. Is a directory + * 3. Is an empty folder + */ + if (filesys::exists(path_obj) && filesys::is_directory(path_obj) && (!filesys::is_empty(path_obj))) + { + is_dir = true; + } + else if (!filesys::exists(path_obj)) + BOOST_LOG_SEV(log, debug) << file_dir << " does not exist!"; + else if (!filesys::is_directory(path_obj)) + BOOST_LOG_SEV(log, debug) << file_dir << " is not a directory!"; + else if (filesys::is_empty(path_obj)) + BOOST_LOG_SEV(log, debug) << file_dir << " empty dir! 
Please add configuration files."; + else + BOOST_LOG_SEV(log, debug) << file_dir << " is not valid!"; + } + catch (const filesys::filesystem_error & e) { + BOOST_LOG_SEV(log, fatal) << e.what(); + } + return is_dir; + } + + /** + * Recursively get the list of all files in the given directory + * @param file_dir : directory file path + * @return file paths under the given directory and its subdirectories. + */ + vector get_all_files(const string & file_dir) { + vector files_under_directory; + filesys::recursive_directory_iterator end_iter; + for (filesys::recursive_directory_iterator dir_itr(file_dir); dir_itr != end_iter; ++dir_itr) { + BOOST_LOG_SEV(log, info) << "Detect configuration files: "; + if (filesys::is_regular_file(dir_itr->status())) { + BOOST_LOG_SEV(log, info) << dir_itr->path().generic_string(); + files_under_directory.push_back(dir_itr->path().generic_string()); + } + } + return files_under_directory; + } + + void read_service_ids_from_config_files(std::vector const & file_paths, unordered_set const & service_ids, unordered_map & serviceId_to_endpoint_mapping) + { + for (auto file_path: file_paths) + { + boost::property_tree::ptree pt; + // If find all the service ids, stop searching + if (serviceId_to_endpoint_mapping.size() == service_ids.size()) + { + break; + } + // Parse file in .ini format, if having issues, skip this file and read the next file in the folder. + try { + boost::property_tree::ini_parser::read_ini(file_path, pt); + } + catch (const std::exception & e) { + BOOST_LOG_SEV(log, warning) << "Fail to parse " << file_path << " .Please make sure your file is in .ini format."; + BOOST_LOG_SEV(log, warning) << "Error message from parsing: " << e.what() << " .Continue to the next file."; + continue; + } + for (auto service_id: service_ids) { + /** + * Search for service ids that does not have a port mapping detected. + * If more than one service id mappings found in the configuration files, use the first one found. 
+ */ + if (serviceId_to_endpoint_mapping.find(service_id) != serviceId_to_endpoint_mapping.end()) + { + continue; + } + try { + string endpoint = pt.get(service_id); + serviceId_to_endpoint_mapping.insert({service_id, endpoint}); + } + catch (boost::property_tree::ptree_bad_path &e) { + BOOST_LOG_SEV(log, warning) << "Fail to read file: " << file_path << ". Error message: " << e.what() << ". Ignore this file."; + } + } + } + } + + /** + * Interpret the CLI mappings for -s and -d and use this information to build: service_id to endpoint(address:port or port) mapping + * @param cli_input: the string from -s and -d in the CLI. Example: -s SSH1=5555,SSH2=6666 + * @param serviceId_to_endpoint_mapping: the mapping to be updated: service_id -> endpoint + * Mapping update is in place. + */ + void update_port_mapping(const string & input, unordered_map & serviceId_to_endpoint_mapping) + { + vector splitting_1st_res; + // Different mappings are delimited by , + boost::split(splitting_1st_res, input, boost::is_any_of(","), boost::algorithm::token_compress_on); + + if (splitting_1st_res.empty()) { + throw std::runtime_error("Must provide at least one port or port mapping for destination-app!"); + } + + // Process each port mapping tags + for (auto res: splitting_1st_res) { + // Ignore empty string + if (res.empty()) continue; + vector splitting_2rd_res; + // Inside the mapping, the service_id and port are delimited by = + boost::split(splitting_2rd_res, + res, + boost::algorithm::is_any_of("="), boost::algorithm::token_compress_on); + if (splitting_2rd_res.size() != 2) { + /** For v1 format, v2 local proxy will continue to support + * Example 1: Local proxy starts in v1 source mode: + * ./localproxy -r us-east-1 -s 3389 -t + * cli_input will be 3389 + * Example 2: Local proxy starts in v1 destination mode: + * ./localproxy -r us-east-1 -d localhost:22 -t + * cli_input will be localhost:22 + */ + if (splitting_1st_res.size() == 1 && splitting_2rd_res.size() == 1) { + 
boost::trim(splitting_2rd_res[0]); + serviceId_to_endpoint_mapping[""] = splitting_2rd_res[0]; + return; + } + else + { + throw std::runtime_error("Wrong format for the port mappings! Example: SSH1=5555,SSH2=6666."); + } + } + + // Trim whitespace and insert + string service_id = boost::trim_copy(splitting_2rd_res[0]); + string endpoint = boost::trim_copy(splitting_2rd_res[1]); + + if (service_id.empty() || endpoint.empty()) { + string error_message = + string("Wrong format for the port mappings: ") + res + string(" .Example: SSH1=5555"); + throw std::runtime_error(error_message); + } + // Check if it's a duplicate mapping, ignore if it has been provided + if (serviceId_to_endpoint_mapping.find(service_id) != serviceId_to_endpoint_mapping.end()) { + BOOST_LOG_SEV(log, warning) << "Duplicate mappings, ignore. This mapping already exists: " << service_id << " : " + << serviceId_to_endpoint_mapping[service_id]; + continue; + } + serviceId_to_endpoint_mapping[service_id] = endpoint; + } + } + + std::string get_default_port_mapping_dir() + { + boost::filesystem::path full_path(boost::filesystem::current_path()); + return (boost::format("%1%/config") % full_path.string()).str(); + } +}}}} \ No newline at end of file diff --git a/src/config/ConfigFile.h b/src/config/ConfigFile.h new file mode 100644 index 0000000..7053099 --- /dev/null +++ b/src/config/ConfigFile.h @@ -0,0 +1,23 @@ +// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +#pragma once +#include +#include +#include +#include +#include + +using std::string; +using std::unordered_set; +using std::vector; +using std::unordered_map; + +namespace aws { namespace iot { namespace securedtunneling { namespace config_file { + bool is_valid_directory(string const & file_dir); + std::vector get_all_files(const string & file_dir); + std::string get_default_port_mapping_dir(); + void read_service_ids_from_config_files(std::vector const & file_paths, + unordered_set const & service_ids, + unordered_map & serviceId_to_endpoint_mapping); + void update_port_mapping(const string & cli_input, unordered_map & serviceId_to_endpoint_mapping); +}}}} diff --git a/src/main.cpp b/src/main.cpp index da9f418..e86ba99 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -1,13 +1,16 @@ // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 + #include -#include #include #include #include #include #include #include +#include +#include + #include #include #include @@ -19,10 +22,10 @@ #include #include #include -#include #include "ProxySettings.h" #include "TcpAdapterProxy.h" +#include "config/ConfigFile.h" using std::uint16_t; using std::endl; @@ -30,6 +33,9 @@ using std::exception; using std::get; using std::string; using std::tuple; +using std::unordered_set; +using std::vector; +using std::unordered_map; using boost::property_tree::ptree; using boost::program_options::value; @@ -71,31 +77,6 @@ tuple get_host_and_port(string const & endpoint, uint16_t defa } } -tuple get_host_and_port(string const & endpoint, std::string default_host) -{ - try - { - size_t position = endpoint.find(':'); - if (position != string::npos && position != endpoint.length() - 1) - { - const string host = endpoint.substr(0, position); - const string port = endpoint.substr(position + 1, endpoint.length() - (position + 1)); - const uint16_t portnum = static_cast(stoi(port, &position)); - if 
(port.length() == 0 || position != port.length()) throw std::invalid_argument(""); - return std::make_tuple(host, portnum); - } - else - { - if (position == endpoint.length() - 1) throw std::invalid_argument(""); - return std::make_tuple(default_host, stoi(endpoint)); - } - } - catch(std::invalid_argument &) - { - throw std::invalid_argument((boost::format("Invalid endpoint specified: %1%") % endpoint).str()); - } -} - void log_formatter(boost::log::formatting_ostream& strm, boost::log::record_view const& rec) { //Example log format: @@ -142,16 +123,16 @@ void set_logging_filter(std::uint16_t level_numeric) } } -void init_logging() +void init_logging(std::uint16_t &logging_level) { boost::log::add_common_attributes(); boost::log::add_console_log(std::cout, boost::log::keywords::format = boost::phoenix::bind(&log_formatter, boost::log::expressions::stream, boost::log::expressions::record)); - set_logging_filter(4); + set_logging_filter(logging_level); } bool process_cli(int argc, char ** argv, adapter_proxy_config &cfg, ptree &settings, std::uint16_t &logging_level) { - init_logging(); + using namespace aws::iot::securedtunneling::config_file; #ifdef _AWSIOT_TUNNELING_NO_SSL std::cerr << "SSL is disabled" << std::endl; #endif @@ -163,8 +144,8 @@ bool process_cli(int argc, char ** argv, adapter_proxy_config &cfg, ptree &setti ("access-token,t", value()->required(), "Client access token") ("proxy-endpoint,e", value(), "Endpoint of proxy server with port (if not default 443). Example: data.tunneling.iot.us-east-1.amazonaws.com:443") ("region,r", value(), "Endpoint region where tunnel exists. 
Mutually exclusive flag with --proxy-endpoint") - ("source-listen-port,s", value(), "Assigns source mode and sets the port to listen to.") - ("destination-app,d", value(), "Assigns destination mode and sets the endpoint with the arg in [host]: or just (default localhost) format.") + ("source-listen-port,s", value(), "Sets the mappings between source listening ports and service identifier. Example: SSH1=5555 or 5555") + ("destination-app,d", value(), "Sets the mappings between the endpoint(address:port/port) and service identifier. Example: SSH1=127.0.0.1:22 or 22") ("local-bind-address,b", value(&cfg.bind_address), "Assigns a specific local address to bind to for listening in source mode or a local socket address for destination mode.") ("capath,c", value(&cfg.additional_ssl_verify_path), "Adds the directory containing certificate authority files to be used for performing verification") ("no-ssl-host-verify,k", boost::program_options::bool_switch(&cfg.no_ssl_host_verify), "Turn off SSL host verification") @@ -172,6 +153,8 @@ bool process_cli(int argc, char ** argv, adapter_proxy_config &cfg, ptree &setti ("settings-json", value(), "Use the input JSON file to apply fine grained settings.") ("config", value(), "Use the supplied configuration file to apply CLI args. Actual CLI args override the contents of this file") ("verbose,v", value()->default_value(4), "Logging level to standard out. [0, 255] (0=off, 1=fatal, 2=error, 3=warning, 4=info, 5=debug, >=6=trace)") + ("mode,m", value(), "The mode local proxy will run: src(source) or dst(destination)") + ("config-dir", value(), "Set the configuration directory where service identifier mappings are stored. 
If not specified, will read mappings from default directory ./config (same directory where local proxy binary is running)") ; store(parse_command_line(argc, argv, cliargs_desc), vm); @@ -187,6 +170,10 @@ bool process_cli(int argc, char ** argv, adapter_proxy_config &cfg, ptree &setti return false; } + //collect and normalize CLI args to usable inputs + logging_level = vm["verbose"].as(); + init_logging(logging_level); + bool token_cli_warning = vm.count("access-token") != 0; //dont allow above settings to be impacted by configuration file or environment variable parsers @@ -214,10 +201,6 @@ bool process_cli(int argc, char ** argv, adapter_proxy_config &cfg, ptree &setti boost::property_tree::json_parser::read_json(vm["settings-json"].as(), settings); } - if (vm.count("source-listen-port") + vm.count("destination-app") > 1 || vm.count("source-listen-port") + vm.count("destination-app") == 0) - { - throw std::runtime_error("Must specify one and only one of --source-listen-port/-s or --destination-app/-d options"); - } if (vm.count("region") + vm.count("proxy-endpoint") > 1 || vm.count("region") + vm.count("proxy-endpoint") == 0) { @@ -226,8 +209,6 @@ bool process_cli(int argc, char ** argv, adapter_proxy_config &cfg, ptree &setti //trigger validation of required options notify(vm); - //collect and normalize CLI args to usable inputs - logging_level = vm["verbose"].as(); if (token_cli_warning) { BOOST_LOG_TRIVIAL(warning) << "Found access token supplied via CLI arg. Consider using environment variable " << TOKEN_ENV_VARIABLE << " instead"; @@ -243,27 +224,125 @@ bool process_cli(int argc, char ** argv, adapter_proxy_config &cfg, ptree &setti cfg.proxy_port = std::get<1>(proxy_host_and_port); cfg.mode = vm.count("destination-app") == 1 ? 
proxy_mode::DESTINATION : proxy_mode::SOURCE; - if (cfg.mode == proxy_mode::DESTINATION) + + if (vm.count("mode")) + { + string mode = vm["mode"].as(); + if (mode != "src" && mode != "dst" && mode != "source" && mode != "destination") + { + throw std::runtime_error("Mode value is wrong! Allowed values are: src, dst, source, destination"); + } + // Assign the value to the right mode + if (mode == "src" || mode == "source") + { + cfg.mode = proxy_mode::SOURCE; + } + else if (mode == "dst" || mode == "destination") + { + cfg.mode = proxy_mode::DESTINATION; + } + else + { + throw std::runtime_error("Internal error. Mode value is wrong!"); + } + } + + /** Invalid input combinations for: -s, -d and --mode + * 1. -s and -d should NOT be used together + * 2. -s and a mode value of dst/destination should NOT be used together + * 3. -d and a mode value of src/source should NOT be used together. + * 4. At least one of the parameters should be provided to start local proxy in either source or destination mode: + * -s, -d or -m + */ + if (vm.count("source-listen-port") + vm.count("destination-app") > 1) { - string data_endpoint = vm["destination-app"].as(); - transform(data_endpoint.begin(), data_endpoint.end(), data_endpoint.begin(), ::tolower); - tuple data_endpoint_and_point = get_host_and_port(data_endpoint, ""); - cfg.data_host = std::get<0>(data_endpoint_and_point); - cfg.data_port = std::get<1>(data_endpoint_and_point); + throw std::runtime_error("Must specify one and only one of --source-listen-port/-s or --destination-app/-d"); } - else + else if (vm.count("source-listen-port") + vm.count("destination-app") + vm.count("mode")== 0) { - //data host remains unused in source mode - cfg.data_port = vm["source-listen-port"].as(); - cfg.on_listen_port_assigned = [](std::uint16_t listen_port) - { - BOOST_LOG_TRIVIAL(info) << "Listen port assigned: " << listen_port; - }; + throw std::runtime_error("Must specify one of --source-listen-port/-s or --destination-app/-d or --mode"); + } + else if 
(vm.count("source-listen-port") && vm.count("mode") && cfg.mode == proxy_mode::DESTINATION ) + { + throw std::runtime_error("-s and --mode have mismatched mode. Mode is set to destination!"); + } + else if (vm.count("destination-app") && vm.count("mode") && cfg.mode == proxy_mode::SOURCE ) + { + throw std::runtime_error("-d and --mode have mismatched mode. Mode is set to source!"); + } + + /** + * 1. Generate from the CLI parsing + * 2. Have a reserve mapping for port_mappings + */ + if (vm.count("destination-app")) + { + cfg.mode = proxy_mode::DESTINATION; + update_port_mapping(vm["destination-app"].as(), cfg.serviceId_to_endpoint_map); + // Support v1 local proxy format + if (cfg.serviceId_to_endpoint_map.size() == 1 && cfg.serviceId_to_endpoint_map.begin()->first.empty()) + { + BOOST_LOG_TRIVIAL(debug) << "v2 local proxy starts with v1 local proxy format"; + } + else + { + BOOST_LOG_TRIVIAL(debug) << "Detect port mapping configuration provided through CLI in destination mode:"; + BOOST_LOG_TRIVIAL(debug) << "----------------------------------------------------------"; + for (auto m: cfg.serviceId_to_endpoint_map) + { + BOOST_LOG_TRIVIAL(debug) << m.first << " = " << m.second; + } + BOOST_LOG_TRIVIAL(debug) << "----------------------------------------------------------"; + } + } + + if (vm.count("source-listen-port")) + { + cfg.mode = proxy_mode::SOURCE; + update_port_mapping(vm["source-listen-port"].as(), cfg.serviceId_to_endpoint_map); + // Support v1 local proxy format + if (cfg.serviceId_to_endpoint_map.size() == 1 && cfg.serviceId_to_endpoint_map.begin()->first.empty()) + { + BOOST_LOG_TRIVIAL(debug) << "v2 local proxy starts with v1 local proxy format"; + } + else + { + BOOST_LOG_TRIVIAL(debug) << "Detect port mapping configuration provided through CLI in source mode:"; + BOOST_LOG_TRIVIAL(debug) << "----------------------------------------------------------"; + for (auto m: cfg.serviceId_to_endpoint_map) + { + BOOST_LOG_TRIVIAL(debug) << m.first << " = " << 
m.second; + } + BOOST_LOG_TRIVIAL(debug) << "----------------------------------------------------------"; + } + } + + if (vm.count("config-dir")) + { + string config_dir = vm["config-dir"].as(); + BOOST_LOG_TRIVIAL(debug) << "Detect port mapping configuration provided through configuration directory :" << config_dir; + // Run validation against the input + if (!is_valid_directory(config_dir)) { + std::string error_message = std::string("Invalid configuration directory: ") + config_dir; + throw std::runtime_error(error_message); + } + cfg.config_files = get_all_files(config_dir); + } + else if (is_valid_directory(get_default_port_mapping_dir())) + { + // read default directory, if no configuration directory is provided. + cfg.config_files = get_all_files(get_default_port_mapping_dir()); + } + + if (cfg.mode == proxy_mode::SOURCE && cfg.config_files.empty() && cfg.serviceId_to_endpoint_map.empty()) + { + BOOST_LOG_TRIVIAL(debug) << "Local proxy does not detect any port mapping configuration. 
Will pick up random ports to run in source mode."; + } return true; } + int main(int argc, char ** argv) { try @@ -287,3 +366,4 @@ int main(int argc, char ** argv) return EXIT_SUCCESS; } + diff --git a/test/AdapterTests.cpp b/test/AdapterTests.cpp index db6b9ea..836f8cc 100644 --- a/test/AdapterTests.cpp +++ b/test/AdapterTests.cpp @@ -5,8 +5,15 @@ #include #include - +#include #include +#include +#include +#include +#include +#include +#include +#include #include #include #include "Message.pb.h" @@ -16,6 +23,8 @@ #include #include #include +#include +#include using boost::property_tree::ptree; using boost::system::errc::errc_t; @@ -34,7 +43,6 @@ namespace aws { namespace iot { namespace securedtunneling { namespace test { cfg.proxy_host = ws_endpoint.address().to_string(); cfg.proxy_port = ws_endpoint.port(); - cfg.data_host = LOCALHOST; //tests should always be pointing here } void apply_test_settings(ptree &settings) @@ -45,13 +53,146 @@ namespace aws { namespace iot { namespace securedtunneling { namespace test settings.put(KEY_WEB_SOCKET_CONNECT_RETRY_COUNT, 0); settings.put(KEY_WEB_SOCKET_DATA_ERROR_RETRY, false); } + + uint16_t get_available_port(boost::asio::io_context & io_ctx) + { + boost::asio::ip::tcp::acceptor acceptor(io_ctx); + boost::asio::ip::tcp::endpoint endPoint(boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), 0)); + acceptor.open(endPoint.protocol()); + acceptor.set_option(boost::asio::ip::tcp::acceptor::reuse_address(true)); + acceptor.bind(endPoint); + acceptor.listen(); + boost::asio::ip::tcp::endpoint le = acceptor.local_endpoint(); + acceptor.close(); + return (uint16_t)le.port(); + } }}}} using namespace std; using namespace aws::iot::securedtunneling::test; +namespace aws { namespace iot { namespace securedtunneling { namespace test { namespace config { + std::string const DEFAULT_CONFIG_DIR_SUFFIX = "/config"; + /** + * Note: catch2 does not support mocking. So for now, create the files and directories. 
+ * For future, we can integrate with https://github.com/matepek/catch2-with-gmock + */ + TEST_CASE("Unit tests for ConfigFile", "[config]") { + namespace fs = boost::filesystem; + // Test case set up + fs::path full_path(boost::filesystem::current_path()); + string current_dir = fs::canonical(full_path).string(); + + string const test_dir = "testDir"; + if (fs::exists(test_dir)) + { + try { + fs::remove_all(test_dir); + } + catch (const fs::filesystem_error & e) { + std::cout << "Error deleting test dir " << e.what() << std::endl; + } + } + REQUIRE(true == fs::create_directory(test_dir)); + + string const test_config_file_name = "configFile"; + bool ok = static_cast(std::ofstream(test_config_file_name)); + REQUIRE(ok == true); + /** + * Create config file for unit test + * SSH1=555 + */ + ofstream test_file; + test_file.open(test_config_file_name); + string identifier = "SSH1"; + string endpoint = "5555"; + std::vector tmp{identifier, endpoint}; + std::string file_content = boost::algorithm::join(tmp, "= "); + test_file << file_content; + test_file.close(); + + SECTION("Test is_valid_directory: valid directory") { + CHECK(true == aws::iot::securedtunneling::config_file::is_valid_directory(current_dir)); + } + + SECTION("Test invalid directory") { + CHECK(false == aws::iot::securedtunneling::config_file::is_valid_directory("a")); + } + + SECTION("Test is_valid_directory: empty directory") { + CHECK(false == aws::iot::securedtunneling::config_file::is_valid_directory(test_dir)); + } + + SECTION("Test is_valid_directory: pass a file instead of a directory") { + CHECK(false == aws::iot::securedtunneling::config_file::is_valid_directory(test_config_file_name)); + } + + SECTION("Test happy path for get_all_files") { + CHECK_NOTHROW(aws::iot::securedtunneling::config_file::get_all_files(current_dir)); + } + + SECTION("Test happy path for read_service_ids_from_config_files, 1 service id") { + std::unordered_map serviceId_to_endpoint_mapping {}; + std::vector file_paths 
{test_config_file_name}; + unordered_set service_ids {}; + service_ids.insert(identifier); + aws::iot::securedtunneling::config_file::read_service_ids_from_config_files(file_paths, service_ids, serviceId_to_endpoint_mapping); + CHECK(serviceId_to_endpoint_mapping.size() == 1); + CHECK(serviceId_to_endpoint_mapping[identifier] == endpoint); + } + + SECTION("Test happy path for read_service_ids_from_config_files, 0 service id") { + std::unordered_map serviceId_to_endpoint_mapping {}; + std::vector file_paths {}; + unordered_set service_ids {}; + aws::iot::securedtunneling::config_file::read_service_ids_from_config_files(file_paths, service_ids, serviceId_to_endpoint_mapping); + CHECK(serviceId_to_endpoint_mapping.size() == 0); + } + + SECTION("Test happy path for find_service_ids") { + std::unordered_map serviceId_to_endpoint_mapping; + aws::iot::securedtunneling::config_file::update_port_mapping(file_content, serviceId_to_endpoint_mapping); + CHECK(serviceId_to_endpoint_mapping.size() == 1); + CHECK(serviceId_to_endpoint_mapping[identifier] == endpoint); + } + + SECTION("Test happy path for get_default_port_mapping_dir") { + CHECK(aws::iot::securedtunneling::config_file::get_default_port_mapping_dir() == current_dir + DEFAULT_CONFIG_DIR_SUFFIX); + } + + // Test case clean up. + int remove_file_stat = std::remove(test_config_file_name.c_str()); + if (remove_file_stat != 0) + { + std::cout << "Error deleting file " << test_config_file_name << std::endl; + } + // Can comment out below line if does not want to check for clean up. + CHECK(remove_file_stat == 0); + if (fs::exists(test_dir)) + { + try { + fs::remove_all(test_dir); + } + catch (const fs::filesystem_error & e) { + std::cout << "Error deleting test dir " << e.what() << std::endl; + } + } + } +}}}}} + + TEST_CASE( "Test source mode", "[source]") { + using namespace com::amazonaws::iot::securedtunneling; + /** + * Test case set up + * 1. Create tcp socket to acts as destination app. + * 2. 
Create web socket server to act as secure tunneling service (cloud side). + * 3. Configure adapter config used for the local proxy. + */ + boost::asio::io_context io_ctx{}; + tcp::socket client_socket{ io_ctx }; + boost::system::error_code ec; ptree settings; apply_test_settings(settings); @@ -62,12 +203,11 @@ TEST_CASE( "Test source mode", "[source]") { adapter_proxy_config adapter_cfg; apply_test_config(adapter_cfg, ws_address); adapter_cfg.mode = proxy_mode::SOURCE; - adapter_cfg.data_port = 0; adapter_cfg.bind_address = LOCALHOST; adapter_cfg.access_token = "foobar_token"; - uint16_t adapter_chosen_port = 0; - //capture the random listen port in source mode - adapter_cfg.on_listen_port_assigned = [&adapter_chosen_port](uint16_t port) { adapter_chosen_port = port; }; + const std::string service_id= "ssh1"; + uint16_t adapter_chosen_port = get_available_port(io_ctx); + adapter_cfg.serviceId_to_endpoint_map[service_id] = boost::lexical_cast(adapter_chosen_port); tcp_adapter_proxy proxy{ settings, adapter_cfg }; @@ -75,19 +215,29 @@ TEST_CASE( "Test source mode", "[source]") { thread ws_server_thread{[&ws_server]() { ws_server.run(); } }; thread tcp_adapter_thread{[&proxy]() { proxy.run_proxy(); } }; - boost::asio::io_context io_ctx{}; - tcp::socket client_socket{ io_ctx }; - + // Verify web socket handshake request from local proxy this_thread::sleep_for(chrono::milliseconds(IO_PAUSE_MS)); CHECK( ws_server.get_handshake_request().method() == boost::beast::http::verb::get ); CHECK( ws_server.get_handshake_request().target() == "/tunnel?local-proxy-mode=source" ); - CHECK( ws_server.get_handshake_request().base()["sec-websocket-protocol"] == "aws.iot.securetunneling-1.0" ); + CHECK( ws_server.get_handshake_request().base()["sec-websocket-protocol"] == "aws.iot.securetunneling-2.0" ); CHECK( ws_server.get_handshake_request().base()["access-token"] == adapter_cfg.access_token ); + // Simulate cloud side sends control message Message_Type_SERVICE_IDS + message 
ws_server_message{}; + ws_server_message.set_type(Message_Type_SERVICE_IDS); + ws_server_message.add_availableserviceids(service_id); + ws_server_message.set_ignorable(false); + ws_server_message.clear_payload(); + + ws_server.deliver_message(ws_server_message); + this_thread::sleep_for(chrono::milliseconds(IO_PAUSE_MS)); + + // Simulate source app connects to source local proxy client_socket.connect( tcp::endpoint{boost::asio::ip::make_address(adapter_cfg.bind_address.get()), adapter_chosen_port} ); uint8_t read_buffer[READ_BUFFER_SIZE]; + // Simulate sending data messages from source app for(int i = 0; i < 5; ++i) { string const test_string = (boost::format("test message: %1%") % i).str(); @@ -96,6 +246,7 @@ TEST_CASE( "Test source mode", "[source]") { CHECK( string(reinterpret_cast(read_buffer)) == test_string ); } + // Verify local proxy sends Message_Type_STREAM_RESET ws_server.expect_next_message( [](message const&msg) { @@ -105,8 +256,10 @@ TEST_CASE( "Test source mode", "[source]") { this_thread::sleep_for(chrono::milliseconds(IO_PAUSE_MS)); + // Simulate source app connects to source local proxy client_socket.connect( tcp::endpoint{boost::asio::ip::make_address(adapter_cfg.bind_address.get()), adapter_chosen_port} ); + // Simulate sending data messages from source app for(int i = 0; i < 5; ++i) { string const test_string = (boost::format("test message: %1%") % i).str(); @@ -130,18 +283,23 @@ TEST_CASE( "Test source mode", "[source]") { TEST_CASE( "Test destination mode", "[destination]") { using namespace com::amazonaws::iot::securedtunneling; - + /** + * Test case set up + * 1. Create tcp socket to acts as destination app. + * 2. Create web socket server to act as secure tunneling service (cloud side). + * 3. Configure adapter config used for the local proxy. 
+ */ boost::asio::io_context io_ctx{}; tcp::socket destination_socket{ io_ctx }; tcp::acceptor acceptor{io_ctx, {boost::asio::ip::make_address(LOCALHOST), 0}}; cout << "Destination app listening on address: " << acceptor.local_endpoint().address().to_string() - << ":" << acceptor.local_endpoint().port() << endl; + << ":" << acceptor.local_endpoint().port() << endl; bool accepted = false; thread tcp_accept_thread{[&acceptor, &destination_socket, &accepted]() - { - acceptor.accept(destination_socket); - accepted = true; - }}; + { + acceptor.accept(destination_socket); + accepted = true; + }}; boost::system::error_code ec; ptree settings; @@ -156,33 +314,49 @@ TEST_CASE( "Test destination mode", "[destination]") { adapter_proxy_config adapter_cfg; apply_test_config(adapter_cfg, ws_address); adapter_cfg.mode = proxy_mode::DESTINATION; - adapter_cfg.data_host = acceptor.local_endpoint().address().to_string(); - adapter_cfg.data_port = acceptor.local_endpoint().port(); adapter_cfg.bind_address = LOCALHOST; adapter_cfg.access_token = "foobar_dest_token"; + const std::string service_id= "ssh1"; + std::string dst_host = acceptor.local_endpoint().address().to_string(); + std::string dst_port = boost::lexical_cast(acceptor.local_endpoint().port()); + adapter_cfg.serviceId_to_endpoint_map[service_id] = dst_host+ + ":" + dst_port; tcp_adapter_proxy proxy{ settings, adapter_cfg }; thread tcp_adapter_thread{[&proxy]() { proxy.run_proxy(); } }; this_thread::sleep_for(chrono::milliseconds(IO_PAUSE_MS)); + // Verify web socket handshake request from local proxy CHECK( ws_server.get_handshake_request().method() == boost::beast::http::verb::get ); CHECK( ws_server.get_handshake_request().target() == "/tunnel?local-proxy-mode=destination" ); - CHECK( ws_server.get_handshake_request().base()["sec-websocket-protocol"] == "aws.iot.securetunneling-1.0" ); + CHECK( ws_server.get_handshake_request().base()["sec-websocket-protocol"] == "aws.iot.securetunneling-2.0" ); CHECK( 
ws_server.get_handshake_request().base()["access-token"] == adapter_cfg.access_token ); - message outgoing_message{}; - outgoing_message.set_type(Message_Type_STREAM_START); - outgoing_message.set_streamid(1); - outgoing_message.set_ignorable(false); - outgoing_message.clear_payload(); + // Simulate cloud side sends control message Message_Type_SERVICE_IDS + message ws_server_message{}; + ws_server_message.set_type(Message_Type_SERVICE_IDS); + ws_server_message.add_availableserviceids(service_id); + ws_server_message.set_ignorable(false); + ws_server_message.clear_payload(); - ws_server.deliver_message(outgoing_message); + ws_server.deliver_message(ws_server_message); this_thread::sleep_for(chrono::milliseconds(IO_PAUSE_MS)); + // Simulate cloud side sends control message Message_Type_STREAM_START + ws_server_message.set_type(Message_Type_STREAM_START); + ws_server_message.set_serviceid(service_id); + ws_server_message.set_streamid(1); + ws_server_message.set_ignorable(false); + ws_server_message.clear_payload(); + + ws_server.deliver_message(ws_server_message); + this_thread::sleep_for(chrono::milliseconds(IO_PAUSE_MS)); + + // Verify destination app is connected REQUIRE( accepted ); tcp_accept_thread.join(); + // Simulate sending data messages from destination app uint8_t read_buffer[READ_BUFFER_SIZE]; for(int i = 0; i < 5; ++i) @@ -193,30 +367,32 @@ TEST_CASE( "Test destination mode", "[destination]") { CHECK( string(reinterpret_cast(read_buffer)) == test_string ); } + // Verify local proxy sends Message_Type_STREAM_RESET ws_server.expect_next_message( - [](message const&msg) - { - return (msg.type() == com::amazonaws::iot::securedtunneling::Message_Type_STREAM_RESET) && msg.streamid() == 1; - }); + [](message const&msg) + { + return (msg.type() == com::amazonaws::iot::securedtunneling::Message_Type_STREAM_RESET) && msg.streamid() == 1; + }); destination_socket.close(); accepted = false; tcp_accept_thread = std::thread{[&acceptor, &destination_socket, 
&accepted]() - { - acceptor.accept(destination_socket); - accepted = true; - }}; - ws_server.deliver_message(outgoing_message); + { + acceptor.accept(destination_socket); + accepted = true; + }}; + ws_server.deliver_message(ws_server_message); this_thread::sleep_for(chrono::milliseconds(IO_PAUSE_MS)); REQUIRE( accepted ); tcp_accept_thread.join(); + // Simulate sending data messages from destination app for(int i = 0; i < 5; ++i) { - string const test_string = (boost::format("test message: %1%") % i).str(); - destination_socket.send(boost::asio::buffer(test_string)); - destination_socket.read_some(boost::asio::buffer(reinterpret_cast(read_buffer), READ_BUFFER_SIZE)); - CHECK( string(reinterpret_cast(read_buffer)) == test_string ); + string const test_string = (boost::format("test message: %1%") % i).str(); + destination_socket.send(boost::asio::buffer(test_string)); + destination_socket.read_some(boost::asio::buffer(reinterpret_cast(read_buffer), READ_BUFFER_SIZE)); + CHECK( string(reinterpret_cast(read_buffer)) == test_string ); } //instruct websocket to close on client @@ -228,4 +404,3 @@ TEST_CASE( "Test destination mode", "[destination]") { ws_server_thread.join(); tcp_adapter_thread.join(); } - diff --git a/test/TestWebsocketServer.cpp b/test/TestWebsocketServer.cpp index 0c55734..ceced3e 100644 --- a/test/TestWebsocketServer.cpp +++ b/test/TestWebsocketServer.cpp @@ -51,7 +51,7 @@ void TestWebsocketServer::run() [](boost::beast::websocket::response_type& response) { response.set("channel-id", boost::uuids::to_string({})); //default init for uuid is all 0s - response.set("Sec-WebSocket-Protocol", "aws.iot.securetunneling-1.0"); + response.set("Sec-WebSocket-Protocol", "aws.iot.securetunneling-2.0"); }, ec); if(ec) @@ -200,4 +200,5 @@ void TestWebsocketServer::expect_next_message(std::function