-
Notifications
You must be signed in to change notification settings - Fork 214
/
CloudEventsRoute.java
executable file
·347 lines (301 loc) · 14.8 KB
/
CloudEventsRoute.java
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
/*
* Copyright (c) 2020 Contributors to the Eclipse Foundation
*
* See the NOTICE file(s) distributed with this work for additional
* information regarding copyright ownership.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0
*
* SPDX-License-Identifier: EPL-2.0
*/
package org.eclipse.ditto.gateway.service.endpoints.routes.cloudevents;
import java.net.URI;
import java.text.MessageFormat;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.annotation.Nullable;
import org.eclipse.ditto.base.model.acks.AcknowledgementRequest;
import org.eclipse.ditto.base.model.acks.DittoAcknowledgementLabel;
import org.eclipse.ditto.base.model.exceptions.CloudEventMissingPayloadException;
import org.eclipse.ditto.base.model.exceptions.CloudEventNotParsableException;
import org.eclipse.ditto.base.model.exceptions.CloudEventUnsupportedDataSchemaException;
import org.eclipse.ditto.base.model.exceptions.UnsupportedMediaTypeException;
import org.eclipse.ditto.base.model.headers.DittoHeaderDefinition;
import org.eclipse.ditto.base.model.headers.DittoHeaders;
import org.eclipse.ditto.base.model.json.JsonSchemaVersion;
import org.eclipse.ditto.base.model.signals.Signal;
import org.eclipse.ditto.base.model.signals.commands.CommandNotSupportedException;
import org.eclipse.ditto.gateway.service.endpoints.actors.AbstractHttpRequestActor;
import org.eclipse.ditto.gateway.service.endpoints.routes.AbstractRoute;
import org.eclipse.ditto.gateway.service.endpoints.routes.RouteBaseProperties;
import org.eclipse.ditto.gateway.service.util.config.endpoints.CloudEventsConfig;
import org.eclipse.ditto.internal.utils.pekko.logging.DittoLoggerFactory;
import org.eclipse.ditto.internal.utils.pekko.logging.ThreadSafeDittoLogger;
import org.eclipse.ditto.json.JsonObject;
import org.eclipse.ditto.protocol.JsonifiableAdaptable;
import org.eclipse.ditto.protocol.ProtocolFactory;
import org.eclipse.ditto.protocol.adapter.DittoProtocolAdapter;
import org.eclipse.ditto.things.model.signals.commands.ThingCommand;
import org.apache.pekko.actor.Status;
import org.apache.pekko.http.javadsl.model.ContentType;
import org.apache.pekko.http.javadsl.model.ContentTypes;
import org.apache.pekko.http.javadsl.model.HttpRequest;
import org.apache.pekko.http.javadsl.model.HttpResponse;
import org.apache.pekko.http.javadsl.model.StatusCodes;
import org.apache.pekko.http.javadsl.server.RequestContext;
import org.apache.pekko.http.javadsl.server.Route;
import org.apache.pekko.stream.javadsl.Sink;
import org.apache.pekko.util.ByteString;
import io.cloudevents.CloudEvent;
import io.cloudevents.CloudEventData;
import io.cloudevents.core.message.MessageReader;
import io.cloudevents.http.HttpMessageFactory;
import io.cloudevents.rw.CloudEventRWException;
/**
 * Builder for creating Pekko HTTP route for {@code /cloudevents}.
 * <p>
 * The route accepts POSTed CloudEvents, converts their payload into a Ditto protocol signal and hands
 * that signal to a per-request actor for processing. A successful outcome is mapped to an empty
 * {@code 202 Accepted} response; failures are propagated as their original error responses.
 */
public final class CloudEventsRoute extends AbstractRoute {

    private static final ThreadSafeDittoLogger LOGGER =
            DittoLoggerFactory.getThreadSafeLogger(CloudEventsRoute.class);

    /**
     * Public endpoint of cloud events.
     */
    public static final String PATH_CLOUDEVENTS = "cloudevents";

    private static final DittoProtocolAdapter PROTOCOL_ADAPTER = DittoProtocolAdapter.newInstance();

    // required URI scheme of the CloudEvent "dataschema" attribute, e.g. "ditto:<suffix>"
    private static final String DATA_SCHEMA_SCHEME = "ditto";

    private final CloudEventsConfig cloudEventsConfig;

    /**
     * Constructs a {@code CloudEventsRoute} object.
     *
     * @param routeBaseProperties the base properties of the route.
     * @param cloudEventsConfig the configuration settings for cloud events.
     * @throws NullPointerException if any argument is {@code null}.
     */
    public CloudEventsRoute(final RouteBaseProperties routeBaseProperties,
            final CloudEventsConfig cloudEventsConfig) {
        super(routeBaseProperties);
        this.cloudEventsConfig = cloudEventsConfig;
    }

    /**
     * Builds the {@code /cloudevents} route.
     *
     * @param ctx the request context.
     * @param dittoHeaders the DittoHeaders of the request.
     * @return the {@code /cloudevents} route.
     */
    public Route buildCloudEventsRoute(final RequestContext ctx, final DittoHeaders dittoHeaders) {
        return path(PATH_CLOUDEVENTS, () -> // /cloudevents
                post(() -> // POST
                        acceptCloudEvent(ctx, dittoHeaders)
                )
        );
    }

    /**
     * Consumes the request payload as a CloudEvent, validates it, converts it to a Ditto signal and
     * forwards the signal to a per-request actor which eventually completes the HTTP response.
     *
     * @param ctx the request context.
     * @param dittoHeaders the DittoHeaders of the request.
     * @return the route completing with the (possibly rewritten) HTTP response.
     */
    private Route acceptCloudEvent(final RequestContext ctx, final DittoHeaders dittoHeaders) {
        return extractDataBytes(payloadSource -> {
            final CompletableFuture<HttpResponse> httpResponseFuture = new CompletableFuture<>();
            runWithSupervisionStrategy(payloadSource
                    // collect the binary payload
                    .fold(ByteString.emptyByteString(), ByteString::concat)
                    // map the payload to a cloud event
                    .map(payload -> toCloudEvent(ctx, dittoHeaders, payload))
                    // validate the cloud event
                    .map(cloudEvent -> validateCloudEvent(cloudEvent, ctx, dittoHeaders))
                    // process the event
                    .map(cloudEvent -> {
                        try {
                            // DON'T replace this try-catch by .recover: The supervising strategy is called before recovery!
                            final Optional<Signal<?>> optionalSignal =
                                    jsonToDittoSignal(cloudEvent.getData(), dittoHeaders);
                            if (optionalSignal.isEmpty()) {
                                return new Status.Failure(CloudEventMissingPayloadException
                                        .withDetailedInformationBuilder()
                                        .dittoHeaders(dittoHeaders)
                                        .build());
                            }
                            final Signal<?> signal = optionalSignal.get();
                            final JsonSchemaVersion schemaVersion = signal.getImplementedSchemaVersion();
                            // NOTE(review): schemaVersion is taken from the signal itself, so this check
                            // looks effectively always true — consider checking against the schema version
                            // requested via dittoHeaders instead; TODO confirm intent.
                            return signal.implementsSchemaVersion(schemaVersion) ? signal
                                    : CommandNotSupportedException.newBuilder(schemaVersion.toInt())
                                    .dittoHeaders(dittoHeaders)
                                    .build();
                        } catch (final Exception e) {
                            return new Status.Failure(e);
                        }
                    })
                    .to(Sink.actorRef(createHttpPerRequestActor(ctx, httpResponseFuture),
                            AbstractHttpRequestActor.COMPLETE_MESSAGE))
            );
            return completeWithFuture(httpResponseFuture.thenApply(response -> {
                if (response.status().isSuccess()) {
                    // as the /cloudevents is only intended to accept Ditto commands and apply them, replace the
                    // actual successful response with just 202 (accepted) and no additional body/headers
                    return HttpResponse.create().withStatus(StatusCodes.ACCEPTED);
                } else {
                    return response;
                }
            }));
        });
    }

    /**
     * Convert the request and payload to a cloud event.
     *
     * @param ctx The request context.
     * @param dittoHeaders the DittoHeaders to apply to the parsed Ditto Signal.
     * @param payload The binary payload, this may contain the cloud event payload, or the fully encoded cloud
     * event structure.
     * @return The cloud event.
     * @throws CloudEventNotParsableException if the payload/headers cannot be read as a CloudEvent.
     */
    private CloudEvent toCloudEvent(final RequestContext ctx, final DittoHeaders dittoHeaders,
            final ByteString payload) {
        if (LOGGER.isTraceEnabled()) {
            final StringBuilder headers = new StringBuilder("CloudEvent raw HTTP Headers:");
            ctx.getRequest().getHeaders()
                    .forEach(header -> headers
                            .append("\n\t")
                            .append(header.name())
                            .append(" = ")
                            .append(header.value()));
            LOGGER.withCorrelationId(dittoHeaders)
                    .trace(headers.toString());
            LOGGER.withCorrelationId(dittoHeaders)
                    .trace("CloudEvent Ditto Headers: {}", dittoHeaders);
        }
        try {
            // create a reader for the message
            final MessageReader reader = HttpMessageFactory.createReader(acceptor -> {
                // NOTE: this acceptor may be run multiple times by the message reader

                // record if we saw the content type header
                final AtomicBoolean sawContentType = new AtomicBoolean();
                // consume the HTTP request headers
                ctx.getRequest().getHeaders().forEach(header -> {
                    if (header.lowercaseName().equals(DittoHeaderDefinition.CONTENT_TYPE.getKey())) {
                        sawContentType.set(true);
                    }
                    acceptor.accept(header.name(), header.value());
                });
                if (!sawContentType.get()) {
                    // we didn't see the content type in the header, so extract it from pekko's request
                    acceptor.accept(DittoHeaderDefinition.CONTENT_TYPE.getKey(),
                            ctx.getRequest().entity().getContentType().mediaType().toString());
                }
            }, payload.toArray());
            return reader.toEvent();
        } catch (final CloudEventRWException | IllegalStateException e) {
            throw CloudEventNotParsableException.withDetailedInformationBuilder(e.getMessage())
                    .dittoHeaders(dittoHeaders)
                    .build();
        }
    }

    /**
     * Validates that the cloud event carries data, an acceptable data content type and data schema.
     *
     * @param cloudEvent the cloud event to validate.
     * @param ctx the request context.
     * @param dittoHeaders the DittoHeaders of the request.
     * @return the passed-in cloud event, unchanged, if valid.
     * @throws CloudEventMissingPayloadException if the event contains no data.
     */
    private CloudEvent validateCloudEvent(final CloudEvent cloudEvent, final RequestContext ctx,
            final DittoHeaders dittoHeaders) {
        if (cloudEvent.getData() == null) {
            throw CloudEventMissingPayloadException
                    .withDetailedInformationBuilder()
                    .dittoHeaders(dittoHeaders)
                    .build();
        }
        LOGGER.withCorrelationId(dittoHeaders)
                .debug("CloudEvent: {}", cloudEvent);
        ensureDataContentType(cloudEvent.getDataContentType(), ctx, dittoHeaders);
        ensureDataSchema(cloudEvent.getDataSchema(), ctx, dittoHeaders);
        return cloudEvent;
    }

    /**
     * Parses the CloudEvent data as a Ditto protocol JSON message and adapts it to a {@link Signal}.
     * <p>
     * For {@link ThingCommand}s, {@code response-required} is forced to {@code false} and a
     * {@code twin-persisted} acknowledgement is requested, as the HTTP response is always just 202.
     *
     * @param data the CloudEvent data, may be {@code null}.
     * @param dittoHeaders the DittoHeaders to merge into the parsed signal's headers.
     * @return the adapted signal, or an empty Optional if no data/payload was present.
     */
    private Optional<Signal<?>> jsonToDittoSignal(@Nullable final CloudEventData data,
            final DittoHeaders dittoHeaders) {
        if (data == null) {
            return Optional.empty();
        }
        final byte[] payload = data.toBytes();
        if (payload == null || payload.length == 0) {
            return Optional.empty();
        }
        final JsonObject jsonObject = JsonObject.of(payload);
        LOGGER.withCorrelationId(dittoHeaders)
                .debug("CloudEvent payload JSON: {}", jsonObject);
        final JsonifiableAdaptable jsonifiableAdaptable = ProtocolFactory.jsonifiableAdaptableFromJson(jsonObject);
        final Signal<?> signal = PROTOCOL_ADAPTER.fromAdaptable(jsonifiableAdaptable);
        final Signal<?> signalWithAdjustedHeaders;
        if (signal instanceof ThingCommand) {
            final DittoHeaders adjustedHeaders = dittoHeaders.toBuilder()
                    .responseRequired(false)
                    .acknowledgementRequest(AcknowledgementRequest.of(DittoAcknowledgementLabel.TWIN_PERSISTED))
                    .build();
            signalWithAdjustedHeaders = signal.setDittoHeaders(
                    signal.getDittoHeaders().toBuilder().putHeaders(adjustedHeaders).build()
            );
        } else {
            signalWithAdjustedHeaders = signal.setDittoHeaders(
                    signal.getDittoHeaders().toBuilder().putHeaders(dittoHeaders).build()
            );
        }
        return Optional.of(signalWithAdjustedHeaders);
    }

    /**
     * Ensures that the data content type is one of the configured acceptable types.
     *
     * @param dataContentType the content type to check, may be {@code null}.
     * @param ctx the request context (used for logging the rejected request).
     * @param dittoHeaders the DittoHeaders of the request.
     * @throws UnsupportedMediaTypeException if the content type is missing, malformed or not configured.
     */
    private void ensureDataContentType(@Nullable final String dataContentType,
            final RequestContext ctx,
            final DittoHeaders dittoHeaders) {
        if (!isCorrectDataType(dataContentType)) {
            if (LOGGER.isInfoEnabled()) {
                LOGGER.withCorrelationId(dittoHeaders)
                        .info("CloudEvent request rejected: unsupported data-content-type: <{}> request: <{}>",
                                dataContentType, requestToLogString(ctx.getRequest()));
            }
            throw UnsupportedMediaTypeException
                    .withDetailedInformationBuilder(dataContentType != null ? dataContentType : "none",
                            cloudEventsConfig.getDataTypes())
                    .dittoHeaders(dittoHeaders)
                    .build();
        }
    }

    /**
     * Test if the data type is acceptable.
     * <p>
     * A missing, empty or malformed data type is not acceptable.
     *
     * @param dataContentType The content type to check.
     * @return {@code true} if the content type is acceptable, {@code false} otherwise.
     */
    private boolean isCorrectDataType(@Nullable final String dataContentType) {
        if (dataContentType == null) {
            // no content type
            return false;
        }
        final ContentType type;
        try {
            type = ContentTypes.parse(dataContentType);
        } catch (final RuntimeException e) {
            // ContentTypes.parse throws (e.g. IllegalArgumentException) for malformed content types
            // rather than returning null; a malformed data content type must be rejected with a 415
            // by the caller, not bubble up as an unhandled server error
            return false;
        }
        if (type == null) {
            // defensive: failed to parse content type
            return false;
        }
        return this.cloudEventsConfig.getDataTypes().contains(type.mediaType().toString());
    }

    /**
     * Ensure that the data schema starts with {@code ditto:}.
     *
     * @param dataSchema The schema to verify
     * @param ctx The request context.
     * @param dittoHeaders The ditto headers.
     * @throws CloudEventUnsupportedDataSchemaException if the schema is missing (and not allowed to be
     * empty by configuration) or does not use the {@code ditto} scheme.
     */
    private void ensureDataSchema(@Nullable final URI dataSchema,
            final RequestContext ctx,
            final DittoHeaders dittoHeaders) {
        if (dataSchema == null && cloudEventsConfig.isEmptySchemaAllowed()) {
            // early return, no schema, but no requirement to have one
            return;
        }
        if (dataSchema == null || !dataSchema.getScheme().equals(DATA_SCHEMA_SCHEME)) {
            if (LOGGER.isInfoEnabled()) {
                LOGGER.withCorrelationId(dittoHeaders)
                        .info("CloudEvent request rejected: unsupported data-schema: <{}> request: <{}>",
                                dataSchema, requestToLogString(ctx.getRequest()));
            }
            throw CloudEventUnsupportedDataSchemaException
                    .withDetailedInformationBuilder(dataSchema != null ? dataSchema.toString() : "none")
                    .dittoHeaders(dittoHeaders)
                    .build();
        }
    }

    /**
     * Renders a compact single-line representation of a request (host, method, path) for log output.
     *
     * @param request the HTTP request to render.
     * @return the log string.
     */
    private static String requestToLogString(final HttpRequest request) {
        return MessageFormat.format("{0} {1} {2}",
                request.getUri().getHost().address(),
                request.method().value(),
                request.getUri().getPathString());
    }
}