Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ISSUE-297: Sonar warnings fixes #298

Merged
merged 3 commits
Mar 23, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
package com.provectus.kafka.ui.config;

import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.ReadOnlyModeException;
import com.provectus.kafka.ui.service.ClustersStorage;
import java.util.regex.Pattern;
Expand Down Expand Up @@ -39,7 +39,8 @@ public Mono<Void> filter(ServerWebExchange exchange, @NotNull WebFilterChain cha
var clusterName = matcher.group("clusterName");
var kafkaCluster = clustersStorage.getClusterByName(clusterName)
.orElseThrow(
() -> new NotFoundException(String.format("No cluster for name '%s'", clusterName)));
() -> new ClusterNotFoundException(
String.format("No cluster for name '%s'", clusterName)));

if (!kafkaCluster.getReadOnly()) {
return chain.filter(exchange);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,6 @@ public Mono<ResponseEntity<Flux<ConsumerGroup>>> getConsumerGroups(String cluste
return clusterService.getConsumerGroups(clusterName)
.map(Flux::fromIterable)
.map(ResponseEntity::ok)
.switchIfEmpty(Mono.just(ResponseEntity.notFound()
.build())); // TODO: check behaviour on cluster not found and empty groups list
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
package com.provectus.kafka.ui.exception;

/**
 * Thrown when no Kafka cluster with the requested name is registered in the
 * application's cluster storage. Mapped to {@code ErrorCode.CLUSTER_NOT_FOUND}
 * (HTTP 404 Not Found).
 */
public class ClusterNotFoundException extends CustomBaseException {

  /** Creates the exception with the generic "Cluster not found" message. */
  public ClusterNotFoundException() {
    super("Cluster not found");
  }

  /**
   * Creates the exception with a caller-supplied detail message.
   *
   * @param message detail message, typically naming the missing cluster
   */
  public ClusterNotFoundException(String message) {
    super(message);
  }

  @Override
  public ErrorCode getErrorCode() {
    return ErrorCode.CLUSTER_NOT_FOUND;
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
package com.provectus.kafka.ui.exception;

/**
 * Thrown when the requested Kafka Connect instance is not configured for the
 * given cluster. Mapped to {@code ErrorCode.CONNECT_NOT_FOUND}
 * (HTTP 404 Not Found).
 */
public class ConnectNotFoundException extends CustomBaseException {

  /** Creates the exception with the generic "Connect not found" message. */
  public ConnectNotFoundException() {
    super("Connect not found");
  }

  /**
   * Creates the exception with a caller-supplied detail message. Added for
   * parity with the sibling {@code *NotFoundException} types, which all
   * expose a message constructor.
   *
   * @param message detail message, typically naming the missing connect
   */
  public ConnectNotFoundException(String message) {
    super(message);
  }

  @Override
  public ErrorCode getErrorCode() {
    return ErrorCode.CONNECT_NOT_FOUND;
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -2,22 +2,23 @@


public abstract class CustomBaseException extends RuntimeException {
public CustomBaseException() {
protected CustomBaseException() {
super();
}

public CustomBaseException(String message) {
protected CustomBaseException(String message) {
super(message);
}

public CustomBaseException(String message, Throwable cause) {
protected CustomBaseException(String message, Throwable cause) {
super(message, cause);
}

public CustomBaseException(Throwable cause) {
protected CustomBaseException(Throwable cause) {
super(cause);
}

public CustomBaseException(String message, Throwable cause, boolean enableSuppression,
protected CustomBaseException(String message, Throwable cause, boolean enableSuppression,
boolean writableStackTrace) {
super(message, cause, enableSuppression, writableStackTrace);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,14 @@ public enum ErrorCode {
UNEXPECTED(5000, HttpStatus.INTERNAL_SERVER_ERROR),
BINDING_FAIL(4001, HttpStatus.BAD_REQUEST),
VALIDATION_FAIL(4002, HttpStatus.BAD_REQUEST),
ENTITY_NOT_FOUND(4003, HttpStatus.NOT_FOUND),
READ_ONLY_MODE_ENABLE(4004, HttpStatus.METHOD_NOT_ALLOWED),
REBALANCE_IN_PROGRESS(4005, HttpStatus.CONFLICT),
DUPLICATED_ENTITY(4006, HttpStatus.CONFLICT),
UNPROCESSABLE_ENTITY(4007, HttpStatus.UNPROCESSABLE_ENTITY);
READ_ONLY_MODE_ENABLE(4003, HttpStatus.METHOD_NOT_ALLOWED),
REBALANCE_IN_PROGRESS(4004, HttpStatus.CONFLICT),
DUPLICATED_ENTITY(4005, HttpStatus.CONFLICT),
UNPROCESSABLE_ENTITY(4006, HttpStatus.UNPROCESSABLE_ENTITY),
CLUSTER_NOT_FOUND(4007, HttpStatus.NOT_FOUND),
TOPIC_NOT_FOUND(4008, HttpStatus.NOT_FOUND),
SCHEMA_NOT_FOUND(4009, HttpStatus.NOT_FOUND),
CONNECT_NOT_FOUND(4010, HttpStatus.NOT_FOUND);

static {
// codes uniqueness check
Expand Down

This file was deleted.

Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
package com.provectus.kafka.ui.exception;

/**
 * Thrown when a schema subject (or a specific version of it) does not exist
 * in the schema registry. Mapped to {@code ErrorCode.SCHEMA_NOT_FOUND}
 * (HTTP 404 Not Found).
 */
public class SchemaNotFoundException extends CustomBaseException {

  /** Creates the exception with the generic "Schema not found" message. */
  public SchemaNotFoundException() {
    super("Schema not found");
  }

  /**
   * Creates the exception with a caller-supplied detail message.
   *
   * @param message detail message, typically naming the missing schema
   *     and/or its version
   */
  public SchemaNotFoundException(String message) {
    super(message);
  }

  @Override
  public ErrorCode getErrorCode() {
    return ErrorCode.SCHEMA_NOT_FOUND;
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
package com.provectus.kafka.ui.exception;

/**
 * Thrown when the requested topic does not exist in the target cluster.
 * Mapped to {@code ErrorCode.TOPIC_NOT_FOUND} (HTTP 404 Not Found).
 */
public class TopicNotFoundException extends CustomBaseException {

  /** Creates the exception with the generic "Topic not found" message. */
  public TopicNotFoundException() {
    super("Topic not found");
  }

  /**
   * Creates the exception with a caller-supplied detail message. Added for
   * parity with the sibling {@code *NotFoundException} types, which all
   * expose a message constructor.
   *
   * @param message detail message, typically naming the missing topic
   */
  public TopicNotFoundException(String message) {
    super(message);
  }

  @Override
  public ErrorCode getErrorCode() {
    return ErrorCode.TOPIC_NOT_FOUND;
  }
}
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.model.Broker;
import com.provectus.kafka.ui.model.BrokerMetrics;
Expand Down Expand Up @@ -88,7 +89,7 @@ public TopicsResponse getTopics(String name, Optional<Integer> page,
int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
var cluster = clustersStorage.getClusterByName(name)
.orElseThrow(() -> new NotFoundException("No such cluster"));
.orElseThrow(ClusterNotFoundException::new);
var totalPages = (cluster.getTopics().size() / perPage)
+ (cluster.getTopics().size() % perPage == 0 ? 0 : 1);
return new TopicsResponse()
Expand Down Expand Up @@ -178,11 +179,10 @@ public Map<TopicPartition, Long> topicPartitionsEndOffsets(
}
}

@SneakyThrows
public Mono<List<ConsumerGroup>> getConsumerGroups(String clusterName) {
return clustersStorage.getClusterByName(clusterName)
.map(kafkaService::getConsumerGroups)
.orElse(Mono.empty());
return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
.switchIfEmpty(Mono.error(ClusterNotFoundException::new))
.flatMap(kafkaService::getConsumerGroups);
}

public Flux<Broker> getBrokers(String clusterName) {
Expand Down Expand Up @@ -211,10 +211,10 @@ public Mono<Topic> updateTopic(String clusterName, String topicName,

public Mono<Void> deleteTopic(String clusterName, String topicName) {
var cluster = clustersStorage.getClusterByName(clusterName)
.orElseThrow(() -> new NotFoundException("No such cluster"));
getTopicDetails(clusterName, topicName)
.orElseThrow(() -> new NotFoundException("No such topic"));
return kafkaService.deleteTopic(cluster, topicName)
.orElseThrow(ClusterNotFoundException::new);
var topic = getTopicDetails(clusterName, topicName)
.orElseThrow(TopicNotFoundException::new);
return kafkaService.deleteTopic(cluster, topic.getName())
.doOnNext(t -> updateCluster(topicName, clusterName, cluster));
}

Expand Down Expand Up @@ -243,9 +243,9 @@ public Flux<TopicMessage> getMessages(String clusterName, String topicName,
public Mono<Void> deleteTopicMessages(String clusterName, String topicName,
List<Integer> partitions) {
var cluster = clustersStorage.getClusterByName(clusterName)
.orElseThrow(() -> new NotFoundException("No such cluster"));
.orElseThrow(ClusterNotFoundException::new);
if (!cluster.getTopics().containsKey(topicName)) {
throw new NotFoundException("No such topic");
throw new TopicNotFoundException();
}
return consumingService.offsetsForDeletion(cluster, topicName, partitions)
.flatMap(offsets -> kafkaService.deleteTopicMessages(cluster, offsets));
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.client.KafkaConnectClients;
import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.ConnectNotFoundException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.mapper.KafkaConnectMapper;
import com.provectus.kafka.ui.model.Connect;
Expand Down Expand Up @@ -181,7 +182,7 @@ public Mono<ConnectorPluginConfigValidationResponse> validateConnectorPluginConf
private Mono<KafkaCluster> getCluster(String clusterName) {
return clustersStorage.getClusterByName(clusterName)
.map(Mono::just)
.orElse(Mono.error(new NotFoundException("No such cluster")));
.orElse(Mono.error(ClusterNotFoundException::new));
}

private Mono<String> getConnectAddress(String clusterName, String connectName) {
Expand All @@ -194,7 +195,7 @@ private Mono<String> getConnectAddress(String clusterName, String connectName) {
)
.flatMap(connect -> connect
.map(Mono::just)
.orElse(Mono.error(new NotFoundException("No such connect cluster")))
.orElse(Mono.error(ConnectNotFoundException::new))
);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -351,6 +351,7 @@ private Mono<String> incrementalAlterConfig(TopicFormData topicFormData, ConfigR
.all(), topicCr.name());
}

@SuppressWarnings("deprecation")
private Mono<String> alterConfig(TopicFormData topicFormData, ConfigResource topicCr,
ExtendedAdminClient ac) {
List<ConfigEntry> configEntries = topicFormData.getConfigs().entrySet().stream()
Expand All @@ -359,7 +360,6 @@ private Mono<String> alterConfig(TopicFormData topicFormData, ConfigResource top
Config config = new Config(configEntries);
Map<ConfigResource, Config> map = Collections.singletonMap(topicCr, config);
return ClusterUtil.toMono(ac.getAdminClient().alterConfigs(map).all(), topicCr.name());

}

private InternalTopic mergeWithStats(InternalTopic topic,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,9 @@
import static org.springframework.http.HttpStatus.NOT_FOUND;
import static org.springframework.http.HttpStatus.UNPROCESSABLE_ENTITY;

import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.DuplicateEntityException;
import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.SchemaNotFoundException;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.model.CompatibilityCheckResponse;
Expand Down Expand Up @@ -39,7 +40,6 @@
public class SchemaRegistryService {
public static final String NO_SUCH_SCHEMA_VERSION = "No such schema %s with version %s";
public static final String NO_SUCH_SCHEMA = "No such schema %s";
public static final String NO_SUCH_CLUSTER = "No such cluster";

private static final String URL_SUBJECTS = "/subjects";
private static final String URL_SUBJECT = "/subjects/{schemaName}";
Expand All @@ -66,7 +66,7 @@ public Mono<String[]> getAllSubjectNames(String clusterName) {
.bodyToMono(String[].class)
.doOnError(log::error)
)
.orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
.orElse(Mono.error(ClusterNotFoundException::new));
}

public Flux<SchemaSubject> getAllVersionsBySubject(String clusterName, String subject) {
Expand All @@ -82,7 +82,7 @@ private Flux<Integer> getSubjectVersions(String clusterName, String schemaName)
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA))
).bodyToFlux(Integer.class)
).orElse(Flux.error(new NotFoundException(NO_SUCH_CLUSTER)));
).orElse(Flux.error(ClusterNotFoundException::new));
}

public Mono<SchemaSubject> getSchemaSubjectByVersion(String clusterName, String schemaName,
Expand Down Expand Up @@ -113,7 +113,7 @@ private Mono<SchemaSubject> getSchemaSubject(String clusterName, String schemaNa
return schema;
})
)
.orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
.orElse(Mono.error(ClusterNotFoundException::new));
}

/**
Expand Down Expand Up @@ -145,7 +145,7 @@ private Mono<ResponseEntity<Void>> deleteSchemaSubject(String clusterName, Strin
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA_VERSION, schemaName, version))
).toBodilessEntity()
).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
).orElse(Mono.error(ClusterNotFoundException::new));
}

public Mono<ResponseEntity<Void>> deleteSchemaSubjectEntirely(String clusterName,
Expand All @@ -158,7 +158,7 @@ public Mono<ResponseEntity<Void>> deleteSchemaSubjectEntirely(String clusterName
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName))
)
.toBodilessEntity())
.orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
.orElse(Mono.error(ClusterNotFoundException::new));
}

/**
Expand All @@ -181,7 +181,7 @@ public Mono<SchemaSubject> registerNewSchema(String clusterName,
.flatMap(s -> submitNewSchema(subject, newSchema, schemaRegistryUrl))
.flatMap(resp -> getLatestSchemaVersionBySubject(clusterName, subject))
)
.orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
.orElse(Mono.error(ClusterNotFoundException::new));
});
}

Expand Down Expand Up @@ -219,7 +219,7 @@ private Mono<SchemaSubject> checkSchemaOnDuplicate(String subject,
@NotNull
private Function<ClientResponse, Mono<? extends Throwable>> throwIfNotFoundStatus(
String formatted) {
return resp -> Mono.error(new NotFoundException(formatted));
return resp -> Mono.error(new SchemaNotFoundException(formatted));
}

/**
Expand All @@ -241,7 +241,7 @@ public Mono<Void> updateSchemaCompatibility(String clusterName, String schemaNam
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
.bodyToMono(Void.class);
}).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
}).orElse(Mono.error(ClusterNotFoundException::new));
}

public Mono<Void> updateSchemaCompatibility(String clusterName,
Expand Down Expand Up @@ -287,7 +287,7 @@ public Mono<CompatibilityCheckResponse> checksSchemaCompatibility(
.bodyToMono(InternalCompatibilityCheck.class)
.map(mapper::toCompatibilityCheckResponse)
.log()
).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
).orElse(Mono.error(ClusterNotFoundException::new));
}

public String formatted(String str, Object... args) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -158,9 +158,9 @@ public static InternalTopic mapToInternalTopic(TopicDescription topicDescription
topic.inSyncReplicas(inSyncReplicasCount);

topic.replicationFactor(
topicDescription.partitions().size() > 0
? topicDescription.partitions().get(0).replicas().size()
: 0
topicDescription.partitions().isEmpty()
? 0
: topicDescription.partitions().get(0).replicas().size()
);

topic.underReplicatedPartitions(urpCount);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,10 +77,9 @@ private Map<String, BigDecimal> getJmxMetric(String canonicalName, MBeanServerCo
var attrNames = msc.getMBeanInfo(name).getAttributes();
for (MBeanAttributeInfo attrName : attrNames) {
var value = msc.getAttribute(name, attrName.getName());
if (value instanceof Number) {
if (!(value instanceof Double) || !((Double) value).isInfinite()) {
resultAttr.put(attrName.getName(), new BigDecimal(value.toString()));
}
if ((value instanceof Number)
&& (!(value instanceof Double) || !((Double) value).isInfinite())) {
resultAttr.put(attrName.getName(), new BigDecimal(value.toString()));
}
}
} catch (MalformedURLException url) {
Expand Down
Loading