ISSUE-297: Sonar warnings fixes #298

Merged - 3 commits, Mar 23, 2021
Changes from 1 commit
======================================================================
@@ -1,6 +1,6 @@
 package com.provectus.kafka.ui.config;

-import com.provectus.kafka.ui.exception.NotFoundException;
+import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.exception.ReadOnlyModeException;
 import com.provectus.kafka.ui.service.ClustersStorage;
 import java.util.regex.Pattern;
@@ -39,7 +39,8 @@ public Mono<Void> filter(ServerWebExchange exchange, @NotNull WebFilterChain cha
     var clusterName = matcher.group("clusterName");
     var kafkaCluster = clustersStorage.getClusterByName(clusterName)
         .orElseThrow(
-            () -> new NotFoundException(String.format("No cluster for name '%s'", clusterName)));
+            () -> new ClusterNotFoundException(
+                String.format("No cluster for name '%s'", clusterName)));

     if (!kafkaCluster.getReadOnly()) {
       return chain.filter(exchange);
======================================================================
@@ -0,0 +1,17 @@
+package com.provectus.kafka.ui.exception;
+
+public class ClusterNotFoundException extends NotFoundException {
+
+  public ClusterNotFoundException() {
+    super("Cluster not found");
+  }
+
+  public ClusterNotFoundException(String message) {
+    super(message);
+  }
+
+  @Override
+  public ErrorCode getErrorCode() {
+    return ErrorCode.CLUSTER_NOT_FOUND;
+  }
+}
======================================================================
@@ -2,22 +2,23 @@


 public abstract class CustomBaseException extends RuntimeException {
-  public CustomBaseException() {
+  protected CustomBaseException() {
     super();
   }

-  public CustomBaseException(String message) {
+  protected CustomBaseException(String message) {
     super(message);
   }

-  public CustomBaseException(String message, Throwable cause) {
+  protected CustomBaseException(String message, Throwable cause) {
     super(message, cause);
   }

-  public CustomBaseException(Throwable cause) {
+  protected CustomBaseException(Throwable cause) {
     super(cause);
   }

-  public CustomBaseException(String message, Throwable cause, boolean enableSuppression,
+  protected CustomBaseException(String message, Throwable cause, boolean enableSuppression,
                              boolean writableStackTrace) {
     super(message, cause, enableSuppression, writableStackTrace);
   }
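Note on the visibility change above: Sonar flags public constructors on abstract classes (likely rule java:S5993), since an abstract class can only ever be instantiated through a subclass, so protected states that contract explicitly without changing behaviour. A minimal, self-contained illustration of the pattern, with names invented for the example:

// BaseExample.java (illustration only, not project code)
public abstract class BaseExample {
  private final String label;

  // protected: only subclasses can call this; a public modifier would be misleading
  // because "new BaseExample(...)" is impossible anyway.
  protected BaseExample(String label) {
    this.label = label;
  }

  public String label() {
    return label;
  }
}

class ConcreteExample extends BaseExample {
  ConcreteExample() {
    super("concrete");
  }
}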
======================================================================
@@ -14,7 +14,8 @@ public enum ErrorCode {
   READ_ONLY_MODE_ENABLE(4004, HttpStatus.METHOD_NOT_ALLOWED),
   REBALANCE_IN_PROGRESS(4005, HttpStatus.CONFLICT),
   DUPLICATED_ENTITY(4006, HttpStatus.CONFLICT),
-  UNPROCESSABLE_ENTITY(4007, HttpStatus.UNPROCESSABLE_ENTITY);
+  UNPROCESSABLE_ENTITY(4007, HttpStatus.UNPROCESSABLE_ENTITY),
+  CLUSTER_NOT_FOUND(4008, HttpStatus.NOT_FOUND);

   static {
     // codes uniqueness check
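The "codes uniqueness check" static block referenced above is collapsed in this diff. For context, here is a self-contained sketch of the general pattern: an enum pairing a numeric API code with an HTTP status, plus a class-load check that fails fast on duplicate codes. The two constants mirror the entries visible above, but the accessors and the check itself are assumptions, not the project's exact code:

import java.util.Arrays;
import org.springframework.http.HttpStatus;

// Illustration of the ErrorCode pattern; not the project's exact implementation.
public enum ErrorCodeSketch {
  UNPROCESSABLE_ENTITY(4007, HttpStatus.UNPROCESSABLE_ENTITY),
  CLUSTER_NOT_FOUND(4008, HttpStatus.NOT_FOUND);

  private final int code;
  private final HttpStatus httpStatus;

  ErrorCodeSketch(int code, HttpStatus httpStatus) {
    this.code = code;
    this.httpStatus = httpStatus;
  }

  public int code() {
    return code;
  }

  public HttpStatus httpStatus() {
    return httpStatus;
  }

  static {
    // Codes uniqueness check: fail fast at class load if two constants share a code.
    long distinct = Arrays.stream(values()).mapToInt(ErrorCodeSketch::code).distinct().count();
    if (distinct != values().length) {
      throw new IllegalStateException("Duplicate numeric codes in ErrorCodeSketch");
    }
  }
}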
======================================================================
@@ -1,5 +1,6 @@
 package com.provectus.kafka.ui.service;

+import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.exception.NotFoundException;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
 import com.provectus.kafka.ui.model.Broker;
@@ -88,7 +89,7 @@ public TopicsResponse getTopics(String name, Optional<Integer> page,
     int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
     var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
     var cluster = clustersStorage.getClusterByName(name)
-        .orElseThrow(() -> new NotFoundException("No such cluster"));
+        .orElseThrow(ClusterNotFoundException::new);
     var totalPages = (cluster.getTopics().size() / perPage)
         + (cluster.getTopics().size() % perPage == 0 ? 0 : 1);
     return new TopicsResponse()
@@ -211,7 +212,7 @@ public Mono<Topic> updateTopic(String clusterName, String topicName,

   public Mono<Void> deleteTopic(String clusterName, String topicName) {
     var cluster = clustersStorage.getClusterByName(clusterName)
-        .orElseThrow(() -> new NotFoundException("No such cluster"));
+        .orElseThrow(ClusterNotFoundException::new);
     getTopicDetails(clusterName, topicName)
         .orElseThrow(() -> new NotFoundException("No such topic"));
     return kafkaService.deleteTopic(cluster, topicName)
@@ -243,7 +244,7 @@ public Flux<TopicMessage> getMessages(String clusterName, String topicName,
   public Mono<Void> deleteTopicMessages(String clusterName, String topicName,
                                         List<Integer> partitions) {
     var cluster = clustersStorage.getClusterByName(clusterName)
-        .orElseThrow(() -> new NotFoundException("No such cluster"));
+        .orElseThrow(ClusterNotFoundException::new);
     if (!cluster.getTopics().containsKey(topicName)) {
       throw new NotFoundException("No such topic");
     }
======================================================================
@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.service;

 import com.provectus.kafka.ui.client.KafkaConnectClients;
+import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.exception.NotFoundException;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
 import com.provectus.kafka.ui.mapper.KafkaConnectMapper;
@@ -181,7 +182,7 @@ public Mono<ConnectorPluginConfigValidationResponse> validateConnectorPluginConf
   private Mono<KafkaCluster> getCluster(String clusterName) {
     return clustersStorage.getClusterByName(clusterName)
         .map(Mono::just)
-        .orElse(Mono.error(new NotFoundException("No such cluster")));
+        .orElse(Mono.error(ClusterNotFoundException::new));
   }

   private Mono<String> getConnectAddress(String clusterName, String connectName) {
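A side note on the Mono.error change above (the same change appears throughout SchemaRegistryService below): Reactor's Mono.error and Flux.error both have a Supplier overload, so Mono.error(ClusterNotFoundException::new) defers creating the exception, and capturing its stack trace, until the error Mono is actually subscribed to. The previous Mono.error(new NotFoundException("No such cluster")) built the exception eagerly inside Optional.orElse even when the cluster was found. A small self-contained demo (exception type and messages are arbitrary):

import reactor.core.publisher.Mono;

public class LazyMonoErrorDemo {
  public static void main(String[] args) {
    // Eager form: the Throwable is constructed right here, whether or not
    // anything ever subscribes to this Mono.
    Mono<String> eager = Mono.error(new IllegalStateException("No such cluster"));

    // Lazy form: the Supplier overload runs only on subscription.
    Mono<String> lazy = Mono.error(() -> {
      System.out.println("building exception now");
      return new IllegalStateException("No such cluster");
    });

    System.out.println("assembled, nothing printed by the lazy supplier yet");
    lazy.onErrorReturn("fallback").subscribe(System.out::println);
    eager.onErrorReturn("fallback").subscribe(System.out::println);
    // Expected output order:
    //   assembled, nothing printed by the lazy supplier yet
    //   building exception now
    //   fallback
    //   fallback
  }
}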
======================================================================
@@ -351,6 +351,7 @@ private Mono<String> incrementalAlterConfig(TopicFormData topicFormData, ConfigR
         .all(), topicCr.name());
   }

+  @SuppressWarnings("deprecation")
   private Mono<String> alterConfig(TopicFormData topicFormData, ConfigResource topicCr,
                                    ExtendedAdminClient ac) {
     List<ConfigEntry> configEntries = topicFormData.getConfigs().entrySet().stream()
@@ -359,7 +360,6 @@ private Mono<String> alterConfig(TopicFormData topicFormData, ConfigResource top
     Config config = new Config(configEntries);
     Map<ConfigResource, Config> map = Collections.singletonMap(topicCr, config);
     return ClusterUtil.toMono(ac.getAdminClient().alterConfigs(map).all(), topicCr.name());
-
   }

   private InternalTopic mergeWithStats(InternalTopic topic,
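For context on the @SuppressWarnings("deprecation") above: AdminClient.alterConfigs(Map<ConfigResource, Config>) is deprecated in the Kafka clients API in favour of incrementalAlterConfigs, and this class appears to keep the legacy call as a fallback next to the incremental variant shown higher in the file. A minimal sketch of the non-deprecated call, with the topic name and config value invented for illustration:

import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.config.ConfigResource;

// Sketch only: shows the shape of the non-deprecated incrementalAlterConfigs API.
public class IncrementalAlterConfigSketch {

  static KafkaFuture<Void> setRetention(AdminClient adminClient) {
    ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "example-topic");
    Collection<AlterConfigOp> ops = List.of(
        new AlterConfigOp(new ConfigEntry("retention.ms", "86400000"), AlterConfigOp.OpType.SET));
    // Each entry is an explicit SET/DELETE/APPEND/SUBTRACT operation instead of a full
    // replacement of the resource's config, which is what the deprecated alterConfigs did.
    return adminClient.incrementalAlterConfigs(Map.of(topic, ops)).all();
  }
}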
======================================================================
@@ -3,6 +3,7 @@
 import static org.springframework.http.HttpStatus.NOT_FOUND;
 import static org.springframework.http.HttpStatus.UNPROCESSABLE_ENTITY;

+import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.exception.DuplicateEntityException;
 import com.provectus.kafka.ui.exception.NotFoundException;
 import com.provectus.kafka.ui.exception.UnprocessableEntityException;
@@ -39,7 +40,6 @@
 public class SchemaRegistryService {
   public static final String NO_SUCH_SCHEMA_VERSION = "No such schema %s with version %s";
   public static final String NO_SUCH_SCHEMA = "No such schema %s";
-  public static final String NO_SUCH_CLUSTER = "No such cluster";

   private static final String URL_SUBJECTS = "/subjects";
   private static final String URL_SUBJECT = "/subjects/{schemaName}";
@@ -66,7 +66,7 @@ public Mono<String[]> getAllSubjectNames(String clusterName) {
             .bodyToMono(String[].class)
             .doOnError(log::error)
         )
-        .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+        .orElse(Mono.error(ClusterNotFoundException::new));
   }

   public Flux<SchemaSubject> getAllVersionsBySubject(String clusterName, String subject) {
@@ -82,7 +82,7 @@ private Flux<Integer> getSubjectVersions(String clusterName, String schemaName)
             .onStatus(NOT_FOUND::equals,
                 throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA))
             ).bodyToFlux(Integer.class)
-        ).orElse(Flux.error(new NotFoundException(NO_SUCH_CLUSTER)));
+        ).orElse(Flux.error(ClusterNotFoundException::new));
   }

   public Mono<SchemaSubject> getSchemaSubjectByVersion(String clusterName, String schemaName,
@@ -113,7 +113,7 @@ private Mono<SchemaSubject> getSchemaSubject(String clusterName, String schemaNa
                   return schema;
                 })
             )
-        .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+        .orElse(Mono.error(ClusterNotFoundException::new));
   }

   /**
@@ -145,7 +145,7 @@ private Mono<ResponseEntity<Void>> deleteSchemaSubject(String clusterName, Strin
             .onStatus(NOT_FOUND::equals,
                 throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA_VERSION, schemaName, version))
             ).toBodilessEntity()
-        ).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+        ).orElse(Mono.error(ClusterNotFoundException::new));
   }

   public Mono<ResponseEntity<Void>> deleteSchemaSubjectEntirely(String clusterName,
@@ -158,7 +158,7 @@ public Mono<ResponseEntity<Void>> deleteSchemaSubjectEntirely(String clusterName
                 throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName))
             )
             .toBodilessEntity())
-        .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+        .orElse(Mono.error(ClusterNotFoundException::new));
   }

   /**
@@ -181,7 +181,7 @@ public Mono<SchemaSubject> registerNewSchema(String clusterName,
               .flatMap(s -> submitNewSchema(subject, newSchema, schemaRegistryUrl))
               .flatMap(resp -> getLatestSchemaVersionBySubject(clusterName, subject))
           )
-          .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+          .orElse(Mono.error(ClusterNotFoundException::new));
     });
   }

@@ -241,7 +241,7 @@ public Mono<Void> updateSchemaCompatibility(String clusterName, String schemaNam
           .onStatus(NOT_FOUND::equals,
               throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
           .bodyToMono(Void.class);
-    }).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+    }).orElse(Mono.error(ClusterNotFoundException::new));
   }

   public Mono<Void> updateSchemaCompatibility(String clusterName,
@@ -287,7 +287,7 @@ public Mono<CompatibilityCheckResponse> checksSchemaCompatibility(
           .bodyToMono(InternalCompatibilityCheck.class)
           .map(mapper::toCompatibilityCheckResponse)
           .log()
-      ).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+      ).orElse(Mono.error(ClusterNotFoundException::new));
   }

   public String formatted(String str, Object... args) {
======================================================================
@@ -158,9 +158,9 @@ public static InternalTopic mapToInternalTopic(TopicDescription topicDescription
     topic.inSyncReplicas(inSyncReplicasCount);

     topic.replicationFactor(
-        topicDescription.partitions().size() > 0
-            ? topicDescription.partitions().get(0).replicas().size()
-            : 0
+        topicDescription.partitions().isEmpty()
+            ? 0
+            : topicDescription.partitions().get(0).replicas().size()
     );

     topic.underReplicatedPartitions(urpCount);
======================================================================
@@ -77,10 +77,9 @@ private Map<String, BigDecimal> getJmxMetric(String canonicalName, MBeanServerCo
       var attrNames = msc.getMBeanInfo(name).getAttributes();
       for (MBeanAttributeInfo attrName : attrNames) {
         var value = msc.getAttribute(name, attrName.getName());
-        if (value instanceof Number) {
-          if (!(value instanceof Double) || !((Double) value).isInfinite()) {
-            resultAttr.put(attrName.getName(), new BigDecimal(value.toString()));
-          }
+        if ((value instanceof Number)
+            && (!(value instanceof Double) || !((Double) value).isInfinite())) {
+          resultAttr.put(attrName.getName(), new BigDecimal(value.toString()));
         }
       }
     } catch (MalformedURLException url) {
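On the merged condition above: the isInfinite() guard exists (presumably) because BigDecimal cannot represent non-finite values, so new BigDecimal(value.toString()) would throw NumberFormatException for an infinite Double attribute. A small self-contained check of that behaviour:

import java.math.BigDecimal;

public class BigDecimalInfinityDemo {
  public static void main(String[] args) {
    Object[] samples = {42.5d, Double.POSITIVE_INFINITY, 7};

    for (Object value : samples) {
      // Same merged condition as in the diff: keep Numbers, but skip infinite Doubles,
      // because new BigDecimal("Infinity") throws NumberFormatException.
      if ((value instanceof Number)
          && (!(value instanceof Double) || !((Double) value).isInfinite())) {
        System.out.println("kept:    " + new BigDecimal(value.toString()));
      } else {
        System.out.println("skipped: " + value);
      }
    }
  }
}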
======================================================================
@@ -57,7 +57,7 @@ void seekToBeginningAllPartitions() {
         new ConsumerPosition(SeekType.BEGINNING, Map.of(0, 0L, 1, 0L)));
     seek.assignAndSeek(consumer);
     assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1);
-    assertThat(consumer.position(tp0)).isEqualTo(0L);
+    assertThat(consumer.position(tp0)).isZero();
     assertThat(consumer.position(tp1)).isEqualTo(10L);
   }

@@ -68,9 +68,9 @@ void seekToBeginningWithPartitionsList() {
         new ConsumerPosition(SeekType.BEGINNING, Map.of()));
     seek.assignAndSeek(consumer);
     assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2, tp3);
-    assertThat(consumer.position(tp0)).isEqualTo(0L);
+    assertThat(consumer.position(tp0)).isZero();
     assertThat(consumer.position(tp1)).isEqualTo(10L);
-    assertThat(consumer.position(tp2)).isEqualTo(0L);
+    assertThat(consumer.position(tp2)).isZero();
     assertThat(consumer.position(tp3)).isEqualTo(25L);
   }

@@ -81,7 +81,7 @@ void seekToOffset() {
         new ConsumerPosition(SeekType.OFFSET, Map.of(0, 0L, 1, 1L, 2, 2L)));
     seek.assignAndSeek(consumer);
     assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2);
-    assertThat(consumer.position(tp0)).isEqualTo(0L);
+    assertThat(consumer.position(tp0)).isZero();
     assertThat(consumer.position(tp1)).isEqualTo(1L);
     assertThat(consumer.position(tp2)).isEqualTo(2L);
   }