Merge pull request #3202 from ingef/release
Merge Release
awildturtok committed Nov 8, 2023
2 parents 70a1065 + 2492232 commit 0b3d6bf
Showing 358 changed files with 9,147 additions and 2,381 deletions.
@@ -55,7 +55,11 @@ public FrontendMeInformation getUserInformation(@NonNull User user) {
// User can use the dataset and can possibly upload ids for resolving
datasetAblilites.put(
dataset.getId(),
new FrontendDatasetAbility(user.isPermitted(dataset, Ability.PRESERVE_ID))
new FrontendDatasetAbility(
user.isPermitted(dataset, Ability.PRESERVE_ID),
user.isPermitted(dataset, Ability.ENTITY_PREVIEW) && user.isPermitted(dataset, Ability.PRESERVE_ID),
user.isPermitted(dataset, Ability.QUERY_PREVIEW)
)
);
}

@@ -93,6 +97,8 @@ public static class FrontendMeInformation {
@NoArgsConstructor
public static class FrontendDatasetAbility {
private boolean canUpload;
private boolean canViewEntityPreview;
private boolean canViewQueryPreview;
}

}
@@ -6,10 +6,12 @@
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Predicate;
@@ -48,6 +50,7 @@
import com.bakdata.conquery.models.auth.entities.Subject;
import com.bakdata.conquery.models.auth.entities.User;
import com.bakdata.conquery.models.auth.permissions.Ability;
import com.bakdata.conquery.models.auth.permissions.ConqueryPermission;
import com.bakdata.conquery.models.common.Range;
import com.bakdata.conquery.models.config.ColumnConfig;
import com.bakdata.conquery.models.config.ConqueryConfig;
@@ -58,6 +61,7 @@
import com.bakdata.conquery.models.error.ConqueryError;
import com.bakdata.conquery.models.execution.ExecutionState;
import com.bakdata.conquery.models.execution.ManagedExecution;
import com.bakdata.conquery.models.identifiable.ids.specific.GroupId;
import com.bakdata.conquery.models.identifiable.ids.specific.ManagedExecutionId;
import com.bakdata.conquery.models.identifiable.mapping.IdPrinter;
import com.bakdata.conquery.models.query.ExecutionManager;
@@ -312,6 +316,43 @@ public void patchQuery(Subject subject, ManagedExecution execution, MetaDataPatc

log.info("Patching {} ({}) with patch: {}", execution.getClass().getSimpleName(), execution, patch);

// If the patch shares the execution, we also share all subQueries
if (patch.getGroups() != null && !patch.getGroups().isEmpty()) {


for (ManagedExecutionId managedExecutionId : execution.getSubmitted().collectRequiredQueries()) {
final ManagedExecution subQuery = storage.getExecution(managedExecutionId);

if (!subject.isPermitted(subQuery, Ability.READ)) {
log.warn("Not sharing {} as User {} is not allowed to see it themselves.", subQuery.getId(), subject);
continue;
}

final ConqueryPermission canReadQuery = subQuery.createPermission(Set.of(Ability.READ));

final Set<GroupId> groupsToShareWith = new HashSet<>(patch.getGroups());

// Find all groups the query is already shared with, so we do not remove them, as patch is absolute
for (Group group : storage.getAllGroups()) {
if (groupsToShareWith.contains(group.getId())){
continue;
}

final Set<ConqueryPermission> effectivePermissions = group.getEffectivePermissions();

if(effectivePermissions.stream().anyMatch(perm -> perm.implies(canReadQuery))) {
groupsToShareWith.add(group.getId());
}
}

final MetaDataPatch sharePatch = MetaDataPatch.builder()
.groups(new ArrayList<>(groupsToShareWith))
.build();

patchQuery(subject, subQuery, sharePatch);
}
}

patch.applyTo(execution, storage, subject);
storage.updateExecution(execution);
}
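
In short, the sharing loop above applies a union rule: because the patch's group list is absolute, a sub-query ends up shared with the newly requested groups plus every group whose effective permissions already imply READ on it. A minimal sketch of that rule, using only calls that appear in this hunk but with hypothetical helper and parameter names (groupsToShareWith, requestedGroups, allGroups), and assuming the java.util and permission imports visible at the top of this file's diff:

static Set<GroupId> groupsToShareWith(Collection<GroupId> requestedGroups, Collection<Group> allGroups, ConqueryPermission canReadQuery) {
	// Start from the groups the patch explicitly asks for.
	final Set<GroupId> result = new HashSet<>(requestedGroups);

	for (Group group : allGroups) {
		// Keep every group whose effective permissions already imply READ on the sub-query,
		// so the absolute patch does not silently revoke an existing share.
		if (group.getEffectivePermissions().stream().anyMatch(perm -> perm.implies(canReadQuery))) {
			result.add(group.getId());
		}
	}

	return result;
}
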
@@ -397,7 +438,9 @@ public ExternalUploadResult uploadEntities(Subject subject, Dataset dataset, Ext
*/
public FullExecutionStatus getSingleEntityExport(Subject subject, UriBuilder uriBuilder, String idKind, String entity, List<Connector> sources, Dataset dataset, Range<LocalDate> dateRange) {

final Namespace namespace = datasetRegistry.get(dataset.getId());
subject.authorize(dataset, Ability.ENTITY_PREVIEW);
subject.authorize(dataset, Ability.PRESERVE_ID);

final PreviewConfig previewConfig = datasetRegistry.get(dataset.getId()).getPreviewConfig();
final EntityPreviewForm form = EntityPreviewForm.create(entity, idKind, dateRange, sources, previewConfig.getSelects(), previewConfig.getTimeStratifiedSelects(), datasetRegistry);

@@ -44,6 +44,8 @@ public abstract class ExecutionStatus {
private String queryType;
private SecondaryIdDescriptionId secondaryId;

private boolean containsDates;


/**
* The urls under from which the result of the execution can be downloaded as soon as it finished successfully.
@@ -46,7 +46,6 @@ public class FullExecutionStatus extends ExecutionStatus {
*/
private boolean canExpand;

private boolean containsDates;

/**
* Is set to the query description if the user can expand all included concepts.
@@ -11,6 +11,7 @@
import com.bakdata.conquery.apiv1.query.Query;
import com.bakdata.conquery.io.cps.CPSType;
import com.bakdata.conquery.io.jackson.View;
import com.bakdata.conquery.models.error.ConqueryError;
import com.bakdata.conquery.models.identifiable.ids.specific.ManagedExecutionId;
import com.bakdata.conquery.models.query.ManagedQuery;
import com.bakdata.conquery.models.query.QueryExecutionContext;
@@ -74,6 +75,11 @@ public QPNode createQueryPlan(QueryPlanContext context, ConceptQueryPlan plan) {
@Override
public void resolve(QueryResolveContext context) {
query = ((ManagedQuery) context.getStorage().getExecution(queryId));

if(query == null){
throw new ConqueryError.ExecutionCreationResolveError(queryId);
}

resolvedQuery = query.getQuery();

// Yey recursion, because the query might consist of another CQReusedQuery or CQExternal
@@ -1,11 +1,7 @@
package com.bakdata.conquery.io.mina;

import java.io.File;
import java.util.UUID;

import javax.validation.Validator;

import com.bakdata.conquery.io.jackson.Jackson;
import com.bakdata.conquery.models.exceptions.ValidatorHelper;
import com.bakdata.conquery.models.messages.network.NetworkMessage;
import com.bakdata.conquery.models.worker.IdResolveContext;
@@ -14,7 +10,6 @@
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.SerializationFeature;
import lombok.extern.slf4j.Slf4j;

@Slf4j
@@ -26,34 +21,26 @@ public class BinaryJacksonCoder implements CQCoder<NetworkMessage<?>> {

public BinaryJacksonCoder(IdResolveContext datasets, Validator validator, ObjectMapper objectMapper) {
this.validator = validator;
this.writer = objectMapper
.writerFor(NetworkMessage.class);
this.reader = datasets
.injectIntoNew(objectMapper.readerFor(NetworkMessage.class))
.without(Feature.AUTO_CLOSE_SOURCE);
writer = objectMapper.writerFor(NetworkMessage.class);
reader = datasets.injectIntoNew(objectMapper.readerFor(NetworkMessage.class)).without(Feature.AUTO_CLOSE_SOURCE);
}

@Override
public Chunkable encode(NetworkMessage<?> message) throws Exception {
ValidatorHelper.failOnError(log, validator.validate(message));

UUID id = message.getMessageId();
Chunkable chunkable = new Chunkable(id, writer, message);
if(log.isTraceEnabled()) {
Jackson.MAPPER.writerFor(NetworkMessage.class).with(SerializationFeature.INDENT_OUTPUT).writeValue(new File("dumps/out_"+id+".json"), message);
}
return chunkable;
return new Chunkable(message.getMessageId(), writer, message);
}

@Override
public NetworkMessage<?> decode(ChunkedMessage message) throws Exception {
try(EndCheckableInputStream is = message.createInputStream()) {
Object obj = reader.readValue(is);
if(!is.isAtEnd()) {
throw new IllegalStateException("After reading the JSON message "+obj+" the buffer has still bytes available");
try (EndCheckableInputStream is = message.createInputStream()) {
final Object obj = reader.readValue(is);
if (!is.isAtEnd()) {
throw new IllegalStateException("After reading the JSON message " + obj + " the buffer has still bytes available");
}
ValidatorHelper.failOnError(log, validator.validate(obj));
return (NetworkMessage<?>)obj;
return (NetworkMessage<?>) obj;
}
}
}
@@ -26,7 +26,7 @@ public class ChunkWriter extends ProtocolEncoderAdapter {

@Getter
@Setter
private int bufferSize = Ints.checkedCast(Size.megabytes(32).toBytes());
private int bufferSize = Ints.checkedCast(Size.megabytes(2).toBytes());
private final SoftPool<IoBuffer> bufferPool = new SoftPool<>(() -> IoBuffer.allocate(bufferSize));
@SuppressWarnings("rawtypes")
private final CQCoder coder;
@@ -12,6 +12,7 @@
import com.bakdata.conquery.mode.StorageListener;
import com.bakdata.conquery.models.config.ConqueryConfig;
import com.bakdata.conquery.models.jobs.JobManager;
import com.bakdata.conquery.models.worker.ClusterHealthCheck;
import com.bakdata.conquery.models.worker.DatasetRegistry;
import com.bakdata.conquery.models.worker.DistributedNamespace;
import com.bakdata.conquery.models.worker.ShardNodeInformation;
@@ -22,32 +23,26 @@
public class ClusterManagerProvider implements ManagerProvider {

public ClusterManager provideManager(ConqueryConfig config, Environment environment) {
JobManager jobManager = ManagerProvider.newJobManager(config);
InternalObjectMapperCreator creator = ManagerProvider.newInternalObjectMapperCreator(config, environment.getValidator());
ClusterState clusterState = new ClusterState();
NamespaceHandler<DistributedNamespace> namespaceHandler = new ClusterNamespaceHandler(clusterState, config, creator);
DatasetRegistry<DistributedNamespace> datasetRegistry = ManagerProvider.createDatasetRegistry(namespaceHandler, config, creator);
final JobManager jobManager = ManagerProvider.newJobManager(config);
final InternalObjectMapperCreator creator = ManagerProvider.newInternalObjectMapperCreator(config, environment.getValidator());
final ClusterState clusterState = new ClusterState();
final NamespaceHandler<DistributedNamespace> namespaceHandler = new ClusterNamespaceHandler(clusterState, config, creator);
final DatasetRegistry<DistributedNamespace> datasetRegistry = ManagerProvider.createDatasetRegistry(namespaceHandler, config, creator);
creator.init(datasetRegistry);

ClusterConnectionManager connectionManager = new ClusterConnectionManager(
datasetRegistry, jobManager, environment.getValidator(), config, creator, clusterState
);
ImportHandler importHandler = new ClusterImportHandler(config, datasetRegistry);
StorageListener extension = new ClusterStorageListener(jobManager, datasetRegistry);
Supplier<Collection<ShardNodeInformation>> nodeProvider = () -> clusterState.getShardNodes().values();
List<Task> adminTasks = List.of(new ReportConsistencyTask(clusterState));

DelegateManager<DistributedNamespace> delegate = new DelegateManager<>(
config,
environment,
datasetRegistry,
importHandler,
extension,
nodeProvider,
adminTasks,
creator,
jobManager
);
final ClusterConnectionManager connectionManager =
new ClusterConnectionManager(datasetRegistry, jobManager, environment.getValidator(), config, creator, clusterState);

final ImportHandler importHandler = new ClusterImportHandler(config, datasetRegistry);
final StorageListener extension = new ClusterStorageListener(jobManager, datasetRegistry);
final Supplier<Collection<ShardNodeInformation>> nodeProvider = () -> clusterState.getShardNodes().values();
final List<Task> adminTasks = List.of(new ReportConsistencyTask(clusterState));

final DelegateManager<DistributedNamespace>
delegate =
new DelegateManager<>(config, environment, datasetRegistry, importHandler, extension, nodeProvider, adminTasks, creator, jobManager);

environment.healthChecks().register("cluster", new ClusterHealthCheck(clusterState));

return new ClusterManager(delegate, connectionManager);
}
@@ -19,6 +19,9 @@ public enum Ability {
DOWNLOAD, // Allow download of per entity results
PRESERVE_ID, // Needs extra implementation: Allow the user to see the real id of entities and externally resolve real ids into conquery

ENTITY_PREVIEW,
QUERY_PREVIEW, //TODO not yet implemented

// FormConfig specific
MODIFY;

@@ -7,29 +7,15 @@
@UtilityClass
public class AbilitySets {

public static final EnumSet<Ability> QUERY_CREATOR = EnumSet.of(
Ability.READ,
Ability.DELETE,
Ability.SHARE,
Ability.TAG,
Ability.CANCEL,
Ability.LABEL
);
public static final EnumSet<Ability> QUERY_CREATOR = EnumSet.of(Ability.READ, Ability.DELETE, Ability.SHARE, Ability.TAG, Ability.CANCEL, Ability.LABEL);

public static final EnumSet<Ability> FORM_CONFIG_CREATOR = EnumSet.of(
Ability.READ,
Ability.DELETE,
Ability.SHARE,
Ability.TAG,
Ability.LABEL,
Ability.MODIFY
);
public static final EnumSet<Ability>
FORM_CONFIG_CREATOR =
EnumSet.of(Ability.READ, Ability.DELETE, Ability.SHARE, Ability.TAG, Ability.LABEL, Ability.MODIFY);

public static final EnumSet<Ability> SHAREHOLDER = EnumSet.of(
Ability.READ,
Ability.TAG,
Ability.LABEL
);
public static final EnumSet<Ability> SHAREHOLDER = EnumSet.of(Ability.READ, Ability.TAG, Ability.LABEL);

public static final EnumSet<Ability> DATASET_CREATOR = EnumSet.of(Ability.READ, Ability.DOWNLOAD, Ability.PRESERVE_ID);
public static final EnumSet<Ability>
DATASET_CREATOR =
EnumSet.of(Ability.READ, Ability.DOWNLOAD, Ability.PRESERVE_ID, Ability.ENTITY_PREVIEW, Ability.QUERY_PREVIEW);
}
@@ -1,6 +1,8 @@
package com.bakdata.conquery.models.dictionary;


import java.util.stream.IntStream;

import com.bakdata.conquery.models.events.stores.root.IntegerStore;
import com.bakdata.conquery.models.events.stores.root.StringStore;
import it.unimi.dsi.fastutil.ints.Int2IntMap;
@@ -40,17 +42,17 @@ public static DictionaryMapping createAndImport(Dictionary from, Dictionary into

int newIds = 0;

Int2IntMap source2Target = new Int2IntOpenHashMap(from.size());
final Int2IntMap source2Target = new Int2IntOpenHashMap(from.size());

source2Target.defaultReturnValue(-1);

Int2IntMap target2Source = new Int2IntOpenHashMap(from.size());
final Int2IntMap target2Source = new Int2IntOpenHashMap(from.size());

target2Source.defaultReturnValue(-1);

for (int id = 0; id < from.size(); id++) {

byte[] value = from.getElement(id);
final byte[] value = from.getElement(id);
int targetId = into.getId(value);

//if id was unknown until now
@@ -92,22 +94,21 @@ public IntCollection target() {
* Mutably applies mapping to store.
*/
public void applyToStore(StringStore from, IntegerStore to) {
for (int event = 0; event < from.getLines(); event++) {
if (!from.has(event)) {
to.setNull(event);
continue;
}

final int string = from.getString(event);

int value = source2Target(string);

if (value == -1) {
throw new IllegalStateException(String.format("Missing mapping for %s", string));
}

to.setInteger(event, value);
}
IntStream.range(0, from.getLines())
.parallel()
.forEach(event -> {
if (!from.has(event)) {
to.setNull(event);
return;
}
final int string = from.getString(event);
final int value = source2Target(string);

if (value == -1) {
throw new IllegalStateException(String.format("Missing mapping for %s", string));
}
to.setInteger(event, value);
});
}

}
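
The rewritten applyToStore walks the events with a parallel IntStream, presumably because each event writes to its own index in the target store, so the remappings are independent of one another. A standalone sketch of that pattern, with a plain int[] standing in for the IntegerStore and a hypothetical source2Target operator; this illustrates the idea only and is not the project's API:

// Illustration only (uses java.util.stream.IntStream and java.util.function.IntUnaryOperator).
static int[] remapParallel(int[] sourceIds, java.util.function.IntUnaryOperator source2Target) {
	final int[] targetIds = new int[sourceIds.length];

	java.util.stream.IntStream.range(0, sourceIds.length)
			.parallel()
			.forEach(event -> {
				final int mapped = source2Target.applyAsInt(sourceIds[event]);

				if (mapped == -1) {
					throw new IllegalStateException(String.format("Missing mapping for %s", sourceIds[event]));
				}

				// Each event writes to its own index, so the parallel writes cannot race.
				targetIds[event] = mapped;
			});

	return targetIds;
}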
