diff --git a/java/converters/rdf2Graph/pom.xml b/java/converters/rdf2Graph/pom.xml
index 875795055..01192b29d 100644
--- a/java/converters/rdf2Graph/pom.xml
+++ b/java/converters/rdf2Graph/pom.xml
@@ -94,6 +94,11 @@
 			<artifactId>validation-api</artifactId>
 			<version>2.0.1.Final</version>
 		</dependency>
+		<dependency>
+			<groupId>com.google.code.gson</groupId>
+			<artifactId>gson</artifactId>
+			<version>2.8.2</version>
+		</dependency>
 	</dependencies>
diff --git a/java/converters/rdf2Graph/src/main/java/io/opensaber/utils/converters/RDF2Graph.java b/java/converters/rdf2Graph/src/main/java/io/opensaber/utils/converters/RDF2Graph.java
index fdb01dbfe..1933adad6 100644
--- a/java/converters/rdf2Graph/src/main/java/io/opensaber/utils/converters/RDF2Graph.java
+++ b/java/converters/rdf2Graph/src/main/java/io/opensaber/utils/converters/RDF2Graph.java
@@ -5,6 +5,7 @@
 import java.util.List;
 import java.util.Stack;
 
+import com.google.gson.Gson;
 import org.apache.jena.rdf.model.RDFNode;
 import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
 import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
@@ -120,8 +121,15 @@ private static void extractModelFromVertex(ModelBuilder builder, Vertex s) {
 		Iterator<VertexProperty<Object>> propertyIter = s.properties();
 		while (propertyIter.hasNext()) {
 			VertexProperty<Object> property = propertyIter.next();
-			logger.debug("ADDING Property " + property.label() + ": " + property.value());
-			Object object = property.value();
+			logger.debug("ADDING Property " + property.label() + ": " + property.value());
+			Object propValue = property.value();
+			try {
+				// Multi-valued fields are stored as serialized JSON; restore them as lists
+				List list = new Gson().fromJson(String.valueOf(propValue), List.class);
+				propValue = list;
+			} catch (com.google.gson.JsonSyntaxException ex) {
+				// Not a JSON value; keep the raw scalar as-is
+			}
+			Object object = propValue;
 			Property metaProperty = property.property("@type");
 			String type = null;
 			if (metaProperty.isPresent()) {
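The converter change above probes every stored property value: a value that Gson can parse as a JSON array comes back as a java.util.List, while anything else throws JsonSyntaxException and keeps its raw value. A minimal, self-contained sketch of that round trip (the sample values are hypothetical, not taken from the registry schema):

    import com.google.gson.Gson;
    import com.google.gson.JsonSyntaxException;
    import java.util.List;

    public class MultiValuedPropertyDemo {
        public static void main(String[] args) {
            Gson gson = new Gson();

            // A multi-valued field stored on the vertex as a JSON string...
            System.out.println(coerce(gson, "[\"english\",\"hindi\"]")); // [english, hindi]

            // ...while a plain literal fails to parse and stays a String.
            System.out.println(coerce(gson, "BasicProficiency"));        // BasicProficiency
        }

        // Mirrors the try/catch added to extractModelFromVertex above.
        static Object coerce(Gson gson, Object propValue) {
            try {
                return gson.fromJson(String.valueOf(propValue), List.class);
            } catch (JsonSyntaxException ex) {
                return propValue; // not JSON -- keep the scalar
            }
        }
    }

One trade-off worth noting: any scalar string that happens to look like a JSON array will also be coerced into a List by this probe.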
diff --git a/java/cukes/src/test/java/io/opensaber/registry/test/RegistryIntegrationSteps.java b/java/cukes/src/test/java/io/opensaber/registry/test/RegistryIntegrationSteps.java
index a30e16b1f..a54cc0d4e 100644
--- a/java/cukes/src/test/java/io/opensaber/registry/test/RegistryIntegrationSteps.java
+++ b/java/cukes/src/test/java/io/opensaber/registry/test/RegistryIntegrationSteps.java
@@ -6,11 +6,14 @@
 import java.io.IOException;
 import java.io.StringReader;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.Map;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.jena.rdf.model.Model;
 import org.apache.jena.rdf.model.ModelFactory;
+import org.apache.jena.rdf.model.Resource;
 import org.apache.jena.riot.RDFDataMgr;
+import org.apache.jena.util.ResourceUtils;
 import org.json.JSONObject;
 import org.springframework.http.HttpEntity;
 import org.springframework.http.HttpHeaders;
@@ -53,6 +56,7 @@ public class RegistryIntegrationSteps extends RegistryTestBase {
 	private static String duplicateLabel;
 	private HttpHeaders headers;
 	private String updateId;
+	private Map<String, String> IDMap = new HashMap<>();
 
 	Type type = new TypeToken<Map<String, String>>() {
 	}.getType();
@@ -110,18 +114,28 @@ public void setMissingAuthToken(){
 
 	@When("^issuing the record into the registry")
 	public void addEntity(){
-		response = callRegistryCreateAPI();
+		callRegistryCreateAPI();
 	}
 
-	@When("^an entity for the record is issued into the registry$")
+	private void extractAndMapIDfromResponse() {
+		// The registry answers with its own generated label; remember both directions
+		String newid = extractID(String.valueOf(response.getBody().getResult().get("entity")));
+		mapID(id, newid);
+	}
+
+	private void mapID(String id, String newid) {
+		IDMap.put(baseUrl + id, baseUrl + newid);
+		IDMap.put(baseUrl + newid, baseUrl + id);
+	}
+
+	@When("^an entity for the record is issued into the registry$")
 	public void add_entity_to_existing_record_in_registry(){
 		jsonldData(ENTITY_JSONLD);
-		response = callRegistryCreateAPI(baseUrl+updateId, baseUrl+"basicProficiencyLevel");
+		callRegistryCreateAPI(baseUrl+updateId, baseUrl+"basicProficiencyLevel");
 	}
 
 	@When("^the same entity for the record is issued into the registry$")
 	public void add_existing_entity_to_existing_record_in_registry(){
-		response = callRegistryCreateAPI(baseUrl+updateId, baseUrl+"basicProficiencyLevel");
+		callRegistryCreateAPI(baseUrl+updateId, baseUrl+"basicProficiencyLevel");
 	}
 
 	public void jsonldData(String filename){
@@ -130,28 +144,28 @@ public void jsonldData(String filename){
 		assertNotNull(jsonld);
 	}
 
-	private ResponseEntity<Response> callRegistryCreateAPI() {
+	private void callRegistryCreateAPI() {
 		headers.setContentType(MediaType.APPLICATION_JSON);
 		HttpEntity<String> entity = new HttpEntity<>(jsonld, headers);
-		ResponseEntity<Response> response = restTemplate.postForEntity(
+		response = restTemplate.postForEntity(
 				baseUrl+ADD_ENTITY,
 				entity,
-				Response.class);
-		return response;
+				Response.class);
+		extractAndMapIDfromResponse();
 	}
 
-	private ResponseEntity<Response> callRegistryCreateAPI(String entityLabel, String property) {
+	private void callRegistryCreateAPI(String entityLabel, String property) {
 		headers.setContentType(MediaType.APPLICATION_JSON);
 		HttpEntity<String> entity = new HttpEntity<>(jsonld, headers);
 		Map<String, String> uriVariables = new HashMap<>();
-		uriVariables.put("id", entityLabel);
+		uriVariables.put("id", getMappedID(entityLabel));
 		uriVariables.put("prop", property);
-		ResponseEntity<Response> response = restTemplate.postForEntity(
+		response = restTemplate.postForEntity(
 				baseUrl+ADD_ENTITY+"?id={id}&prop={prop}",
 				entity,
 				Response.class,
-				uriVariables);
-		return response;
+				uriVariables);
+		extractAndMapIDfromResponse();
 	}
 
 	@Then("^record issuing should be successful")
@@ -192,6 +206,9 @@ private void checkForIsomorphicModel() throws IOException{
 		Model actualModel = ModelFactory.createDefaultModel();
 		String newJsonld = new JSONObject(result).toString(2);
 		RDFDataMgr.read(actualModel, new StringReader(newJsonld), null, org.apache.jena.riot.RDFLanguages.JSONLD);
+		remapURIs(expectedModel);
+//		printModel(expectedModel);
+//		printModel(actualModel);
 		assertTrue(expectedModel.isIsomorphicWith(actualModel));
 	}
 
@@ -247,12 +264,20 @@ public void retrieving_the_record_from_the_registry(){
 
 	private ResponseEntity<Response> callRegistryReadAPI() {
 		HttpEntity<String> entity = new HttpEntity<>(headers);
-		ResponseEntity<Response> response = restTemplate.exchange(baseUrl+"/"+id, HttpMethod.GET, entity, Response.class);
+		ResponseEntity<Response> response = restTemplate.exchange(getMappedID(baseUrl+id), HttpMethod.GET, entity, Response.class);
 		return response;
 	}
 
-	@Then("^record retrieval should be unsuccessful$")
+	private String getMappedID(String id) {
+		return IDMap.get(id);
+	}
+
+	@Then("^record retrieval should be unsuccessful$")
 	public void record_retrieval_should_be_unsuccessful() throws Exception {
 		checkUnsuccessfulResponse();
 	}
@@ -282,7 +307,7 @@ public void validResponseFormat(){
 			setJsonld(VALID_NEWJSONLD);
 			id=setJsonldWithNewRootLabel();
 			setValidAuthHeader();
-			response = callRegistryCreateAPI();
+			callRegistryCreateAPI();
 		} catch (Exception e) {
 			response = null;
 		}
@@ -333,4 +358,16 @@ public void test_audit_record_unexpected_in_read() throws Exception {
 			checkSuccessfulResponse();
 		}
 	}
+
+	private void remapURIs(Model expectedModel) {
+		// Rewrite expected subject URIs to the registry-issued ones before comparing models
+		Iterator<Resource> iter = expectedModel.listSubjects();
+		while (iter.hasNext()) {
+			Resource subject = iter.next();
+			String oldURI = subject.getURI();
+			String newURI = IDMap.get(oldURI);
+			if (oldURI != null && newURI != null) {
+				ResourceUtils.renameResource(subject, newURI);
+			}
+		}
+	}
 }
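Because the DAO now issues its own namespaced labels instead of reusing the client-supplied one (see the RegistryDaoImpl change further down), the expected RDF model no longer shares subject URIs with what the registry returns. The test therefore keeps a two-way IDMap and rewrites the expected model's subjects before the isomorphism check. A self-contained sketch of that remapping with Jena (the URIs are hypothetical):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.jena.rdf.model.Model;
    import org.apache.jena.rdf.model.ModelFactory;
    import org.apache.jena.rdf.model.Resource;
    import org.apache.jena.util.ResourceUtils;

    public class RemapDemo {
        public static void main(String[] args) {
            Map<String, String> idMap = new HashMap<>();
            idMap.put("http://localhost:8080/e-100", "http://localhost:8080/41");

            Model expected = ModelFactory.createDefaultModel();
            expected.createResource("http://localhost:8080/e-100")
                    .addProperty(expected.createProperty("http://example.org/name"), "marko");

            // Snapshot the subjects first: renameResource mutates the model,
            // so renaming while iterating a live ResIterator is unsafe.
            List<Resource> subjects = expected.listSubjects().toList();
            for (Resource subject : subjects) {
                String newUri = idMap.get(subject.getURI());
                if (subject.getURI() != null && newUri != null) {
                    ResourceUtils.renameResource(subject, newUri);
                }
            }
            expected.write(System.out, "N-TRIPLES");
        }
    }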
diff --git a/java/cukes/src/test/java/io/opensaber/registry/test/RegistryTestBase.java b/java/cukes/src/test/java/io/opensaber/registry/test/RegistryTestBase.java
index 29d0158f2..9963b88f3 100644
--- a/java/cukes/src/test/java/io/opensaber/registry/test/RegistryTestBase.java
+++ b/java/cukes/src/test/java/io/opensaber/registry/test/RegistryTestBase.java
@@ -3,6 +3,7 @@
 import com.google.gson.Gson;
 import com.google.gson.reflect.TypeToken;
 import io.opensaber.pojos.Response;
+import org.apache.jena.rdf.model.Model;
 import org.springframework.http.*;
 import org.springframework.http.client.HttpComponentsClientHttpRequestFactory;
 import org.springframework.web.client.RestTemplate;
@@ -17,6 +18,7 @@
 import java.nio.file.Files;
 import java.nio.file.Paths;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.UUID;
 import java.util.regex.Matcher;
@@ -162,4 +164,23 @@ private static String generateAuthToken() {
 		return myMap.getOrDefault("access_token", "");
 	}
 
+	public void printModel(Model rdfModel) {
+		// Debug helper: dump every statement in the model to stdout
+		System.out.println("===================");
+		Iterator iter = rdfModel.listStatements();
+		while (iter.hasNext()) {
+			System.out.println(iter.next());
+		}
+		System.out.println("===================");
+	}
+
+	public String extractID(String URI) {
+		// Pull the trailing numeric segment off an entity URI, e.g. ".../41" -> "41"
+		String longLabel = "";
+		Pattern r = Pattern.compile(".*\\/(\\d+)");
+		Matcher m = r.matcher(URI);
+		if (m.find()) {
+			longLabel = m.group(1);
+		}
+		return longLabel;
+	}
 }
diff --git a/java/cukes/src/test/java/io/opensaber/registry/test/create.feature b/java/cukes/src/test/java/io/opensaber/registry/test/create.feature
index a252daa8e..b4cf0f97b 100644
--- a/java/cukes/src/test/java/io/opensaber/registry/test/create.feature
+++ b/java/cukes/src/test/java/io/opensaber/registry/test/create.feature
@@ -1,6 +1,7 @@
 @controller @create
 Feature: Inserting a record into the registry
 
+  @issue
   Scenario: Issuing a valid record
     Given a valid record
     And a valid auth token
@@ -8,12 +9,11 @@ Feature: Inserting a record into the registry
     Then record issuing should be successful
     And fetching the record from the registry should match the issued record
 
-  Scenario: Issuing a duplicate record
+  Scenario: Issuing a duplicate record should fail
    Given a record issued into the registry
    And a valid auth token
    When issuing the record into the registry again
    Then record issuing should be unsuccessful
-    And error message is Cannot insert duplicate record
 
   Scenario: Inserting second valid record into the registry
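extractID above relies on the greedy pattern .*\/(\d+): the leading .* swallows everything up to the last slash that is immediately followed by digits, so only the trailing numeric segment of the entity URI is captured, and a URI with no such segment yields the empty string. A quick check with hypothetical URIs:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class ExtractIdDemo {
        public static void main(String[] args) {
            Pattern p = Pattern.compile(".*\\/(\\d+)");
            String[] uris = {
                "http://localhost:8080/12345",     // captures "12345"
                "http://localhost:8080/no-digits"  // no match -> ""
            };
            for (String uri : uris) {
                Matcher m = p.matcher(uri);
                System.out.println(m.find() ? m.group(1) : "");
            }
        }
    }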
diff --git a/java/registry/data/conf/neo4j.conf b/java/registry/data/conf/neo4j.conf
new file mode 100755
index 000000000..5625dcc0d
--- /dev/null
+++ b/java/registry/data/conf/neo4j.conf
@@ -0,0 +1,796 @@
+#*****************************************************************
+# Neo4j configuration
+#
+# For more details and a complete list of settings, please see
+# https://neo4j.com/docs/operations-manual/current/reference/configuration-settings/
+#*****************************************************************
+
+# The name of the database to mount. Note that this is *not* to be confused with
+# the causal_clustering.database setting, used to specify a logical database
+# name when creating a multi-clustering deployment.
+#dbms.active_database=graph.db
+
+# Paths of directories in the installation.
+#dbms.directories.data=data
+#dbms.directories.plugins=plugins
+#dbms.directories.certificates=certificates
+#dbms.directories.logs=logs
+#dbms.directories.lib=lib
+#dbms.directories.run=run
+#dbms.directories.metrics=metrics
+
+# This setting constrains all `LOAD CSV` import files to be under the `import` directory. Remove or comment it out to
+# allow files to be loaded from anywhere in the filesystem; this introduces possible security problems. See the
+# `LOAD CSV` section of the manual for details.
+dbms.directories.import=import
+
+# Whether requests to Neo4j are authenticated.
+# To disable authentication, uncomment this line
+dbms.security.auth_enabled=false
+
+# Enable this to be able to upgrade a store from an older version.
+#dbms.allow_upgrade=true
+
+# Java Heap Size: by default the Java heap size is dynamically
+# calculated based on available system resources.
+# Uncomment these lines to set specific initial and maximum
+# heap size.
+dbms.memory.heap.initial_size=512m
+dbms.memory.heap.max_size=1G
+
+# The amount of memory to use for mapping the store files, in bytes (or
+# kilobytes with the 'k' suffix, megabytes with 'm' and gigabytes with 'g').
+# If Neo4j is running on a dedicated server, then it is generally recommended
+# to leave about 2-4 gigabytes for the operating system, give the JVM enough
+# heap to hold all your transaction state and query context, and then leave the
+# rest for the page cache.
+# The default page cache memory assumes the machine is dedicated to running
+# Neo4j, and is heuristically set to 50% of RAM minus the max Java heap size.
+dbms.memory.pagecache.size=512m
+
+# Enable online backups to be taken from this database.
+#dbms.backup.enabled=true
+
+# By default the backup service will only listen on localhost.
+# To enable remote backups you will have to bind to an external
+# network interface (e.g. 0.0.0.0 for all interfaces).
+# The protocol running varies depending on deployment. In a Causal Clustering environment this is the
+# same protocol that runs on causal_clustering.transaction_listen_address.
+#dbms.backup.address=0.0.0.0:6362
+
+# Enable encryption on the backup service for CC instances (does not work for single-instance or HA clusters)
+#dbms.backup.ssl_policy=backup
+
+#*****************************************************************
+# Network connector configuration
+#*****************************************************************
+
+# With default configuration Neo4j only accepts local connections.
+# To accept non-local connections, uncomment this line:
+#dbms.connectors.default_listen_address=0.0.0.0
+
+# You can also choose a specific network interface, and configure a non-default
+# port for each connector, by setting their individual listen_address.
+
+# The address at which this server can be reached by its clients. This may be the server's IP address or DNS name, or
+# it may be the address of a reverse proxy which sits in front of the server. This setting may be overridden for
+# individual connectors below.
+#dbms.connectors.default_advertised_address=localhost
+
+# You can also choose a specific advertised hostname or IP address, and
+# configure an advertised port for each connector, by setting their
+# individual advertised_address.
+
+# Bolt connector
+dbms.connector.bolt.enabled=true
+#dbms.connector.bolt.tls_level=OPTIONAL
+#dbms.connector.bolt.listen_address=:7687
+
+# HTTP Connector. There must be exactly one HTTP connector.
+dbms.connector.http.enabled=true
+#dbms.connector.http.listen_address=:7474
+
+# HTTPS Connector. There can be zero or one HTTPS connectors.
+dbms.connector.https.enabled=true
+#dbms.connector.https.listen_address=:7473
+
+# Number of Neo4j worker threads.
+#dbms.threads.worker_count=
+
+#*****************************************************************
+# SSL system configuration
+#*****************************************************************
+
+# Names of the SSL policies to be used for the respective components.
+
+# The legacy policy is a special policy which is not defined in
+# the policy configuration section, but rather derives from
+# dbms.directories.certificates and associated files
+# (by default: neo4j.key and neo4j.cert). Its use will be deprecated.
+
+# The policies to be used for connectors.
+#
+# N.B: Note that a connector must be configured to support/require
+# SSL/TLS for the policy to actually be utilized.
+#
+# see: dbms.connector.*.tls_level
+
+#bolt.ssl_policy=legacy
+#https.ssl_policy=legacy
+
+# For a causal cluster the configuring of a policy mandates its use.
+
+#causal_clustering.ssl_policy=
+
+#*****************************************************************
+# SSL policy configuration
+#*****************************************************************
+
+# Each policy is configured under a separate namespace, e.g.
+# dbms.ssl.policy.<policyname>.*
+#
+# The example settings below are for a new policy named 'default'.
+
+# The base directory for cryptographic objects. Each policy will by
+# default look for its associated objects (keys, certificates, ...)
+# under the base directory.
+#
+# Every such setting can be overridden using a full path to
+# the respective object, but every policy will by default look
+# for cryptographic objects in its base location.
+#
+# Mandatory setting
+
+#dbms.ssl.policy.default.base_directory=certificates/default
+
+# Allows the generation of a fresh private key and a self-signed
+# certificate if none are found in the expected locations. It is
+# recommended to turn this off again after keys have been generated.
+#
+# Keys should in general be generated and distributed offline
+# by a trusted certificate authority (CA) and not by utilizing
+# this mode.
+
+#dbms.ssl.policy.default.allow_key_generation=false
+
+# Enabling this makes it so that this policy ignores the contents
+# of the trusted_dir and simply resorts to trusting everything.
+#
+# Use of this mode is discouraged. It would offer encryption but no security.
+
+#dbms.ssl.policy.default.trust_all=false
+
+# The private key for the default SSL policy. By default a file
+# named private.key is expected under the base directory of the policy.
+# It is mandatory that a key can be found or generated.
+
+#dbms.ssl.policy.default.private_key=
+
+# The certificate for the default SSL policy. By default a file
+# named public.crt is expected under the base directory of the policy.
+# It is mandatory that a certificate can be found or generated.
+
+#dbms.ssl.policy.default.public_certificate=
+
+# The certificates of trusted parties. By default a directory named
+# 'trusted' is expected under the base directory of the policy. It is
+# mandatory to create the directory so that it exists, because it cannot
+# be auto-created (for security purposes).
+#
+# To enforce client authentication client_auth must be set to 'require'!
+
+#dbms.ssl.policy.default.trusted_dir=
+
+# Certificate Revocation Lists (CRLs). By default a directory named
+# 'revoked' is expected under the base directory of the policy. It is
+# mandatory to create the directory so that it exists, because it cannot
+# be auto-created (for security purposes).
+
+#dbms.ssl.policy.default.revoked_dir=
+
+# Client authentication setting. Values: none, optional, require
+# The default is to require client authentication.
+#
+# Servers are always authenticated unless explicitly overridden
+# using the trust_all setting. In a mutual authentication setup this
+# should be kept at the default of require and trusted certificates
+# must be installed in the trusted_dir.
+
+#dbms.ssl.policy.default.client_auth=require
+
+# A comma-separated list of allowed TLS versions.
+# By default only TLSv1.2 is allowed.
+
+#dbms.ssl.policy.default.tls_versions=
+
+# A comma-separated list of allowed ciphers.
+# The default ciphers are the defaults of the JVM platform.
+
+#dbms.ssl.policy.default.ciphers=
+
+#*****************************************************************
+# Logging configuration
+#*****************************************************************
+
+# To enable HTTP logging, uncomment this line
+#dbms.logs.http.enabled=true
+
+# Number of HTTP logs to keep.
+#dbms.logs.http.rotation.keep_number=5
+
+# Size of each HTTP log that is kept.
+#dbms.logs.http.rotation.size=20m
+
+# To enable GC Logging, uncomment this line
+#dbms.logs.gc.enabled=true
+
+# GC Logging Options
+# see http://docs.oracle.com/cd/E19957-01/819-0084-10/pt_tuningjava.html#wp57013 for more information.
+#dbms.logs.gc.options=-XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+PrintTenuringDistribution
+
+# Number of GC logs to keep.
+#dbms.logs.gc.rotation.keep_number=5
+
+# Size of each GC log that is kept.
+#dbms.logs.gc.rotation.size=20m
+
+# Size threshold for rotation of the debug log. If set to zero then no rotation will occur. Accepts a binary suffix "k",
+# "m" or "g".
+#dbms.logs.debug.rotation.size=20m
+
+# Maximum number of history files for the internal log.
+#dbms.logs.debug.rotation.keep_number=7
+
+# Log executed queries that take longer than the configured threshold. Enable by uncommenting this line.
+#dbms.logs.query.enabled=true
+
+# If the execution of a query takes more time than this threshold, the query is logged. If set to zero then all queries
+# are logged.
+#dbms.logs.query.threshold=0
+
+# The file size in bytes at which the query log will auto-rotate. If set to zero then no rotation will occur. Accepts a
+# binary suffix "k", "m" or "g".
+#dbms.logs.query.rotation.size=20m
+
+# Maximum number of history files for the query log.
+#dbms.logs.query.rotation.keep_number=7
+
+# Include parameters for the executed queries being logged (this is enabled by default).
+#dbms.logs.query.parameter_logging_enabled=true
+
+# Uncomment this line to include detailed time information for the executed queries being logged:
+#dbms.logs.query.time_logging_enabled=true
+
+# Uncomment this line to include bytes allocated by the executed queries being logged:
+#dbms.logs.query.allocation_logging_enabled=true
+
+# Uncomment this line to include page hits and page faults information for the executed queries being logged:
+#dbms.logs.query.page_logging_enabled=true
+
+# The security log is always enabled when `dbms.security.auth_enabled=true`, and resides in `logs/security.log`.
+
+# Log level for the security log. One of DEBUG, INFO, WARN and ERROR.
+#dbms.logs.security.level=INFO
+
+# Threshold for rotation of the security log.
+#dbms.logs.security.rotation.size=20m
+
+# Minimum time interval after last rotation of the security log before it may be rotated again.
+#dbms.logs.security.rotation.delay=300s
+
+# Maximum number of history files for the security log.
+#dbms.logs.security.rotation.keep_number=7
+
+#*****************************************************************
+# Causal Clustering Configuration
+#*****************************************************************
+
+# Uncomment and specify these lines for running Neo4j in Causal Clustering mode.
+# See the Causal Clustering documentation at https://neo4j.com/docs/ for details.
+
+# Database mode
+# Allowed values:
+# CORE - Core member of the cluster, part of the consensus quorum.
+# READ_REPLICA - Read replica in the cluster, an eventually-consistent read-only instance of the database.
+# To operate this Neo4j instance in Causal Clustering mode as a core member, uncomment this line:
+#dbms.mode=CORE
+
+# Expected number of Core servers in the cluster at formation
+#causal_clustering.minimum_core_cluster_size_at_formation=3
+
+# Minimum expected number of Core servers in the cluster at runtime.
+#causal_clustering.minimum_core_cluster_size_at_runtime=3
+
+# A comma-separated list of the address and port through which to reach all other members of the cluster. It must be in the
+# host:port format. For each machine in the cluster, the address will usually be the public ip address of that machine.
+# The port will be the value used in the setting "causal_clustering.discovery_listen_address".
+#causal_clustering.initial_discovery_members=localhost:5000,localhost:5001,localhost:5002
+
+# Host and port to bind the cluster member discovery management communication.
+# This is the setting to add to the collection of addresses in causal_clustering.initial_core_cluster_members.
+# Use 0.0.0.0 to bind to any network interface on the machine. If you want to only use a specific interface
+# (such as a private ip address on AWS, for example) then use that ip address instead.
+# If you don't know what value to use here, use this machine's ip address.
+#causal_clustering.discovery_listen_address=:5000
+
+# Network interface and port for the transaction shipping server to listen on.
+# Please note that it is also possible to run the backup client against this port so always limit access to it via the
+# firewall and configure an ssl policy. If you want to allow for messages to be read from
+# any network on this machine, use 0.0.0.0. If you want to constrain communication to a specific network address
+# (such as a private ip on AWS, for example) then use that ip address instead.
+# If you don't know what value to use here, use this machine's ip address.
+#causal_clustering.transaction_listen_address=:6000
+
+# Network interface and port for the RAFT server to listen on. If you want to allow for messages to be read from
+# any network on this machine, use 0.0.0.0. If you want to constrain communication to a specific network address
+# (such as a private ip on AWS, for example) then use that ip address instead.
+# If you don't know what value to use here, use this machine's ip address.
+#causal_clustering.raft_listen_address=:7000
+
+# List a set of names for groups to which this server should belong. This
+# is a comma-separated list and names should only use alphanumerics
+# and underscore. This can be used to identify groups of servers in the
+# configuration for load balancing and replication policies.
+#
+# The main intention for this is to group servers, but it is possible to specify
+# a unique identifier here as well which might be useful for troubleshooting
+# or other special purposes.
+#causal_clustering.server_groups=
+
+#*****************************************************************
+# Causal Clustering Load Balancing
+#*****************************************************************
+
+# N.B: Read the online documentation for a thorough explanation!
+
+# Selects the load balancing plugin that shall be enabled.
+#causal_clustering.load_balancing.plugin=server_policies
+
+####### Examples for "server_policies" plugin #######
+
+# Will select all available servers as the default policy, which is the
+# policy used when the client does not specify a policy preference. The
+# default configuration for the default policy is all().
+#causal_clustering.load_balancing.config.server_policies.default=all()
+
+# Will select servers in groups 'group1' or 'group2' under the default policy.
+#causal_clustering.load_balancing.config.server_policies.default=groups(group1,group2)
+
+# Slightly more advanced example:
+# Will select servers in 'group1', 'group2' or 'group3', but only if there are at least 2.
+# This policy will be exposed under the name of 'mypolicy'.
+#causal_clustering.load_balancing.config.server_policies.mypolicy=groups(group1,group2,group3) -> min(2)
+
+# Below will create an even more advanced policy named 'regionA' consisting of several rules
+# yielding the following behaviour:
+#
+# select servers in regionA, if at least 2 are available
+# otherwise: select servers in regionA and regionB, if at least 2 are available
+# otherwise: select all servers
+#
+# The intention is to create a policy for a particular region which prefers
+# a certain set of local servers, but which will fallback to other regions
+# or all available servers as required.
+#
+# N.B: The following configuration uses the line-continuation character \
+# which allows you to construct an easily readable rule set spanning
+# several lines.
+#
+#causal_clustering.load_balancing.config.server_policies.policyA=\
+#groups(regionA) -> min(2);\
+#groups(regionA,regionB) -> min(2);
+
+# Note that implicitly the last fallback is to always consider all() servers,
+# but this can be prevented by specifying a halt() as the last rule.
+#
+#causal_clustering.load_balancing.config.server_policies.regionA_only=\
+#groups(regionA);\
+#halt();
+
+#*****************************************************************
+# Causal Clustering Additional Configuration Options
+#*****************************************************************
+# The following settings are used less frequently.
+# If you don't know what these are, you don't need to change these from their default values.
+
+# The name of the database being hosted by this server instance. This
+# configuration setting may be safely ignored unless deploying a multicluster.
+# Instances may be allocated to constituent clusters by assigning them
+# distinct database names using this setting. For instance if you had 6
+# instances you could form 2 clusters by assigning half the database name
+# "foo", half the name "bar". The setting value must match exactly between
+# members of the same cluster. This setting is a one-off: once an instance
+# is configured with a database name it may not be changed in future without
+# using `neo4j-admin unbind`.
+#causal_clustering.database=default
+
+# Address and port that this machine advertises that its RAFT server is listening at. Should be a
+# specific network address. If you are unsure about what value to use here, use this machine's ip address.
+#causal_clustering.raft_advertised_address=:7000
+
+# Address and port that this machine advertises that its transaction shipping server is listening at. Should be a
+# specific network address. If you are unsure about what value to use here, use this machine's ip address.
+#causal_clustering.transaction_advertised_address=:6000
+
+# The time limit within which a new leader election will occur if no messages from the current leader are received.
+# Larger values allow for more stable leaders at the expense of longer unavailability times in case of leader
+# failures.
+#causal_clustering.leader_election_timeout=7s
+
+# The time limit allowed for a new member to attempt to update its data to match the rest of the cluster.
+#causal_clustering.join_catch_up_timeout=10m
+
+# The size of the batch for streaming entries to other machines while trying to catch up another machine.
+#causal_clustering.catchup_batch_size=64
+
+# When to pause sending entries to other machines and allow them to catch up.
+#causal_clustering.log_shipping_max_lag=256
+
+# Raft log pruning frequency.
+#causal_clustering.raft_log_pruning_frequency=10m
+
+# The size to allow the raft log to grow before rotating.
+#causal_clustering.raft_log_rotation_size=250M
+
+### The following setting is relevant for Edge servers only.
+# The interval of pulling updates from Core servers.
+#causal_clustering.pull_interval=1s
+
+# For how long should drivers cache the discovery data from
+# the dbms.cluster.routing.getServers() procedure. Defaults to 300s.
+#causal_clustering.cluster_routing_ttl=300s
+
+#*****************************************************************
+# HA configuration
+#*****************************************************************
+
+# Uncomment and specify these lines for running Neo4j in High Availability mode.
+# See the High Availability documentation at https://neo4j.com/docs/ for details.
+
+# Database mode
+# Allowed values:
+# HA - High Availability
+# SINGLE - Single mode, default.
+# To run in High Availability mode uncomment this line:
+#dbms.mode=HA
+
+# ha.server_id is the number of each instance in the HA cluster. It should be
+# an integer (e.g. 1), and should be unique for each cluster instance.
+#ha.server_id=
+
+# ha.initial_hosts is a comma-separated list (without spaces) of the host:port
+# where the ha.host.coordination of all instances will be listening. Typically
+# this will be the same for all cluster instances.
+#ha.initial_hosts=127.0.0.1:5001,127.0.0.1:5002,127.0.0.1:5003
+
+# IP and port for this instance to listen on, for communicating cluster status
+# information with other instances (also see ha.initial_hosts). The IP
+# must be the configured IP address for one of the local interfaces.
+#ha.host.coordination=127.0.0.1:5001
+
+# IP and port for this instance to listen on, for communicating transaction
+# data with other instances (also see ha.initial_hosts). The IP
+# must be the configured IP address for one of the local interfaces.
+#ha.host.data=127.0.0.1:6001
+
+# The interval, in seconds, at which slaves will pull updates from the master. You must comment out
+# the option to disable periodic pulling of updates.
+ha.pull_interval=10
+
+# Number of slaves the master will try to push a transaction to upon commit
+# (default is 1). The master will optimistically continue and not fail the
+# transaction even if it fails to reach the push factor. Setting this to 0 will
+# increase write performance when writing through master but could potentially
+# lead to branched data (or loss of transaction) if the master goes down.
+#ha.tx_push_factor=1
+
+# Strategy the master will use when pushing data to slaves (if the push factor
+# is greater than 0). There are three options available "fixed_ascending" (default),
+# "fixed_descending" or "round_robin". Fixed strategies will start by pushing to
+# slaves ordered by server id (accordingly with qualifier) and are useful when
+# planning for a stable fail-over based on ids.
+#ha.tx_push_strategy=fixed_ascending
+
+# Policy for how to handle branched data.
+#ha.branched_data_policy=keep_all
+
+# How often heartbeat messages should be sent. Defaults to ha.default_timeout.
+#ha.heartbeat_interval=5s
+
+# How long to wait for heartbeats from other instances before marking them as suspects for failure.
+# This value reflects considerations of network latency, expected duration of garbage collection pauses
+# and other factors that can delay message sending and processing. Larger values will result in more
+# stable masters but also will result in longer waits before a failover in case of master failure.
+# This value should not be set to less than twice the ha.heartbeat_interval value otherwise there is a high
+# risk of frequent master switches and possibly branched data occurrence.
+#ha.heartbeat_timeout=40s
+
+# If you are using a load-balancer that doesn't support HTTP Auth, you may need to turn off authentication for the
+# HA HTTP status endpoint by uncommenting the following line.
+#dbms.security.ha_status_auth_enabled=false
+
+# Whether this instance should only participate as slave in cluster. If set to
+# true, it will never be elected as master.
+#ha.slave_only=false
+
+#********************************************************************
+# Security Configuration
+#********************************************************************
+
+# The authentication and authorization provider that contains both users and roles.
+# This can be one of the built-in `native` or `ldap` auth providers,
+# or it can be an externally provided plugin, with a custom name prefixed by `plugin`,
+# i.e. `plugin-<name>`.
+#dbms.security.auth_provider=native
+
+# The time to live (TTL) for cached authentication and authorization info when using
+# external auth providers (LDAP or plugin). Setting the TTL to 0 will
+# disable auth caching.
+#dbms.security.auth_cache_ttl=10m
+
+# The maximum capacity for authentication and authorization caches (respectively).
+#dbms.security.auth_cache_max_capacity=10000
+
+# Set to log successful authentication events to the security log.
+# If this is set to `false` only failed authentication events will be logged, which
+# could be useful if you find that the successful events spam the logs too much,
+# and you do not require full auditing capability.
+#dbms.security.log_successful_authentication=true
+
+#================================================
+# LDAP Auth Provider Configuration
+#================================================
+
+# URL of LDAP server to use for authentication and authorization.
+# The format of the setting is `<protocol>://<hostname>:<port>`, where hostname is the only required field.
+# The supported values for protocol are `ldap` (default) and `ldaps`.
+# The default port for `ldap` is 389 and for `ldaps` 636.
+# For example: `ldaps://ldap.example.com:10389`.
+#
+# NOTE: You may want to consider using STARTTLS (`dbms.security.ldap.use_starttls`) instead of LDAPS
+# for secure connections, in which case the correct protocol is `ldap`.
+#dbms.security.ldap.host=localhost
+
+# Use secure communication with the LDAP server using opportunistic TLS.
+# First an initial insecure connection will be made with the LDAP server, and then a STARTTLS command
+# will be issued to negotiate an upgrade of the connection to TLS before initiating authentication.
+#dbms.security.ldap.use_starttls=false
+
+# The LDAP referral behavior when creating a connection. This is one of `follow`, `ignore` or `throw`.
+# `follow` automatically follows any referrals
+# `ignore` ignores any referrals
+# `throw` throws an exception, which will lead to authentication failure
+#dbms.security.ldap.referral=follow
+
+# The timeout for establishing an LDAP connection. If a connection with the LDAP server cannot be
+# established within the given time the attempt is aborted.
+# A value of 0 means to use the network protocol's (i.e., TCP's) timeout value.
+#dbms.security.ldap.connection_timeout=30s
+
+# The timeout for an LDAP read request (i.e. search). If the LDAP server does not respond within
+# the given time the request will be aborted. A value of 0 means wait for a response indefinitely.
+#dbms.security.ldap.read_timeout=30s
+
+#----------------------------------
+# LDAP Authentication Configuration
+#----------------------------------
+
+# LDAP authentication mechanism. This is one of `simple` or a SASL mechanism supported by JNDI,
+# for example `DIGEST-MD5`. `simple` is basic username
+# and password authentication and SASL is used for more advanced mechanisms. See RFC 2251 LDAPv3
+# documentation for more details.
+#dbms.security.ldap.authentication.mechanism=simple
+
+# LDAP user DN template. An LDAP object is referenced by its distinguished name (DN), and a user DN is
+# an LDAP fully-qualified unique user identifier. This setting is used to generate an LDAP DN that
+# conforms with the LDAP directory's schema from the user principal that is submitted with the
+# authentication token when logging in.
+# The special token {0} is a placeholder where the user principal will be substituted into the DN string.
+#dbms.security.ldap.authentication.user_dn_template=uid={0},ou=users,dc=example,dc=com
+
+# Determines if the result of authentication via the LDAP server should be cached or not.
+# Caching is used to limit the number of LDAP requests that have to be made over the network
+# for users that have already been authenticated successfully. A user can be authenticated against
+# an existing cache entry (instead of via an LDAP server) as long as it is alive
+# (see `dbms.security.auth_cache_ttl`).
+# An important consequence of setting this to `true` is that
+# Neo4j then needs to cache a hashed version of the credentials in order to perform credentials
+# matching. This hashing is done using a cryptographic hash function together with a random salt.
+# Preferably a conscious decision should be made if this method is considered acceptable by
+# the security standards of the organization in which this Neo4j instance is deployed.
+#dbms.security.ldap.authentication.cache_enabled=true
+
+#----------------------------------
+# LDAP Authorization Configuration
+#----------------------------------
+# Authorization is performed by searching the directory for the groups that
+# the user is a member of, and then mapping those groups to Neo4j roles.
+
+# Perform LDAP search for authorization info using a system account instead of the user's own account.
+#
+# If this is set to `false` (default), the search for group membership will be performed
+# directly after authentication using the LDAP context bound with the user's own account.
+# The mapped roles will be cached for the duration of `dbms.security.auth_cache_ttl`,
+# and then expire, requiring re-authentication. To avoid frequently having to re-authenticate
+# sessions you may want to set a relatively long auth cache expiration time together with this option.
+# NOTE: This option will only work if the users are permitted to search for their
+# own group membership attributes in the directory.
+#
+# If this is set to `true`, the search will be performed using a special system account user
+# with read access to all the users in the directory.
+# You need to specify the username and password using the settings
+# `dbms.security.ldap.authorization.system_username` and
+# `dbms.security.ldap.authorization.system_password` with this option.
+# Note that this account only needs read access to the relevant parts of the LDAP directory
+# and does not need to have access rights to Neo4j, or any other systems.
+#dbms.security.ldap.authorization.use_system_account=false
+
+# An LDAP system account username to use for authorization searches when
+# `dbms.security.ldap.authorization.use_system_account` is `true`.
+# Note that the `dbms.security.ldap.authentication.user_dn_template` will not be applied to this username,
+# so you may have to specify a full DN.
+#dbms.security.ldap.authorization.system_username=
+
+# An LDAP system account password to use for authorization searches when
+# `dbms.security.ldap.authorization.use_system_account` is `true`.
+#dbms.security.ldap.authorization.system_password=
+
+# The name of the base object or named context to search for user objects when LDAP authorization is enabled.
+# A common case is that this matches the last part of `dbms.security.ldap.authentication.user_dn_template`.
+#dbms.security.ldap.authorization.user_search_base=ou=users,dc=example,dc=com
+
+# The LDAP search filter to search for a user principal when LDAP authorization is
+# enabled. The filter should contain the placeholder token {0} which will be substituted for the
+# user principal.
+#dbms.security.ldap.authorization.user_search_filter=(&(objectClass=*)(uid={0}))
+
+# A list of attribute names on a user object that contains groups to be used for mapping to roles
+# when LDAP authorization is enabled.
+#dbms.security.ldap.authorization.group_membership_attributes=memberOf
+
+# An authorization mapping from LDAP group names to Neo4j role names.
+# The map should be formatted as a semicolon separated list of key-value pairs, where the
+# key is the LDAP group name and the value is a comma separated list of corresponding role names.
+# For example: group1=role1;group2=role2;group3=role3,role4,role5
+#
+# You could also use whitespaces and quotes around group names to make this mapping more readable,
+# for example: dbms.security.ldap.authorization.group_to_role_mapping=\
+#          "cn=Neo4j Read Only,cn=users,dc=example,dc=com"      = reader;    \
+#          "cn=Neo4j Read-Write,cn=users,dc=example,dc=com"     = publisher; \
+#          "cn=Neo4j Schema Manager,cn=users,dc=example,dc=com" = architect; \
+#          "cn=Neo4j Administrator,cn=users,dc=example,dc=com"  = admin
+#dbms.security.ldap.authorization.group_to_role_mapping=
+
+
+#*****************************************************************
+# Miscellaneous configuration
+#*****************************************************************
+
+# Enable this to specify a parser other than the default one.
+#cypher.default_language_version=3.0
+
+# Determines if Cypher will allow using file URLs when loading data using
+# `LOAD CSV`. Setting this value to `false` will cause Neo4j to fail `LOAD CSV`
+# clauses that load data from the file system.
+#dbms.security.allow_csv_import_from_file_urls=true
+
+# Retention policy for transaction logs needed to perform recovery and backups.
+#dbms.tx_log.rotation.retention_policy=7 days
+
+# Limit the number of IOs the background checkpoint process will consume per second.
+# This setting is advisory, is ignored in Neo4j Community Edition, and is followed to
+# best effort in Enterprise Edition.
+# An IO is in this case an 8 KiB (mostly sequential) write. Limiting the write IO in
+# this way will leave more bandwidth in the IO subsystem to service random-read IOs,
+# which is important for the response time of queries when the database cannot fit
+# entirely in memory. The only drawback of this setting is that longer checkpoint times
+# may lead to slightly longer recovery times in case of a database or system crash.
+# A lower number means lower IO pressure, and consequently longer checkpoint times.
+# The configuration can also be commented out to remove the limitation entirely, and
+# let the checkpointer flush data as fast as the hardware will go.
+# Set this to -1 to disable the IOPS limit.
+# dbms.checkpoint.iops.limit=300
+
+# Enable a remote shell server which Neo4j Shell clients can log in to.
+#dbms.shell.enabled=true
+# The network interface IP the shell will listen on (use 0.0.0.0 for all interfaces).
+#dbms.shell.host=127.0.0.1
+# The port the shell will listen on, default is 1337.
+#dbms.shell.port=1337
+
+# Only allow read operations from this Neo4j instance. This mode still requires
+# write access to the directory for lock purposes.
+#dbms.read_only=false
+
+# Comma separated list of JAX-RS packages containing JAX-RS resources, one
+# package name for each mountpoint. The listed package names will be loaded
+# under the mountpoints specified. Uncomment this line to mount the
+# org.neo4j.examples.server.unmanaged.HelloWorldResource.java from
+# neo4j-server-examples under /examples/unmanaged, resulting in a final URL of
+# http://localhost:7474/examples/unmanaged/helloworld/{nodeId}
+#dbms.unmanaged_extension_classes=org.neo4j.examples.server.unmanaged=/examples/unmanaged
+
+# Comma-separated list of id types (like node or relationship) that should be reused.
+# When some type is specified, the database will try to reuse the corresponding ids as soon as it is safe to do so.
+# Currently only 'node' and 'relationship' types are supported.
+# This setting is ignored in Neo4j Community Edition.
+#dbms.ids.reuse.types.override=node,relationship
+
+#********************************************************************
+# JVM Parameters
+#********************************************************************
+
+# G1GC generally strikes a good balance between throughput and tail
+# latency, without too much tuning.
+dbms.jvm.additional=-XX:+UseG1GC
+
+# Have common exceptions keep producing stack traces, so they can be
+# debugged regardless of how often logs are rotated.
+dbms.jvm.additional=-XX:-OmitStackTraceInFastThrow
+
+# Make sure that `initmemory` is not only allocated, but committed to
+# the process, before starting the database. This reduces memory
+# fragmentation, increasing the effectiveness of transparent huge
+# pages. It also reduces the possibility of seeing performance drop
+# due to heap-growing GC events, where a decrease in available page
+# cache leads to an increase in mean IO response time.
+# Try reducing the heap memory, if this flag degrades performance.
+dbms.jvm.additional=-XX:+AlwaysPreTouch
+
+# Trust that non-static final fields are really final.
+# This allows more optimizations and improves overall performance.
+# NOTE: Disable this if you use embedded mode, or have extensions or dependencies that may use reflection or
+# serialization to change the value of final fields!
+dbms.jvm.additional=-XX:+UnlockExperimentalVMOptions
+dbms.jvm.additional=-XX:+TrustFinalNonStaticFields
+
+# Disable explicit garbage collection, which is occasionally invoked by the JDK itself.
+dbms.jvm.additional=-XX:+DisableExplicitGC
+
+# Remote JMX monitoring, uncomment and adjust the following lines as needed. Absolute paths to jmx.access and
+# jmx.password files are required.
+# Also make sure to update the jmx.access and jmx.password files with appropriate permission roles and passwords,
+# the shipped configuration contains only a read only role called 'monitor' with password 'Neo4j'.
+# For more details, see: http://download.oracle.com/javase/8/docs/technotes/guides/management/agent.html
+# On Unix based systems the jmx.password file needs to be owned by the user that will run the server,
+# and have permissions set to 0600.
+# For details on setting these file permissions on Windows see:
+# http://docs.oracle.com/javase/8/docs/technotes/guides/management/security-windows.html
+#dbms.jvm.additional=-Dcom.sun.management.jmxremote.port=3637
+#dbms.jvm.additional=-Dcom.sun.management.jmxremote.authenticate=true
+#dbms.jvm.additional=-Dcom.sun.management.jmxremote.ssl=false
+#dbms.jvm.additional=-Dcom.sun.management.jmxremote.password.file=/absolute/path/to/conf/jmx.password
+#dbms.jvm.additional=-Dcom.sun.management.jmxremote.access.file=/absolute/path/to/conf/jmx.access
+
+# Some systems cannot discover host name automatically, and need this line configured:
+#dbms.jvm.additional=-Djava.rmi.server.hostname=$THE_NEO4J_SERVER_HOSTNAME
+
+# Expand Diffie Hellman (DH) key size from default 1024 to 2048 for DH-RSA cipher suites used in server TLS handshakes.
+# This is to protect the server from any potential passive eavesdropping.
+dbms.jvm.additional=-Djdk.tls.ephemeralDHKeySize=2048
+
+# This mitigates a DDoS vector.
+dbms.jvm.additional=-Djdk.tls.rejectClientInitiatedRenegotiation=true
+
+#********************************************************************
+# Wrapper Windows NT/2000/XP Service Properties
+#********************************************************************
+# WARNING - Do not modify any of these properties when an application
+# using this configuration file has been installed as a service.
+# Please uninstall the service before modifying this section. The
+# service can then be reinstalled.
+
+# Name of the service
+dbms.windows_service_name=neo4j
+
+#********************************************************************
+# Other Neo4j system properties
+#********************************************************************
+dbms.jvm.additional=-Dunsupported.dbms.udc.source=desktop
+dbms.logs.query.enabled=true
+dbms.logs.query.threshold=0
+dbms.logs.query.parameter_logging_enabled=true
+dbms.logs.query.time_logging_enabled=true
+dbms.logs.query.allocation_logging_enabled=true
+dbms.logs.query.page_logging_enabled=true
+
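The checked-in config departs from stock Neo4j in a few ways that matter for local development: authentication is off, the heap is pinned to 512m-1G with a 512m page cache (roughly 1.5G plus JVM overhead for the whole process), and full query logging is switched on at the bottom of the file. How the registry consumes this file is outside this diff; as one hedged illustration, TinkerPop's neo4j-gremlin module forwards any key under gremlin.neo4j.conf.* to an embedded Neo4j instance, so equivalent settings could be supplied like this (directory path and values are placeholders, and the registry's actual DatabaseProvider wiring may differ):

    import org.apache.commons.configuration.BaseConfiguration;
    import org.apache.commons.configuration.Configuration;
    import org.apache.tinkerpop.gremlin.neo4j.structure.Neo4jGraph;

    public class EmbeddedNeo4jDemo {
        public static void main(String[] args) throws Exception {
            Configuration conf = new BaseConfiguration();
            conf.setProperty("gremlin.neo4j.directory", "/tmp/registry-neo4j");
            // Keys under gremlin.neo4j.conf.* are handed through to Neo4j itself.
            conf.setProperty("gremlin.neo4j.conf.dbms.security.auth_enabled", "false");
            conf.setProperty("gremlin.neo4j.conf.dbms.memory.pagecache.size", "512m");

            Neo4jGraph graph = Neo4jGraph.open(conf);
            System.out.println(graph.features());
            graph.close();
        }
    }

Note that the dbms.jvm.additional and heap settings only apply when Neo4j runs as its own process; an embedded instance inherits the host JVM's flags.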
+dbms.jvm.additional=-Djdk.tls.rejectClientInitiatedRenegotiation=true + +#******************************************************************** +# Wrapper Windows NT/2000/XP Service Properties +#******************************************************************** +# WARNING - Do not modify any of these properties when an application +# using this configuration file has been installed as a service. +# Please uninstall the service before modifying this section. The +# service can then be reinstalled. + +# Name of the service +dbms.windows_service_name=neo4j + +#******************************************************************** +# Other Neo4j system properties +#******************************************************************** +dbms.jvm.additional=-Dunsupported.dbms.udc.source=desktop +dbms.logs.query.enabled=true +dbms.logs.query.threshold=0 +dbms.logs.query.parameter_logging_enabled=true +dbms.logs.query.time_logging_enabled=true +dbms.logs.query.allocation_logging_enabled=true +dbms.logs.query.page_logging_enabled=true + diff --git a/java/registry/src/main/java/io/opensaber/registry/dao/impl/RegistryDaoImpl.java b/java/registry/src/main/java/io/opensaber/registry/dao/impl/RegistryDaoImpl.java index 4289a7e49..da2780e8a 100644 --- a/java/registry/src/main/java/io/opensaber/registry/dao/impl/RegistryDaoImpl.java +++ b/java/registry/src/main/java/io/opensaber/registry/dao/impl/RegistryDaoImpl.java @@ -6,6 +6,7 @@ import java.util.regex.Pattern; import com.google.common.collect.ImmutableList; import io.opensaber.pojos.OpenSaberInstrumentation; +import com.google.gson.Gson; import io.opensaber.registry.exception.AuditFailedException; import io.opensaber.registry.model.AuditRecord; import io.opensaber.registry.schema.config.SchemaConfigurator; @@ -41,7 +42,10 @@ public class RegistryDaoImpl implements RegistryDao { public static final String META = "meta."; public static final String EMPTY_STRING = StringUtils.EMPTY; private static Logger logger = LoggerFactory.getLogger(RegistryDaoImpl.class); - private static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); + public static final String INTERNAL_PROPERTY_PREFIX = "@_"; + public static final String IMPOSSIBLE_LABEL = "-1"; + + private static final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); @Autowired private DatabaseProvider databaseProvider; @@ -73,118 +77,65 @@ public List getEntityList() { return null; } - /*@Override - public String addEntity(Graph entity, String label) throws DuplicateRecordException, NoSuchElementException, EncryptionException, AuditFailedException, RecordNotFoundException { - logger.debug("RegistryDaoImpl : Database Provider features: \n" + databaseProvider.getGraphStore().features()); - watch.start("getGraphStore() and traversal() in addEntity() Performance Monitoring !"); - Graph graphFromStore = databaseProvider.getGraphStore(); - GraphTraversalSource dbGraphTraversalSource = graphFromStore.traversal(); - watch.stop(); - prefLogger.info(watch.prettyPrint()); - if (dbGraphTraversalSource.clone().V().hasLabel(label).hasNext()) { - //closeGraph(graphFromStore); - throw new DuplicateRecordException(Constants.DUPLICATE_RECORD_MESSAGE); - } - String rootNodeLabel; - TinkerGraph graph = (TinkerGraph) entity; - GraphTraversalSource traversal = graph.traversal(); - if (graphFromStore.features().graph().supportsTransactions()) { - org.apache.tinkerpop.gremlin.structure.Transaction tx = graphFromStore.tx(); - 
tx.onReadWrite(org.apache.tinkerpop.gremlin.structure.Transaction.READ_WRITE_BEHAVIOR.AUTO); - //rootNodeLabel = createOrUpdateEntity(dbGraphTraversalSource, graph, label, "create"); - watch.start("addOrUpdateVerticesAndEdges() inside addEntity() Performance Monitoring !"); - rootNodeLabel = addOrUpdateVerticesAndEdges(dbGraphTraversalSource, traversal, label, "create"); - watch.stop(); - prefLogger.info(watch.prettyPrint()); - tx.commit(); - logger.debug("RegistryDaoImpl : Entity added for transactional DB with rootNodeLabel : {}", rootNodeLabel); - } else { - watch.start("addOrUpdateVerticesAndEdges() inside addEntity() Performance Monitoring !"); - rootNodeLabel = addOrUpdateVerticesAndEdges(dbGraphTraversalSource, traversal, label, "create"); - watch.stop(); - prefLogger.info(watch.prettyPrint()); - logger.debug("RegistryDaoImpl : Entity added for non-transactional DB with rootNodeLabel : {}", rootNodeLabel); - } - logger.info("Successfully created entity with label " + rootNodeLabel); - // closeGraph(graphFromStore); - return rootNodeLabel; - }*/ - - - @Override - public String addEntity(Graph entity, String label, String rootNodeLabel, String property) throws DuplicateRecordException, RecordNotFoundException, NoSuchElementException, EncryptionException, AuditFailedException { - logger.debug("RegistryDaoImpl : Database Provider features: \n" + databaseProvider.getGraphStore().features()); - Graph graphFromStore = databaseProvider.getGraphStore(); - GraphTraversalSource dbGraphTraversalSource = graphFromStore.traversal(); - if (rootNodeLabel != null && property != null && !dbGraphTraversalSource.clone().V().hasLabel(rootNodeLabel).hasNext()) { - //closeGraph(graphFromStore); - throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); - } else if (dbGraphTraversalSource.clone().V().hasLabel(label).hasNext()) { - //closeGraph(graphFromStore); - throw new DuplicateRecordException(Constants.DUPLICATE_RECORD_MESSAGE); - } + @Override + public String addEntity(Graph entity, String label, String rootNodeLabel, String property) throws DuplicateRecordException, RecordNotFoundException, NoSuchElementException, EncryptionException, AuditFailedException{ + logger.debug("Database Provider features: \n" + databaseProvider.getGraphStore().features()); + Graph graphFromStore = databaseProvider.getGraphStore(); + if (rootNodeLabel!=null && property!=null && !doesExist(rootNodeLabel, graphFromStore)) { + throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); + } else if (doesExist(label, graphFromStore)) { + throw new DuplicateRecordException(Constants.DUPLICATE_RECORD_MESSAGE); + } - TinkerGraph graph = (TinkerGraph) entity; - GraphTraversalSource traversal = graph.traversal(); - if (graphFromStore.features().graph().supportsTransactions()) { - org.apache.tinkerpop.gremlin.structure.Transaction tx = graphFromStore.tx(); - tx.onReadWrite(org.apache.tinkerpop.gremlin.structure.Transaction.READ_WRITE_BEHAVIOR.AUTO); - //label = createOrUpdateEntity(graph, label, "create"); + TinkerGraph graph = (TinkerGraph) entity; + GraphTraversalSource traversal = graph.traversal(); + if (graphFromStore.features().graph().supportsTransactions()) { + org.apache.tinkerpop.gremlin.structure.Transaction tx = graphFromStore.tx(); + tx.onReadWrite(org.apache.tinkerpop.gremlin.structure.Transaction.READ_WRITE_BEHAVIOR.AUTO); watch.start("RegistryDaoImpl.addOrUpdateVerticesAndEdges"); - label = addOrUpdateVerticesAndEdges(dbGraphTraversalSource, traversal, label, "create"); + label = 
addOrUpdateVerticesAndEdges(graphFromStore, traversal, label, "create"); watch.stop("RegistryDaoImpl.addOrUpdateVerticesAndEdges"); - if (rootNodeLabel != null && property != null) { - connectNodes(rootNodeLabel, label, property); - } + if (rootNodeLabel!=null && property!=null){ + connectNodes(rootNodeLabel, label, property); + } tx.commit(); logger.debug("RegistryDaoImpl : Entity added for transactional DB with rootNodeLabel : {}, label : {}, property : {}", rootNodeLabel, label, property); - } else { + }else{ watch.start("RegistryDaoImpl.addOrUpdateVerticesAndEdges"); - label = addOrUpdateVerticesAndEdges(dbGraphTraversalSource, traversal, label, "create"); - watch.stop("RegistryDaoImpl.addOrUpdateVerticesAndEdges"); + label = addOrUpdateVerticesAndEdges(graphFromStore, traversal, label, "create"); + watch.stop("RegistryDaoImpl.addOrUpdateVerticesAndEdges"); logger.debug("RegistryDaoImpl : Entity added for non-transactional DB with rootNodeLabel : {}, label : {}, property : {}", rootNodeLabel, label, property); - if (rootNodeLabel != null && property != null) { - connectNodes(rootNodeLabel, label, property); - } - } - logger.info("Successfully created entity with label " + label); - // closeGraph(graphFromStore); - return label; - } - - /* - private void closeGraph(Graph graph) { - try { - graph.close(); - } catch (Exception ex) { - logger.error("Exception when closing the database graph", ex); + if (rootNodeLabel!=null && property!=null){ + connectNodes(rootNodeLabel, label, property); + } } + logger.info("Successfully created entity with label " + label); + // closeGraph(graphFromStore); + return label; } - */ - private void connectNodes(String rootLabel, String label, String property) throws RecordNotFoundException, NoSuchElementException, EncryptionException, AuditFailedException { Graph graphFromStore = databaseProvider.getGraphStore(); - GraphTraversalSource traversalSource = graphFromStore.traversal(); - if (!traversalSource.clone().V().hasLabel(rootLabel).hasNext()) { + if (!doesExist(rootLabel, graphFromStore)) { // closeGraph(graphFromStore); throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); } - if (!traversalSource.clone().V().hasLabel(label).hasNext()) { + + if (!doesExist(label, graphFromStore)) { // closeGraph(graphFromStore); throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); } - connectRootToEntity(traversalSource, rootLabel, label, property); + connectRootToEntity(graphFromStore, rootLabel, label, property); } - private void connectRootToEntity(GraphTraversalSource dbTraversalSource, String rootLabel, String label, String property) throws RecordNotFoundException, NoSuchElementException, EncryptionException, AuditFailedException { - GraphTraversal rootGts = dbTraversalSource.clone().V().hasLabel(rootLabel); - GraphTraversal entityGts = dbTraversalSource.clone().V().hasLabel(label); + private void connectRootToEntity(Graph dbGraph, String rootLabel, String label, String property) throws NoSuchElementException, AuditFailedException { + Iterator rootGts = getVerticesIterator(dbGraph,rootLabel); + Iterator entityGts = getVerticesIterator(dbGraph,label); Vertex rootVertex = rootGts.next(); Vertex entityVertex = entityGts.next(); rootVertex.addEdge(property, entityVertex); - if(auditEnabled) { + if(auditEnabled){ watch.start("RegistryDaoImpl.connectRootToEntity.auditRecord"); AuditRecord record = appContext.getBean(AuditRecord.class); record @@ -195,218 +146,165 @@ private void connectRootToEntity(GraphTraversalSource dbTraversalSource, String 
.record(databaseProvider); watch.stop("RegistryDaoImpl.connectRootToEntity.auditRecord"); } - logger.debug("RegistryDaoImpl : Audit record generated of connectRootToEntity for rootLabel : {}, label : {}, property : {}", rootLabel, label, property); } - /** - * This method is commonly used for both create and update entity - * @param entity - * @param rootLabel - * @throws EncryptionException - * @throws NoSuchElementException - */ - - /*private String createOrUpdateEntity(GraphTraversalSource dbGraphTraversalSource, Graph entity, String rootLabel, String methodOrigin) throws NoSuchElementException, EncryptionException, AuditFailedException, RecordNotFoundException{ - - TinkerGraph graph = (TinkerGraph) entity; - GraphTraversalSource traversal = graph.traversal(); - - // Root node label will be auto-generate if a new entity is created - String rootNodeLabel; - - if (graphFromStore.features().graph().supportsTransactions()) { - org.apache.tinkerpop.gremlin.structure.Transaction tx = graphFromStore.tx(); - tx.onReadWrite(org.apache.tinkerpop.gremlin.structure.Transaction.READ_WRITE_BEHAVIOR.AUTO); - rootNodeLabel = addOrUpdateVerticesAndEdges(dbGraphTraversalSource, traversal, rootLabel, methodOrigin); - tx.commit(); - // tx.close(); - } else { - rootNodeLabel = addOrUpdateVerticesAndEdges(dbGraphTraversalSource, traversal, rootLabel, methodOrigin); - } - - return addOrUpdateVerticesAndEdges(dbGraphTraversalSource, traversal, rootLabel, methodOrigin); - - }*/ - - /** - * This method creates the root node of the entity if it already isn't present in the graph store - * or updates the properties of the root node or adds new properties if the properties are not already - * present in the node. - * - * @param dbTraversalSource - * @param entitySource - * @param rootLabel - * @throws EncryptionException - * @throws NoSuchElementException - */ - private String addOrUpdateVerticesAndEdges(GraphTraversalSource dbTraversalSource, + /** + * This method creates the root node of the entity if it already isn't present in the graph store + * or updates the properties of the root node or adds new properties if the properties are not already + * present in the node.
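+ * For a newly created root node, the database-generated vertex id is turned into a namespaced
+ * label (registryContext + vertex id, see generateNamespacedLabel) and that label is returned to the caller.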
Updating properties for the root node.", rootLabel)); - Vertex existingVertex = hasLabel.next(); - setAuditInfo(v, false); - copyProperties(v, existingVertex, methodOrigin, encDecPropertyBuilder); - // watch.start("RegistryDaoImpl.addOrUpdateVertexAndEdge()"); - addOrUpdateVertexAndEdge(v, existingVertex, dbTraversalSource, methodOrigin, encDecPropertyBuilder); - // watch.stop(); - } else { - if (methodOrigin.equalsIgnoreCase("update")) { - throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); - } - label = generateBlankNodeLabel(rootLabel); - logger.info(String.format("Creating entity with label {}", rootLabel)); - Vertex newVertex = dbTraversalSource.clone().addV(label).next(); - setAuditInfo(v, true); - copyProperties(v, newVertex, methodOrigin, encDecPropertyBuilder); - // watch.start("RegistryDaoImpl.addOrUpdateVertexAndEdge()"); - addOrUpdateVertexAndEdge(v, newVertex, dbTraversalSource, methodOrigin, encDecPropertyBuilder); - // watch.stop(); - } - Table> encDecPropertyTable = encDecPropertyBuilder.build(); - // watch.start("RegistryDaoImpl.updateEncryptedDecryptedProperties"); - if(encDecPropertyTable.size() > 0){ - updateEncryptedDecryptedProperties(encDecPropertyTable, methodOrigin); - } - // watch.stop(); - } - - return label; - } + throws NoSuchElementException, EncryptionException, AuditFailedException, RecordNotFoundException { + + GraphTraversal gts = entitySource.clone().V().hasLabel(rootLabel); + String label = rootLabel; + while (gts.hasNext()) { + Vertex v = gts.next(); + Iterator hasLabel = getVerticesIterator(dbGraph, label); + ImmutableTable.Builder> encDecPropertyBuilder = ImmutableTable.builder(); + if (hasLabel.hasNext()) { + logger.info(String.format("Root node label {} already exists. Updating properties for the root node.", rootLabel)); + Vertex existingVertex = hasLabel.next(); + setAuditInfo(v, false); + copyProperties(v, existingVertex, methodOrigin, encDecPropertyBuilder); + addOrUpdateVertexAndEdge(v, existingVertex, dbGraph, methodOrigin, encDecPropertyBuilder); + } else { + if(methodOrigin.equalsIgnoreCase("update")){ + throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); + } + logger.info(String.format("Creating entity with label {}", rootLabel)); + Vertex newVertex = dbGraph.addVertex(rootLabel); + label = generateNamespacedLabel(newVertex.id().toString()); + newVertex.property(internalPropertyKey("id"),label); + setAuditInfo(v, true); + copyProperties(v, newVertex,methodOrigin, encDecPropertyBuilder); + addOrUpdateVertexAndEdge(v, newVertex, dbGraph,methodOrigin, encDecPropertyBuilder); + } + Table> encDecPropertyTable = encDecPropertyBuilder.build(); + long timestamp = System.currentTimeMillis(); + if(encDecPropertyTable.size() > 0){ + updateEncryptedDecryptedProperties(encDecPropertyTable, methodOrigin); + } + logger.info("Time taken to update encrypted properties:"+(System.currentTimeMillis() - timestamp)); + } + return label; + } /** - * This method takes the root node of an entity and then recursively creates or updates child vertices - * and edges. 
- * - * @param v - * @param dbVertex - * @param dbGraph - * @throws EncryptionException - * @throws NoSuchElementException - */ - private void addOrUpdateVertexAndEdge(Vertex v, Vertex dbVertex, GraphTraversalSource dbGraph, String methodOrigin, ImmutableTable.Builder> encDecPropertyBuilder) - throws NoSuchElementException, EncryptionException, AuditFailedException, RecordNotFoundException { - Iterator edges = v.edges(Direction.OUT); - Iterator edgeList = v.edges(Direction.OUT); - Stack> parsedVertices = new Stack<>(); - List dbEdgesForVertex = ImmutableList.copyOf(dbVertex.edges(Direction.OUT)); - List edgeVertexMatchList = new ArrayList(); - - while (edgeList.hasNext()) { - Edge e = edgeList.next(); - Vertex ver = e.inVertex(); - String edgeLabel = e.label(); - Optional edgeVertexAlreadyExists = - dbEdgesForVertex.stream().filter(ed -> ed.label().equalsIgnoreCase(edgeLabel) && ed.inVertex().label().equalsIgnoreCase(ver.label())).findFirst(); - if (edgeVertexAlreadyExists.isPresent()) { - edgeVertexMatchList.add(edgeVertexAlreadyExists.get()); - } - } - logger.debug("RegistryDaoImpl : Matching list size:" + edgeVertexMatchList.size()); - - while (edges.hasNext()) { - Edge e = edges.next(); - Vertex ver = e.inVertex(); - String edgeLabel = e.label(); - GraphTraversal gt = dbGraph.clone().V().hasLabel(ver.label()); - Optional edgeAlreadyExists = - dbEdgesForVertex.stream().filter(ed -> ed.label().equalsIgnoreCase(e.label())).findFirst(); - Optional edgeVertexAlreadyExists = - dbEdgesForVertex.stream().filter(ed -> ed.label().equalsIgnoreCase(edgeLabel) && ed.inVertex().label().equalsIgnoreCase(ver.label())).findFirst(); - verifyAndDelete(dbVertex, e, edgeAlreadyExists, edgeVertexMatchList, methodOrigin); - if (gt.hasNext()) { - Vertex existingV = gt.next(); - setAuditInfo(ver, false); - logger.info(String.format("Vertex with label {} already exists. Updating properties for the vertex", existingV.label())); - copyProperties(ver, existingV, methodOrigin, encDecPropertyBuilder); - if (!edgeVertexAlreadyExists.isPresent()) { - Edge edgeAdded = dbVertex.addEdge(edgeLabel, existingV); - edgeVertexMatchList.add(edgeAdded); - if(auditEnabled) { + * This method takes the root node of an entity and then recursively creates or updates child vertices + * and edges. 
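+ * Existing target vertices are resolved by database id where possible (via getVerticesIterator),
+ * with a fallback lookup through databaseProvider.getIDsFromLabel when only a label is known.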
+ * @param sourceVertex + * @param dbVertex + * @param dbGraph + * @throws EncryptionException + * @throws NoSuchElementException + */ + private void addOrUpdateVertexAndEdge(Vertex sourceVertex, Vertex dbVertex, Graph dbGraph, String methodOrigin, ImmutableTable.Builder> encDecPropertyBuilder) + throws NoSuchElementException, EncryptionException, AuditFailedException, RecordNotFoundException{ + Iterator sourceEdges1 = sourceVertex.edges(Direction.OUT); + Iterator sourceEdges2 = sourceVertex.edges(Direction.OUT); + Stack> parsedVertices = new Stack<>(); + List dbEdgesForVertex = ImmutableList.copyOf(dbVertex.edges(Direction.OUT)); + List existingEdgeList = new ArrayList(); + + while(sourceEdges2.hasNext()) { + Edge e = sourceEdges2.next(); + Vertex ver = e.inVertex(); + String edgeLabel = e.label(); + // get the first matching edge in the DB + Optional existingEdge = + getFirstMatchingEdgeAndVertex(dbEdgesForVertex, ver, edgeLabel); + if(existingEdge.isPresent()){ + existingEdgeList.add(existingEdge.get()); + } + } + // existingEdgeList contains matching edges + logger.info("RegistryDaoImpl : Matching list size: {}", existingEdgeList.size()); +
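+ // Second pass: resolve each source edge's target vertex in the DB (by id first, then by label)
+ // and either update the existing vertex in place or create a new one and link it.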
Updating properties for the vertex", existingVertex.label())); + copyProperties(sourceInVertex, existingVertex,methodOrigin, encDecPropertyBuilder); + if(!existingEdgeConnectedToExistingVertex.isPresent()){ + Edge edgeAdded = dbVertex.addEdge(edgeLabel, existingVertex); + existingEdgeList.add(edgeAdded); + if(auditEnabled){ watch.start("RegistryDaoImpl.addOrUpdateVertexAndEdge.auditRecord"); AuditRecord record = appContext.getBean(AuditRecord.class); record - .subject(dbVertex.label()) - .predicate(e.label()) - .oldObject(null) - .newObject(existingV.label()) - .record(databaseProvider); - watch.stop("RegistryDaoImpl.addOrUpdateVertexAndEdge.auditRecord"); - } - logger.debug("RegistryDaoImpl : Audit record created for update/insert(upsert) with label : {} ", dbVertex.label()); - } - parsedVertices.push(new Pair<>(ver, existingV)); - } else { - if (methodOrigin.equalsIgnoreCase("update") && !isIRI(ver.label())) { - throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); - } - String label = generateBlankNodeLabel(ver.label()); - Vertex newV = dbGraph.addV(label).next(); - setAuditInfo(ver, true); - logger.debug(String.format("RegistryDaoImpl : Adding vertex with label {} and adding properties", newV.label())); - copyProperties(ver, newV, methodOrigin, encDecPropertyBuilder); - logger.debug(String.format("RegistryDaoImpl : Adding edge with label {} for the vertex label {}.", e.label(), newV.label())); - - Edge edgeAdded = dbVertex.addEdge(edgeLabel, newV); - edgeVertexMatchList.add(edgeAdded); - if(auditEnabled) { - AuditRecord record = appContext.getBean(AuditRecord.class); - watch.start("RegistryDaoImpl.addOrUpdateVertexAndEdge.auditRecord"); - record .subject(dbVertex.label()) - .predicate(e.label()) + .predicate(sourceEdge.label()) .oldObject(null) - .newObject(newV.label()) + .newObject(existingVertex.label()) .record(databaseProvider); + watch.stop("RegistryDaoImpl.addOrUpdateVertexAndEdge.auditRecord"); + } + logger.debug("RegistryDaoImpl : Audit record created for update/insert(upsert) with label : {} ", dbVertex.label()); + } + parsedVertices.push(new Pair<>(sourceInVertex, existingVertex)); + } else { + // did not find matching vertex in the DB + if(methodOrigin.equalsIgnoreCase("update") && isaBlankNode(sourceInVertex.label())){ + throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); + } + String label = generateBlankNodeLabel(sourceInVertex.label()); + Vertex newDBVertex = dbGraph.addVertex(label);//dbGraph.addVertex(label); + newDBVertex.property(internalPropertyKey("id"),label); + setAuditInfo(sourceInVertex, true); + logger.debug(String.format("RegistryDaoImpl : Adding vertex with label {} and adding properties", newDBVertex.label())); + copyProperties(sourceInVertex, newDBVertex, methodOrigin, encDecPropertyBuilder); + logger.debug(String.format("RegistryDaoImpl : Adding edge with label {} for the vertex label {}.", sourceEdge.label(), newDBVertex.label())); + + Edge edgeAdded = dbVertex.addEdge(edgeLabel, newDBVertex); + existingEdgeList.add(edgeAdded); + if(auditEnabled){ + AuditRecord record = appContext.getBean(AuditRecord.class); + watch.start("RegistryDaoImpl.addOrUpdateVertexAndEdge.auditRecord"); + record + .subject(dbVertex.label()) + .predicate(sourceEdge.label()) + .oldObject(null) + .newObject(newDBVertex.label()) + .record(databaseProvider); watch.stop("RegistryDaoImpl.addOrUpdateVertexAndEdge.auditRecord"); - } - logger.debug("RegistryDaoImpl : Audit record created for update with label : {} ", dbVertex.label()); - parsedVertices.push(new 
Pair<>(ver, newV)); - } - } - for (Pair pv : parsedVertices) { - addOrUpdateVertexAndEdge(pv.getValue0(), pv.getValue1(), dbGraph, methodOrigin, encDecPropertyBuilder); - } - - - } - - /*private void deleteEdgeAndNode(Vertex dbVertex, Edge e, Optional edgeAlreadyExists,List edgeVertexMatchList, String methodOrigin) - throws AuditFailedException, RecordNotFoundException{ - - Graph graphFromStore = databaseProvider.getGraphStore(); - GraphTraversalSource traversalSource = graphFromStore.traversal(); - GraphTraversal dbHasLabel = traversalSource.clone().V().hasLabel(dbVertex.label()); - if (!dbHasLabel.hasNext()) { - throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); - } - boolean isSingleValued = schemaConfigurator.isSingleValued(e.label()); - if(dbHasLabel.hasNext()){ - Vertex dbSourceVertex = dbHasLabel.next(); - if (graphFromStore.features().graph().supportsTransactions()) { - org.apache.tinkerpop.gremlin.structure.Transaction tx = graphFromStore.tx(); - tx.onReadWrite(org.apache.tinkerpop.gremlin.structure.Transaction.READ_WRITE_BEHAVIOR.AUTO); - deleteEdgeAndNode(isSingleValued, dbSourceVertex, e, edgeAlreadyExists, edgeVertexMatchList, methodOrigin); - tx.commit(); - }else{ - deleteEdgeAndNode(isSingleValued, dbSourceVertex, e, edgeAlreadyExists, edgeVertexMatchList, methodOrigin); - } - } - }*/ - + } + logger.info("Audit record created for update with label : {} ", dbVertex.label()); + parsedVertices.push(new Pair<>(sourceInVertex, newDBVertex)); + } + } + for(Pair pv : parsedVertices) { + addOrUpdateVertexAndEdge(pv.getValue0(), pv.getValue1(), dbGraph,methodOrigin, encDecPropertyBuilder); + } + } /** * This method checks if deletion of edge and node is required based on criteria and invokes deleteEdgeAndNode method - * * @param dbSourceVertex * @param e * @param edgeAlreadyExists @@ -414,16 +312,16 @@ private void addOrUpdateVertexAndEdge(Vertex v, Vertex dbVertex, GraphTraversalS * @param methodOrigin * @throws AuditFailedException */ - private void verifyAndDelete(Vertex dbSourceVertex, Edge e, Optional edgeAlreadyExists, List edgeVertexMatchList, String methodOrigin) - throws AuditFailedException { + private void deleteSingleValuedVertexEdge(Vertex dbSourceVertex, Edge e, Optional edgeAlreadyExists, List edgeVertexMatchList, String methodOrigin) + throws AuditFailedException{ boolean isSingleValued = schemaConfigurator.isSingleValued(e.label()); - if ((edgeAlreadyExists.isPresent() && methodOrigin.equalsIgnoreCase("update")) || isSingleValued) { + if((edgeAlreadyExists.isPresent() && methodOrigin.equalsIgnoreCase("update")) || isSingleValued){ Iterator edgeIter = dbSourceVertex.edges(Direction.OUT, e.label()); - while (edgeIter.hasNext()) { + while(edgeIter.hasNext()){ Edge edge = edgeIter.next(); Optional existingEdgeVertex = - edgeVertexMatchList.stream().filter(ed -> ed.label().equalsIgnoreCase(edge.label()) && ed.inVertex().label().equalsIgnoreCase(edge.inVertex().label())).findFirst(); - if (!existingEdgeVertex.isPresent()) { + getFirstMatchingEdgeAndVertex(edgeVertexMatchList, edge.inVertex(), edge.label()); + if(!existingEdgeVertex.isPresent()){ deleteEdgeAndNode(dbSourceVertex, edge, null); } } @@ -440,8 +338,6 @@ private void verifyAndDelete(Vertex dbSourceVertex, Edge e, Optional edgeA */ private void deleteEdgeAndNode(Vertex v, Edge dbEdgeToBeRemoved, Vertex dbVertexToBeDeleted) throws AuditFailedException { logger.info("Deleting edge and node of label : {}", dbEdgeToBeRemoved.label()); - - if (dbVertexToBeDeleted == null) { dbVertexToBeDeleted = 
dbEdgeToBeRemoved.inVertex(); } @@ -475,28 +371,19 @@ private void deleteEdgeAndNode(Vertex v, Edge dbEdgeToBeRemoved, Vertex dbVertex } - /** * Blank nodes are no longer supported. If the input data has a blank node, which is identified * by the node's label which starts with :_, then a random UUID is used as the label for the blank node. - * * @param label * @return */ private String generateBlankNodeLabel(String label) { - if (!isIRI(label)) { + if(isaBlankNode(label)){ label = String.format("%s%s", registryContext, generateRandomUUID()); } return label; } - /*private boolean isBlankNode(String label){ - if(label.startsWith("_:")) { - return true; - } - return false; - } - */ private boolean isIRI(String label) { UrlValidator urlValidator = new UrlValidator(UrlValidator.ALLOW_LOCAL_URLS); if (urlValidator.isValid(label)) { @@ -513,43 +400,35 @@ public static String generateRandomUUID() { public boolean updateEntity(Graph entityForUpdate, String rootNodeLabel, String methodOrigin) throws RecordNotFoundException, NoSuchElementException, EncryptionException, AuditFailedException { Graph graphFromStore = databaseProvider.getGraphStore(); - GraphTraversalSource dbGraphTraversalSource = graphFromStore.traversal(); TinkerGraph graphForUpdate = (TinkerGraph) entityForUpdate; GraphTraversalSource traversal = graphForUpdate.traversal(); - // Check if the root node being updated exists in the database - GraphTraversal hasRootLabel = dbGraphTraversalSource.clone().V().hasLabel(rootNodeLabel); + Iterator hasRootLabel = getVerticesIterator(graphFromStore, rootNodeLabel); if (!hasRootLabel.hasNext()) { - // closeGraph(graphFromStore); throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); } else { if (graphFromStore.features().graph().supportsTransactions()) { org.apache.tinkerpop.gremlin.structure.Transaction tx = graphFromStore.tx(); tx.onReadWrite(org.apache.tinkerpop.gremlin.structure.Transaction.READ_WRITE_BEHAVIOR.AUTO); - //createOrUpdateEntity(graphForUpdate, rootNodeLabel, methodOrigin); watch.start("RegistryDaoImpl.updateEntity"); - addOrUpdateVerticesAndEdges(dbGraphTraversalSource, traversal, rootNodeLabel, methodOrigin); + addOrUpdateVerticesAndEdges(graphFromStore, traversal, rootNodeLabel, methodOrigin); tx.commit(); watch.stop("RegistryDaoImpl.updateEntity"); logger.debug("RegistryDaoImpl : Entity Updated for transactional DB with rootNodeLabel : {}", rootNodeLabel); - } else { + }else{ watch.start("RegistryDaoImpl.updateEntity"); - addOrUpdateVerticesAndEdges(dbGraphTraversalSource, traversal, rootNodeLabel, methodOrigin); + addOrUpdateVerticesAndEdges(graphFromStore, traversal, rootNodeLabel, methodOrigin); watch.stop("RegistryDaoImpl.updateEntity"); logger.debug("RegistryDaoImpl : Entity Updated for non-transactional DB with rootNodeLabel : {}", rootNodeLabel); - //createOrUpdateEntity(graphForUpdate, rootNodeLabel, methodOrigin); } - //closeGraph(graphFromStore); } return false; } - @Override public Graph getEntityById(String label) throws RecordNotFoundException, NoSuchElementException, EncryptionException, AuditFailedException { Graph graphFromStore = databaseProvider.getGraphStore(); - GraphTraversalSource traversalSource = graphFromStore.traversal(); - GraphTraversal hasLabel = traversalSource.clone().V().hasLabel(label); - ImmutableTable.Builder> encDecPropertyBuilder = ImmutableTable.>builder(); + Iterator hasLabel = getVerticesIterator(graphFromStore,label); + ImmutableTable.Builder> encDecPropertyBuilder = ImmutableTable.> builder(); Graph parsedGraph = 
TinkerGraph.open(); if (!hasLabel.hasNext()) { logger.info("Record not found for label : {}", label); @@ -557,13 +436,14 @@ public Graph getEntityById(String label) throws RecordNotFoundException, NoSuchE } else { logger.info("Record exists for label : {}", label); Vertex subject = hasLabel.next(); - Vertex newSubject = parsedGraph.addVertex(subject.label()); - copyProperties(subject, newSubject, "read", encDecPropertyBuilder); + Vertex newSubject = parsedGraph.addVertex(generateNamespacedLabel(String.valueOf(subject.id()))); + copyProperties(subject, newSubject,"read", encDecPropertyBuilder); watch.start("RegistryDaoImpl.getEntityById.extractGraphFromVertex"); - extractGraphFromVertex(parsedGraph, newSubject, subject, encDecPropertyBuilder); + extractGraphFromVertex(parsedGraph,newSubject,subject, encDecPropertyBuilder); watch.stop("RegistryDaoImpl.getEntityById.extractGraphFromVertex"); - Table> encDecPropertyTable = encDecPropertyBuilder.build(); - if (encDecPropertyTable.size() > 0) { + Table> encDecPropertyTable = encDecPropertyBuilder.build(); + long timestamp = System.currentTimeMillis(); + if(encDecPropertyTable.size()>0){ watch.start("RegistryDaoImpl.getEntityById.updateEncryptedDecryptedProperties"); updateEncryptedDecryptedProperties(encDecPropertyTable, "read"); watch.stop("RegistryDaoImpl.getEntityById.updateEncryptedDecryptedProperties"); @@ -572,7 +452,6 @@ public Graph getEntityById(String label) throws RecordNotFoundException, NoSuchE return parsedGraph; } - private void copyProperties(Vertex subject, Vertex newSubject, String methodOrigin, ImmutableTable.Builder> encDecPropertyBuilder) throws NoSuchElementException, EncryptionException, AuditFailedException { HashMap> propertyMetaPropertyMap = new HashMap>(); @@ -581,25 +460,27 @@ private void copyProperties(Vertex subject, Vertex newSubject, String methodOrig while (iter.hasNext()) { VertexProperty property = iter.next(); - String tailOfPropertyKey = property.key().substring(property.key().lastIndexOf("/") + 1).trim(); - boolean existingEncyptedPropertyKey = tailOfPropertyKey - .substring(0, Math.min(tailOfPropertyKey.length(), 9)).equalsIgnoreCase("encrypted"); - if ((methodOrigin.equalsIgnoreCase("create") || methodOrigin.equalsIgnoreCase("update")) && existingEncyptedPropertyKey) { - property.remove(); - } - if ((methodOrigin.equalsIgnoreCase("create") || methodOrigin.equalsIgnoreCase("update")) && schemaConfigurator.isPrivate(property.key()) && !existingEncyptedPropertyKey) { - propertyMap.put(property.key(), property.value()); - } else if (methodOrigin.equalsIgnoreCase("read") && schemaConfigurator.isEncrypted(tailOfPropertyKey)) { - propertyMap.put(property.key(), property.value()); - String decryptedKey = property.key().replace(tailOfPropertyKey, tailOfPropertyKey.substring(9)); - setProperty(newSubject, decryptedKey, EMPTY_STRING, methodOrigin); - } else if (isaMetaProperty(property.key())) { - buildPropertyMetaMap(propertyMetaPropertyMap, property); - } else { - if (!(methodOrigin.equalsIgnoreCase("read") - && property.key().contains("@audit"))) { - setProperty(newSubject, property.key(), property.value(), methodOrigin); - setMetaProperty(subject, newSubject, property, methodOrigin); + if (!isInternalProperty(property)) { + String tailOfPropertyKey = property.key().substring(property.key().lastIndexOf("/") + 1).trim(); + boolean existingEncyptedPropertyKey = tailOfPropertyKey + .substring(0, Math.min(tailOfPropertyKey.length(), 9)).equalsIgnoreCase("encrypted"); + if ((methodOrigin.equalsIgnoreCase("create") || 
methodOrigin.equalsIgnoreCase("update")) && existingEncyptedPropertyKey) { + property.remove(); + } + if ((methodOrigin.equalsIgnoreCase("create") || methodOrigin.equalsIgnoreCase("update")) && schemaConfigurator.isPrivate(property.key()) && !existingEncyptedPropertyKey) { + propertyMap.put(property.key(), property.value()); + } else if (methodOrigin.equalsIgnoreCase("read") && schemaConfigurator.isEncrypted(tailOfPropertyKey)) { + propertyMap.put(property.key(), property.value()); + String decryptedKey = property.key().replace(tailOfPropertyKey, tailOfPropertyKey.substring(9)); + setProperty(newSubject, decryptedKey, EMPTY_STRING, methodOrigin); + } else if (isaMetaProperty(property.key())) { + buildPropertyMetaMap(propertyMetaPropertyMap, property); + } else { + if (!(methodOrigin.equalsIgnoreCase("read") + && property.key().contains("@audit"))) { + setProperty(newSubject, property.key(), property.value(), methodOrigin); + setMetaProperty(subject, newSubject, property, methodOrigin); + } } } } @@ -607,7 +488,6 @@ private void copyProperties(Vertex subject, Vertex newSubject, String methodOrig if (propertyMap.size() > 0) { encDecPropertyBuilder.put(subject, newSubject, propertyMap); } - } private boolean isaMetaProperty(String key) { @@ -618,24 +498,36 @@ private void setProperty(Vertex v, String key, Object newValue, String methodOri if (!(methodOrigin.equalsIgnoreCase("read") && isAuditField(key))) { VertexProperty vp = v.property(key); Object oldValue = vp.isPresent() ? vp.value() : null; - if (oldValue != null && !methodOrigin.equalsIgnoreCase("update") && !schemaConfigurator.isSingleValued(key)) { + if(oldValue!=null && !methodOrigin.equalsIgnoreCase("update") && !schemaConfigurator.isSingleValued(key)){ List valueList = new ArrayList(); - if (oldValue instanceof List) { - valueList = (List) oldValue; - } else { - String valueStr = (String) oldValue; + if(oldValue instanceof List){ + valueList = (List)oldValue; + } else{ + String valueStr = (String)oldValue; valueList.add(valueStr); } - if (newValue instanceof List) { - valueList.addAll((List) newValue); - } else { + if(newValue instanceof List){ + valueList.addAll((List)newValue); + } else{ valueList.add(newValue); } - // newValue = valueList; - newValue = processVertexProperty(valueList); + newValue = valueList; + // TODO fix multi values + // newValue = processVertexProperty(valueList); + } + // v.property(key, processVertexProperty(newValue)); + if(newValue instanceof List){ + if(databaseProvider.isMultiValuedLiteralPropertySupported()){ + v.property(key, newValue); + } else { + Gson gson = new Gson(); + String json = gson.toJson(newValue); + v.property(key, json); + } + } else { + v.property(key, newValue); } - v.property(key, processVertexProperty(newValue)); if (!isAuditField(key) && auditEnabled) { if (!isaMetaProperty(key) && !Objects.equals(oldValue, newValue)) { GraphTraversal configTraversal = @@ -652,9 +544,9 @@ private void setProperty(Vertex v, String key, Object newValue, String methodOri .newObject(newValue) .record(databaseProvider); watch.stop("RegistryDaoImpl.setProperty.auditRecord"); - logger.debug("Audit record created for {} !", v.label()); + logger.info("Audit record created for {} !", v.label()); } else { - // System.out.println("NOT AUDITING"); + logger.debug("not auditing in the Application!"); } } else { logger.debug("No change found for auditing !"); @@ -682,15 +574,15 @@ private Object processVertexProperty(Object propertyValue) { private void setMetaPropertyFromMap(Vertex newSubject, HashMap> 
propertyMetaPropertyMap) { Iterator propertyIter = propertyMetaPropertyMap.entrySet().iterator(); - while (propertyIter.hasNext()) { - Map.Entry pair = (Map.Entry) propertyIter.next(); + while(propertyIter.hasNext()){ + Map.Entry pair = (Map.Entry)propertyIter.next(); logger.info("PROPERTY <- " + pair.getKey()); - HashMap _mpmap = (HashMap) pair.getValue(); + HashMap _mpmap = (HashMap) pair.getValue(); Iterator _mpmapIter = _mpmap.entrySet().iterator(); - while (_mpmapIter.hasNext()) { - Map.Entry _pair = (Map.Entry) _mpmapIter.next(); - logger.info("META PROPERTY <- " + _pair.getKey() + "|" + _pair.getValue() + "|" + newSubject.property(pair.getKey().toString()).isPresent()); - newSubject.property(pair.getKey().toString()).property(_pair.getKey().toString(), _pair.getValue().toString()); + while(_mpmapIter.hasNext()) { + Map.Entry _pair = (Map.Entry)_mpmapIter.next(); + logger.info("META PROPERTY <- " + _pair.getKey() + "|" + _pair.getValue() + "|" + newSubject.property(pair.getKey().toString()).isPresent()); + newSubject.property(pair.getKey().toString()).property(_pair.getKey().toString(),_pair.getValue().toString()); } } } @@ -735,80 +627,20 @@ private void buildPropertyMetaMap(HashMap> prope } } - /*@Override - public boolean deleteEntity (Graph entity, String rootLabel) throws RecordNotFoundException,AuditFailedException { - Graph graphFromStore = databaseProvider.getGraphStore(); - GraphTraversalSource traversalSource = graphFromStore.traversal(); - GraphTraversal dbHasLabel = traversalSource.clone().V().hasLabel(rootLabel); - if (!dbHasLabel.hasNext()) { - throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); - } - - GraphTraversal hasNestedLabel = traversalSource.clone().V().hasLabel(labelToBeDeleted); - if (!hasNestedLabel.hasNext()) { - logger.info("Record not found to be deleted !"); - throw new RecordNotFoundException(Constants.ENTITY_NOT_FOUND); - } - TinkerGraph graph = (TinkerGraph) entity; - GraphTraversalSource traversal = graph.traversal(); - GraphTraversal gts = traversal.clone().V().hasLabel(rootLabel); - - if (graphFromStore.features().graph().supportsTransactions()) { - org.apache.tinkerpop.gremlin.structure.Transaction tx; - tx = graphFromStore.tx(); - tx.onReadWrite(org.apache.tinkerpop.gremlin.structure.Transaction.READ_WRITE_BEHAVIOR.AUTO); - deleteEdgeAndNode(dbHasLabel.next(), gts); - tx.commit(); - tx.close(); - logger.info("Entity for {} deleted !", rootLabel); - return true; - } else { - deleteEdgeAndNode(dbHasLabel.next(), gts); - logger.info("Entity for {} deleted !", rootLabel); - } - - return false; - }*/ - - /*private void deleteEdgeAndNode(Vertex v, GraphTraversal gts) throws AuditFailedException{ - List dbEdgesForVertex = ImmutableList.copyOf(v.edges(Direction.OUT)); - if(gts.hasNext()){ - Vertex vertex = gts.next(); - Iterator edgeIter = vertex.edges(Direction.OUT); - while(edgeIter.hasNext()){ - Edge e = edgeIter.next(); - Optional edgeAlreadyExists = - dbEdgesForVertex.stream().filter(ed -> ed.label().equalsIgnoreCase(e.label())).findFirst(); - if(edgeAlreadyExists.isPresent()){ - Vertex vertexToBeDeleted = e.inVertex(); - Edge dbEdge = edgeAlreadyExists.get(); - Vertex dbVertexToBeDeleted = dbEdge.inVertex(); - if(vertexToBeDeleted.label().equalsIgnoreCase(dbVertexToBeDeleted.label())){ - Iterator inEdgeIter = dbVertexToBeDeleted.edges(Direction.IN); - Iterator outEdgeIter = dbVertexToBeDeleted.edges(Direction.OUT); - if(inEdgeIter.hasNext() || outEdgeIter.hasNext()){ - dbEdge.remove(); - }else{ - dbVertexToBeDeleted.remove(); - 
dbEdge.remove(); - } - AuditRecord record = appContext.getBean(AuditRecord.class); - String tailOfdbVertex=v.label().substring(v.label().lastIndexOf("/") + 1).trim(); - String auditVertexlabel= registryContext+tailOfdbVertex; - record - .subject(auditVertexlabel) - .predicate(dbEdge.label()) - .oldObject(dbVertexToBeDeleted.label()) - .newObject(null) - .record(databaseProvider); - - logger.info("Audit record created for delete with label : {} !", auditVertexlabel); - } - } - } - } - - }*/ + private boolean isaBlankNode(String label) { + return !isIRI(label); + } + + private String generateNamespacedLabel(String label) { + return String.format("%s%s", registryContext, label); + } + + private boolean isInternalProperty(VertexProperty property) { + boolean internalProperty = false; + if(property.key().startsWith(INTERNAL_PROPERTY_PREFIX)) + internalProperty = true; + return internalProperty; + } private void extractGraphFromVertex(Graph parsedGraph,Vertex parsedGraphSubject,Vertex s, ImmutableTable.Builder> encDecPropertyBuilder) throws NoSuchElementException, EncryptionException, AuditFailedException { @@ -820,6 +652,7 @@ private void extractGraphFromVertex(Graph parsedGraph,Vertex parsedGraphSubject, edge = edgeIter.next(); Vertex o = edge.inVertex(); Vertex newo = parsedGraph.addVertex(o.label()); +// newo.property(internalPropertyKey("id"),o.label()); copyProperties(o, newo,"read", encDecPropertyBuilder); parsedGraphSubject.addEdge(edge.label(), newo); vStack.push(o); @@ -864,80 +697,120 @@ private void updateEncryptedDecryptedProperties(Table updateEncDecListMap(Map listPropertyMap, String methodOrigin) throws EncryptionException{ + Map encDecListPropertyMap = new HashMap(); + for(Map.Entry entry: listPropertyMap.entrySet()){ + String k = entry.getKey(); + Object v = entry.getValue(); + List values = (List)v; + List encValues = new ArrayList(); + for(Object listV : values){ + String encDecValue = null; + if(methodOrigin.equalsIgnoreCase("create") || methodOrigin.equalsIgnoreCase("update")){ + encDecValue = encryptionService.encrypt(listV); + }else{ + encDecValue = encryptionService.decrypt(listV); + } + encValues.add(encDecValue); + } + encDecListPropertyMap.put(k, encValues); + } + return encDecListPropertyMap; + } - private Map updateEncDecListMap(Map listPropertyMap, String methodOrigin) throws EncryptionException{ - Map encDecListPropertyMap = new HashMap(); - for(Map.Entry entry: listPropertyMap.entrySet()){ - String k = entry.getKey(); - Object v = entry.getValue(); - List values = (List)v; - List encValues = new ArrayList(); - for(Object listV : values){ - String encDecValue = null; - if(methodOrigin.equalsIgnoreCase("create") || methodOrigin.equalsIgnoreCase("update")){ - encDecValue = encryptionService.encrypt(listV); - }else{ - encDecValue = encryptionService.decrypt(listV); - } - encValues.add(encDecValue); - } - encDecListPropertyMap.put(k, encValues); - } - return encDecListPropertyMap; - } + private void setEncDecMap(Map encryptedMap, Table> encDecPropertyTable){ + for(Map.Entry entry : encryptedMap.entrySet()){ + encDecPropertyTable.values().forEach(map -> { if(map.containsKey(entry.getKey())){ + map.put(entry.getKey(), entry.getValue()); + } + }); + } + } + private void setEncryptedDecryptedProperty(Table> encDecPropertyTable, String methodOrigin) throws AuditFailedException{ + for(Table.Cell> cell: encDecPropertyTable.cellSet()){ + Vertex subject = cell.getRowKey(); + Vertex newSubject = cell.getColumnKey(); + for(Map.Entry entry : cell.getValue().entrySet()){ + Object 
entryValue = entry.getValue(); + String entryKey = entry.getKey(); + String tailOfPropertyKey = entryKey.substring(entryKey.lastIndexOf("/") + 1).trim(); + String newKey = null; + if (methodOrigin.equalsIgnoreCase("create") || methodOrigin.equalsIgnoreCase("update")){ + newKey = entryKey.replace(tailOfPropertyKey, "encrypted" + tailOfPropertyKey); + setProperty(newSubject, newKey, entryValue, methodOrigin); + VertexProperty property = subject.property(entryKey); + setMetaProperty(subject, newSubject, property, methodOrigin); + } else if(methodOrigin.equalsIgnoreCase("read")){ + newKey = entryKey.replace(tailOfPropertyKey, tailOfPropertyKey.substring(9)); + Iterator> propIter = newSubject.property(newKey).properties(); + setProperty(newSubject, newKey, entryValue, methodOrigin); + while(propIter.hasNext()){ + Property propertyObj = propIter.next(); + newSubject.property(newKey).property(propertyObj.key(), propertyObj.value()); + } + } - private void setEncDecMap(Map encryptedMap, Table> encDecPropertyTable){ - for(Map.Entry entry : encryptedMap.entrySet()){ - encDecPropertyTable.values().forEach(map -> { if(map.containsKey(entry.getKey())){ - map.put(entry.getKey(), entry.getValue()); - } - }); - } - } + } + } + } - private void setEncryptedDecryptedProperty(Table> encDecPropertyTable, String methodOrigin) throws AuditFailedException{ - - for(Table.Cell> cell: encDecPropertyTable.cellSet()){ - Vertex subject = cell.getRowKey(); - Vertex newSubject = cell.getColumnKey(); - for(Map.Entry entry : cell.getValue().entrySet()){ - Object entryValue = entry.getValue(); - String entryKey = entry.getKey(); - String tailOfPropertyKey = entryKey.substring(entryKey.lastIndexOf("/") + 1).trim(); - String newKey = null; - if (methodOrigin.equalsIgnoreCase("create") || methodOrigin.equalsIgnoreCase("update")){ - newKey = entryKey.replace(tailOfPropertyKey, "encrypted" + tailOfPropertyKey); - setProperty(newSubject, newKey, entryValue, methodOrigin); - VertexProperty property = subject.property(entryKey); - setMetaProperty(subject, newSubject, property, methodOrigin); - } else if(methodOrigin.equalsIgnoreCase("read")){ - newKey = entryKey.replace(tailOfPropertyKey, tailOfPropertyKey.substring(9)); - Iterator> propIter = newSubject.property(newKey).properties(); - setProperty(newSubject, newKey, entryValue, methodOrigin); - while(propIter.hasNext()){ - Property propertyObj = propIter.next(); - newSubject.property(newKey).property(propertyObj.key(), propertyObj.value()); - } - } + public void setAuditInfo(Vertex v, boolean isNew){ + if(authenticationEnabled){ + String userId = ((AuthInfo) SecurityContextHolder.getContext().getAuthentication().getPrincipal()).getSub(); + long timestamp = new Date().getTime(); + if(isNew){ + v.property(registryContext+Constants.AuditProperties.createdBy.name(),userId); + v.property(registryContext+Constants.AuditProperties.createdAt.name(),timestamp); + } + v.property(registryContext+Constants.AuditProperties.lastUpdatedBy.name(),userId); + v.property(registryContext+Constants.AuditProperties.lastUpdatedAt.name(),timestamp); + } + } - } - } - } + private String internalPropertyKey(String key) { + return INTERNAL_PROPERTY_PREFIX+key; + } + + private Iterator getVerticesIterator(Graph dbGraph, String label) { + String longLabel = IMPOSSIBLE_LABEL; + if(isaBlankNode(label)) + longLabel = IMPOSSIBLE_LABEL; + else{ + longLabel = extractID(label); + List matchedIDs = databaseProvider.getIDsFromLabel(label); + if(!matchedIDs.isEmpty()) + return getVerticesIterator(dbGraph, 
generateNamespacedLabel(matchedIDs.get(0))); + } + return dbGraph.vertices(longLabel); + } + private String extractID(String URI) { + String longLabel = IMPOSSIBLE_LABEL; + Pattern r = Pattern.compile(".*\\/(\\d+)"); + Matcher m = r.matcher(URI); + if (m.find( )) { + longLabel = m.group(1); + } + return longLabel; + } - public void setAuditInfo(Vertex v, boolean isNew){ - if(authenticationEnabled){ - String userId = ((AuthInfo) SecurityContextHolder.getContext().getAuthentication().getPrincipal()).getSub(); - long timestamp = new Date().getTime(); - if(isNew){ - v.property(registryContext+Constants.AuditProperties.createdBy.name(),userId); - v.property(registryContext+Constants.AuditProperties.createdAt.name(),timestamp); - } - v.property(registryContext+Constants.AuditProperties.lastUpdatedBy.name(),userId); - v.property(registryContext+Constants.AuditProperties.lastUpdatedAt.name(),timestamp); - } - } + private boolean doesExist(String rootNodeLabel, Graph dbGraph) { + if(isaBlankNode(rootNodeLabel)) + return false; + try { + Iterator iter = getVerticesIterator(dbGraph,rootNodeLabel); + return iter.hasNext(); + } catch(Exception e){ + return false; + } + } + + private Optional getFirstMatchingEdgeAndVertex(List dbEdgesForVertex, Vertex ver, String edgeLabel) { + return dbEdgesForVertex.stream().filter(ed -> { + return ed.label().equalsIgnoreCase(edgeLabel) && ed.inVertex().label().equalsIgnoreCase(ver.label()); + }).findFirst(); + } } diff --git a/java/registry/src/main/java/io/opensaber/registry/sink/DatabaseProvider.java b/java/registry/src/main/java/io/opensaber/registry/sink/DatabaseProvider.java index ca46e4c3a..9bc654f3d 100644 --- a/java/registry/src/main/java/io/opensaber/registry/sink/DatabaseProvider.java +++ b/java/registry/src/main/java/io/opensaber/registry/sink/DatabaseProvider.java @@ -8,11 +8,16 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + public abstract class DatabaseProvider { private static Logger logger = LoggerFactory.getLogger(DatabaseProvider.class); public abstract Graph getGraphStore(); - public abstract void shutdown() throws Exception; + public abstract void shutdown() throws Exception; + public abstract boolean isMultiValuedLiteralPropertySupported(); /** * This method is used for checking database service. 
It fires a dummy query to check for a non-existent label @@ -36,7 +41,7 @@ public boolean isDatabaseServiceUp() { * This method is used to initialize some global graph level configuration */ public void initializeGlobalGraphConfiguration() { - if (IteratorUtils.count(getGraphStore().traversal().V().has(T.label, Constants.GRAPH_GLOBAL_CONFIG)) == 0) { + if (!((getGraphStore().traversal().clone().V().has(T.label, Constants.GRAPH_GLOBAL_CONFIG)).hasNext())) { logger.info("Adding GRAPH_GLOBAL_CONFIG node..."); if (getGraphStore().features().graph().supportsTransactions()) { org.apache.tinkerpop.gremlin.structure.Transaction tx; @@ -52,6 +57,16 @@ public void initializeGlobalGraphConfiguration() { logger.debug("Graph initialised without transaction !"); } } + + } + + public List getIDsFromLabel(String label){ + ArrayList vertexIDList = new ArrayList<>(); + Iterator verticesWithLabel= getGraphStore().traversal().clone().V().hasLabel(label); + while(verticesWithLabel.hasNext()){ + vertexIDList.add(String.valueOf(verticesWithLabel.next().id())); + } + return vertexIDList; } } diff --git a/java/registry/src/main/java/io/opensaber/registry/sink/GUIDElementIDProvider.java b/java/registry/src/main/java/io/opensaber/registry/sink/GUIDElementIDProvider.java new file mode 100644 index 000000000..84e74fb53 --- /dev/null +++ b/java/registry/src/main/java/io/opensaber/registry/sink/GUIDElementIDProvider.java @@ -0,0 +1,56 @@ +package io.opensaber.registry.sink; + +import com.steelbridgelabs.oss.neo4j.structure.Neo4JElementIdProvider; +import org.slf4j.Logger; +import org.neo4j.driver.v1.types.Entity; +import org.slf4j.LoggerFactory; + +import java.util.Objects; +import java.util.UUID; + +public class GUIDElementIDProvider implements Neo4JElementIdProvider { + + private static final Logger logger = LoggerFactory.getLogger(GUIDElementIDProvider.class); + + public static final String DefaultIdFieldName = "id"; + + @Override + public Long generate() { + return UUID.randomUUID().getMostSignificantBits() & Long.MAX_VALUE; + } + + @Override + public String fieldName() { + return DefaultIdFieldName; + } + + @Override + public Long get(Entity entity) { + Objects.requireNonNull(entity, "entity cannot be null"); + // return id() + return entity.get(DefaultIdFieldName).asLong(); + } + + @Override + public Long processIdentifier(Object id) { + Objects.requireNonNull(id, "Element identifier cannot be null"); + // check for Long + if (id instanceof Long) + return (Long)id; + // check for numeric types + if (id instanceof Number) + return ((Number)id).longValue(); + // check for string + if (id instanceof String) + return Long.valueOf((String)id); + // error + throw new IllegalArgumentException(String.format("Expected an id that is convertible to Long but received %s", id.getClass())); + } + + @Override + public String matchPredicateOperand(String alias) { + Objects.requireNonNull(alias, "alias cannot be null"); + // id(alias) + return alias + "." 
+ DefaultIdFieldName; + } +} diff --git a/java/registry/src/main/java/io/opensaber/registry/sink/Neo4jGraphProvider.java b/java/registry/src/main/java/io/opensaber/registry/sink/Neo4jGraphProvider.java index 3effeb950..33fd0734d 100644 --- a/java/registry/src/main/java/io/opensaber/registry/sink/Neo4jGraphProvider.java +++ b/java/registry/src/main/java/io/opensaber/registry/sink/Neo4jGraphProvider.java @@ -2,31 +2,39 @@ import com.steelbridgelabs.oss.neo4j.structure.Neo4JElementIdProvider; import com.steelbridgelabs.oss.neo4j.structure.Neo4JGraph; +import com.steelbridgelabs.oss.neo4j.structure.Neo4JVertex; import com.steelbridgelabs.oss.neo4j.structure.providers.DatabaseSequenceElementIdProvider; import com.steelbridgelabs.oss.neo4j.structure.providers.Neo4JNativeElementIdProvider; +import com.steelbridgelabs.oss.neo4j.structure.summary.ResultSummaryLogger; import io.opensaber.registry.middleware.util.Constants; import org.apache.commons.configuration.BaseConfiguration; import org.apache.commons.configuration.Configuration; import org.apache.tinkerpop.gremlin.neo4j.structure.Neo4jGraph; import org.apache.tinkerpop.gremlin.structure.Graph; -import org.neo4j.driver.v1.AuthTokens; -import org.neo4j.driver.v1.Config; -import org.neo4j.driver.v1.Driver; -import org.neo4j.driver.v1.GraphDatabase; +import org.apache.tinkerpop.gremlin.structure.Vertex; +import org.neo4j.driver.v1.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.core.env.Environment; import javax.annotation.PostConstruct; import javax.annotation.PreDestroy; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.stream.Stream; public class Neo4jGraphProvider extends DatabaseProvider { private Logger logger = LoggerFactory.getLogger(Neo4jGraphProvider.class); private Graph graph; private Driver driver; + private Neo4JGraph neo4JGraph; + private Environment environment; + Neo4JElementIdProvider idProvider; - public Neo4jGraphProvider(Environment environment) { + public Neo4jGraphProvider(Environment env) { + environment = env; Boolean isDatabaseEmbedded = Boolean.parseBoolean(environment.getProperty("database.embedded")); if (isDatabaseEmbedded) { String graphDbLocation = environment.getProperty(Constants.NEO4J_DIRECTORY); @@ -41,13 +49,12 @@ public Neo4jGraphProvider(Environment environment) { String databasePort = environment.getProperty("database.port"); Boolean profilerEnabled = Boolean.parseBoolean(environment.getProperty("database.neo4j-profiler-enabled")); driver = GraphDatabase.driver(String.format("bolt://%s:%s", databaseHost, databasePort), AuthTokens.none()); - Neo4JElementIdProvider idProvider = new Neo4JNativeElementIdProvider(); - Neo4JGraph neo4JGraph = new Neo4JGraph(driver, idProvider, idProvider); + idProvider = new GUIDElementIDProvider(); + neo4JGraph = new Neo4JGraph(driver, idProvider, idProvider); neo4JGraph.setProfilerEnabled(profilerEnabled); graph = neo4JGraph; logger.info("Initializing remote graph db for "); logger.info("host: %s \n\t port: %s \n\t driver: %s", databaseHost, databasePort,driver); - } catch (Exception ex) { logger.error("Exception when initializing Neo4J DB connection...", ex); throw ex; @@ -77,4 +84,28 @@ public void shutdown() throws Exception { driver.close(); } } + + @Override + public boolean isMultiValuedLiteralPropertySupported() { + return false; + } + + @Override + public List getIDsFromLabel(String label){ + Boolean isDatabaseEmbedded = 
Boolean.parseBoolean(environment.getProperty("database.embedded")); + ArrayList vertexIDList = new ArrayList<>(); + if (isDatabaseEmbedded) { + Iterator verticesWithLabel= graph.traversal().clone().V().hasLabel(label); + while(verticesWithLabel.hasNext()){ + vertexIDList.add(String.valueOf(verticesWithLabel.next().id())); + } + } else { + StatementResult result = neo4JGraph.execute("MATCH (n:`"+label+"`) RETURN n"); + while(result.hasNext()){ + Record record = result.next(); + vertexIDList.add(String.valueOf(record.get("n").get(idProvider.fieldName()))); + } + } + return vertexIDList; + } } diff --git a/java/registry/src/main/java/io/opensaber/registry/sink/OrientDBGraphProvider.java b/java/registry/src/main/java/io/opensaber/registry/sink/OrientDBGraphProvider.java index f19673a11..e4ce9ecfc 100644 --- a/java/registry/src/main/java/io/opensaber/registry/sink/OrientDBGraphProvider.java +++ b/java/registry/src/main/java/io/opensaber/registry/sink/OrientDBGraphProvider.java @@ -44,4 +44,9 @@ public void shutdown() throws Exception { logger.info("**************************************************************************"); graph.close(); } + + @Override + public boolean isMultiValuedLiteralPropertySupported() { + return false; + } } diff --git a/java/registry/src/main/java/io/opensaber/registry/sink/SqlgProvider.java b/java/registry/src/main/java/io/opensaber/registry/sink/SqlgProvider.java index 01ad74e39..2c5c03772 100644 --- a/java/registry/src/main/java/io/opensaber/registry/sink/SqlgProvider.java +++ b/java/registry/src/main/java/io/opensaber/registry/sink/SqlgProvider.java @@ -47,4 +47,9 @@ public void shutdown() throws Exception { logger.info("**************************************************************************"); graph.close(); } + + @Override + public boolean isMultiValuedLiteralPropertySupported() { + return false; + } } diff --git a/java/registry/src/main/java/io/opensaber/registry/sink/TinkerGraphProvider.java b/java/registry/src/main/java/io/opensaber/registry/sink/TinkerGraphProvider.java index 02b687239..a3f45ad3a 100644 --- a/java/registry/src/main/java/io/opensaber/registry/sink/TinkerGraphProvider.java +++ b/java/registry/src/main/java/io/opensaber/registry/sink/TinkerGraphProvider.java @@ -43,4 +43,9 @@ public void shutdown() throws Exception { logger.info("**************************************************************************"); graph.close(); } + + @Override + public boolean isMultiValuedLiteralPropertySupported() { + return false; + } } diff --git a/java/registry/src/main/resources/schema-configuration-school-test.jsonld.sample b/java/registry/src/main/resources/schema-configuration-school-test.jsonld.sample index 4f410cc74..870eafe54 100644 --- a/java/registry/src/main/resources/schema-configuration-school-test.jsonld.sample +++ b/java/registry/src/main/resources/schema-configuration-school-test.jsonld.sample @@ -11,16 +11,5 @@ "@type": "opensaber:Registry", "opensaber:privateProperties": [ - { - "@id": "sample:schoolName" - }, - - { - "@id": "sample:clusterResourceCentre" - }, - - { - "@id": "sample:udiseNumber" - } ] } diff --git a/java/registry/src/test/java/io/opensaber/registry/controller/RegistryTestBase.java b/java/registry/src/test/java/io/opensaber/registry/controller/RegistryTestBase.java index 361fceea5..e10b822da 100644 --- a/java/registry/src/test/java/io/opensaber/registry/controller/RegistryTestBase.java +++ b/java/registry/src/test/java/io/opensaber/registry/controller/RegistryTestBase.java @@ -9,6 +9,7 @@ import java.net.URI; import 
java.net.URISyntaxException; import java.nio.file.Paths; +import java.util.Iterator; import java.util.UUID; import org.apache.commons.lang.StringUtils; @@ -19,6 +20,7 @@ import org.apache.jena.rdf.model.Statement; import org.apache.jena.rdf.model.StmtIterator; import org.apache.jena.sparql.vocabulary.FOAF; +import org.apache.jena.util.ResourceUtils; import org.apache.jena.vocabulary.RDF; import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal; import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource; @@ -140,10 +142,32 @@ public void setJsonldWithNewRootLabel(){ } public static String generateRandomId(){ - return UUID.randomUUID().toString(); + return String.valueOf((UUID.randomUUID().getMostSignificantBits() & Long.MAX_VALUE)); } public String getSubjectType(){ return environment.getProperty(Constants.SUBJECT_LABEL_TYPE); } + + public void printModel(Model rdfModel) { + Iterator iter = rdfModel.listStatements(); + while(iter.hasNext()){ + System.out.println(iter.next()); + } + } + + public void replaceSubjectID(Model model, String oldURI, String newURI) { + System.out.println(model); + Iterator iter = model.listSubjects(); + while(iter.hasNext()){ + Resource subject = iter.next(); + if(oldURI!=null){ + if(subject.getURI().equals(oldURI)) + ResourceUtils.renameResource(subject,newURI); + } else { + ResourceUtils.renameResource(subject,newURI); + } + } + System.out.println(model); + } } diff --git a/java/registry/src/test/java/io/opensaber/registry/dao/impl/RegistryDaoImplTest.java b/java/registry/src/test/java/io/opensaber/registry/dao/impl/RegistryDaoImplTest.java index 223e2d36f..da5725af7 100644 --- a/java/registry/src/test/java/io/opensaber/registry/dao/impl/RegistryDaoImplTest.java +++ b/java/registry/src/test/java/io/opensaber/registry/dao/impl/RegistryDaoImplTest.java @@ -84,7 +84,9 @@ public class RegistryDaoImplTest extends RegistryTestBase { @Value("${registry.context.base}") private String registryContext; - + @Value("${authentication.enabled}") + private boolean authenticationEnabled; + private static final String RICH_LITERAL_TTL = "rich-literal.jsonld"; private static final String CONTEXT_CONSTANT = "sample:"; @@ -111,7 +113,7 @@ public void initializeGraph() { graph = TinkerGraph.open(); MockitoAnnotations.initMocks(this); TestHelper.clearData(databaseProvider); - databaseProvider.getGraphStore().addVertex(Constants.GRAPH_GLOBAL_CONFIG).property(Constants.PERSISTENT_GRAPH, true); +// databaseProvider.getGraphStore().addVertex(Constants.GRAPH_GLOBAL_CONFIG).property(Constants.PERSISTENT_GRAPH, true); AuthInfo authInfo = new AuthInfo(); authInfo.setAud("aud"); authInfo.setName("name"); @@ -266,7 +268,6 @@ public void test_adding_shared_nodes() throws LabelCannotBeNullException { // Expected count of vertices is 6 with two entities with same address created assertEquals(7, verticesCountAfterSharedNodesCreation); assertEquals(8, edgesCountAfterSharedNodesCreation); - } catch (DuplicateRecordException | RecordNotFoundException | EncryptionException | NoSuchElementException e) { e.printStackTrace(); } catch (AuditFailedException e) { @@ -321,13 +322,6 @@ public void test_adding_shared_nodes_with_new_properties() throws DuplicateRecor } - private void printModel(Model rdfModel) { - Iterator iter = rdfModel.listStatements(); - while(iter.hasNext()){ - logger.debug("-------next iterator in printModel() : {} ",iter.next()); -} - } - @Test public void test_adding_shared_nodes_with_updated_properties() throws DuplicateRecordException, 
RecordNotFoundException, EncryptionException, AuditFailedException, LabelCannotBeNullException { @@ -670,12 +664,13 @@ public void test_add_iri_node_to_existing_entity() MultipleEntityException, EntityCreationException{ Model rdfModel = getNewValidRdf(); + printModel(rdfModel); Graph graph = TinkerGraph.open(); String rootLabel = updateGraphFromRdf(rdfModel, graph); String response = registryDao.addEntity(graph, "_:"+rootLabel, null, null); Graph entity = registryDao.getEntityById(response); Model updateRdfModel = getNewValidRdf("add_node.jsonld"); - + printModel(updateRdfModel); // Call add entity Graph updateGraph = TinkerGraph.open(); String label = getRootLabel(updateRdfModel); @@ -808,16 +803,19 @@ public void shutDown() throws Exception{ public void savingMetaProperties() throws DuplicateRecordException, RecordNotFoundException, EncryptionException, AuditFailedException { Model inputModel = getNewValidRdf(RICH_LITERAL_TTL, "ex:"); String rootLabel = updateGraphFromRdf(inputModel, graph, "http://example.org/typeProperty"); - registryDao.addEntity(graph, rootLabel, null, null); - Graph entity = registryDao.getEntityById(rootLabel); - org.eclipse.rdf4j.model.Model model = RDF2Graph.convertGraph2RDFModel(entity, rootLabel); + String response = registryDao.addEntity(graph, rootLabel, null, null); + Graph entity = registryDao.getEntityById(response); + org.eclipse.rdf4j.model.Model model = RDF2Graph.convertGraph2RDFModel(entity, response); Model outputModel = JenaRDF4J.asJenaModel(model); + replaceSubjectID(outputModel,null,rootLabel); + printModel(inputModel); + printModel(outputModel); assertTrue(inputModel.difference(outputModel).isEmpty()); assertTrue(outputModel.difference(inputModel).isEmpty()); } - private void updateNodeLabel(Model rdfModel, String nodeLabel) { + private void updateNodeLabel(Model rdfModel, String nodeLabel) { String labelForUpdate = databaseProvider.getGraphStore().traversal().clone().V() .has(T.label, nodeLabel) .next().vertices(Direction.IN).next().label(); @@ -1100,33 +1098,37 @@ public void test_update_single_valued_properties() throws Exception { @Test public void test_setAuthInfo_for_create(){ - Graph graph = TinkerGraph.open(); - Vertex v = graph.addVertex("1234"); - registryDao.setAuditInfo(v, true); - assertTrue(v.property(registryContext +"createdBy").isPresent()); - assertTrue(v.property(registryContext +"lastUpdatedBy").isPresent()); - assertEquals(v.property(registryContext +"createdAt").value(), - v.property(registryContext +"lastUpdatedAt").value()); + if(authenticationEnabled){ + Graph graph = TinkerGraph.open(); + Vertex v = graph.addVertex("1234"); + registryDao.setAuditInfo(v, true); + assertTrue(v.property(registryContext +"createdBy").isPresent()); + assertTrue(v.property(registryContext +"lastUpdatedBy").isPresent()); + assertEquals(v.property(registryContext +"createdAt").value(), + v.property(registryContext +"lastUpdatedAt").value()); + } } @Test public void test_setAuthInfo_for_update(){ - Graph graph = TinkerGraph.open(); - Vertex v = graph.addVertex("1234"); - registryDao.setAuditInfo(v, true); - assertTrue(v.property(registryContext +"createdBy").isPresent()); - assertTrue(v.property(registryContext +"lastUpdatedBy").isPresent()); - assertEquals(v.property(registryContext +"createdBy").value().toString(), "sub"); - assertEquals(v.property(registryContext +"lastUpdatedBy").value().toString(), "sub"); - assertThat(v.property(registryContext +"createdAt").value(), instanceOf(Long.class)); - assertThat(v.property(registryContext 
+"lastUpdatedAt").value(), instanceOf(Long.class)); - assertEquals(v.property(registryContext +"createdAt").value(), - v.property(registryContext +"lastUpdatedAt").value()); - registryDao.setAuditInfo(v, false); - assertTrue(v.property(registryContext +"lastUpdatedBy").isPresent()); - assertEquals(v.property(registryContext +"lastUpdatedBy").value().toString(), "sub"); - assertThat(v.property(registryContext +"createdAt").value(), instanceOf(Long.class)); - assertThat(v.property(registryContext +"lastUpdatedAt").value(), instanceOf(Long.class)); + if(authenticationEnabled) { + Graph graph = TinkerGraph.open(); + Vertex v = graph.addVertex("1234"); + registryDao.setAuditInfo(v, true); + assertTrue(v.property(registryContext + "createdBy").isPresent()); + assertTrue(v.property(registryContext + "lastUpdatedBy").isPresent()); + assertEquals(v.property(registryContext + "createdBy").value().toString(), "sub"); + assertEquals(v.property(registryContext + "lastUpdatedBy").value().toString(), "sub"); + assertThat(v.property(registryContext + "createdAt").value(), instanceOf(Long.class)); + assertThat(v.property(registryContext + "lastUpdatedAt").value(), instanceOf(Long.class)); + assertEquals(v.property(registryContext + "createdAt").value(), + v.property(registryContext + "lastUpdatedAt").value()); + registryDao.setAuditInfo(v, false); + assertTrue(v.property(registryContext + "lastUpdatedBy").isPresent()); + assertEquals(v.property(registryContext + "lastUpdatedBy").value().toString(), "sub"); + assertThat(v.property(registryContext + "createdAt").value(), instanceOf(Long.class)); + assertThat(v.property(registryContext + "lastUpdatedAt").value(), instanceOf(Long.class)); + } } } diff --git a/java/registry/src/test/java/io/opensaber/registry/service/impl/RegistryServiceImplTest.java b/java/registry/src/test/java/io/opensaber/registry/service/impl/RegistryServiceImplTest.java index eda63d286..10ac596c9 100644 --- a/java/registry/src/test/java/io/opensaber/registry/service/impl/RegistryServiceImplTest.java +++ b/java/registry/src/test/java/io/opensaber/registry/service/impl/RegistryServiceImplTest.java @@ -198,9 +198,9 @@ public void test_adding_record_with_multi_valued_literal_properties() throws Exc } String response = registryService.addEntity(model,null,null); org.eclipse.rdf4j.model.Model responseModel = registryService.getEntityById(response); + replaceSubjectID(model,roots.get(0).getURI(),response); Model jenaModel = JenaRDF4J.asJenaModel(responseModel); assertTrue(jenaModel.isIsomorphicWith(model)); - closeDB(); } public void closeDB() throws Exception{