diff --git a/resources/test-cassandra.yaml b/resources/test-cassandra.yaml new file mode 100644 index 0000000..498d789 --- /dev/null +++ b/resources/test-cassandra.yaml @@ -0,0 +1,567 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: 'Test Cluster' + +# You should always specify InitialToken when setting up a production +# cluster for the first time, and often when adding capacity later. +# The principle is that each node should be given an equal slice of +# the token ring; see http://wiki.apache.org/cassandra/Operations +# for more details. +# +# If blank, Cassandra will request a token bisecting the range of +# the heaviest-loaded existing node. If there is no load information +# available, such as is the case with a new cluster, it will pick +# a random token, which will lead to hot spots. +initial_token: 114528098789089706000475355189730717532 + +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, hints will be dropped. +max_hint_window_in_ms: 3600000 # one hour +# Sleep this long after delivering each hint +hinted_handoff_throttle_delay_in_ms: 1 + +# The following setting populates the page cache on memtable flush and compaction +# WARNING: Enable this setting only when the whole node's data fits in memory. +# Defaults to: false +# populate_io_cache_on_flush: false + +# authentication backend, implementing IAuthenticator; used to identify users +authenticator: org.apache.cassandra.auth.AllowAllAuthenticator + +# authorization backend, implementing IAuthority; used to limit access/provide permissions +authority: org.apache.cassandra.auth.AllowAllAuthority + +# The partitioner is responsible for distributing rows (by key) across +# nodes in the cluster. Any IPartitioner may be used, including your +# own as long as it is on the classpath. Out of the box, Cassandra +# provides org.apache.cassandra.dht.RandomPartitioner +# org.apache.cassandra.dht.ByteOrderedPartitioner, +# org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated), +# and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner +# (deprecated). +# +# - RandomPartitioner distributes rows across the cluster evenly by md5. +# When in doubt, this is the best option. +# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows +# scanning rows in key order, but the ordering can generate hot spots +# for sequential insertion workloads. +# - OrderPreservingPartitioner is an obsolete form of BOP, that stores +# - keys in a less-efficient format and only works with keys that are +# UTF8-encoded Strings. +# - CollatingOPP colates according to EN,US rules rather than lexical byte +# ordering. Use this as an example if you need custom collation. +# +# See http://wiki.apache.org/cassandra/Operations for more on +# partitioners and token selection. +partitioner: org.apache.cassandra.dht.RandomPartitioner + +# directories where Cassandra should store data on disk. +data_file_directories: + - /tmp/titanium-test/data + +# commit log +commitlog_directory: /tmp/titanium-test/commitlog + +# Maximum size of the key cache in memory. 
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must store the whole values of
+# its rows, so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and are relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and are relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The provider for the row cache to use.
+#
+# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
+#
+# SerializingCacheProvider serialises the contents of the row and stores
+# it in native memory, i.e., off the JVM Heap. Serialized rows take
+# significantly less memory than "live" rows in the JVM, so you can cache
+# more rows in a given memory footprint. And storing the cache off-heap
+# means you can use smaller heap sizes, reducing the impact of GC pauses.
+#
+# It is also valid to specify the fully-qualified class name to a class
+# that implements org.apache.cassandra.cache.IRowCacheProvider.
+#
+# Defaults to SerializingCacheProvider
+row_cache_provider: SerializingCacheProvider
+
+# saved caches
+saved_caches_directory: /tmp/titanium-test/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments.
A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentally from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: ",," + - seeds: "127.0.0.1" + +# emergency pressure valve: each time heap usage after a full (CMS) +# garbage collection is above this fraction of the max, Cassandra will +# flush the largest memtables. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +# +# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: +# it is most effective under light to moderate load, or read-heavy +# workloads; under truly massive write load, it will often be too +# little, too late. +flush_largest_memtables_at: 0.75 + +# emergency pressure valve #2: the first time heap usage after a full +# (CMS) garbage collection is above this fraction of the max, +# Cassandra will reduce cache maximum _capacity_ to the given fraction +# of the current _size_. Should usually be set substantially above +# flush_largest_memtables_at, since that will have less long-term +# impact on the system. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +reduce_cache_sizes_at: 0.85 +reduce_cache_capacity_to: 0.6 + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 + +# Total memory to use for memtables. Cassandra will flush the largest +# memtable when this much memory is used. +# If omitted, Cassandra will set it to 1/3 of the heap. +# memtable_total_space_in_mb: 2048 + +# Total space to use for commitlogs. Since commitlog segments are +# mmapped, and hence use up address space, the default size is 32 +# on 32-bit JVMs, and 1024 on 64-bit JVMs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Cassandra will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# commitlog_total_space_in_mb: 4096 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. 
If you have a large heap and many data directories, +# you can increase this value for better flush performance. +# By default this will be set to the amount of data directories defined. +#memtable_flush_writers: 1 + +# the number of full memtables to allow pending flush, that is, +# waiting for a writer thread. At a minimum, this should be set to +# the maximum number of secondary indexes created on a single CF. +memtable_flush_queue_size: 4 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSD:s; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +ssl_storage_port: 7001 + +# Address to bind to and tell other Cassandra nodes to connect to. You +# _must_ change this if you want multiple nodes to be able to +# communicate! +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing *if* the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting this to 0.0.0.0 is always wrong. +listen_address: localhost + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# The address to bind the Thrift RPC service to -- clients connect +# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if +# you want Thrift to listen on all interfaces. +# +# Leaving this blank has the same effect it does for ListenAddress, +# (i.e. it will be based on the configured hostname of the node). +rpc_address: localhost +# port for Thrift to listen for clients on +rpc_port: 9160 + +# enable or disable keepalive on rpc connections +rpc_keepalive: true + +# Cassandra provides three options for the RPC Server: +# +# sync -> One connection per thread in the rpc pool (see below). +# For a very large number of clients, memory will be your limiting +# factor; on a 64 bit JVM, 128KB is the minimum stack size per thread. +# Connection pooling is very, very strongly recommended. +# +# async -> Nonblocking server implementation with one thread to serve +# rpc connections. This is not recommended for high throughput use +# cases. Async has been tested to be about 50% slower than sync +# or hsha and is deprecated: it will be removed in the next major release. +# +# hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool +# (see below) is used to manage requests, but the threads are multiplexed +# across the different clients. +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. +rpc_server_type: sync + +# Uncomment rpc_min|max|thread to set request pool size. +# You would primarily set max for the sync server to safeguard against +# misbehaved clients; if you do hit the max, Cassandra will block until one +# disconnects before accepting more. The defaults for sync are min of 16 and max +# unlimited. +# +# For the Hsha server, the min and max both default to quadruple the number of +# CPU cores. +# +# This configuration is ignored by the async server. 
+# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum field length). +# 0 disables TFramedTransport in favor of TSocket. This option +# is deprecated; we strongly recommend using Framed mode. +thrift_framed_transport_size_in_mb: 15 + +# The max length of a thrift message, including all fields and +# internal thrift overhead. +thrift_max_message_length_in_mb: 16 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# Keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Add column indexes to a row after its contents reach this size. +# Increase if your column values are large, or if you have a very large +# number of columns. The competing causes are, Cassandra has to +# deserialize this much of the row to read a single column, so you want +# it to be small - at least if you do many partial-row reads - but all +# the index data is read for each access, so you don't want to generate +# that wastefully either. +column_index_size_in_kb: 64 + +# Size limit for rows being compacted in memory. Larger rows will spill +# over to disk and use a slower two-pass compaction process. A message +# will be logged specifying the row key. +in_memory_compaction_limit_in_mb: 64 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long running compactions. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# This setting has no effect on LeveledCompactionStrategy. +# +# concurrent_compactors defaults to the number of cores. +# Uncomment to make compaction mono-threaded, the pre-0.8 default. +#concurrent_compactors: 1 + +# Multi-threaded compaction. When enabled, each compaction will use +# up to one thread per core, plus one thread per sstable being merged. +# This is usually only useful for SSD-based hardware: otherwise, +# your concern is usually to get compaction to do LESS i/o (see: +# compaction_throughput_mb_per_sec), not more. +multithreaded_compaction: false + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this account for all types +# of compaction, including validation compaction. 
+compaction_throughput_mb_per_sec: 16 + +# Track cached row keys during compaction, and re-cache their new +# positions in the compacted sstable. Disable if you use really large +# key caches. +compaction_preheat_key_cache: true + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 400 Mbps or 50 MB/s. +# stream_throughput_outbound_megabits_per_sec: 400 + +# Time to wait for a reply from other nodes before failing the command +rpc_timeout_in_ms: 10000 + +# Enable socket timeout for streaming operation. +# When a timeout occurs during streaming, streaming is retried from the start +# of the current file. This *can* involve re-streaming an important amount of +# data, so you should avoid setting the value too low. +# Default value is 0, which never timeout streams. +# streaming_socket_timeout_in_ms: 0 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This improves cache locality +# when disabling read repair, which can further improve throughput. +# Only appropriate for single-datacenter deployments. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - GossipingPropertyFileSnitch +# The rack and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via gossip. If +# cassandra-topology.properties exists, it is used as a fallback, allowing +# migration from the PropertyFileSnitch. +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's +# IP address, respectively. Unless this happens to match your +# deployment conventions (as it did Facebook's), this is best used +# as an example of writing a custom Snitch class. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the Datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. 
(For intra-Region
+# traffic, Cassandra will switch to the private IP after
+# establishing a connection.)
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+#  - throttle_limit -- The throttle_limit is the number of in-flight
+#                      requests per client. Requests beyond
+#                      that limit are queued up until
+#                      running requests can complete.
+#                      The value of 80 here is twice the number of
+#                      concurrent_reads + concurrent_writes.
+#  - default_weight -- default_weight is optional and allows for
+#                      overriding the default which is 1.
+#  - weights -- Weights are optional and will default to 1 or the
+#               overridden default_weight. The weight translates into how
+#               many requests are handled during each turn of the
+#               RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+#    throttle_limit: 80
+#    default_weight: 5
+#    weights:
+#      Keyspace1: 1
+#      Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primary
+# row index in terms of space versus time. The larger the interval,
+# the smaller and less effective the sampling will be. In technical
+# terms, the interval corresponds to the number of index entries that
+# are skipped between taking each sample. All the sampled entries
+# must fit in memory. Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best trade
+# offs. This value is not often changed, however if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without an impact on performance.
+index_interval: 128
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
\ No newline at end of file
diff --git a/test/clojurewerkz/titanium/conf.clj b/test/clojurewerkz/titanium/conf.clj
new file mode 100644
index 0000000..e969325
--- /dev/null
+++ b/test/clojurewerkz/titanium/conf.clj
@@ -0,0 +1,13 @@
+(ns clojurewerkz.titanium.conf
+  (:import (org.apache.commons.io FileUtils)))
+
+(def conf {:storage {:backend "embeddedcassandra"
+                     :hostname "127.0.0.1"
+                     :keyspace "titaniumtest"
+                     :cassandra-config-dir
+                     (str "file://"
+                          (System/getProperty "user.dir")
+                          "/resources/test-cassandra.yaml")}})
+
+(defn clear-db []
+  (FileUtils/deleteDirectory (java.io.File. "/tmp/titanium-test")))
diff --git a/test/clojurewerkz/titanium/element_test.clj b/test/clojurewerkz/titanium/element_test.clj
new file mode 100644
index 0000000..a403e78
--- /dev/null
+++ b/test/clojurewerkz/titanium/element_test.clj
@@ -0,0 +1,41 @@
+(ns clojurewerkz.titanium.element-test
+  (:use [clojure.test]
+        [clojurewerkz.titanium.conf :only (conf clear-db)])
+  (:import [com.thinkaurelius.titan.graphdb.relations RelationIdentifier])
+  (:require [clojurewerkz.titanium.graph :as g]
+            [clojurewerkz.titanium.vertices :as v]
+            [clojurewerkz.titanium.edges :as e]))
+
+(deftest element-test
+  (clear-db)
+  (g/open conf)
+
+  (testing "Get keys"
+    (g/transact! (let [a (v/create! {:name "v1" :a 1 :b 1})
+                       b (v/create! {:name "v2" :a 1 :b 1})
+                       c (e/connect! a :test-label b {:prop "e1" :a 1 :b 1})
+                       coll-a (v/keys a)
+                       coll-b (v/keys b)
+                       coll-c (v/keys c)]
+                   (is (= #{:name :a :b} coll-a coll-b))
+                   (is (= #{:prop :a :b} coll-c))
+                   (is (= clojure.lang.PersistentHashSet (type coll-a))))))
+
+  (testing "Get id"
+    (g/transact!
+     (let [a (v/create!)
+           b (v/create!)
+           c (e/connect! a :test-label b)]
+       (is (= java.lang.Long (type (v/id-of a))))
+       (is (= RelationIdentifier (type (e/id-of c)))))))
+
+  (testing "Remove property!"
+    (g/transact!
+     (let [a (v/create! {:a 1})
+           b (v/create!)
+           c (e/connect! a :test-label b {:a 1})]
+       (v/dissoc! a :a)
+       (v/dissoc! c :a)
+       (is (nil? (:a (v/to-map a))))
+       (is (nil? (:a (v/to-map c)))))))
+  (g/shutdown))
\ No newline at end of file
diff --git a/test/clojurewerkz/titanium/types_test.clj b/test/clojurewerkz/titanium/types_test.clj
new file mode 100644
index 0000000..94787b0
--- /dev/null
+++ b/test/clojurewerkz/titanium/types_test.clj
@@ -0,0 +1,91 @@
+(ns clojurewerkz.titanium.types-test
+  (:use [clojure.test]
+        [clojurewerkz.titanium.conf :only (clear-db conf)])
+  (:require [clojurewerkz.titanium.types :as tt]
+            [clojurewerkz.titanium.graph :as tg]))
+
+(deftest test-create-groups
+  (clear-db)
+  (tg/open conf)
+  (tg/transact!
+   (let [my-group-name "My-Group-Name"
+         my-group (tt/create-group 100 my-group-name)
+         default-group-id (.getID tt/default-group)]
+     (is (= my-group-name (.getName my-group))
+         "The group has the correct name")
+     (is (= 100 (.getID my-group))
+         "The group has the correct ID")
+     (is (not (= default-group-id (.getID my-group)))
+         "my-group has a different ID from the default group")))
+  (tg/shutdown))
+
+(deftest test-create-vertex-key
+  (clear-db)
+  (tg/open conf)
+  (tg/transact!
+   (testing "With no parameters"
+     (tt/create-vertex-key :first-key Integer)
+     ;; Get the key from the graph
+     (let [k (tt/get-type :first-key)]
+       (is (.isPropertyKey k) "the key is a property key")
+       (is (not (.isEdgeLabel k)) "the key is not an edge label")
+       (is (= "first-key" (.getName k)) "the key has the correct name")
+       (is (not (.isFunctional k)) "the key is not functional")
+       (is (not (.hasIndex k)) "the key is not indexed")
+       (is (not (.isUnique k)) "the key is not unique"))))
+
+  (tg/transact!
+   (testing "With parameters"
+     (tt/create-vertex-key :second-key Integer
+                           {:functional true
+                            :indexed true
+                            :unique true})
+     ;; Get the key from the graph
+     (let [k (tt/get-type :second-key)]
+       (is (.isPropertyKey k) "the key is a property key")
+       (is (not (.isEdgeLabel k)) "the key is not an edge label")
+       (is (= "second-key" (.getName k)) "the key has the correct name")
+       (is (.isFunctional k) "the key is functional")
+       (is (.hasIndex k) "the key is indexed")
+       (is (.isUnique k) "the key is unique"))))
+  (tg/shutdown))
+
+(deftest test-create-edge-label
+  (clear-db)
+  (tg/open conf)
+  (tg/transact!
+   (testing "With no parameters"
+     (tt/create-edge-label :first-label)
+     ;; Get the label from the graph
+     (let [lab (tt/get-type :first-label)]
+       (is (.isEdgeLabel lab) "the label is an edge label")
+       (is (not (.isPropertyKey lab)) "the label is not a property key")
+       (is (= "first-label" (.getName lab)) "the label has the correct name")
+       (is (not (.isFunctional lab)) "the label is not functional")
+       (is (not (.isSimple lab)) "the label is not simple")
+       (is (not (.isUndirected lab)) "the label is not undirected")
+       (is (not (.isUnidirected lab)) "the label is not unidirected")
+       (is (= tt/default-group (.getGroup lab)) "the label has the default group"))))
+  (tg/transact!
+   (testing "With parameters"
+
+     (tt/create-edge-label :second-label
+                           {:functional true
+                            :simple true
+                            :direction "undirected"})
+     ;; Get the label from the graph
+     (let [lab (tt/get-type :second-label)]
+       (is (.isEdgeLabel lab) "the label is an edge label")
+       (is (not (.isPropertyKey lab)) "the label is not a property key")
+       (is (= "second-label" (.getName lab)) "the label has the correct name")
+       (is (.isFunctional lab) "the label is functional")
+       (is (.isSimple lab) "the label is simple")
+       (is (.isUndirected lab) "the label is undirected")
+       (is (= tt/default-group (.getGroup lab)) "the label has the default group"))))
+  (tg/transact!
+   (testing "With a group"
+     (let [a-group (tt/create-group 20 "a-group")
+           the-label (tt/create-edge-label :lab {:group a-group})]
+       (is (.isEdgeLabel the-label) "the label is an edge label")
+       (is (= a-group (.getGroup the-label)) "the label has the correct group"))))
+  (tg/shutdown))
\ No newline at end of file
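Taken together, these files form the embedded-Cassandra test harness: test-cassandra.yaml keeps all node state under /tmp/titanium-test, conf.clj points the "embeddedcassandra" storage backend at that YAML file, and each test wipes the directory, opens the graph, runs its assertions inside a transaction, and shuts the graph down. Below is a minimal REPL-style sketch of that life cycle, using only the functions the tests above already call; the example namespace and property values are hypothetical.

(ns clojurewerkz.titanium.example            ; hypothetical scratch namespace
  (:use [clojurewerkz.titanium.conf :only (conf clear-db)])
  (:require [clojurewerkz.titanium.graph :as g]
            [clojurewerkz.titanium.vertices :as v]
            [clojurewerkz.titanium.edges :as e]))

;; Wipe /tmp/titanium-test so the embedded Cassandra node starts clean,
;; then open the graph against the "titaniumtest" keyspace from conf.
(clear-db)
(g/open conf)

;; All mutations happen inside a transaction, as in the tests above.
(g/transact!
 (let [a (v/create! {:name "a"})
       b (v/create! {:name "b"})]
   (e/connect! a :test-label b {:prop "example"})))

;; Release the graph (and the embedded Cassandra instance) when finished.
(g/shutdown)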