diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..fe731cc37 --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +.DS_Store +target +bin +build +.gradle +.springBeans +*.iml +*.ipr +*.iws +*.log +.classpath +.project +.settings +.cassandra diff --git a/README.adoc b/README.adoc index 44f8d465d..90799fd57 100644 --- a/README.adoc +++ b/README.adoc @@ -45,7 +45,7 @@ companies and individuals: * http://www.prowaveconsulting.com[Prowave Consulting] * http://www.scispike.com[SciSpike] -* Alexander Shvid +* http://www.vha.com[VHA] The following companies and individuals are also generously providing support: diff --git a/attic/cassandra/conf/cassandra.yaml b/attic/cassandra/conf/cassandra.yaml new file mode 100644 index 000000000..c6c96f715 --- /dev/null +++ b/attic/cassandra/conf/cassandra.yaml @@ -0,0 +1,664 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# initial_token allows you to specify tokens manually. 
While you can use # it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes # to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours +# Maximum throttle in KBs per second, per delivery thread. This will be +# reduced proportionally to the number of nodes in the cluster. (If there +# are two nodes in the cluster, each delivery thread will use the maximum +# rate; if there are three, each will throttle to half of the maximum, +# since we expect two nodes to be delivering hints simultaneously.) +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# The following setting populates the page cache on memtable flush and compaction +# WARNING: Enable this setting only when the whole node's data fits in memory. +# Defaults to: false +# populate_io_cache_on_flush: false + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. 
+authenticator: AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: AllowAllAuthorizer + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# The partitioner is responsible for distributing rows (by key) across +# nodes in the cluster. Any IPartitioner may be used, including your +# own as long as it is on the classpath. Out of the box, Cassandra +# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner +# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}. +# +# - RandomPartitioner distributes rows across the cluster evenly by md5. +# This is the default prior to 1.2 and is retained for compatibility. +# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128 +# Hash Function instead of md5. When in doubt, this is the best option. +# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows +# scanning rows in key order, but the ordering can generate hot spots +# for sequential insertion workloads. +# - OrderPreservingPartitioner is an obsolete form of BOP, that stores +# - keys in a less-efficient format and only works with keys that are +# UTF8-encoded Strings. +# - CollatingOPP collates according to EN,US rules rather than lexical byte +# ordering. Use this as an example if you need custom collation. 
+# +# See http://wiki.apache.org/cassandra/Operations for more on +# partitioners and token selection. +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. Cassandra +# will spread data evenly across them, subject to the granularity of +# the configured compaction strategy. +data_file_directories: + - .cassandra/var/lib/cassandra/data + +# commit log +commitlog_directory: .cassandra/var/lib/cassandra/commitlog + +# policy for data disk failures: +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# can still be inspected via JMX. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must contain the entire row, +# so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the key cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. 
+key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Maximum size of the row cache in memory. +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should +# safe the row cache. Caches are saved to saved_caches_directory as specified +# in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save +# Disabled by default, meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# The off-heap memory allocator. Affects storage engine metadata as +# well as caches. Experiments show that JEMAlloc saves some memory +# than the native GCC allocator (i.e., JEMalloc is more +# fragmentation-resistant). +# +# Supported values are: NativeAllocator, JEMallocAllocator +# +# If you intend to use JEMallocAllocator you have to install JEMalloc as library and +# modify cassandra-env.sh as directed in the file. +# +# Defaults to NativeAllocator +# memory_allocator: NativeAllocator + +# saved caches +saved_caches_directory: .cassandra/var/lib/cassandra/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait up to +# commitlog_sync_batch_window_in_ms milliseconds for other writes, before +# performing the sync. 
+# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 50 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. By default this allows 1024*(CPU cores) pending +# entries on the commitlog queue. If you are writing very large blobs, +# you should reduce that; 16*cores works reasonably well for 1MB blobs. +# It should be at least as large as the concurrent_writes setting. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 +# commitlog_periodic_queue_size: + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: ",," + - seeds: "127.0.0.1" + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. 
+# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 + +# Total memory to use for sstable-reading buffers. Defaults to +# the smaller of 1/4 of heap or 512MB. +# file_cache_size_in_mb: 512 + +# Total memory to use for memtables. Cassandra will flush the largest +# memtable when this much memory is used. +# If omitted, Cassandra will set it to 1/3 of the heap. +# memtable_total_space_in_mb: 2048 + +# Total space to use for commitlogs. Since commitlog segments are +# mmapped, and hence use up address space, the default size is 32 +# on 32-bit JVMs, and 1024 on 64-bit JVMs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Cassandra will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# commitlog_total_space_in_mb: 4096 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. If you have a large heap and many data directories, +# you can increase this value for better flush performance. +# By default this will be set to the amount of data directories defined. +#memtable_flush_writers: 1 + +# the number of full memtables to allow pending flush, that is, +# waiting for a writer thread. At a minimum, this should be set to +# the maximum number of secondary indexes created on a single CF. +memtable_flush_queue_size: 4 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSDs; not +# necessarily on platters. 
+trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +ssl_storage_port: 7001 + +# Address to bind to and tell other Cassandra nodes to connect to. You +# _must_ change this if you want multiple nodes to be able to +# communicate! +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing _if_ the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting this to 0.0.0.0 is always wrong. +listen_address: localhost + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +start_native_transport: true +# port for the CQL native transport to listen for clients on +native_transport_port: 9042 +# The maximum threads for handling requests when the native transport is used. +# This is similar to rpc_max_threads though the default differs slightly (and +# there is no native_transport_min_threads, idle threads will always be stopped +# after 30 seconds). +# native_transport_max_threads: 128 + +# Whether to start the thrift rpc server. +start_rpc: true + +# The address to bind the Thrift RPC service and native transport +# server -- clients connect here. +# +# Leaving this blank has the same effect it does for ListenAddress, +# (i.e. 
it will be based on the configured hostname of the node). +# +# Note that unlike ListenAddress above, it is allowed to specify 0.0.0.0 +# here if you want to listen on all interfaces but is not best practice +# as it is known to confuse the node auto-discovery features of some +# client drivers. +rpc_address: localhost +# port for Thrift to listen for clients on +rpc_port: 9160 + +# enable or disable keepalive on rpc connections +rpc_keepalive: true + +# Cassandra provides three out-of-the-box options for the RPC Server: +# +# sync -> One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. +# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provides no protection against clients overwhelming the server. 
You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. +# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# Uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it it is defined by net.ipv4.tcp_wmem +# See: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_wmem +# and: man tcp +# internode_send_buff_size_in_bytes: +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum message length). +thrift_framed_transport_size_in_mb: 15 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Add column indexes to a row after its contents reach this size. +# Increase if your column values are large, or if you have a very large +# number of columns. 
The competing causes are, Cassandra has to +# deserialize this much of the row to read a single column, so you want +# it to be small - at least if you do many partial-row reads - but all +# the index data is read for each access, so you don't want to generate +# that wastefully either. +column_index_size_in_kb: 64 + +# Size limit for rows being compacted in memory. Larger rows will spill +# over to disk and use a slower two-pass compaction process. A message +# will be logged specifying the row key. +in_memory_compaction_limit_in_mb: 64 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long running compactions. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the number of cores. +# Uncomment to make compaction mono-threaded, the pre-0.8 default. +#concurrent_compactors: 1 + +# Multi-threaded compaction. When enabled, each compaction will use +# up to one thread per core, plus one thread per sstable being merged. +# This is usually only useful for SSD-based hardware: otherwise, +# your concern is usually to get compaction to do LESS i/o (see: +# compaction_throughput_mb_per_sec), not more. +multithreaded_compaction: false + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this account for all types +# of compaction, including validation compaction. 
+compaction_throughput_mb_per_sec: 16 + +# Track cached row keys during compaction, and re-cache their new +# positions in the compacted sstable. Disable if you use really large +# key caches. +compaction_preheat_key_cache: true + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 10000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts. If disabled, replicas will assume that requests +# were forwarded to them instantly by the coordinator, which means that +# under overload conditions we will waste that much extra time processing +# already-timed-out requests. +# +# Warning: before enabling this property make sure to ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Enable socket timeout for streaming operation. 
+# When a timeout occurs during streaming, streaming is retried from the start +# of the current file. This _can_ involve re-streaming an important amount of +# data, so you should avoid setting the value too low. +# Default value is 0, which never timeout streams. +# streaming_socket_timeout_in_ms: 0 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This improves cache locality +# when disabling read repair, which can further improve throughput. +# Only appropriate for single-datacenter deployments. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - GossipingPropertyFileSnitch +# The rack and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via gossip. If +# cassandra-topology.properties exists, it is used as a fallback, allowing +# migration from the PropertyFileSnitch. +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's +# IP address, respectively. 
Unless this happens to match your +# deployment conventions (as it did Facebook's), this is best used +# as an example of writing a custom Snitch class. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. 
This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# - default_weight -- default_weight is optional and allows for +# overriding the default which is 1. +# - weights -- Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# Enable or disable inter-node encryption +# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that +# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher +# suite for authentication, key exchange and encryption of the actual data transfers. 
+# NOTE: No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set trustore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +internode_compression: all + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. 
+inter_dc_tcp_nodelay: false + +# Enable or disable kernel page cache preheating from contents of the key cache after compaction. +# When enabled it would preheat only first "page" (4KB) of each row to optimize +# for sequential access. Note: This could be harmful for fat rows, see CASSANDRA-4937 +# for further details on that topic. +preheat_kernel_page_cache: false diff --git a/attic/get-and-start-cassandra b/attic/get-and-start-cassandra new file mode 100755 index 000000000..2c045e19e --- /dev/null +++ b/attic/get-and-start-cassandra @@ -0,0 +1,23 @@ +CASSANDRA_DIST=.cassandra/dist +mkdir -p $CASSANDRA_DIST + +curl -sL http://downloads.datastax.com/community/dsc.tar.gz > $CASSANDRA_DIST/dist.tgz +tar -xzf $CASSANDRA_DIST/dist.tgz -C $CASSANDRA_DIST + +CASSANDRA_HOME=`find $CASSANDRA_DIST -name 'dsc-cassandra-*' -print` +if [ -z "$CASSANDRA_HOME" ]; then + echo "Couldn't determine CASSANDRA_HOME" + exit 1 +fi +echo "CASSANDRA_HOME is $CASSANDRA_HOME" + +# these directories must match what's in test-support/cassandra/conf/cassandra.yaml +mkdir -p .cassandra/var/lib/cassandra +mkdir -p .cassandra/var/log/cassandra + +mv $CASSANDRA_HOME/conf/cassandra.yaml $CASSANDRA_HOME/conf/cassandra.yaml.original +cp test-support/cassandra/conf/cassandra.yaml $CASSANDRA_HOME/conf/cassandra.yaml + +$CASSANDRA_HOME/bin/cassandra -p $CASSANDRA_HOME/cassandra.pid + +sleep 5 diff --git a/attic/gradle/build.gradle b/attic/gradle/build.gradle new file mode 100644 index 000000000..b2419f178 --- /dev/null +++ b/attic/gradle/build.gradle @@ -0,0 +1,231 @@ +buildscript { + repositories { + maven { url 'http://repo.springsource.org/plugins-release' } + } + dependencies { + classpath 'org.springframework.build.gradle:bundlor-plugin:0.1.2' + classpath 'org.springframework.build.gradle:docbook-reference-plugin:0.1.5' + } +} + +description = 'Spring Data Cassandra' +group = 'org.springframework.data' + +repositories { + maven { url "http://repo1.maven.org/maven2" } + maven { url 
"http://repo.springsource.org/libs-snapshot" } + maven { url "http://repo.springsource.org/plugins-release" } + maven { url "repo" } +} + +apply plugin: "java" +apply plugin: 'eclipse' +apply plugin: 'idea' +apply from: "$rootDir/maven.gradle" +apply plugin: 'docbook-reference' +apply plugin: 'bundlor' + +[compileJava, compileTestJava]*.options*.compilerArgs = ["-Xlint:-serial"] + +// Common dependencies +dependencies { + // Logging + compile "org.slf4j:slf4j-api:$slf4jVersion" + compile "org.slf4j:jcl-over-slf4j:$slf4jVersion" + testRuntime "log4j:log4j:$log4jVersion" + testRuntime "org.slf4j:slf4j-log4j12:$slf4jVersion" + + // Spring Framework + compile("org.springframework:spring-core:$springVersion") { + exclude module: "commons-logging" + } + compile "org.springframework:spring-context-support:$springVersion" + compile "org.springframework:spring-tx:$springVersion" + compile("org.springframework:spring-oxm:$springVersion", optional) + compile "org.springframework.data:spring-data-commons:$springDataVersion" + + // Cassandra Drivers + compile "com.datastax.cassandra:cassandra-driver-core:$datastaxVersion" + compile "org.apache.cassandra:cassandra-all:$cassandraVersion" + compile "io.netty:netty:$nettyVersion" + + // Mappers + compile("org.codehaus.jackson:jackson-mapper-asl:$jacksonVersion", optional) + compile("commons-beanutils:commons-beanutils-core:1.8.3", optional) + + // Testing + testCompile "junit:junit:$junitVersion" + testCompile "org.springframework:spring-test:$springVersion" + testCompile "org.mockito:mockito-all:$mockitoVersion" + testCompile("javax.annotation:jsr250-api:1.0", optional) + testCompile("com.thoughtworks.xstream:xstream:1.3", optional) + testCompile "org.cassandraunit:cassandra-unit:$cassandraUnitVersion" + testCompile "org.cassandraunit:cassandra-unit-spring:$cassandraUnitVersion" + testCompile "cglib:cglib-nodep:$cglibVersion" + +} + +sourceCompatibility = 1.6 +targetCompatibility = 1.6 + +javadoc { + ext.srcDir = 
file("${projectDir}/src/main/doc") + ext.destinationDir = file("${buildDir}/docs/javadoc") + ext.tmpDir = file("${buildDir}/api-work") + + configure(options) { + stylesheetFile = file("${srcDir}/spring-javadoc.css") + //overview = "${srcDir}/overview.html" + docFilesSubDirs = true + + outputLevel = org.gradle.external.javadoc.JavadocOutputLevel.QUIET + breakIterator = true + showFromProtected() + groups = [ + 'Spring Cassandra' : ['org.springframework.cassandra*'], + 'Spring Data Cassandra' : ['org.springframework.data.cassandra*'], + ] + + links = [ + "http://static.springframework.org/spring/docs/3.0.x/javadoc-api", + "http://download.oracle.com/javase/6/docs/api", + "http://jackson.codehaus.org/1.8.2/javadoc" + ] + + exclude "org/springframework/data/cassandra/config/**" + } + + logger.error("BuildDir => ${buildDir}"); + logger.error("DestDir => ${destinationDir}"); + logger.error("ExtDestDir => ${ext.destinationDir}"); + + copy { + from "src/main/doc/resources" + into "${ext.destinationDir}/resources" + include '**/*' + } + + title = "${rootProject.description} ${version} API" +} + +bundlor { + manifestTemplate = file("${projectDir}/template.mf").text +} + + +jar { + manifest.attributes['Implementation-Title'] = 'spring-data-cassandra' + manifest.attributes['Implementation-Version'] = project.version + + from("$rootDir/docs/src/info") { + include "license.txt" + include "notice.txt" + into "META-INF" + expand(copyright: new Date().format('yyyy'), version: project.version) + } +} + +task sourcesJar(type: Jar, dependsOn:classes) { + classifier = 'sources' + from sourceSets.main.allJava +} + +task javadocJar(type: Jar) { + classifier = 'javadoc' + from javadoc +} + +reference { + sourceDir = file('docs/src/reference/docbook') +} + +task docsZip(type: Zip) { + group = 'Distribution' + classifier = 'docs' + description = "Builds -${classifier} archive containing api and reference for deployment" + + from('docs/src/info') { + include 'changelog.txt' + } + + from 
(javadoc) { + into 'api' + } + + from (reference) { + into 'reference' + } +} + +task schemaZip(type: Zip) { + group = 'Distribution' + classifier = 'schema' + description = "Builds -${classifier} archive containing all XSDs for deployment" + + def Properties schemas = new Properties(); + + sourceSets.main.resources.find { + it.path.endsWith('META-INF' + File.separator + 'spring.schemas') + }?.withInputStream { schemas.load(it) } + + for (def key : schemas.keySet()) { + def shortName = key.replaceAll(/http.*schema.(.*).spring-.*/, '$1') + def alias = key.replaceAll(/http.*schema.(.*).(spring-.*)/, '$2') + assert shortName != key + File xsdFile = sourceSets.main.resources.find { + it.path.replace('\\', '/').endsWith(schemas.get(key)) + } + assert xsdFile != null + + into (shortName) { + from xsdFile.path + rename { String fileName -> alias } + } + } +} + +task distZip(type: Zip, dependsOn: [jar, docsZip, schemaZip, sourcesJar, javadocJar]) { + group = 'Distribution' + classifier = 'dist' + description = "Builds -${classifier} archive, containing all jars and docs, " + + "suitable for community download page." 
+ + ext.zipRootDir = "${project.name}-${project.version}" + + into (zipRootDir) { + from('docs/src/info') { + include 'readme.txt' + include 'license.txt' + include 'notice.txt' + expand(copyright: new Date().format('yyyy'), version: project.version) + } + + from(zipTree(docsZip.archivePath)) { + into "docs" + } + + from(zipTree(schemaZip.archivePath)) { + into "schema" + } + into ("dist") { + from rootProject.collect { project -> project.libsDir } + } + } +} + +artifacts { + archives sourcesJar + archives javadocJar + + archives docsZip + archives schemaZip + archives distZip +} + +task wrapper(type: Wrapper) { + description = 'Generates gradlew[.bat] scripts' + gradleVersion = '1.2' +} + +assemble.dependsOn = ['jar', 'sourcesJar'] +defaultTasks 'build' \ No newline at end of file diff --git a/attic/gradle/gradle.properties b/attic/gradle/gradle.properties new file mode 100644 index 000000000..30c49caf8 --- /dev/null +++ b/attic/gradle/gradle.properties @@ -0,0 +1,26 @@ +## Dependecies Version + +# Logging +log4jVersion = 1.2.17 +cassandraUnitVersion=1.2.0.1 +cglibVersion=2.2.2 +slf4jVersion = 1.6.6 + +# Common libraries +springVersion = 3.1.4.RELEASE +springDataVersion = 1.5.3.RELEASE +jacksonVersion = 1.8.8 + +# Testing +junitVersion = 4.8.1 +mockitoVersion = 1.8.5 + +# Drivers +datastaxVersion = 1.0.4-dse +cassandraVersion = 1.2.0 +nettyVersion = 3.6.2.Final + +# -------------------- +# Project wide version +# -------------------- +version=1.2.0.BUILD-SNAPSHOT \ No newline at end of file diff --git a/attic/gradle/maven.gradle b/attic/gradle/maven.gradle new file mode 100644 index 000000000..04182b3ea --- /dev/null +++ b/attic/gradle/maven.gradle @@ -0,0 +1,63 @@ +apply plugin: 'maven' + +ext.optionalDeps = [] +ext.providedDeps = [] + +ext.optional = { optionalDeps << it } +ext.provided = { providedDeps << it } + +install { + repositories.mavenInstaller { + customizePom(pom, project) + } +} + +def customizePom(pom, gradleProject) { + pom.whenConfigured { 
generatedPom -> + // respect 'optional' and 'provided' dependencies + gradleProject.optionalDeps.each { dep -> + generatedPom.dependencies.find { it.artifactId == dep.name }?.optional = true + } + gradleProject.providedDeps.each { dep -> + generatedPom.dependencies.find { it.artifactId == dep.name }?.scope = 'provided' + } + + // eliminate test-scoped dependencies (no need in maven central poms) + generatedPom.dependencies.removeAll { dep -> + dep.scope == 'test' + } + + // add all items necessary for maven central publication + generatedPom.project { + name = gradleProject.description + description = gradleProject.description + url = 'http://github.com/shvid/spring-data-cassandra' + organization { + name = 'Mirantis' + url = 'http://www.mirantis.com/spring-data/cassandra' + } + licenses { + license { + name 'The Apache Software License, Version 2.0' + url 'http://www.apache.org/licenses/LICENSE-2.0.txt' + distribution 'repo' + } + } + scm { + url = 'http://github.com/shvid/spring-data-cassandra' + connection = 'scm:git:git://github.com/shvid/spring-data-cassandra' + developerConnection = 'scm:git:git://github.com/shvid/spring-data-cassandra' + } + developers { + developer { + id = 'shvid' + name = 'Alex Shvid' + email = 'aschwid@mirantis.com' + properties { + twitter = 'alexshvid' + } + } + } + } + } +} \ No newline at end of file diff --git a/attic/gradle/settings.gradle b/attic/gradle/settings.gradle new file mode 100644 index 000000000..062310fab --- /dev/null +++ b/attic/gradle/settings.gradle @@ -0,0 +1 @@ +rootProject.name = 'spring-data-cassandra' \ No newline at end of file diff --git a/etc/eclipse-formatting.xml b/etc/eclipse-formatting.xml new file mode 100644 index 000000000..c74468778 --- /dev/null +++ b/etc/eclipse-formatting.xml @@ -0,0 +1,291 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/pom.xml b/pom.xml new file mode 100644 index 000000000..b6ad3dc88 --- /dev/null +++ b/pom.xml @@ -0,0 +1,288 @@ + + + + 4.0.0 + + org.springframework.data + spring-data-cassandra-parent + 1.0.0.BUILD-SNAPSHOT + pom + + Spring Data Cassandra + Cassandra support for Spring Data + http://www.springsource.org/spring-data/cassandra + + + org.springframework.data.build + spring-data-parent + 1.2.0.RELEASE + ../spring-data-build/parent/pom.xml + + + + spring-cassandra + spring-data-cassandra + spring-data-cassandra-distribution + + + + UTF-8 + UTF-8 + multi + spring-data-cassandra + 1.6.2.RELEASE + 1.2.0.1 + 1.0.4-dse + 2.16 + + + + + madams + Matthew T. Adams + matthew dot adams at scispike.com + SciSpike Inc. + http://www.scispike.com + + Project Lead + Developer + + -6 + + + dwebb + David Webb + dwebb at prowaveconsulting.com + Prowave Consulting Inc. 
+ http://www.prowaveconsulting.com + + Project Lead + Developer + + -5 + + + ashvid + Alex Shvid + a at shvid.com + + Project Lead + Developer + + -8 + + + + + + + ${project.groupId} + spring-cassandra + ${project.version} + + + com.datastax.cassandra + cassandra-driver-core + ${cassandra-driver-core.version} + + + slf4j-log4j12 + org.slf4j + + + + + + + org.springframework + spring-context + ${spring} + + + org.springframework + spring-tx + ${spring} + + + org.springframework + spring-beans + ${spring} + + + org.springframework + spring-core + ${spring} + + + commons-logging + commons-logging + + + + + org.springframework + spring-expression + ${spring} + + + + + org.slf4j + log4j-over-slf4j + ${slf4j} + test + + + + org.slf4j + jul-to-slf4j + ${slf4j} + test + + + + + javax.enterprise + cdi-api + ${cdi} + provided + true + + + + cglib + cglib-nodep + 2.2.2 + test + + + + org.xerial.snappy + snappy-java + 1.1.0.1 + test + + + + org.codehaus.jackson + jackson-mapper-asl + 1.9.13 + test + + + + org.codehaus.jackson + jackson-core-asl + 1.9.13 + test + + + + org.cassandraunit + cassandra-unit + ${cassandra-unit.version} + test + + + cassandra-all + org.apache.cassandra + + + + + + org.cassandraunit + cassandra-unit-spring + ${cassandra-unit.version} + test + + + + javax.el + el-api + ${cdi} + test + + + + org.hibernate + hibernate-validator + 4.2.0.Final + test + + + + joda-time + joda-time + ${jodatime} + test + + + + + + + spring-lib-release + http://repo.springsource.org/libs-release-local + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + 2.8 + + + org.apache.maven.plugins + maven-surefire-plugin + + -Xmx2048m -XX:MaxPermSize=512m + methods + 10 + false + + **/test/unit/**/*.java + + + **/test/integration/**/*.java + **/test/performance/**/*.java + + + src/test/resources/logging.properties + + + + + org.apache.maven.plugins + maven-failsafe-plugin + ${failsafe.version} + + always + -Xmx2048m -XX:MaxPermSize=512m + false + + 
**/test/integration/**/*.java + + + **/test/unit/**/*.java + **/test/performance/**/*.java + + + src/test/resources/logging.properties + + + + + + integration-test + verify + + + + + + + diff --git a/spring-cassandra/pom.xml b/spring-cassandra/pom.xml new file mode 100644 index 000000000..b3afd4fd1 --- /dev/null +++ b/spring-cassandra/pom.xml @@ -0,0 +1,92 @@ + + + + 4.0.0 + + spring-cassandra + + Spring Cassandra - Core + Cassandra support for Spring + + + org.springframework.data + spring-data-cassandra-parent + 1.0.0.BUILD-SNAPSHOT + ../pom.xml + + + + 1.0.0.GA + + + + + org.springframework + spring-context + + + org.springframework + spring-beans + + + org.springframework + spring-core + + + org.springframework + spring-expression + + + org.springframework + spring-tx + + + com.datastax.cassandra + cassandra-driver-core + + + log4j + log4j + + + + + javax.enterprise + cdi-api + provided + true + + + org.codehaus.jackson + jackson-mapper-asl + test + + + org.codehaus.jackson + jackson-core-asl + test + + + org.cassandraunit + cassandra-unit + test + + + slf4j-log4j12 + org.slf4j + + + + + cglib + cglib-nodep + test + + + javax.el + el-api + test + + + diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/CassandraClusterFactoryBean.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/CassandraClusterFactoryBean.java new file mode 100644 index 000000000..cdd63f6c4 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/CassandraClusterFactoryBean.java @@ -0,0 +1,346 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.config; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.DisposableBean; +import org.springframework.beans.factory.FactoryBean; +import org.springframework.beans.factory.InitializingBean; +import org.springframework.cassandra.core.CassandraTemplate; +import org.springframework.cassandra.core.cql.generator.CreateKeyspaceCqlGenerator; +import org.springframework.cassandra.core.cql.generator.DropKeyspaceCqlGenerator; +import org.springframework.cassandra.core.keyspace.CreateKeyspaceSpecification; +import org.springframework.cassandra.core.keyspace.DropKeyspaceSpecification; +import org.springframework.cassandra.core.keyspace.KeyspaceNameSpecification; +import org.springframework.cassandra.support.CassandraExceptionTranslator; +import org.springframework.dao.DataAccessException; +import org.springframework.dao.support.PersistenceExceptionTranslator; +import org.springframework.util.StringUtils; + +import com.datastax.driver.core.AuthProvider; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.HostDistance; +import com.datastax.driver.core.PoolingOptions; +import com.datastax.driver.core.ProtocolOptions.Compression; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.SocketOptions; +import com.datastax.driver.core.policies.LoadBalancingPolicy; +import com.datastax.driver.core.policies.ReconnectionPolicy; +import 
com.datastax.driver.core.policies.RetryPolicy; + +/** + * Convenient factory for configuring a Cassandra Cluster. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +public class CassandraClusterFactoryBean implements FactoryBean, InitializingBean, DisposableBean, + PersistenceExceptionTranslator { + + public static final String DEFAULT_CONTACT_POINTS = "localhost"; + public static final boolean DEFAULT_METRICS_ENABLED = true; + public static final int DEFAULT_PORT = 9042; + + protected static final Logger log = LoggerFactory.getLogger(CassandraClusterFactoryBean.class); + + private Cluster cluster; + + /** + * Comma-delimited string of servers. + */ + private String contactPoints = DEFAULT_CONTACT_POINTS; + private int port = CassandraClusterFactoryBean.DEFAULT_PORT; + private CompressionType compressionType; + private PoolingOptionsConfig localPoolingOptions; + private PoolingOptionsConfig remotePoolingOptions; + private SocketOptionsConfig socketOptions; + private AuthProvider authProvider; + private LoadBalancingPolicy loadBalancingPolicy; + private ReconnectionPolicy reconnectionPolicy; + private RetryPolicy retryPolicy; + private boolean metricsEnabled = DEFAULT_METRICS_ENABLED; + private List keyspaceCreations = new ArrayList(); + private List keyspaceDrops = new ArrayList(); + private List startupScripts = new ArrayList(); + private List shutdownScripts = new ArrayList(); + + private final PersistenceExceptionTranslator exceptionTranslator = new CassandraExceptionTranslator(); + + @Override + public Cluster getObject() throws Exception { + return cluster; + } + + @Override + public Class getObjectType() { + return Cluster.class; + } + + @Override + public boolean isSingleton() { + return true; + } + + @Override + public DataAccessException translateExceptionIfPossible(RuntimeException ex) { + return exceptionTranslator.translateExceptionIfPossible(ex); + } + + @Override + public void afterPropertiesSet() throws Exception { + + if 
(!StringUtils.hasText(contactPoints)) { + throw new IllegalArgumentException("at least one server is required"); + } + + Cluster.Builder builder = Cluster.builder(); + + builder.addContactPoints(StringUtils.commaDelimitedListToStringArray(contactPoints)).withPort(port); + + if (compressionType != null) { + builder.withCompression(convertCompressionType(compressionType)); + } + + if (localPoolingOptions != null) { + builder.withPoolingOptions(configPoolingOptions(HostDistance.LOCAL, localPoolingOptions)); + } + + if (remotePoolingOptions != null) { + builder.withPoolingOptions(configPoolingOptions(HostDistance.REMOTE, remotePoolingOptions)); + } + + if (socketOptions != null) { + builder.withSocketOptions(configSocketOptions(socketOptions)); + } + + if (authProvider != null) { + builder.withAuthProvider(authProvider); + } + + if (loadBalancingPolicy != null) { + builder.withLoadBalancingPolicy(loadBalancingPolicy); + } + + if (reconnectionPolicy != null) { + builder.withReconnectionPolicy(reconnectionPolicy); + } + + if (retryPolicy != null) { + builder.withRetryPolicy(retryPolicy); + } + + if (!metricsEnabled) { + builder.withoutMetrics(); + } + + cluster = builder.build(); + executeSpecsAndScripts(keyspaceCreations, startupScripts); + } + + protected void executeSpecsAndScripts(@SuppressWarnings("rawtypes") List specs, List scripts) { + + Session system = null; + CassandraTemplate template = null; + + try { + if (specs != null) { + system = specs.size() == 0 ? null : cluster.connect(); + template = system == null ? null : new CassandraTemplate(system); + + Iterator i = specs.iterator(); + while (i.hasNext()) { + KeyspaceNameSpecification spec = (KeyspaceNameSpecification) i.next(); + String cql = (spec instanceof CreateKeyspaceSpecification) ? 
new CreateKeyspaceCqlGenerator( + (CreateKeyspaceSpecification) spec).toCql() : new DropKeyspaceCqlGenerator( + (DropKeyspaceSpecification) spec).toCql(); + + if (log.isInfoEnabled()) { + log.info("executing CQL [{}]", cql); + } + + template.execute(cql); + } + } + + if (scripts != null) { + + if (system == null) { + system = scripts.size() == 0 ? null : cluster.connect(); + } + + if (template == null) { + template = system == null ? null : new CassandraTemplate(system); + } + + for (String script : scripts) { + + if (log.isInfoEnabled()) { + log.info("executing raw CQL [{}]", script); + } + + template.execute(script); + } + } + } finally { + + if (system != null) { + system.shutdown(); + } + } + } + + @Override + public void destroy() throws Exception { + + executeSpecsAndScripts(keyspaceDrops, shutdownScripts); + cluster.shutdown(); + } + + /** + * Sets a comma-delimited string of the contact points (hosts) to connect to. + */ + public void setContactPoints(String contactPoints) { + this.contactPoints = contactPoints; + } + + public void setPort(int port) { + this.port = port; + } + + public void setCompressionType(CompressionType compressionType) { + this.compressionType = compressionType; + } + + public void setLocalPoolingOptions(PoolingOptionsConfig localPoolingOptions) { + this.localPoolingOptions = localPoolingOptions; + } + + public void setRemotePoolingOptions(PoolingOptionsConfig remotePoolingOptions) { + this.remotePoolingOptions = remotePoolingOptions; + } + + public void setSocketOptions(SocketOptionsConfig socketOptions) { + this.socketOptions = socketOptions; + } + + public void setAuthProvider(AuthProvider authProvider) { + this.authProvider = authProvider; + } + + public void setLoadBalancingPolicy(LoadBalancingPolicy loadBalancingPolicy) { + this.loadBalancingPolicy = loadBalancingPolicy; + } + + public void setReconnectionPolicy(ReconnectionPolicy reconnectionPolicy) { + this.reconnectionPolicy = reconnectionPolicy; + } + + public void 
setRetryPolicy(RetryPolicy retryPolicy) { + this.retryPolicy = retryPolicy; + } + + public void setMetricsEnabled(boolean metricsEnabled) { + this.metricsEnabled = metricsEnabled; + } + + public void setKeyspaceCreations(List specifications) { + this.keyspaceCreations = specifications; + } + + public List getKeyspaceCreations() { + return keyspaceCreations; + } + + public void setKeyspaceDrops(List specifications) { + this.keyspaceDrops = specifications; + } + + public List getKeyspaceDrops() { + return keyspaceDrops; + } + + public void setStartupScripts(List scripts) { + this.startupScripts = scripts; + } + + public void setShutdownScripts(List scripts) { + this.shutdownScripts = scripts; + } + + private static Compression convertCompressionType(CompressionType type) { + switch (type) { + case NONE: + return Compression.NONE; + case SNAPPY: + return Compression.SNAPPY; + } + throw new IllegalArgumentException("unknown compression type " + type); + } + + private static PoolingOptions configPoolingOptions(HostDistance hostDistance, PoolingOptionsConfig config) { + PoolingOptions poolingOptions = new PoolingOptions(); + + if (config.getMinSimultaneousRequests() != null) { + poolingOptions + .setMinSimultaneousRequestsPerConnectionThreshold(hostDistance, config.getMinSimultaneousRequests()); + } + if (config.getMaxSimultaneousRequests() != null) { + poolingOptions + .setMaxSimultaneousRequestsPerConnectionThreshold(hostDistance, config.getMaxSimultaneousRequests()); + } + if (config.getCoreConnections() != null) { + poolingOptions.setCoreConnectionsPerHost(hostDistance, config.getCoreConnections()); + } + if (config.getMaxConnections() != null) { + poolingOptions.setMaxConnectionsPerHost(hostDistance, config.getMaxConnections()); + } + + return poolingOptions; + } + + private static SocketOptions configSocketOptions(SocketOptionsConfig config) { + SocketOptions socketOptions = new SocketOptions(); + + if (config.getConnectTimeoutMls() != null) { + 
socketOptions.setConnectTimeoutMillis(config.getConnectTimeoutMls()); + } + if (config.getKeepAlive() != null) { + socketOptions.setKeepAlive(config.getKeepAlive()); + } + if (config.getReuseAddress() != null) { + socketOptions.setReuseAddress(config.getReuseAddress()); + } + if (config.getSoLinger() != null) { + socketOptions.setSoLinger(config.getSoLinger()); + } + if (config.getTcpNoDelay() != null) { + socketOptions.setTcpNoDelay(config.getTcpNoDelay()); + } + if (config.getReceiveBufferSize() != null) { + socketOptions.setReceiveBufferSize(config.getReceiveBufferSize()); + } + if (config.getSendBufferSize() != null) { + socketOptions.setSendBufferSize(config.getSendBufferSize()); + } + + return socketOptions; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/CassandraSessionFactoryBean.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/CassandraSessionFactoryBean.java new file mode 100644 index 000000000..ecc900e2e --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/CassandraSessionFactoryBean.java @@ -0,0 +1,146 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.config; + +import java.util.ArrayList; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.DisposableBean; +import org.springframework.beans.factory.FactoryBean; +import org.springframework.beans.factory.InitializingBean; +import org.springframework.cassandra.core.CassandraTemplate; +import org.springframework.cassandra.support.CassandraExceptionTranslator; +import org.springframework.dao.DataAccessException; +import org.springframework.dao.support.PersistenceExceptionTranslator; +import org.springframework.util.Assert; +import org.springframework.util.StringUtils; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Session; + +/** + * Factory for configuring a Cassandra {@link Session}, which is a thread-safe singleton. As such, it is sufficient to + * have one {@link Session} per application and keyspace. + * + * @author Alex Shvid + * @author Matthew T. 
Adams + */ + +public class CassandraSessionFactoryBean implements FactoryBean, InitializingBean, DisposableBean, + PersistenceExceptionTranslator { + + private static final Logger log = LoggerFactory.getLogger(CassandraSessionFactoryBean.class); + + private Cluster cluster; + private Session session; + private String keyspaceName; + private List startupScripts = new ArrayList(); + private List shutdownScripts = new ArrayList(); + private final PersistenceExceptionTranslator exceptionTranslator = new CassandraExceptionTranslator(); + + @Override + public Session getObject() { + return session; + } + + @Override + public Class getObjectType() { + return Session.class; + } + + @Override + public boolean isSingleton() { + return true; + } + + @Override + public DataAccessException translateExceptionIfPossible(RuntimeException ex) { + return exceptionTranslator.translateExceptionIfPossible(ex); + } + + @Override + public void afterPropertiesSet() throws Exception { + + if (cluster == null) { + throw new IllegalArgumentException("at least one cluster is required"); + } + + session = StringUtils.hasText(keyspaceName) ? cluster.connect(keyspaceName) : cluster.connect(); + executeScripts(startupScripts); + } + + /** + * Executes given scripts. Session must be connected when this method is called. + */ + protected void executeScripts(List scripts) { + + if (scripts == null) { + return; + } + + CassandraTemplate template = new CassandraTemplate(session); + + for (String script : scripts) { + + if (log.isInfoEnabled()) { + log.info("executing raw CQL [{}]", script); + } + + template.execute(script); + } + } + + @Override + public void destroy() throws Exception { + + executeScripts(shutdownScripts); + session.shutdown(); + } + + /** + * Sets the keyspace name to connect to. Using null, empty string, or only whitespace will cause the + * system keyspace to be used. 
+ */ + public void setKeyspaceName(String keyspaceName) { + this.keyspaceName = keyspaceName; + } + + /** + * Sets the cluster to use. Must not be null. + */ + public void setCluster(Cluster cluster) { + if (cluster == null) { + throw new IllegalArgumentException("cluster must not be null"); + } + this.cluster = cluster; + } + + /** + * Sets CQL scripts to be executed immediately after the session is connected. + */ + public void setStartupScripts(List scripts) { + this.startupScripts = scripts; + } + + /** + * Sets CQL scripts to be executed immediately before the session is shutdown. + */ + public void setShutdownScripts(List scripts) { + this.shutdownScripts = scripts; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/CassandraTemplateFactoryBean.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/CassandraTemplateFactoryBean.java new file mode 100644 index 000000000..660ac9c85 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/CassandraTemplateFactoryBean.java @@ -0,0 +1,63 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.config; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.FactoryBean; +import org.springframework.beans.factory.InitializingBean; +import org.springframework.cassandra.core.CassandraOperations; +import org.springframework.cassandra.core.CassandraTemplate; + +import com.datastax.driver.core.Session; + +/** + * Factory for configuring a {@link CassandraTemplate}. + * + * @author Matthew T. Adams + */ +public class CassandraTemplateFactoryBean implements FactoryBean, InitializingBean { + + private static final Logger log = LoggerFactory.getLogger(CassandraTemplateFactoryBean.class); + + private CassandraTemplate template; + private Session session; + + public CassandraOperations getObject() { + return template; + } + + public Class getObjectType() { + return CassandraOperations.class; + } + + public boolean isSingleton() { + return true; + } + + public void afterPropertiesSet() throws Exception { + + if (session == null) { + throw new IllegalStateException("session is required"); + } + + this.template = new CassandraTemplate(session); + } + + public void setSession(Session session) { + this.session = session; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/CompressionType.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/CompressionType.java new file mode 100644 index 000000000..4e9248ada --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/CompressionType.java @@ -0,0 +1,25 @@ +/* + * Copyright 2010-2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.config; + +/** + * Simple enumeration for the various compression types. + * + * @author Alex Shvid + */ +public enum CompressionType { + NONE, SNAPPY; +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/KeyspaceAttributes.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/KeyspaceAttributes.java new file mode 100644 index 000000000..2f5e07303 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/KeyspaceAttributes.java @@ -0,0 +1,121 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.config; + +import java.util.HashMap; +import java.util.Map; + +import org.springframework.cassandra.core.keyspace.DefaultOption; +import org.springframework.cassandra.core.keyspace.KeyspaceOption; +import org.springframework.cassandra.core.keyspace.Option; +import org.springframework.cassandra.core.util.MapBuilder; + +/** + * Keyspace attributes. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +public class KeyspaceAttributes { + + public static final String SIMPLE_REPLICATION_STRATEGY = "SimpleStrategy"; + public static final String NETWORK_TOPOLOGY_REPLICATION_STRATEGY = "NetworkTopologyStrategy"; + + public static final String DEFAULT_REPLICATION_STRATEGY = SIMPLE_REPLICATION_STRATEGY; + public static final long DEFAULT_REPLICATION_FACTOR = 1; + public static final boolean DEFAULT_DURABLE_WRITES = true; + + /** + * Returns a map of {@link Option}s suitable as the value of a {@link KeyspaceOption#REPLICATION} option with + * replication strategy class "SimpleStrategy" and with a replication factor of one. + */ + public static Map newSimpleReplication() { + return newSimpleReplication(DEFAULT_REPLICATION_FACTOR); + } + + /** + * Returns a map of {@link Option}s suitable as the value of a {@link KeyspaceOption#REPLICATION} option with + * replication strategy class "SimpleStrategy" and with a replication factor equal to that given. 
+ */ + public static Map newSimpleReplication(long replicationFactor) { + return MapBuilder.map(Option.class, Object.class) + .entry(new DefaultOption("class", String.class, true, false, true), SIMPLE_REPLICATION_STRATEGY) + .entry(new DefaultOption("replication_factor", Long.class, true, false, false), replicationFactor).build(); + } + + /** + * Returns a map of {@link Option}s suitable as the value of a {@link KeyspaceOption#REPLICATION} option with + * replication strategy class "NetworkTopologyStrategy" and with data centers each with their corresponding + * replication factors. + */ + public static Map newNetworkReplication(DataCenterReplication... dataCenterReplications) { + + MapBuilder builder = MapBuilder.map(Option.class, Object.class).entry( + new DefaultOption("class", String.class, true, false, true), NETWORK_TOPOLOGY_REPLICATION_STRATEGY); + + for (DataCenterReplication dcr : dataCenterReplications) { + builder.entry(new DefaultOption(dcr.dataCenter, Long.class, true, false, false), dcr.replicationFactor); + } + + return builder.build(); + } + + /** + * Simple data structure to be used when setting the replication factor for a given data center. 
+ */ + public static class DataCenterReplication { + public String dataCenter; + public long replicationFactor; + + public DataCenterReplication(String dataCenter, long replicationFactor) { + this.dataCenter = dataCenter; + this.replicationFactor = replicationFactor; + } + } + + private String replicationStrategy = DEFAULT_REPLICATION_STRATEGY; + private long replicationFactor = DEFAULT_REPLICATION_FACTOR; + private boolean durableWrites = DEFAULT_DURABLE_WRITES; + private Map replicasPerNodeByDataCenter = new HashMap(); + + public String getReplicationStrategy() { + return replicationStrategy; + } + + public void setReplicationStrategy(String replicationStrategy) { + this.replicationStrategy = replicationStrategy; + } + + public long getReplicationFactor() { + return replicationFactor; + } + + public void setReplicationFactor(long replicationFactor) { + this.replicationFactor = replicationFactor; + } + + public boolean getDurableWrites() { + return durableWrites; + } + + public void setDurableWrites(boolean durableWrites) { + this.durableWrites = durableWrites; + } + + public void addReplicasPerNode(String dataCenter, long replicasPerNode) { + replicasPerNodeByDataCenter.put(dataCenter, replicasPerNode); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/PoolingOptionsConfig.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/PoolingOptionsConfig.java new file mode 100644 index 000000000..e982e217d --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/PoolingOptionsConfig.java @@ -0,0 +1,62 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.config; + +/** + * Pooling options. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +public class PoolingOptionsConfig { + + private Integer minSimultaneousRequests; + private Integer maxSimultaneousRequests; + private Integer coreConnections; + private Integer maxConnections; + + public Integer getMinSimultaneousRequests() { + return minSimultaneousRequests; + } + + public void setMinSimultaneousRequests(Integer minSimultaneousRequests) { + this.minSimultaneousRequests = minSimultaneousRequests; + } + + public Integer getMaxSimultaneousRequests() { + return maxSimultaneousRequests; + } + + public void setMaxSimultaneousRequests(Integer maxSimultaneousRequests) { + this.maxSimultaneousRequests = maxSimultaneousRequests; + } + + public Integer getCoreConnections() { + return coreConnections; + } + + public void setCoreConnections(Integer coreConnections) { + this.coreConnections = coreConnections; + } + + public Integer getMaxConnections() { + return maxConnections; + } + + public void setMaxConnections(Integer maxConnections) { + this.maxConnections = maxConnections; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/SocketOptionsConfig.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/SocketOptionsConfig.java new file mode 100644 index 000000000..562377415 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/SocketOptionsConfig.java @@ -0,0 +1,89 @@ +/* + * Copyright 2011-2013 the original author 
or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.config; + +/** + * Socket options. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +public class SocketOptionsConfig { + + private Integer connectTimeoutMls; + private Boolean keepAlive; + private Boolean reuseAddress; + private Integer soLinger; + private Boolean tcpNoDelay; + private Integer receiveBufferSize; + private Integer sendBufferSize; + + public Integer getConnectTimeoutMls() { + return connectTimeoutMls; + } + + public void setConnectTimeoutMls(Integer connectTimeoutMls) { + this.connectTimeoutMls = connectTimeoutMls; + } + + public Boolean getKeepAlive() { + return keepAlive; + } + + public void setKeepAlive(Boolean keepAlive) { + this.keepAlive = keepAlive; + } + + public Boolean getReuseAddress() { + return reuseAddress; + } + + public void setReuseAddress(Boolean reuseAddress) { + this.reuseAddress = reuseAddress; + } + + public Integer getSoLinger() { + return soLinger; + } + + public void setSoLinger(Integer soLinger) { + this.soLinger = soLinger; + } + + public Boolean getTcpNoDelay() { + return tcpNoDelay; + } + + public void setTcpNoDelay(Boolean tcpNoDelay) { + this.tcpNoDelay = tcpNoDelay; + } + + public Integer getReceiveBufferSize() { + return receiveBufferSize; + } + + public void setReceiveBufferSize(Integer receiveBufferSize) { + this.receiveBufferSize = receiveBufferSize; + } + + public Integer getSendBufferSize() 
{ + return sendBufferSize; + } + + public void setSendBufferSize(Integer sendBufferSize) { + this.sendBufferSize = sendBufferSize; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/java/AbstractCassandraConfiguration.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/java/AbstractCassandraConfiguration.java new file mode 100644 index 000000000..b9f3bb682 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/java/AbstractCassandraConfiguration.java @@ -0,0 +1,129 @@ +package org.springframework.cassandra.config.java; + +import java.util.Collections; +import java.util.List; + +import org.springframework.cassandra.config.CassandraClusterFactoryBean; +import org.springframework.cassandra.config.CassandraSessionFactoryBean; +import org.springframework.cassandra.config.CompressionType; +import org.springframework.cassandra.config.PoolingOptionsConfig; +import org.springframework.cassandra.config.SocketOptionsConfig; +import org.springframework.cassandra.core.CassandraOperations; +import org.springframework.cassandra.core.CassandraTemplate; +import org.springframework.cassandra.core.keyspace.CreateKeyspaceSpecification; +import org.springframework.cassandra.core.keyspace.DropKeyspaceSpecification; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import com.datastax.driver.core.AuthProvider; +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.policies.LoadBalancingPolicy; +import com.datastax.driver.core.policies.ReconnectionPolicy; +import com.datastax.driver.core.policies.RetryPolicy; + +/** + * Base class for Spring Cassandra configuration that can handle creating namespaces, execute arbitrary CQL on startup & + * shutdown, and optionally drop namespaces. + * + * @author Matthew T. 
package org.springframework.cassandra.config.java;

import java.util.Collections;
import java.util.List;

import org.springframework.cassandra.config.CassandraClusterFactoryBean;
import org.springframework.cassandra.config.CassandraSessionFactoryBean;
import org.springframework.cassandra.config.CompressionType;
import org.springframework.cassandra.config.PoolingOptionsConfig;
import org.springframework.cassandra.config.SocketOptionsConfig;
import org.springframework.cassandra.core.CassandraOperations;
import org.springframework.cassandra.core.CassandraTemplate;
import org.springframework.cassandra.core.keyspace.CreateKeyspaceSpecification;
import org.springframework.cassandra.core.keyspace.DropKeyspaceSpecification;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.datastax.driver.core.AuthProvider;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.policies.LoadBalancingPolicy;
import com.datastax.driver.core.policies.ReconnectionPolicy;
import com.datastax.driver.core.policies.RetryPolicy;

/**
 * Base class for Spring Cassandra Java configuration that can create keyspaces, execute arbitrary CQL on
 * startup and shutdown, and optionally drop keyspaces. Subclasses supply the keyspace name and may override
 * any of the protected hook methods to customize the cluster.
 *
 * @author Matthew T. Adams
 */
@Configuration
public abstract class AbstractCassandraConfiguration {

	/** The name of the keyspace the session connects to; required. */
	protected abstract String getKeyspaceName();

	/**
	 * Creates the {@link Cluster} factory bean, configured from all of the overridable hook methods below.
	 */
	@Bean
	public CassandraClusterFactoryBean cluster() throws Exception {

		CassandraClusterFactoryBean factory = new CassandraClusterFactoryBean();

		// connection basics
		factory.setContactPoints(getContactPoints());
		factory.setPort(getPort());
		factory.setCompressionType(getCompressionType());
		factory.setAuthProvider(getAuthProvider());
		factory.setMetricsEnabled(getMetricsEnabled());

		// driver policies and tuning
		factory.setLoadBalancingPolicy(getLoadBalancingPolicy());
		factory.setReconnectionPolicy(getReconnectionPolicy());
		factory.setRetryPolicy(getRetryPolicy());
		factory.setLocalPoolingOptions(getLocalPoolingOptions());
		factory.setRemotePoolingOptions(getRemotePoolingOptions());
		factory.setSocketOptions(getSocketOptions());

		// keyspace lifecycle and CQL scripts
		factory.setKeyspaceCreations(getKeyspaceCreations());
		factory.setKeyspaceDrops(getKeyspaceDrops());
		factory.setStartupScripts(getStartupScripts());
		factory.setShutdownScripts(getShutdownScripts());

		return factory;
	}

	/**
	 * Creates the session factory bean bound to {@link #getKeyspaceName()}.
	 */
	@Bean
	public CassandraSessionFactoryBean session() throws Exception {

		CassandraSessionFactoryBean factory = new CassandraSessionFactoryBean();
		factory.setCluster(cluster().getObject());
		factory.setKeyspaceName(getKeyspaceName());

		return factory;
	}

	// Overridable hooks. Defaults below mirror CassandraClusterFactoryBean's constants where one exists;
	// a null return leaves the corresponding setting unconfigured.

	protected String getContactPoints() {
		return CassandraClusterFactoryBean.DEFAULT_CONTACT_POINTS;
	}

	protected int getPort() {
		return CassandraClusterFactoryBean.DEFAULT_PORT;
	}

	protected CompressionType getCompressionType() {
		return null;
	}

	protected AuthProvider getAuthProvider() {
		return null;
	}

	protected boolean getMetricsEnabled() {
		return CassandraClusterFactoryBean.DEFAULT_METRICS_ENABLED;
	}

	protected LoadBalancingPolicy getLoadBalancingPolicy() {
		return null;
	}

	protected ReconnectionPolicy getReconnectionPolicy() {
		return null;
	}

	protected RetryPolicy getRetryPolicy() {
		return null;
	}

	protected PoolingOptionsConfig getLocalPoolingOptions() {
		return null;
	}

	protected PoolingOptionsConfig getRemotePoolingOptions() {
		return null;
	}

	protected SocketOptionsConfig getSocketOptions() {
		return null;
	}

	protected List getKeyspaceCreations() {
		return Collections.emptyList();
	}

	protected List getKeyspaceDrops() {
		return Collections.emptyList();
	}

	protected List getStartupScripts() {
		return Collections.emptyList();
	}

	protected List getShutdownScripts() {
		return Collections.emptyList();
	}
}
Adams + */ +public final class BeanNames { + + private BeanNames() { + } + + public static final String CASSANDRA_CLUSTER = "cassandra-cluster"; + public static final String CASSANDRA_KEYSPACE = "cassandra-keyspace"; + public static final String CASSANDRA_SESSION = "cassandra-session"; + public static final String CASSANDRA_TEMPLATE = "cassandra-template"; +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/CassandraClusterParser.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/CassandraClusterParser.java new file mode 100644 index 000000000..b931fb325 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/CassandraClusterParser.java @@ -0,0 +1,241 @@ +/* + * Copyright 2011-2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.config.xml; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.springframework.beans.factory.BeanDefinitionStoreException; +import org.springframework.beans.factory.config.BeanDefinition; +import org.springframework.beans.factory.support.AbstractBeanDefinition; +import org.springframework.beans.factory.support.BeanDefinitionBuilder; +import org.springframework.beans.factory.xml.AbstractSimpleBeanDefinitionParser; +import org.springframework.beans.factory.xml.ParserContext; +import org.springframework.cassandra.config.CassandraClusterFactoryBean; +import org.springframework.cassandra.config.CompressionType; +import org.springframework.cassandra.config.KeyspaceAttributes; +import org.springframework.cassandra.config.PoolingOptionsConfig; +import org.springframework.cassandra.config.SocketOptionsConfig; +import org.springframework.cassandra.core.keyspace.CreateKeyspaceSpecification; +import org.springframework.cassandra.core.keyspace.DefaultOption; +import org.springframework.cassandra.core.keyspace.DropKeyspaceSpecification; +import org.springframework.cassandra.core.keyspace.KeyspaceOption; +import org.springframework.cassandra.core.keyspace.Option; +import org.springframework.util.StringUtils; +import org.springframework.util.xml.DomUtils; +import org.w3c.dom.Element; +import org.w3c.dom.NodeList; + +/** + * Parser for <cluster;gt; definitions. + * + * @author Alex Shvid + * @author Matthew T. 
Adams + */ +public class CassandraClusterParser extends AbstractSimpleBeanDefinitionParser { + + @Override + protected Class getBeanClass(Element element) { + return CassandraClusterFactoryBean.class; + } + + @Override + protected String resolveId(Element element, AbstractBeanDefinition definition, ParserContext parserContext) + throws BeanDefinitionStoreException { + + String id = super.resolveId(element, definition, parserContext); + return StringUtils.hasText(id) ? id : BeanNames.CASSANDRA_CLUSTER; + } + + @Override + protected void doParse(Element element, ParserContext parserContext, BeanDefinitionBuilder builder) { + + String contactPoints = element.getAttribute("contactPoints"); + if (StringUtils.hasText(contactPoints)) { + builder.addPropertyValue("contactPoints", contactPoints); + } + + String port = element.getAttribute("port"); + if (StringUtils.hasText(port)) { + builder.addPropertyValue("port", port); + } + + String compression = element.getAttribute("compression"); + if (StringUtils.hasText(compression)) { + builder.addPropertyValue("compressionType", CompressionType.valueOf(compression)); + } + + parseChildElements(builder, element); + } + + protected void parseChildElements(BeanDefinitionBuilder builder, Element element) { + + List creates = new ArrayList(); + List drops = new ArrayList(); + List startupScripts = new ArrayList(); + List shutdownScripts = new ArrayList(); + + List elements = DomUtils.getChildElements(element); + + // parse nested elements + for (Element subElement : elements) { + String name = subElement.getLocalName(); + + if ("local-pooling-options".equals(name)) { + builder.addPropertyValue("localPoolingOptions", parsePoolingOptions(subElement)); + } else if ("remote-pooling-options".equals(name)) { + builder.addPropertyValue("remotePoolingOptions", parsePoolingOptions(subElement)); + } else if ("socket-options".equals(name)) { + builder.addPropertyValue("socketOptions", parseSocketOptions(subElement)); + } else if 
("keyspace".equals(name)) { + + KeyspaceSpecifications specifications = parseKeyspace(subElement); + + if (specifications.create != null) { + creates.add(specifications.create); + } + if (specifications.drop != null) { + drops.add(specifications.drop); + } + } else if ("startup-cql".equals(name)) { + startupScripts.add(parseScript(subElement)); + } else if ("shutdown-cql".equals(name)) { + shutdownScripts.add(parseScript(subElement)); + } + } + + builder.addPropertyValue("keyspaceCreations", creates); + builder.addPropertyValue("keyspaceDrops", drops); + builder.addPropertyValue("startupScripts", startupScripts); + builder.addPropertyValue("shutdownScripts", startupScripts); + } + + protected KeyspaceSpecifications parseKeyspace(Element element) { + + CreateKeyspaceSpecification create = null; + DropKeyspaceSpecification drop = null; + + String name = element.getAttribute("name"); + if (name == null || name.trim().length() == 0) { + name = BeanNames.CASSANDRA_KEYSPACE; + } + + boolean durableWrites = Boolean.valueOf(element.getAttribute("durable-writes")); + + String action = element.getAttribute("action"); + if (action == null || action.trim().length() == 0) { + throw new IllegalArgumentException("attribute action must be given"); + } + + if (action.startsWith("CREATE")) { + + create = CreateKeyspaceSpecification.createKeyspace().name(name) + .with(KeyspaceOption.DURABLE_WRITES, durableWrites); + + NodeList nodes = element.getElementsByTagName("replication"); + create = parseReplication((Element) (nodes.getLength() == 1 ? 
nodes.item(0) : null), create); + } + + if (action.equals("CREATE-DROP")) { + drop = DropKeyspaceSpecification.dropKeyspace().name(create.getName()); + } + + return new KeyspaceSpecifications(create, drop); + } + + protected CreateKeyspaceSpecification parseReplication(Element element, CreateKeyspaceSpecification create) { + + String strategyClass = null; + if (element != null) { + strategyClass = element.getAttribute("class"); + } + if (strategyClass == null || (strategyClass = strategyClass.trim()).length() == 0) { + strategyClass = KeyspaceAttributes.DEFAULT_REPLICATION_STRATEGY; + } + + Long replicationFactor = null; + if (element != null) { + String s = element.getAttribute("replication-factor"); + replicationFactor = (s == null || s.trim().length() == 0) ? null : Long.parseLong(s); + } + if (replicationFactor == null) { + replicationFactor = KeyspaceAttributes.DEFAULT_REPLICATION_FACTOR; + } + + Map replicationMap = new HashMap(); + replicationMap.put(new DefaultOption("class", String.class, false, false, true), strategyClass); + replicationMap.put(new DefaultOption("replication_factor", Long.class, true, false, false), replicationFactor); + + if (element != null) { + + NodeList dataCenters = element.getElementsByTagName("data-center"); + + int length = dataCenters.getLength(); + for (int i = 0; i < length; i++) { + + Element dataCenter = (Element) dataCenters.item(i); + + replicationMap.put(new DefaultOption(dataCenter.getAttribute("name"), Long.class, false, false, true), + dataCenter.getAttribute("replication-factor")); + } + } + + return create.with(KeyspaceOption.REPLICATION, replicationMap); + } + + protected String parseScript(Element element) { + return element.getTextContent(); + } + + protected BeanDefinition parsePoolingOptions(Element element) { + BeanDefinitionBuilder builder = BeanDefinitionBuilder.genericBeanDefinition(PoolingOptionsConfig.class); + + ParsingUtils.setPropertyValue(builder, element, "min-simultaneous-requests", 
"minSimultaneousRequests"); + ParsingUtils.setPropertyValue(builder, element, "max-simultaneous-requests", "maxSimultaneousRequests"); + ParsingUtils.setPropertyValue(builder, element, "core-connections", "coreConnections"); + ParsingUtils.setPropertyValue(builder, element, "max-connections", "maxConnections"); + + return builder.getBeanDefinition(); + } + + protected BeanDefinition parseSocketOptions(Element element) { + BeanDefinitionBuilder builder = BeanDefinitionBuilder.genericBeanDefinition(SocketOptionsConfig.class); + + ParsingUtils.setPropertyValue(builder, element, "connect-timeout-mls", "connectTimeoutMls"); + ParsingUtils.setPropertyValue(builder, element, "keep-alive", "keepAlive"); + ParsingUtils.setPropertyValue(builder, element, "reuse-address", "reuseAddress"); + ParsingUtils.setPropertyValue(builder, element, "so-linger", "soLinger"); + ParsingUtils.setPropertyValue(builder, element, "tcp-no-delay", "tcpNoDelay"); + ParsingUtils.setPropertyValue(builder, element, "receive-buffer-size", "receiveBufferSize"); + ParsingUtils.setPropertyValue(builder, element, "send-buffer-size", "sendBufferSize"); + + return builder.getBeanDefinition(); + } + + protected static class KeyspaceSpecifications { + + public KeyspaceSpecifications(CreateKeyspaceSpecification create, DropKeyspaceSpecification drop) { + this.create = create; + this.drop = drop; + } + + public CreateKeyspaceSpecification create; + public DropKeyspaceSpecification drop; + // TODO: public AlterKeyspaceSpecification alter; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/CassandraNamespaceHandler.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/CassandraNamespaceHandler.java new file mode 100644 index 000000000..9d743fa39 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/CassandraNamespaceHandler.java @@ -0,0 +1,36 @@ +/* + * Copyright 2011-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.config.xml; + +import org.springframework.beans.factory.xml.NamespaceHandlerSupport; + +/** + * Namespace handler for <cassandra> elements. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ + +public class CassandraNamespaceHandler extends NamespaceHandlerSupport { + + @Override + public void init() { + + registerBeanDefinitionParser("cluster", new CassandraClusterParser()); + registerBeanDefinitionParser("session", new CassandraSessionParser()); + registerBeanDefinitionParser("template", new CassandraTemplateParser()); + } +} \ No newline at end of file diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/CassandraSessionParser.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/CassandraSessionParser.java new file mode 100644 index 000000000..aa73c1b77 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/CassandraSessionParser.java @@ -0,0 +1,93 @@ +/* + * Copyright 2011-2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/*
 * Copyright 2011-2012 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.springframework.cassandra.config.xml;

import java.util.ArrayList;
import java.util.List;

import org.springframework.beans.factory.BeanDefinitionStoreException;
import org.springframework.beans.factory.support.AbstractBeanDefinition;
import org.springframework.beans.factory.support.BeanDefinitionBuilder;
import org.springframework.beans.factory.xml.AbstractSimpleBeanDefinitionParser;
import org.springframework.beans.factory.xml.ParserContext;
import org.springframework.cassandra.config.CassandraSessionFactoryBean;
import org.springframework.util.StringUtils;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;

/**
 * Parser for &lt;session&gt; definitions.
 *
 * @author David Webb
 * @author Matthew T. Adams
 */
public class CassandraSessionParser extends AbstractSimpleBeanDefinitionParser {

	@Override
	protected Class getBeanClass(Element element) {
		return CassandraSessionFactoryBean.class;
	}

	@Override
	protected String resolveId(Element element, AbstractBeanDefinition definition, ParserContext parserContext)
			throws BeanDefinitionStoreException {

		// fall back to the conventional session bean name when no id is given
		String id = super.resolveId(element, definition, parserContext);
		return StringUtils.hasText(id) ? id : BeanNames.CASSANDRA_SESSION;
	}

	@Override
	protected void doParse(Element element, ParserContext parserContext, BeanDefinitionBuilder builder) {

		// an empty keyspace-name attribute is normalized to null and still set explicitly
		String keyspaceName = element.getAttribute("keyspace-name");
		if (!StringUtils.hasText(keyspaceName)) {
			keyspaceName = null;
		}
		builder.addPropertyValue("keyspaceName", keyspaceName);

		// default to the conventional cluster bean name when no explicit reference is given
		String clusterRef = element.getAttribute("cluster-ref");
		if (!StringUtils.hasText(clusterRef)) {
			clusterRef = BeanNames.CASSANDRA_CLUSTER;
		}
		builder.addPropertyReference("cluster", clusterRef);

		parseChildElements(element, builder);
	}

	/**
	 * Parses the nested startup-cql and shutdown-cql script elements.
	 */
	protected void parseChildElements(Element element, BeanDefinitionBuilder builder) {

		builder.addPropertyValue("startupScripts", parseScripts(element, "startup-cql"));
		builder.addPropertyValue("shutdownScripts", parseScripts(element, "shutdown-cql"));
	}

	/**
	 * Returns the text content of each descendant element with the given tag name.
	 */
	protected List<String> parseScripts(Element element, String elementName) {

		// BUGFIX: this method previously ignored elementName and hard-coded "startup-cql", so
		// shutdown-cql scripts were silently replaced by the startup scripts
		NodeList nodes = element.getElementsByTagName(elementName);
		int length = nodes.getLength();
		List<String> scripts = new ArrayList<String>(length);

		for (int i = 0; i < length; i++) {
			Element script = (Element) nodes.item(i);
			scripts.add(script.getTextContent());
		}

		return scripts;
	}
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.config.xml; + +import org.springframework.beans.factory.BeanDefinitionStoreException; +import org.springframework.beans.factory.support.AbstractBeanDefinition; +import org.springframework.beans.factory.support.BeanDefinitionBuilder; +import org.springframework.beans.factory.xml.AbstractSimpleBeanDefinitionParser; +import org.springframework.beans.factory.xml.ParserContext; +import org.springframework.cassandra.config.CassandraTemplateFactoryBean; +import org.springframework.util.StringUtils; +import org.w3c.dom.Element; + +/** + * Parser for <template> definitions. + * + * @author David Webb + * @author Matthew T. Adams + */ + +public class CassandraTemplateParser extends AbstractSimpleBeanDefinitionParser { + + @Override + protected Class getBeanClass(Element element) { + return CassandraTemplateFactoryBean.class; + } + + @Override + protected String resolveId(Element element, AbstractBeanDefinition definition, ParserContext parserContext) + throws BeanDefinitionStoreException { + + String id = super.resolveId(element, definition, parserContext); + return StringUtils.hasText(id) ? 
id : BeanNames.CASSANDRA_TEMPLATE; + } + + @Override + protected void doParse(Element element, ParserContext parserContext, BeanDefinitionBuilder builder) { + + String sessionRef = element.getAttribute("session-ref"); + if (!StringUtils.hasText(sessionRef)) { + sessionRef = BeanNames.CASSANDRA_SESSION; + } + builder.addPropertyReference("session", sessionRef); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/ParsingUtils.java b/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/ParsingUtils.java new file mode 100644 index 000000000..826c26e3c --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/config/xml/ParsingUtils.java @@ -0,0 +1,33 @@ +package org.springframework.cassandra.config.xml; + +import org.springframework.beans.factory.support.BeanDefinitionBuilder; +import org.springframework.util.Assert; +import org.springframework.util.StringUtils; +import org.w3c.dom.Element; + +public class ParsingUtils { + + /** + * Configures a property value for the given property name reading the attribute of the given name from the given + * {@link Element} if the attribute is configured. + * + * @param builder must not be {@literal null}. + * @param element must not be {@literal null}. + * @param attrName must not be {@literal null} or empty. + * @param propertyName must not be {@literal null} or empty. 
+ */ + public static void setPropertyValue(BeanDefinitionBuilder builder, Element element, String attrName, + String propertyName) { + + Assert.notNull(builder, "BeanDefinitionBuilder must not be null!"); + Assert.notNull(element, "Element must not be null!"); + Assert.hasText(attrName, "Attribute name must not be null!"); + Assert.hasText(propertyName, "Property name must not be null!"); + + String attr = element.getAttribute(attrName); + + if (StringUtils.hasText(attr)) { + builder.addPropertyValue(propertyName, attr); + } + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/CassandraOperations.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/CassandraOperations.java new file mode 100644 index 000000000..31bee51a9 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/CassandraOperations.java @@ -0,0 +1,655 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import org.springframework.dao.DataAccessException; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Session; + +/** + * Operations for interacting with Cassandra at the lowest level. This interface provides Exception Translation. 
+ * + * @author David Webb + * @author Matthew Adams + */ +public interface CassandraOperations { + + /** + * Executes the supplied {@link SessionCallback} in the current Template Session. The implementation of + * SessionCallback can decide whether or not to execute() or executeAsync() the operation. + * + * @param sessionCallback + * @return Type defined in the SessionCallback + */ + T execute(SessionCallback sessionCallback) throws DataAccessException; + + /** + * Executes the supplied CQL Query and returns nothing. + * + * @param cql + */ + void execute(final String cql) throws DataAccessException; + + /** + * Executes the supplied CQL Query Asynchronously and returns nothing. + * + * @param cql The CQL Statement to execute + */ + void executeAsynchronously(final String cql) throws DataAccessException; + + /** + * Executes the provided CQL Query, and extracts the results with the ResultSetExtractor. + * + * @param cql The Query + * @param rse The implementation for extracting the ResultSet + * + * @return Type specified in the ResultSetExtractor + * @throws DataAccessException + */ + T query(final String cql, ResultSetExtractor rse) throws DataAccessException; + + /** + * Executes the provided CQL Query, and extracts the results with the ResultSetExtractor. 
+ * + * @param cql The Query + * @param rse The implementation for extracting the ResultSet + * @param options Query Options Object + * + * @return + * @throws DataAccessException + */ + T query(final String cql, ResultSetExtractor rse, final QueryOptions options) throws DataAccessException; + + /** + * Executes the provided CQL Query asynchronously, and extracts the results with the ResultSetFutureExtractor + * + * @param cql The Query + * @param rse The implementation for extracting the future results + * @return + * @throws DataAccessException + */ + T queryAsynchronously(final String cql, ResultSetFutureExtractor rse) throws DataAccessException; + + /** + * Executes the provided CQL Query asynchronously, and extracts the results with the ResultSetFutureExtractor + * + * @param cql The Query + * @param rse The implementation for extracting the future results + * @param options Query Options Object + * @return + * @throws DataAccessException + */ + T queryAsynchronously(final String cql, ResultSetFutureExtractor rse, final QueryOptions options) + throws DataAccessException; + + /** + * Executes the provided CQL Query, and then processes the results with the RowCallbackHandler. + * + * @param cql The Query + * @param rch The implementation for processing the rows returned. + * @throws DataAccessException + */ + void query(final String cql, RowCallbackHandler rch) throws DataAccessException; + + /** + * Executes the provided CQL Query, and then processes the results with the RowCallbackHandler. + * + * @param cql The Query + * @param rch The implementation for processing the rows returned. + * @param options Query Options Object + * @throws DataAccessException + */ + void query(final String cql, RowCallbackHandler rch, final QueryOptions options) throws DataAccessException; + + /** + * Processes the ResultSet through the RowCallbackHandler and return nothing. 
This is used internal to the Template + * for core operations, but is made available through Operations in the event you have a ResultSet to process. The + * ResultsSet could come from a ResultSetFuture after an asynchronous query. + * + * @param resultSet Results to process + * @param rch RowCallbackHandler with the processing implementation + * @throws DataAccessException + */ + void process(ResultSet resultSet, RowCallbackHandler rch) throws DataAccessException; + + /** + * Executes the provided CQL Query, and maps all Rows returned with the supplied RowMapper. + * + * @param cql The Query + * @param rowMapper The implementation for mapping all rows + * @return List of processed by the RowMapper + * @throws DataAccessException + */ + List query(final String cql, RowMapper rowMapper) throws DataAccessException; + + /** + * Executes the provided CQL Query, and maps all Rows returned with the supplied RowMapper. + * + * @param cql The Query + * @param rowMapper The implementation for mapping all rows + * @param options Query Options Object + * @return List of processed by the RowMapper + * @throws DataAccessException + */ + List query(final String cql, RowMapper rowMapper, final QueryOptions options) throws DataAccessException; + + /** + * Processes the ResultSet through the RowMapper and returns the List of mapped Rows. This is used internal to the + * Template for core operations, but is made available through Operations in the event you have a ResultSet to + * process. The ResultsSet could come from a ResultSetFuture after an asynchronous query. + * + * @param resultSet Results to process + * @param rowMapper RowMapper with the processing implementation + * @return List of generated by the RowMapper + * @throws DataAccessException + */ + List process(ResultSet resultSet, RowMapper rowMapper) throws DataAccessException; + + /** + * Executes the provided CQL Query, and maps ONE Row returned with the supplied RowMapper. + * + *
+ * <p/>
+ * This expects only ONE row to be returned. More than one Row will cause an Exception to be thrown. + *
+ * <p/>
+ * + * @param cql The Query + * @param rowMapper The implementation for convert the Row to + * @return Object + * @throws DataAccessException + */ + T queryForObject(final String cql, RowMapper rowMapper) throws DataAccessException; + + /** + * Process a ResultSet through a RowMapper. This is used internal to the Template for core operations, but is made + * available through Operations in the event you have a ResultSet to process. The ResultsSet could come from a + * ResultSetFuture after an asynchronous query. + * + * @param resultSet + * @param rowMapper + * @return + * @throws DataAccessException + */ + T processOne(ResultSet resultSet, RowMapper rowMapper) throws DataAccessException; + + /** + * Executes the provided query and tries to return the first column of the first Row as a Class. + * + * @param cql The Query + * @param requiredType Valid Class that Cassandra Data Types can be converted to. + * @return The Object - item [0,0] in the result table of the query. + * @throws DataAccessException + */ + T queryForObject(final String cql, Class requiredType) throws DataAccessException; + + /** + * Process a ResultSet, trying to convert the first columns of the first Row to Class. This is used internal to the + * Template for core operations, but is made available through Operations in the event you have a ResultSet to + * process. The ResultsSet could come from a ResultSetFuture after an asynchronous query. + * + * @param resultSet + * @param requiredType + * @return + * @throws DataAccessException + */ + T processOne(ResultSet resultSet, Class requiredType) throws DataAccessException; + + /** + * Executes the provided CQL Query and maps ONE Row to a basic Map of Strings and Objects. If more than one Row + * is returned from the Query, an exception will be thrown. 
+ * + * @param cql The Query + * @return Map representing the results of the Query + * @throws DataAccessException + */ + Map queryForMap(final String cql) throws DataAccessException; + + /** + * Process a ResultSet with ONE Row and convert to a Map. This is used internal to the Template for core + * operations, but is made available through Operations in the event you have a ResultSet to process. The ResultsSet + * could come from a ResultSetFuture after an asynchronous query. + * + * @param resultSet + * @return + * @throws DataAccessException + */ + Map processMap(ResultSet resultSet) throws DataAccessException; + + /** + * Executes the provided CQL and returns all values in the first column of the Results as a List of the Type in the + * second argument. + * + * @param cql The Query + * @param elementType Type to cast the data values to + * @return List of elementType + * @throws DataAccessException + */ + List queryForList(final String cql, Class elementType) throws DataAccessException; + + /** + * Process a ResultSet and convert the first column of the results to a List. This is used internal to the Template + * for core operations, but is made available through Operations in the event you have a ResultSet to process. The + * ResultsSet could come from a ResultSetFuture after an asynchronous query. + * + * @param resultSet + * @param elementType + * @return + * @throws DataAccessException + */ + List processList(ResultSet resultSet, Class elementType) throws DataAccessException; + + /** + * Executes the provided CQL and converts the results to a basic List of Maps. Each element in the List represents a + * Row returned from the Query. Each Row's columns are put into the map as column/value. + * + * @param cql The Query + * @return List of Maps with the query results + * @throws DataAccessException + */ + List> queryForListOfMap(final String cql) throws DataAccessException; + + /** + * Process a ResultSet and convert it to a List of Maps with column/value. 
This is used internal to the Template for + * core operations, but is made available through Operations in the event you have a ResultSet to process. The + * ResultsSet could come from a ResultSetFuture after an asynchronous query. + * + * @param resultSet + * @return + * @throws DataAccessException + */ + List> processListOfMap(ResultSet resultSet) throws DataAccessException; + + /** + * Converts the CQL provided into a {@link SimplePreparedStatementCreator}. This can only be used for CQL + * Statements that do not have data binding. The results of the PreparedStatement are processed with + * PreparedStatementCallback implementation provided by the Application Code. + * + * @param cql The CQL Statement to Execute + * @param action What to do with the results of the PreparedStatement + * @return Type as determined by the supplied Callback. + * @throws DataAccessException + */ + T execute(String cql, PreparedStatementCallback action) throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call, then executes the statement and processes + * the statement using the provided Callback. This can only be used for CQL Statements that do not have data + * binding. The results of the PreparedStatement are processed with PreparedStatementCallback implementation + * provided by the Application Code. + * + * @param psc The implementation to create the PreparedStatement + * @param action What to do with the results of the PreparedStatement + * @return Type as determined by the supplied Callback. + * @throws DataAccessException + */ + T execute(PreparedStatementCreator psc, PreparedStatementCallback action) throws DataAccessException; + + /** + * Converts the CQL provided into a {@link SimplePreparedStatementCreator}. Then, the PreparedStatementBinder will + * bind its values to the bind variables in the provided CQL String. 
The results of the PreparedStatement are + * processed with the ResultSetExtractor implementation provided by the Application Code. The can return any object, + * including a List of Objects to support the ResultSet processing. + * + * @param cql The Query to Prepare + * @param psb The Binding implementation + * @param rse The implementation for extracting the results of the query. + * @return Type generated by the ResultSetExtractor + * @throws DataAccessException + */ + T query(final String cql, PreparedStatementBinder psb, ResultSetExtractor rse) throws DataAccessException; + + T query(final String cql, PreparedStatementBinder psb, ResultSetExtractor rse, final QueryOptions options) + throws DataAccessException; + + /** + * Converts the CQL provided into a {@link SimplePreparedStatementCreator}. Then, the PreparedStatementBinder will + * bind its values to the bind variables in the provided CQL String. The results of the PreparedStatement are + * processed with the RowCallbackHandler implementation provided and nothing is returned. + * + * @param cql The Query to Prepare + * @param psb The Binding implementation + * @param rch The RowCallbackHandler for processing the ResultSet + * @throws DataAccessException + */ + void query(final String cql, PreparedStatementBinder psb, RowCallbackHandler rch) throws DataAccessException; + + /** + * Converts the CQL provided into a {@link SimplePreparedStatementCreator}. Then, the PreparedStatementBinder will + * bind its values to the bind variables in the provided CQL String. The results of the PreparedStatement are + * processed with the RowCallbackHandler implementation provided and nothing is returned. 
+ * + * @param cql The Query to Prepare + * @param psb The Binding implementation + * @param rch The RowCallbackHandler for processing the ResultSet + * @param options The Query Options Object + * @throws DataAccessException + */ + void query(final String cql, PreparedStatementBinder psb, RowCallbackHandler rch, final QueryOptions options) + throws DataAccessException; + + /** + * Converts the CQL provided into a {@link SimplePreparedStatementCreator}. Then, the PreparedStatementBinder will + * bind its values to the bind variables in the provided CQL String. The results of the PreparedStatement are + * processed with the RowMapper implementation provided and a List is returned with elements of Type for each Row + * returned. + * + * @param cql The Query to Prepare + * @param psb The Binding implementation + * @param rowMapper The implementation for Mapping a Row to Type + * @return List of for each Row returned from the Query. + * @throws DataAccessException + */ + List query(final String cql, PreparedStatementBinder psb, RowMapper rowMapper) throws DataAccessException; + + /** + * Converts the CQL provided into a {@link SimplePreparedStatementCreator}. Then, the PreparedStatementBinder will + * bind its values to the bind variables in the provided CQL String. The results of the PreparedStatement are + * processed with the RowMapper implementation provided and a List is returned with elements of Type for each Row + * returned. + * + * @param cql The Query to Prepare + * @param psb The Binding implementation + * @param rowMapper The implementation for Mapping a Row to Type + * @param options The Query Options Object + * @return List of for each Row returned from the Query. + * @throws DataAccessException + */ + List query(final String cql, PreparedStatementBinder psb, RowMapper rowMapper, final QueryOptions options) + throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. 
This can only be used for CQL + * Statements that do not have data binding. The results of the PreparedStatement are processed with + * ResultSetExtractor implementation provided by the Application Code. + * + * @param psc The implementation to create the PreparedStatement + * @param rse Implementation for extracting from the ResultSet + * @return Type which is the output of the ResultSetExtractor + * @throws DataAccessException + */ + T query(PreparedStatementCreator psc, ResultSetExtractor rse) throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. This can only be used for CQL + * Statements that do not have data binding. The results of the PreparedStatement are processed with + * ResultSetExtractor implementation provided by the Application Code. + * + * @param psc The implementation to create the PreparedStatement + * @param rse Implementation for extracting from the ResultSet + * @param options The Query Options Object + * @return Type which is the output of the ResultSetExtractor + * @throws DataAccessException + */ + T query(PreparedStatementCreator psc, ResultSetExtractor rse, final QueryOptions options) + throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. This can only be used for CQL + * Statements that do not have data binding. The results of the PreparedStatement are processed with + * RowCallbackHandler and nothing is returned. + * + * @param psc The implementation to create the PreparedStatement + * @param rch The implementation to process Results + * @throws DataAccessException + */ + void query(PreparedStatementCreator psc, RowCallbackHandler rch) throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. This can only be used for CQL + * Statements that do not have data binding. 
The results of the PreparedStatement are processed with + * RowCallbackHandler and nothing is returned. + * + * @param psc The implementation to create the PreparedStatement + * @param rch The implementation to process Results + * @param options The Query Options Object + * @throws DataAccessException + */ + void query(PreparedStatementCreator psc, RowCallbackHandler rch, final QueryOptions options) + throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. This can only be used for CQL + * Statements that do not have data binding. The results of the PreparedStatement are processed with RowMapper + * implementation provided and a List is returned with elements of Type for each Row returned. + * + * @param psc The implementation to create the PreparedStatement + * @param rowMapper The implementation for mapping each Row returned. + * @return List of Type mapped from each Row in the Results + * @throws DataAccessException + */ + List query(PreparedStatementCreator psc, RowMapper rowMapper) throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. This can only be used for CQL + * Statements that do not have data binding. The results of the PreparedStatement are processed with RowMapper + * implementation provided and a List is returned with elements of Type for each Row returned. + * + * @param psc The implementation to create the PreparedStatement + * @param rowMapper The implementation for mapping each Row returned. + * @param options The Query Options Object + * @return List of Type mapped from each Row in the Results + * @throws DataAccessException + */ + List query(PreparedStatementCreator psc, RowMapper rowMapper, final QueryOptions options) + throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. Binds the values from the + * PreparedStatementBinder to the available bind variables. 
The results of the PreparedStatement are processed with + * ResultSetExtractor implementation provided by the Application Code. + * + * @param psc The implementation to create the PreparedStatement + * @param psb The implementation to bind variables to values + * @param rse Implementation for extracting from the ResultSet + * @param options The Query Options Object + * @return Type which is the output of the ResultSetExtractor + * @throws DataAccessException + */ + T query(PreparedStatementCreator psc, final PreparedStatementBinder psb, final ResultSetExtractor rse, + final QueryOptions options) throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. Binds the values from the + * PreparedStatementBinder to the available bind variables. The results of the PreparedStatement are processed with + * ResultSetExtractor implementation provided by the Application Code. + * + * @param psc The implementation to create the PreparedStatement + * @param psb The implementation to bind variables to values + * @param rse Implementation for extracting from the ResultSet + * @return Type which is the output of the ResultSetExtractor + * @throws DataAccessException + */ + T query(PreparedStatementCreator psc, final PreparedStatementBinder psb, final ResultSetExtractor rse) + throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. Binds the values from the + * PreparedStatementBinder to the available bind variables. The results of the PreparedStatement are processed with + * RowCallbackHandler and nothing is returned. 
+ * + * @param psc The implementation to create the PreparedStatement + * @param psb The implementation to bind variables to values + * @param rch The implementation to process Results + * @param options The Query Options Object + * @return Type which is the output of the ResultSetExtractor + * @throws DataAccessException + */ + void query(PreparedStatementCreator psc, final PreparedStatementBinder psb, final RowCallbackHandler rch, + final QueryOptions options) throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. Binds the values from the + * PreparedStatementBinder to the available bind variables. The results of the PreparedStatement are processed with + * RowCallbackHandler and nothing is returned. + * + * @param psc The implementation to create the PreparedStatement + * @param psb The implementation to bind variables to values + * @param rch The implementation to process Results + * @return Type which is the output of the ResultSetExtractor + * @throws DataAccessException + */ + void query(PreparedStatementCreator psc, final PreparedStatementBinder psb, final RowCallbackHandler rch) + throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. Binds the values from the + * PreparedStatementBinder to the available bind variables. The results of the PreparedStatement are processed with + * RowMapper implementation provided and a List is returned with elements of Type for each Row returned. + * + * @param psc The implementation to create the PreparedStatement + * @param psb The implementation to bind variables to values + * @param rowMapper The implementation for mapping each Row returned. 
+ * @param options The Query Options Object + * @return Type which is the output of the ResultSetExtractor + * @throws DataAccessException + */ + List query(PreparedStatementCreator psc, final PreparedStatementBinder psb, final RowMapper rowMapper, + final QueryOptions options) throws DataAccessException; + + /** + * Uses the provided PreparedStatementCreator to prepare a new Session call. Binds the values from the + * PreparedStatementBinder to the available bind variables. The results of the PreparedStatement are processed with + * RowMapper implementation provided and a List is returned with elements of Type for each Row returned. + * + * @param psc The implementation to create the PreparedStatement + * @param psb The implementation to bind variables to values + * @param rowMapper The implementation for mapping each Row returned. + * @return Type which is the output of the ResultSetExtractor + * @throws DataAccessException + */ + List query(PreparedStatementCreator psc, final PreparedStatementBinder psb, final RowMapper rowMapper) + throws DataAccessException; + + /** + * Describe the current Ring. This uses the provided {@link RingMemberHostMapper} to provide the basics of the + * Cassandra Ring topology. + * + * @return The list of ring tokens that are active in the cluster + */ + List describeRing() throws DataAccessException; + + /** + * Describe the current Ring. Application code must provide its own {@link HostMapper} implementation to process the + * lists of hosts returned by the Cassandra Cluster Metadata. + * + * @param hostMapper The implementation to use for host mapping. + * @return Collection generated by the provided HostMapper. + * @throws DataAccessException + */ + Collection describeRing(HostMapper hostMapper) throws DataAccessException; + + /** + * Get the current Session used for operations in the implementing class. 
+ * + * @return The DataStax Driver Session Object + */ + Session getSession(); + + /** + * This is an operation designed for high performance writes. The cql is used to create a PreparedStatement once, then + * all row values are bound to the single PreparedStatement and executed against the Session. + * + *
+ * <p/>
+ * This is used internally by the other ingest() methods, but can be used if you want to write your own RowIterator. + * The Object[] length returned by the next() implementation must match the number of bind variables in the CQL. + *
+ * <p/>
+ * + * @param cql The CQL + * @param rowIterator Implementation to provide the Object[] to be bound to the CQL. + * @param options The Query Options Object + */ + void ingest(String cql, RowIterator rowIterator, QueryOptions options); + + /** + * This is an operation designed for high performance writes. The cql is used to create a PreparedStatement once, then + * all row values are bound to the single PreparedStatement and executed against the Session. + * + *
+ * <p/>
+ * This is used internally by the other ingest() methods, but can be used if you want to write your own RowIterator. + * The Object[] length returned by the next() implementation must match the number of bind variables in the CQL. + *
+ * <p/>
+ * + * @param cql The CQL + * @param rowIterator Implementation to provide the Object[] to be bound to the CQL. + */ + void ingest(String cql, RowIterator rowIterator); + + /** + * This is an operation designed for high performance writes. The cql is used to create a PreparedStatement once, then + * all row values are bound to the single PreparedStatement and executed against the Session. + * + *
+ * <p/>
+ * The List length must match the number of bind variables in the CQL. + *
+ * <p/>
+ * + * @param cql The CQL + * @param rows List of List with data to bind to the CQL. + * @param options The Query Options Object + */ + void ingest(String cql, List> rows, QueryOptions options); + + /** + * This is an operation designed for high performance writes. The cql is used to create a PreparedStatement once, then + * all row values are bound to the single PreparedStatement and executed against the Session. + * + *
+ * <p/>
+ * The List length must match the number of bind variables in the CQL. + *
+ * <p/>
+ * + * @param cql The CQL + * @param rows List of List with data to bind to the CQL. + */ + void ingest(String cql, List> rows); + + /** + * This is an operation designed for high performance writes. The cql is used to create a PreparedStatement once, then + * all row values are bound to the single PreparedStatement and executed against the Session. + * + *
+ * <p/>
+ * The Object[] length of the nested array must match the number of bind variables in the CQL. + *
+ * <p/>
+ * + * @param cql The CQL + * @param rows Object array of Object array of values to bind to the CQL. + * @param options The Query Options Object + */ + void ingest(String cql, Object[][] rows, QueryOptions options); + + /** + * This is an operation designed for high performance writes. The cql is used to create a PreparedStatement once, then + * all row values are bound to the single PreparedStatement and executed against the Session. + * + *
+ * <p/>
+ * The Object[] length of the nested array must match the number of bind variables in the CQL. + *
+ * <p/>
+ * + * @param cql The CQL + * @param rows Object array of Object array of values to bind to the CQL. + */ + void ingest(String cql, Object[][] rows); + + /** + * Delete all rows in the table + * + * @param tableName + */ + void truncate(String tableName); + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/CassandraTemplate.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/CassandraTemplate.java new file mode 100644 index 000000000..6537e2103 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/CassandraTemplate.java @@ -0,0 +1,839 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.springframework.cassandra.support.CassandraAccessor; +import org.springframework.dao.DataAccessException; +import org.springframework.util.Assert; + +import com.datastax.driver.core.BoundStatement; +import com.datastax.driver.core.ColumnDefinitions; +import com.datastax.driver.core.ColumnDefinitions.Definition; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.Metadata; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Query; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.ResultSetFuture; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.SimpleStatement; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.querybuilder.QueryBuilder; +import com.datastax.driver.core.querybuilder.Truncate; + +/** + * This is the Central class in the Cassandra core package. It simplifies the use of Cassandra and helps to avoid + * common errors. It executes the core Cassandra workflow, leaving application code to provide CQL and result + * extraction. This class execute CQL Queries, provides different ways to extract/map results, and provides Exception + * translation to the generic, more informative exception hierarchy defined in the org.springframework.dao + * package. + * + *
+ * <p/>
+ * For working with POJOs, use the {@link CassandraDataTemplate}. + *
+ * <p/>
+ * + * @author David Webb + * @author Matthew Adams + */ +public class CassandraTemplate extends CassandraAccessor implements CassandraOperations { + + /** + * Blank constructor. You must wire in the Session before use. + * + */ + public CassandraTemplate() { + } + + /** + * Constructor used for a basic template configuration + * + * @param session must not be {@literal null}. + */ + public CassandraTemplate(Session session) { + setSession(session); + afterPropertiesSet(); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#execute(org.springframework.data.cassandra.core.SessionCallback) + */ + @Override + public T execute(SessionCallback sessionCallback) throws DataAccessException { + return doExecute(sessionCallback); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#execute(java.lang.String) + */ + @Override + public void execute(final String cql) throws DataAccessException { + doExecute(cql, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#queryAsynchronously(java.lang.String, org.springframework.cassandra.core.ResultSetFutureExtractor, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public T queryAsynchronously(final String cql, ResultSetFutureExtractor rse, final QueryOptions options) + throws DataAccessException { + return rse.extractData(execute(new SessionCallback() { + @Override + public ResultSetFuture doInSession(Session s) throws DataAccessException { + Statement statement = new SimpleStatement(cql); + addQueryOptions(statement, options); + return s.executeAsync(statement); + } + })); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.FutureResultSetExtractor) + */ + @Override + public T queryAsynchronously(final String cql, ResultSetFutureExtractor rse) throws DataAccessException { + return 
queryAsynchronously(cql, rse, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.ResultSetExtractor, java.util.Map) + */ + @Override + public T query(String cql, ResultSetExtractor rse) throws DataAccessException { + return query(cql, rse, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.ResultSetExtractor, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public T query(String cql, ResultSetExtractor rse, QueryOptions options) throws DataAccessException { + Assert.notNull(cql); + ResultSet rs = doExecute(cql, options); + return rse.extractData(rs); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.RowCallbackHandler, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public void query(String cql, RowCallbackHandler rch, QueryOptions options) throws DataAccessException { + process(doExecute(cql, options), rch); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.RowCallbackHandler) + */ + public void query(String cql, RowCallbackHandler rch) throws DataAccessException { + query(cql, rch, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.RowMapper, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public List query(String cql, RowMapper rowMapper, QueryOptions options) throws DataAccessException { + return process(doExecute(cql, options), rowMapper); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.RowMapper) + */ + 
public List query(String cql, RowMapper rowMapper) throws DataAccessException { + return query(cql, rowMapper, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#queryForList(java.lang.String) + */ + public List> queryForListOfMap(String cql) throws DataAccessException { + return processListOfMap(doExecute(cql, null)); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#queryForList(java.lang.String, java.lang.Class) + */ + public List queryForList(String cql, Class elementType) throws DataAccessException { + return processList(doExecute(cql, null), elementType); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#queryForMap(java.lang.String) + */ + public Map queryForMap(String cql) throws DataAccessException { + return processMap(doExecute(cql, null)); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#queryForObject(java.lang.String, java.lang.Class) + */ + public T queryForObject(String cql, Class requiredType) throws DataAccessException { + return processOne(doExecute(cql, null), requiredType); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#queryForObject(java.lang.String, org.springframework.cassandra.core.RowMapper) + */ + public T queryForObject(String cql, RowMapper rowMapper) throws DataAccessException { + return processOne(doExecute(cql, null), rowMapper); + } + + /** + * Execute a command at the Session Level + * + * @param callback + * @return + */ + protected T doExecute(SessionCallback callback) { + + Assert.notNull(callback); + + try { + + return callback.doInSession(getSession()); + + } catch (DataAccessException e) { + throw throwTranslated(e); + } + } + + /** + * Execute a command at the Session Level + * + * @param callback + * @return + */ + protected ResultSet doExecute(final String cql, final QueryOptions options) { + + logger.info(cql); + + 
return doExecute(new SessionCallback() { + + @Override + public ResultSet doInSession(Session s) throws DataAccessException { + SimpleStatement statement = new SimpleStatement(cql); + addQueryOptions(statement, options); + return s.execute(statement); + } + }); + } + + /** + * Execute a command at the Session Level + * + * @param callback + * @return + */ + protected ResultSet doExecute(final BoundStatement bs, final QueryOptions options) { + + return doExecute(new SessionCallback() { + + @Override + public ResultSet doInSession(Session s) throws DataAccessException { + addQueryOptions(bs, options); + return s.execute(bs); + } + }); + } + + /** + * @param row + * @return + */ + protected Object firstColumnToObject(Row row) { + ColumnDefinitions cols = row.getColumnDefinitions(); + if (cols.size() == 0) { + return null; + } + return cols.getType(0).deserialize(row.getBytesUnsafe(0)); + } + + /** + * @param row + * @return + */ + protected Map toMap(Row row) { + if (row == null) { + return null; + } + + ColumnDefinitions cols = row.getColumnDefinitions(); + Map map = new HashMap(cols.size()); + + for (Definition def : cols.asList()) { + String name = def.getName(); + map.put(name, def.getType().deserialize(row.getBytesUnsafe(name))); + } + + return map; + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#describeRing() + */ + @Override + public List describeRing() throws DataAccessException { + return new ArrayList(describeRing(new RingMemberHostMapper())); + } + + /** + * Pulls the list of Hosts for the current Session + * + * @return + */ + private Set getHosts() { + + /* + * Get the cluster metadata for this session + */ + Metadata clusterMetadata = doExecute(new SessionCallback() { + + @Override + public Metadata doInSession(Session s) throws DataAccessException { + return s.getCluster().getMetadata(); + } + + }); + + /* + * Get all hosts in the cluster + */ + Set hosts = clusterMetadata.getAllHosts(); + + return hosts; + + } 
+ + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#describeRing(org.springframework.cassandra.core.HostMapper) + */ + @Override + public Collection describeRing(HostMapper hostMapper) throws DataAccessException { + Set hosts = getHosts(); + return hostMapper.mapHosts(hosts); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#executeAsynchronously(java.lang.String) + */ + @Override + public void executeAsynchronously(final String cql) throws DataAccessException { + execute(new SessionCallback() { + @Override + public Object doInSession(Session s) throws DataAccessException { + return s.executeAsync(cql); + } + }); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#process(com.datastax.driver.core.ResultSet, org.springframework.cassandra.core.RowCallbackHandler) + */ + @Override + public void process(ResultSet resultSet, RowCallbackHandler rch) throws DataAccessException { + try { + for (Row row : resultSet.all()) { + rch.processRow(row); + } + } catch (DriverException dx) { + throwTranslated(dx); + } + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#process(com.datastax.driver.core.ResultSet, org.springframework.cassandra.core.RowMapper) + */ + @Override + public List process(ResultSet resultSet, RowMapper rowMapper) throws DataAccessException { + List mappedRows = new ArrayList(); + try { + int i = 0; + for (Row row : resultSet.all()) { + mappedRows.add(rowMapper.mapRow(row, i++)); + } + } catch (DriverException dx) { + throwTranslated(dx); + } + return mappedRows; + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#processOne(com.datastax.driver.core.ResultSet, org.springframework.cassandra.core.RowMapper) + */ + @Override + public T processOne(ResultSet resultSet, RowMapper rowMapper) throws DataAccessException { + T row = null; + Assert.notNull(resultSet, "ResultSet cannot be 
null"); + try { + List rows = resultSet.all(); + Assert.notNull(rows, "null row list returned from query"); + Assert.isTrue(rows.size() == 1, "row list has " + rows.size() + " rows instead of one"); + row = rowMapper.mapRow(rows.get(0), 0); + } catch (DriverException dx) { + throwTranslated(dx); + } + return row; + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#processOne(com.datastax.driver.core.ResultSet, java.lang.Class) + */ + @SuppressWarnings("unchecked") + @Override + public T processOne(ResultSet resultSet, Class requiredType) throws DataAccessException { + if (resultSet == null) { + return null; + } + Row row = resultSet.one(); + if (row == null) { + return null; + } + return (T) firstColumnToObject(row); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#processMap(com.datastax.driver.core.ResultSet) + */ + @Override + public Map processMap(ResultSet resultSet) throws DataAccessException { + if (resultSet == null) { + return null; + } + return toMap(resultSet.one()); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#processList(com.datastax.driver.core.ResultSet, java.lang.Class) + */ + @Override + @SuppressWarnings( "unchecked" ) + public List processList(ResultSet resultSet, Class elementType) throws DataAccessException { + List rows = resultSet.all(); + List list = new ArrayList(rows.size()); + for (Row row : rows) { + list.add((T) firstColumnToObject(row)); + } + return list; + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#processListOfMap(com.datastax.driver.core.ResultSet) + */ + @Override + public List> processListOfMap(ResultSet resultSet) throws DataAccessException { + List rows = resultSet.all(); + List> list = new ArrayList>(rows.size()); + for (Row row : rows) { + list.add(toMap(row)); + } + return list; + } + + /** + * Attempt to translate a Runtime Exception to a Spring Data Exception + 
* + * @param ex + * @return + */ + protected RuntimeException throwTranslated(RuntimeException ex) { + RuntimeException resolved = getExceptionTranslator().translateExceptionIfPossible(ex); + return resolved == null ? ex : resolved; + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#execute(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.PreparedStatementCallback) + */ + @Override + public T execute(PreparedStatementCreator psc, PreparedStatementCallback action) { + + try { + PreparedStatement ps = psc.createPreparedStatement(getSession()); + return action.doInPreparedStatement(ps); + } catch (DriverException dx) { + throwTranslated(dx); + } + + return null; + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#execute(java.lang.String, org.springframework.cassandra.core.PreparedStatementCallback) + */ + @Override + public T execute(String cql, PreparedStatementCallback action) { + return execute(new SimplePreparedStatementCreator(cql), action); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.ResultSetExtractor, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public T query(PreparedStatementCreator psc, ResultSetExtractor rse, QueryOptions options) + throws DataAccessException { + return query(psc, null, rse, options); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.ResultSetExtractor) + */ + @Override + public T query(PreparedStatementCreator psc, ResultSetExtractor rse) throws DataAccessException { + return query(psc, rse, null); + } + + /* (non-Javadoc) + * @see 
org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.RowCallbackHandler, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public void query(PreparedStatementCreator psc, RowCallbackHandler rch, QueryOptions options) + throws DataAccessException { + query(psc, null, rch, options); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.RowCallbackHandler) + */ + @Override + public void query(PreparedStatementCreator psc, RowCallbackHandler rch) throws DataAccessException { + query(psc, rch, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.RowMapper, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public List query(PreparedStatementCreator psc, RowMapper rowMapper, QueryOptions options) + throws DataAccessException { + return query(psc, null, rowMapper, options); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.RowMapper) + */ + @Override + public List query(PreparedStatementCreator psc, RowMapper rowMapper) throws DataAccessException { + return query(psc, rowMapper, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.PreparedStatementBinder, org.springframework.cassandra.core.ResultSetExtractor, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public T query(String cql, PreparedStatementBinder psb, ResultSetExtractor rse, QueryOptions options) + throws DataAccessException { + return 
query(new SimplePreparedStatementCreator(cql), psb, rse, options); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.PreparedStatementSetter, org.springframework.cassandra.core.ResultSetExtractor) + */ + @Override + public T query(String cql, PreparedStatementBinder psb, ResultSetExtractor rse) throws DataAccessException { + return query(cql, psb, rse, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.PreparedStatementBinder, org.springframework.cassandra.core.RowCallbackHandler, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public void query(String cql, PreparedStatementBinder psb, RowCallbackHandler rch, QueryOptions options) + throws DataAccessException { + query(new SimplePreparedStatementCreator(cql), psb, rch, options); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.PreparedStatementSetter, org.springframework.cassandra.core.RowCallbackHandler) + */ + @Override + public void query(String cql, PreparedStatementBinder psb, RowCallbackHandler rch) throws DataAccessException { + query(cql, psb, rch, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, org.springframework.cassandra.core.PreparedStatementBinder, org.springframework.cassandra.core.RowMapper, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public List query(String cql, PreparedStatementBinder psb, RowMapper rowMapper, QueryOptions options) + throws DataAccessException { + return query(new SimplePreparedStatementCreator(cql), psb, rowMapper, options); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(java.lang.String, 
org.springframework.cassandra.core.PreparedStatementSetter, org.springframework.cassandra.core.RowMapper) + */ + @Override + public List query(String cql, PreparedStatementBinder psb, RowMapper rowMapper) throws DataAccessException { + return query(cql, psb, rowMapper, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#ingest(java.lang.String, org.springframework.cassandra.core.RowIterator, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public void ingest(String cql, RowIterator rowIterator, QueryOptions options) { + + PreparedStatement preparedStatement = getSession().prepare(cql); + addPreparedStatementOptions(preparedStatement, options); + + while (rowIterator.hasNext()) { + getSession().execute(preparedStatement.bind(rowIterator.next())); + } + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#ingest(java.lang.String, org.springframework.cassandra.core.RowIterator) + */ + @Override + public void ingest(String cql, RowIterator rowIterator) { + ingest(cql, rowIterator, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#ingest(java.lang.String, java.util.List, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public void ingest(String cql, final List> rows, QueryOptions options) { + + Assert.notNull(rows); + Assert.notEmpty(rows); + + ingest(cql, new RowIterator() { + + Iterator> i = rows.iterator(); + + @Override + public Object[] next() { + return i.next().toArray(); + } + + @Override + public boolean hasNext() { + return i.hasNext(); + } + + }, options); + + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#ingest(java.lang.String, java.util.List) + */ + @Override + public void ingest(String cql, List> rows) { + ingest(cql, rows, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#ingest(java.lang.String, 
java.lang.Object[][], org.springframework.cassandra.core.QueryOptions) + */ + @Override + public void ingest(String cql, final Object[][] rows, QueryOptions options) { + + ingest(cql, new RowIterator() { + + int index = 0; + + @Override + public Object[] next() { + return rows[index++]; + } + + @Override + public boolean hasNext() { + return index < rows.length; + } + + }, options); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#ingest(java.lang.String, java.lang.Object[][]) + */ + @Override + public void ingest(String cql, final Object[][] rows) { + ingest(cql, rows, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#truncate(java.lang.String) + */ + @Override + public void truncate(String tableName) throws DataAccessException { + Truncate truncate = QueryBuilder.truncate(tableName); + doExecute(truncate.getQueryString(), null); + } + + /** + * Add common Query options for all types of queries. + * + * @param q + * @param optionsByName + */ + public static void addQueryOptions(Query q, QueryOptions options) { + + if (options == null) { + return; + } + + /* + * Add Query Options + */ + if (options.getConsistencyLevel() != null) { + q.setConsistencyLevel(ConsistencyLevelResolver.resolve(options.getConsistencyLevel())); + } + if (options.getRetryPolicy() != null) { + q.setRetryPolicy(RetryPolicyResolver.resolve(options.getRetryPolicy())); + } + + } + + /** + * Add common Query options for all types of queries. 
+ * + * @param q + * @param optionsByName + */ + public static void addPreparedStatementOptions(PreparedStatement s, QueryOptions options) { + + if (options == null) { + return; + } + + /* + * Add Query Options + */ + if (options.getConsistencyLevel() != null) { + s.setConsistencyLevel(ConsistencyLevelResolver.resolve(options.getConsistencyLevel())); + } + if (options.getRetryPolicy() != null) { + s.setRetryPolicy(RetryPolicyResolver.resolve(options.getRetryPolicy())); + } + + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.PreparedStatementBinder, org.springframework.cassandra.core.ResultSetExtractor, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public T query(PreparedStatementCreator psc, final PreparedStatementBinder psb, final ResultSetExtractor rse, + final QueryOptions options) throws DataAccessException { + + Assert.notNull(rse, "ResultSetExtractor must not be null"); + logger.debug("Executing prepared CQL query"); + + return execute(psc, new PreparedStatementCallback() { + public T doInPreparedStatement(PreparedStatement ps) throws DriverException { + ResultSet rs = null; + BoundStatement bs = null; + if (psb != null) { + bs = psb.bindValues(ps); + } else { + bs = ps.bind(); + } + rs = doExecute(bs, options); + return rse.extractData(rs); + } + }); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.PreparedStatementBinder, org.springframework.cassandra.core.ResultSetExtractor) + */ + @Override + public T query(PreparedStatementCreator psc, PreparedStatementBinder psb, ResultSetExtractor rse) + throws DataAccessException { + return query(psc, psb, rse, null); + } + + /* (non-Javadoc) + * @see 
org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.PreparedStatementBinder, org.springframework.cassandra.core.RowCallbackHandler, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public void query(PreparedStatementCreator psc, final PreparedStatementBinder psb, final RowCallbackHandler rch, + final QueryOptions options) throws DataAccessException { + + Assert.notNull(rch, "RowCallbackHandler must not be null"); + logger.debug("Executing prepared CQL query"); + + execute(psc, new PreparedStatementCallback() { + public Object doInPreparedStatement(PreparedStatement ps) throws DriverException { + ResultSet rs = null; + BoundStatement bs = null; + if (psb != null) { + bs = psb.bindValues(ps); + } else { + bs = ps.bind(); + } + rs = doExecute(bs, options); + process(rs, rch); + return null; + } + }); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.PreparedStatementBinder, org.springframework.cassandra.core.RowCallbackHandler) + */ + @Override + public void query(PreparedStatementCreator psc, PreparedStatementBinder psb, RowCallbackHandler rch) + throws DataAccessException { + query(psc, psb, rch, null); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.PreparedStatementBinder, org.springframework.cassandra.core.RowMapper, org.springframework.cassandra.core.QueryOptions) + */ + @Override + public List query(PreparedStatementCreator psc, final PreparedStatementBinder psb, + final RowMapper rowMapper, final QueryOptions options) throws DataAccessException { + Assert.notNull(rowMapper, "RowMapper must not be null"); + logger.debug("Executing prepared CQL query"); + + 
return execute(psc, new PreparedStatementCallback>() { + public List doInPreparedStatement(PreparedStatement ps) throws DriverException { + ResultSet rs = null; + BoundStatement bs = null; + if (psb != null) { + bs = psb.bindValues(ps); + } else { + bs = ps.bind(); + } + rs = doExecute(bs, options); + + return process(rs, rowMapper); + } + }); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.CassandraOperations#query(org.springframework.cassandra.core.PreparedStatementCreator, org.springframework.cassandra.core.PreparedStatementBinder, org.springframework.cassandra.core.RowMapper) + */ + @Override + public List query(PreparedStatementCreator psc, PreparedStatementBinder psb, RowMapper rowMapper) + throws DataAccessException { + return query(psc, psb, rowMapper, null); + } + +} \ No newline at end of file diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/ConsistencyLevel.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/ConsistencyLevel.java new file mode 100644 index 000000000..018f22eed --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/ConsistencyLevel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +/** + * Generic Consistency Levels associated with Cassandra. 
+ * + * @author David Webb + * + */ +public enum ConsistencyLevel { + + ANY, ONE, TWO, THREE, QUOROM, LOCAL_QUOROM, EACH_QUOROM, ALL + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/ConsistencyLevelResolver.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/ConsistencyLevelResolver.java new file mode 100644 index 000000000..f20909412 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/ConsistencyLevelResolver.java @@ -0,0 +1,78 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +/** + * Determine driver consistency level based on ConsistencyLevel + * + * @author David Webb + * + */ +public final class ConsistencyLevelResolver { + + /** + * No instances allowed + */ + private ConsistencyLevelResolver() { + } + + /** + * Decode the generic spring data cassandra enum to the type required by the DataStax Driver. + * + * @param level + * @return The DataStax Driver Consistency Level. 
+ */ + public static com.datastax.driver.core.ConsistencyLevel resolve(ConsistencyLevel level) { + + com.datastax.driver.core.ConsistencyLevel resolvedLevel = com.datastax.driver.core.ConsistencyLevel.ONE; + + /* + * Determine the driver level based on our enum + */ + switch (level) { + case ONE: + resolvedLevel = com.datastax.driver.core.ConsistencyLevel.ONE; + break; + case ALL: + resolvedLevel = com.datastax.driver.core.ConsistencyLevel.ALL; + break; + case ANY: + resolvedLevel = com.datastax.driver.core.ConsistencyLevel.ANY; + break; + case EACH_QUOROM: + resolvedLevel = com.datastax.driver.core.ConsistencyLevel.EACH_QUORUM; + break; + case LOCAL_QUOROM: + resolvedLevel = com.datastax.driver.core.ConsistencyLevel.LOCAL_QUORUM; + break; + case QUOROM: + resolvedLevel = com.datastax.driver.core.ConsistencyLevel.QUORUM; + break; + case THREE: + resolvedLevel = com.datastax.driver.core.ConsistencyLevel.THREE; + break; + case TWO: + resolvedLevel = com.datastax.driver.core.ConsistencyLevel.TWO; + break; + default: + break; + } + + return resolvedLevel; + + } + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/HostMapper.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/HostMapper.java new file mode 100644 index 000000000..dee1969b7 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/HostMapper.java @@ -0,0 +1,28 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +import java.util.Collection; +import java.util.Set; + +import com.datastax.driver.core.Host; +import com.datastax.driver.core.exceptions.DriverException; + +public interface HostMapper { + + Collection mapHosts(Set host) throws DriverException; + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/Ordering.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/Ordering.java new file mode 100644 index 000000000..72fdc7632 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/Ordering.java @@ -0,0 +1,47 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +/** + * Enum for Cassandra primary key column ordering. + * + * @author Matthew T. Adams + */ +public enum Ordering { + + /** + * Ascending Cassandra column ordering. + */ + ASCENDING("ASC"), + + /** + * Descending Cassandra column ordering. + */ + DESCENDING("DESC"); + + private String cql; + + private Ordering(String cql) { + this.cql = cql; + } + + /** + * Returns the CQL keyword of this {@link Ordering}. 
+ */ + public String cql() { + return cql; + } +} \ No newline at end of file diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementBinder.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementBinder.java new file mode 100644 index 000000000..f4bd1cca2 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementBinder.java @@ -0,0 +1,30 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +import com.datastax.driver.core.BoundStatement; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.exceptions.DriverException; + +/** + * @author David Webb + * + */ +public interface PreparedStatementBinder { + + BoundStatement bindValues(PreparedStatement ps) throws DriverException; + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementCallback.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementCallback.java new file mode 100644 index 000000000..1b2ba5fda --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementCallback.java @@ -0,0 +1,31 @@ +/* + * Copyright 2011-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +import org.springframework.dao.DataAccessException; + +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.exceptions.DriverException; + +/** + * @author David Webb + * + */ +public interface PreparedStatementCallback { + + T doInPreparedStatement(PreparedStatement ps) throws DriverException, DataAccessException; + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementCreator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementCreator.java new file mode 100644 index 000000000..d95e92862 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementCreator.java @@ -0,0 +1,41 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core; + +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.exceptions.DriverException; + +/** + * Creates a PreparedStatement for the usage with the DataStax Java Driver + * + * @author David Webb + * + */ +public interface PreparedStatementCreator { + + /** + * Create a statement in this session. Allows implementations to use PreparedStatements. The CassandraTemplate will + * attempt to cache the PreparedStatement for future use without the overhead of re-preparing on the entire cluster. + * + * @param session Session to use to create statement + * @return a prepared statement + * @throws DriverException there is no need to catch DriverException that may be thrown in the implementation of this + * method. The CassandraTemplate class will handle them. + */ + PreparedStatement createPreparedStatement(Session session) throws DriverException; + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementCreatorImpl.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementCreatorImpl.java new file mode 100644 index 000000000..44a14f730 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/PreparedStatementCreatorImpl.java @@ -0,0 +1,69 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +import java.util.List; + +import com.datastax.driver.core.BoundStatement; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.exceptions.DriverException; + +/** + * @author David Webb + * + */ +public class PreparedStatementCreatorImpl implements PreparedStatementCreator, PreparedStatementBinder { + + private final String cql; + private List values; + + public PreparedStatementCreatorImpl(String cql) { + this.cql = cql; + } + + public PreparedStatementCreatorImpl(String cql, List values) { + this.cql = cql; + this.values = values; + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.PreparedStatementSetter#setValues(com.datastax.driver.core.PreparedStatement) + */ + @Override + public BoundStatement bindValues(PreparedStatement ps) throws DriverException { + // Nothing to set if there are no values + if (values == null) { + return new BoundStatement(ps); + } + + return ps.bind(values.toArray()); + + } + + public String getCql() { + return this.cql; + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.PreparedStatementCreator#createPreparedStatement(com.datastax.driver.core.Session) + */ + @Override + public PreparedStatement createPreparedStatement(Session session) throws DriverException { + return session.prepare(this.cql); + } + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/PrimaryKeyType.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/PrimaryKeyType.java new file mode 100644 index 000000000..6a729626a --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/PrimaryKeyType.java @@ -0,0 +1,35 @@ +/* + * Copyright 2011-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +/** + * Values representing primary key column types. + * + * @author Matthew T. Adams + * @author Alex Shvid + */ +public enum PrimaryKeyType { + + /** + * Used for a column that is part of the partition key. + */ + PARTITIONED, + + /** + * Used for a column that is clustered key. + */ + CLUSTERED +} \ No newline at end of file diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/QueryOptions.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/QueryOptions.java new file mode 100644 index 000000000..4d9df0f50 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/QueryOptions.java @@ -0,0 +1,72 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core; + + +/** + * Contains Query Options for Cassandra queries. This controls the Consistency Tuning and Retry Policy for a Query. + * + * @author David Webb + * + */ +public class QueryOptions { + + private ConsistencyLevel consistencyLevel; + private RetryPolicy retryPolicy; + private Integer ttl; + + /** + * @return Returns the consistencyLevel. + */ + public ConsistencyLevel getConsistencyLevel() { + return consistencyLevel; + } + + /** + * @param consistencyLevel The consistencyLevel to set. + */ + public void setConsistencyLevel(ConsistencyLevel consistencyLevel) { + this.consistencyLevel = consistencyLevel; + } + + /** + * @return Returns the retryPolicy. + */ + public RetryPolicy getRetryPolicy() { + return retryPolicy; + } + + /** + * @param retryPolicy The retryPolicy to set. + */ + public void setRetryPolicy(RetryPolicy retryPolicy) { + this.retryPolicy = retryPolicy; + } + + /** + * @return Returns the ttl. + */ + public Integer getTtl() { + return ttl; + } + + /** + * @param ttl The ttl to set. + */ + public void setTtl(Integer ttl) { + this.ttl = ttl; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/ResultSetExtractor.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/ResultSetExtractor.java new file mode 100644 index 000000000..c6dc5adaa --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/ResultSetExtractor.java @@ -0,0 +1,26 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +import org.springframework.dao.DataAccessException; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.exceptions.DriverException; + +public interface ResultSetExtractor { + + T extractData(ResultSet rs) throws DriverException, DataAccessException; +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/ResultSetFutureExtractor.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/ResultSetFutureExtractor.java new file mode 100644 index 000000000..750dfa691 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/ResultSetFutureExtractor.java @@ -0,0 +1,26 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core; + +import org.springframework.dao.DataAccessException; + +import com.datastax.driver.core.ResultSetFuture; +import com.datastax.driver.core.exceptions.DriverException; + +public interface ResultSetFutureExtractor { + + T extractData(ResultSetFuture rs) throws DriverException, DataAccessException; +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/RetryPolicy.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RetryPolicy.java new file mode 100644 index 000000000..5264afa3e --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RetryPolicy.java @@ -0,0 +1,28 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +/** + * Retry Policies associated with Cassandra. + * + * @author David Webb + * + */ +public enum RetryPolicy { + + DEFAULT, DOWNGRADING_CONSISTENCY, FALLTHROUGH, LOGGING + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/RetryPolicyResolver.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RetryPolicyResolver.java new file mode 100644 index 000000000..dfb93f221 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RetryPolicyResolver.java @@ -0,0 +1,67 @@ +/* + * Copyright 2011-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +import com.datastax.driver.core.policies.DefaultRetryPolicy; +import com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy; +import com.datastax.driver.core.policies.FallthroughRetryPolicy; + +/** + * Determine driver query retry policy + * + * @author David Webb + * + */ +public final class RetryPolicyResolver { + + /** + * No instances allowed + */ + private RetryPolicyResolver() { + } + + /** + * Decode the generic spring data cassandra enum to the type required by the DataStax Driver. + * + * @param policy + * @return The DataStax Driver RetryPolicy. 
+ */ + public static com.datastax.driver.core.policies.RetryPolicy resolve(RetryPolicy policy) { + + com.datastax.driver.core.policies.RetryPolicy resolvedPolicy = DefaultRetryPolicy.INSTANCE; + + /* + * Determine the driver level based on our enum + */ + switch (policy) { + case DEFAULT: + resolvedPolicy = DefaultRetryPolicy.INSTANCE; + break; + case DOWNGRADING_CONSISTENCY: + resolvedPolicy = DowngradingConsistencyRetryPolicy.INSTANCE; + break; + case FALLTHROUGH: + resolvedPolicy = FallthroughRetryPolicy.INSTANCE; + break; + default: + resolvedPolicy = DefaultRetryPolicy.INSTANCE; + break; + } + + return resolvedPolicy; + + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/RingMember.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RingMember.java new file mode 100644 index 000000000..705d1b6b7 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RingMember.java @@ -0,0 +1,43 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core; + +import java.io.Serializable; + +import com.datastax.driver.core.Host; + +/** + * @author David Webb + * + */ +public final class RingMember implements Serializable { + + /* + * Ring attributes + */ + public String hostName; + public String address; + public String DC; + public String rack; + + public RingMember(Host h) { + this.hostName = h.getAddress().getHostName(); + this.address = h.getAddress().getHostAddress(); + this.DC = h.getDatacenter(); + this.rack = h.getRack(); + } + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/RingMemberHostMapper.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RingMemberHostMapper.java new file mode 100644 index 000000000..d4a0e44ed --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RingMemberHostMapper.java @@ -0,0 +1,54 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import org.springframework.util.Assert; + +import com.datastax.driver.core.Host; +import com.datastax.driver.core.exceptions.DriverException; + +/** + * @author David Webb + * @param + * + */ +public class RingMemberHostMapper implements HostMapper { + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.HostMapper#mapHosts(java.util.Set) + */ + @Override + public List mapHosts(Set hosts) throws DriverException { + + List members = new ArrayList(); + + Assert.notNull(hosts); + Assert.notEmpty(hosts); + + RingMember r = null; + for (Host host : hosts) { + r = new RingMember(host); + members.add(r); + } + + return members; + + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowCallback.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowCallback.java new file mode 100644 index 000000000..57b12493f --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowCallback.java @@ -0,0 +1,29 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +import com.datastax.driver.core.Row; + +/** + * Simple internal callback to allow operations on a {@link Row}. 
+ * + * @author Alex Shvid + */ + +public interface RowCallback { + + T doWith(Row object); +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowCallbackHandler.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowCallbackHandler.java new file mode 100644 index 000000000..74132fb28 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowCallbackHandler.java @@ -0,0 +1,25 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +import com.datastax.driver.core.Row; +import com.datastax.driver.core.exceptions.DriverException; + +public interface RowCallbackHandler { + + void processRow(Row row) throws DriverException; + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowIterator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowIterator.java new file mode 100644 index 000000000..9fb98ce57 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowIterator.java @@ -0,0 +1,29 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + + +/** + * @author David Webb + * + */ +public interface RowIterator { + + Object[] next(); + + boolean hasNext(); + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowMapper.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowMapper.java new file mode 100644 index 000000000..2f10fccb1 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/RowMapper.java @@ -0,0 +1,25 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core; + +import com.datastax.driver.core.Row; +import com.datastax.driver.core.exceptions.DriverException; + +public interface RowMapper { + + T mapRow(Row row, int rowNum) throws DriverException; + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/SessionCallback.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/SessionCallback.java new file mode 100644 index 000000000..96a8d8167 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/SessionCallback.java @@ -0,0 +1,40 @@ +/* + * Copyright 2010-2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +import org.springframework.dao.DataAccessException; + +import com.datastax.driver.core.Session; + +/** + * Interface for operations on a Cassandra Session. 
+ * + * @author David Webb + * + * @param + */ +public interface SessionCallback { + + /** + * Perform the operation in the given Session + * + * @param s + * @return + * @throws DataAccessException + */ + T doInSession(Session s) throws DataAccessException; + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/SimplePreparedStatementCreator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/SimplePreparedStatementCreator.java new file mode 100644 index 000000000..b3a63e158 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/SimplePreparedStatementCreator.java @@ -0,0 +1,54 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core; + +import org.springframework.util.Assert; + +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.exceptions.DriverException; + +/** + * @author David Webb + * + */ +public class SimplePreparedStatementCreator implements PreparedStatementCreator { + + private final String cql; + + /** + * Create a PreparedStatementCreator from the provided CQL. 
+ * + * @param cql + */ + public SimplePreparedStatementCreator(String cql) { + Assert.notNull(cql, "CQL is required to create a PreparedStatement"); + this.cql = cql; + } + + public String getCql() { + return this.cql; + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.PreparedStatementCreator#createPreparedStatement(com.datastax.driver.core.Session) + */ + @Override + public PreparedStatement createPreparedStatement(Session session) throws DriverException { + return session.prepare(this.cql); + } + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/CqlStringUtils.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/CqlStringUtils.java new file mode 100644 index 000000000..b711c5a41 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/CqlStringUtils.java @@ -0,0 +1,140 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core.cql; + +import java.util.regex.Pattern; + +public class CqlStringUtils { + + protected static final String SINGLE_QUOTE = "\'"; + protected static final String DOUBLE_SINGLE_QUOTE = "\'\'"; + protected static final String DOUBLE_QUOTE = "\""; + protected static final String DOUBLE_DOUBLE_QUOTE = "\"\""; + protected static final String EMPTY_STRING = ""; + + public static StringBuilder noNull(StringBuilder sb) { + return sb == null ? new StringBuilder() : sb; + } + + public static final String UNESCAPED_DOUBLE_QUOTE_REGEX = "TODO"; + public static final Pattern UNESCAPED_DOUBLE_QUOTE_PATTERN = Pattern.compile(UNESCAPED_DOUBLE_QUOTE_REGEX); + + public static final String UNQUOTED_IDENTIFIER_REGEX = "[a-zA-Z_][a-zA-Z0-9_]*"; + public static final Pattern UNQUOTED_IDENTIFIER_PATTERN = Pattern.compile(UNQUOTED_IDENTIFIER_REGEX); + + public static boolean isUnquotedIdentifier(CharSequence chars) { + return UNQUOTED_IDENTIFIER_PATTERN.matcher(chars).matches(); + } + + public static void checkUnquotedIdentifier(CharSequence chars) { + if (!CqlStringUtils.isUnquotedIdentifier(chars)) { + throw new IllegalArgumentException("[" + chars + "] is not a valid CQL identifier"); + } + } + + public static final String QUOTED_IDENTIFIER_REGEX = "[a-zA-Z_]([a-zA-Z0-9_]|\"{2}+)*"; + public static final Pattern QUOTED_IDENTIFIER_PATTERN = Pattern.compile(QUOTED_IDENTIFIER_REGEX); + + public static boolean isQuotedIdentifier(CharSequence chars) { + return QUOTED_IDENTIFIER_PATTERN.matcher(chars).matches(); + } + + public static void checkQuotedIdentifier(CharSequence chars) { + if (!CqlStringUtils.isQuotedIdentifier(chars)) { + throw new IllegalArgumentException("[" + chars + "] is not a valid CQL quoted identifier"); + } + } + + public static boolean isIdentifier(CharSequence chars) { + return isUnquotedIdentifier(chars) || isQuotedIdentifier(chars); + } + + public static void checkIdentifier(CharSequence chars) { + if 
(!CqlStringUtils.isIdentifier(chars)) { + throw new IllegalArgumentException("[" + chars + "] is not a valid CQL quoted or unquoted identifier"); + } + } + + /** + * Renders the given string as a legal Cassandra identifier. + *
    + *
  • If the given identifier is a legal unquoted identifier, it is returned unchanged.
  • + *
  • If the given identifier is a legal quoted identifier, it is returned encased in double quotes.
  • + *
  • If the given identifier is illegal, an {@link IllegalArgumentException} is thrown.
  • + *
+ */ + public static String identifize(String candidate) { + + checkIdentifier(candidate); + + if (isUnquotedIdentifier(candidate)) { + return candidate; + } + // else it must be quoted + return doubleQuote(candidate); + } + + /** + * Renders the given string as a legal Cassandra string column or table option value, by escaping single quotes and + * encasing the result in single quotes. Given null, returns null. + */ + public static String valuize(String candidate) { + + if (candidate == null) { + return null; + } + return singleQuote(escapeSingle(candidate)); + } + + /** + * Doubles single quote characters (' -> ''). Given null, returns null. + */ + public static String escapeSingle(Object things) { + return things == null ? (String) null : things.toString().replace(SINGLE_QUOTE, DOUBLE_SINGLE_QUOTE); + } + + /** + * Doubles double quote characters (" -> ""). Given null, returns null. + */ + public static String escapeDouble(Object things) { + return things == null ? (String) null : things.toString().replace(DOUBLE_QUOTE, DOUBLE_DOUBLE_QUOTE); + } + + /** + * Surrounds given object's {@link Object#toString()} with single quotes. Given null, returns + * null. + */ + public static String singleQuote(Object thing) { + return thing == null ? (String) null : new StringBuilder().append(SINGLE_QUOTE).append(thing).append(SINGLE_QUOTE) + .toString(); + } + + /** + * Surrounds given object's {@link Object#toString()} with double quotes. Given null, returns + * null. + */ + public static String doubleQuote(Object thing) { + return thing == null ? (String) null : new StringBuilder().append(DOUBLE_QUOTE).append(thing).append(DOUBLE_QUOTE) + .toString(); + } + + /** + * Removed single quotes from quoted String option values + */ + public static String removeSingleQuotes(Object thing) { + return thing == null ? 
(String) null : ((String) thing).replaceAll(SINGLE_QUOTE, EMPTY_STRING); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AddColumnCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AddColumnCqlGenerator.java new file mode 100644 index 000000000..086770417 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AddColumnCqlGenerator.java @@ -0,0 +1,37 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; + +import org.springframework.cassandra.core.keyspace.AddColumnSpecification; + +/** + * CQL generator for generating an ADD clause of an ALTER TABLE statement. + * + * @author Matthew T. 
Adams + */ +public class AddColumnCqlGenerator extends ColumnChangeCqlGenerator { + + public AddColumnCqlGenerator(AddColumnSpecification specification) { + super(specification); + } + + public StringBuilder toCql(StringBuilder cql) { + return noNull(cql).append("ADD ").append(spec().getNameAsIdentifier()).append(" TYPE ") + .append(spec().getType().getName()); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AlterColumnCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AlterColumnCqlGenerator.java new file mode 100644 index 000000000..629805008 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AlterColumnCqlGenerator.java @@ -0,0 +1,37 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; + +import org.springframework.cassandra.core.keyspace.AlterColumnSpecification; + +/** + * CQL generator for generating an ALTER column clause of an ALTER TABLE statement. + * + * @author Matthew T. 
Adams + */ +public class AlterColumnCqlGenerator extends ColumnChangeCqlGenerator { + + public AlterColumnCqlGenerator(AlterColumnSpecification specification) { + super(specification); + } + + public StringBuilder toCql(StringBuilder cql) { + return noNull(cql).append("ALTER ").append(spec().getNameAsIdentifier()).append(" TYPE ") + .append(spec().getType().getName()); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AlterKeyspaceCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AlterKeyspaceCqlGenerator.java new file mode 100644 index 000000000..ffb404909 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AlterKeyspaceCqlGenerator.java @@ -0,0 +1,98 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; + +import java.util.Map; + +import org.springframework.cassandra.core.keyspace.AlterKeyspaceSpecification; +import org.springframework.cassandra.core.keyspace.Option; + +/** + * CQL generator for generating ALTER TABLE statements. + * + * @author Matthew T. 
Adams + */ +public class AlterKeyspaceCqlGenerator extends KeyspaceOptionsCqlGenerator { + + public AlterKeyspaceCqlGenerator(AlterKeyspaceSpecification specification) { + super(specification); + } + + public StringBuilder toCql(StringBuilder cql) { + cql = noNull(cql); + + preambleCql(cql); + optionsCql(cql); + + cql.append(";"); + + return cql; + } + + protected StringBuilder preambleCql(StringBuilder cql) { + return noNull(cql).append("ALTER KEYSPACE ").append(spec().getNameAsIdentifier()).append(" "); + } + + @SuppressWarnings( "unchecked" ) + protected StringBuilder optionsCql(StringBuilder cql) { + cql = noNull(cql); + + // begin options clause + Map options = spec().getOptions(); + + if (!options.isEmpty()) { + + // option preamble + boolean first = true; + cql.append(" WITH "); + // end option preamble + + if (!options.isEmpty()) { + for (String name : options.keySet()) { + // append AND if we're not on first option + if (first) { + first = false; + } else { + cql.append(" AND "); + } + + // append = + cql.append(name); + + Object value = options.get(name); + if (value == null) { // then assume string-only, valueless option like "COMPACT STORAGE" + continue; + } + + cql.append(" = "); + + if (value instanceof Map) { + optionValueMap((Map) value, cql); + continue; // end non-empty value map + } + + // else just use value as string + cql.append(value.toString()); + } + } + } + // end options + + return cql; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AlterTableCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AlterTableCqlGenerator.java new file mode 100644 index 000000000..ad8404d2f --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/AlterTableCqlGenerator.java @@ -0,0 +1,132 @@ +/* + * Copyright 2011-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; + +import java.util.Map; + +import org.springframework.cassandra.core.keyspace.AddColumnSpecification; +import org.springframework.cassandra.core.keyspace.AlterColumnSpecification; +import org.springframework.cassandra.core.keyspace.AlterTableSpecification; +import org.springframework.cassandra.core.keyspace.ColumnChangeSpecification; +import org.springframework.cassandra.core.keyspace.DropColumnSpecification; +import org.springframework.cassandra.core.keyspace.Option; +import org.springframework.cassandra.core.keyspace.TableOption; + +/** + * CQL generator for generating ALTER TABLE statements. + * + * @author Matthew T. 
Adams + */ +public class AlterTableCqlGenerator extends TableOptionsCqlGenerator { + + public AlterTableCqlGenerator(AlterTableSpecification specification) { + super(specification); + } + + public StringBuilder toCql(StringBuilder cql) { + cql = noNull(cql); + + preambleCql(cql); + changesCql(cql); + optionsCql(cql); + + cql.append(";"); + + return cql; + } + + protected StringBuilder preambleCql(StringBuilder cql) { + return noNull(cql).append("ALTER TABLE ").append(spec().getNameAsIdentifier()).append(" "); + } + + protected StringBuilder changesCql(StringBuilder cql) { + cql = noNull(cql); + + boolean first = true; + for (ColumnChangeSpecification change : spec().getChanges()) { + if (first) { + first = false; + } else { + cql.append(" "); + } + getCqlGeneratorFor(change).toCql(cql); + } + + return cql; + } + + protected ColumnChangeCqlGenerator getCqlGeneratorFor(ColumnChangeSpecification change) { + if (change instanceof AddColumnSpecification) { + return new AddColumnCqlGenerator((AddColumnSpecification) change); + } + if (change instanceof DropColumnSpecification) { + return new DropColumnCqlGenerator((DropColumnSpecification) change); + } + if (change instanceof AlterColumnSpecification) { + return new AlterColumnCqlGenerator((AlterColumnSpecification) change); + } + throw new IllegalArgumentException("unknown ColumnChangeSpecification type: " + change.getClass().getName()); + } + + @SuppressWarnings("unchecked") + protected StringBuilder optionsCql(StringBuilder cql) { + cql = noNull(cql); + + Map options = spec().getOptions(); + if (options == null || options.isEmpty()) { + return cql; + } + + cql.append(" WITH "); + boolean first = true; + for (String key : options.keySet()) { + + /* + * Compact storage is illegal on alter table. + * + * TODO - Is there a way to handle this in the specification? 
+ */ + if (key.equals(TableOption.COMPACT_STORAGE.getName())) { + throw new IllegalArgumentException("Alter table cannot contain the COMPACT STORAGE option"); + } + + if (first) { + first = false; + } else { + cql.append(" AND "); + } + + cql.append(key); + + Object value = options.get(key); + if (value == null) { + continue; + } + cql.append(" = "); + + if (value instanceof Map) { + optionValueMap((Map) value, cql); + continue; + } + + // else just use value as string + cql.append(value.toString()); + } + return cql; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/ColumnChangeCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/ColumnChangeCqlGenerator.java new file mode 100644 index 000000000..6a6bf2b6d --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/ColumnChangeCqlGenerator.java @@ -0,0 +1,50 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import org.springframework.cassandra.core.keyspace.ColumnChangeSpecification; +import org.springframework.util.Assert; + +/** + * Base class for column change CQL generators. + * + * @author Matthew T. Adams + * @param The corresponding {@link ColumnChangeSpecification} type for this CQL generator. 
+ */ +public abstract class ColumnChangeCqlGenerator { + + public abstract StringBuilder toCql(StringBuilder cql); + + private ColumnChangeSpecification specification; + + public ColumnChangeCqlGenerator(ColumnChangeSpecification specification) { + setSpecification(specification); + } + + protected void setSpecification(ColumnChangeSpecification specification) { + Assert.notNull(specification); + this.specification = specification; + } + + @SuppressWarnings("unchecked") + public T getSpecification() { + return (T) specification; + } + + protected T spec() { + return getSpecification(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/CreateIndexCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/CreateIndexCqlGenerator.java new file mode 100644 index 000000000..61636fe3d --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/CreateIndexCqlGenerator.java @@ -0,0 +1,52 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; + +import org.springframework.cassandra.core.keyspace.CreateIndexSpecification; +import org.springframework.util.StringUtils; + +/** + * CQL generator for generating a CREATE INDEX statement. 
+ * + * @author Matthew T. Adams + * @author David Webb + */ +public class CreateIndexCqlGenerator extends IndexNameCqlGenerator { + + public CreateIndexCqlGenerator(CreateIndexSpecification specification) { + super(specification); + } + + public StringBuilder toCql(StringBuilder cql) { + + cql = noNull(cql); + + cql.append("CREATE").append(spec().isCustom() ? " CUSTOM" : "").append(" INDEX ") + .append(spec().getIfNotExists() ? "IF NOT EXISTS " : "") + .append(StringUtils.hasText(spec().getNameAsIdentifier()) ? spec().getNameAsIdentifier() : "").append(" ON ") + .append(spec().getTableNameAsIdentifier()).append(" (").append(spec().getColumnName()).append(")"); + + if (spec().isCustom()) { + cql.append(" USING ").append(spec().getUsing()); + } + + cql.append(";"); + + return cql; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/CreateKeyspaceCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/CreateKeyspaceCqlGenerator.java new file mode 100644 index 000000000..f73c38a01 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/CreateKeyspaceCqlGenerator.java @@ -0,0 +1,103 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; + +import java.util.Map; + +import org.springframework.cassandra.core.keyspace.CreateKeyspaceSpecification; +import org.springframework.cassandra.core.keyspace.Option; + +/** + * CQL generator for generating a CREATE TABLE statement. + * + * @author Matthew T. Adams + * @author Alex Shvid + */ +public class CreateKeyspaceCqlGenerator extends KeyspaceCqlGenerator { + + public CreateKeyspaceCqlGenerator(CreateKeyspaceSpecification specification) { + super(specification); + } + + public StringBuilder toCql(StringBuilder cql) { + + cql = noNull(cql); + + preambleCql(cql); + optionsCql(cql); + + cql.append(";"); + + return cql; + } + + protected StringBuilder preambleCql(StringBuilder cql) { + return noNull(cql).append("CREATE KEYSPACE ").append(spec().getIfNotExists() ? "IF NOT EXISTS " : "") + .append(spec().getNameAsIdentifier()); + } + + @SuppressWarnings( "unchecked" ) + protected StringBuilder optionsCql(StringBuilder cql) { + cql = noNull(cql); + + cql.append( " " ); + + // begin options clause + Map options = spec().getOptions(); + + if (!options.isEmpty()) { + + // option preamble + boolean first = true; + cql.append("WITH "); + // end option preamble + + if (!options.isEmpty()) { + for (String name : options.keySet()) { + // append AND if we're not on first option + if (first) { + first = false; + } else { + cql.append(" AND "); + } + + // append = + cql.append(name); + + Object value = options.get(name); + if (value == null) { // then assume string-only, valueless option like "COMPACT STORAGE" + continue; + } + + cql.append(" = "); + + if (value instanceof Map) { + optionValueMap((Map) value, cql); + continue; // end non-empty value map + } + + // else just use value as string + cql.append(value.toString()); + } + } + } + // end options + + return cql; + } +} diff --git 
a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/CreateTableCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/CreateTableCqlGenerator.java new file mode 100644 index 000000000..c13d456ab --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/CreateTableCqlGenerator.java @@ -0,0 +1,194 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; +import static org.springframework.cassandra.core.PrimaryKeyType.PARTITIONED; +import static org.springframework.cassandra.core.PrimaryKeyType.CLUSTERED; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.springframework.cassandra.core.keyspace.ColumnSpecification; +import org.springframework.cassandra.core.keyspace.CreateTableSpecification; +import org.springframework.cassandra.core.keyspace.Option; + +/** + * CQL generator for generating a CREATE TABLE statement. + * + * @author Matthew T. 
Adams + * @author Alex Shvid + */ +public class CreateTableCqlGenerator extends TableCqlGenerator { + + public CreateTableCqlGenerator(CreateTableSpecification specification) { + super(specification); + } + + public StringBuilder toCql(StringBuilder cql) { + + cql = noNull(cql); + + preambleCql(cql); + columnsAndOptionsCql(cql); + + cql.append(";"); + + return cql; + } + + protected StringBuilder preambleCql(StringBuilder cql) { + return noNull(cql).append("CREATE TABLE ").append(spec().getIfNotExists() ? "IF NOT EXISTS " : "") + .append(spec().getNameAsIdentifier()); + } + + @SuppressWarnings("unchecked") + protected StringBuilder columnsAndOptionsCql(StringBuilder cql) { + + cql = noNull(cql); + + // begin columns + cql.append(" ("); + + List partitionKeys = new ArrayList(); + List clusterKeys = new ArrayList(); + for (ColumnSpecification col : spec().getColumns()) { + col.toCql(cql).append(", "); + + if (col.getKeyType() == PARTITIONED) { + partitionKeys.add(col); + } else if (col.getKeyType() == CLUSTERED) { + clusterKeys.add(col); + } + } + + // begin primary key clause + cql.append("PRIMARY KEY ("); + + if (partitionKeys.size() > 1) { + // begin partition key clause + cql.append("("); + } + + appendColumnNames(cql, partitionKeys); + + if (partitionKeys.size() > 1) { + cql.append(")"); + // end partition key clause + } + + if (!clusterKeys.isEmpty()) { + cql.append(", "); + } + + appendColumnNames(cql, clusterKeys); + + cql.append(")"); + // end primary key clause + + cql.append(")"); + // end columns + + StringBuilder ordering = createOrderingClause(clusterKeys); + // begin options + // begin option clause + Map options = spec().getOptions(); + + if (ordering != null || !options.isEmpty()) { + + // option preamble + boolean first = true; + cql.append(" WITH "); + // end option preamble + + if (ordering != null) { + cql.append(ordering); + first = false; + } + if (!options.isEmpty()) { + for (String name : options.keySet()) { + // append AND if we're not on 
first option + if (first) { + first = false; + } else { + cql.append(" AND "); + } + + // append = + cql.append(name); + + Object value = options.get(name); + if (value == null) { // then assume string-only, valueless option like "COMPACT STORAGE" + continue; + } + + cql.append(" = "); + + if (value instanceof Map) { + optionValueMap((Map) value, cql); + continue; // end non-empty value map + } + + // else just use value as string + cql.append(value.toString()); + } + } + } + // end options + + return cql; + } + + private static StringBuilder createOrderingClause(List columns) { + StringBuilder ordering = null; + boolean first = true; + for (ColumnSpecification col : columns) { + + if (col.getOrdering() != null) { // then ordering specified + if (ordering == null) { // then initialize ordering clause + ordering = new StringBuilder().append("CLUSTERING ORDER BY ("); + } + if (first) { + first = false; + } else { + ordering.append(", "); + } + ordering.append(col.getName()).append(" ").append(col.getOrdering().cql()); + } + } + if (ordering != null) { // then end ordering option + ordering.append(")"); + } + return ordering; + } + + private static void appendColumnNames(StringBuilder str, List columns) { + + boolean first = true; + for (ColumnSpecification col : columns) { + if (first) { + first = false; + } else { + str.append(", "); + } + str.append(col.getName()); + + } + + } + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropColumnCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropColumnCqlGenerator.java new file mode 100644 index 000000000..500ca2d59 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropColumnCqlGenerator.java @@ -0,0 +1,36 @@ +/* + * Copyright 2011-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; + +import org.springframework.cassandra.core.keyspace.DropColumnSpecification; + +/** + * CQL generator for generating a DROP column clause of an ALTER TABLE statement. + * + * @author Matthew T. Adams + */ +public class DropColumnCqlGenerator extends ColumnChangeCqlGenerator { + + public DropColumnCqlGenerator(DropColumnSpecification specification) { + super(specification); + } + + public StringBuilder toCql(StringBuilder cql) { + return noNull(cql).append("DROP ").append(spec().getNameAsIdentifier()); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropIndexCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropIndexCqlGenerator.java new file mode 100644 index 000000000..99c8d2f79 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropIndexCqlGenerator.java @@ -0,0 +1,39 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; + +import org.springframework.cassandra.core.keyspace.DropIndexSpecification; + +/** + * CQL generator for generating a DROP INDEX statement. + * + * @author Matthew T. Adams + * @author David Webb + */ +public class DropIndexCqlGenerator extends IndexNameCqlGenerator { + + public DropIndexCqlGenerator(DropIndexSpecification specification) { + super(specification); + } + + public StringBuilder toCql(StringBuilder cql) { + return noNull(cql).append("DROP INDEX ") + // .append(spec().getIfExists() ? "IF EXISTS " : "") + .append(spec().getNameAsIdentifier()).append(";"); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropKeyspaceCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropKeyspaceCqlGenerator.java new file mode 100644 index 000000000..63382bd88 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropKeyspaceCqlGenerator.java @@ -0,0 +1,37 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; + +import org.springframework.cassandra.core.keyspace.DropKeyspaceSpecification; + +/** + * CQL generator for generating a DROP TABLE statement. + * + * @author Matthew T. Adams + */ +public class DropKeyspaceCqlGenerator extends KeyspaceNameCqlGenerator { + + public DropKeyspaceCqlGenerator(DropKeyspaceSpecification specification) { + super(specification); + } + + public StringBuilder toCql(StringBuilder cql) { + return noNull(cql).append("DROP KEYSPACE ").append(spec().getIfExists() ? "IF EXISTS " : "") + .append(spec().getNameAsIdentifier()).append(";"); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropTableCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropTableCqlGenerator.java new file mode 100644 index 000000000..57a459b36 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/DropTableCqlGenerator.java @@ -0,0 +1,38 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; + +import org.springframework.cassandra.core.keyspace.DropTableSpecification; + +/** + * CQL generator for generating a DROP TABLE statement. + * + * @author Matthew T. Adams + */ +public class DropTableCqlGenerator extends TableNameCqlGenerator { + + public DropTableCqlGenerator(DropTableSpecification specification) { + super(specification); + } + + public StringBuilder toCql(StringBuilder cql) { + return noNull(cql).append("DROP TABLE ") + // .append(spec().getIfExists() ? "IF EXISTS " : "") + .append(spec().getNameAsIdentifier()).append(";"); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/IndexNameCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/IndexNameCqlGenerator.java new file mode 100644 index 000000000..03ffcef96 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/IndexNameCqlGenerator.java @@ -0,0 +1,51 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import org.springframework.cassandra.core.keyspace.IndexNameSpecification; +import org.springframework.util.Assert; + +public abstract class IndexNameCqlGenerator> { + + public abstract StringBuilder toCql(StringBuilder cql); + + private IndexNameSpecification specification; + + public IndexNameCqlGenerator(IndexNameSpecification specification) { + setSpecification(specification); + } + + protected void setSpecification(IndexNameSpecification specification) { + Assert.notNull(specification); + this.specification = specification; + } + + @SuppressWarnings("unchecked") + public T getSpecification() { + return (T) specification; + } + + /** + * Convenient synonymous method of {@link #getSpecification()}. + */ + protected T spec() { + return getSpecification(); + } + + public String toCql() { + return toCql(null).toString(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/KeyspaceCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/KeyspaceCqlGenerator.java new file mode 100644 index 000000000..9c3c87eb2 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/KeyspaceCqlGenerator.java @@ -0,0 +1,37 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import org.springframework.cassandra.core.keyspace.KeyspaceSpecification; + +/** + * Base class that contains behavior common to CQL generation for table operations. + * + * @author Matthew T. Adams + * @param T The subtype of this class for which this is a CQL generator. + */ +public abstract class KeyspaceCqlGenerator> extends + KeyspaceOptionsCqlGenerator> { + + public KeyspaceCqlGenerator(KeyspaceSpecification specification) { + super(specification); + } + + @SuppressWarnings("unchecked") + protected T spec() { + return (T) getSpecification(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/KeyspaceNameCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/KeyspaceNameCqlGenerator.java new file mode 100644 index 000000000..fbaa6b7dc --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/KeyspaceNameCqlGenerator.java @@ -0,0 +1,51 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import org.springframework.cassandra.core.keyspace.KeyspaceNameSpecification; +import org.springframework.util.Assert; + +public abstract class KeyspaceNameCqlGenerator> { + + public abstract StringBuilder toCql(StringBuilder cql); + + private KeyspaceNameSpecification specification; + + public KeyspaceNameCqlGenerator(KeyspaceNameSpecification specification) { + setSpecification(specification); + } + + protected void setSpecification(KeyspaceNameSpecification specification) { + Assert.notNull(specification); + this.specification = specification; + } + + @SuppressWarnings("unchecked") + public T getSpecification() { + return (T) specification; + } + + /** + * Convenient synonymous method of {@link #getSpecification()}. + */ + protected T spec() { + return getSpecification(); + } + + public String toCql() { + return toCql(null).toString(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/KeyspaceOptionsCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/KeyspaceOptionsCqlGenerator.java new file mode 100644 index 000000000..8078ebf4c --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/KeyspaceOptionsCqlGenerator.java @@ -0,0 +1,80 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.escapeSingle; +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; +import static org.springframework.cassandra.core.cql.CqlStringUtils.singleQuote; + +import java.util.Map; + +import org.springframework.cassandra.core.keyspace.KeyspaceOptionsSpecification; +import org.springframework.cassandra.core.keyspace.Option; + +/** + * Base class that contains behavior common to CQL generation for table operations. + * + * @author Matthew T. Adams + * @param T The subtype of this class for which this is a CQL generator. + */ +public abstract class KeyspaceOptionsCqlGenerator> extends + KeyspaceNameCqlGenerator> { + + public KeyspaceOptionsCqlGenerator(KeyspaceOptionsSpecification specification) { + super(specification); + } + + @SuppressWarnings("unchecked") + protected T spec() { + return (T) getSpecification(); + } + + protected StringBuilder optionValueMap(Map valueMap, StringBuilder cql) { + cql = noNull(cql); + + if (valueMap == null || valueMap.isEmpty()) { + return cql; + } + // else option value is a non-empty map + + // append { 'name' : 'value', ... 
} + cql.append("{ "); + boolean mapFirst = true; + for (Map.Entry entry : valueMap.entrySet()) { + if (mapFirst) { + mapFirst = false; + } else { + cql.append(", "); + } + + Option option = entry.getKey(); + cql.append(singleQuote(option.getName())); // entries in map keys are always quoted + cql.append(" : "); + Object entryValue = entry.getValue(); + entryValue = entryValue == null ? "" : entryValue.toString(); + if (option.escapesValue()) { + entryValue = escapeSingle(entryValue); + } + if (option.quotesValue()) { + entryValue = singleQuote(entryValue); + } + cql.append(entryValue); + } + cql.append(" }"); + + return cql; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/TableCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/TableCqlGenerator.java new file mode 100644 index 000000000..857bb9503 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/TableCqlGenerator.java @@ -0,0 +1,37 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.cql.generator; + +import org.springframework.cassandra.core.keyspace.TableSpecification; + +/** + * Base class that contains behavior common to CQL generation for table operations. + * + * @author Matthew T. 
Adams + * @param T The subtype of this class for which this is a CQL generator. + */ +public abstract class TableCqlGenerator> extends + TableOptionsCqlGenerator> { + + public TableCqlGenerator(TableSpecification specification) { + super(specification); + } + + @SuppressWarnings("unchecked") + protected T spec() { + return (T) getSpecification(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/TableNameCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/TableNameCqlGenerator.java new file mode 100644 index 000000000..a09d4355d --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/TableNameCqlGenerator.java @@ -0,0 +1,51 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core.cql.generator; + +import org.springframework.cassandra.core.keyspace.TableNameSpecification; +import org.springframework.util.Assert; + +public abstract class TableNameCqlGenerator> { + + public abstract StringBuilder toCql(StringBuilder cql); + + private TableNameSpecification specification; + + public TableNameCqlGenerator(TableNameSpecification specification) { + setSpecification(specification); + } + + protected void setSpecification(TableNameSpecification specification) { + Assert.notNull(specification); + this.specification = specification; + } + + @SuppressWarnings("unchecked") + public T getSpecification() { + return (T) specification; + } + + /** + * Convenient synonymous method of {@link #getSpecification()}. + */ + protected T spec() { + return getSpecification(); + } + + public String toCql() { + return toCql(null).toString(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/TableOptionsCqlGenerator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/TableOptionsCqlGenerator.java new file mode 100644 index 000000000..dc98ce529 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/cql/generator/TableOptionsCqlGenerator.java @@ -0,0 +1,80 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core.cql.generator; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.escapeSingle; +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; +import static org.springframework.cassandra.core.cql.CqlStringUtils.singleQuote; + +import java.util.Map; + +import org.springframework.cassandra.core.keyspace.Option; +import org.springframework.cassandra.core.keyspace.TableOptionsSpecification; + +/** + * Base class that contains behavior common to CQL generation for table operations. + * + * @author Matthew T. Adams + * @param T The subtype of this class for which this is a CQL generator. + */ +public abstract class TableOptionsCqlGenerator> extends + TableNameCqlGenerator> { + + public TableOptionsCqlGenerator(TableOptionsSpecification specification) { + super(specification); + } + + @SuppressWarnings("unchecked") + protected T spec() { + return (T) getSpecification(); + } + + protected StringBuilder optionValueMap(Map valueMap, StringBuilder cql) { + cql = noNull(cql); + + if (valueMap == null || valueMap.isEmpty()) { + return cql; + } + // else option value is a non-empty map + + // append { 'name' : 'value', ... } + cql.append("{ "); + boolean mapFirst = true; + for (Map.Entry entry : valueMap.entrySet()) { + if (mapFirst) { + mapFirst = false; + } else { + cql.append(", "); + } + + Option option = entry.getKey(); + cql.append(singleQuote(option.getName())); // entries in map keys are always quoted + cql.append(" : "); + Object entryValue = entry.getValue(); + entryValue = entryValue == null ? 
"" : entryValue.toString(); + if (option.escapesValue()) { + entryValue = escapeSingle(entryValue); + } + if (option.quotesValue()) { + entryValue = singleQuote(entryValue); + } + cql.append(entryValue); + } + cql.append(" }"); + + return cql; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AddColumnSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AddColumnSpecification.java new file mode 100644 index 000000000..dd77a2abf --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AddColumnSpecification.java @@ -0,0 +1,25 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core.keyspace; + +import com.datastax.driver.core.DataType; + +public class AddColumnSpecification extends ColumnTypeChangeSpecification { + + public AddColumnSpecification(String name, DataType type) { + super(name, type); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AlterColumnSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AlterColumnSpecification.java new file mode 100644 index 000000000..27d287555 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AlterColumnSpecification.java @@ -0,0 +1,25 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core.keyspace; + +import com.datastax.driver.core.DataType; + +public class AlterColumnSpecification extends ColumnTypeChangeSpecification { + + public AlterColumnSpecification(String name, DataType type) { + super(name, type); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AlterKeyspaceSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AlterKeyspaceSpecification.java new file mode 100644 index 000000000..e62d660b6 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AlterKeyspaceSpecification.java @@ -0,0 +1,12 @@ +package org.springframework.cassandra.core.keyspace; + +public class AlterKeyspaceSpecification extends KeyspaceOptionsSpecification { + + /** + * Entry point into the {@link AlterKeyspaceSpecification}'s fluent API to alter a keyspace. Convenient if imported + * statically. + */ + public static AlterKeyspaceSpecification alterKeyspace() { + return new AlterKeyspaceSpecification(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AlterTableSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AlterTableSpecification.java new file mode 100644 index 000000000..3bc4c2684 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/AlterTableSpecification.java @@ -0,0 +1,76 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import com.datastax.driver.core.DataType; + +/** + * Builder class to construct an ALTER TABLE specification. + * + * @author Matthew T. Adams + */ +public class AlterTableSpecification extends TableOptionsSpecification { + + /** + * The list of column changes. + */ + private List changes = new ArrayList(); + + /* + * Adds a DROP to the list of column changes. + * + * DW Removed as this only works in C* 2.0 + */ + // public AlterTableSpecification drop(String column) { + // changes.add(new DropColumnSpecification(column)); + // return this; + // } + + /** + * Adds an ADD to the list of column changes. + */ + public AlterTableSpecification add(String column, DataType type) { + changes.add(new AddColumnSpecification(column, type)); + return this; + } + + /** + * Adds an ALTER to the list of column changes. + */ + public AlterTableSpecification alter(String column, DataType type) { + changes.add(new AlterColumnSpecification(column, type)); + return this; + } + + /** + * Returns an unmodifiable list of column changes. + */ + public List getChanges() { + return Collections.unmodifiableList(changes); + } + + /** + * Entry point into the {@link AlterTableSpecification}'s fluent API to alter a table. Convenient if imported + * statically. 
+ */ + public static AlterTableSpecification alterTable() { + return new AlterTableSpecification(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/ColumnChangeSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/ColumnChangeSpecification.java new file mode 100644 index 000000000..6cea473f8 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/ColumnChangeSpecification.java @@ -0,0 +1,46 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.checkIdentifier; +import static org.springframework.cassandra.core.cql.CqlStringUtils.identifize; + +/** + * Base class for column change specifications. + * + * @author Matthew T. 
Adams + */ +public abstract class ColumnChangeSpecification { + + private String name; + + public ColumnChangeSpecification(String name) { + setName(name); + } + + private void setName(String name) { + checkIdentifier(name); + this.name = name; + } + + public String getName() { + return name; + } + + public String getNameAsIdentifier() { + return identifize(name); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/ColumnSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/ColumnSpecification.java new file mode 100644 index 000000000..9c2d0ba03 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/ColumnSpecification.java @@ -0,0 +1,183 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core.keyspace; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.checkIdentifier; +import static org.springframework.cassandra.core.cql.CqlStringUtils.identifize; +import static org.springframework.cassandra.core.cql.CqlStringUtils.noNull; +import static org.springframework.cassandra.core.PrimaryKeyType.PARTITIONED; +import static org.springframework.cassandra.core.PrimaryKeyType.CLUSTERED; +import static org.springframework.cassandra.core.Ordering.ASCENDING; + +import org.springframework.cassandra.core.PrimaryKeyType; +import org.springframework.cassandra.core.Ordering; + +import com.datastax.driver.core.DataType; + +/** + * Builder class to specify columns. + *

+ * Use {@link #name(String)} and {@link #type(String)} to set the name and type of the column, respectively. To specify + * a clustered PRIMARY KEY column, use {@link #clustered()} or {@link #clustered(Ordering)}. To specify + * that the PRIMARY KEY column is or is part of the partition key, use {@link #partitioned()} instead of + * {@link #clustered()} or {@link #clustered(Ordering)}. + * + * @author Matthew T. Adams + * @author Alex Shvid + */ +public class ColumnSpecification { + + /** + * Default ordering of primary key fields; value is {@link Ordering#ASCENDING}. + */ + public static final Ordering DEFAULT_ORDERING = ASCENDING; + + private String name; + private DataType type; // TODO: determining if we should be coupling this to Datastax Java Driver type? + private PrimaryKeyType keyType; + private Ordering ordering; + + /** + * Sets the column's name. + * + * @return this + */ + public ColumnSpecification name(String name) { + checkIdentifier(name); + this.name = name; + return this; + } + + /** + * Sets the column's type. + * + * @return this + */ + public ColumnSpecification type(DataType type) { + this.type = type; + return this; + } + + /** + * Identifies this column as a primary key column that is also part of a partition key. Sets the column's + * {@link #keyType} to {@link PrimaryKeyType#PARTITIONED} and its {@link #ordering} to null. + * + * @return this + */ + public ColumnSpecification partitioned() { + return partitioned(true); + } + + /** + * Toggles the identification of this column as a primary key column that also is or is part of a partition key. Sets + * {@link #ordering} to null and, if the given boolean is true, then sets the column's + * {@link #keyType} to {@link PrimaryKeyType#PARTITIONED}, else sets it to null. + * + * @return this + */ + public ColumnSpecification partitioned(boolean partitioned) { + this.keyType = partitioned ? 
PARTITIONED : null; + this.ordering = null; + return this; + } + + /** + * Identifies this column as a clustered key column with default ordering. Sets the column's {@link #keyType} to + * {@link PrimaryKeyType#CLUSTERED} and its {@link #ordering} to {@link #DEFAULT_ORDERING}. + * + * @return this + */ + public ColumnSpecification clustered() { + return clustered(DEFAULT_ORDERING); + } + + /** + * Identifies this column as a clustered key column with the given ordering. Sets the column's {@link #keyType} to + * {@link PrimaryKeyType#CLUSTERED} and its {@link #ordering} to the given {@link Ordering}. + * + * @return this + */ + public ColumnSpecification clustered(Ordering order) { + return clustered(order, true); + } + + /** + * Toggles the identification of this column as a clustered key column. If the given boolean is true, + * then sets the column's {@link #keyType} to {@link PrimaryKeyType#PARTITIONED} and {@link #ordering} to the given + * {@link Ordering} , else sets both {@link #keyType} and {@link #ordering} to null. + * + * @return this + */ + public ColumnSpecification clustered(Ordering order, boolean primary) { + this.keyType = primary ? CLUSTERED : null; + this.ordering = primary ? order : null; + return this; + } + + /** + * Sets the column's {@link #keyType}. + * + * @return this + */ + /* package */ColumnSpecification keyType(PrimaryKeyType keyType) { + this.keyType = keyType; + return this; + } + + /** + * Sets the column's {@link #ordering}. 
+ * + * @return this + */ + /* package */ColumnSpecification ordering(Ordering ordering) { + this.ordering = ordering; + return this; + } + + public String getName() { + return name; + } + + public String getNameAsIdentifier() { + return identifize(name); + } + + public DataType getType() { + return type; + } + + public PrimaryKeyType getKeyType() { + return keyType; + } + + public Ordering getOrdering() { + return ordering; + } + + public String toCql() { + return toCql(null).toString(); + } + + public StringBuilder toCql(StringBuilder cql) { + return (cql = noNull(cql)).append(name).append(" ").append(type); + } + + @Override + public String toString() { + return toCql(null).append(" /* keyType=").append(keyType).append(", ordering=").append(ordering).append(" */ ") + .toString(); + } +} \ No newline at end of file diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/ColumnTypeChangeSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/ColumnTypeChangeSpecification.java new file mode 100644 index 000000000..814cfcd7a --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/ColumnTypeChangeSpecification.java @@ -0,0 +1,44 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core.keyspace; + +import org.springframework.util.Assert; + +import com.datastax.driver.core.DataType; + +/** + * Base class for column changes that include {@link DataType} information. + * + * @author Matthew T. Adams + */ +public abstract class ColumnTypeChangeSpecification extends ColumnChangeSpecification { + + private DataType type; + + public ColumnTypeChangeSpecification(String name, DataType type) { + super(name); + setType(type); + } + + private void setType(DataType type) { + Assert.notNull(type); + this.type = type; + } + + public DataType getType() { + return type; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/CreateIndexSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/CreateIndexSpecification.java new file mode 100644 index 000000000..1f9f89d46 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/CreateIndexSpecification.java @@ -0,0 +1,118 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.core.keyspace; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.checkIdentifier; +import static org.springframework.cassandra.core.cql.CqlStringUtils.identifize; + +import org.springframework.util.StringUtils; + +/** + * Builder class to construct a CREATE INDEX specification. + * + * @author Matthew T. Adams + * @author David Webb + */ +public class CreateIndexSpecification extends IndexNameSpecification implements + IndexDescriptor { + + private boolean ifNotExists = false; + private boolean custom = false; + private String tableName; + private String columnName; + private String using; + + /** + * Causes the inclusion of an IF NOT EXISTS clause. + * + * @return this + */ + public CreateIndexSpecification ifNotExists() { + return ifNotExists(true); + } + + /** + * Toggles the inclusion of an IF NOT EXISTS clause. + * + * @return this + */ + public CreateIndexSpecification ifNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + return this; + } + + public boolean getIfNotExists() { + return ifNotExists; + } + + public boolean isCustom() { + return custom; + } + + public CreateIndexSpecification using(String className) { + + if (StringUtils.hasText(className)) { + this.using = className; + this.custom = true; + } else { + this.using = null; + this.custom = false; + } + + return this; + } + + public String getUsing() { + return using; + } + + public String getColumnName() { + return columnName; + } + + /** + * Sets the table name. 
+ * + * @return this + */ + public CreateIndexSpecification tableName(String tableName) { + checkIdentifier(tableName); + this.tableName = tableName; + return this; + } + + public String getTableName() { + return tableName; + } + + public String getTableNameAsIdentifier() { + return identifize(tableName); + } + + public CreateIndexSpecification columnName(String columnName) { + this.columnName = columnName; + return this; + } + + /** + * Entry point into the {@link CreateIndexSpecification}'s fluent API to create a index. Convenient if imported + * statically. + */ + public static CreateIndexSpecification createIndex() { + return new CreateIndexSpecification(); + } + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/CreateKeyspaceSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/CreateKeyspaceSpecification.java new file mode 100644 index 000000000..f945093e4 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/CreateKeyspaceSpecification.java @@ -0,0 +1,57 @@ +package org.springframework.cassandra.core.keyspace; + +public class CreateKeyspaceSpecification extends KeyspaceSpecification { + + private boolean ifNotExists = false; + + /** + * Causes the inclusion of an IF NOT EXISTS clause. + * + * @return this + */ + public CreateKeyspaceSpecification ifNotExists() { + return ifNotExists(true); + } + + /** + * Toggles the inclusion of an IF NOT EXISTS clause. + * + * @return this + */ + public CreateKeyspaceSpecification ifNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + return this; + } + + public boolean getIfNotExists() { + return ifNotExists; + } + + /** + * Entry point into the {@link CreateKeyspaceSpecification}'s fluent API to create a keyspace. Convenient if imported + * statically. 
+ */ + public static CreateKeyspaceSpecification createKeyspace() { + return new CreateKeyspaceSpecification(); + } + + @Override + public CreateKeyspaceSpecification name(String name) { + return (CreateKeyspaceSpecification) super.name(name); + } + + @Override + public CreateKeyspaceSpecification with(KeyspaceOption option) { + return (CreateKeyspaceSpecification) super.with(option); + } + + @Override + public CreateKeyspaceSpecification with(KeyspaceOption option, Object value) { + return (CreateKeyspaceSpecification) super.with(option, value); + } + + @Override + public CreateKeyspaceSpecification with(String name, Object value, boolean escape, boolean quote) { + return (CreateKeyspaceSpecification) super.with(name, value, escape, quote); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/CreateTableSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/CreateTableSpecification.java new file mode 100644 index 000000000..10b1b6674 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/CreateTableSpecification.java @@ -0,0 +1,72 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +/** + * Builder class to construct a CREATE TABLE specification. + * + * @author Matthew T. 
Adams + */ +public class CreateTableSpecification extends TableSpecification { + + private boolean ifNotExists = false; + + /** + * Causes the inclusion of an IF NOT EXISTS clause. + * + * @return this + */ + public CreateTableSpecification ifNotExists() { + return ifNotExists(true); + } + + /** + * Toggles the inclusion of an IF NOT EXISTS clause. + * + * @return this + */ + public CreateTableSpecification ifNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + return this; + } + + public boolean getIfNotExists() { + return ifNotExists; + } + + /** + * Entry point into the {@link CreateTableSpecification}'s fluent API to create a table. Convenient if imported + * statically. + */ + public static CreateTableSpecification createTable() { + return new CreateTableSpecification(); + } + + @Override + public CreateTableSpecification with(TableOption option) { + return (CreateTableSpecification) super.with(option); + } + + @Override + public CreateTableSpecification with(TableOption option, Object value) { + return (CreateTableSpecification) super.with(option, value); + } + + @Override + public CreateTableSpecification with(String name, Object value, boolean escape, boolean quote) { + return (CreateTableSpecification) super.with(name, value, escape, quote); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DefaultOption.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DefaultOption.java new file mode 100644 index 000000000..c48923d6b --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DefaultOption.java @@ -0,0 +1,172 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.escapeSingle; +import static org.springframework.cassandra.core.cql.CqlStringUtils.singleQuote; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.Collection; +import java.util.Map; + +import org.springframework.util.Assert; + +/** + * A default implementation of {@link Option}. + * + * @author Matthew T. Adams + */ +public class DefaultOption implements Option { + + private String name; + private Class type; + private boolean requiresValue; + private boolean escapesValue; + private boolean quotesValue; + + public DefaultOption(String name, Class type, boolean requiresValue, boolean escapesValue, boolean quotesValue) { + setName(name); + setType(type); + this.requiresValue = requiresValue; + this.escapesValue = escapesValue; + this.quotesValue = quotesValue; + + } + + protected void setName(String name) { + Assert.hasLength(name); + this.name = name; + } + + protected void setType(Class type) { + if (type != null) { + if (type.isInterface() && !(Map.class.isAssignableFrom(type) || Collection.class.isAssignableFrom(type))) { + throw new IllegalArgumentException("given type [" + type.getName() + "] must be a class, Map or Collection"); + } + } + this.type = type; + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + public boolean isCoerceable(Object value) { + if (value == null || type == null) { + return true; + } + + // check map + if 
(Map.class.isAssignableFrom(type)) { + return Map.class.isAssignableFrom(value.getClass()); + } + // check collection + if (Collection.class.isAssignableFrom(type)) { + return Collection.class.isAssignableFrom(value.getClass()); + } + // check enum + if (type.isEnum()) { + try { + String name = value instanceof Enum ? ((Enum) value).name() : value.toString(); + Enum.valueOf((Class) type, name); + return true; + } catch (NullPointerException x) { + return false; + } catch (IllegalArgumentException x) { + return false; + } + } + + // check class via String constructor + try { + Constructor ctor = type.getConstructor(String.class); + if (!ctor.isAccessible()) { + ctor.setAccessible(true); + } + ctor.newInstance(value.toString()); + return true; + } catch (InstantiationException e) { + } catch (IllegalAccessException e) { + } catch (IllegalArgumentException e) { + } catch (InvocationTargetException e) { + } catch (NoSuchMethodException e) { + } catch (SecurityException e) { + } + return false; + } + + public Class getType() { + return type; + } + + public String getName() { + return name; + } + + public boolean takesValue() { + return type != null; + } + + public boolean requiresValue() { + return this.requiresValue; + } + + public boolean escapesValue() { + return this.escapesValue; + } + + public boolean quotesValue() { + return this.quotesValue; + } + + public void checkValue(Object value) { + if (takesValue()) { + if (value == null) { + if (requiresValue) { + throw new IllegalArgumentException("Option [" + getName() + "] requires a value"); + } + return; // doesn't require a value, so null is ok + } + // else value is not null + if (isCoerceable(value)) { + return; + } + // else value is not coerceable into the expected type + throw new IllegalArgumentException("Option [" + getName() + "] takes value coerceable to type [" + + getType().getName() + "]"); + } + // else this option doesn't take a value + if (value != null) { + throw new
IllegalArgumentException("Option [" + getName() + "] takes no value"); + } + } + + public String toString(Object value) { + if (value == null) { + return null; + } + checkValue(value); + + String string = value.toString(); + string = escapesValue ? escapeSingle(string) : string; + string = quotesValue ? singleQuote(string) : string; + return string; + } + + @Override + public String toString() { + return "[name=" + name + ", type=" + (type == null ? null : type.getName()) + ", requiresValue=" + requiresValue + ", escapesValue=" + + escapesValue + ", quotesValue=" + quotesValue + "]"; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DefaultTableDescriptor.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DefaultTableDescriptor.java new file mode 100644 index 000000000..c39ed0dd8 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DefaultTableDescriptor.java @@ -0,0 +1,32 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +/** + * Convenient default implementation of {@link TableDescriptor} as an extension of {@link TableSpecification} that + * doesn't require the use of generics. + * + * @author Matthew T.
Adams + */ +public class DefaultTableDescriptor extends TableSpecification { + + /** + * Factory method to produce a new {@link DefaultTableDescriptor}. Convenient if imported statically. + */ + public static DefaultTableDescriptor table() { + return new DefaultTableDescriptor(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropColumnSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropColumnSpecification.java new file mode 100644 index 000000000..62828fd18 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropColumnSpecification.java @@ -0,0 +1,28 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +/** + * A specification to drop a column. + * + * @author Matthew T. 
Adams + */ +public class DropColumnSpecification extends ColumnChangeSpecification { + + public DropColumnSpecification(String name) { + super(name); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropIndexSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropIndexSpecification.java new file mode 100644 index 000000000..1502a7cc6 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropIndexSpecification.java @@ -0,0 +1,53 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +/** + * Builder class that supports the construction of DROP INDEX specifications. + * + * @author Matthew T. Adams + * @author David Webb + */ +public class DropIndexSpecification extends IndexNameSpecification { + + private boolean ifExists; + + /** + * Entry point into the {@link DropIndexSpecification}'s fluent API to drop a table. Convenient if imported + * statically. + */ + public static DropIndexSpecification dropIndex() { + return new DropIndexSpecification(); + } + + /* + * In CQL 3.1 this is supported so we can uncomment the exposure then. + * In the meantime, it will always be false and tests will pass. 
+ */ + + // public DropIndexSpecification ifExists() { + // return ifExists(true); + // } + // + // public DropIndexSpecification ifExists(boolean ifExists) { + // this.ifExists = ifExists; + // return this; + // } + // + // public boolean getIfExists() { + // return ifExists; + // } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropKeyspaceSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropKeyspaceSpecification.java new file mode 100644 index 000000000..839e793c5 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropKeyspaceSpecification.java @@ -0,0 +1,28 @@ +package org.springframework.cassandra.core.keyspace; + +public class DropKeyspaceSpecification extends KeyspaceNameSpecification { + + private boolean ifExists; + + public DropKeyspaceSpecification ifExists() { + return ifExists(true); + } + + public DropKeyspaceSpecification ifExists(boolean ifExists) { + this.ifExists = ifExists; + return this; + } + + public boolean getIfExists() { + return ifExists; + } + + /** + * Entry point into the {@link DropKeyspaceSpecification}'s fluent API to drop a keyspace. Convenient if imported + * statically. + */ + public static DropKeyspaceSpecification dropKeyspace() { + return new DropKeyspaceSpecification(); + } + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropTableSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropTableSpecification.java new file mode 100644 index 000000000..be0df1982 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/DropTableSpecification.java @@ -0,0 +1,49 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +/** + * Builder class that supports the construction of DROP TABLE specifications. + * + * @author Matthew T. Adams + */ +public class DropTableSpecification extends TableNameSpecification { + + // private boolean ifExists; + + // Added in Cassandra 2.0. + + // public DropTableSpecification ifExists() { + // return ifExists(true); + // } + // + // public DropTableSpecification ifExists(boolean ifExists) { + // this.ifExists = ifExists; + // return this; + // } + // + // public boolean getIfExists() { + // return ifExists; + // } + + /** + * Entry point into the {@link DropTableSpecification}'s fluent API to drop a table. Convenient if imported + * statically. + */ + public static DropTableSpecification dropTable() { + return new DropTableSpecification(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/IndexDescriptor.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/IndexDescriptor.java new file mode 100644 index 000000000..0df932c53 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/IndexDescriptor.java @@ -0,0 +1,53 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + + +/** + * Describes an index. + * + * @author Matthew T. Adams + * @author David Webb + */ +public interface IndexDescriptor { + + /** + * Returns the name of the index. + */ + String getName(); + + /** + * Returns the table name for the index. + */ + String getTableName(); + + /** + * Returns the name of the index as an identifier or quoted identifier as appropriate. + */ + String getNameAsIdentifier(); + + /** + * Returns the name of the table as an identifier or quoted identifier as appropriate. + */ + String getTableNameAsIdentifier(); + + String getColumnName(); + + String getUsing(); + + boolean isCustom(); + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/IndexNameSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/IndexNameSpecification.java new file mode 100644 index 000000000..22ea428a2 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/IndexNameSpecification.java @@ -0,0 +1,54 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.checkIdentifier; +import static org.springframework.cassandra.core.cql.CqlStringUtils.identifize; + +/** + * Abstract builder class to support the construction of table specifications. + * + * @author David Webb + * @param The subtype of the {@link IndexNameSpecification} + */ +public abstract class IndexNameSpecification> { + + /** + * The name of the index. + */ + private String name; + + /** + * Sets the index name. + * + * @return this + */ + @SuppressWarnings("unchecked") + public T name(String name) { + checkIdentifier(name); + this.name = name; + return (T) this; + } + + public String getName() { + return name; + } + + public String getNameAsIdentifier() { + return identifize(name); + } + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceDescriptor.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceDescriptor.java new file mode 100644 index 000000000..0357c76f8 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceDescriptor.java @@ -0,0 +1,41 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +import java.util.Map; + +/** + * Describes a Keyspace. + * + * @author John McPeek + */ +public interface KeyspaceDescriptor { + + /** + * Returns the name of the keyspace. + */ + String getName(); + + /** + * Returns the name of the keyspace as an identifier or quoted identifier as appropriate. + */ + String getNameAsIdentifier(); + + /** + * Returns an unmodifiable {@link Map} of keyspace options. + */ + Map getOptions(); +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceNameSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceNameSpecification.java new file mode 100644 index 000000000..0bec92437 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceNameSpecification.java @@ -0,0 +1,39 @@ +package org.springframework.cassandra.core.keyspace; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.checkIdentifier; +import static org.springframework.cassandra.core.cql.CqlStringUtils.identifize; + +/** + * Abstract builder class to support the construction of keyspace specifications. + * + * @author John McPeek + * @param The subtype of the {@link KeyspaceNameSpecification} + */ +public abstract class KeyspaceNameSpecification> { + + /** + * The name of the keyspace. + */ + private String name; + + /** + * Sets the keyspace name.
+ * + * @return this + */ + @SuppressWarnings( "unchecked" ) + public T name(String name) { + checkIdentifier(name); + this.name = name; + return (T) this; + } + + public String getName() { + return name; + } + + public String getNameAsIdentifier() { + return identifize(name); + } + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceOption.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceOption.java new file mode 100644 index 000000000..3e5905f64 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceOption.java @@ -0,0 +1,79 @@ +package org.springframework.cassandra.core.keyspace; + +import java.util.Map; + +public enum KeyspaceOption implements Option { + REPLICATION("replication", Map.class, true, false, false), + + DURABLE_WRITES("durable_writes", Boolean.class, false, false, false); + + private Option delegate; + + private KeyspaceOption(String name, Class type, boolean requiresValue, boolean escapesValue, boolean quotesValue) { + this.delegate = new DefaultOption(name, type, requiresValue, escapesValue, quotesValue); + } + + public Class getType() { + return delegate.getType(); + } + + public boolean takesValue() { + return delegate.takesValue(); + } + + public String getName() { + return delegate.getName(); + } + + public boolean escapesValue() { + return delegate.escapesValue(); + } + + public boolean quotesValue() { + return delegate.quotesValue(); + } + + public boolean requiresValue() { + return delegate.requiresValue(); + } + + public void checkValue(Object value) { + delegate.checkValue(value); + } + + public boolean isCoerceable(Object value) { + return delegate.isCoerceable(value); + } + + public String toString() { + return delegate.toString(); + } + + public String toString(Object value) { + return delegate.toString(value); + } + + /** + * Known Replication Strategy options. 
+ * + * @author John McPeek + * + */ + public enum ReplicationStrategy { + SIMPLE_STRATEGY("SimpleStrategy"), NETWORK_TOPOLOGY_STRATEGY("NetworkTopologyStrategy"); + + private String value; + + private ReplicationStrategy(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public String toString() { + return getValue(); + } + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceOptionsSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceOptionsSpecification.java new file mode 100644 index 000000000..574e166a6 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceOptionsSpecification.java @@ -0,0 +1,94 @@ +package org.springframework.cassandra.core.keyspace; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.escapeSingle; +import static org.springframework.cassandra.core.cql.CqlStringUtils.singleQuote; + +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; + +import org.springframework.cassandra.core.cql.CqlStringUtils; + +/** + * Abstract builder class to support the construction of table specifications that have table options, that is, those + * options normally specified by WITH ... AND .... + *

+ * It is important to note that although this class depends on {@link KeyspaceOption} for convenient and typesafe use, it + * ultimately stores its options in a Map for flexibility. This means that + * {@link #with(KeyspaceOption)} and {@link #with(KeyspaceOption, Object)} delegate to + * {@link #with(String, Object, boolean, boolean)}. This design allows the API to support new Cassandra options as they + * are introduced without having to update the code immediately. + * + * @author John McPeek + * @param The subtype of the {@link KeyspaceOptionsSpecification}. + */ +public abstract class KeyspaceOptionsSpecification> extends + KeyspaceNameSpecification> { + + protected Map options = new LinkedHashMap(); + + @SuppressWarnings( "unchecked" ) + public T name(String name) { + return (T) super.name(name); + } + + /** + * Convenience method that calls with(option, null). + * + * @return this + */ + public T with(KeyspaceOption option) { + return with(option, null); + } + + /** + * Sets the given table option. This is a convenience method that calls + * {@link #with(String, Object, boolean, boolean)} appropriately from the given {@link KeyspaceOption} and value for that + * option. + * + * @param option The option to set. + * @param value The value of the option. Must be type-compatible with the {@link KeyspaceOption}. + * @return this + * @see #with(String, Object, boolean, boolean) + */ + public T with(KeyspaceOption option, Object value) { + option.checkValue(value); + return (T) with(option.getName(), value, option.escapesValue(), option.quotesValue()); + } + + /** + * Adds the given option by name to this keyspaces's options. + *

+ * Options that have null values are considered single string options where the name of the option is the + * string to be used. Otherwise, the result of {@link Object#toString()} is considered to be the value of the option + * with the given name. The value, after conversion to string, may have embedded single quotes escaped according to + * parameter escape and may be single-quoted according to parameter quote. + * + * @param name The name of the option + * @param value The value of the option. If null, the value is ignored and the option is considered to be + * composed of only the name, otherwise the value's {@link Object#toString()} value is used. + * @param escape Whether to escape the value via {@link CqlStringUtils#escapeSingle(Object)}. Ignored if given value + * is an instance of a {@link Map}. + * @param quote Whether to quote the value via {@link CqlStringUtils#singleQuote(Object)}. Ignored if given value is + * an instance of a {@link Map}. + * @return this + */ + @SuppressWarnings("unchecked") + public T with(String name, Object value, boolean escape, boolean quote) { + if (!(value instanceof Map)) { + if (escape) { + value = escapeSingle(value); + } + if (quote) { + value = singleQuote(value); + } + } + options.put(name, value); + return (T) this; + } + + public Map getOptions() { + return Collections.unmodifiableMap(options); + } + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceSpecification.java new file mode 100644 index 000000000..8cfc98636 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/KeyspaceSpecification.java @@ -0,0 +1,27 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + + + +/** + * Builder class to support the construction of keyspace specifications that have columns. This class can also be used as a + * standalone {@link KeyspaceDescriptor}, independent of {@link CreateKeyspaceSpecification}. + * + * @author John McPeek + */ +public class KeyspaceSpecification extends KeyspaceOptionsSpecification> implements KeyspaceDescriptor { +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/Option.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/Option.java new file mode 100644 index 000000000..beb2f9ddc --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/Option.java @@ -0,0 +1,76 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +/** + * Interface to represent option types. + * + * @author Matthew T. 
Adams + */ +public interface Option { + + /** + * The type that values must be able to be coerced into for this option. + */ + Class getType(); + + /** + * The (usually lower-cased, underscore-separated) name of this table option. + */ + String getName(); + + /** + * Whether this option takes a value. + */ + boolean takesValue(); + + /** + * Whether this option should escape single quotes in its value. + */ + boolean escapesValue(); + + /** + * Whether this option's value should be single-quoted. + */ + boolean quotesValue(); + + /** + * Whether this option requires a value. + */ + boolean requiresValue(); + + /** + * Checks that the given value can be coerced into the type given by {@link #getType()}. + */ + void checkValue(Object value); + + /** + * Tests whether the given value can be coerced into the type given by {@link #getType()}. + */ + boolean isCoerceable(Object value); + + /** + * First ensures that the given value is coerceable into the type expected by this option, then returns the result of + * {@link Object#toString()} called on the given value. If this option is escaping quotes ({@link #escapesValue()} is + * true), then single quotes will be escaped, and if this option is quoting values ( + * {@link #quotesValue()} is true), then the value will be surrounded by single quotes. Given + * null, returns null. + * + * @see #escapesValue() + * @see #quotesValue() + */ + String toString(Object value); +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableDescriptor.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableDescriptor.java new file mode 100644 index 000000000..116e1dedd --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableDescriptor.java @@ -0,0 +1,68 @@ +/* + * Copyright 2011-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +import java.util.List; +import java.util.Map; + +/** + * Describes a table. + * + * @author Matthew T. Adams + * @author Alex Shvid + */ +public interface TableDescriptor { + + /** + * Returns the name of the table. + */ + String getName(); + + /** + * Returns the name of the table as an identifier or quoted identifier as appropriate. + */ + String getNameAsIdentifier(); + + /** + * Returns an unmodifiable {@link List} of {@link ColumnSpecification}s. + */ + List getColumns(); + + /** + * Returns an unmodifiable list of all partition key columns. + */ + public List getPartitionKeyColumns(); + + /** + * Returns an unmodifiable list of all primary key columns that are not also partition key columns. + */ + public List getClusteredKeyColumns(); + + /** + * Returns an unmodifiable list of all partition and primary key columns. + */ + public List getPrimaryKeyColumns(); + + /** + * Returns an unmodifiable list of all non-key columns. + */ + public List getNonKeyColumns(); + + /** + * Returns an unmodifiable {@link Map} of table options.
+ */ + Map getOptions(); +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableNameSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableNameSpecification.java new file mode 100644 index 000000000..f99addd15 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableNameSpecification.java @@ -0,0 +1,53 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.checkIdentifier; +import static org.springframework.cassandra.core.cql.CqlStringUtils.identifize; + +/** + * Abstract builder class to support the construction of table specifications. + * + * @author Matthew T. Adams + * @param The subtype of the {@link TableNameSpecification} + */ +public abstract class TableNameSpecification> { + + /** + * The name of the table. + */ + private String name; + + /** + * Sets the table name. 
+ * + * @return this + */ + @SuppressWarnings("unchecked") + public T name(String name) { + checkIdentifier(name); + this.name = name; + return (T) this; + } + + public String getName() { + return name; + } + + public String getNameAsIdentifier() { + return identifize(name); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableOption.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableOption.java new file mode 100644 index 000000000..bac1a0cf8 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableOption.java @@ -0,0 +1,306 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +import java.util.Map; + +/** + * Enumeration that represents all known table options. If a table option is not listed here, but is supported by + * Cassandra, use the method {@link CreateTableSpecification#with(String, Object, boolean, boolean)} to write the raw + * value. + * + * Implements {@link Option} via delegation, since {@link Enum}s can't extend anything. + * + * @author Matthew T. 
Adams + * @see CompactionOption + * @see CompressionOption + * @see CachingOption + */ +public enum TableOption implements Option { + /** + * comment + */ + COMMENT("comment", String.class, true, true, true), + /** + * COMPACT STORAGE + */ + COMPACT_STORAGE("COMPACT STORAGE", null, false, false, false), + /** + * compaction. Value is a Map<CompactionOption,Object>. + * + * @see CompactionOption + */ + COMPACTION("compaction", Map.class, true, false, false), + /** + * compression. Value is a Map<CompressionOption,Object>. + * + * @see {@link CompressionOption} + */ + COMPRESSION("compression", Map.class, true, false, false), + /** + * replicate_on_write + */ + REPLICATE_ON_WRITE("replicate_on_write", Boolean.class, true, false, true), + /** + * caching + * + * @see CachingOption + */ + CACHING("caching", CachingOption.class, true, false, true), + /** + * bloom_filter_fp_chance + */ + BLOOM_FILTER_FP_CHANCE("bloom_filter_fp_chance", Double.class, true, false, false), + /** + * read_repair_chance + */ + READ_REPAIR_CHANCE("read_repair_chance", Double.class, true, false, false), + /** + * dclocal_read_repair_chance + */ + DCLOCAL_READ_REPAIR_CHANCE("dclocal_read_repair_chance", Double.class, true, false, false), + /** + * gc_grace_seconds + */ + GC_GRACE_SECONDS("gc_grace_seconds", Long.class, true, false, false); + + private Option delegate; + + private TableOption(String name, Class type, boolean requiresValue, boolean escapesValue, boolean quotesValue) { + this.delegate = new DefaultOption(name, type, requiresValue, escapesValue, quotesValue); + } + + public Class getType() { + return delegate.getType(); + } + + public boolean takesValue() { + return delegate.takesValue(); + } + + public String getName() { + return delegate.getName(); + } + + public boolean escapesValue() { + return delegate.escapesValue(); + } + + public boolean quotesValue() { + return delegate.quotesValue(); + } + + public boolean requiresValue() { + return delegate.requiresValue(); + } + + 
public void checkValue(Object value) { + delegate.checkValue(value); + } + + public boolean isCoerceable(Object value) { + return delegate.isCoerceable(value); + } + + public String toString() { + return delegate.toString(); + } + + public String toString(Object value) { + return delegate.toString(value); + } + + /** + * Known caching options. + * + * @author Matthew T. Adams + */ + public enum CachingOption { + ALL("all"), KEYS_ONLY("keys_only"), ROWS_ONLY("rows_only"), NONE("none"); + + private String value; + + private CachingOption(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public String toString() { + return getValue(); + } + } + + /** + * Known compaction options. + * + * @author Matthew T. Adams + */ + public enum CompactionOption implements Option { + /** + * class + */ + CLASS("class", String.class, true, false, true), + /** + * tombstone_threshold + */ + TOMBSTONE_THRESHOLD("tombstone_threshold", Double.class, true, false, false), + /** + * tombstone_compaction_interval + */ + TOMBSTONE_COMPACTION_INTERVAL("tombstone_compaction_interval", Double.class, true, false, false), + /** + * min_sstable_size + */ + MIN_SSTABLE_SIZE("min_sstable_size", Long.class, true, false, false), + /** + * min_threshold + */ + MIN_THRESHOLD("min_threshold", Long.class, true, false, false), + /** + * max_threshold + */ + MAX_THRESHOLD("max_threshold", Long.class, true, false, false), + /** + * bucket_low + */ + BUCKET_LOW("bucket_low", Double.class, true, false, false), + /** + * bucket_high + */ + BUCKET_HIGH("bucket_high", Double.class, true, false, false), + /** + * sstable_size_in_mb + */ + SSTABLE_SIZE_IN_MB("sstable_size_in_mb", Long.class, true, false, false); + + private Option delegate; + + private CompactionOption(String name, Class type, boolean requiresValue, boolean escapesValue, + boolean quotesValue) { + this.delegate = new DefaultOption(name, type, requiresValue, escapesValue, quotesValue); + } 
+ + public Class getType() { + return delegate.getType(); + } + + public boolean takesValue() { + return delegate.takesValue(); + } + + public String getName() { + return delegate.getName(); + } + + public boolean escapesValue() { + return delegate.escapesValue(); + } + + public boolean quotesValue() { + return delegate.quotesValue(); + } + + public boolean requiresValue() { + return delegate.requiresValue(); + } + + public void checkValue(Object value) { + delegate.checkValue(value); + } + + public boolean isCoerceable(Object value) { + return delegate.isCoerceable(value); + } + + public String toString() { + return delegate.toString(); + } + + public String toString(Object value) { + return delegate.toString(value); + } + } + + /** + * Known compression options. + * + * @author Matthew T. Adams + */ + public enum CompressionOption implements Option { + /** + * sstable_compression + */ + SSTABLE_COMPRESSION("sstable_compression", String.class, true, false, true), + /** + * chunk_length_kb + */ + CHUNK_LENGTH_KB("chunk_length_kb", Long.class, true, false, false), + /** + * crc_check_chance + */ + CRC_CHECK_CHANCE("crc_check_chance", Double.class, true, false, false); + + private Option delegate; + + private CompressionOption(String name, Class type, boolean requiresValue, boolean escapesValue, + boolean quotesValue) { + this.delegate = new DefaultOption(name, type, requiresValue, escapesValue, quotesValue); + } + + public Class getType() { + return delegate.getType(); + } + + public boolean takesValue() { + return delegate.takesValue(); + } + + public String getName() { + return delegate.getName(); + } + + public boolean escapesValue() { + return delegate.escapesValue(); + } + + public boolean quotesValue() { + return delegate.quotesValue(); + } + + public boolean requiresValue() { + return delegate.requiresValue(); + } + + public void checkValue(Object value) { + delegate.checkValue(value); + } + + public boolean isCoerceable(Object value) { + return 
delegate.isCoerceable(value); + } + + public String toString() { + return delegate.toString(); + } + + public String toString(Object value) { + return delegate.toString(value); + } + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableOptionsSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableOptionsSpecification.java new file mode 100644 index 000000000..ad603a118 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableOptionsSpecification.java @@ -0,0 +1,108 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +import static org.springframework.cassandra.core.cql.CqlStringUtils.escapeSingle; +import static org.springframework.cassandra.core.cql.CqlStringUtils.singleQuote; + +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; + +import org.springframework.cassandra.core.cql.CqlStringUtils; + +/** + * Abstract builder class to support the construction of table specifications that have table options, that is, those + * options normally specified by WITH ... AND .... + *

+ * It is important to note that although this class depends on {@link TableOption} for convenient and typesafe use, it + * ultimately stores its options in a Map for flexibility. This means that + * {@link #with(TableOption)} and {@link #with(TableOption, Object)} delegate to + * {@link #with(String, Object, boolean, boolean)}. This design allows the API to support new Cassandra options as they + * are introduced without having to update the code immediately. + * + * @author Matthew T. Adams + * @param The subtype of the {@link TableOptionsSpecification}. + */ +public abstract class TableOptionsSpecification> extends + TableNameSpecification> { + + protected Map options = new LinkedHashMap(); + + @SuppressWarnings("unchecked") + public T name(String name) { + return (T) super.name(name); + } + + /** + * Convenience method that calls with(option, null). + * + * @return this + */ + public T with(TableOption option) { + return with(option, null); + } + + /** + * Sets the given table option. This is a convenience method that calls + * {@link #with(String, Object, boolean, boolean)} appropriately from the given {@link TableOption} and value for that + * option. + * + * @param option The option to set. + * @param value The value of the option. Must be type-compatible with the {@link TableOption}. + * @return this + * @see #with(String, Object, boolean, boolean) + */ + public T with(TableOption option, Object value) { + option.checkValue(value); + return (T) with(option.getName(), value, option.escapesValue(), option.quotesValue()); + } + + /** + * Adds the given option by name to this table's options. + *

+ * Options that have null values are considered single string options where the name of the option is the + * string to be used. Otherwise, the result of {@link Object#toString()} is considered to be the value of the option + * with the given name. The value, after conversion to string, may have embedded single quotes escaped according to + * parameter escape and may be single-quoted according to parameter quote. + * + * @param name The name of the option + * @param value The value of the option. If null, the value is ignored and the option is considered to be + * composed of only the name, otherwise the value's {@link Object#toString()} value is used. + * @param escape Whether to escape the value via {@link CqlStringUtils#escapeSingle(Object)}. Ignored if given value + * is an instance of a {@link Map}. + * @param quote Whether to quote the value via {@link CqlStringUtils#singleQuote(Object)}. Ignored if given value is + * an instance of a {@link Map}. + * @return this + */ + @SuppressWarnings("unchecked") + public T with(String name, Object value, boolean escape, boolean quote) { + if (!(value instanceof Map)) { + if (escape) { + value = escapeSingle(value); + } + if (quote) { + value = singleQuote(value); + } + } + options.put(name, value); + return (T) this; + } + + public Map getOptions() { + return Collections.unmodifiableMap(options); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableSpecification.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableSpecification.java new file mode 100644 index 000000000..423e4b3ad --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/keyspace/TableSpecification.java @@ -0,0 +1,177 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.keyspace; + +import static org.springframework.cassandra.core.PrimaryKeyType.CLUSTERED; +import static org.springframework.cassandra.core.PrimaryKeyType.PARTITIONED; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.springframework.cassandra.core.Ordering; +import org.springframework.cassandra.core.PrimaryKeyType; + +import com.datastax.driver.core.DataType; + +/** + * Builder class to support the construction of table specifications that have columns. This class can also be used as a + * standalone {@link TableDescriptor}, independent of {@link CreateTableSpecification}. + * + * @author Matthew T. Adams + * @author Alex Shvid + */ +public class TableSpecification extends TableOptionsSpecification> implements TableDescriptor { + + /** + * List of all columns. + */ + private List columns = new ArrayList(); + + /** + * List of only those columns that comprise the partition key. + */ + private List partitionKeyColumns = new ArrayList(); + + /** + * List of only those columns that comprise the primary key that are not also part of the partition key. + */ + private List clusteredKeyColumns = new ArrayList(); + + /** + * List of only those columns that are not partition or primary key columns. + */ + private List nonKeyColumns = new ArrayList(); + + /** + * Adds the given non-key column to the table. Must be specified after all primary key columns. 
+ * + * @param name The column name; must be a valid unquoted or quoted identifier without the surrounding double quotes. + * @param type The data type of the column. + */ + public T column(String name, DataType type) { + return column(name, type, null, null); + } + + /** + * Adds the given partition key column to the table. Must be specified before any other columns. + * + * @param name The column name; must be a valid unquoted or quoted identifier without the surrounding double quotes. + * @param type The data type of the column. + * @return this + */ + public T partitionKeyColumn(String name, DataType type) { + return column(name, type, PARTITIONED, null); + } + + /** + * Adds the given primary key column to the table with ascending ordering. Must be specified after all partition key + * columns and before any non-key columns. + * + * @param name The column name; must be a valid unquoted or quoted identifier without the surrounding double quotes. + * @param type The data type of the column. + * @return this + */ + public T clusteredKeyColumn(String name, DataType type) { + return clusteredKeyColumn(name, type, null); + } + + /** + * Adds the given primary key column to the table with the given ordering (null meaning ascending). Must + * be specified after all partition key columns and before any non-key columns. + * + * @param name The column name; must be a valid unquoted or quoted identifier without the surrounding double quotes. + * @param type The data type of the column. + * @return this + */ + public T clusteredKeyColumn(String name, DataType type, Ordering ordering) { + return column(name, type, CLUSTERED, ordering); + } + + /** + * Adds the given info as a new column to the table. Partition key columns must precede primary key columns, which + * must precede non-key columns. + * + * @param name The column name; must be a valid unquoted or quoted identifier without the surrounding double quotes. + * @param type The data type of the column. 
+ * @param keyType Indicates key type. Null means that the column is not a key column. + * @param ordering If the given {@link PrimaryKeyType} is {@link PrimaryKeyType#CLUSTERED}, then the given ordering is + * used, else ignored. + * @return this + */ + @SuppressWarnings("unchecked") + protected T column(String name, DataType type, PrimaryKeyType keyType, Ordering ordering) { + + ColumnSpecification column = new ColumnSpecification().name(name).type(type).keyType(keyType) + .ordering(keyType == CLUSTERED ? ordering : null); + + columns.add(column); + + if (keyType == PrimaryKeyType.PARTITIONED) { + partitionKeyColumns.add(column); + } + + if (keyType == PrimaryKeyType.CLUSTERED) { + clusteredKeyColumns.add(column); + } + + if (keyType == null) { + nonKeyColumns.add(column); + } + + return (T) this; + } + + /** + * Returns an unmodifiable list of all columns. + */ + public List getColumns() { + return Collections.unmodifiableList(columns); + } + + /** + * Returns an unmodifiable list of all partition key columns. + */ + public List getPartitionKeyColumns() { + return Collections.unmodifiableList(partitionKeyColumns); + } + + /** + * Returns an unmodifiable list of all primary key columns that are not also partition key columns. + */ + public List getClusteredKeyColumns() { + return Collections.unmodifiableList(clusteredKeyColumns); + } + + /** + * Returns an unmodifiable list of all partition and primary key columns. + */ + public List getPrimaryKeyColumns() { + + ArrayList primaryKeyColumns = new ArrayList(); + primaryKeyColumns.addAll(partitionKeyColumns); + primaryKeyColumns.addAll(clusteredKeyColumns); + + return Collections.unmodifiableList(primaryKeyColumns); + } + + /** + * Returns an unmodifiable list of all non-key columns. 
+ */ + public List getNonKeyColumns() { + return Collections.unmodifiableList(nonKeyColumns); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/core/util/MapBuilder.java b/spring-cassandra/src/main/java/org/springframework/cassandra/core/util/MapBuilder.java new file mode 100644 index 000000000..dd999d137 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/core/util/MapBuilder.java @@ -0,0 +1,155 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.core.util; + +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Set; + +/** + * Builder for maps, which also conveniently implements {@link Map} via delegation for convenience so you don't have to + * actually {@link #build()} it. + * + * @author Matthew T. Adams + * @param The key type of the map. + * @param The value type of the map. + */ +public class MapBuilder implements Map { + + /** + * Factory method to construct a new MapBuilder<Object,Object>. Convenient if imported statically. + */ + public static MapBuilder map() { + return map(Object.class, Object.class); + } + + /** + * Factory method to construct a new builder with the given key & value types. Convenient if imported statically. 
+ */ + public static MapBuilder map(Class keyType, Class valueType) { + return new MapBuilder(); + } + + /** + * Factory method to construct a new builder with a shallow copy of the given map. Convenient if imported statically. + */ + public static MapBuilder map(Map source) { + return new MapBuilder(source); + } + + private Map map; + + public MapBuilder() { + this(new LinkedHashMap()); + } + + /** + * Constructs a new instance with a copy of the given map. + */ + public MapBuilder(Map source) { + this.map = new LinkedHashMap(source); + } + + /** + * Adds an entry to this map, then returns this. + * + * @return this + */ + public MapBuilder entry(K key, V value) { + map.put(key, value); + return this; + } + + /** + * Returns a new map based on the current state of this builder's map. + * + * @return A new Map with this builder's map's current content. + */ + public Map build() { + return new LinkedHashMap(map); + } + + @Override + public int size() { + return map.size(); + } + + @Override + public boolean isEmpty() { + return map.isEmpty(); + } + + @Override + public boolean containsKey(Object key) { + return map.containsKey(key); + } + + @Override + public boolean containsValue(Object value) { + return map.containsValue(value); + } + + @Override + public V get(Object key) { + return map.get(key); + } + + @Override + public V put(K key, V value) { + return map.put(key, value); + } + + @Override + public V remove(Object key) { + return map.remove(key); + } + + @Override + public void putAll(Map m) { + map.putAll(m); + } + + @Override + public void clear() { + map.clear(); + } + + @Override + public Set keySet() { + return map.keySet(); + } + + @Override + public Collection values() { + return map.values(); + } + + @Override + public Set> entrySet() { + return map.entrySet(); + } + + @Override + public boolean equals(Object o) { + return map.equals(o); + } + + @Override + public int hashCode() { + return map.hashCode(); + } +} diff --git 
a/spring-cassandra/src/main/java/org/springframework/cassandra/support/CassandraAccessor.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/CassandraAccessor.java new file mode 100644 index 000000000..b41e852e5 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/CassandraAccessor.java @@ -0,0 +1,76 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.support; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.InitializingBean; + +import com.datastax.driver.core.Session; + +/** + * @author David Webb + * + */ +public class CassandraAccessor implements InitializingBean { + + /** Logger available to subclasses */ + protected final Logger logger = LoggerFactory.getLogger(getClass()); + + private Session session; + + private CassandraExceptionTranslator exceptionTranslator; + + /** + * Set the exception translator for this instance. + * + * @see org.springframework.cassandra.support.CassandraExceptionTranslator + */ + public void setExceptionTranslator(CassandraExceptionTranslator exceptionTranslator) { + this.exceptionTranslator = exceptionTranslator; + } + + /** + * Return the exception translator for this instance. 
+ */ + public CassandraExceptionTranslator getExceptionTranslator() { + return this.exceptionTranslator; + } + + /** + * Ensure that the Cassandra Session has been set + */ + public void afterPropertiesSet() { + if (getSession() == null) { + throw new IllegalArgumentException("Property 'session' is required"); + } + } + + /** + * @return Returns the session. + */ + public Session getSession() { + return session; + } + + /** + * @param session The session to set. + */ + public void setSession(Session session) { + this.session = session; + } + +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/CassandraExceptionTranslator.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/CassandraExceptionTranslator.java new file mode 100644 index 000000000..b2fe93910 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/CassandraExceptionTranslator.java @@ -0,0 +1,136 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.support; + +import org.springframework.cassandra.support.exception.CassandraAuthenticationException; +import org.springframework.cassandra.support.exception.CassandraConnectionFailureException; +import org.springframework.cassandra.support.exception.CassandraInsufficientReplicasAvailableException; +import org.springframework.cassandra.support.exception.CassandraInternalException; +import org.springframework.cassandra.support.exception.CassandraInvalidConfigurationInQueryException; +import org.springframework.cassandra.support.exception.CassandraInvalidQueryException; +import org.springframework.cassandra.support.exception.CassandraKeyspaceExistsException; +import org.springframework.cassandra.support.exception.CassandraQuerySyntaxException; +import org.springframework.cassandra.support.exception.CassandraReadTimeoutException; +import org.springframework.cassandra.support.exception.CassandraTableExistsException; +import org.springframework.cassandra.support.exception.CassandraTraceRetrievalException; +import org.springframework.cassandra.support.exception.CassandraTruncateException; +import org.springframework.cassandra.support.exception.CassandraTypeMismatchException; +import org.springframework.cassandra.support.exception.CassandraUnauthorizedException; +import org.springframework.cassandra.support.exception.CassandraUncategorizedException; +import org.springframework.cassandra.support.exception.CassandraWriteTimeoutException; +import org.springframework.dao.DataAccessException; +import org.springframework.dao.support.PersistenceExceptionTranslator; + +import com.datastax.driver.core.WriteType; +import com.datastax.driver.core.exceptions.AlreadyExistsException; +import com.datastax.driver.core.exceptions.AuthenticationException; +import com.datastax.driver.core.exceptions.DriverException; +import com.datastax.driver.core.exceptions.DriverInternalError; +import 
com.datastax.driver.core.exceptions.InvalidConfigurationInQueryException; +import com.datastax.driver.core.exceptions.InvalidQueryException; +import com.datastax.driver.core.exceptions.InvalidTypeException; +import com.datastax.driver.core.exceptions.NoHostAvailableException; +import com.datastax.driver.core.exceptions.ReadTimeoutException; +import com.datastax.driver.core.exceptions.SyntaxError; +import com.datastax.driver.core.exceptions.TraceRetrievalException; +import com.datastax.driver.core.exceptions.TruncateException; +import com.datastax.driver.core.exceptions.UnauthorizedException; +import com.datastax.driver.core.exceptions.UnavailableException; +import com.datastax.driver.core.exceptions.WriteTimeoutException; + +/** + * Simple {@link PersistenceExceptionTranslator} for Cassandra. Convert the given runtime exception to an appropriate + * exception from the {@code org.springframework.dao} hierarchy. Return {@literal null} if no translation is + * appropriate: any other exception may have resulted from user code, and should not be translated. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ + +public class CassandraExceptionTranslator implements PersistenceExceptionTranslator { + + /* + * (non-Javadoc) + * + * @see org.springframework.dao.support.PersistenceExceptionTranslator# + * translateExceptionIfPossible(java.lang.RuntimeException) + */ + public DataAccessException translateExceptionIfPossible(RuntimeException x) { + + if (x instanceof DataAccessException) { + return (DataAccessException) x; + } + + if (!(x instanceof DriverException)) { + return null; + } + + // Remember: subclasses must come before superclasses, otherwise the + // superclass would match before the subclass! 
+ + if (x instanceof AuthenticationException) { + return new CassandraAuthenticationException(((AuthenticationException) x).getHost(), x.getMessage(), x); + } + if (x instanceof DriverInternalError) { + return new CassandraInternalException(x.getMessage(), x); + } + if (x instanceof InvalidTypeException) { + return new CassandraTypeMismatchException(x.getMessage(), x); + } + if (x instanceof NoHostAvailableException) { + return new CassandraConnectionFailureException(((NoHostAvailableException) x).getErrors(), x.getMessage(), x); + } + if (x instanceof ReadTimeoutException) { + return new CassandraReadTimeoutException(((ReadTimeoutException) x).wasDataRetrieved(), x.getMessage(), x); + } + if (x instanceof WriteTimeoutException) { + WriteType writeType = ((WriteTimeoutException) x).getWriteType(); + return new CassandraWriteTimeoutException(writeType == null ? null : writeType.name(), x.getMessage(), x); + } + if (x instanceof TruncateException) { + return new CassandraTruncateException(x.getMessage(), x); + } + if (x instanceof UnavailableException) { + UnavailableException ux = (UnavailableException) x; + return new CassandraInsufficientReplicasAvailableException(ux.getRequiredReplicas(), ux.getAliveReplicas(), + x.getMessage(), x); + } + if (x instanceof AlreadyExistsException) { + AlreadyExistsException aex = (AlreadyExistsException) x; + + return aex.wasTableCreation() ? 
new CassandraTableExistsException(aex.getTable(), x.getMessage(), x) + : new CassandraKeyspaceExistsException(aex.getKeyspace(), x.getMessage(), x); + } + if (x instanceof InvalidConfigurationInQueryException) { + return new CassandraInvalidConfigurationInQueryException(x.getMessage(), x); + } + if (x instanceof InvalidQueryException) { + return new CassandraInvalidQueryException(x.getMessage(), x); + } + if (x instanceof SyntaxError) { + return new CassandraQuerySyntaxException(x.getMessage(), x); + } + if (x instanceof UnauthorizedException) { + return new CassandraUnauthorizedException(x.getMessage(), x); + } + if (x instanceof TraceRetrievalException) { + return new CassandraTraceRetrievalException(x.getMessage(), x); + } + + // unknown or unhandled exception + return new CassandraUncategorizedException(x.getMessage(), x); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraAuthenticationException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraAuthenticationException.java new file mode 100644 index 000000000..9e5a57153 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraAuthenticationException.java @@ -0,0 +1,42 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.cassandra.support.exception; + +import java.net.InetAddress; + +import org.springframework.dao.PermissionDeniedDataAccessException; + +/** + * Spring data access exception for a Cassandra authentication failure. + * + * @author Matthew T. Adams + */ +public class CassandraAuthenticationException extends PermissionDeniedDataAccessException { + + private static final long serialVersionUID = 8556304586797273927L; + + private InetAddress host; + + public CassandraAuthenticationException(InetAddress host, String msg, Throwable cause) { + super(msg, cause); + this.host = host; + } + + public InetAddress getHost() { + return host; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraConnectionFailureException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraConnectionFailureException.java new file mode 100644 index 000000000..633ad8b6b --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraConnectionFailureException.java @@ -0,0 +1,45 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.cassandra.support.exception; + +import java.net.InetAddress; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import org.springframework.dao.DataAccessResourceFailureException; + +/** + * Spring data access exception for Cassandra when no host is available. + * + * @author Matthew T. Adams + */ +public class CassandraConnectionFailureException extends DataAccessResourceFailureException { + + private static final long serialVersionUID = 6299912054261646552L; + + private final Map messagesByHost = new HashMap(); + + public CassandraConnectionFailureException(Map messagesByHost, String msg, Throwable cause) { + super(msg, cause); + this.messagesByHost.putAll(messagesByHost); + } + + public Map getMessagesByHost() { + return Collections.unmodifiableMap(messagesByHost); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInsufficientReplicasAvailableException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInsufficientReplicasAvailableException.java new file mode 100644 index 000000000..7072fd9fe --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInsufficientReplicasAvailableException.java @@ -0,0 +1,51 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.TransientDataAccessException; + +/** + * Spring data access exception for Cassandra when insufficient replicas are available for a given consistency level. + * + * @author Matthew T. Adams + */ +public class CassandraInsufficientReplicasAvailableException extends TransientDataAccessException { + + private static final long serialVersionUID = 6415130674604814905L; + + private int numberRequired; + private int numberAlive; + + public CassandraInsufficientReplicasAvailableException(String msg) { + super(msg); + } + + public CassandraInsufficientReplicasAvailableException(int numberRequired, int numberAlive, String msg, + Throwable cause) { + super(msg, cause); + this.numberRequired = numberRequired; + this.numberAlive = numberAlive; + } + + public int getNumberRequired() { + return numberRequired; + } + + public int getNumberAlive() { + return numberAlive; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInternalException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInternalException.java new file mode 100644 index 000000000..1da76f11b --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInternalException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.DataAccessException; + +/** + * Spring data access exception for a Cassandra internal error. + * + * @author Matthew T. Adams + */ +public class CassandraInternalException extends DataAccessException { + + private static final long serialVersionUID = 433061676465346338L; + + public CassandraInternalException(String msg) { + super(msg); + } + + public CassandraInternalException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInvalidConfigurationInQueryException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInvalidConfigurationInQueryException.java new file mode 100644 index 000000000..ae29eff31 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInvalidConfigurationInQueryException.java @@ -0,0 +1,38 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.InvalidDataAccessApiUsageException; + +/** + * Spring data access exception for a Cassandra query that is syntactically correct but has an invalid configuration + * clause. + * + * @author Matthew T. Adams + */ +public class CassandraInvalidConfigurationInQueryException extends InvalidDataAccessApiUsageException { + + private static final long serialVersionUID = 4594321191806182918L; + + public CassandraInvalidConfigurationInQueryException(String msg) { + super(msg); + } + + public CassandraInvalidConfigurationInQueryException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInvalidQueryException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInvalidQueryException.java new file mode 100644 index 000000000..3ee84ac47 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraInvalidQueryException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.InvalidDataAccessApiUsageException; + +/** + * Spring data access exception for a Cassandra query that's syntactically correct but invalid. + * + * @author Matthew T. Adams + */ +public class CassandraInvalidQueryException extends InvalidDataAccessApiUsageException { + + private static final long serialVersionUID = 4594321191806182918L; + + public CassandraInvalidQueryException(String msg) { + super(msg); + } + + public CassandraInvalidQueryException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraKeyspaceExistsException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraKeyspaceExistsException.java new file mode 100644 index 000000000..5fd297e3b --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraKeyspaceExistsException.java @@ -0,0 +1,35 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cassandra.support.exception; + +/** + * Spring data access exception for Cassandra when a keyspace being created already exists. + * + * @author Matthew T. 
Adams + */ +public class CassandraKeyspaceExistsException extends CassandraSchemaElementExistsException { + + private static final long serialVersionUID = 6032967419751410352L; + + public CassandraKeyspaceExistsException(String keyspaceName, String msg, Throwable cause) { + super(keyspaceName, ElementType.KEYSPACE, msg, cause); + } + + public String getKeyspaceName() { + return getElementName(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraQuerySyntaxException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraQuerySyntaxException.java new file mode 100644 index 000000000..8cbc1d979 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraQuerySyntaxException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.InvalidDataAccessApiUsageException; + +/** + * Spring data access exception for a Cassandra query syntax error. + * + * @author Matthew T. 
Adams + */ +public class CassandraQuerySyntaxException extends InvalidDataAccessApiUsageException { + + private static final long serialVersionUID = 4398474399882434154L; + + public CassandraQuerySyntaxException(String msg) { + super(msg); + } + + public CassandraQuerySyntaxException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraReadTimeoutException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraReadTimeoutException.java new file mode 100644 index 000000000..a2bff225f --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraReadTimeoutException.java @@ -0,0 +1,40 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.QueryTimeoutException; + +/** + * Spring data access exception for a Cassandra read timeout. + * + * @author Matthew T. 
Adams + */ +public class CassandraReadTimeoutException extends QueryTimeoutException { + + private static final long serialVersionUID = -787022307935203387L; + + private boolean wasDataReceived; + + public CassandraReadTimeoutException(boolean wasDataReceived, String msg, Throwable cause) { + super(msg); + this.wasDataReceived = wasDataReceived; + } + + public boolean getWasDataReceived() { + return wasDataReceived; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraSchemaElementExistsException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraSchemaElementExistsException.java new file mode 100644 index 000000000..79d981190 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraSchemaElementExistsException.java @@ -0,0 +1,50 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.NonTransientDataAccessException; + +/** + * Spring data access exception for when Cassandra schema element being created already exists. + * + * @author Matthew T. 
Adams + */ +public class CassandraSchemaElementExistsException extends NonTransientDataAccessException { + + private static final long serialVersionUID = 7798361273692300162L; + + public enum ElementType { + KEYSPACE, TABLE, COLUMN, INDEX; + } + + private String elementName; + private ElementType elementType; + + public CassandraSchemaElementExistsException(String elementName, ElementType elementType, String msg, Throwable cause) { + super(msg, cause); + this.elementName = elementName; + this.elementType = elementType; + } + + public String getElementName() { + return elementName; + } + + public ElementType getElementType() { + return elementType; + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTableExistsException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTableExistsException.java new file mode 100644 index 000000000..7c23f242a --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTableExistsException.java @@ -0,0 +1,35 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cassandra.support.exception; + +/** + * Spring data access exception for when a Cassandra table being created already exists. + * + * @author Matthew T. 
Adams + */ +public class CassandraTableExistsException extends CassandraSchemaElementExistsException { + + private static final long serialVersionUID = 6032967419751410352L; + + public CassandraTableExistsException(String tableName, String msg, Throwable cause) { + super(tableName, ElementType.TABLE, msg, cause); + } + + public String getTableName() { + return getElementName(); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTraceRetrievalException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTraceRetrievalException.java new file mode 100644 index 000000000..9dc700a83 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTraceRetrievalException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.TransientDataAccessException; + +/** + * Spring data access exception for a Cassandra trace retrieval exception. + * + * @author Matthew T. 
Adams + */ +public class CassandraTraceRetrievalException extends TransientDataAccessException { + + private static final long serialVersionUID = -3163557220324700239L; + + public CassandraTraceRetrievalException(String msg) { + super(msg); + } + + public CassandraTraceRetrievalException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTruncateException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTruncateException.java new file mode 100644 index 000000000..fe7d18205 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTruncateException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.TransientDataAccessException; + +/** + * Spring data access exception for a Cassandra truncate exception. + * + * @author Matthew T. 
Adams + */ +public class CassandraTruncateException extends TransientDataAccessException { + + private static final long serialVersionUID = 5730642491362430311L; + + public CassandraTruncateException(String msg) { + super(msg); + } + + public CassandraTruncateException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTypeMismatchException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTypeMismatchException.java new file mode 100644 index 000000000..972aa01e7 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraTypeMismatchException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.TypeMismatchDataAccessException; + +/** + * Spring data access exception for a Cassandra type mismatch exception. + * + * @author Matthew T. 
Adams + */ +public class CassandraTypeMismatchException extends TypeMismatchDataAccessException { + + private static final long serialVersionUID = -7420058975444905629L; + + public CassandraTypeMismatchException(String msg) { + super(msg); + } + + public CassandraTypeMismatchException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraUnauthorizedException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraUnauthorizedException.java new file mode 100644 index 000000000..7893b8b93 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraUnauthorizedException.java @@ -0,0 +1,33 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.PermissionDeniedDataAccessException; + +/** + * Spring data access exception for when access to a Cassandra element is denied. + * + * @author Matthew T. 
Adams + */ +public class CassandraUnauthorizedException extends PermissionDeniedDataAccessException { + + private static final long serialVersionUID = 4618185356687726647L; + + public CassandraUnauthorizedException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraUncategorizedException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraUncategorizedException.java new file mode 100644 index 000000000..f3ee3c9b6 --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraUncategorizedException.java @@ -0,0 +1,33 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.UncategorizedDataAccessException; + +/** + * Spring data access exception for an uncategorized Cassandra exception. + * + * @author Alex Shvid + * @author Matthew T. 
Adams + */ +public class CassandraUncategorizedException extends UncategorizedDataAccessException { + + private static final long serialVersionUID = 1029525121238025444L; + + public CassandraUncategorizedException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraWriteTimeoutException.java b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraWriteTimeoutException.java new file mode 100644 index 000000000..2836668df --- /dev/null +++ b/spring-cassandra/src/main/java/org/springframework/cassandra/support/exception/CassandraWriteTimeoutException.java @@ -0,0 +1,40 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.springframework.cassandra.support.exception; + +import org.springframework.dao.QueryTimeoutException; + +/** + * Spring data access exception for a Cassandra write timeout. + * + * @author Matthew T. 
Adams + */ +public class CassandraWriteTimeoutException extends QueryTimeoutException { + + private static final long serialVersionUID = -4374826375213670718L; + + private String writeType; + + public CassandraWriteTimeoutException(String writeType, String msg, Throwable cause) { + super(msg, cause); + this.writeType = writeType; + } + + public String getWriteType() { + return writeType; + } +} diff --git a/spring-cassandra/src/main/resources/META-INF/spring.handlers b/spring-cassandra/src/main/resources/META-INF/spring.handlers new file mode 100644 index 000000000..f5bd3781b --- /dev/null +++ b/spring-cassandra/src/main/resources/META-INF/spring.handlers @@ -0,0 +1 @@ +http\://www.springframework.org/schema/cassandra=org.springframework.cassandra.config.xml.CassandraNamespaceHandler diff --git a/spring-cassandra/src/main/resources/META-INF/spring.schemas b/spring-cassandra/src/main/resources/META-INF/spring.schemas new file mode 100644 index 000000000..9a41e8fcd --- /dev/null +++ b/spring-cassandra/src/main/resources/META-INF/spring.schemas @@ -0,0 +1,2 @@ +http\://www.springframework.org/schema/cassandra/spring-cassandra-1.0.xsd=org/springframework/cassandra/config/spring-cassandra-1.0.xsd +http\://www.springframework.org/schema/cassandra/spring-cassandra.xsd=org/springframework/cassandra/config/spring-cassandra-1.0.xsd \ No newline at end of file diff --git a/spring-cassandra/src/main/resources/META-INF/spring.tooling b/spring-cassandra/src/main/resources/META-INF/spring.tooling new file mode 100644 index 000000000..a339bc54e --- /dev/null +++ b/spring-cassandra/src/main/resources/META-INF/spring.tooling @@ -0,0 +1,4 @@ +# Tooling related information for the cassandra namespace +http\://www.springframework.org/schema/cassandra@name=Spring Cassandra Namespace +http\://www.springframework.org/schema/cassandra@prefix=cassandra +http\://www.springframework.org/schema/cassandra@icon=org/springframework/data/cassandra/config/spring-cassandra.gif diff --git 
a/spring-cassandra/src/main/resources/org/springframework/cassandra/config/spring-cassandra-1.0.xsd b/spring-cassandra/src/main/resources/org/springframework/cassandra/config/spring-cassandra-1.0.xsd new file mode 100644 index 000000000..c0b1ab354 --- /dev/null +++ b/spring-cassandra/src/main/resources/org/springframework/cassandra/config/spring-cassandra-1.0.xsd @@ -0,0 +1,516 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/spring-cassandra/src/main/resources/org/springframework/cassandra/config/spring-cassandra.gif b/spring-cassandra/src/main/resources/org/springframework/cassandra/config/spring-cassandra.gif new file mode 100644 index 000000000..20ed1f9a4 Binary files /dev/null and b/spring-cassandra/src/main/resources/org/springframework/cassandra/config/spring-cassandra.gif differ diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/AbstractEmbeddedCassandraIntegrationTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/AbstractEmbeddedCassandraIntegrationTest.java new file mode 100644 index 000000000..4b5127af6 --- /dev/null +++ 
b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/AbstractEmbeddedCassandraIntegrationTest.java @@ -0,0 +1,98 @@ +package org.springframework.cassandra.test.integration; + +import java.io.IOException; +import java.util.UUID; + +import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.thrift.transport.TTransportException; +import org.cassandraunit.utils.EmbeddedCassandraServerHelper; +import org.junit.After; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.KeyspaceMetadata; +import com.datastax.driver.core.Session; + +public abstract class AbstractEmbeddedCassandraIntegrationTest { + + protected final static String CASSANDRA_CONFIG = "spring-cassandra.yaml"; + protected final static String CASSANDRA_HOST = "localhost"; + protected final static int CASSANDRA_NATIVE_PORT = 9042; + + public static void startCassandra() throws ConfigurationException, TTransportException, IOException, + InterruptedException { + EmbeddedCassandraServerHelper.startEmbeddedCassandra(CASSANDRA_CONFIG); + } + + public AbstractEmbeddedCassandraIntegrationTest() { + try { + startCassandra(); + } catch (Exception e) { + throw new RuntimeException(e); + } + connect(); + } + + /** + * Whether to clear the cluster before the next test. + */ + protected boolean clear = false; + /** + * Whether to connect to Cassandra. + */ + protected boolean connect = true; + /** + * The {@link Cluster} that's connected to Cassandra. + */ + protected Cluster cluster; + /** + * If not null, get a {@link Session} for the from the {@link #cluster}. + */ + protected String keyspace = "ks" + UUID.randomUUID().toString().replace("-", ""); + /** + * The {@link Session} for the {@link #keyspace} from the {@link #cluster}. + */ + protected Session session; + + protected String keyspace() { + return keyspace; + } + + /** + * Returns whether we're currently connected to the cluster. 
+ */ + public boolean connected() { + return session != null; + } + + public Cluster cluster() { + return Cluster.builder().addContactPoint(CASSANDRA_HOST).withPort(CASSANDRA_NATIVE_PORT).build(); + } + + public void connect() { + if (connect && !connected()) { + cluster = cluster(); + + if (keyspace() == null) { + session = cluster.connect(); + } else { + + KeyspaceMetadata kmd = cluster.getMetadata().getKeyspace(keyspace()); + if (kmd == null) { // then create keyspace + session = cluster.connect(); + session.execute("CREATE KEYSPACE " + keyspace() + + " WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 1};"); + session.execute("USE " + keyspace() + ";"); + } else {// else keyspace already exists + session = cluster.connect(keyspace()); + } + } + } + } + + @After + public void after() { + if (clear && connected()) { + EmbeddedCassandraServerHelper.cleanEmbeddedCassandra(); + } + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/IntegrationTestUtils.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/IntegrationTestUtils.java new file mode 100644 index 000000000..bac0245c1 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/IntegrationTestUtils.java @@ -0,0 +1,16 @@ +package org.springframework.cassandra.test.integration.config; + +import static org.junit.Assert.assertNotNull; + +import com.datastax.driver.core.Session; + +public class IntegrationTestUtils { + + public static void assertSession(Session session) { + assertNotNull(session); + } + + public static void assertKeyspaceExists(String keyspace, Session session) { + assertNotNull(session.getCluster().getMetadata().getKeyspace(keyspace)); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/AbstractIntegrationTest.java 
b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/AbstractIntegrationTest.java new file mode 100644 index 000000000..b8e606da5 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/AbstractIntegrationTest.java @@ -0,0 +1,23 @@ +package org.springframework.cassandra.test.integration.config.java; + +import javax.inject.Inject; + +import org.junit.Before; +import org.junit.runner.RunWith; +import org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import org.springframework.cassandra.test.integration.config.IntegrationTestUtils; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import com.datastax.driver.core.Session; + +@RunWith(SpringJUnit4ClassRunner.class) +public abstract class AbstractIntegrationTest extends AbstractEmbeddedCassandraIntegrationTest { + + @Inject + protected Session session; + + @Before + public void assertSession() { + IntegrationTestUtils.assertSession(session); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/AbstractKeyspaceCreatingConfiguration.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/AbstractKeyspaceCreatingConfiguration.java new file mode 100644 index 000000000..b5793bceb --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/AbstractKeyspaceCreatingConfiguration.java @@ -0,0 +1,40 @@ +package org.springframework.cassandra.test.integration.config.java; + +import org.springframework.cassandra.config.CassandraSessionFactoryBean; +import org.springframework.cassandra.config.java.AbstractCassandraConfiguration; +import org.springframework.context.annotation.Configuration; +import org.springframework.util.StringUtils; + +import com.datastax.driver.core.KeyspaceMetadata; +import com.datastax.driver.core.Session; + 
+@Configuration +public abstract class AbstractKeyspaceCreatingConfiguration extends AbstractCassandraConfiguration { + + @Override + public CassandraSessionFactoryBean session() throws Exception { + + createKeyspaceIfNecessary(); + + return super.session(); + } + + protected void createKeyspaceIfNecessary() throws Exception { + String keyspace = getKeyspaceName(); + if (!StringUtils.hasText(keyspace)) { + return; + } + + Session system = cluster().getObject().connect(); + KeyspaceMetadata kmd = system.getCluster().getMetadata().getKeyspace(keyspace); + if (kmd != null) { + return; + } + + // TODO: use KeyspaceBuilder to build keyspace with attributes & options + + system.execute("CREATE KEYSPACE " + keyspace + + " WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };"); + system.shutdown(); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/Config.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/Config.java new file mode 100644 index 000000000..7768526a0 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/Config.java @@ -0,0 +1,13 @@ +package org.springframework.cassandra.test.integration.config.java; + +import org.springframework.cassandra.config.java.AbstractCassandraConfiguration; +import org.springframework.context.annotation.Configuration; + +@Configuration +public class Config extends AbstractCassandraConfiguration { + + @Override + protected String getKeyspaceName() { + return null; + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/ConfigTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/ConfigTest.java new file mode 100644 index 000000000..d00de3a59 --- /dev/null +++ 
b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/ConfigTest.java @@ -0,0 +1,15 @@ +package org.springframework.cassandra.test.integration.config.java; + +import org.junit.Test; +import org.springframework.test.context.ContextConfiguration; + +@ContextConfiguration(classes = Config.class) +public class ConfigTest extends AbstractIntegrationTest { + + @Test + public void test() { + session + .execute("CREATE KEYSPACE ConfigTest WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };"); + session.execute("USE ConfigTest"); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/KeyspaceCreatingJavaConfig.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/KeyspaceCreatingJavaConfig.java new file mode 100644 index 000000000..4b0f9be88 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/KeyspaceCreatingJavaConfig.java @@ -0,0 +1,32 @@ +package org.springframework.cassandra.test.integration.config.java; + +import java.util.ArrayList; +import java.util.List; + +import org.springframework.cassandra.config.KeyspaceAttributes; +import org.springframework.cassandra.config.java.AbstractCassandraConfiguration; +import org.springframework.cassandra.core.keyspace.CreateKeyspaceSpecification; +import org.springframework.cassandra.core.keyspace.KeyspaceOption; +import org.springframework.context.annotation.Configuration; + +@Configuration +public class KeyspaceCreatingJavaConfig extends AbstractCassandraConfiguration { + + public static final String KEYSPACE_NAME = "foo"; + + @Override + protected String getKeyspaceName() { + return KEYSPACE_NAME; + } + + @Override + protected List getKeyspaceCreations() { + ArrayList list = new ArrayList(); + + CreateKeyspaceSpecification specification = CreateKeyspaceSpecification.createKeyspace().name(getKeyspaceName()); + 
specification.with(KeyspaceOption.REPLICATION, KeyspaceAttributes.newSimpleReplication(1L)); + + list.add(specification); + return list; + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/KeyspaceCreatingJavaConfigTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/KeyspaceCreatingJavaConfigTest.java new file mode 100644 index 000000000..0f639ec57 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/java/KeyspaceCreatingJavaConfigTest.java @@ -0,0 +1,31 @@ +package org.springframework.cassandra.test.integration.config.java; + +import javax.inject.Inject; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.cassandra.test.integration.config.IntegrationTestUtils; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import com.datastax.driver.core.Session; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(classes = KeyspaceCreatingJavaConfig.class) +public class KeyspaceCreatingJavaConfigTest extends AbstractIntegrationTest { + + @Inject + protected Session session; + + @Override + protected String keyspace() { + return null; + } + + @Test + public void test() { + Assert.assertNotNull(session); + IntegrationTestUtils.assertKeyspaceExists(KeyspaceCreatingJavaConfig.KEYSPACE_NAME, session); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/FullySpecifiedKeyspaceCreatingXmlConfigTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/FullySpecifiedKeyspaceCreatingXmlConfigTest.java new file mode 100644 index 000000000..44ac32069 --- /dev/null +++ 
b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/FullySpecifiedKeyspaceCreatingXmlConfigTest.java @@ -0,0 +1,33 @@ +package org.springframework.cassandra.test.integration.config.xml; + +import javax.inject.Inject; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import org.springframework.cassandra.test.integration.config.IntegrationTestUtils; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import com.datastax.driver.core.Session; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration +public class FullySpecifiedKeyspaceCreatingXmlConfigTest extends AbstractEmbeddedCassandraIntegrationTest { + + @Override + protected String keyspace() { + return null; + } + + @Inject + Session s; + + @Test + public void test() { + IntegrationTestUtils.assertKeyspaceExists("full1", s); + IntegrationTestUtils.assertKeyspaceExists("full2", s); + IntegrationTestUtils.assertKeyspaceExists("script1", s); + IntegrationTestUtils.assertKeyspaceExists("script2", s); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/MinimalKeyspaceCreatingXmlConfigTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/MinimalKeyspaceCreatingXmlConfigTest.java new file mode 100644 index 000000000..492605316 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/MinimalKeyspaceCreatingXmlConfigTest.java @@ -0,0 +1,30 @@ +package org.springframework.cassandra.test.integration.config.xml; + +import javax.inject.Inject; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import 
org.springframework.cassandra.test.integration.config.IntegrationTestUtils; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import com.datastax.driver.core.Session; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration +public class MinimalKeyspaceCreatingXmlConfigTest extends AbstractEmbeddedCassandraIntegrationTest { + + @Override + protected String keyspace() { + return null; + } + + @Inject + Session s; + + @Test + public void test() { + IntegrationTestUtils.assertKeyspaceExists("minimal", s); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/MinimalXmlConfigTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/MinimalXmlConfigTest.java new file mode 100644 index 000000000..760c00192 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/MinimalXmlConfigTest.java @@ -0,0 +1,38 @@ +package org.springframework.cassandra.test.integration.config.xml; + +import static org.junit.Assert.*; + +import javax.inject.Inject; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.cassandra.core.CassandraOperations; +import org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import org.springframework.cassandra.test.integration.config.IntegrationTestUtils; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import com.datastax.driver.core.Session; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration +public class MinimalXmlConfigTest extends AbstractEmbeddedCassandraIntegrationTest { + + protected String keyspace() { + return "minimalxmlconfigtest"; + } + + @Inject + Session s; + + @Inject + CassandraOperations ops; + + @Test + public void test() { 
+ IntegrationTestUtils.assertSession(s); + IntegrationTestUtils.assertKeyspaceExists(keyspace(), s); + + assertNotNull(ops); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/XmlConfigTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/XmlConfigTest.java new file mode 100644 index 000000000..b8eb27ea8 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/config/xml/XmlConfigTest.java @@ -0,0 +1,30 @@ +package org.springframework.cassandra.test.integration.config.xml; + +import javax.inject.Inject; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import org.springframework.cassandra.test.integration.config.IntegrationTestUtils; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import com.datastax.driver.core.Session; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration +public class XmlConfigTest extends AbstractEmbeddedCassandraIntegrationTest { + + protected String keyspace() { + return "xmlconfigtest"; + } + + @Inject + Session s; + + @Test + public void test() { + IntegrationTestUtils.assertSession(s); + IntegrationTestUtils.assertKeyspaceExists(keyspace(), s); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CqlIndexSpecificationAssertions.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CqlIndexSpecificationAssertions.java new file mode 100644 index 000000000..34930f284 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CqlIndexSpecificationAssertions.java @@ -0,0 +1,43 @@ +/* + * Copyright 2011-2013 the original 
author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.test.integration.core.cql.generator; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import org.springframework.cassandra.core.keyspace.IndexDescriptor; + +import com.datastax.driver.core.ColumnMetadata.IndexMetadata; +import com.datastax.driver.core.Session; + +public class CqlIndexSpecificationAssertions { + + public static double DELTA = 1e-6; // delta for comparisons of doubles + + public static void assertIndex(IndexDescriptor expected, String keyspace, Session session) { + IndexMetadata imd = session.getCluster().getMetadata().getKeyspace(keyspace.toLowerCase()) + .getTable(expected.getTableName()).getColumn(expected.getColumnName()).getIndex(); + + assertEquals(expected.getName().toLowerCase(), imd.getName().toLowerCase()); + } + + public static void assertNoIndex(IndexDescriptor expected, String keyspace, Session session) { + IndexMetadata imd = session.getCluster().getMetadata().getKeyspace(keyspace.toLowerCase()) + .getTable(expected.getTableName()).getColumn(expected.getColumnName()).getIndex(); + + assertNull(imd); + } +} \ No newline at end of file diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CqlKeyspaceSpecificationAssertions.java 
b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CqlKeyspaceSpecificationAssertions.java new file mode 100644 index 000000000..0831c4e37 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CqlKeyspaceSpecificationAssertions.java @@ -0,0 +1,49 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.test.integration.core.cql.generator; + +import static org.junit.Assert.*; + +import java.util.Map; + +import org.springframework.cassandra.core.keyspace.Option; +import org.springframework.cassandra.core.keyspace.KeyspaceDescriptor; + +import com.datastax.driver.core.KeyspaceMetadata; +import com.datastax.driver.core.Session; + +public class CqlKeyspaceSpecificationAssertions { + + public static double DELTA = 1e-6; // delta for comparisons of doubles + + @SuppressWarnings( "unchecked" ) + public static void assertKeyspace(KeyspaceDescriptor expected, String keyspace, Session session) { + KeyspaceMetadata kmd = session.getCluster().getMetadata().getKeyspace(keyspace.toLowerCase()); + + assertEquals( expected.getName(), kmd.getName() ); + + Map options = kmd.getReplication(); + Map expectedOptions = expected.getOptions(); + Map replicationMap = (Map) expectedOptions.get( "replication" ); + assertEquals(replicationMap.size(), options.size()); + + for ( 
Map.Entry optionEntry : replicationMap.entrySet()) { + String optionValue = options.get(optionEntry.getKey().getName()); + String repMapValue = "" + optionEntry.getValue(); + assertTrue(optionValue.endsWith(repMapValue)); + } + } +} \ No newline at end of file diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CqlTableSpecificationAssertions.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CqlTableSpecificationAssertions.java new file mode 100644 index 000000000..70b8b8adc --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CqlTableSpecificationAssertions.java @@ -0,0 +1,177 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.test.integration.core.cql.generator; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.cassandra.core.cql.CqlStringUtils; +import org.springframework.cassandra.core.keyspace.ColumnSpecification; +import org.springframework.cassandra.core.keyspace.DropTableSpecification; +import org.springframework.cassandra.core.keyspace.TableDescriptor; +import org.springframework.cassandra.core.keyspace.TableOption; + +import com.datastax.driver.core.ColumnMetadata; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.TableMetadata; +import com.datastax.driver.core.TableMetadata.Options; + +public class CqlTableSpecificationAssertions { + + private final static Logger log = LoggerFactory.getLogger(CqlTableSpecificationAssertions.class); + + public static double DELTA = 1e-6; // delta for comparisons of doubles + + public static void assertTable(TableDescriptor expected, String keyspace, Session session) { + TableMetadata tmd = session.getCluster().getMetadata().getKeyspace(keyspace.toLowerCase()) + .getTable(expected.getName()); + + assertEquals(expected.getName().toLowerCase(), tmd.getName().toLowerCase()); + assertPartitionKeyColumns(expected, tmd); + assertPrimaryKeyColumns(expected, tmd); + assertColumns(expected.getColumns(), tmd.getColumns()); + assertOptions(expected.getOptions(), tmd.getOptions()); + } + + public static void assertNoTable(DropTableSpecification expected, String keyspace, Session session) { + TableMetadata tmd = session.getCluster().getMetadata().getKeyspace(keyspace.toLowerCase()) + .getTable(expected.getName()); + + assertNull(tmd); + } + + public static void assertPartitionKeyColumns(TableDescriptor expected, TableMetadata actual) { + assertColumns(expected.getPartitionKeyColumns(), actual.getPartitionKey()); + 
} + + public static void assertPrimaryKeyColumns(TableDescriptor expected, TableMetadata actual) { + assertColumns(expected.getPrimaryKeyColumns(), actual.getPrimaryKey()); + } + + public static void assertOptions(Map expected, Options actual) { + + for (String key : expected.keySet()) { + + log.info(key + " -> " + expected.get(key)); + + Object value = expected.get(key); + TableOption tableOption = getTableOptionFor(key.toUpperCase()); + + if (tableOption == null && key.equalsIgnoreCase(TableOption.COMPACT_STORAGE.getName())) { + // TODO: figure out how to tell if COMPACT STORAGE was used + continue; + } + + assertOption(tableOption, key, value, getOptionFor(tableOption, tableOption.getType(), actual)); + } + } + + @SuppressWarnings({ "unchecked", "incomplete-switch" }) + public static void assertOption(TableOption tableOption, String key, Object expected, Object actual) { + + if (tableOption == null) { // then this is a string-only or unknown value + key.equalsIgnoreCase(actual.toString()); // TODO: determine if this is the right test + } + + switch (tableOption) { + + case BLOOM_FILTER_FP_CHANCE: + case READ_REPAIR_CHANCE: + case DCLOCAL_READ_REPAIR_CHANCE: + assertEquals((Double) expected, (Double) actual, DELTA); + return; + + case CACHING: + assertEquals(((String) expected).toUpperCase(), ((String) actual).toUpperCase()); + return; + + case COMPACTION: + assertCompaction((Map) expected, (Map) actual); + return; + + case COMPRESSION: + assertCompression((Map) expected, (Map) actual); + return; + } + + log.info(actual.getClass().getName()); + + assertEquals(expected, + tableOption.quotesValue() && !(actual instanceof CharSequence) ? 
CqlStringUtils.singleQuote(actual) : actual); + } + + public static void assertCompaction(Map expected, Map actual) { + // TODO + } + + public static void assertCompression(Map expected, Map actual) { + // TODO + } + + public static TableOption getTableOptionFor(String key) { + try { + return TableOption.valueOf(key); + } catch (IllegalArgumentException x) { + return null; + } + } + + @SuppressWarnings("unchecked") + public static T getOptionFor(TableOption option, Class type, Options options) { + switch (option) { + case BLOOM_FILTER_FP_CHANCE: + return (T) (Double) options.getBloomFilterFalsePositiveChance(); + case CACHING: + return (T) CqlStringUtils.singleQuote(options.getCaching()); + case COMMENT: + return (T) CqlStringUtils.singleQuote(options.getComment()); + case COMPACTION: + return (T) options.getCompaction(); + case COMPACT_STORAGE: + throw new Error(); // TODO: figure out + case COMPRESSION: + return (T) options.getCompression(); + case DCLOCAL_READ_REPAIR_CHANCE: + return (T) (Double) options.getLocalReadRepairChance(); + case GC_GRACE_SECONDS: + return (T) new Long(options.getGcGraceInSeconds()); + case READ_REPAIR_CHANCE: + return (T) (Double) options.getReadRepairChance(); + case REPLICATE_ON_WRITE: + return (T) (Boolean) options.getReplicateOnWrite(); + } + return null; + } + + public static void assertColumns(List expected, List actual) { + for (int i = 0; i < expected.size(); i++) { + ColumnSpecification expectedColumn = expected.get(i); + ColumnMetadata actualColumn = actual.get(i); + + assertColumn(expectedColumn, actualColumn); + } + } + + public static void assertColumn(ColumnSpecification expected, ColumnMetadata actual) { + assertEquals(expected.getName().toLowerCase(), actual.getName().toLowerCase()); + assertEquals(expected.getType(), actual.getType()); + } +} \ No newline at end of file diff --git 
a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CreateIndexCqlGeneratorIntegrationTests.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CreateIndexCqlGeneratorIntegrationTests.java new file mode 100644 index 000000000..5c0add73b --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CreateIndexCqlGeneratorIntegrationTests.java @@ -0,0 +1,60 @@ +package org.springframework.cassandra.test.integration.core.cql.generator; + +import static org.springframework.cassandra.test.integration.core.cql.generator.CqlIndexSpecificationAssertions.assertIndex; + +import org.cassandraunit.CassandraCQLUnit; +import org.cassandraunit.dataset.cql.ClassPathCQLDataSet; +import org.junit.Rule; +import org.junit.Test; +import org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import org.springframework.cassandra.test.unit.core.cql.generator.CreateIndexCqlGeneratorTests.BasicTest; +import org.springframework.cassandra.test.unit.core.cql.generator.CreateIndexCqlGeneratorTests.CreateIndexTest; + +/** + * Integration tests that reuse unit tests. + * + * @author Matthew T. Adams + */ +public class CreateIndexCqlGeneratorIntegrationTests { + + /** + * Integration test base class that knows how to do everything except instantiate the concrete unit test type T. + * + * @author Matthew T. Adams + * + * @param The concrete unit test class to which this integration test corresponds. 
+ */ + public static abstract class Base extends AbstractEmbeddedCassandraIntegrationTest { + T unit; + + public abstract T unit(); + + @Test + public void test() { + unit = unit(); + unit.prepare(); + + session.execute(unit.cql); + + assertIndex(unit.specification, keyspace, session); + } + } + + public static class BasicIntegrationTest extends Base { + + /** + * This loads any test specific Cassandra objects + */ + @Rule + public CassandraCQLUnit cassandraCQLUnit = new CassandraCQLUnit(new ClassPathCQLDataSet( + "integration/cql/generator/CreateIndexCqlGeneratorIntegrationTests-BasicTest.cql", this.keyspace), + CASSANDRA_CONFIG, CASSANDRA_HOST, CASSANDRA_NATIVE_PORT); + + @Override + public BasicTest unit() { + return new BasicTest(); + } + + } + +} \ No newline at end of file diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CreateKeyspaceCqlGeneratorIntegrationTests.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CreateKeyspaceCqlGeneratorIntegrationTests.java new file mode 100644 index 000000000..2524e0205 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CreateKeyspaceCqlGeneratorIntegrationTests.java @@ -0,0 +1,54 @@ +package org.springframework.cassandra.test.integration.core.cql.generator; + +import static org.springframework.cassandra.test.integration.core.cql.generator.CqlKeyspaceSpecificationAssertions.assertKeyspace; + +import org.junit.Test; +import org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import org.springframework.cassandra.test.unit.core.cql.generator.CreateKeyspaceCqlGeneratorTests.BasicTest; +import org.springframework.cassandra.test.unit.core.cql.generator.CreateKeyspaceCqlGeneratorTests.CreateKeyspaceTest; +import 
org.springframework.cassandra.test.unit.core.cql.generator.CreateKeyspaceCqlGeneratorTests.NetworkTopologyTest; + +/** + * Integration tests that reuse unit tests. + * + * @author John McPeek + */ +public class CreateKeyspaceCqlGeneratorIntegrationTests { + + /** + * Integration test base class that knows how to do everything except instantiate the concrete unit test type T. + * + * @param The concrete unit test class to which this integration test corresponds. + */ + public static abstract class Base extends AbstractEmbeddedCassandraIntegrationTest { + T unit; + + public abstract T unit(); + + @Test + public void test() { + unit = unit(); + unit.prepare(); + + session.execute(unit.cql); + + assertKeyspace(unit.specification, unit.keyspace, session); + } + } + + public static class BasicIntegrationTest extends Base { + + @Override + public BasicTest unit() { + return new BasicTest(); + } + } + + public static class NetworkTopologyIntegrationTest extends Base { + + @Override + public NetworkTopologyTest unit() { + return new NetworkTopologyTest(); + } + } +} \ No newline at end of file diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CreateTableCqlGeneratorIntegrationTests.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CreateTableCqlGeneratorIntegrationTests.java new file mode 100644 index 000000000..63806cbc7 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/CreateTableCqlGeneratorIntegrationTests.java @@ -0,0 +1,60 @@ +package org.springframework.cassandra.test.integration.core.cql.generator; + +import static org.springframework.cassandra.test.integration.core.cql.generator.CqlTableSpecificationAssertions.assertTable; + +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import 
org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import org.springframework.cassandra.test.unit.core.cql.generator.CreateTableCqlGeneratorTests.BasicTest; +import org.springframework.cassandra.test.unit.core.cql.generator.CreateTableCqlGeneratorTests.CompositePartitionKeyTest; +import org.springframework.cassandra.test.unit.core.cql.generator.CreateTableCqlGeneratorTests.CreateTableTest; + +/** + * Integration tests that reuse unit tests. + * + * @author Matthew T. Adams + */ +public class CreateTableCqlGeneratorIntegrationTests { + + private final static Logger log = LoggerFactory.getLogger(CreateTableCqlGeneratorIntegrationTests.class); + + /** + * Integration test base class that knows how to do everything except instantiate the concrete unit test type T. + * + * @author Matthew T. Adams + * + * @param The concrete unit test class to which this integration test corresponds. + */ + public static abstract class Base extends AbstractEmbeddedCassandraIntegrationTest { + T unit; + + public abstract T unit(); + + @Test + public void test() { + unit = unit(); + unit.prepare(); + + session.execute(unit.cql); + + assertTable(unit.specification, keyspace, session); + } + } + + public static class BasicIntegrationTest extends Base { + + @Override + public BasicTest unit() { + return new BasicTest(); + } + } + + public static class CompositePartitionKeyIntegrationTest extends Base { + + @Override + public CompositePartitionKeyTest unit() { + return new CompositePartitionKeyTest(); + } + } +} \ No newline at end of file diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/IndexLifecycleCqlGeneratorIntegrationTests.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/IndexLifecycleCqlGeneratorIntegrationTests.java new file mode 100644 index 000000000..0c01fdbd0 --- /dev/null +++ 
b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/IndexLifecycleCqlGeneratorIntegrationTests.java @@ -0,0 +1,61 @@ +package org.springframework.cassandra.test.integration.core.cql.generator; + +import static org.springframework.cassandra.test.integration.core.cql.generator.CqlIndexSpecificationAssertions.assertIndex; +import static org.springframework.cassandra.test.integration.core.cql.generator.CqlIndexSpecificationAssertions.assertNoIndex; + +import org.cassandraunit.CassandraCQLUnit; +import org.cassandraunit.dataset.cql.ClassPathCQLDataSet; +import org.junit.Rule; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import org.springframework.cassandra.test.unit.core.cql.generator.CreateIndexCqlGeneratorTests; +import org.springframework.cassandra.test.unit.core.cql.generator.DropIndexCqlGeneratorTests; + +/** + * Integration tests that reuse unit tests. + * + * @author Matthew T. 
Adams + */ +public class IndexLifecycleCqlGeneratorIntegrationTests extends AbstractEmbeddedCassandraIntegrationTest { + + Logger log = LoggerFactory.getLogger(IndexLifecycleCqlGeneratorIntegrationTests.class); + + /** + * This loads any test specific Cassandra objects + */ + @Rule + public CassandraCQLUnit cassandraCQLUnit = new CassandraCQLUnit(new ClassPathCQLDataSet( + "integration/cql/generator/CreateIndexCqlGeneratorIntegrationTests-BasicTest.cql", this.keyspace), + CASSANDRA_CONFIG, CASSANDRA_HOST, CASSANDRA_NATIVE_PORT); + + @Test + public void lifecycleTest() { + + CreateIndexCqlGeneratorTests.BasicTest createTest = new CreateIndexCqlGeneratorTests.BasicTest(); + DropIndexCqlGeneratorTests.BasicTest dropTest = new DropIndexCqlGeneratorTests.BasicTest(); + DropIndexCqlGeneratorTests.IfExistsTest dropIfExists = new DropIndexCqlGeneratorTests.IfExistsTest(); + + createTest.prepare(); + dropTest.prepare(); + dropIfExists.prepare(); + + log.info(createTest.cql); + session.execute(createTest.cql); + + assertIndex(createTest.specification, keyspace, session); + + log.info(dropTest.cql); + session.execute(dropTest.cql); + + assertNoIndex(createTest.specification, keyspace, session); + + // log.info(dropIfExists.cql); + // session.execute(dropIfExists.cql); + // + // assertNoIndex(createTest.specification, keyspace, session); + + } + +} \ No newline at end of file diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/TableLifecycleIntegrationTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/TableLifecycleIntegrationTest.java new file mode 100644 index 000000000..60e55c9a8 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/TableLifecycleIntegrationTest.java @@ -0,0 +1,105 @@ +/* + * Copyright 2011-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.test.integration.core.cql.generator; + +import static org.springframework.cassandra.test.integration.core.cql.generator.CqlTableSpecificationAssertions.assertNoTable; +import static org.springframework.cassandra.test.integration.core.cql.generator.CqlTableSpecificationAssertions.assertTable; + +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.cassandra.core.cql.generator.DropTableCqlGenerator; +import org.springframework.cassandra.core.keyspace.DropTableSpecification; +import org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import org.springframework.cassandra.test.unit.core.cql.generator.AlterTableCqlGeneratorTests; +import org.springframework.cassandra.test.unit.core.cql.generator.CreateTableCqlGeneratorTests; +import org.springframework.cassandra.test.unit.core.cql.generator.DropTableCqlGeneratorTests; + +/** + * Test CREATE TABLE / ALTER TABLE / DROP TABLE + * + * @author David Webb + */ +public class TableLifecycleIntegrationTest extends AbstractEmbeddedCassandraIntegrationTest { + + private final static Logger log = LoggerFactory.getLogger(TableLifecycleIntegrationTest.class); + + CreateTableCqlGeneratorTests.MultipleOptionsTest createTableTest = new CreateTableCqlGeneratorTests.MultipleOptionsTest(); + + @Test + public void testDrop() { + + 
createTableTest.prepare(); + + log.info(createTableTest.cql); + + session.execute(createTableTest.cql); + + assertTable(createTableTest.specification, keyspace, session); + + DropTableTest dropTest = new DropTableTest(); + dropTest.prepare(); + + log.info(dropTest.cql); + + session.execute(dropTest.cql); + + assertNoTable(dropTest.specification, keyspace, session); + } + + @Test + public void testAlter() { + + createTableTest.prepare(); + + log.info(createTableTest.cql); + + session.execute(createTableTest.cql); + + assertTable(createTableTest.specification, keyspace, session); + + AlterTableCqlGeneratorTests.MultipleOptionsTest alterTest = new AlterTableCqlGeneratorTests.MultipleOptionsTest(); + alterTest.prepare(); + + log.info(alterTest.cql); + + session.execute(alterTest.cql); + + // assertTable(alterTest.specification, keyspace, session); + + } + + public class DropTableTest extends DropTableCqlGeneratorTests.DropTableTest { + + /* (non-Javadoc) + * @see org.springframework.cassandra.test.unit.core.cql.generator.TableOperationCqlGeneratorTest#specification() + */ + @Override + public DropTableSpecification specification() { + return DropTableSpecification.dropTable().name(createTableTest.specification.getName()); + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.test.unit.core.cql.generator.TableOperationCqlGeneratorTest#generator() + */ + @Override + public DropTableCqlGenerator generator() { + return new DropTableCqlGenerator(specification); + } + + } + +} \ No newline at end of file diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/TableOptionsIntegrationTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/TableOptionsIntegrationTest.java new file mode 100644 index 000000000..b0d0bd92b --- /dev/null +++ 
b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/cql/generator/TableOptionsIntegrationTest.java @@ -0,0 +1,48 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.test.integration.core.cql.generator; + +import static org.springframework.cassandra.test.integration.core.cql.generator.CqlTableSpecificationAssertions.assertTable; + +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import org.springframework.cassandra.test.unit.core.cql.generator.CreateTableCqlGeneratorTests; + +/** + * Test CREATE TABLE for all Options and assert against C* TableMetaData + * + * @author David Webb + */ +public class TableOptionsIntegrationTest extends AbstractEmbeddedCassandraIntegrationTest { + + private final static Logger log = LoggerFactory.getLogger(TableOptionsIntegrationTest.class); + + @Test + public void test() { + + CreateTableCqlGeneratorTests.MultipleOptionsTest optionsTest = new CreateTableCqlGeneratorTests.MultipleOptionsTest(); + + optionsTest.prepare(); + + log.info(optionsTest.cql); + + session.execute(optionsTest.cql); + + assertTable(optionsTest.specification, keyspace, session); + } +} \ No newline at end of file diff --git 
a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/template/Book.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/template/Book.java new file mode 100644 index 000000000..7b5464321 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/template/Book.java @@ -0,0 +1,99 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.cassandra.test.integration.core.template; + +/** + * Test POJO + * + * @author David Webb + * + */ +public class Book { + + private String isbn; + + private String title; + private String author; + private int pages; + + /** + * @return Returns the isbn. + */ + public String getIsbn() { + return isbn; + } + + /** + * @param isbn The isbn to set. + */ + public void setIsbn(String isbn) { + this.isbn = isbn; + } + + /** + * @return Returns the title. + */ + public String getTitle() { + return title; + } + + /** + * @param title The title to set. + */ + public void setTitle(String title) { + this.title = title; + } + + /** + * @return Returns the author. + */ + public String getAuthor() { + return author; + } + + /** + * @param author The author to set. + */ + public void setAuthor(String author) { + this.author = author; + } + + /** + * @return Returns the pages. 
+ */ + public int getPages() { + return pages; + } + + /** + * @param pages The pages to set. + */ + public void setPages(int pages) { + this.pages = pages; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("isbn -> " + isbn).append("\n"); + sb.append("tile -> " + title).append("\n"); + sb.append("author -> " + author).append("\n"); + sb.append("pages -> " + pages).append("\n"); + return sb.toString(); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/template/CassandraOperationsTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/template/CassandraOperationsTest.java new file mode 100644 index 000000000..82340180f --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/integration/core/template/CassandraOperationsTest.java @@ -0,0 +1,1145 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.cassandra.test.integration.core.template; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.util.Collection; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import org.cassandraunit.CassandraCQLUnit; +import org.cassandraunit.dataset.cql.ClassPathCQLDataSet; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.cassandra.core.CassandraOperations; +import org.springframework.cassandra.core.CassandraTemplate; +import org.springframework.cassandra.core.HostMapper; +import org.springframework.cassandra.core.PreparedStatementBinder; +import org.springframework.cassandra.core.PreparedStatementCallback; +import org.springframework.cassandra.core.PreparedStatementCreator; +import org.springframework.cassandra.core.ResultSetExtractor; +import org.springframework.cassandra.core.ResultSetFutureExtractor; +import org.springframework.cassandra.core.RingMember; +import org.springframework.cassandra.core.RowCallbackHandler; +import org.springframework.cassandra.core.RowIterator; +import org.springframework.cassandra.core.RowMapper; +import org.springframework.cassandra.core.SessionCallback; +import org.springframework.cassandra.test.integration.AbstractEmbeddedCassandraIntegrationTest; +import org.springframework.dao.DataAccessException; +import org.springframework.util.CollectionUtils; + +import com.datastax.driver.core.BoundStatement; +import com.datastax.driver.core.Host; +import com.datastax.driver.core.PreparedStatement; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.ResultSetFuture; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.exceptions.DriverException; + 
+/** + * Unit Tests for CassandraTemplate + * + * @author David Webb + * + */ +public class CassandraOperationsTest extends AbstractEmbeddedCassandraIntegrationTest { + + private CassandraOperations cassandraTemplate; + + private static Logger log = LoggerFactory.getLogger(CassandraOperationsTest.class); + + /* + * Objects used for test data + */ + final String ISBN_NINES = "999999999"; + final String TITLE_NINES = "Book of Nines"; + final Object[] o1 = new Object[] { "1234", "Moby Dick", "Herman Manville", new Integer(456) }; + final Object[] o2 = new Object[] { "2345", "War and Peace", "Russian Dude", new Integer(456) }; + final Object[] o3 = new Object[] { "3456", "Jane Ayre", "Charlotte", new Integer(456) }; + + /** + * This loads any test specific Cassandra objects + */ + @Rule + public CassandraCQLUnit cassandraCQLUnit = new CassandraCQLUnit(new ClassPathCQLDataSet( + "cassandraOperationsTest-cql-dataload.cql", this.keyspace), CASSANDRA_CONFIG, CASSANDRA_HOST, + CASSANDRA_NATIVE_PORT); + + @Before + public void setupTemplate() { + cassandraTemplate = new CassandraTemplate(session); + } + + @Test + public void ringTest() { + + List ring = cassandraTemplate.describeRing(); + + /* + * There must be 1 node in the cluster if the embedded server is + * running. 
+ */ + assertNotNull(ring); + + for (RingMember h : ring) { + log.info("ringTest Host -> " + h.address); + } + } + + @Test + public void hostMapperTest() { + + List ring = (List) cassandraTemplate.describeRing(new HostMapper() { + + @Override + public Collection mapHosts(Set host) throws DriverException { + + List list = new LinkedList(); + + for (Host h : host) { + MyHost mh = new MyHost(); + mh.someName = h.getAddress().getCanonicalHostName(); + list.add(mh); + } + + return list; + } + + }); + + assertNotNull(ring); + assertTrue(ring.size() > 0); + + for (MyHost h : ring) { + log.info("hostMapperTest Host -> " + h.someName); + } + + } + + @Test + @SuppressWarnings( "unchecked" ) + public void ingestionTestListOfList() { + + String cql = "insert into book (isbn, title, author, pages) values (?, ?, ?, ?)"; + + List> values = new LinkedList>(); + + values.add(new LinkedList(CollectionUtils.arrayToList(o1))); + values.add(new LinkedList(CollectionUtils.arrayToList(o2))); + values.add(new LinkedList(CollectionUtils.arrayToList(o3))); + + cassandraTemplate.ingest(cql, values); + + // Assert that the rows were inserted into Cassandra + Book b1 = getBook((String) o1[0]); + Book b2 = getBook((String) o2[0]); + Book b3 = getBook((String) o3[0]); + + assertBook(b1, objectToBook(o1)); + assertBook(b2, objectToBook(o2)); + assertBook(b3, objectToBook(o3)); + } + + @Test + public void ingestionTestObjectArray() { + + String cql = "insert into book (isbn, title, author, pages) values (?, ?, ?, ?)"; + + Object[][] values = new Object[3][]; + values[0] = o1; + values[1] = o2; + values[2] = o3; + + cassandraTemplate.ingest(cql, values); + + // Assert that the rows were inserted into Cassandra + Book b1 = getBook("1234"); + Book b2 = getBook("2345"); + Book b3 = getBook("3456"); + + assertBook(b1, objectToBook(o1)); + assertBook(b2, objectToBook(o2)); + assertBook(b3, objectToBook(o3)); + } + + /** + * This is an implementation of RowIterator for the purposes of testing passing 
your own Impl to CassandraTemplate + * + * @author David Webb + */ + final class MyRowIterator implements RowIterator { + + private Object[][] values; + + public MyRowIterator(Object[][] values) { + this.values = values; + } + + int index = 0; + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.RowIterator#next() + */ + @Override + public Object[] next() { + return values[index++]; + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.core.RowIterator#hasNext() + */ + @Override + public boolean hasNext() { + return index < values.length; + } + + } + + @Test + public void ingestionTestRowIterator() { + + String cql = "insert into book (isbn, title, author, pages) values (?, ?, ?, ?)"; + + final Object[][] v = new Object[3][]; + v[0] = o1; + v[1] = o2; + v[2] = o3; + RowIterator ri = new MyRowIterator(v); + + cassandraTemplate.ingest(cql, ri); + + // Assert that the rows were inserted into Cassandra + Book b1 = getBook("1234"); + Book b2 = getBook("2345"); + Book b3 = getBook("3456"); + + assertBook(b1, objectToBook(o1)); + assertBook(b2, objectToBook(o2)); + assertBook(b3, objectToBook(o3)); + + } + + @Test + public void executeTestSessionCallback() { + + final String isbn = UUID.randomUUID().toString(); + final String title = "Spring Data Cassandra Cookbook"; + final String author = "David Webb"; + final Integer pages = 1; + + cassandraTemplate.execute(new SessionCallback() { + + @Override + public Object doInSession(Session s) throws DataAccessException { + + String cql = "insert into book (isbn, title, author, pages) values (?, ?, ?, ?)"; + + PreparedStatement ps = s.prepare(cql); + BoundStatement bs = ps.bind(isbn, title, author, pages); + + s.execute(bs); + + return null; + + } + }); + + Book b = getBook(isbn); + + assertBook(b, isbn, title, author, pages); + + } + + @Test + public void executeTestCqlString() { + + final String isbn = UUID.randomUUID().toString(); + final String title = "Spring Data Cassandra Cookbook"; + final String 
author = "David Webb"; + final Integer pages = 1; + + cassandraTemplate.execute("insert into book (isbn, title, author, pages) values ('" + isbn + "', '" + title + + "', '" + author + "', " + pages + ")"); + + Book b = getBook(isbn); + + assertBook(b, isbn, title, author, pages); + + } + + @Test + public void executeAsynchronouslyTestCqlString() { + + final String isbn = UUID.randomUUID().toString(); + final String title = "Spring Data Cassandra Cookbook"; + final String author = "David Webb"; + final Integer pages = 1; + + cassandraTemplate.executeAsynchronously("insert into book (isbn, title, author, pages) values ('" + isbn + "', '" + + title + "', '" + author + "', " + pages + ")"); + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + Book b = getBook(isbn); + + assertBook(b, isbn, title, author, pages); + + } + + @Test + public void queryTestCqlStringResultSetExtractor() { + + final String isbn = "999999999"; + + Book b1 = cassandraTemplate.query("select * from book where isbn='" + isbn + "'", new ResultSetExtractor() { + + @Override + public Book extractData(ResultSet rs) throws DriverException, DataAccessException { + Row r = rs.one(); + assertNotNull(r); + + Book b = rowToBook(r); + + return b; + } + }); + + Book b2 = getBook(isbn); + + assertBook(b1, b2); + + } + + @Test + public void queryAsynchronouslyTestCqlStringResultSetExtractor() { + + final String isbn = "999999999"; + + Book b1 = cassandraTemplate.queryAsynchronously("select * from book where isbn='" + isbn + "'", + new ResultSetFutureExtractor() { + + @Override + public Book extractData(ResultSetFuture rs) throws DriverException, DataAccessException { + + ResultSet frs = rs.getUninterruptibly(); + Row r = frs.one(); + assertNotNull(r); + + Book b = rowToBook(r); + + return b; + } + }); + + Book b2 = getBook(isbn); + + assertBook(b1, b2); + + } + + @Test + public void queryTestCqlStringRowCallbackHandler() { + + final String isbn = "999999999"; + + 
final Book b1 = getBook(isbn); + + cassandraTemplate.query("select * from book where isbn='" + isbn + "'", new RowCallbackHandler() { + + @Override + public void processRow(Row row) throws DriverException { + + assertNotNull(row); + + Book b = rowToBook(row); + + assertBook(b1, b); + + } + }); + + } + + @Test + public void processTestResultSetRowCallbackHandler() { + + final String isbn = "999999999"; + + final Book b1 = getBook(isbn); + + ResultSet rs = cassandraTemplate.queryAsynchronously("select * from book where isbn='" + isbn + "'", + new ResultSetFutureExtractor() { + + @Override + public ResultSet extractData(ResultSetFuture rs) throws DriverException, DataAccessException { + + ResultSet frs = rs.getUninterruptibly(); + return frs; + } + }); + + assertNotNull(rs); + + cassandraTemplate.process(rs, new RowCallbackHandler() { + + @Override + public void processRow(Row row) throws DriverException { + + assertNotNull(row); + + Book b = rowToBook(row); + + assertBook(b1, b); + + } + + }); + + } + + @Test + public void queryTestCqlStringRowMapper() { + + // Insert our 3 test books. + ingestionTestObjectArray(); + + List books = cassandraTemplate.query("select * from book where isbn in ('1234','2345','3456')", + new RowMapper() { + + @Override + public Book mapRow(Row row, int rowNum) throws DriverException { + Book b = rowToBook(row); + return b; + } + }); + + log.debug("Size of Book List -> " + books.size()); + assertEquals(books.size(), 3); + assertBook(books.get(0), getBook(books.get(0).getIsbn())); + assertBook(books.get(1), getBook(books.get(1).getIsbn())); + assertBook(books.get(2), getBook(books.get(2).getIsbn())); + } + + @Test + public void processTestResultSetRowMapper() { + + // Insert our 3 test books. 
+ ingestionTestObjectArray(); + + ResultSet rs = cassandraTemplate.queryAsynchronously("select * from book where isbn in ('1234','2345','3456')", + new ResultSetFutureExtractor() { + + @Override + public ResultSet extractData(ResultSetFuture rs) throws DriverException, DataAccessException { + + ResultSet frs = rs.getUninterruptibly(); + return frs; + } + }); + + assertNotNull(rs); + + List books = cassandraTemplate.process(rs, new RowMapper() { + + @Override + public Book mapRow(Row row, int rowNum) throws DriverException { + Book b = rowToBook(row); + return b; + } + }); + + log.debug("Size of Book List -> " + books.size()); + assertEquals(books.size(), 3); + assertBook(books.get(0), getBook(books.get(0).getIsbn())); + assertBook(books.get(1), getBook(books.get(1).getIsbn())); + assertBook(books.get(2), getBook(books.get(2).getIsbn())); + + } + + @Test + public void queryForObjectTestCqlStringRowMapper() { + + Book book = cassandraTemplate.queryForObject("select * from book where isbn in ('" + ISBN_NINES + "')", + new RowMapper() { + @Override + public Book mapRow(Row row, int rowNum) throws DriverException { + Book b = rowToBook(row); + return b; + } + }); + + assertNotNull(book); + assertBook(book, getBook(ISBN_NINES)); + } + + /** + * Test that CQL for QueryForObject must only return 1 row or an IllegalArgumentException is thrown. + */ + @Test(expected = IllegalArgumentException.class) + public void queryForObjectTestCqlStringRowMapperNotOneRowReturned() { + + // Insert our 3 test books. + ingestionTestObjectArray(); + + @SuppressWarnings( "unused" ) + Book book = cassandraTemplate.queryForObject("select * from book where isbn in ('1234','2345','3456')", + new RowMapper() { + @Override + public Book mapRow(Row row, int rowNum) throws DriverException { + Book b = rowToBook(row); + return b; + } + }); + } + + @Test + public void processOneTestResultSetRowMapper() { + + // Insert our 3 test books. 
+ ingestionTestObjectArray(); + + ResultSet rs = cassandraTemplate.queryAsynchronously("select * from book where isbn in ('" + ISBN_NINES + "')", + new ResultSetFutureExtractor() { + + @Override + public ResultSet extractData(ResultSetFuture rs) throws DriverException, DataAccessException { + + ResultSet frs = rs.getUninterruptibly(); + return frs; + } + }); + + assertNotNull(rs); + + Book book = cassandraTemplate.processOne(rs, new RowMapper() { + @Override + public Book mapRow(Row row, int rowNum) throws DriverException { + Book b = rowToBook(row); + return b; + } + }); + + assertNotNull(book); + assertBook(book, getBook(ISBN_NINES)); + } + + @Test + public void quertForObjectTestCqlStringRequiredType() { + + String title = cassandraTemplate.queryForObject("select title from book where isbn in ('" + ISBN_NINES + "')", + String.class); + + assertEquals(title, TITLE_NINES); + + } + + @Test(expected = ClassCastException.class) + public void queryForObjectTestCqlStringRequiredTypeInvalid() { + + @SuppressWarnings( "unused" ) + Float title = cassandraTemplate.queryForObject("select title from book where isbn in ('" + ISBN_NINES + "')", + Float.class); + + } + + @Test + public void processOneTestResultSetType() { + + ResultSet rs = cassandraTemplate.queryAsynchronously("select title from book where isbn in ('" + ISBN_NINES + "')", + new ResultSetFutureExtractor() { + + @Override + public ResultSet extractData(ResultSetFuture rs) throws DriverException, DataAccessException { + + ResultSet frs = rs.getUninterruptibly(); + return frs; + } + }); + + assertNotNull(rs); + + String title = cassandraTemplate.processOne(rs, String.class); + + assertNotNull(title); + assertEquals(title, TITLE_NINES); + } + + @Test + public void queryForMapTestCqlString() { + + Map rsMap = cassandraTemplate + .queryForMap("select * from book where isbn in ('" + ISBN_NINES + "')"); + + log.debug(rsMap.toString()); + + Book b1 = objectToBook(rsMap.get("isbn"), rsMap.get("title"), 
rsMap.get("author"), rsMap.get("pages")); + + Book b2 = getBook(ISBN_NINES); + + assertBook(b1, b2); + + } + + @Test + public void processMapTestResultSet() { + + ResultSet rs = cassandraTemplate.queryAsynchronously("select * from book where isbn in ('" + ISBN_NINES + "')", + new ResultSetFutureExtractor() { + + @Override + public ResultSet extractData(ResultSetFuture rs) throws DriverException, DataAccessException { + + ResultSet frs = rs.getUninterruptibly(); + return frs; + } + }); + + assertNotNull(rs); + + Map rsMap = cassandraTemplate.processMap(rs); + + log.debug("Size of Book List -> " + rsMap.size()); + + Book b1 = objectToBook(rsMap.get("isbn"), rsMap.get("title"), rsMap.get("author"), rsMap.get("pages")); + + Book b2 = getBook(ISBN_NINES); + + assertBook(b1, b2); + + } + + @Test + public void queryForListTestCqlStringType() { + + // Insert our 3 test books. + ingestionTestObjectArray(); + + List titles = cassandraTemplate.queryForList("select title from book where isbn in ('1234','2345','3456')", + String.class); + + log.debug(titles.toString()); + + assertNotNull(titles); + assertEquals(titles.size(), 3); + + } + + @Test + public void processListTestResultSetType() { + + // Insert our 3 test books. + ingestionTestObjectArray(); + + ResultSet rs = cassandraTemplate.queryAsynchronously("select * from book where isbn in ('1234','2345','3456')", + new ResultSetFutureExtractor() { + + @Override + public ResultSet extractData(ResultSetFuture rs) throws DriverException, DataAccessException { + + ResultSet frs = rs.getUninterruptibly(); + return frs; + } + }); + + assertNotNull(rs); + + List titles = cassandraTemplate.processList(rs, String.class); + + log.debug(titles.toString()); + + assertNotNull(titles); + assertEquals(titles.size(), 3); + } + + @Test + public void queryForListOfMapCqlString() { + + // Insert our 3 test books. 
+ ingestionTestObjectArray(); + + List> results = cassandraTemplate + .queryForListOfMap("select * from book where isbn in ('1234','2345','3456')"); + + log.debug(results.toString()); + + assertEquals(results.size(), 3); + + } + + @Test + public void processListOfMapTestResultSet() { + + // Insert our 3 test books. + ingestionTestObjectArray(); + + ResultSet rs = cassandraTemplate.queryAsynchronously("select * from book where isbn in ('1234','2345','3456')", + new ResultSetFutureExtractor() { + + @Override + public ResultSet extractData(ResultSetFuture rs) throws DriverException, DataAccessException { + + ResultSet frs = rs.getUninterruptibly(); + return frs; + } + }); + + assertNotNull(rs); + + List> results = cassandraTemplate.processListOfMap(rs); + + log.debug(results.toString()); + + assertEquals(results.size(), 3); + + } + + @Test + public void executeTestCqlStringPreparedStatementCallback() { + + String cql = "insert into book (isbn, title, author, pages) values (?, ?, ?, ?)"; + + BoundStatement statement = cassandraTemplate.execute(cql, new PreparedStatementCallback() { + + @Override + public BoundStatement doInPreparedStatement(PreparedStatement ps) throws DriverException, DataAccessException { + BoundStatement bs = ps.bind(); + return bs; + } + }); + + assertNotNull(statement); + + } + + @Test + public void executeTestPreparedStatementCreatorPreparedStatementCallback() { + + final String cql = "insert into book (isbn, title, author, pages) values (?, ?, ?, ?)"; + + BoundStatement statement = cassandraTemplate.execute(new PreparedStatementCreator() { + + @Override + public PreparedStatement createPreparedStatement(Session session) throws DriverException { + return session.prepare(cql); + } + }, new PreparedStatementCallback() { + + @Override + public BoundStatement doInPreparedStatement(PreparedStatement ps) throws DriverException, DataAccessException { + BoundStatement bs = ps.bind(); + return bs; + } + }); + + assertNotNull(statement); + + } + + @Test + 
public void queryTestCqlStringPreparedStatementBinderResultSetExtractor() { + + final String cql = "select * from book where isbn = ?"; + final String isbn = "999999999"; + + Book b1 = cassandraTemplate.query(cql, new PreparedStatementBinder() { + + @Override + public BoundStatement bindValues(PreparedStatement ps) throws DriverException { + return ps.bind(isbn); + } + }, new ResultSetExtractor() { + + @Override + public Book extractData(ResultSet rs) throws DriverException, DataAccessException { + Row r = rs.one(); + assertNotNull(r); + + Book b = rowToBook(r); + + return b; + } + }); + + Book b2 = getBook(isbn); + + assertBook(b1, b2); + } + + @Test + public void queryTestCqlStringPreparedStatementBinderRowCallbackHandler() { + + final String cql = "select * from book where isbn = ?"; + final String isbn = "999999999"; + + cassandraTemplate.query(cql, new PreparedStatementBinder() { + + @Override + public BoundStatement bindValues(PreparedStatement ps) throws DriverException { + return ps.bind(isbn); + } + }, new RowCallbackHandler() { + + @Override + public void processRow(Row row) throws DriverException { + + Book b = rowToBook(row); + + Book b2 = getBook(isbn); + + assertBook(b, b2); + + } + }); + + } + + @Test + public void queryTestCqlStringPreparedStatementBinderRowMapper() { + + final String cql = "select * from book where isbn = ?"; + final String isbn = "999999999"; + + List books = cassandraTemplate.query(cql, new PreparedStatementBinder() { + + @Override + public BoundStatement bindValues(PreparedStatement ps) throws DriverException { + return ps.bind(isbn); + } + }, new RowMapper() { + + @Override + public Book mapRow(Row row, int rowNum) throws DriverException { + return rowToBook(row); + } + }); + + Book b2 = getBook(isbn); + + assertEquals(books.size(), 1); + assertBook(books.get(0), b2); + } + + @Test + public void queryTestPreparedStatementCreatorResultSetExtractor() { + + ingestionTestObjectArray(); + + final String cql = "select * from book"; + 
+ List books = cassandraTemplate.query(new PreparedStatementCreator() { + + @Override + public PreparedStatement createPreparedStatement(Session session) throws DriverException { + return session.prepare(cql); + } + }, new ResultSetExtractor>() { + + @Override + public List extractData(ResultSet rs) throws DriverException, DataAccessException { + + List books = new LinkedList(); + + for (Row row : rs.all()) { + books.add(rowToBook(row)); + } + + return books; + } + }); + + log.debug("Size of all Books -> " + books.size()); + + assertTrue(books.size() > 0); + } + + @Test + public void queryTestPreparedStatementCreatorRowCallbackHandler() { + + ingestionTestObjectArray(); + + final String cql = "select * from book"; + + cassandraTemplate.query(new PreparedStatementCreator() { + + @Override + public PreparedStatement createPreparedStatement(Session session) throws DriverException { + return session.prepare(cql); + } + }, new RowCallbackHandler() { + + @Override + public void processRow(Row row) throws DriverException { + + Book b = rowToBook(row); + + log.debug("Title -> " + b.getTitle()); + + } + }); + + } + + @Test + public void queryTestPreparedStatementCreatorRowMapper() { + + ingestionTestObjectArray(); + + final String cql = "select * from book"; + + List books = cassandraTemplate.query(new PreparedStatementCreator() { + + @Override + public PreparedStatement createPreparedStatement(Session session) throws DriverException { + return session.prepare(cql); + } + }, new RowMapper() { + + @Override + public Book mapRow(Row row, int rowNum) throws DriverException { + return rowToBook(row); + } + }); + + log.debug("Size of all Books -> " + books.size()); + + assertTrue(books.size() > 0); + } + + @Test + public void queryTestPreparedStatementCreatorPreparedStatementBinderResultSetExtractor() { + + final String cql = "select * from book where isbn = ?"; + final String isbn = "999999999"; + + List books = cassandraTemplate.query(new PreparedStatementCreator() { + + 
@Override + public PreparedStatement createPreparedStatement(Session session) throws DriverException { + return session.prepare(cql); + } + }, new PreparedStatementBinder() { + + @Override + public BoundStatement bindValues(PreparedStatement ps) throws DriverException { + return ps.bind(isbn); + } + }, new ResultSetExtractor>() { + + @Override + public List extractData(ResultSet rs) throws DriverException, DataAccessException { + List books = new LinkedList(); + + for (Row row : rs.all()) { + books.add(rowToBook(row)); + } + + return books; + } + }); + + Book b2 = getBook(isbn); + + log.debug("Book list Size -> " + books.size()); + + assertEquals(books.size(), 1); + assertBook(books.get(0), b2); + } + + @Test + public void queryTestPreparedStatementCreatorPreparedStatementBinderRowCallbackHandler() { + + final String cql = "select * from book where isbn = ?"; + final String isbn = "999999999"; + + cassandraTemplate.query(new PreparedStatementCreator() { + + @Override + public PreparedStatement createPreparedStatement(Session session) throws DriverException { + return session.prepare(cql); + } + }, new PreparedStatementBinder() { + + @Override + public BoundStatement bindValues(PreparedStatement ps) throws DriverException { + return ps.bind(isbn); + } + }, new RowCallbackHandler() { + + @Override + public void processRow(Row row) throws DriverException { + Book b = rowToBook(row); + Book b2 = getBook(isbn); + assertBook(b, b2); + } + }); + + } + + @Test + public void queryTestPreparedStatementCreatorPreparedStatementBinderRowMapper() { + + final String cql = "select * from book where isbn = ?"; + final String isbn = "999999999"; + + List books = cassandraTemplate.query(new PreparedStatementCreator() { + + @Override + public PreparedStatement createPreparedStatement(Session session) throws DriverException { + return session.prepare(cql); + } + }, new PreparedStatementBinder() { + + @Override + public BoundStatement bindValues(PreparedStatement ps) throws 
DriverException { + return ps.bind(isbn); + } + }, new RowMapper() { + + @Override + public Book mapRow(Row row, int rowNum) throws DriverException { + return rowToBook(row); + } + }); + + Book b2 = getBook(isbn); + + assertEquals(books.size(), 1); + assertBook(books.get(0), b2); + } + + /** + * Assert that a Book matches the arguments expected + * + * @param b + * @param orderedElements + */ + private void assertBook(Book b, Object... orderedElements) { + + assertEquals(b.getIsbn(), orderedElements[0]); + assertEquals(b.getTitle(), orderedElements[1]); + assertEquals(b.getAuthor(), orderedElements[2]); + assertEquals(b.getPages(), orderedElements[3]); + + } + + private Book rowToBook(Row row) { + Book b = new Book(); + b.setIsbn(row.getString("isbn")); + b.setTitle(row.getString("title")); + b.setAuthor(row.getString("author")); + b.setPages(row.getInt("pages")); + return b; + } + + /** + * Convert Object[] to a Book + * + * @param bookElements + * @return + */ + private Book objectToBook(Object... bookElements) { + Book b = new Book(); + b.setIsbn((String) bookElements[0]); + b.setTitle((String) bookElements[1]); + b.setAuthor((String) bookElements[2]); + b.setPages((Integer) bookElements[3]); + return b; + } + + /** + * Convert List to a Book + * + * @param bookElements + * @return + */ + private Book listToBook(List bookElements) { + Book b = new Book(); + b.setIsbn((String) bookElements.get(0)); + b.setTitle((String) bookElements.get(1)); + b.setAuthor((String) bookElements.get(2)); + b.setPages((Integer) bookElements.get(3)); + return b; + + } + + /** + * Assert that 2 Book objects are the same + * + * @param b + * @param orderedElements + */ + private void assertBook(Book b1, Book b2) { + + assertEquals(b1.getIsbn(), b2.getIsbn()); + assertEquals(b1.getTitle(), b2.getTitle()); + assertEquals(b1.getAuthor(), b2.getAuthor()); + assertEquals(b1.getPages(), b2.getPages()); + + } + + /** + * Get a Book from Cassandra for assertions. 
+ * + * @param isbn + * @return + */ + public Book getBook(final String isbn) { + + Book b = this.cassandraTemplate.query("select * from book where isbn = ?", new PreparedStatementBinder() { + + @Override + public BoundStatement bindValues(PreparedStatement ps) throws DriverException { + return ps.bind(isbn); + } + }, new ResultSetExtractor() { + + @Override + public Book extractData(ResultSet rs) throws DriverException, DataAccessException { + Book b = new Book(); + Row r = rs.one(); + b.setIsbn(r.getString("isbn")); + b.setTitle(r.getString("title")); + b.setAuthor(r.getString("author")); + b.setPages(r.getInt("pages")); + return b; + } + }); + + return b; + + } + + /** + * For testing a HostMapper Implementation + */ + public class MyHost { + public String someName; + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/CqlStringUtilsTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/CqlStringUtilsTest.java new file mode 100644 index 000000000..e54d7b29b --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/CqlStringUtilsTest.java @@ -0,0 +1,18 @@ +package org.springframework.cassandra.test.unit.core.cql; + +import static org.junit.Assert.*; +import static org.springframework.cassandra.core.cql.CqlStringUtils.isQuotedIdentifier; +import static org.springframework.cassandra.core.cql.CqlStringUtils.isUnquotedIdentifier; + +import org.junit.Test; + +public class CqlStringUtilsTest { + + @Test + public void testIsQuotedIdentifier() throws Exception { + assertFalse(isQuotedIdentifier("my\"id")); + assertTrue(isQuotedIdentifier("my\"\"id")); + assertFalse(isUnquotedIdentifier("my\"id")); + assertTrue(isUnquotedIdentifier("myid")); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/AlterKeyspaceCqlGeneratorTests.java 
b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/AlterKeyspaceCqlGeneratorTests.java new file mode 100644 index 000000000..f54ef316e --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/AlterKeyspaceCqlGeneratorTests.java @@ -0,0 +1,107 @@ +package org.springframework.cassandra.test.unit.core.cql.generator; + +import static org.junit.Assert.assertTrue; + +import java.util.HashMap; +import java.util.Map; + +import org.junit.Test; +import org.springframework.cassandra.core.cql.generator.AlterKeyspaceCqlGenerator; +import org.springframework.cassandra.core.keyspace.AlterKeyspaceSpecification; +import org.springframework.cassandra.core.keyspace.DefaultOption; +import org.springframework.cassandra.core.keyspace.KeyspaceOption; +import org.springframework.cassandra.core.keyspace.Option; + +public class AlterKeyspaceCqlGeneratorTests { + + /** + * Asserts that the preamble is first & correctly formatted in the given CQL string. + */ + public static void assertPreamble(String tableName, String cql) { + assertTrue(cql.startsWith("ALTER KEYSPACE " + tableName + " ")); + } + + private static void assertReplicationMap(Map replicationMap, String cql) { + assertTrue(cql.contains(" WITH replication = { ")); + + for (Map.Entry entry : replicationMap.entrySet() ) { + String keyValuePair = "'" + entry.getKey().getName() + "' : '" + entry.getValue().toString() + "'"; + assertTrue(cql.contains(keyValuePair)); + } + } + + public static void assertDurableWrites(Boolean durableWrites, String cql) { + assertTrue(cql.contains(" AND durable_writes = " + durableWrites)); + } + + /** + * Convenient base class that other test classes can use so as not to repeat the generics declarations. 
+ */ + public static abstract class AlterKeyspaceTest extends + KeyspaceOperationCqlGeneratorTest { + } + + public static class CompleteTest extends AlterKeyspaceTest { + + public String name = "mykeyspace"; + public Boolean durableWrites = true; + + public Map replicationMap = new HashMap(); + + public AlterKeyspaceSpecification specification() { + replicationMap.put( new DefaultOption( "class", String.class, false, false, true ), "SimpleStrategy" ); + replicationMap.put( new DefaultOption( "replication_factor", Long.class, false, false, true ), 1 ); + replicationMap.put( new DefaultOption( "dc1", Long.class, false, false, true ), 2 ); + replicationMap.put( new DefaultOption( "dc2", Long.class, false, false, true ), 3 ); + + return AlterKeyspaceSpecification.alterKeyspace() + .name(name) + .with(KeyspaceOption.REPLICATION, replicationMap) + .with(KeyspaceOption.DURABLE_WRITES, durableWrites); + } + + public AlterKeyspaceCqlGenerator generator() { + return new AlterKeyspaceCqlGenerator(specification); + } + + @Test + public void test() { + prepare(); + + assertPreamble(name, cql); + assertReplicationMap(replicationMap, cql); + assertDurableWrites(durableWrites, cql); + } + } + + public static class ReplicationMapOnlyTest extends AlterKeyspaceTest { + + public String name = "mytable"; + public Boolean durableWrites = true; + + public Map replicationMap = new HashMap(); + + public AlterKeyspaceSpecification specification() { + replicationMap.put( new DefaultOption( "class", String.class, false, false, true ), "SimpleStrategy" ); + replicationMap.put( new DefaultOption( "replication_factor", Long.class, false, false, true ), 1 ); + replicationMap.put( new DefaultOption( "dc1", Long.class, false, false, true ), 2 ); + replicationMap.put( new DefaultOption( "dc2", Long.class, false, false, true ), 3 ); + + return AlterKeyspaceSpecification.alterKeyspace() + .name(name) + .with(KeyspaceOption.REPLICATION, replicationMap); + } + + public AlterKeyspaceCqlGenerator 
generator() { + return new AlterKeyspaceCqlGenerator(specification); + } + + @Test + public void test() { + prepare(); + + assertPreamble(name, cql); + assertReplicationMap(replicationMap, cql); + } + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/AlterTableCqlGeneratorTests.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/AlterTableCqlGeneratorTests.java new file mode 100644 index 000000000..e281fdaa7 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/AlterTableCqlGeneratorTests.java @@ -0,0 +1,155 @@ +package org.springframework.cassandra.test.unit.core.cql.generator; + +import static org.junit.Assert.assertTrue; + +import java.util.LinkedHashMap; +import java.util.Map; + +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.cassandra.core.cql.generator.AlterTableCqlGenerator; +import org.springframework.cassandra.core.keyspace.AlterTableSpecification; +import org.springframework.cassandra.core.keyspace.Option; +import org.springframework.cassandra.core.keyspace.TableOption; +import org.springframework.cassandra.core.keyspace.TableOption.CachingOption; +import org.springframework.cassandra.core.keyspace.TableOption.CompactionOption; +import org.springframework.cassandra.core.keyspace.TableOption.CompressionOption; + +import com.datastax.driver.core.DataType; + +public class AlterTableCqlGeneratorTests { + + private final static Logger log = LoggerFactory.getLogger(AlterTableCqlGeneratorTests.class); + + /** + * Asserts that the preamble is first & correctly formatted in the given CQL string. + */ + public static void assertPreamble(String tableName, String cql) { + assertTrue(cql.startsWith("ALTER TABLE " + tableName + " ")); + } + + /** + * Asserts that the given list of columns definitions are contained in the given CQL string properly. 
+ * + * @param columnSpec IE, "foo text, bar blob" + */ + public static void assertColumnChanges(String columnSpec, String cql) { + assertTrue(cql.contains("")); + } + + /** + * Convenient base class that other test classes can use so as not to repeat the generics declarations. + */ + public static abstract class AlterTableTest extends + TableOperationCqlGeneratorTest { + } + + public static class BasicTest extends AlterTableTest { + + public String name = "mytable"; + public DataType alteredType = DataType.text(); + public String altered = "altered"; + + public DataType addedType = DataType.text(); + public String added = "added"; + + public String dropped = "dropped"; + + @Override + public AlterTableSpecification specification() { + return AlterTableSpecification.alterTable().name(name).alter(altered, alteredType).add(added, addedType); + } + + @Override + public AlterTableCqlGenerator generator() { + return new AlterTableCqlGenerator(specification); + } + + @Test + public void test() { + prepare(); + + assertPreamble(name, cql); + assertColumnChanges( + String.format("ALTER %s TYPE %s, ADD %s %s, DROP %s", altered, alteredType, added, addedType, dropped), cql); + } + } + + /** + * Fully test all available create table options + * + * @author David Webb + * + */ + public static class MultipleOptionsTest extends AlterTableTest { + + public String name = "timeseries_table"; + public DataType partitionKeyType0 = DataType.timeuuid(); + public String partitionKey0 = "tid"; + public DataType partitionKeyType1 = DataType.timestamp(); + public String partitionKey1 = "create_timestamp"; + public DataType columnType1 = DataType.text(); + public String column1 = "data_point"; + public Double readRepairChance = 0.6; + public Double dcLocalReadRepairChance = 0.8; + public Double bloomFilterFpChance = 0.002; + public Boolean replcateOnWrite = Boolean.FALSE; + public Long gcGraceSeconds = 1200l; + public String comment = "This is My Table"; + public Map compactionMap = new 
LinkedHashMap(); + public Map compressionMap = new LinkedHashMap(); + + @Override + public AlterTableSpecification specification() { + + // Compaction + compactionMap.put(CompactionOption.CLASS, "SizeTieredCompactionStrategy"); + compactionMap.put(CompactionOption.MIN_THRESHOLD, "4"); + // Compression + compressionMap.put(CompressionOption.SSTABLE_COMPRESSION, "SnappyCompressor"); + compressionMap.put(CompressionOption.CHUNK_LENGTH_KB, 128); + compressionMap.put(CompressionOption.CRC_CHECK_CHANCE, 0.75); + + return AlterTableSpecification + .alterTable() + .name(name) + // .with(TableOption.COMPACT_STORAGE) + .with(TableOption.READ_REPAIR_CHANCE, readRepairChance).with(TableOption.COMPACTION, compactionMap) + .with(TableOption.COMPRESSION, compressionMap).with(TableOption.BLOOM_FILTER_FP_CHANCE, bloomFilterFpChance) + .with(TableOption.CACHING, CachingOption.KEYS_ONLY).with(TableOption.REPLICATE_ON_WRITE, replcateOnWrite) + .with(TableOption.COMMENT, comment).with(TableOption.DCLOCAL_READ_REPAIR_CHANCE, dcLocalReadRepairChance) + .with(TableOption.GC_GRACE_SECONDS, gcGraceSeconds); + } + + @Test + public void test() { + + prepare(); + + log.info(cql); + + assertPreamble(name, cql); + // assertColumns(String.format("%s %s, %s %s, %s %s", partitionKey0, partitionKeyType0, partitionKey1, + // partitionKeyType1, column1, columnType1), cql); + // assertPrimaryKey(String.format("(%s, %s)", partitionKey0, partitionKey1), cql); + // assertNullOption(TableOption.COMPACT_STORAGE.getName(), cql); + // assertDoubleOption(TableOption.READ_REPAIR_CHANCE.getName(), readRepairChance, cql); + // assertDoubleOption(TableOption.DCLOCAL_READ_REPAIR_CHANCE.getName(), dcLocalReadRepairChance, cql); + // assertDoubleOption(TableOption.BLOOM_FILTER_FP_CHANCE.getName(), bloomFilterFpChance, cql); + // assertStringOption(TableOption.CACHING.getName(), CachingOption.KEYS_ONLY.getValue(), cql); + // assertStringOption(TableOption.REPLICATE_ON_WRITE.getName(), replcateOnWrite.toString(), cql); 
+ // assertStringOption(TableOption.COMMENT.getName(), comment, cql); + // assertLongOption(TableOption.GC_GRACE_SECONDS.getName(), gcGraceSeconds, cql); + + } + + /* (non-Javadoc) + * @see org.springframework.cassandra.test.unit.core.cql.generator.TableOperationCqlGeneratorTest#generator() + */ + @Override + public AlterTableCqlGenerator generator() { + return new AlterTableCqlGenerator(specification); + } + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/CreateIndexCqlGeneratorTests.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/CreateIndexCqlGeneratorTests.java new file mode 100644 index 000000000..aaaf9ec12 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/CreateIndexCqlGeneratorTests.java @@ -0,0 +1,59 @@ +package org.springframework.cassandra.test.unit.core.cql.generator; + +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import org.springframework.cassandra.core.cql.generator.CreateIndexCqlGenerator; +import org.springframework.cassandra.core.keyspace.CreateIndexSpecification; + +public class CreateIndexCqlGeneratorTests { + + /** + * Asserts that the preamble is first & correctly formatted in the given CQL string. + */ + public static void assertPreamble(String indexName, String tableName, String cql) { + assertTrue(cql.startsWith("CREATE INDEX " + indexName + " ON " + tableName)); + } + + /** + * Asserts that the given list of columns definitions are contained in the given CQL string properly. + * + * @param columnSpec IE, "(foo)" + */ + public static void assertColumn(String columnName, String cql) { + assertTrue(cql.contains("(" + columnName + ")")); + } + + /** + * Convenient base class that other test classes can use so as not to repeat the generics declarations or + * {@link #generator()} method. 
+ */ + public static abstract class CreateIndexTest extends + IndexOperationCqlGeneratorTest { + + public CreateIndexCqlGenerator generator() { + return new CreateIndexCqlGenerator(specification); + } + } + + public static class BasicTest extends CreateIndexTest { + + public String name = "myindex"; + public String tableName = "mytable"; + public String column1 = "column1"; + + public CreateIndexSpecification specification() { + return CreateIndexSpecification.createIndex().name(name).tableName(tableName).columnName(column1); + } + + @Test + public void test() { + prepare(); + + assertPreamble(name, tableName, cql); + assertColumn(column1, cql); + + } + } + +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/CreateKeyspaceCqlGeneratorTests.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/CreateKeyspaceCqlGeneratorTests.java new file mode 100644 index 000000000..6de52762b --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/CreateKeyspaceCqlGeneratorTests.java @@ -0,0 +1,105 @@ +package org.springframework.cassandra.test.unit.core.cql.generator; + +import static org.junit.Assert.assertTrue; + +import java.util.HashMap; +import java.util.Map; + +import org.junit.Test; +import org.springframework.cassandra.config.KeyspaceAttributes; +import org.springframework.cassandra.core.cql.generator.CreateKeyspaceCqlGenerator; +import org.springframework.cassandra.core.keyspace.CreateKeyspaceSpecification; +import org.springframework.cassandra.core.keyspace.DefaultOption; +import org.springframework.cassandra.core.keyspace.KeyspaceOption; +import org.springframework.cassandra.core.keyspace.Option; + +public class CreateKeyspaceCqlGeneratorTests { + + /** + * Asserts that the preamble is first & correctly formatted in the given CQL string. 
+ */ + public static void assertPreamble(String keyspaceName, String cql) { + assertTrue(cql.startsWith("CREATE KEYSPACE " + keyspaceName + " ")); + } + + private static void assertReplicationMap(Map replicationMap, String cql) { + assertTrue(cql.contains(" WITH replication = { ")); + + for (Map.Entry entry : replicationMap.entrySet()) { + String keyValuePair = "'" + entry.getKey().getName() + "' : " + (entry.getKey().quotesValue() ? "'" : "") + + entry.getValue().toString() + (entry.getKey().quotesValue() ? "'" : ""); + assertTrue(cql.contains(keyValuePair)); + } + } + + public static void assertDurableWrites(Boolean durableWrites, String cql) { + assertTrue(cql.contains(" AND durable_writes = " + durableWrites)); + } + + /** + * Convenient base class that other test classes can use so as not to repeat the generics declarations or + * {@link #generator()} method. + */ + public static abstract class CreateKeyspaceTest extends + KeyspaceOperationCqlGeneratorTest { + + @Override + public CreateKeyspaceCqlGenerator generator() { + return new CreateKeyspaceCqlGenerator(specification); + } + } + + public static class BasicTest extends CreateKeyspaceTest { + + public String name = "mykeyspace"; + public Boolean durableWrites = true; + + public Map replicationMap = KeyspaceAttributes.newSimpleReplication(); + + @Override + public CreateKeyspaceSpecification specification() { + keyspace = name; + + return CreateKeyspaceSpecification.createKeyspace().name(keyspace) + .with(KeyspaceOption.REPLICATION, replicationMap).with(KeyspaceOption.DURABLE_WRITES, durableWrites); + } + + @Test + public void test() { + prepare(); + + assertPreamble(keyspace, cql); + assertReplicationMap(replicationMap, cql); + assertDurableWrites(durableWrites, cql); + } + } + + public static class NetworkTopologyTest extends CreateKeyspaceTest { + + public String name = "mykeyspace"; + public Boolean durableWrites = false; + + public Map replicationMap = new HashMap(); + + @Override + public 
CreateKeyspaceSpecification specification() { + keyspace = name; + + replicationMap.put(new DefaultOption("class", String.class, false, false, true), "NetworkTopologyStrategy"); + replicationMap.put(new DefaultOption("dc1", Long.class, false, false, true), 2); + replicationMap.put(new DefaultOption("dc2", Long.class, false, false, true), 3); + + return CreateKeyspaceSpecification.createKeyspace().name(keyspace) + .with(KeyspaceOption.REPLICATION, replicationMap).with(KeyspaceOption.DURABLE_WRITES, durableWrites); + } + + @Test + public void test() { + prepare(); + + assertPreamble(keyspace, cql); + assertReplicationMap(replicationMap, cql); + assertDurableWrites(durableWrites, cql); + } + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/CreateTableCqlGeneratorTests.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/CreateTableCqlGeneratorTests.java new file mode 100644 index 000000000..e2befa5cb --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/CreateTableCqlGeneratorTests.java @@ -0,0 +1,244 @@ +package org.springframework.cassandra.test.unit.core.cql.generator; + +import static org.junit.Assert.assertTrue; + +import java.util.LinkedHashMap; +import java.util.Map; + +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.cassandra.core.cql.generator.CreateTableCqlGenerator; +import org.springframework.cassandra.core.keyspace.CreateTableSpecification; +import org.springframework.cassandra.core.keyspace.Option; +import org.springframework.cassandra.core.keyspace.TableOption; +import org.springframework.cassandra.core.keyspace.TableOption.CachingOption; +import org.springframework.cassandra.core.keyspace.TableOption.CompactionOption; +import org.springframework.cassandra.core.keyspace.TableOption.CompressionOption; + +import 
com.datastax.driver.core.DataType; + +public class CreateTableCqlGeneratorTests { + + private static final Logger log = LoggerFactory.getLogger(CreateTableCqlGeneratorTests.class); + + /** + * Asserts that the preamble is first & correctly formatted in the given CQL string. + */ + public static void assertPreamble(String tableName, String cql) { + assertTrue(cql.startsWith("CREATE TABLE " + tableName + " ")); + } + + /** + * Asserts that the given primary key definition is contained in the given CQL string properly. + * + * @param primaryKeyString IE, "foo", "foo, bar, baz", "(foo, bar), baz", etc + */ + public static void assertPrimaryKey(String primaryKeyString, String cql) { + assertTrue(cql.contains(", PRIMARY KEY (" + primaryKeyString + "))")); + } + + /** + * Asserts that the given list of columns definitions are contained in the given CQL string properly. + * + * @param columnSpec IE, "foo text, bar blob" + */ + public static void assertColumns(String columnSpec, String cql) { + assertTrue(cql.contains("(" + columnSpec + ",")); + } + + /** + * Asserts that the read repair change is set properly + */ + public static void assertStringOption(String name, String value, String cql) { + log.info(name + " -> " + value); + assertTrue(cql.contains(name + " = '" + value + "'")); + } + + /** + * Asserts that the option is set + */ + public static void assertDoubleOption(String name, Double value, String cql) { + log.info(name + " -> " + value); + assertTrue(cql.contains(name + " = " + value)); + } + + public static void assertLongOption(String name, Long value, String cql) { + log.info(name + " -> " + value); + assertTrue(cql.contains(name + " = " + value)); + } + + /** + * Asserts that the read repair change is set properly + */ + public static void assertNullOption(String name, String cql) { + log.info(name); + assertTrue(cql.contains(" " + name + " ")); + } + + /** + * Convenient base class that other test classes can use so as not to repeat the generics 
declarations or + * {@link #generator()} method. + */ + public static abstract class CreateTableTest extends + TableOperationCqlGeneratorTest { + + public CreateTableCqlGenerator generator() { + return new CreateTableCqlGenerator(specification); + } + } + + public static class BasicTest extends CreateTableTest { + + public String name = "mytable"; + public DataType partitionKeyType0 = DataType.text(); + public String partitionKey0 = "partitionKey0"; + public DataType columnType1 = DataType.text(); + public String column1 = "column1"; + + public CreateTableSpecification specification() { + return CreateTableSpecification.createTable().name(name).partitionKeyColumn(partitionKey0, partitionKeyType0) + .column(column1, columnType1); + } + + @Test + public void test() { + prepare(); + + assertPreamble(name, cql); + assertColumns(String.format("%s %s, %s %s", partitionKey0, partitionKeyType0, column1, columnType1), cql); + assertPrimaryKey(partitionKey0, cql); + } + } + + public static class CompositePartitionKeyTest extends CreateTableTest { + + public String name = "composite_partition_key_table"; + public DataType partKeyType0 = DataType.text(); + public String partKey0 = "partKey0"; + public DataType partKeyType1 = DataType.text(); + public String partKey1 = "partKey1"; + public String column0 = "column0"; + public DataType columnType0 = DataType.text(); + + @Override + public CreateTableSpecification specification() { + return CreateTableSpecification.createTable().name(name).partitionKeyColumn(partKey0, partKeyType0) + .partitionKeyColumn(partKey1, partKeyType1).column(column0, columnType0); + } + + @Test + public void test() { + prepare(); + + assertPreamble(name, cql); + assertColumns( + String.format("%s %s, %s %s, %s %s", partKey0, partKeyType0, partKey1, partKeyType1, column0, columnType0), + cql); + assertPrimaryKey(String.format("(%s, %s)", partKey0, partKey1), cql); + } + } + + /** + * Test just the Read Repair Chance + * + * @author David Webb + * + */ + 
public static class ReadRepairChanceTest extends CreateTableTest { + + public String name = "mytable"; + public DataType partitionKeyType0 = DataType.text(); + public String partitionKey0 = "partitionKey0"; + public DataType partitionKeyType1 = DataType.timestamp(); + public String partitionKey1 = "create_timestamp"; + public DataType columnType1 = DataType.text(); + public String column1 = "column1"; + public Double readRepairChance = 0.5; + + public CreateTableSpecification specification() { + return (CreateTableSpecification) CreateTableSpecification.createTable().name(name) + .partitionKeyColumn(partitionKey0, partitionKeyType0).partitionKeyColumn(partitionKey1, partitionKeyType1) + .column(column1, columnType1).with(TableOption.READ_REPAIR_CHANCE, readRepairChance); + } + + @Test + public void test() { + prepare(); + + assertPreamble(name, cql); + assertColumns(String.format("%s %s, %s %s, %s %s", partitionKey0, partitionKeyType0, partitionKey1, + partitionKeyType1, column1, columnType1), cql); + assertPrimaryKey(String.format("(%s, %s)", partitionKey0, partitionKey1), cql); + assertDoubleOption(TableOption.READ_REPAIR_CHANCE.getName(), readRepairChance, cql); + } + } + + /** + * Fully test all available create table options + * + * @author David Webb + * + */ + public static class MultipleOptionsTest extends CreateTableTest { + + public String name = "timeseries_table"; + public DataType partitionKeyType0 = DataType.timeuuid(); + public String partitionKey0 = "tid"; + public DataType partitionKeyType1 = DataType.timestamp(); + public String partitionKey1 = "create_timestamp"; + public DataType columnType1 = DataType.text(); + public String column1 = "data_point"; + public Double readRepairChance = 0.5; + public Double dcLocalReadRepairChance = 0.7; + public Double bloomFilterFpChance = 0.001; + public Boolean replcateOnWrite = Boolean.FALSE; + public Long gcGraceSeconds = 600l; + public String comment = "This is My Table"; + public Map compactionMap = new 
LinkedHashMap(); + public Map compressionMap = new LinkedHashMap(); + + public CreateTableSpecification specification() { + + // Compaction + compactionMap.put(CompactionOption.CLASS, "SizeTieredCompactionStrategy"); + compactionMap.put(CompactionOption.MIN_THRESHOLD, "4"); + // Compression + compressionMap.put(CompressionOption.SSTABLE_COMPRESSION, "SnappyCompressor"); + compressionMap.put(CompressionOption.CHUNK_LENGTH_KB, 128); + compressionMap.put(CompressionOption.CRC_CHECK_CHANCE, 0.75); + + return (CreateTableSpecification) CreateTableSpecification.createTable().name(name) + .partitionKeyColumn(partitionKey0, partitionKeyType0).partitionKeyColumn(partitionKey1, partitionKeyType1) + .column(column1, columnType1).with(TableOption.COMPACT_STORAGE) + .with(TableOption.READ_REPAIR_CHANCE, readRepairChance).with(TableOption.COMPACTION, compactionMap) + .with(TableOption.COMPRESSION, compressionMap).with(TableOption.BLOOM_FILTER_FP_CHANCE, bloomFilterFpChance) + .with(TableOption.CACHING, CachingOption.KEYS_ONLY).with(TableOption.REPLICATE_ON_WRITE, replcateOnWrite) + .with(TableOption.COMMENT, comment).with(TableOption.DCLOCAL_READ_REPAIR_CHANCE, dcLocalReadRepairChance) + .with(TableOption.GC_GRACE_SECONDS, gcGraceSeconds); + } + + @Test + public void test() { + + prepare(); + + log.info(cql); + + assertPreamble(name, cql); + assertColumns(String.format("%s %s, %s %s, %s %s", partitionKey0, partitionKeyType0, partitionKey1, + partitionKeyType1, column1, columnType1), cql); + assertPrimaryKey(String.format("(%s, %s)", partitionKey0, partitionKey1), cql); + assertNullOption(TableOption.COMPACT_STORAGE.getName(), cql); + assertDoubleOption(TableOption.READ_REPAIR_CHANCE.getName(), readRepairChance, cql); + assertDoubleOption(TableOption.DCLOCAL_READ_REPAIR_CHANCE.getName(), dcLocalReadRepairChance, cql); + assertDoubleOption(TableOption.BLOOM_FILTER_FP_CHANCE.getName(), bloomFilterFpChance, cql); + assertStringOption(TableOption.CACHING.getName(), 
CachingOption.KEYS_ONLY.getValue(), cql); + assertStringOption(TableOption.REPLICATE_ON_WRITE.getName(), replcateOnWrite.toString(), cql); + assertStringOption(TableOption.COMMENT.getName(), comment, cql); + assertLongOption(TableOption.GC_GRACE_SECONDS.getName(), gcGraceSeconds, cql); + + } + } + +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/DropIndexCqlGeneratorTests.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/DropIndexCqlGeneratorTests.java new file mode 100644 index 000000000..6db90bc65 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/DropIndexCqlGeneratorTests.java @@ -0,0 +1,69 @@ +package org.springframework.cassandra.test.unit.core.cql.generator; + +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import org.springframework.cassandra.core.cql.generator.DropIndexCqlGenerator; +import org.springframework.cassandra.core.keyspace.DropIndexSpecification; + +public class DropIndexCqlGeneratorTests { + + /** + * Asserts that the preamble is first & correctly formatted in the given CQL string. + */ + public static void assertStatement(String indexName, boolean ifExists, String cql) { + assertTrue(cql.equals("DROP INDEX " + (ifExists ? "IF EXISTS " : "") + indexName + ";")); + } + + /** + * Convenient base class that other test classes can use so as not to repeat the generics declarations. 
+ */ + public static abstract class DropIndexTest extends + IndexOperationCqlGeneratorTest { + } + + public static class BasicTest extends DropIndexTest { + + public String name = "myindex"; + + public DropIndexSpecification specification() { + return DropIndexSpecification.dropIndex().name(name); + } + + public DropIndexCqlGenerator generator() { + return new DropIndexCqlGenerator(specification); + } + + @Test + public void test() { + prepare(); + + assertStatement(name, false, cql); + } + + } + + public static class IfExistsTest extends DropIndexTest { + + public String name = "myindex"; + + public DropIndexSpecification specification() { + return DropIndexSpecification.dropIndex().name(name) + // .ifExists() + ; + } + + public DropIndexCqlGenerator generator() { + return new DropIndexCqlGenerator(specification); + } + + @Test + public void test() { + prepare(); + + // assertStatement(name, true, cql); + assertStatement(name, false, cql); + } + + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/DropKeyspaceCqlGeneratorTests.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/DropKeyspaceCqlGeneratorTests.java new file mode 100644 index 000000000..08ae48df2 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/DropKeyspaceCqlGeneratorTests.java @@ -0,0 +1,44 @@ +package org.springframework.cassandra.test.unit.core.cql.generator; + +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import org.springframework.cassandra.core.cql.generator.DropKeyspaceCqlGenerator; +import org.springframework.cassandra.core.keyspace.DropKeyspaceSpecification; + +public class DropKeyspaceCqlGeneratorTests { + + /** + * Asserts that the preamble is first & correctly formatted in the given CQL string. 
+ */ + public static void assertStatement(String tableName, String cql) { + assertTrue(cql.equals("DROP KEYSPACE " + tableName + ";")); + } + + /** + * Convenient base class that other test classes can use so as not to repeat the generics declarations. + */ + public static abstract class DropTableTest extends + KeyspaceOperationCqlGeneratorTest { + } + + public static class BasicTest extends DropTableTest { + + public String name = "mykeyspace"; + + public DropKeyspaceSpecification specification() { + return DropKeyspaceSpecification.dropKeyspace().name(name); + } + + public DropKeyspaceCqlGenerator generator() { + return new DropKeyspaceCqlGenerator(specification); + } + + @Test + public void test() { + prepare(); + + assertStatement(name, cql); + } + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/DropTableCqlGeneratorTests.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/DropTableCqlGeneratorTests.java new file mode 100644 index 000000000..15a41632b --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/DropTableCqlGeneratorTests.java @@ -0,0 +1,73 @@ +package org.springframework.cassandra.test.unit.core.cql.generator; + +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import org.springframework.cassandra.core.cql.generator.DropTableCqlGenerator; +import org.springframework.cassandra.core.keyspace.DropTableSpecification; + +public class DropTableCqlGeneratorTests { + + /** + * Asserts that the preamble is first & correctly formatted in the given CQL string. + */ + public static void assertStatement(String tableName, boolean ifExists, String cql) { + assertTrue(cql.equals("DROP TABLE " + (ifExists ? "IF EXISTS " : "") + tableName + ";")); + } + + /** + * Asserts that the given list of columns definitions are contained in the given CQL string properly. 
+ * + * @param columnSpec IE, "foo text, bar blob" + */ + public static void assertColumnChanges(String columnSpec, String cql) { + assertTrue(cql.contains("")); + } + + /** + * Convenient base class that other test classes can use so as not to repeat the generics declarations. + */ + public static abstract class DropTableTest extends + TableOperationCqlGeneratorTest { + } + + public static class BasicTest extends DropTableTest { + + public String name = "mytable"; + + public DropTableSpecification specification() { + return DropTableSpecification.dropTable().name(name); + } + + public DropTableCqlGenerator generator() { + return new DropTableCqlGenerator(specification); + } + + @Test + public void test() { + prepare(); + + assertStatement(name, false, cql); + } + } + + // public static class IfExistsTest extends DropTableTest { + // + // public String name = "mytable"; + // + // public DropTableSpecification specification() { + // return DropTableSpecification.dropTable().ifExists().name(name); + // } + // + // public DropTableCqlGenerator generator() { + // return new DropTableCqlGenerator(specification); + // } + // + // @Test + // public void test() { + // prepare(); + // + // assertStatement(name, true, cql); + // } + // } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/IndexOperationCqlGeneratorTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/IndexOperationCqlGeneratorTest.java new file mode 100644 index 000000000..cc6fd636f --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/IndexOperationCqlGeneratorTest.java @@ -0,0 +1,38 @@ +package org.springframework.cassandra.test.unit.core.cql.generator; + +import org.junit.Test; +import org.springframework.cassandra.core.cql.generator.IndexNameCqlGenerator; +import org.springframework.cassandra.core.keyspace.IndexNameSpecification; + +/** + * Useful 
test class that specifies just about as much as you can for a CQL generation test. Intended to be extended by + * classes that contain methods annotated with {@link Test}. Everything is public because this is a test class with no + * need for encapsulation, and it makes for easier reuse in other tests like integration tests (hint hint). + * + * @author Matthew T. Adams + * @author David Webb + * + * @param The type of the {@link IndexNameSpecification} + * @param The type of the {@link IndexNameCqlGenerator} + */ +public abstract class IndexOperationCqlGeneratorTest, G extends IndexNameCqlGenerator> { + + public abstract S specification(); + + public abstract G generator(); + + public String indexName; + public S specification; + public G generator; + public String cql; + + public void prepare() { + this.specification = specification(); + this.generator = generator(); + this.cql = generateCql(); + } + + public String generateCql() { + return generator.toCql(); + } +} \ No newline at end of file diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/KeyspaceOperationCqlGeneratorTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/KeyspaceOperationCqlGeneratorTest.java new file mode 100644 index 000000000..7998a028d --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/KeyspaceOperationCqlGeneratorTest.java @@ -0,0 +1,39 @@ +package org.springframework.cassandra.test.unit.core.cql.generator; + +import org.junit.Test; +import org.springframework.cassandra.core.cql.generator.KeyspaceNameCqlGenerator; +import org.springframework.cassandra.core.cql.generator.TableNameCqlGenerator; +import org.springframework.cassandra.core.keyspace.KeyspaceNameSpecification; +import org.springframework.cassandra.core.keyspace.TableNameSpecification; + +/** + * Useful test class that specifies just about as much as you can for a CQL 
generation test. Intended to be extended by + * classes that contain methods annotated with {@link Test}. Everything is public because this is a test class with no + * need for encapsulation, and it makes for easier reuse in other tests like integration tests (hint hint). + * + * @author Matthew T. Adams + * + * @param The type of the {@link TableNameSpecification} + * @param The type of the {@link TableNameCqlGenerator} + */ +public abstract class KeyspaceOperationCqlGeneratorTest, G extends KeyspaceNameCqlGenerator> { + + public abstract S specification(); + + public abstract G generator(); + + public String keyspace; + public S specification; + public G generator; + public String cql; + + public void prepare() { + this.specification = specification(); + this.generator = generator(); + this.cql = generateCql(); + } + + public String generateCql() { + return generator.toCql(); + } +} \ No newline at end of file diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/TableOperationCqlGeneratorTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/TableOperationCqlGeneratorTest.java new file mode 100644 index 000000000..cb5172d2c --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/cql/generator/TableOperationCqlGeneratorTest.java @@ -0,0 +1,36 @@ +package org.springframework.cassandra.test.unit.core.cql.generator; + +import org.springframework.cassandra.core.cql.generator.TableNameCqlGenerator; +import org.springframework.cassandra.core.keyspace.TableNameSpecification; + +/** + * Useful test class that specifies just about as much as you can for a CQL generation test. Intended to be extended by + * classes that contain methods annotated with {@link Test}. Everything is public because this is a test class with no + * need for encapsulation, and it makes for easier reuse in other tests like integration tests (hint hint). 
+ * + * @author Matthew T. Adams + * + * @param The type of the {@link TableNameSpecification} + * @param The type of the {@link TableNameCqlGenerator} + */ +public abstract class TableOperationCqlGeneratorTest, G extends TableNameCqlGenerator> { + + public abstract S specification(); + + public abstract G generator(); + + public String tableName; + public S specification; + public G generator; + public String cql; + + public void prepare() { + this.specification = specification(); + this.generator = generator(); + this.cql = generateCql(); + } + + public String generateCql() { + return generator.toCql(); + } +} \ No newline at end of file diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/keyspace/OptionTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/keyspace/OptionTest.java new file mode 100644 index 000000000..e5a8a3345 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/core/keyspace/OptionTest.java @@ -0,0 +1,102 @@ +package org.springframework.cassandra.test.unit.core.keyspace; + +import static org.junit.Assert.*; + +import java.lang.annotation.RetentionPolicy; + +import org.junit.Test; +import org.springframework.cassandra.core.keyspace.DefaultOption; +import org.springframework.cassandra.core.keyspace.Option; + +public class OptionTest { + + @Test(expected = IllegalArgumentException.class) + public void testOptionWithNullName() { + new DefaultOption(null, Object.class, true, true, true); + } + + @Test(expected = IllegalArgumentException.class) + public void testOptionWithEmptyName() { + new DefaultOption("", Object.class, true, true, true); + } + + @Test + public void testOptionWithNullType() { + new DefaultOption("opt", null, true, true, true); + new DefaultOption("opt", null, false, true, true); + } + + @Test + public void testOptionWithNullTypeIsCoerceable() { + Option op = new DefaultOption("opt", null, true, true, true); + 
assertTrue(op.isCoerceable("")); + assertTrue(op.isCoerceable(null)); + } + + @Test + public void testOptionValueCoercion() { + String name = "my_option"; + Class type = String.class; + boolean requires = true; + boolean escapes = true; + boolean quotes = true; + + Option op = new DefaultOption(name, type, requires, escapes, quotes); + + assertTrue(op.isCoerceable("opt")); + assertEquals("'opt'", op.toString("opt")); + assertEquals("'opt''n'", op.toString("opt'n")); + + type = Long.class; + escapes = false; + quotes = false; + op = new DefaultOption(name, type, requires, escapes, quotes); + + String expected = "1"; + for (Object value : new Object[] { 1, "1" }) { + assertTrue(op.isCoerceable(value)); + assertEquals(expected, op.toString(value)); + } + assertFalse(op.isCoerceable("x")); + assertTrue(op.isCoerceable(null)); + + type = Long.class; + escapes = false; + quotes = true; + op = new DefaultOption(name, type, requires, escapes, quotes); + + expected = "'1'"; + for (Object value : new Object[] { 1, "1" }) { + assertTrue(op.isCoerceable(value)); + assertEquals(expected, op.toString(value)); + } + assertFalse(op.isCoerceable("x")); + assertTrue(op.isCoerceable(null)); + + type = Double.class; + escapes = false; + quotes = false; + op = new DefaultOption(name, type, requires, escapes, quotes); + + String[] expecteds = new String[] { "1", "1.0", "1.0", "1", "1.0", null }; + Object[] values = new Object[] { 1, 1.0F, 1.0D, "1", "1.0", null }; + for (int i = 0; i < values.length; i++) { + assertTrue(op.isCoerceable(values[i])); + assertEquals(expecteds[i], op.toString(values[i])); + } + assertFalse(op.isCoerceable("x")); + assertTrue(op.isCoerceable(null)); + + type = RetentionPolicy.class; + escapes = false; + quotes = false; + op = new DefaultOption(name, type, requires, escapes, quotes); + + assertTrue(op.isCoerceable(null)); + assertTrue(op.isCoerceable(RetentionPolicy.CLASS)); + assertTrue(op.isCoerceable("CLASS")); + assertFalse(op.isCoerceable("x")); + 
assertEquals("CLASS", op.toString("CLASS")); + assertEquals("CLASS", op.toString(RetentionPolicy.CLASS)); + } +} diff --git a/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/support/CassandraExceptionTranslatorTest.java b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/support/CassandraExceptionTranslatorTest.java new file mode 100644 index 000000000..255947c18 --- /dev/null +++ b/spring-cassandra/src/test/java/org/springframework/cassandra/test/unit/support/CassandraExceptionTranslatorTest.java @@ -0,0 +1,71 @@ +package org.springframework.cassandra.test.unit.support; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; +import org.springframework.cassandra.support.CassandraExceptionTranslator; +import org.springframework.cassandra.support.exception.CassandraInvalidConfigurationInQueryException; +import org.springframework.cassandra.support.exception.CassandraInvalidQueryException; +import org.springframework.cassandra.support.exception.CassandraKeyspaceExistsException; +import org.springframework.cassandra.support.exception.CassandraSchemaElementExistsException; +import org.springframework.cassandra.support.exception.CassandraTableExistsException; +import org.springframework.dao.DataAccessException; + +import com.datastax.driver.core.exceptions.AlreadyExistsException; +import com.datastax.driver.core.exceptions.InvalidConfigurationInQueryException; +import com.datastax.driver.core.exceptions.InvalidQueryException; + +public class CassandraExceptionTranslatorTest { + + CassandraExceptionTranslator tx = new CassandraExceptionTranslator(); + + @Test + public void testTableExistsException() { + String keyspace = ""; + String table = "tbl"; + AlreadyExistsException cx = new AlreadyExistsException(keyspace, table); + DataAccessException dax = tx.translateExceptionIfPossible(cx); + assertNotNull(dax); + 
assertTrue(dax instanceof CassandraTableExistsException); + + CassandraTableExistsException x = (CassandraTableExistsException) dax; + assertEquals(table, x.getTableName()); + assertEquals(x.getTableName(), x.getElementName()); + assertEquals(CassandraSchemaElementExistsException.ElementType.TABLE, x.getElementType()); + assertEquals(cx, x.getCause()); + } + + @Test + public void testKeyspaceExistsException() { + String keyspace = "ks"; + String table = ""; + AlreadyExistsException cx = new AlreadyExistsException(keyspace, table); + DataAccessException dax = tx.translateExceptionIfPossible(cx); + assertNotNull(dax); + assertTrue(dax instanceof CassandraKeyspaceExistsException); + + CassandraKeyspaceExistsException x = (CassandraKeyspaceExistsException) dax; + assertEquals(keyspace, x.getKeyspaceName()); + assertEquals(x.getKeyspaceName(), x.getElementName()); + assertEquals(CassandraSchemaElementExistsException.ElementType.KEYSPACE, x.getElementType()); + assertEquals(cx, x.getCause()); + } + + @Test + public void testInvalidConfigurationInQueryException() { + String msg = "msg"; + InvalidQueryException cx = new InvalidConfigurationInQueryException(msg); + DataAccessException dax = tx.translateExceptionIfPossible(cx); + assertNotNull(dax); + assertTrue(dax instanceof CassandraInvalidConfigurationInQueryException); + assertEquals(cx, dax.getCause()); + + cx = new InvalidQueryException(msg); + dax = tx.translateExceptionIfPossible(cx); + assertNotNull(dax); + assertTrue(dax instanceof CassandraInvalidQueryException); + assertEquals(cx, dax.getCause()); + } +} diff --git a/spring-cassandra/src/test/resources/cassandraOperationsTest-cql-dataload.cql b/spring-cassandra/src/test/resources/cassandraOperationsTest-cql-dataload.cql new file mode 100644 index 000000000..239ae3e25 --- /dev/null +++ b/spring-cassandra/src/test/resources/cassandraOperationsTest-cql-dataload.cql @@ -0,0 +1,3 @@ +create table book (isbn text, title text, author text, pages int, PRIMARY KEY 
(isbn)); +create table book_alt (isbn text, title text, author text, pages int, PRIMARY KEY (isbn)); +insert into book (isbn, title, author, pages) values ('999999999', 'Book of Nines', 'Nine Nine', 999); \ No newline at end of file diff --git a/spring-cassandra/src/test/resources/integration/cql/generator/CreateIndexCqlGeneratorIntegrationTests-BasicTest.cql b/spring-cassandra/src/test/resources/integration/cql/generator/CreateIndexCqlGeneratorIntegrationTests-BasicTest.cql new file mode 100644 index 000000000..5201c753c --- /dev/null +++ b/spring-cassandra/src/test/resources/integration/cql/generator/CreateIndexCqlGeneratorIntegrationTests-BasicTest.cql @@ -0,0 +1 @@ +create table mytable (id uuid primary key, column1 text); \ No newline at end of file diff --git a/spring-cassandra/src/test/resources/logback-test.xml b/spring-cassandra/src/test/resources/logback-test.xml new file mode 100644 index 000000000..7db55523f --- /dev/null +++ b/spring-cassandra/src/test/resources/logback-test.xml @@ -0,0 +1,21 @@ + + + + + + + %d %5p | %t | %-55logger{55} | %m | %n + + + + + + + + + + + + + + \ No newline at end of file diff --git a/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/FullySpecifiedKeyspaceCreatingXmlConfigTest-context.xml b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/FullySpecifiedKeyspaceCreatingXmlConfigTest-context.xml new file mode 100644 index 000000000..24b776df2 --- /dev/null +++ b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/FullySpecifiedKeyspaceCreatingXmlConfigTest-context.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/FullySpecifiedKeyspaceCreatingXmlConfigTest.properties 
b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/FullySpecifiedKeyspaceCreatingXmlConfigTest.properties new file mode 100644 index 000000000..a5049296f --- /dev/null +++ b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/FullySpecifiedKeyspaceCreatingXmlConfigTest.properties @@ -0,0 +1 @@ +script2=CREATE KEYSPACE script2 WITH durable_writes = true AND replication = { 'replication_factor' : 1, 'class' : 'SimpleStrategy' }; diff --git a/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/MinimalKeyspaceCreatingXmlConfigTest-context.xml b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/MinimalKeyspaceCreatingXmlConfigTest-context.xml new file mode 100644 index 000000000..688cb3dba --- /dev/null +++ b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/MinimalKeyspaceCreatingXmlConfigTest-context.xml @@ -0,0 +1,15 @@ + + + + + + + + + diff --git a/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/MinimalXmlConfigTest-context.xml b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/MinimalXmlConfigTest-context.xml new file mode 100644 index 000000000..e0939fdc3 --- /dev/null +++ b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/MinimalXmlConfigTest-context.xml @@ -0,0 +1,14 @@ + + + + + + + + + diff --git a/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/XmlConfigTest-context.xml b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/XmlConfigTest-context.xml new file mode 100644 index 000000000..39e2ff612 --- /dev/null +++ 
b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/XmlConfigTest-context.xml @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + diff --git a/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/xmlconfigtest.properties b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/xmlconfigtest.properties new file mode 100644 index 000000000..2d7a6eb68 --- /dev/null +++ b/spring-cassandra/src/test/resources/org/springframework/cassandra/test/integration/config/xml/xmlconfigtest.properties @@ -0,0 +1,3 @@ +cassandra.contactPoints=localhost +cassandra.port=9042 +cassandra.keyspace=xmlconfigtest diff --git a/spring-cassandra/src/test/resources/spring-cassandra.yaml b/spring-cassandra/src/test/resources/spring-cassandra.yaml new file mode 100644 index 000000000..67c59aba4 --- /dev/null +++ b/spring-cassandra/src/test/resources/spring-cassandra.yaml @@ -0,0 +1,690 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting. 
+# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +# num_tokens: 256 + +# If you haven't specified num_tokens, or have set it to the default of 1 then +# you should always specify InitialToken when setting up a production +# cluster for the first time, and often when adding capacity later. +# The principle is that each node should be given an equal slice of +# the token ring; see http://wiki.apache.org/cassandra/Operations +# for more details. +# +# If blank, Cassandra will request a token bisecting the range of +# the heaviest-loaded existing node. If there is no load information +# available, such as is the case with a new cluster, it will pick +# a random token, which will lead to hot spots. +initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours +# throttle in KBs per second, per delivery thread +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# The following setting populates the page cache on memtable flush and compaction +# WARNING: Enable this setting only when the whole node's data fits in memory. +# Defaults to: false +# populate_io_cache_on_flush: false + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. 
+# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +authenticator: org.apache.cassandra.auth.AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: org.apache.cassandra.auth.AllowAllAuthorizer + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +# permissions_validity_in_ms: 2000 + +# The partitioner is responsible for distributing rows (by key) across +# nodes in the cluster. Any IPartitioner may be used, including your +# own as long as it is on the classpath. Out of the box, Cassandra +# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner +# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}. +# +# - RandomPartitioner distributes rows across the cluster evenly by md5. +# This is the default prior to 1.2 and is retained for compatibility. +# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128 +# Hash Function instead of md5. When in doubt, this is the best option. +# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows +# scanning rows in key order, but the ordering can generate hot spots +# for sequential insertion workloads. 
+# - OrderPreservingPartitioner is an obsolete form of BOP, that stores +# - keys in a less-efficient format and only works with keys that are +# UTF8-encoded Strings. +# - CollatingOPP collates according to EN,US rules rather than lexical byte +# ordering. Use this as an example if you need custom collation. +# +# See http://wiki.apache.org/cassandra/Operations for more on +# partitioners and token selection. +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. Cassandra +# will spread data evenly across them, subject to the granularity of +# the configured compaction strategy. +data_file_directories: + - target/embeddedCassandra/data + +# commit log +commitlog_directory: target/embeddedCassandra/commitlog + +# policy for data disk failures: +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# can still be inspected via JMX. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must contain the entire row, +# so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the key cache. 
Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and are relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Maximum size of the row cache in memory. +# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup. +# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should +# save the row cache. Caches are saved to saved_caches_directory as specified +# in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and are relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save +# Disabled by default, meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# The provider for the row cache to use. +# +# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider +# +# SerializingCacheProvider serialises the contents of the row and stores +# it in native memory, i.e., off the JVM Heap. Serialized rows take +# significantly less memory than "live" rows in the JVM, so you can cache +# more rows in a given memory footprint. And storing the cache off-heap +# means you can use smaller heap sizes, reducing the impact of GC pauses. +# Note however that when a row is requested from the row cache, it must be +# deserialized into the heap for use. +# +# It is also valid to specify the fully-qualified class name to a class +# that implements org.apache.cassandra.cache.IRowCacheProvider. 
+# +# Defaults to SerializingCacheProvider +row_cache_provider: SerializingCacheProvider + +# saved caches +saved_caches_directory: target/embeddedCassandra/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait up to +# commitlog_sync_batch_window_in_ms milliseconds for other writes, before +# performing the sync. +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 50 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: ",," + - seeds: "127.0.0.1" + +# emergency pressure valve: each time heap usage after a full (CMS) +# garbage collection is above this fraction of the max, Cassandra will +# flush the largest memtables. +# +# Set to 1.0 to disable. 
Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +# +# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: +# it is most effective under light to moderate load, or read-heavy +# workloads; under truly massive write load, it will often be too +# little, too late. +flush_largest_memtables_at: 0.75 + +# emergency pressure valve #2: the first time heap usage after a full +# (CMS) garbage collection is above this fraction of the max, +# Cassandra will reduce cache maximum _capacity_ to the given fraction +# of the current _size_. Should usually be set substantially above +# flush_largest_memtables_at, since that will have less long-term +# impact on the system. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +reduce_cache_sizes_at: 0.85 +reduce_cache_capacity_to: 0.6 + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 + +# Total memory to use for memtables. Cassandra will flush the largest +# memtable when this much memory is used. +# If omitted, Cassandra will set it to 1/3 of the heap. +# memtable_total_space_in_mb: 2048 + +# Total space to use for commitlogs. Since commitlog segments are +# mmapped, and hence use up address space, the default size is 32 +# on 32-bit JVMs, and 1024 on 64-bit JVMs. 
+# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Cassandra will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# commitlog_total_space_in_mb: 4096 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. If you have a large heap and many data directories, +# you can increase this value for better flush performance. +# By default this will be set to the amount of data directories defined. +#memtable_flush_writers: 1 + +# the number of full memtables to allow pending flush, that is, +# waiting for a writer thread. At a minimum, this should be set to +# the maximum number of secondary indexes created on a single CF. +memtable_flush_queue_size: 4 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSDs; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +ssl_storage_port: 7001 + +# Address to bind to and tell other Cassandra nodes to connect to. You +# _must_ change this if you want multiple nodes to be able to +# communicate! +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing _if_ the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting this to 0.0.0.0 is always wrong. 
+listen_address: localhost + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +start_native_transport: true +# port for the CQL native transport to listen for clients on +native_transport_port: 9042 +# The minimum and maximum threads for handling requests when the native +# transport is used. They are similar to rpc_min_threads and rpc_max_threads, +# though the defaults differ slightly. +# native_transport_min_threads: 16 +#native_transport_max_threads: 48 + +# Whether to start the thrift rpc server. +start_rpc: true + +# The address to bind the Thrift RPC service to -- clients connect +# here. Unlike ListenAddress above, you _can_ specify 0.0.0.0 here if +# you want Thrift to listen on all interfaces. +# +# Leaving this blank has the same effect it does for ListenAddress, +# (i.e. it will be based on the configured hostname of the node). +rpc_address: localhost +# port for Thrift to listen for clients on +rpc_port: 9160 + +# enable or disable keepalive on rpc connections +rpc_keepalive: true + +# Cassandra provides three out-of-the-box options for the RPC Server: +# +# sync -> One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha -> Stands for "half synchronous, half asynchronous." 
All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. +# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provides no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. +# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# Uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it it is defined by net.ipv4.tcp_wmem +# See: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_wmem +# and: man tcp +# internode_send_buff_size_in_bytes: +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum field length). 
+thrift_framed_transport_size_in_mb: 15 + +# The max length of a thrift message, including all fields and +# internal thrift overhead. +thrift_max_message_length_in_mb: 16 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Add column indexes to a row after its contents reach this size. +# Increase if your column values are large, or if you have a very large +# number of columns. The competing causes are, Cassandra has to +# deserialize this much of the row to read a single column, so you want +# it to be small - at least if you do many partial-row reads - but all +# the index data is read for each access, so you don't want to generate +# that wastefully either. +column_index_size_in_kb: 64 + +# Size limit for rows being compacted in memory. Larger rows will spill +# over to disk and use a slower two-pass compaction process. A message +# will be logged specifying the row key. +in_memory_compaction_limit_in_mb: 64 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long running compactions. 
The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the number of cores. +# Uncomment to make compaction mono-threaded, the pre-0.8 default. +#concurrent_compactors: 1 + +# Multi-threaded compaction. When enabled, each compaction will use +# up to one thread per core, plus one thread per sstable being merged. +# This is usually only useful for SSD-based hardware: otherwise, +# your concern is usually to get compaction to do LESS i/o (see: +# compaction_throughput_mb_per_sec), not more. +multithreaded_compaction: false + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this account for all types +# of compaction, including validation compaction. +compaction_throughput_mb_per_sec: 16 + +# Track cached row keys during compaction, and re-cache their new +# positions in the compacted sstable. Disable if you use really large +# key caches. +compaction_preheat_key_cache: true + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. 
+# stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 10000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 10000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 10000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts. If disabled, Cassandra will assume the request +# was forwarded to the replica instantly by the coordinator +# +# Warning: before enabling this property make sure ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Enable socket timeout for streaming operation. +# When a timeout occurs during streaming, streaming is retried from the start +# of the current file. This _can_ involve re-streaming an important amount of +# data, so you should avoid setting the value too low. +# Default value is 0, which never times out streams. +# streaming_socket_timeout_in_ms: 0 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." 
Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This improves cache locality +# when disabling read repair, which can further improve throughput. +# Only appropriate for single-datacenter deployments. +# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - GossipingPropertyFileSnitch +# The rack and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via gossip. If +# cassandra-topology.properties exists, it is used as a fallback, allowing +# migration from the PropertyFileSnitch. +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's +# IP address, respectively. Unless this happens to match your +# deployment conventions (as it did Facebook's), this is best used +# as an example of writing a custom Snitch class. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) 
+# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. 
+# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# - default_weight -- default_weight is optional and allows for +# overriding the default which is 1. +# - weights -- Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# index_interval controls the sampling of entries from the primary +# row index in terms of space versus time. The larger the interval, +# the smaller and less effective the sampling will be. In technical +# terms, the interval corresponds to the number of index entries that +# are skipped between taking each sample. All the sampled entries +# must fit in memory. Generally, a value between 128 and 512 here +# coupled with a large key cache size on CFs results in the best trade +# offs. This value is not often changed, however if you have many +# very small rows (many to an OS page), then increasing this will +# often lower memory usage without an impact on performance. +index_interval: 128 + +# Enable or disable inter-node encryption +# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that +# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher +# suite for authentication, key exchange and encryption of the actual data transfers. 
+# NOTE: No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set trustore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +internode_compression: all + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. 
+# inter_dc_tcp_nodelay: true diff --git a/spring-cassandra/template.mf b/spring-cassandra/template.mf new file mode 100644 index 000000000..a61324370 --- /dev/null +++ b/spring-cassandra/template.mf @@ -0,0 +1,30 @@ +Bundle-SymbolicName: org.springframework.cassandra +Bundle-Name: Spring Cassandra +Bundle-Vendor: Spring Data Cassandra Community +Bundle-ManifestVersion: 2 +Import-Package: + sun.reflect;version="0";resolution:=optional +Import-Template: + org.springframework.beans.*;version="[3.1.0, 4.0.0)", + org.springframework.cache.*;version="[3.1.0, 4.0.0)", + org.springframework.context.*;version="[3.1.0, 4.0.0)", + org.springframework.core.*;version="[3.1.0, 4.0.0)", + org.springframework.dao.*;version="[3.1.0, 4.0.0)", + org.springframework.scheduling.*;resolution:="optional";version="[3.1.0, 4.0.0)", + org.springframework.util.*;version="[3.1.0, 4.0.0)", + org.springframework.oxm.*;resolution:="optional";version="[3.1.0, 4.0.0)", + org.springframework.transaction.support.*;version="[3.1.0, 4.0.0)", + org.springframework.data.*;version="[1.5.0, 2.0.0)", + org.springframework.expression.*;version="[3.1.0, 4.0.0)", + org.aopalliance.*;version="[1.0.0, 2.0.0)";resolution:=optional, + org.apache.commons.logging.*;version="[1.1.1, 2.0.0)", + org.w3c.dom.*;version="0", + javax.xml.transform.*;resolution:="optional";version="0", + com.datastax.driver.core.*;resolution:="optional";version="[0.1.0, 1.0.0)", + org.apache.cassandra.db.marshal.*;version="[1.2.0, 1.3.0)", + org.slf4j.*;version="[1.5.0, 1.8.0)", + org.idevlab.rjc.*;resolution:="optional";version="[0.6.4, 0.6.4]", + org.apache.commons.pool.impl.*;resolution:="optional";version="[1.0.0, 3.0.0)", + org.codehaus.jackson.*;resolution:="optional";version="[1.6, 2.0.0)", + org.apache.commons.beanutils.*;resolution:="optional";version=1.8.5, + com.google.common.*;resolution:="optional";version="[11.0.0, 20.0.0)" \ No newline at end of file diff --git a/spring-data-cassandra-distribution/pom.xml 
b/spring-data-cassandra-distribution/pom.xml new file mode 100644 index 000000000..72402004f --- /dev/null +++ b/spring-data-cassandra-distribution/pom.xml @@ -0,0 +1,42 @@ + + + + 4.0.0 + + spring-data-cassandra-distribution + + pom + + Spring Data Cassandra - Distribution + Distribution build for Spring Data Cassandra + + + org.springframework.data + spring-data-cassandra-parent + 1.0.0.BUILD-SNAPSHOT + ../pom.xml + + + + ${basedir}/.. + SDCASS + 1.0.3 + + + + + + org.apache.maven.plugins + maven-assembly-plugin + 2.3 + + + org.codehaus.mojo + wagon-maven-plugin + ${wagon.version} + + + + + diff --git a/spring-data-cassandra/pom.xml b/spring-data-cassandra/pom.xml new file mode 100644 index 000000000..f0f7c6d0f --- /dev/null +++ b/spring-data-cassandra/pom.xml @@ -0,0 +1,122 @@ + + + + 4.0.0 + + spring-data-cassandra + + Spring Data Cassandra - Core + Cassandra support for Spring Data + + + org.springframework.data + spring-data-cassandra-parent + 1.0.0.BUILD-SNAPSHOT + ../pom.xml + + + + 1.0.0.GA + + + + + + ${project.groupId} + spring-cassandra + + + + + org.springframework + spring-expression + + + + + ${project.groupId} + spring-data-commons + ${springdata.commons} + + + + javax.annotation + jsr250-api + 1.0 + true + + + + + javax.enterprise + cdi-api + provided + true + + + + org.xerial.snappy + snappy-java + test + + + + org.cassandraunit + cassandra-unit + test + + + slf4j-log4j12 + org.slf4j + + + + + + cglib + cglib-nodep + test + + + + org.codehaus.jackson + jackson-mapper-asl + test + + + + org.codehaus.jackson + jackson-core-asl + test + + + + javax.el + el-api + test + + + + + javax.validation + validation-api + ${validation} + true + + + + org.hibernate + hibernate-validator + test + + + + joda-time + joda-time + test + + + + + diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/Constants.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/Constants.java new file mode 100644 index 
000000000..fb0341d05 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/Constants.java @@ -0,0 +1,24 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra; + +/** + * @author David Webb + * + */ +public interface Constants { + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/CassandraNamespaceHandler.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/CassandraNamespaceHandler.java new file mode 100644 index 000000000..310bb5a7b --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/CassandraNamespaceHandler.java @@ -0,0 +1,32 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.config; + +import org.springframework.beans.factory.xml.NamespaceHandlerSupport; + +/** + * Namespace handler for <cassandra>. + * + * @author Alex Shvid + */ + +public class CassandraNamespaceHandler extends NamespaceHandlerSupport { + + public void init() { + + // registerBeanDefinitionParser("keyspace", new CassandraKeyspaceParser()); + } +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/KeyspaceAttributes.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/KeyspaceAttributes.java new file mode 100644 index 000000000..0d4d3d1df --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/KeyspaceAttributes.java @@ -0,0 +1,76 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.config; + +import java.util.Collection; + +/** + * Keyspace attributes are used for manipulation around keyspace at the startup. Auto property defines the way how to do + * this. Other attributes used to ensure or update keyspace settings. + * + * @author Alex Shvid + */ +public class KeyspaceAttributes extends org.springframework.cassandra.config.KeyspaceAttributes { + + /* + * auto possible values: + * validate: validate the keyspace, makes no changes. + * update: update the keyspace. 
+ * create: creates the keyspace, destroying previous data. + * create-drop: drop the keyspace at the end of the session. + */ + public static final String AUTO_VALIDATE = "validate"; + public static final String AUTO_UPDATE = "update"; + public static final String AUTO_CREATE = "create"; + public static final String AUTO_CREATE_DROP = "create-drop"; + + private String auto = AUTO_VALIDATE; + + private Collection tables; + + public String getAuto() { + return auto; + } + + public void setAuto(String auto) { + this.auto = auto; + } + + public boolean isValidate() { + return AUTO_VALIDATE.equals(auto); + } + + public boolean isUpdate() { + return AUTO_UPDATE.equals(auto); + } + + public boolean isCreate() { + return AUTO_CREATE.equals(auto); + } + + public boolean isCreateDrop() { + return AUTO_CREATE_DROP.equals(auto); + } + + public Collection getTables() { + return tables; + } + + public void setTables(Collection tables) { + this.tables = tables; + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/TableAttributes.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/TableAttributes.java new file mode 100644 index 000000000..c10e2eefb --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/TableAttributes.java @@ -0,0 +1,49 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.config; + +/** + * Table attributes are used for manipulation around table at the startup (create/update/validate). + * + * @author Alex Shvid + */ +public class TableAttributes { + + private String entity; + private String name; + + public String getEntity() { + return entity; + } + + public void setEntity(String entity) { + this.entity = entity; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + @Override + public String toString() { + return "TableAttributes [entity=" + entity + "]"; + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/java/AbstractSpringDataCassandraConfiguration.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/java/AbstractSpringDataCassandraConfiguration.java new file mode 100644 index 000000000..ad6c84044 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/config/java/AbstractSpringDataCassandraConfiguration.java @@ -0,0 +1,129 @@ +/* + * Copyright 2011-2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.config.java; + +import java.util.HashSet; +import java.util.Set; + +import org.springframework.beans.factory.BeanClassLoaderAware; +import org.springframework.beans.factory.config.BeanDefinition; +import org.springframework.cassandra.config.java.AbstractCassandraConfiguration; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ClassPathScanningCandidateComponentProvider; +import org.springframework.context.annotation.Configuration; +import org.springframework.core.type.filter.AnnotationTypeFilter; +import org.springframework.data.annotation.Persistent; +import org.springframework.data.cassandra.convert.CassandraConverter; +import org.springframework.data.cassandra.convert.MappingCassandraConverter; +import org.springframework.data.cassandra.core.CassandraAdminOperations; +import org.springframework.data.cassandra.core.CassandraAdminTemplate; +import org.springframework.data.cassandra.mapping.CassandraMappingContext; +import org.springframework.data.cassandra.mapping.CassandraPersistentEntity; +import org.springframework.data.cassandra.mapping.CassandraPersistentProperty; +import org.springframework.data.cassandra.mapping.Table; +import org.springframework.data.mapping.context.MappingContext; +import org.springframework.util.ClassUtils; +import org.springframework.util.StringUtils; + +/** + * Base class for Spring Data Cassandra configuration using JavaConfig. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +@Configuration +public abstract class AbstractSpringDataCassandraConfiguration extends AbstractCassandraConfiguration implements + BeanClassLoaderAware { + + private ClassLoader beanClassLoader; + + /** + * The base package to scan for entities annotated with {@link Table} annotations. By default, returns the package + * name of {@literal this} (this.getClass().getPackage().getName()). 
+ */ + protected String getMappingBasePackage() { + return getClass().getPackage().getName(); + } + + /** + * Creates a {@link CassandraAdminTemplate}. + * + * @throws Exception + */ + @Bean + public CassandraAdminOperations adminTemplate() throws Exception { + return new CassandraAdminTemplate(session().getObject()); + } + + /** + * Return the {@link MappingContext} instance to map Entities to properties. + * + * @throws ClassNotFoundException + */ + @Bean + public MappingContext, CassandraPersistentProperty> cassandraMappingContext() + throws ClassNotFoundException { + CassandraMappingContext context = new CassandraMappingContext(); + context.setInitialEntitySet(getInitialEntitySet()); + return context; + } + + /** + * Return the {@link CassandraConverter} instance to convert Rows to Objects, Objects to BuiltStatements + * + * @throws ClassNotFoundException + */ + @Bean + public CassandraConverter converter() throws ClassNotFoundException { + MappingCassandraConverter converter = new MappingCassandraConverter(cassandraMappingContext()); + converter.setBeanClassLoader(beanClassLoader); + return converter; + } + + /** + * Scans the mapping base package for entity classes annotated with {@link Table} or {@link Persistent}. + * + * @see #getMappingBasePackage() + * @return Set<Class<?>> representing the annotated entity classes found. 
+ * @throws ClassNotFoundException + */ + protected Set> getInitialEntitySet() throws ClassNotFoundException { + + String basePackage = getMappingBasePackage(); + Set> initialEntitySet = new HashSet>(); + + if (StringUtils.hasText(basePackage)) { + ClassPathScanningCandidateComponentProvider componentProvider = new ClassPathScanningCandidateComponentProvider( + false); + componentProvider.addIncludeFilter(new AnnotationTypeFilter(Table.class)); + componentProvider.addIncludeFilter(new AnnotationTypeFilter(Persistent.class)); + + // TODO: figure out which ClassLoader to use here + ClassLoader classLoader = getClass().getClassLoader(); + + for (BeanDefinition candidate : componentProvider.findCandidateComponents(basePackage)) { + initialEntitySet.add(ClassUtils.forName(candidate.getBeanClassName(), classLoader)); + } + } + + return initialEntitySet; + } + + @Override + public void setBeanClassLoader(ClassLoader classLoader) { + this.beanClassLoader = classLoader; + } +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/AbstractCassandraConverter.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/AbstractCassandraConverter.java new file mode 100644 index 000000000..ad707e860 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/AbstractCassandraConverter.java @@ -0,0 +1,59 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.convert; + +import org.springframework.beans.factory.InitializingBean; +import org.springframework.core.convert.ConversionService; +import org.springframework.core.convert.support.DefaultConversionService; +import org.springframework.data.convert.EntityInstantiators; + +/** + * Base class for {@link CassandraConverter} implementations. Sets up a {@link ConversionService} and populates basic + * converters. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +public abstract class AbstractCassandraConverter implements CassandraConverter, InitializingBean { + + protected final ConversionService conversionService; + protected EntityInstantiators instantiators = new EntityInstantiators(); + + /** + * Creates a new {@link AbstractCassandraConverter} using the given {@link ConversionService}. + */ + public AbstractCassandraConverter(ConversionService conversionService) { + this.conversionService = conversionService == null ? new DefaultConversionService() : conversionService; + } + + /** + * Registers {@link EntityInstantiators} to customize entity instantiation. + * + * @param instantiators + */ + public void setInstantiators(EntityInstantiators instantiators) { + this.instantiators = instantiators == null ? 
new EntityInstantiators() : instantiators; + } + + @Override + public ConversionService getConversionService() { + return conversionService; + } + + @Override + public void afterPropertiesSet() { + } +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/CassandraConverter.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/CassandraConverter.java new file mode 100644 index 000000000..ac07a2f98 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/CassandraConverter.java @@ -0,0 +1,33 @@ +/* + * Copyright 2010-2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.convert; + +import org.springframework.cassandra.core.keyspace.CreateTableSpecification; +import org.springframework.data.cassandra.mapping.CassandraPersistentEntity; +import org.springframework.data.cassandra.mapping.CassandraPersistentProperty; +import org.springframework.data.convert.EntityConverter; + +/** + * Central Cassandra specific converter interface from Object to Row. 
+ * + * @author Alex Shvid + */ +public interface CassandraConverter extends + EntityConverter, CassandraPersistentProperty, Object, Object> { + + // TODO: move this method to a more appropriate location + CreateTableSpecification getCreateTableSpecification(CassandraPersistentEntity entity); +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/CassandraPropertyValueProvider.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/CassandraPropertyValueProvider.java new file mode 100644 index 000000000..1c0c11653 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/CassandraPropertyValueProvider.java @@ -0,0 +1,94 @@ +/* + * Copyright 2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.convert; + +import java.nio.ByteBuffer; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.data.cassandra.mapping.CassandraPersistentProperty; +import org.springframework.data.mapping.model.DefaultSpELExpressionEvaluator; +import org.springframework.data.mapping.model.PropertyValueProvider; +import org.springframework.data.mapping.model.SpELExpressionEvaluator; +import org.springframework.util.Assert; + +import com.datastax.driver.core.DataType; +import com.datastax.driver.core.Row; + +/** + * {@link PropertyValueProvider} to read property values from a {@link Row}. + * + * @author Alex Shvid + */ +public class CassandraPropertyValueProvider implements PropertyValueProvider { + + private static Logger log = LoggerFactory.getLogger(CassandraPropertyValueProvider.class); + + private final Row source; + private final SpELExpressionEvaluator evaluator; + + /** + * Creates a new {@link CassandraPropertyValueProvider} with the given {@link Row} and + * {@link DefaultSpELExpressionEvaluator}. + * + * @param source must not be {@literal null}. + * @param evaluator must not be {@literal null}. 
+ */ + public CassandraPropertyValueProvider(Row source, DefaultSpELExpressionEvaluator evaluator) { + Assert.notNull(source); + Assert.notNull(evaluator); + + this.source = source; + this.evaluator = evaluator; + } + + /* + * (non-Javadoc) + * @see org.springframework.data.convert.PropertyValueProvider#getPropertyValue(org.springframework.data.mapping.PersistentProperty) + */ + @SuppressWarnings("unchecked") + public T getPropertyValue(CassandraPersistentProperty property) { + + String expression = property.getSpelExpression(); + if (expression != null) { + return evaluator.evaluate(expression); + } + + String columnName = property.getColumnName(); + if (source.isNull(property.getColumnName())) { + return null; + } + DataType columnType = source.getColumnDefinitions().getType(columnName); + + log.debug(columnType.getName().name()); + + /* + * Dave Webb - Added handler for text since getBytes was throwing + * InvalidTypeException when using getBytes on a text column. + */ + // TODO Might need to qualify all DataTypes as we encounter them. + if (columnType.equals(DataType.text())) { + return (T) source.getString(columnName); + } + if (columnType.equals(DataType.cint())) { + return (T) new Integer(source.getInt(columnName)); + } + + ByteBuffer bytes = source.getBytes(columnName); + return (T) columnType.deserialize(bytes); + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/MappingCassandraConverter.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/MappingCassandraConverter.java new file mode 100644 index 000000000..b9d44f83a --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/MappingCassandraConverter.java @@ -0,0 +1,337 @@ +/* + * Copyright 2011-2013 by the original author(s). + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.convert; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.BeansException; +import org.springframework.beans.factory.BeanClassLoaderAware; +import org.springframework.cassandra.core.keyspace.CreateTableSpecification; +import org.springframework.context.ApplicationContext; +import org.springframework.context.ApplicationContextAware; +import org.springframework.core.convert.support.DefaultConversionService; +import org.springframework.data.cassandra.mapping.CassandraPersistentEntity; +import org.springframework.data.cassandra.mapping.CassandraPersistentProperty; +import org.springframework.data.convert.EntityInstantiator; +import org.springframework.data.mapping.PropertyHandler; +import org.springframework.data.mapping.context.MappingContext; +import org.springframework.data.mapping.model.BeanWrapper; +import org.springframework.data.mapping.model.DefaultSpELExpressionEvaluator; +import org.springframework.data.mapping.model.MappingException; +import org.springframework.data.mapping.model.PersistentEntityParameterValueProvider; +import org.springframework.data.mapping.model.PropertyValueProvider; +import org.springframework.data.mapping.model.SpELContext; +import org.springframework.data.util.ClassTypeInformation; +import org.springframework.data.util.TypeInformation; +import org.springframework.util.ClassUtils; + +import com.datastax.driver.core.Row; +import com.datastax.driver.core.querybuilder.Delete.Where; +import 
com.datastax.driver.core.querybuilder.Insert; +import com.datastax.driver.core.querybuilder.QueryBuilder; +import com.datastax.driver.core.querybuilder.Update; + +/** + * {@link CassandraConverter} that uses a {@link MappingContext} to do sophisticated mapping of domain objects to + * {@link Row}. + * + * @author Alex Shvid + */ +public class MappingCassandraConverter extends AbstractCassandraConverter implements CassandraConverter, + ApplicationContextAware, BeanClassLoaderAware { + + protected static final Logger log = LoggerFactory.getLogger(MappingCassandraConverter.class); + + protected final MappingContext, CassandraPersistentProperty> mappingContext; + protected ApplicationContext applicationContext; + private SpELContext spELContext; + private boolean useFieldAccessOnly = true; + + private ClassLoader beanClassLoader; + + /** + * Creates a new {@link MappingCassandraConverter} given the new {@link MappingContext}. + * + * @param mappingContext must not be {@literal null}. + */ + public MappingCassandraConverter( + MappingContext, CassandraPersistentProperty> mappingContext) { + super(new DefaultConversionService()); + this.mappingContext = mappingContext; + this.spELContext = new SpELContext(RowReaderPropertyAccessor.INSTANCE); + } + + @SuppressWarnings("unchecked") + public R readRow(Class clazz, Row row) { + + Class beanClassLoaderClass = transformClassToBeanClassLoaderClass(clazz); + + TypeInformation type = ClassTypeInformation.from(beanClassLoaderClass); + // TypeInformation typeToUse = typeMapper.readType(row, type); + TypeInformation typeToUse = type; + Class rawType = typeToUse.getType(); + + if (Row.class.isAssignableFrom(rawType)) { + return (R) row; + } + + CassandraPersistentEntity persistentEntity = (CassandraPersistentEntity) mappingContext + .getPersistentEntity(typeToUse); + if (persistentEntity == null) { + throw new MappingException("No mapping metadata found for " + rawType.getName()); + } + + return readRowInternal(persistentEntity, 
row); + } + + @Override + public MappingContext, CassandraPersistentProperty> getMappingContext() { + return mappingContext; + } + + @Override + public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { + this.applicationContext = applicationContext; + this.spELContext = new SpELContext(this.spELContext, applicationContext); + } + + protected S readRowInternal(final CassandraPersistentEntity entity, final Row row) { + + final DefaultSpELExpressionEvaluator evaluator = new DefaultSpELExpressionEvaluator(row, spELContext); + + final PropertyValueProvider propertyProvider = new CassandraPropertyValueProvider(row, + evaluator); + PersistentEntityParameterValueProvider parameterProvider = new PersistentEntityParameterValueProvider( + entity, propertyProvider, null); + + EntityInstantiator instantiator = instantiators.getInstantiatorFor(entity); + S instance = instantiator.createInstance(entity, parameterProvider); + + final BeanWrapper, S> wrapper = BeanWrapper.create(instance, conversionService); + S result = wrapper.getBean(); + + entity.doWithProperties(new PropertyHandler() { + + @Override + public void doWithPersistentProperty(CassandraPersistentProperty prop) { + + MappingCassandraConverter.this.handlePersistentPropertyRead(row, entity, prop, propertyProvider, wrapper); + } + }); + + return result; + } + + protected void handlePersistentPropertyRead(final Row row, final CassandraPersistentEntity entity, + final CassandraPersistentProperty prop, + final PropertyValueProvider propertyProvider, final BeanWrapper wrapper) { + + if (entity.isConstructorArgument(prop)) { // skip 'cause prop was set in ctor + return; + } + + if (prop.isCompositePrimaryKey()) { + // TODO: handle composite primary key properties via recursion into this method + } + + boolean hasValueForProperty = row.getColumnDefinitions().contains(prop.getColumnName()); + if (!hasValueForProperty) { + return; + } + + Object obj = 
propertyProvider.getPropertyValue(prop); + wrapper.setProperty(prop, obj, useFieldAccessOnly); + } + + public boolean getUseFieldAccessOnly() { + return useFieldAccessOnly; + } + + public void setUseFieldAccessOnly(boolean useFieldAccessOnly) { + this.useFieldAccessOnly = useFieldAccessOnly; + } + + @Override + public R read(Class type, Object row) { + if (row instanceof Row) { + return readRow(type, (Row) row); + } + throw new MappingException("Unknown row object " + row.getClass().getName()); + } + + @Override + public void write(Object obj, Object builtStatement) { + + if (obj == null) { + return; + } + + Class beanClassLoaderClass = transformClassToBeanClassLoaderClass(obj.getClass()); + CassandraPersistentEntity entity = mappingContext.getPersistentEntity(beanClassLoaderClass); + + if (entity == null) { + throw new MappingException("No mapping metadata found for " + obj.getClass()); + } + + if (builtStatement instanceof Insert) { + writeInsertInternal(obj, (Insert) builtStatement, entity); + } else if (builtStatement instanceof Update) { + writeUpdateInternal(obj, (Update) builtStatement, entity); + } else if (builtStatement instanceof Where) { + writeDeleteWhereInternal(obj, (Where) builtStatement, entity); + } else { + throw new MappingException("Unknown buildStatement " + builtStatement.getClass().getName()); + } + } + + private void writeInsertInternal(final Object objectToSave, final Insert insert, CassandraPersistentEntity entity) { + + final BeanWrapper, Object> wrapper = BeanWrapper.create(objectToSave, + conversionService); + + // Write the properties + entity.doWithProperties(new PropertyHandler() { + @Override + public void doWithPersistentProperty(CassandraPersistentProperty prop) { + + Object propertyObj = wrapper.getProperty(prop, prop.getType(), useFieldAccessOnly); + + if (propertyObj != null) { + insert.value(prop.getColumnName(), propertyObj); + } + + } + }); + + } + + private void writeUpdateInternal(final Object objectToSave, final Update 
update, CassandraPersistentEntity entity) { + + final BeanWrapper, Object> wrapper = BeanWrapper.create(objectToSave, + conversionService); + + // Write the properties + entity.doWithProperties(new PropertyHandler() { + @Override + public void doWithPersistentProperty(CassandraPersistentProperty prop) { + + Object propertyObj = wrapper.getProperty(prop, prop.getType(), useFieldAccessOnly); + + if (propertyObj != null) { + if (prop.isIdProperty()) { + update.where(QueryBuilder.eq(prop.getColumnName(), propertyObj)); + } else { + update.with(QueryBuilder.set(prop.getColumnName(), propertyObj)); + } + } + + } + }); + + } + + private void writeDeleteWhereInternal(final Object objectToSave, final Where whereId, + CassandraPersistentEntity entity) { + + final BeanWrapper, Object> wrapper = BeanWrapper.create(objectToSave, + conversionService); + + // Write the properties + entity.doWithProperties(new PropertyHandler() { + @Override + public void doWithPersistentProperty(CassandraPersistentProperty prop) { + + if (prop.isIdProperty()) { + + Object propertyObj = wrapper.getProperty(prop, prop.getType(), useFieldAccessOnly); + + if (propertyObj != null) { + whereId.and(QueryBuilder.eq(prop.getColumnName(), propertyObj)); + } + } + + } + }); + + } + + @Override + public CreateTableSpecification getCreateTableSpecification(CassandraPersistentEntity entity) { + + final CreateTableSpecification spec = new CreateTableSpecification(); + + spec.name(entity.getTableName()); + + entity.doWithProperties(new PropertyHandler() { + @Override + public void doWithPersistentProperty(CassandraPersistentProperty prop) { + + if (prop.isCompositePrimaryKey()) { + + CassandraPersistentEntity pkEntity = mappingContext.getPersistentEntity(prop.getRawType()); + + pkEntity.doWithProperties(new PropertyHandler() { + @Override + public void doWithPersistentProperty(CassandraPersistentProperty pkProp) { + + if (pkProp.isPartitionKeyColumn()) { + spec.partitionKeyColumn(pkProp.getColumnName(), 
pkProp.getDataType()); + } else { + spec.clusteredKeyColumn(pkProp.getColumnName(), pkProp.getDataType(), pkProp.getOrdering()); + } + + } + }); + + } else { + + if (prop.isIdProperty()) { + spec.partitionKeyColumn(prop.getColumnName(), prop.getDataType()); + } else { + spec.column(prop.getColumnName(), prop.getDataType()); + } + + } + } + + }); + + if (spec.getPartitionKeyColumns().isEmpty()) { + throw new MappingException("not found partition key in the entity " + entity.getType()); + } + + return spec; + + } + + @SuppressWarnings("unchecked") + private Class transformClassToBeanClassLoaderClass(Class entity) { + try { + return (Class) ClassUtils.forName(entity.getName(), beanClassLoader); + } catch (ClassNotFoundException e) { + return entity; + } catch (LinkageError e) { + return entity; + } + } + + @Override + public void setBeanClassLoader(ClassLoader classLoader) { + this.beanClassLoader = classLoader; + + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/RowReaderPropertyAccessor.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/RowReaderPropertyAccessor.java new file mode 100644 index 000000000..65434e4d5 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/convert/RowReaderPropertyAccessor.java @@ -0,0 +1,67 @@ +/* + * Copyright 2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.convert; + +import java.nio.ByteBuffer; + +import org.springframework.expression.EvaluationContext; +import org.springframework.expression.PropertyAccessor; +import org.springframework.expression.TypedValue; + +import com.datastax.driver.core.DataType; +import com.datastax.driver.core.Row; + +/** + * {@link PropertyAccessor} to read values from a {@link Row}. + * + * @author Alex Shvid + */ +enum RowReaderPropertyAccessor implements PropertyAccessor { + + INSTANCE; + + @Override + public Class[] getSpecificTargetClasses() { + return new Class[] { Row.class }; + } + + @Override + public boolean canRead(EvaluationContext context, Object target, String name) { + return ((Row) target).getColumnDefinitions().contains(name); + } + + @Override + public TypedValue read(EvaluationContext context, Object target, String name) { + Row row = (Row) target; + if (row.isNull(name)) { + return TypedValue.NULL; + } + DataType columnType = row.getColumnDefinitions().getType(name); + ByteBuffer bytes = row.getBytes(name); + Object object = columnType.deserialize(bytes); + return new TypedValue(object); + } + + @Override + public boolean canWrite(EvaluationContext context, Object target, String name) { + return false; + } + + @Override + public void write(EvaluationContext context, Object target, String name, Object newValue) { + throw new UnsupportedOperationException(); + } +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraAdminOperations.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraAdminOperations.java new file mode 100644 index 000000000..d41d08ea4 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraAdminOperations.java @@ -0,0 +1,79 @@ +/* + * Copyright 2011-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.core; + +import java.util.Map; + +import com.datastax.driver.core.TableMetadata; + +/** + * Operations for managing a Cassandra keyspace. + * + * @author David Webb + * @author Matthew T. Adams + */ +public interface CassandraAdminOperations { + + /** + * Create a table with the name given and fields corresponding to the given class. If the table already exists and + * parameter ifNotExists is {@literal true}, this is a no-op and {@literal false} is returned. If the + * table doesn't exist, parameter ifNotExists is ignored, the table is created and {@literal true} is + * returned. + * + * @param ifNotExists If true, will only create the table if it doesn't exist, else the create operation will be + * ignored and the method will return {@literal false}. + * @param tableName The name of the table. + * @param entityClass The class whose fields determine the columns created. + * @param optionsByName Table options, given by the string option name and the appropriate option value. + * @return Returns true if a table was created, false if not. + */ + boolean createTable(boolean ifNotExists, String tableName, Class entityClass, Map optionsByName); + + /** + * Add columns to the given table from the given class. 
If parameter dropRemovedAttributeColumns is true, then this + * effectively becomes a synchronization operation between the class's fields and the existing table's columns. + * + * @param tableName The name of the existing table. + * @param entityClass The class whose fields determine the columns added. + * @param dropRemovedAttributeColumns Whether to drop columns that exist on the table but that don't have + * corresponding fields in the class. If true, this effectively becomes a synchronization operation. + */ + void alterTable(String tableName, Class entityClass, boolean dropRemovedAttributeColumns); + + /** + * Drops the existing table with the given name and creates a new one; basically a {@link #dropTable(String)} followed + * by a {@link #createTable(boolean, String, Class, Map)}. + * + * @param tableName The name of the table. + * @param entityClass The class whose fields determine the new table's columns. + * @param optionsByName Table options, given by the string option name and the appropriate option value. + */ + void replaceTable(String tableName, Class entityClass, Map optionsByName); + + /** + * Drops the named table. + * + * @param tableName The name of the table. 
+ */ + void dropTable(String tableName); + + /** + * @param keyspace + * @param tableName + * @return + */ + TableMetadata getTableMetadata(String keyspace, String tableName); +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraAdminTemplate.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraAdminTemplate.java new file mode 100644 index 000000000..f5d888b95 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraAdminTemplate.java @@ -0,0 +1,230 @@ +package org.springframework.data.cassandra.core; + +import java.util.List; +import java.util.Map; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.cassandra.core.SessionCallback; +import org.springframework.cassandra.support.CassandraAccessor; +import org.springframework.cassandra.support.CassandraExceptionTranslator; +import org.springframework.cassandra.support.exception.CassandraTableExistsException; +import org.springframework.dao.DataAccessException; +import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.dao.support.PersistenceExceptionTranslator; +import org.springframework.data.cassandra.convert.CassandraConverter; +import org.springframework.data.cassandra.mapping.CassandraPersistentEntity; +import org.springframework.data.cassandra.mapping.CassandraPersistentProperty; +import org.springframework.data.cassandra.util.CqlUtils; +import org.springframework.data.mapping.context.MappingContext; +import org.springframework.util.Assert; + +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.TableMetadata; + +/** + * Default implementation of {@link CassandraAdminOperations}. 
+ */ +public class CassandraAdminTemplate extends CassandraAccessor implements CassandraAdminOperations { + + private static final Logger log = LoggerFactory.getLogger(CassandraAdminTemplate.class); + + private Session session; + private CassandraConverter converter; + private MappingContext, CassandraPersistentProperty> mappingContext; + + private final PersistenceExceptionTranslator exceptionTranslator = new CassandraExceptionTranslator(); + + /** + * Constructor used for a basic template configuration + * + * @param keyspace must not be {@literal null}. + */ + public CassandraAdminTemplate(Session session) { + setSession(session); + } + + protected CassandraAdminTemplate setCassandraConverter(CassandraConverter converter) { + Assert.notNull(converter); + this.converter = converter; + return setMappingContext(converter.getMappingContext()); + } + + protected CassandraAdminTemplate setMappingContext( + MappingContext, CassandraPersistentProperty> mappingContext) { + Assert.notNull(mappingContext); + return this; + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraAdminOperations#createTable(boolean, java.lang.String, java.lang.Class, java.util.Map) + */ + @Override + public boolean createTable(boolean ifNotExists, final String tableName, Class entityClass, + Map optionsByName) { + + try { + + final CassandraPersistentEntity entity = mappingContext.getPersistentEntity(entityClass); + + execute(new SessionCallback() { + public Object doInSession(Session s) throws DataAccessException { + + String cql = CqlUtils.createTable(tableName, entity, converter); + log.info("CREATE TABLE CQL -> " + cql); + s.execute(cql); + return null; + } + }); + return true; + + } catch (CassandraTableExistsException ctex) { + return !ifNotExists; + } catch (RuntimeException x) { + throw tryToConvert(x); + } + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraAdminOperations#alterTable(java.lang.String, java.lang.Class, boolean) 
+ */ + @Override + public void alterTable(String tableName, Class entityClass, boolean dropRemovedAttributeColumns) { + // TODO Auto-generated method stub + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraAdminOperations#replaceTable(java.lang.String, java.lang.Class) + */ + @Override + public void replaceTable(String tableName, Class entityClass, Map optionsByName) { + // TODO + } + + /** + * Create a list of query operations to alter the table for the given entity + * + * @param entityClass + * @param tableName + */ + protected void doAlterTable(Class entityClass, String keyspace, String tableName) { + + CassandraPersistentEntity entity = mappingContext.getPersistentEntity(entityClass); + + Assert.notNull(entity); + + final TableMetadata tableMetadata = getTableMetadata(keyspace, tableName); + + final List queryList = CqlUtils.alterTable(tableName, entity, tableMetadata); + + execute(new SessionCallback() { + + public Object doInSession(Session s) throws DataAccessException { + + for (String q : queryList) { + log.info(q); + s.execute(q); + } + + return null; + + } + }); + + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#dropTable(java.lang.Class) + */ + public void dropTable(Class entityClass) { + + final String tableName = determineTableName(entityClass); + + dropTable(tableName); + + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#dropTable(java.lang.String) + */ + @Override + public void dropTable(String tableName) { + + log.info("Dropping table => " + tableName); + + final String q = CqlUtils.dropTable(tableName); + log.info(q); + + execute(new SessionCallback() { + + @Override + public ResultSet doInSession(Session s) throws DataAccessException { + + return s.execute(q); + + } + + }); + + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#getTableMetadata(java.lang.Class) + */ + @Override + public 
TableMetadata getTableMetadata(final String keyspace, final String tableName) { + + Assert.notNull(tableName); + + return execute(new SessionCallback() { + + public TableMetadata doInSession(Session s) throws DataAccessException { + + return s.getCluster().getMetadata().getKeyspace(keyspace).getTable(tableName); + } + }); + } + + /** + * Execute a command at the Session Level + * + * @param callback + * @return + */ + protected T execute(SessionCallback callback) { + + Assert.notNull(callback); + + try { + return callback.doInSession(session); + } catch (RuntimeException x) { + throw tryToConvert(x); + } + } + + protected RuntimeException tryToConvert(RuntimeException x) { + RuntimeException resolved = exceptionTranslator.translateExceptionIfPossible(x); + return resolved == null ? x : resolved; + } + + /** + * @param entityClass + * @return + */ + public String determineTableName(Class entityClass) { + + if (entityClass == null) { + throw new InvalidDataAccessApiUsageException( + "No class parameter provided, entity table name can't be determined!"); + } + + CassandraPersistentEntity entity = mappingContext.getPersistentEntity(entityClass); + if (entity == null) { + throw new InvalidDataAccessApiUsageException("No Persitent Entity information found for the class " + + entityClass.getName()); + } + return entity.getTableName(); + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraDataOperations.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraDataOperations.java new file mode 100644 index 000000000..217052354 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraDataOperations.java @@ -0,0 +1,459 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.core; + +import java.util.List; + +import org.springframework.cassandra.core.QueryOptions; +import org.springframework.data.cassandra.convert.CassandraConverter; + +import com.datastax.driver.core.querybuilder.Select; + +/** + * Operations for interacting with Cassandra. These operations are used by the Repository implementation, but can also + * be used directly when that is desired by the developer. + * + * @author Alex Shvid + * @author David Webb + * @author Matthew Adams + * + */ +public interface CassandraDataOperations { + + /** + * The table name used for the specified class by this template. + * + * @param entityClass must not be {@literal null}. + * @return + */ + String getTableName(Class entityClass); + + /** + * Execute query and convert ResultSet to the list of entities + * + * @param query must not be {@literal null}. + * @param selectClass must not be {@literal null}, mapped entity type. + * @return + */ + List select(String cql, Class selectClass); + + /** + * Execute query and convert ResultSet to the list of entities + * + * @param selectQuery must not be {@literal null}. + * @param selectClass must not be {@literal null}, mapped entity type. + * @return + */ + + List select(Select selectQuery, Class selectClass); + + /** + * Execute query and convert ResultSet to the entity + * + * @param query must not be {@literal null}. + * @param selectClass must not be {@literal null}, mapped entity type. 
+ * @return + */ + T selectOne(String cql, Class selectClass); + + T selectOne(Select selectQuery, Class selectClass); + + /** + * Counts rows for given query + * + * @param selectQuery + * @return + */ + + Long count(Select selectQuery); + + /** + * Counts all rows for given table + * + * @param tableName + * @return + */ + + Long count(String tableName); + + /** + * Insert the given object to the table by id. + * + * @param entity + */ + T insert(T entity); + + /** + * Insert the given object to the table by id. + * + * @param entity + * @param tableName + * @return + */ + T insert(T entity, String tableName); + + /** + * @param entity + * @param tableName + * @param options + * @return + */ + T insert(T entity, String tableName, QueryOptions options); + + /** + * @param entity + * @param tableName + * @param options + * @return + */ + T insert(T entity, QueryOptions options); + + /** + * Insert the given list of objects to the table by annotation table name. + * + * @param entities + * @return + */ + List insert(List entities); + + /** + * Insert the given list of objects to the table by name. + * + * @param entities + * @param tableName + * @return + */ + List insert(List entities, String tableName); + + /** + * @param entities + * @param tableName + * @param options + * @return + */ + List insert(List entities, QueryOptions options); + + /** + * @param entities + * @param tableName + * @param options + * @return + */ + List insert(List entities, String tableName, QueryOptions options); + + /** + * Insert the given object to the table by id. + * + * @param object + */ + T insertAsynchronously(T entity); + + /** + * Insert the given object to the table by id. 
+ * + * @param object + */ + T insertAsynchronously(T entity, String tableName); + + /** + * @param entity + * @param tableName + * @param options + * @return + */ + T insertAsynchronously(T entity, QueryOptions options); + + /** + * @param entity + * @param tableName + * @param options + * @return + */ + T insertAsynchronously(T entity, String tableName, QueryOptions options); + + /** + * Insert the given object to the table by id. + * + * @param object + */ + List insertAsynchronously(List entities); + + /** + * Insert the given object to the table by id. + * + * @param object + */ + List insertAsynchronously(List entities, String tableName); + + /** + * @param entities + * @param tableName + * @param options + * @return + */ + List insertAsynchronously(List entities, QueryOptions options); + + /** + * @param entities + * @param tableName + * @param options + * @return + */ + List insertAsynchronously(List entities, String tableName, QueryOptions options); + + /** + * Insert the given object to the table by id. + * + * @param object + */ + T update(T entity); + + /** + * Insert the given object to the table by id. + * + * @param object + */ + T update(T entity, String tableName); + + /** + * @param entity + * @param tableName + * @param options + * @return + */ + T update(T entity, QueryOptions options); + + /** + * @param entity + * @param tableName + * @param options + * @return + */ + T update(T entity, String tableName, QueryOptions options); + + /** + * Insert the given object to the table by id. + * + * @param object + */ + List update(List entities); + + /** + * Insert the given object to the table by id. 
+ * + * @param object + */ + List update(List entities, String tableName); + + /** + * @param entities + * @param tableName + * @param options + * @return + */ + List update(List entities, QueryOptions options); + + /** + * @param entities + * @param tableName + * @param options + * @return + */ + List update(List entities, String tableName, QueryOptions options); + + /** + * Insert the given object to the table by id. + * + * @param object + */ + T updateAsynchronously(T entity); + + /** + * Insert the given object to the table by id. + * + * @param object + */ + T updateAsynchronously(T entity, String tableName); + + /** + * @param entity + * @param tableName + * @param options + * @return + */ + T updateAsynchronously(T entity, QueryOptions options); + + /** + * @param entity + * @param tableName + * @param options + * @return + */ + T updateAsynchronously(T entity, String tableName, QueryOptions options); + + /** + * Insert the given object to the table by id. + * + * @param object + */ + List updateAsynchronously(List entities); + + /** + * Insert the given object to the table by id. + * + * @param object + */ + List updateAsynchronously(List entities, String tableName); + + /** + * @param entities + * @param tableName + * @param options + * @return + */ + List updateAsynchronously(List entities, QueryOptions options); + + /** + * @param entities + * @param tableName + * @param options + * @return + */ + List updateAsynchronously(List entities, String tableName, QueryOptions options); + + /** + * Remove the given object from the table by id. + * + * @param object + */ + void delete(T entity); + + /** + * Removes the given object from the given table. + * + * @param object + * @param table must not be {@literal null} or empty. 
+ */ + void delete(T entity, String tableName); + + /** + * @param entity + * @param tableName + * @param options + */ + void delete(T entity, QueryOptions options); + + /** + * @param entity + * @param tableName + * @param options + */ + void delete(T entity, String tableName, QueryOptions options); + + /** + * Remove the given object from the table by id. + * + * @param object + */ + void delete(List entities); + + /** + * Removes the given object from the given table. + * + * @param object + * @param table must not be {@literal null} or empty. + */ + void delete(List entities, String tableName); + + /** + * @param entities + * @param tableName + * @param options + */ + void delete(List entities, QueryOptions options); + + /** + * @param entities + * @param tableName + * @param options + */ + void delete(List entities, String tableName, QueryOptions options); + + /** + * Remove the given object from the table by id. + * + * @param object + */ + void deleteAsynchronously(T entity); + + /** + * @param entity + * @param tableName + * @param options + */ + void deleteAsynchronously(T entity, QueryOptions options); + + /** + * @param entity + * @param tableName + * @param options + */ + void deleteAsynchronously(T entity, String tableName, QueryOptions options); + + /** + * Removes the given object from the given table. + * + * @param object + * @param table must not be {@literal null} or empty. + */ + void deleteAsynchronously(T entity, String tableName); + + /** + * Remove the given object from the table by id. + * + * @param object + */ + void deleteAsynchronously(List entities); + + /** + * Removes the given object from the given table. + * + * @param object + * @param table must not be {@literal null} or empty. 
+ */ + void deleteAsynchronously(List entities, String tableName); + + /** + * @param entities + * @param tableName + * @param options + */ + void deleteAsynchronously(List entities, QueryOptions options); + + /** + * @param entities + * @param tableName + * @param options + */ + void deleteAsynchronously(List entities, String tableName, QueryOptions options); + + /** + * Returns the underlying {@link CassandraConverter}. + * + * @return + */ + CassandraConverter getConverter(); +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraDataTemplate.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraDataTemplate.java new file mode 100644 index 000000000..6e94b48d1 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraDataTemplate.java @@ -0,0 +1,1049 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.core; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import org.springframework.cassandra.core.CassandraTemplate; +import org.springframework.cassandra.core.QueryOptions; +import org.springframework.cassandra.core.SessionCallback; +import org.springframework.dao.DataAccessException; +import org.springframework.dao.DuplicateKeyException; +import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.data.cassandra.convert.CassandraConverter; +import org.springframework.data.cassandra.exception.EntityWriterException; +import org.springframework.data.cassandra.mapping.CassandraPersistentEntity; +import org.springframework.data.cassandra.mapping.CassandraPersistentProperty; +import org.springframework.data.cassandra.util.CqlUtils; +import org.springframework.data.mapping.context.MappingContext; +import org.springframework.util.Assert; + +import com.datastax.driver.core.Query; +import com.datastax.driver.core.ResultSet; +import com.datastax.driver.core.Row; +import com.datastax.driver.core.Session; +import com.datastax.driver.core.querybuilder.Batch; +import com.datastax.driver.core.querybuilder.QueryBuilder; +import com.datastax.driver.core.querybuilder.Select; + +/** + * The Cassandra Data Template is a convenience API for all Cassandra Operations using POJOs. This is the "Spring Data" + * flavor of the template. For low level Cassandra Operations use the {@link CassandraTemplate} + * + * @author Alex Shvid + * @author David Webb + */ +public class CassandraDataTemplate extends CassandraTemplate implements CassandraDataOperations { + + /* + * List of iterable classes when testing POJOs for specific operations. 
+ */ + public static final Collection ITERABLE_CLASSES; + static { + + Set iterableClasses = new HashSet(); + iterableClasses.add(List.class.getName()); + iterableClasses.add(Collection.class.getName()); + iterableClasses.add(Iterator.class.getName()); + + ITERABLE_CLASSES = Collections.unmodifiableCollection(iterableClasses); + + } + + /* + * Required elements for successful Template Operations. These can be set with the Constructor, or wired in + * later. + */ + private String keyspace; + private CassandraConverter cassandraConverter; + private MappingContext, CassandraPersistentProperty> mappingContext; + + /** + * Default Constructor for wiring in the required components later + */ + public CassandraDataTemplate() { + } + + /** + * Constructor if only session is known at time of Template Creation + * + * @param session must not be {@literal null} + */ + public CassandraDataTemplate(Session session) { + this(session, null, null); + } + + /** + * Constructor if only session and converter are known at time of Template Creation + * + * @param session must not be {@literal null} + * @param converter must not be {@literal null}. + */ + public CassandraDataTemplate(Session session, CassandraConverter converter) { + this(session, converter, null); + } + + /** + * Constructor used for a basic template configuration + * + * @param session must not be {@literal null}. + * @param converter must not be {@literal null}. 
+ */ + public CassandraDataTemplate(Session session, CassandraConverter converter, String keyspace) { + setSession(session); + this.keyspace = keyspace; + this.cassandraConverter = converter; + this.mappingContext = this.cassandraConverter.getMappingContext(); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#count(com.datastax.driver.core.querybuilder.Select) + */ + @Override + public Long count(Select selectQuery) { + return doSelectCount(selectQuery); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#count(java.lang.String) + */ + @Override + public Long count(String tableName) { + Select select = QueryBuilder.select().countAll().from(tableName); + return doSelectCount(select); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#delete(java.util.List) + */ + @Override + public void delete(List entities) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + delete(entities, tableName); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#delete(java.util.List, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public void delete(List entities, QueryOptions options) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + delete(entities, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#delete(java.util.List, java.lang.String) + */ + @Override + public void delete(List entities, String tableName) { + + delete(entities, tableName, null); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#delete(java.util.List, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public void delete(List entities, String tableName, QueryOptions options) { + 
Assert.notNull(entities); + Assert.notEmpty(entities); + Assert.notNull(tableName); + doBatchDelete(tableName, entities, options, false); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#delete(java.lang.Object) + */ + @Override + public void delete(T entity) { + String tableName = getTableName(entity.getClass()); + Assert.notNull(tableName); + delete(entity, tableName); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#delete(java.lang.Object, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public void delete(T entity, QueryOptions options) { + String tableName = getTableName(entity.getClass()); + Assert.notNull(tableName); + delete(entity, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#delete(java.lang.Object, java.lang.String) + */ + @Override + public void delete(T entity, String tableName) { + delete(entity, tableName, null); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#delete(java.lang.Object, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public void delete(T entity, String tableName, QueryOptions options) { + Assert.notNull(entity); + Assert.notNull(tableName); + doDelete(tableName, entity, options, false); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#deleteAsynchronously(java.util.List) + */ + @Override + public void deleteAsynchronously(List entities) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + deleteAsynchronously(entities, tableName); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#deleteAsynchronously(java.util.List, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public void deleteAsynchronously(List entities, 
QueryOptions options) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + deleteAsynchronously(entities, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#deleteAsynchronously(java.util.List, java.lang.String) + */ + @Override + public void deleteAsynchronously(List entities, String tableName) { + deleteAsynchronously(entities, tableName, null); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#deleteAsynchronously(java.util.List, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public void deleteAsynchronously(List entities, String tableName, QueryOptions options) { + Assert.notNull(entities); + Assert.notEmpty(entities); + Assert.notNull(tableName); + doBatchDelete(tableName, entities, options, true); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#deleteAsynchronously(java.lang.Object) + */ + @Override + public void deleteAsynchronously(T entity) { + String tableName = getTableName(entity.getClass()); + Assert.notNull(tableName); + deleteAsynchronously(entity, tableName); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#deleteAsynchronously(java.lang.Object, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public void deleteAsynchronously(T entity, QueryOptions options) { + String tableName = getTableName(entity.getClass()); + Assert.notNull(tableName); + deleteAsynchronously(entity, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#deleteAsynchronously(java.lang.Object, java.lang.String) + */ + @Override + public void deleteAsynchronously(T entity, String tableName) { + deleteAsynchronously(entity, tableName, null); + } + + /* (non-Javadoc) + * @see 
org.springframework.data.cassandra.core.CassandraOperations#deleteAsynchronously(java.lang.Object, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public void deleteAsynchronously(T entity, String tableName, QueryOptions options) { + Assert.notNull(entity); + Assert.notNull(tableName); + doDelete(tableName, entity, options, true); + } + + /** + * @param entityClass + * @return + */ + public String determineTableName(Class entityClass) { + + if (entityClass == null) { + throw new InvalidDataAccessApiUsageException( + "No class parameter provided, entity table name can't be determined!"); + } + + CassandraPersistentEntity entity = mappingContext.getPersistentEntity(entityClass); + if (entity == null) { + throw new InvalidDataAccessApiUsageException("No Persitent Entity information found for the class " + + entityClass.getName()); + } + return entity.getTableName(); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#getConverter() + */ + @Override + public CassandraConverter getConverter() { + return cassandraConverter; + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#getTableName(java.lang.Class) + */ + @Override + public String getTableName(Class entityClass) { + return determineTableName(entityClass); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insert(java.util.List) + */ + @Override + public List insert(List entities) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + return insert(entities, tableName); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insert(java.util.List, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public List insert(List entities, QueryOptions options) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + 
return insert(entities, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insert(java.util.List, java.lang.String) + */ + @Override + public List insert(List entities, String tableName) { + return insert(entities, tableName, null); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insert(java.util.List, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public List insert(List entities, String tableName, QueryOptions options) { + Assert.notNull(entities); + Assert.notEmpty(entities); + Assert.notNull(tableName); + return doBatchInsert(tableName, entities, options, false); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insert(java.lang.Object) + */ + @Override + public T insert(T entity) { + String tableName = determineTableName(entity); + Assert.notNull(tableName); + return insert(entity, tableName); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insert(java.lang.Object, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public T insert(T entity, QueryOptions options) { + String tableName = determineTableName(entity); + Assert.notNull(tableName); + return insert(entity, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insert(java.lang.Object, java.lang.String) + */ + @Override + public T insert(T entity, String tableName) { + return insert(entity, tableName, null); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insert(java.lang.Object, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public T insert(T entity, String tableName, QueryOptions options) { + Assert.notNull(entity); + Assert.notNull(tableName); + ensureNotIterable(entity); + return 
doInsert(tableName, entity, options, false); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insertAsynchronously(java.util.List) + */ + @Override + public List insertAsynchronously(List entities) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + return insertAsynchronously(entities, tableName); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insertAsynchronously(java.util.List, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public List insertAsynchronously(List entities, QueryOptions options) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + return insertAsynchronously(entities, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insertAsynchronously(java.util.List, java.lang.String) + */ + @Override + public List insertAsynchronously(List entities, String tableName) { + return insertAsynchronously(entities, tableName, null); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insertAsynchronously(java.util.List, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public List insertAsynchronously(List entities, String tableName, QueryOptions options) { + Assert.notNull(entities); + Assert.notEmpty(entities); + Assert.notNull(tableName); + return doBatchInsert(tableName, entities, options, true); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insertAsynchronously(java.lang.Object) + */ + @Override + public T insertAsynchronously(T entity) { + String tableName = determineTableName(entity); + Assert.notNull(tableName); + return insertAsynchronously(entity, tableName); + } + + /* (non-Javadoc) + * @see 
org.springframework.data.cassandra.core.CassandraOperations#insertAsynchronously(java.lang.Object, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public T insertAsynchronously(T entity, QueryOptions options) { + String tableName = determineTableName(entity); + Assert.notNull(tableName); + return insertAsynchronously(entity, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insertAsynchronously(java.lang.Object, java.lang.String) + */ + @Override + public T insertAsynchronously(T entity, String tableName) { + return insertAsynchronously(entity, tableName, null); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#insertAsynchronously(java.lang.Object, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public T insertAsynchronously(T entity, String tableName, QueryOptions options) { + Assert.notNull(entity); + Assert.notNull(tableName); + + ensureNotIterable(entity); + + return doInsert(tableName, entity, options, true); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#select(com.datastax.driver.core.querybuilder.Select, java.lang.Class) + */ + @Override + public List select(Select cql, Class selectClass) { + return select(cql.getQueryString(), selectClass); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#select(java.lang.String, java.lang.Class) + */ + @Override + public List select(String cql, Class selectClass) { + return doSelect(cql, new ReadRowCallback(cassandraConverter, selectClass)); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#selectOne(com.datastax.driver.core.querybuilder.Select, java.lang.Class) + */ + @Override + public T selectOne(Select selectQuery, Class selectClass) { + return selectOne(selectQuery.getQueryString(), selectClass); + } + + /* 
(non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#selectOne(java.lang.String, java.lang.Class) + */ + @Override + public T selectOne(String cql, Class selectClass) { + return doSelectOne(cql, new ReadRowCallback(cassandraConverter, selectClass)); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#update(java.util.List) + */ + @Override + public List update(List entities) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + return update(entities, tableName); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#update(java.util.List, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public List update(List entities, QueryOptions options) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + return update(entities, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#update(java.util.List, java.lang.String) + */ + @Override + public List update(List entities, String tableName) { + return update(entities, tableName, null); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#update(java.util.List, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public List update(List entities, String tableName, QueryOptions options) { + Assert.notNull(entities); + Assert.notEmpty(entities); + Assert.notNull(tableName); + return doBatchUpdate(tableName, entities, options, false); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#update(java.lang.Object) + */ + @Override + public T update(T entity) { + String tableName = getTableName(entity.getClass()); + Assert.notNull(tableName); + return update(entity, tableName); + } + + /* (non-Javadoc) + * @see 
org.springframework.data.cassandra.core.CassandraOperations#update(java.lang.Object, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public T update(T entity, QueryOptions options) { + String tableName = getTableName(entity.getClass()); + Assert.notNull(tableName); + return update(entity, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#update(java.lang.Object, java.lang.String) + */ + @Override + public T update(T entity, String tableName) { + return update(entity, tableName, null); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#update(java.lang.Object, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public T update(T entity, String tableName, QueryOptions options) { + Assert.notNull(entity); + Assert.notNull(tableName); + return doUpdate(tableName, entity, options, false); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#updateAsynchronously(java.util.List) + */ + @Override + public List updateAsynchronously(List entities) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + return updateAsynchronously(entities, tableName); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#updateAsynchronously(java.util.List, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public List updateAsynchronously(List entities, QueryOptions options) { + String tableName = getTableName(entities.get(0).getClass()); + Assert.notNull(tableName); + return updateAsynchronously(entities, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#updateAsynchronously(java.util.List, java.lang.String) + */ + @Override + public List updateAsynchronously(List entities, String tableName) { + return 
updateAsynchronously(entities, tableName, null); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#updateAsynchronously(java.util.List, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public List updateAsynchronously(List entities, String tableName, QueryOptions options) { + Assert.notNull(entities); + Assert.notEmpty(entities); + Assert.notNull(tableName); + return doBatchUpdate(tableName, entities, options, true); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#updateAsynchronously(java.lang.Object) + */ + @Override + public T updateAsynchronously(T entity) { + String tableName = getTableName(entity.getClass()); + Assert.notNull(tableName); + return updateAsynchronously(entity, tableName); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#updateAsynchronously(java.lang.Object, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public T updateAsynchronously(T entity, QueryOptions options) { + String tableName = getTableName(entity.getClass()); + Assert.notNull(tableName); + return updateAsynchronously(entity, tableName, options); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#updateAsynchronously(java.lang.Object, java.lang.String) + */ + @Override + public T updateAsynchronously(T entity, String tableName) { + + return updateAsynchronously(entity, tableName, null); + } + + /* (non-Javadoc) + * @see org.springframework.data.cassandra.core.CassandraOperations#updateAsynchronously(java.lang.Object, java.lang.String, org.springframework.data.cassandra.core.QueryOptions) + */ + @Override + public T updateAsynchronously(T entity, String tableName, QueryOptions options) { + Assert.notNull(entity); + Assert.notNull(tableName); + return doUpdate(tableName, entity, options, true); + } + + /** + * @param obj + * @return + */ + 
private String determineTableName(T obj) { + if (null != obj) { + return determineTableName(obj.getClass()); + } + + return null; + } + + /** + * @param query + * @param readRowCallback + * @return + */ + private List doSelect(final String query, ReadRowCallback readRowCallback) { + + ResultSet resultSet = doExecute(new SessionCallback() { + + @Override + public ResultSet doInSession(Session s) throws DataAccessException { + return s.execute(query); + } + }); + + if (resultSet == null) { + return null; + } + + List result = new ArrayList(); + Iterator iterator = resultSet.iterator(); + while (iterator.hasNext()) { + Row row = iterator.next(); + result.add(readRowCallback.doWith(row)); + } + + return result; + } + + /** + * @param selectQuery + * @return + */ + private Long doSelectCount(final Select query) { + + Long count = null; + + ResultSet resultSet = doExecute(new SessionCallback() { + + @Override + public ResultSet doInSession(Session s) throws DataAccessException { + return s.execute(query); + } + }); + + if (resultSet == null) { + return null; + } + + Iterator iterator = resultSet.iterator(); + while (iterator.hasNext()) { + Row row = iterator.next(); + count = row.getLong(0); + } + + return count; + + } + + /** + * @param query + * @param readRowCallback + * @return + */ + private T doSelectOne(final String query, ReadRowCallback readRowCallback) { + + logger.info(query); + + /* + * Run the Query + */ + ResultSet resultSet = doExecute(new SessionCallback() { + + @Override + public ResultSet doInSession(Session s) throws DataAccessException { + return s.execute(query); + } + }); + + if (resultSet == null) { + return null; + } + + Iterator iterator = resultSet.iterator(); + if (iterator.hasNext()) { + Row row = iterator.next(); + T result = readRowCallback.doWith(row); + if (iterator.hasNext()) { + throw new DuplicateKeyException("found two or more results in query " + query); + } + return result; + } + + return null; + } + + /** + * Perform the deletion on 
a list of objects + * + * @param tableName + * @param objectToRemove + */ + protected void doBatchDelete(final String tableName, final List entities, final QueryOptions options, + final boolean deleteAsynchronously) { + + Assert.notEmpty(entities); + + try { + + final Batch b = CqlUtils.toDeleteBatchQuery(keyspace, tableName, entities, options, cassandraConverter); + logger.info(b.toString()); + + doExecute(new SessionCallback() { + + @Override + public Object doInSession(Session s) throws DataAccessException { + + if (deleteAsynchronously) { + s.executeAsync(b); + } else { + s.execute(b); + } + + return null; + + } + }); + + } catch (EntityWriterException e) { + throw getExceptionTranslator().translateExceptionIfPossible( + new RuntimeException("Failed to translate Object to Query", e)); + } + } + + /** + * Insert a row into a Cassandra CQL Table + * + * @param tableName + * @param entities + * @param optionsByName + * @param insertAsychronously + * @return + */ + protected List doBatchInsert(final String tableName, final List entities, final QueryOptions options, + final boolean insertAsychronously) { + + Assert.notEmpty(entities); + + try { + + final Batch b = CqlUtils.toInsertBatchQuery(keyspace, tableName, entities, options, cassandraConverter); + logger.info(b.getQueryString()); + + return doExecute(new SessionCallback>() { + + @Override + public List doInSession(Session s) throws DataAccessException { + + if (insertAsychronously) { + s.executeAsync(b); + } else { + s.execute(b); + } + + return entities; + + } + }); + + } catch (EntityWriterException e) { + throw getExceptionTranslator().translateExceptionIfPossible( + new RuntimeException("Failed to translate Object to Query", e)); + } + } + + /** + * Update a Batch of rows in a Cassandra CQL Table + * + * @param tableName + * @param entities + * @param optionsByName + * @param updateAsychronously + * @return + */ + protected List doBatchUpdate(final String tableName, final List entities, final QueryOptions 
options, + final boolean updateAsychronously) { + + Assert.notEmpty(entities); + + try { + + final Batch b = CqlUtils.toUpdateBatchQuery(keyspace, tableName, entities, options, cassandraConverter); + logger.info(b.toString()); + + return doExecute(new SessionCallback>() { + + @Override + public List doInSession(Session s) throws DataAccessException { + + if (updateAsychronously) { + s.executeAsync(b); + } else { + s.execute(b); + } + + return entities; + + } + }); + + } catch (EntityWriterException e) { + throw getExceptionTranslator().translateExceptionIfPossible( + new RuntimeException("Failed to translate Object to Query", e)); + } + } + + /** + * Perform the removal of a Row. + * + * @param tableName + * @param objectToRemove + */ + protected void doDelete(final String tableName, final T objectToRemove, final QueryOptions options, + final boolean deleteAsynchronously) { + + try { + + final Query q = CqlUtils.toDeleteQuery(keyspace, tableName, objectToRemove, options, cassandraConverter); + logger.info(q.toString()); + + doExecute(new SessionCallback() { + + @Override + public Object doInSession(Session s) throws DataAccessException { + + if (deleteAsynchronously) { + s.executeAsync(q); + } else { + s.execute(q); + } + + return null; + + } + }); + + } catch (EntityWriterException e) { + throw getExceptionTranslator().translateExceptionIfPossible( + new RuntimeException("Failed to translate Object to Query", e)); + } + } + + /** + * Execute a command at the Session Level + * + * @param callback + * @return + */ + protected T doExecute(SessionCallback callback) { + + Assert.notNull(callback); + + try { + + return callback.doInSession(getSession()); + + } catch (DataAccessException e) { + throw throwTranslated(e); + } + } + + /** + * Insert a row into a Cassandra CQL Table + * + * @param tableName + * @param entity + */ + protected T doInsert(final String tableName, final T entity, final QueryOptions options, + final boolean insertAsychronously) { + + try { + + 
final Query q = CqlUtils.toInsertQuery(keyspace, tableName, entity, options, cassandraConverter); + + logger.info(q.toString()); + if (q.getConsistencyLevel() != null) { + logger.info(q.getConsistencyLevel().name()); + } + if (q.getRetryPolicy() != null) { + logger.info(q.getRetryPolicy().toString()); + } + + return doExecute(new SessionCallback() { + + @Override + public T doInSession(Session s) throws DataAccessException { + + if (insertAsychronously) { + s.executeAsync(q); + } else { + s.execute(q); + } + + return entity; + + } + }); + + } catch (EntityWriterException e) { + throw getExceptionTranslator().translateExceptionIfPossible( + new RuntimeException("Failed to translate Object to Query", e)); + } + + } + + /** + * Update a row into a Cassandra CQL Table + * + * @param tableName + * @param entity + * @param optionsByName + * @param updateAsychronously + * @return + */ + protected T doUpdate(final String tableName, final T entity, final QueryOptions options, + final boolean updateAsychronously) { + + try { + + final Query q = CqlUtils.toUpdateQuery(keyspace, tableName, entity, options, cassandraConverter); + logger.info(q.toString()); + + return doExecute(new SessionCallback() { + + @Override + public T doInSession(Session s) throws DataAccessException { + + if (updateAsychronously) { + s.executeAsync(q); + } else { + s.execute(q); + } + + return entity; + + } + }); + + } catch (EntityWriterException e) { + throw getExceptionTranslator().translateExceptionIfPossible( + new RuntimeException("Failed to translate Object to Query", e)); + } + + } + + /** + * Verify the object is not an iterable type + * + * @param o + */ + protected void ensureNotIterable(Object o) { + if (null != o) { + if (o.getClass().isArray() || ITERABLE_CLASSES.contains(o.getClass().getName())) { + throw new IllegalArgumentException("Cannot use a collection here."); + } + } + } +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraValue.java 
b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraValue.java new file mode 100644 index 000000000..8d165908d --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/CassandraValue.java @@ -0,0 +1,45 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.core; + +import java.nio.ByteBuffer; + +import com.datastax.driver.core.DataType; + +/** + * Simple Cassandra value of the ByteBuffer with DataType + * + * @author Alex Shvid + */ +public class CassandraValue { + + private final ByteBuffer value; + private final DataType type; + + public CassandraValue(ByteBuffer value, DataType type) { + this.value = value; + this.type = type; + } + + public ByteBuffer getValue() { + return value; + } + + public DataType getType() { + return type; + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/ClassNameToTableNameConverter.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/ClassNameToTableNameConverter.java new file mode 100644 index 000000000..aee4faf3f --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/ClassNameToTableNameConverter.java @@ -0,0 +1,6 @@ +package org.springframework.data.cassandra.core; + +import org.springframework.core.convert.converter.Converter; + 
+public interface ClassNameToTableNameConverter extends Converter { +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/ColumnNameToFieldNameConverter.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/ColumnNameToFieldNameConverter.java new file mode 100644 index 000000000..627873cd1 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/ColumnNameToFieldNameConverter.java @@ -0,0 +1,6 @@ +package org.springframework.data.cassandra.core; + +import org.springframework.core.convert.converter.Converter; + +public interface ColumnNameToFieldNameConverter extends Converter { +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/FieldNameToColumnNameConverter.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/FieldNameToColumnNameConverter.java new file mode 100644 index 000000000..7d0d888ee --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/FieldNameToColumnNameConverter.java @@ -0,0 +1,6 @@ +package org.springframework.data.cassandra.core; + +import org.springframework.core.convert.converter.Converter; + +public interface FieldNameToColumnNameConverter extends Converter { +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/ReadRowCallback.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/ReadRowCallback.java new file mode 100644 index 000000000..a87050ebb --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/ReadRowCallback.java @@ -0,0 +1,47 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.core; + +import org.springframework.cassandra.core.RowCallback; +import org.springframework.data.convert.EntityReader; +import org.springframework.util.Assert; + +import com.datastax.driver.core.Row; + +/** + * Simple {@link RowCallback} that will transform {@link Row} into the given target type using the given + * {@link EntityReader}. + * + * @author Alex Shvid + */ +public class ReadRowCallback implements RowCallback { + + private final EntityReader reader; + private final Class type; + + public ReadRowCallback(EntityReader reader, Class type) { + Assert.notNull(reader); + Assert.notNull(type); + this.reader = reader; + this.type = type; + } + + @Override + public T doWith(Row row) { + T source = reader.read(type, row); + return source; + } +} \ No newline at end of file diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/TableNameToClassNameConverter.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/TableNameToClassNameConverter.java new file mode 100644 index 000000000..02a88d3fe --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/core/TableNameToClassNameConverter.java @@ -0,0 +1,6 @@ +package org.springframework.data.cassandra.core; + +import org.springframework.core.convert.converter.Converter; + +public interface TableNameToClassNameConverter extends Converter { +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/exception/EntityWriterException.java 
b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/exception/EntityWriterException.java new file mode 100644 index 000000000..d0275733e --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/exception/EntityWriterException.java @@ -0,0 +1,48 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.exception; + +/** + * Exception to handle failing to write a PersistedEntity to a CQL String or Query object + * + * @author David Webb + * + */ +public class EntityWriterException extends Exception { + + /** + * @param message + */ + public EntityWriterException(String message) { + super(message); + } + + /** + * @param cause + */ + public EntityWriterException(Throwable cause) { + super(cause); + } + + /** + * @param message + * @param cause + */ + public EntityWriterException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/BasicCassandraPersistentEntity.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/BasicCassandraPersistentEntity.java new file mode 100644 index 000000000..e649c7bcf --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/BasicCassandraPersistentEntity.java @@ -0,0 +1,77 @@ +/* + * Copyright 
2011-2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.mapping; + +import org.springframework.beans.BeansException; +import org.springframework.context.ApplicationContext; +import org.springframework.context.ApplicationContextAware; +import org.springframework.context.expression.BeanFactoryAccessor; +import org.springframework.context.expression.BeanFactoryResolver; +import org.springframework.data.cassandra.util.CassandraNamingUtils; +import org.springframework.data.mapping.model.BasicPersistentEntity; +import org.springframework.data.util.TypeInformation; +import org.springframework.expression.Expression; +import org.springframework.expression.ParserContext; +import org.springframework.expression.spel.standard.SpelExpressionParser; +import org.springframework.expression.spel.support.StandardEvaluationContext; +import org.springframework.util.StringUtils; + +/** + * Cassandra specific {@link BasicPersistentEntity} implementation that adds Cassandra specific metadata such as the + * table name. + * + * @author Alex Shvid + * @author Matthew T. 
Adams + */ +public class BasicCassandraPersistentEntity extends BasicPersistentEntity implements + CassandraPersistentEntity, ApplicationContextAware { + + private final String table; + private final SpelExpressionParser parser; + private final StandardEvaluationContext context; + + /** + * Creates a new {@link BasicCassandraPersistentEntity} with the given {@link TypeInformation}. Will default the table + * name to the entity's simple type name. + * + * @param typeInformation + */ + public BasicCassandraPersistentEntity(TypeInformation typeInformation) { + + super(typeInformation, CassandraPersistentPropertyColumnNameComparator.INSTANCE); + + this.parser = new SpelExpressionParser(); + this.context = new StandardEvaluationContext(); + + Class rawType = typeInformation.getType(); + Table anno = rawType.getAnnotation(Table.class); + + this.table = anno != null && StringUtils.hasText(anno.name()) ? anno.name() : CassandraNamingUtils + .getPreferredTableName(rawType); + } + + public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { + + context.addPropertyAccessor(new BeanFactoryAccessor()); + context.setBeanResolver(new BeanFactoryResolver(applicationContext)); + context.setRootObject(applicationContext); + } + + public String getTableName() { + Expression expression = parser.parseExpression(table, ParserContext.TEMPLATE_EXPRESSION); + return expression.getValue(context, String.class); + } +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/BasicCassandraPersistentProperty.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/BasicCassandraPersistentProperty.java new file mode 100644 index 000000000..1ca9e6a9c --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/BasicCassandraPersistentProperty.java @@ -0,0 +1,238 @@ +/* + * Copyright 2011-2012 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.mapping; + +import java.beans.PropertyDescriptor; +import java.lang.reflect.Field; +import java.util.List; +import java.util.Set; + +import org.springframework.cassandra.core.Ordering; +import org.springframework.cassandra.core.PrimaryKeyType; +import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.data.mapping.Association; +import org.springframework.data.mapping.model.AnnotationBasedPersistentProperty; +import org.springframework.data.util.ClassTypeInformation; +import org.springframework.data.util.TypeInformation; +import org.springframework.util.StringUtils; + +import com.datastax.driver.core.DataType; + +/** + * Cassandra specific {@link org.springframework.data.mapping.model.AnnotationBasedPersistentProperty} implementation. + * + * @author Alex Shvid + */ +public class BasicCassandraPersistentProperty extends AnnotationBasedPersistentProperty + implements CassandraPersistentProperty { + + /** + * Creates a new {@link BasicCassandraPersistentProperty}. 
+ * + * @param field + * @param propertyDescriptor + * @param owner + * @param simpleTypeHolder + */ + public BasicCassandraPersistentProperty(Field field, PropertyDescriptor propertyDescriptor, + CassandraPersistentEntity owner, CassandraSimpleTypeHolder simpleTypeHolder) { + super(field, propertyDescriptor, owner, simpleTypeHolder); + } + + @Override + public boolean isIdProperty() { + + if (super.isIdProperty()) { + return true; + } + + return isAnnotationPresent(PrimaryKey.class); + } + + @Override + public boolean isCompositePrimaryKey() { + return getField().getType().isAnnotationPresent(CompositePrimaryKey.class); + } + + @Override + public Class getCompositePrimaryKeyType() { + if (!isCompositePrimaryKey()) { + return null; + } + + return getField().getType(); + } + + @Override + public CassandraPersistentEntity getCompositePrimaryKeyEntity() { + if (!isCompositePrimaryKey()) { + return null; + } + + return (CassandraPersistentEntity) ClassTypeInformation.from(getCompositePrimaryKeyType()); + } + + public String getColumnName() { + + // first check @Column annotation + Column column = findAnnotation(Column.class); + if (column != null && StringUtils.hasText(column.value())) { + return column.value(); + } + + // else check @KeyColumn annotation + PrimaryKeyColumn pk = findAnnotation(PrimaryKeyColumn.class); + if (pk != null && StringUtils.hasText(pk.value())) { + return pk.value(); + } + + // else default + return field.getName().toLowerCase(); + } + + public Ordering getOrdering() { + + PrimaryKeyColumn anno = findAnnotation(PrimaryKeyColumn.class); + + return anno == null ? 
null : anno.ordering(); + } + + public DataType getDataType() { + + CassandraType annotation = findAnnotation(CassandraType.class); + if (annotation != null) { + return getDataTypeFor(annotation); + } + + if (isMap()) { + + List> args = getTypeInformation().getTypeArguments(); + ensureTypeArguments(args.size(), 2); + + return DataType.map(getDataTypeFor(args.get(0).getType()), getDataTypeFor(args.get(1).getType())); + } + + if (isCollectionLike()) { + + List> args = getTypeInformation().getTypeArguments(); + ensureTypeArguments(args.size(), 1); + + if (Set.class.isAssignableFrom(getType())) { + return DataType.set(getDataTypeFor(args.get(0).getType())); + } + if (List.class.isAssignableFrom(getType())) { + return DataType.list(getDataTypeFor(args.get(0).getType())); + } + } + + DataType dataType = CassandraSimpleTypeHolder.getDataTypeFor(getType()); + if (dataType == null) { + throw new InvalidDataAccessApiUsageException( + String + .format( + "unknown type for property [%s], type [%s] in entity [%s]; only primitive types and collections or maps of primitive types are allowed", + getName(), getType(), getOwner().getName())); + } + return dataType; + } + + private DataType getDataTypeFor(CassandraType annotation) { + + DataType.Name type = annotation.type(); + + if (type.isCollection()) { + switch (type) { + + case MAP: + ensureTypeArguments(annotation.typeArguments().length, 2); + return DataType.map(getDataTypeFor(annotation.typeArguments()[0]), + getDataTypeFor(annotation.typeArguments()[1])); + + case LIST: + ensureTypeArguments(annotation.typeArguments().length, 1); + return DataType.list(getDataTypeFor(annotation.typeArguments()[0])); + + case SET: + ensureTypeArguments(annotation.typeArguments().length, 1); + return DataType.set(getDataTypeFor(annotation.typeArguments()[0])); + + default: + throw new InvalidDataAccessApiUsageException( + String.format("unknown multivalued DataType [%s] for property [%s] in entity [%s]", type, getType(), + 
getOwner().getName())); + } + } else { + + return CassandraSimpleTypeHolder.getDataTypeFor(type); + } + } + + public boolean isIndexed() { + return isAnnotationPresent(Indexed.class); + } + + public boolean isPartitionKeyColumn() { + + PrimaryKeyColumn anno = findAnnotation(PrimaryKeyColumn.class); + + return anno != null && anno.type() == PrimaryKeyType.PARTITIONED; + } + + @Override + public boolean isClusterKeyColumn() { + + PrimaryKeyColumn anno = findAnnotation(PrimaryKeyColumn.class); + + return anno != null && anno.type() == PrimaryKeyType.CLUSTERED; + } + + @Override + public boolean isPrimaryKeyColumn() { + return isAnnotationPresent(PrimaryKeyColumn.class); + } + + @Override + protected Association createAssociation() { + return new Association(this, null); + } + + protected DataType getDataTypeFor(DataType.Name typeName) { + DataType dataType = CassandraSimpleTypeHolder.getDataTypeFor(typeName); + if (dataType == null) { + throw new InvalidDataAccessApiUsageException( + "only primitive types are allowed inside collections for the property '" + this.getName() + "' type is '" + + this.getType() + "' in the entity " + this.getOwner().getName()); + } + return dataType; + } + + protected DataType getDataTypeFor(Class javaType) { + DataType dataType = CassandraSimpleTypeHolder.getDataTypeFor(javaType); + if (dataType == null) { + throw new InvalidDataAccessApiUsageException( + "only primitive types are allowed inside collections for the property '" + this.getName() + "' type is '" + + this.getType() + "' in the entity " + this.getOwner().getName()); + } + return dataType; + } + + protected void ensureTypeArguments(int args, int expected) { + if (args != expected) { + throw new InvalidDataAccessApiUsageException("expected " + expected + " of typed arguments for the property '" + + this.getName() + "' type is '" + this.getType() + "' in the entity " + this.getOwner().getName()); + } + } +} diff --git 
a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CachingCassandraPersistentProperty.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CachingCassandraPersistentProperty.java new file mode 100644 index 000000000..0b4d05ed7 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CachingCassandraPersistentProperty.java @@ -0,0 +1,153 @@ +/* + * Copyright 2011-2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.mapping; + +import java.beans.PropertyDescriptor; +import java.lang.reflect.Field; + +import org.springframework.cassandra.core.Ordering; + +import com.datastax.driver.core.DataType; + +/** + * {@link BasicCassandraPersistentProperty} subclass that caches call results from the superclass. + * + * @author Alex Shvid + * @author Matthew T. 
Adams + */ +public class CachingCassandraPersistentProperty extends BasicCassandraPersistentProperty { + + private Boolean isIdProperty; + private Boolean isIndexed; + private Boolean isCompositePrimaryKey; + private Boolean isPartitionKeyColumn; + private Boolean isClusterKeyColumn; + private Boolean isPrimaryKeyColumn; + private String columnName; + private Ordering ordering; + private boolean orderingCached = false; + private DataType dataType; + private Class compositePrimaryKeyType; + private CassandraPersistentEntity compositePrimaryKeyEntity; + + /** + * Creates a new {@link CachingCassandraPersistentProperty}. + */ + public CachingCassandraPersistentProperty(Field field, PropertyDescriptor propertyDescriptor, + CassandraPersistentEntity owner, CassandraSimpleTypeHolder simpleTypeHolder) { + super(field, propertyDescriptor, owner, simpleTypeHolder); + } + + @Override + public CassandraPersistentEntity getCompositePrimaryKeyEntity() { + + if (compositePrimaryKeyEntity == null) { + compositePrimaryKeyEntity = super.getCompositePrimaryKeyEntity(); + } + return compositePrimaryKeyEntity; + } + + @Override + public Class getCompositePrimaryKeyType() { + + if (compositePrimaryKeyType == null) { + compositePrimaryKeyType = super.getCompositePrimaryKeyType(); + } + return compositePrimaryKeyType; + } + + @Override + public boolean isClusterKeyColumn() { + + if (isClusterKeyColumn == null) { + isClusterKeyColumn = super.isClusterKeyColumn(); + } + return isClusterKeyColumn; + } + + @Override + public boolean isPrimaryKeyColumn() { + + if (isPrimaryKeyColumn == null) { + isPrimaryKeyColumn = super.isPrimaryKeyColumn(); + } + return isPrimaryKeyColumn; + } + + @Override + public DataType getDataType() { + + if (dataType == null) { + dataType = super.getDataType(); + } + return dataType; + } + + @Override + public Ordering getOrdering() { + + if (!orderingCached) { + ordering = super.getOrdering(); + orderingCached = true; + } + return ordering; + } + + @Override + 
public boolean isCompositePrimaryKey() { + + if (isCompositePrimaryKey == null) { + isCompositePrimaryKey = super.isCompositePrimaryKey(); + } + return isCompositePrimaryKey; + } + + @Override + public boolean isIdProperty() { + + if (isIdProperty == null) { + isIdProperty = super.isIdProperty(); + } + return isIdProperty; + } + + @Override + public String getColumnName() { + + if (columnName == null) { + columnName = super.getColumnName(); + } + return columnName; + } + + @Override + public boolean isIndexed() { + + if (isIndexed == null) { + isIndexed = super.isIndexed(); + } + return isIndexed; + } + + @Override + public boolean isPartitionKeyColumn() { + + if (isPartitionKeyColumn == null) { + isPartitionKeyColumn = super.isPartitionKeyColumn(); + } + return isPartitionKeyColumn; + } +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraMappingContext.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraMappingContext.java new file mode 100644 index 000000000..dfe546e93 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraMappingContext.java @@ -0,0 +1,76 @@ +/* + * Copyright 2011-2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.mapping; + +import java.beans.PropertyDescriptor; +import java.lang.reflect.Field; + +import org.springframework.beans.BeansException; +import org.springframework.context.ApplicationContext; +import org.springframework.context.ApplicationContextAware; +import org.springframework.data.mapping.context.AbstractMappingContext; +import org.springframework.data.mapping.context.MappingContext; +import org.springframework.data.mapping.model.SimpleTypeHolder; +import org.springframework.data.util.TypeInformation; + +/** + * Default implementation of a {@link MappingContext} for Cassandra using {@link CassandraPersistentEntity} and + * {@link CassandraPersistentProperty} as primary abstractions. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +public class CassandraMappingContext extends + AbstractMappingContext, CassandraPersistentProperty> implements + ApplicationContextAware { + + private ApplicationContext context; + + /** + * Creates a new {@link CassandraMappingContext}. 
+ */ + public CassandraMappingContext() { + setSimpleTypeHolder(new CassandraSimpleTypeHolder()); + } + + @Override + public CassandraPersistentProperty createPersistentProperty(Field field, PropertyDescriptor descriptor, + CassandraPersistentEntity owner, SimpleTypeHolder simpleTypeHolder) { + return createPersistentProperty(field, descriptor, owner, (CassandraSimpleTypeHolder) simpleTypeHolder); + } + + public CassandraPersistentProperty createPersistentProperty(Field field, PropertyDescriptor descriptor, + CassandraPersistentEntity owner, CassandraSimpleTypeHolder simpleTypeHolder) { + return new CachingCassandraPersistentProperty(field, descriptor, owner, simpleTypeHolder); + } + + @Override + protected CassandraPersistentEntity createPersistentEntity(TypeInformation typeInformation) { + + BasicCassandraPersistentEntity entity = new BasicCassandraPersistentEntity(typeInformation); + + if (context != null) { + entity.setApplicationContext(context); + } + + return entity; + } + + @Override + public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { + this.context = applicationContext; + } +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraPersistentEntity.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraPersistentEntity.java new file mode 100644 index 000000000..12881fe95 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraPersistentEntity.java @@ -0,0 +1,33 @@ +/* + * Copyright 2011-2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.mapping; + +import org.springframework.data.mapping.PersistentEntity; +import org.springframework.data.mapping.model.MutablePersistentEntity; + +/** + * Cassandra specific {@link PersistentEntity} abstraction. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +public interface CassandraPersistentEntity extends MutablePersistentEntity { + + /** + * Returns the table name to which the entity shall be persisted. + */ + String getTableName(); +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraPersistentProperty.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraPersistentProperty.java new file mode 100644 index 000000000..5b54b024c --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraPersistentProperty.java @@ -0,0 +1,85 @@ +/* + * Copyright 2011-2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.mapping; + +import org.springframework.cassandra.core.Ordering; +import org.springframework.data.mapping.PersistentProperty; + +import com.datastax.driver.core.DataType; + +/** + * Cassandra specific {@link org.springframework.data.mapping.PersistentProperty} extension. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +public interface CassandraPersistentProperty extends PersistentProperty { + + /** + * Whether the property is a composite primary key. + */ + boolean isCompositePrimaryKey(); + + /** + * Returns the type of the composite primary key class of this entity, or null if this class does not use a composite + * primary key. + */ + Class getCompositePrimaryKeyType(); + + /** + * Returns a {@link CassandraPersistentEntity} representing the composite primary key class of this entity, or null if + * this class does not use a composite primary key. + */ + CassandraPersistentEntity getCompositePrimaryKeyEntity(); + + /** + * The name of the column to which a property is persisted. + */ + String getColumnName(); + + /** + * The ordering for the column. Valid only for clustered columns. + */ + Ordering getOrdering(); + + /** + * The column's data type. + */ + DataType getDataType(); + + /** + * Whether the property has secondary index on this column. + */ + boolean isIndexed(); + + /** + * Whether the property is a partition key column. + */ + boolean isPartitionKeyColumn(); + + /** + * Whether the property is a cluster key column. 
+ */ + boolean isClusterKeyColumn(); + + /** + * Whether the property is a partition key column or a cluster key column + * + * @see #isPartitionKeyColumn() + * @see #isClusterKeyColumn() + */ + boolean isPrimaryKeyColumn(); +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraPersistentPropertyColumnNameComparator.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraPersistentPropertyColumnNameComparator.java new file mode 100644 index 000000000..c0b0650d1 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraPersistentPropertyColumnNameComparator.java @@ -0,0 +1,18 @@ +package org.springframework.data.cassandra.mapping; + +import java.util.Comparator; + +/** + * {@link Comparator} implementation that uses the {@link CassandraPersistentProperty}'s column name for ordering. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +public enum CassandraPersistentPropertyColumnNameComparator implements Comparator { + + INSTANCE; + + public int compare(CassandraPersistentProperty o1, CassandraPersistentProperty o2) { + return o1.getColumnName().compareTo(o2.getColumnName()); + } +} \ No newline at end of file diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraSimpleTypeHolder.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraSimpleTypeHolder.java new file mode 100644 index 000000000..73eb4d5b7 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraSimpleTypeHolder.java @@ -0,0 +1,105 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.mapping; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.springframework.dao.InvalidDataAccessApiUsageException; +import org.springframework.data.mapping.model.SimpleTypeHolder; +import org.springframework.data.util.TypeInformation; + +import com.datastax.driver.core.DataType; + +/** + * Simple constant holder for a {@link SimpleTypeHolder} enriched with Cassandra specific simple types. + * + * @author Alex Shvid + * @author Matthew T. 
Adams + */ +public class CassandraSimpleTypeHolder extends SimpleTypeHolder { + + public static final Set> CASSANDRA_SIMPLE_TYPES; + + private static final Map, Class> primitiveTypesByWrapperType = new HashMap, Class>(8); + + private static final Map, DataType> dataTypesByJavaClass = new HashMap, DataType>(); + + private static final Map dataTypesByDataTypeName = new HashMap(); + + static { + + primitiveTypesByWrapperType.put(Boolean.class, boolean.class); + primitiveTypesByWrapperType.put(Byte.class, byte.class); + primitiveTypesByWrapperType.put(Character.class, char.class); + primitiveTypesByWrapperType.put(Double.class, double.class); + primitiveTypesByWrapperType.put(Float.class, float.class); + primitiveTypesByWrapperType.put(Integer.class, int.class); + primitiveTypesByWrapperType.put(Long.class, long.class); + primitiveTypesByWrapperType.put(Short.class, short.class); + + Set> simpleTypes = new HashSet>(); + + for (DataType dataType : DataType.allPrimitiveTypes()) { + + Class javaClass = dataType.asJavaClass(); + simpleTypes.add(javaClass); + + dataTypesByJavaClass.put(javaClass, dataType); + + Class primitiveJavaClass = primitiveTypesByWrapperType.get(javaClass); + if (primitiveJavaClass != null) { + dataTypesByJavaClass.put(primitiveJavaClass, dataType); + } + + dataTypesByDataTypeName.put(dataType.getName(), dataType); + } + + dataTypesByJavaClass.put(String.class, DataType.text()); + + CASSANDRA_SIMPLE_TYPES = Collections.unmodifiableSet(simpleTypes); + } + + public static DataType getDataTypeFor(DataType.Name name) { + return dataTypesByDataTypeName.get(name); + } + + public static DataType getDataTypeFor(Class javaClass) { + return dataTypesByJavaClass.get(javaClass); + } + + public static DataType.Name[] getDataTypeNamesFrom(List> arguments) { + DataType.Name[] array = new DataType.Name[arguments.size()]; + for (int i = 0; i != array.length; i++) { + TypeInformation typeInfo = arguments.get(i); + DataType dataType = 
getDataTypeFor(typeInfo.getType()); + if (dataType == null) { + throw new InvalidDataAccessApiUsageException("not found appropriate primitive DataType for type = '" + + typeInfo.getType()); + } + array[i] = dataType.getName(); + } + return array; + } + + public CassandraSimpleTypeHolder() { + super(CASSANDRA_SIMPLE_TYPES, true); + } +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraType.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraType.java new file mode 100644 index 000000000..e1505e6a7 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CassandraType.java @@ -0,0 +1,49 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.mapping; + +import java.lang.annotation.Documented; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; + +import com.datastax.driver.core.DataType; + +/** + * Specifies the Cassandra type of the annotated property. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +@Documented +@Retention(RetentionPolicy.RUNTIME) +public @interface CassandraType { + + /** + * The {@link DataType}.{@link Name} of the property. 
+ */ + DataType.Name type(); + + /** + * If the property is collection-like, then this attribute holds a single {@link DataType}.{@link Name}, representing + * the element type of the collection. + *

+ * If the property is map, then this attribute holds exactly two {@link DataType}.{@link Name}s: the first is the key + * type, and the second is the value type. + *

+ * If the property is neither collection-like nor a map, then this attribute is ignored. + */ + DataType.Name[] typeArguments() default {}; +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/Column.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/Column.java new file mode 100644 index 000000000..5fc1c6051 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/Column.java @@ -0,0 +1,54 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.mapping; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation to define custom metadata for table columns. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +@Documented +@Retention(RetentionPolicy.RUNTIME) +@Target(value = { ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE }) +public @interface Column { + + /** + * The name of the column in the table. + */ + String value() default ""; +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CompositePrimaryKey.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CompositePrimaryKey.java new file mode 100644 index 000000000..8288da4ea --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/CompositePrimaryKey.java @@ -0,0 +1,34 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.mapping; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Defines composite primary key class in the Cassandra table that contains several fields. + * + * @author Alex Shvid + * @author Matthew T. Adams + */ +@Inherited +@Retention(RetentionPolicy.RUNTIME) +@Target({ ElementType.TYPE }) +public @interface CompositePrimaryKey { +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/Indexed.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/Indexed.java new file mode 100644 index 000000000..60d7ecd52 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/Indexed.java @@ -0,0 +1,38 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.mapping; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Identifies a secondary index in the table on a single, non-key column. + * + * @author Alex Shvid + * @author Matthew T. 
Adams + */ +@Retention(value = RetentionPolicy.RUNTIME) +@Target(value = { ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE }) +public @interface Indexed { + + /** + * The name of the index. If {@literal null} or empty, then the index name will be generated by Cassandra and will be + * unknown unless column metadata is used to discover the generated index name. + */ + String value() default ""; +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/PrimaryKey.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/PrimaryKey.java new file mode 100644 index 000000000..a738af9f9 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/PrimaryKey.java @@ -0,0 +1,36 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.mapping; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.springframework.data.annotation.Id; + +/** + * Identifies the primary key field of the entity, which may be of a basic type or of a type that represents a composite + * primary key class. This field corresponds to the PRIMARY KEY of the corresponding Cassandra table. + * + * @author Alex Shvid + * @author Matthew T. 
Adams + */ +@Retention(value = RetentionPolicy.RUNTIME) +@Target(value = { ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE }) +@Id +public @interface PrimaryKey { +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/PrimaryKeyColumn.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/PrimaryKeyColumn.java new file mode 100644 index 000000000..ce6861a53 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/PrimaryKeyColumn.java @@ -0,0 +1,54 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.mapping; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.springframework.cassandra.core.Ordering; +import org.springframework.cassandra.core.PrimaryKeyType; + +/** + * Identifies the annotated field of a composite primary key class as a primary key field that is either a partition or + * cluster key field. + */ +@Retention(value = RetentionPolicy.RUNTIME) +@Target(value = { ElementType.FIELD, ElementType.METHOD, ElementType.ANNOTATION_TYPE }) +public @interface PrimaryKeyColumn { + + /** + * The name of the column in the table. 
+ */ + String value() default ""; + + /** + * The order of this column among all primary key columns. + */ + int ordinal(); + + /** + * The type of this key column. Default is {@link PrimaryKeyType#CLUSTERED}. + */ + PrimaryKeyType type() default PrimaryKeyType.CLUSTERED; + + /** + * The cluster ordering of this column if {@link #type()} is {@link PrimaryKeyType#CLUSTERED}, otherwise ignored. + * Default is {@link Ordering#ASCENDING}. + */ + Ordering ordering() default Ordering.ASCENDING; +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/Table.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/Table.java new file mode 100644 index 000000000..c875102af --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/mapping/Table.java @@ -0,0 +1,39 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.mapping; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.springframework.data.annotation.Persistent; + +/** + * Identifies a domain object to be persisted to Cassandra as a table. 
+ * + * @author Alex Shvid + */ +@Persistent +@Inherited +@Retention(RetentionPolicy.RUNTIME) +@Target({ ElementType.TYPE }) +public @interface Table { + + String name() default ""; + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/CassandraRepository.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/CassandraRepository.java new file mode 100644 index 000000000..e8ed41547 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/CassandraRepository.java @@ -0,0 +1,32 @@ +/* + * Copyright 2011-2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.repository; + +import java.io.Serializable; +import java.util.List; + +import org.springframework.data.repository.CrudRepository; + +/** + * Cassandra-specific extension of the {@link CrudRepository} interface. 
+ * + * @author Alex Shvid + */ +public interface CassandraRepository extends CrudRepository { + + List findByPartitionKey(ID id); + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/Query.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/Query.java new file mode 100644 index 000000000..4098ebbf0 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/Query.java @@ -0,0 +1,42 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.repository; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation to declare finder queries directly on repository methods. + * + * @author Alex Shvid + */ + +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +@Documented +public @interface Query { + + /** + * Takes a Cassandra CQL3 string to define the actual query to be executed. 
+ * + * @return + */ + String value() default ""; + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/config/CassandraRepositoriesRegistrar.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/config/CassandraRepositoriesRegistrar.java new file mode 100644 index 000000000..ac9a1c13a --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/config/CassandraRepositoriesRegistrar.java @@ -0,0 +1,54 @@ +/* + * Copyright 2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.repository.config; + +import java.lang.annotation.Annotation; + +import org.springframework.context.annotation.ImportBeanDefinitionRegistrar; +import org.springframework.data.repository.config.RepositoryBeanDefinitionRegistrarSupport; +import org.springframework.data.repository.config.RepositoryConfigurationExtension; + +/** + * {@link ImportBeanDefinitionRegistrar} to setup Cassandra repositories via {@link EnableCassandraRepositories}. + * + * @author Alex Shvid + * + */ +public class CassandraRepositoriesRegistrar extends RepositoryBeanDefinitionRegistrarSupport { + + /* + * (non-Javadoc) + * + * @see org.springframework.data.repository.config. 
+ * RepositoryBeanDefinitionRegistrarSupport#getAnnotation() + */ + @Override + protected Class getAnnotation() { + return EnableCassandraRepositories.class; + } + + /* + * (non-Javadoc) + * + * @see org.springframework.data.repository.config. + * RepositoryBeanDefinitionRegistrarSupport#getExtension() + */ + @Override + protected RepositoryConfigurationExtension getExtension() { + return new CassandraRepositoryConfigurationExtension(); + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/config/CassandraRepositoryConfigurationExtension.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/config/CassandraRepositoryConfigurationExtension.java new file mode 100644 index 000000000..35b4912b8 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/config/CassandraRepositoryConfigurationExtension.java @@ -0,0 +1,86 @@ +/* + * Copyright 2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.repository.config; + +import org.springframework.beans.factory.support.BeanDefinitionBuilder; +import org.springframework.core.annotation.AnnotationAttributes; +import org.springframework.data.cassandra.repository.support.CassandraRepositoryFactoryBean; +import org.springframework.data.config.ParsingUtils; +import org.springframework.data.repository.config.AnnotationRepositoryConfigurationSource; +import org.springframework.data.repository.config.RepositoryConfigurationExtension; +import org.springframework.data.repository.config.RepositoryConfigurationExtensionSupport; +import org.springframework.data.repository.config.XmlRepositoryConfigurationSource; +import org.springframework.util.StringUtils; +import org.w3c.dom.Element; + +/** + * {@link RepositoryConfigurationExtension} for Cassandra. + * + * @author Alex Shvid + * + */ +public class CassandraRepositoryConfigurationExtension extends RepositoryConfigurationExtensionSupport { + + private static final String CASSANDRA_DATA_TEMPLATE_REF = "cassandra-data-template-ref"; + private static final String CREATE_QUERY_INDEXES = "create-query-indexes"; + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.config.RepositoryConfigurationExtensionSupport#getModulePrefix() + */ + @Override + protected String getModulePrefix() { + return "cassandra"; + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.config.RepositoryConfigurationExtension#getRepositoryFactoryClassName() + */ + public String getRepositoryFactoryClassName() { + return CassandraRepositoryFactoryBean.class.getName(); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.config.RepositoryConfigurationExtensionSupport#postProcess(org.springframework.beans.factory.support.BeanDefinitionBuilder, org.springframework.data.repository.config.XmlRepositoryConfigurationSource) + */ + @Override + public void postProcess(BeanDefinitionBuilder builder, 
XmlRepositoryConfigurationSource config) { + + Element element = config.getElement(); + + ParsingUtils.setPropertyReference(builder, element, CASSANDRA_DATA_TEMPLATE_REF, "cassandraDataTemplate"); + ParsingUtils.setPropertyValue(builder, element, CREATE_QUERY_INDEXES, "createIndexesForQueryMethods"); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.config.RepositoryConfigurationExtensionSupport#postProcess(org.springframework.beans.factory.support.BeanDefinitionBuilder, org.springframework.data.repository.config.AnnotationRepositoryConfigurationSource) + */ + @Override + public void postProcess(BeanDefinitionBuilder builder, AnnotationRepositoryConfigurationSource config) { + + AnnotationAttributes attributes = config.getAttributes(); + + String cassandraDataTemplateRef = attributes.getString("cassandraDataTemplateRef"); + if (StringUtils.hasText(cassandraDataTemplateRef)) { + builder.addPropertyReference("cassandraDataTemplate", cassandraDataTemplateRef); + } + builder.addPropertyValue("createIndexesForQueryMethods", attributes.getBoolean("createIndexesForQueryMethods")); + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/config/EnableCassandraRepositories.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/config/EnableCassandraRepositories.java new file mode 100644 index 000000000..26a471b7a --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/config/EnableCassandraRepositories.java @@ -0,0 +1,125 @@ +/* + * Copyright 2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.repository.config; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import org.springframework.beans.factory.FactoryBean; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.context.annotation.Import; +import org.springframework.data.cassandra.core.CassandraDataTemplate; +import org.springframework.data.cassandra.repository.support.CassandraRepositoryFactoryBean; +import org.springframework.data.repository.query.QueryLookupStrategy; +import org.springframework.data.repository.query.QueryLookupStrategy.Key; + +/** + * Annotation to enable Cassandra repositories. + * + * @author Alex Shvid + * + */ + +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +@Documented +@Inherited +@Import(CassandraRepositoriesRegistrar.class) +public @interface EnableCassandraRepositories { + + /** + * Alias for the {@link #basePackages()} attribute. Allows for more concise annotation declarations e.g.: + * {@code @EnableCassandraRepositories("org.my.pkg")} instead of + * {@code @EnableCassandraRepositories(basePackages="org.my.pkg")}. + */ + String[] value() default {}; + + /** + * Base packages to scan for annotated components. {@link #value()} is an alias for (and mutually exclusive with) this + * attribute. 
Use {@link #basePackageClasses()} for a type-safe alternative to String-based package names. + */ + String[] basePackages() default {}; + + /** + * Type-safe alternative to {@link #basePackages()} for specifying the packages to scan for annotated components. The + * package of each class specified will be scanned. Consider creating a special no-op marker class or interface in + * each package that serves no purpose other than being referenced by this attribute. + */ + Class[] basePackageClasses() default {}; + + /** + * Specifies which types are eligible for component scanning. Further narrows the set of candidate components from + * everything in {@link #basePackages()} to everything in the base packages that matches the given filter or filters. + */ + Filter[] includeFilters() default {}; + + /** + * Specifies which types are not eligible for component scanning. + */ + Filter[] excludeFilters() default {}; + + /** + * Returns the postfix to be used when looking up custom repository implementations. Defaults to {@literal Impl}. So + * for a repository named {@code UserRepository} the corresponding implementation class will be looked up scanning for + * {@code UserRepositoryImpl}. + * + * @return + */ + String repositoryImplementationPostfix() default "Impl"; + + /** + * Configures the location of where to find the Spring Data named queries properties file. Will default to + * {@code META-INF/cassandra-named-queries.properties}. + * + * @return + */ + String namedQueriesLocation() default ""; + + /** + * Returns the key of the {@link QueryLookupStrategy} to be used for lookup queries for query methods. Defaults to + * {@link Key#CREATE_IF_NOT_FOUND}. + * + * @return + */ + Key queryLookupStrategy() default Key.CREATE_IF_NOT_FOUND; + + /** + * Returns the {@link FactoryBean} class to be used for each repository instance. Defaults to + * {@link CassandraRepositoryFactoryBean}. 
+ * + * @return + */ + Class repositoryFactoryBeanClass() default CassandraRepositoryFactoryBean.class; + + /** + * Configures the name of the {@link CassandraDataTemplate} bean to be used with the repositories detected. + * + * @return + */ + String cassandraDataTemplateRef() default "cassandraDataTemplate"; + + /** + * Whether to automatically create indexes for query methods defined in the repository interface. + * + * @return + */ + boolean createIndexesForQueryMethods() default false; + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/query/CassandraEntityInformation.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/query/CassandraEntityInformation.java new file mode 100644 index 000000000..528defa6d --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/query/CassandraEntityInformation.java @@ -0,0 +1,44 @@ +/* + * Copyright 2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.repository.query; + +import java.io.Serializable; + +import org.springframework.data.repository.core.EntityInformation; + +/** + * Cassandra specific {@link EntityInformation}. + * + * @author Alex Shvid + * + */ +public interface CassandraEntityInformation extends EntityInformation { + + /** + * Returns the name of the table the entity shall be persisted to. 
+ * + * @return + */ + String getTableName(); + + /** + * Returns the column that the id will be persisted to. + * + * @return + */ + String getIdColumn(); + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/query/CassandraEntityMetadata.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/query/CassandraEntityMetadata.java new file mode 100644 index 000000000..d0f49c238 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/query/CassandraEntityMetadata.java @@ -0,0 +1,35 @@ +/* + * Copyright 2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.repository.query; + +import org.springframework.data.repository.core.EntityMetadata; + +/** + * Extension of {@link EntityMetadata} to additionally expose the table name an entity shall be persisted to. + * + * @author Alex Shvid + * + * @param + */ +public interface CassandraEntityMetadata extends EntityMetadata { + + /** + * Returns the name of the table the entity shall be persisted to. 
+ * + * @return + */ + String getTableName(); +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/CassandraRepositoryFactory.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/CassandraRepositoryFactory.java new file mode 100644 index 000000000..e59caa323 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/CassandraRepositoryFactory.java @@ -0,0 +1,88 @@ +/* + * Copyright 2010-2012 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.repository.support; + +import java.io.Serializable; + +import org.springframework.data.cassandra.core.CassandraDataTemplate; +import org.springframework.data.cassandra.mapping.CassandraPersistentEntity; +import org.springframework.data.cassandra.mapping.CassandraPersistentProperty; +import org.springframework.data.cassandra.repository.CassandraRepository; +import org.springframework.data.cassandra.repository.query.CassandraEntityInformation; +import org.springframework.data.mapping.context.MappingContext; +import org.springframework.data.mapping.model.MappingException; +import org.springframework.data.repository.core.RepositoryMetadata; +import org.springframework.data.repository.core.support.RepositoryFactorySupport; +import org.springframework.util.Assert; + +/** + * Factory to create {@link CassandraRepository} instances. + * + * @author Alex Shvid + * + */ + +public class CassandraRepositoryFactory extends RepositoryFactorySupport { + + private final CassandraDataTemplate cassandraDataTemplate; + private final MappingContext, CassandraPersistentProperty> mappingContext; + + /** + * Creates a new {@link MongoRepositoryFactory} with the given {@link MongoOperations}. 
+ * + * @param mongoOperations must not be {@literal null} + */ + public CassandraRepositoryFactory(CassandraDataTemplate cassandraDataTemplate) { + + Assert.notNull(cassandraDataTemplate); + + this.cassandraDataTemplate = cassandraDataTemplate; + this.mappingContext = cassandraDataTemplate.getConverter().getMappingContext(); + } + + @Override + protected Class getRepositoryBaseClass(RepositoryMetadata metadata) { + return SimpleCassandraRepository.class; + } + + @Override + @SuppressWarnings({ "rawtypes", "unchecked" }) + protected Object getTargetRepository(RepositoryMetadata metadata) { + + CassandraEntityInformation entityInformation = getEntityInformation(metadata.getDomainType()); + + return new SimpleCassandraRepository(entityInformation, cassandraDataTemplate); + + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.core.support.RepositoryFactorySupport#getEntityInformation(java.lang.Class) + */ + @Override + @SuppressWarnings("unchecked") + public CassandraEntityInformation getEntityInformation(Class domainClass) { + + CassandraPersistentEntity entity = mappingContext.getPersistentEntity(domainClass); + + if (entity == null) { + throw new MappingException(String.format("Could not lookup mapping metadata for domain class %s!", + domainClass.getName())); + } + + return new MappingCassandraEntityInformation((CassandraPersistentEntity) entity); + } +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/CassandraRepositoryFactoryBean.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/CassandraRepositoryFactoryBean.java new file mode 100644 index 000000000..ce3af5980 --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/CassandraRepositoryFactoryBean.java @@ -0,0 +1,66 @@ +/* + * Copyright 2011 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.repository.support; + +import java.io.Serializable; + +import org.springframework.data.cassandra.core.CassandraDataTemplate; +import org.springframework.data.cassandra.repository.CassandraRepository; +import org.springframework.data.repository.Repository; +import org.springframework.data.repository.core.support.RepositoryFactoryBeanSupport; +import org.springframework.data.repository.core.support.RepositoryFactorySupport; +import org.springframework.util.Assert; + +/** + * {@link org.springframework.beans.factory.FactoryBean} to create {@link CassandraRepository} instances. + * + * @author Alex Shvid + * + */ +public class CassandraRepositoryFactoryBean, S, ID extends Serializable> extends + RepositoryFactoryBeanSupport { + + private CassandraDataTemplate cassandraDataTemplate; + + @Override + protected RepositoryFactorySupport createRepositoryFactory() { + return new CassandraRepositoryFactory(cassandraDataTemplate); + } + + /** + * Configures the {@link CassandraDataTemplate} to be used. 
+ * + * @param operations the operations to set + */ + public void setCassandraDataTemplate(CassandraDataTemplate cassandraDataTemplate) { + this.cassandraDataTemplate = cassandraDataTemplate; + setMappingContext(cassandraDataTemplate.getConverter().getMappingContext()); + } + + /* + * (non-Javadoc) + * + * @see + * org.springframework.data.repository.support.RepositoryFactoryBeanSupport + * #afterPropertiesSet() + */ + @Override + public void afterPropertiesSet() { + super.afterPropertiesSet(); + Assert.notNull(cassandraDataTemplate, "cassandraDataTemplate must not be null!"); + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/MappingCassandraEntityInformation.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/MappingCassandraEntityInformation.java new file mode 100644 index 000000000..c3385ad4e --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/MappingCassandraEntityInformation.java @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2011 by the original author(s). + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.repository.support; + +import java.io.Serializable; + +import org.springframework.data.cassandra.mapping.CassandraPersistentEntity; +import org.springframework.data.cassandra.mapping.CassandraPersistentProperty; +import org.springframework.data.cassandra.repository.query.CassandraEntityInformation; +import org.springframework.data.mapping.model.BeanWrapper; +import org.springframework.data.repository.core.support.AbstractEntityInformation; + +/** + * {@link CassandraEntityInformation} implementation using a {@link CassandraPersistentEntity} instance to lookup the + * necessary information. Can be configured with a custom collection to be returned which will trump the one returned by + * the {@link CassandraPersistentEntity} if given. + * + * @author Alex Shvid + * + */ +public class MappingCassandraEntityInformation extends AbstractEntityInformation + implements CassandraEntityInformation { + + private final CassandraPersistentEntity entityMetadata; + private final String customTableName; + + /** + * Creates a new {@link MappingCassandraEntityInformation} for the given {@link CassandraPersistentEntity}. + * + * @param entity must not be {@literal null}. + */ + public MappingCassandraEntityInformation(CassandraPersistentEntity entity) { + this(entity, null); + } + + /** + * Creates a new {@link MappingCassandraEntityInformation} for the given {@link CassandraPersistentEntity} and custom + * table name. + * + * @param entity must not be {@literal null}. 
+ * @param customTableName + */ + public MappingCassandraEntityInformation(CassandraPersistentEntity entity, String customTableName) { + super(entity.getType()); + this.entityMetadata = entity; + this.customTableName = customTableName; + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.support.EntityInformation#getId(java.lang.Object) + */ + @SuppressWarnings("unchecked") + @Override + public ID getId(T entity) { + + CassandraPersistentProperty idProperty = entityMetadata.getIdProperty(); + + if (idProperty == null) { + return null; + } + + try { + return (ID) BeanWrapper.create(entity, null).getProperty(idProperty); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /* (non-Javadoc) + * @see org.springframework.data.repository.support.EntityInformation#getIdType() + */ + @SuppressWarnings("unchecked") + @Override + public Class getIdType() { + return (Class) entityMetadata.getIdProperty().getType(); + } + + /* (non-Javadoc) + * @see org.springframework.data.mongodb.repository.CassandraEntityInformation#getTableName() + */ + @Override + public String getTableName() { + return customTableName == null ? entityMetadata.getTableName() : customTableName; + } + + /* (non-Javadoc) + * @see org.springframework.data.mongodb.repository.CassandraEntityInformation#getIdColumn() + */ + public String getIdColumn() { + return entityMetadata.getIdProperty().getName(); + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/SimpleCassandraRepository.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/SimpleCassandraRepository.java new file mode 100644 index 000000000..8bc527e4e --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/repository/support/SimpleCassandraRepository.java @@ -0,0 +1,251 @@ +/* + * Copyright 2010-2012 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.repository.support; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.springframework.cassandra.core.CassandraOperations; +import org.springframework.data.cassandra.core.CassandraDataOperations; +import org.springframework.data.cassandra.core.CassandraDataTemplate; +import org.springframework.data.cassandra.repository.CassandraRepository; +import org.springframework.data.cassandra.repository.query.CassandraEntityInformation; +import org.springframework.util.Assert; + +import com.datastax.driver.core.querybuilder.Clause; +import com.datastax.driver.core.querybuilder.Delete; +import com.datastax.driver.core.querybuilder.QueryBuilder; +import com.datastax.driver.core.querybuilder.Select; + +/** + * Repository base implementation for Cassandra. + * + * @author Alex Shvid + * + */ + +public class SimpleCassandraRepository implements CassandraRepository { + + private final CassandraDataTemplate cassandraDataTemplate; + private final CassandraEntityInformation entityInformation; + + /** + * Creates a new {@link SimpleCassandraRepository} for the given {@link CassandraEntityInformation} and + * {@link CassandraDataTemplate}. + * + * @param metadata must not be {@literal null}. + * @param template must not be {@literal null}. 
+ */ + public SimpleCassandraRepository(CassandraEntityInformation metadata, + CassandraDataTemplate cassandraDataTemplate) { + + Assert.notNull(cassandraDataTemplate); + Assert.notNull(metadata); + + this.entityInformation = metadata; + this.cassandraDataTemplate = cassandraDataTemplate; + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.CrudRepository#save(java.lang.Object) + */ + public S save(S entity) { + + Assert.notNull(entity, "Entity must not be null!"); + cassandraDataTemplate.insert(entity, entityInformation.getTableName()); + return entity; + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.CrudRepository#save(java.lang.Iterable) + */ + public List save(Iterable entities) { + + Assert.notNull(entities, "The given Iterable of entities not be null!"); + + List result = new ArrayList(); + + for (S entity : entities) { + save(entity); + result.add(entity); + } + + return result; + } + + private Clause getIdClause(ID id) { + Clause clause = QueryBuilder.eq(entityInformation.getIdColumn(), id); + return clause; + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.CrudRepository#findOne(java.io.Serializable) + */ + public T findOne(ID id) { + Assert.notNull(id, "The given id must not be null!"); + + Select select = QueryBuilder.select().all().from(entityInformation.getTableName()); + select.where(getIdClause(id)); + + return cassandraDataTemplate.selectOne(select, entityInformation.getJavaType()); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.cassandra.repository.CassandraRepository#findByPartitionKey(java.io.Serializable) + */ + @Override + public List findByPartitionKey(ID id) { + Assert.notNull(id, "The given id must not be null!"); + + Select select = QueryBuilder.select().all().from(entityInformation.getTableName()); + select.where(getIdClause(id)); + + return cassandraDataTemplate.select(select, entityInformation.getJavaType()); + } + + /* + * (non-Javadoc) + * @see 
org.springframework.data.repository.CrudRepository#exists(java.io.Serializable) + */ + public boolean exists(ID id) { + + Assert.notNull(id, "The given id must not be null!"); + + Select select = QueryBuilder.select().countAll().from(entityInformation.getTableName()); + select.where(getIdClause(id)); + + Long num = cassandraDataTemplate.count(select); + return num != null && num.longValue() > 0; + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.CrudRepository#count() + */ + public long count() { + return cassandraDataTemplate.count(entityInformation.getTableName()); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.CrudRepository#delete(java.io.Serializable) + */ + public void delete(ID id) { + Assert.notNull(id, "The given id must not be null!"); + + Delete delete = QueryBuilder.delete().all().from(entityInformation.getTableName()); + delete.where(getIdClause(id)); + + cassandraDataTemplate.execute(delete.getQueryString()); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.CrudRepository#delete(java.lang.Object) + */ + public void delete(T entity) { + Assert.notNull(entity, "The given entity must not be null!"); + delete(entityInformation.getId(entity)); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.CrudRepository#delete(java.lang.Iterable) + */ + public void delete(Iterable entities) { + + Assert.notNull(entities, "The given Iterable of entities not be null!"); + + for (T entity : entities) { + delete(entity); + } + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.CrudRepository#deleteAll() + */ + public void deleteAll() { + cassandraDataTemplate.truncate(entityInformation.getTableName()); + } + + /* + * (non-Javadoc) + * @see org.springframework.data.repository.CrudRepository#findAll() + */ + public List findAll() { + Select select = QueryBuilder.select().all().from(entityInformation.getTableName()); + return findAll(select); + } + + /* + * 
(non-Javadoc) + * @see org.springframework.data.repository.CrudRepository#findAll(java.lang.Iterable) + */ + public Iterable findAll(Iterable ids) { + + List parameters = new ArrayList(); + for (ID id : ids) { + parameters.add(id); + } + Clause clause = QueryBuilder.in(entityInformation.getIdColumn(), parameters.toArray()); + Select select = QueryBuilder.select().all().from(entityInformation.getTableName()); + select.where(clause); + + return findAll(select); + } + + private List findAll(Select query) { + + if (query == null) { + return Collections.emptyList(); + } + + return cassandraDataTemplate.select(query, entityInformation.getJavaType()); + } + + /** + * Returns the underlying {@link CassandraOperations} instance. + * + * @return + */ + protected CassandraOperations getCassandraOperations() { + return this.cassandraDataTemplate; + } + + /** + * Returns the underlying {@link CassandraDataOperations} instance. + * + * @return + */ + protected CassandraDataOperations getCassandraDataOperations() { + return this.cassandraDataTemplate; + } + + /** + * @return the entityInformation + */ + protected CassandraEntityInformation getEntityInformation() { + return entityInformation; + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/util/CassandraNamingUtils.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/util/CassandraNamingUtils.java new file mode 100644 index 000000000..4e752568e --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/util/CassandraNamingUtils.java @@ -0,0 +1,36 @@ +/* + * Copyright 2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.util; + +/** + * Helper class featuring helper methods for working with Cassandra tables. Mainly intended for internal use within the + * framework. + * + * @author Alex Shvid + */ +public abstract class CassandraNamingUtils { + + /** + * Obtains the table name to use for the provided class + * + * @param entityClass The class to determine the preferred table name for + * @return The preferred collection name + */ + public static String getPreferredTableName(Class entityClass) { + return entityClass.getSimpleName().toLowerCase(); + } + +} diff --git a/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/util/CqlUtils.java b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/util/CqlUtils.java new file mode 100644 index 000000000..8be12f30c --- /dev/null +++ b/spring-data-cassandra/src/main/java/org/springframework/data/cassandra/util/CqlUtils.java @@ -0,0 +1,387 @@ +package org.springframework.data.cassandra.util; + +import java.util.ArrayList; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.cassandra.core.CassandraTemplate; +import org.springframework.cassandra.core.QueryOptions; +import org.springframework.cassandra.core.cql.generator.CreateTableCqlGenerator; +import org.springframework.cassandra.core.keyspace.CreateTableSpecification; +import org.springframework.data.cassandra.convert.CassandraConverter; +import org.springframework.data.cassandra.exception.EntityWriterException; +import 
org.springframework.data.cassandra.mapping.CassandraPersistentEntity; +import org.springframework.data.cassandra.mapping.CassandraPersistentProperty; +import org.springframework.data.convert.EntityWriter; +import org.springframework.data.mapping.PropertyHandler; + +import com.datastax.driver.core.ColumnMetadata; +import com.datastax.driver.core.DataType; +import com.datastax.driver.core.Query; +import com.datastax.driver.core.Statement; +import com.datastax.driver.core.TableMetadata; +import com.datastax.driver.core.querybuilder.Batch; +import com.datastax.driver.core.querybuilder.Delete; +import com.datastax.driver.core.querybuilder.Delete.Where; +import com.datastax.driver.core.querybuilder.Insert; +import com.datastax.driver.core.querybuilder.QueryBuilder; +import com.datastax.driver.core.querybuilder.Update; + +/** + * Utilities to convert Cassandra Annotated objects to Queries and CQL. + * + * @author Alex Shvid + * @author David Webb + * + */ +public abstract class CqlUtils { + + private static Logger log = LoggerFactory.getLogger(CqlUtils.class); + + /** + * Generates the CQL String to create a table in Cassandra + * + * @param tableName + * @param entity + * @return The CQL that can be passed to session.execute() + */ + public static String createTable(String tableName, final CassandraPersistentEntity entity, + CassandraConverter cassandraConverter) { + + CreateTableSpecification spec = cassandraConverter.getCreateTableSpecification(entity); + spec.name(tableName); + + CreateTableCqlGenerator generator = new CreateTableCqlGenerator(spec); + + return generator.toCql(); + } + + /** + * Create the List of CQL for the indexes required for Cassandra mapped Table. 
+ * + * @param tableName + * @param entity + * @return The list of CQL statements to run with session.execute() + */ + public static List createIndexes(final String tableName, final CassandraPersistentEntity entity) { + final List result = new ArrayList(); + + entity.doWithProperties(new PropertyHandler() { + public void doWithPersistentProperty(CassandraPersistentProperty prop) { + + if (prop.isIndexed()) { + + final StringBuilder str = new StringBuilder(); + str.append("CREATE INDEX ON "); + str.append(tableName); + str.append(" ("); + str.append(prop.getColumnName()); + str.append(");"); + + result.add(str.toString()); + } + + } + }); + + return result; + } + + /** + * Alter the table to reflect the entity annotations + * + * @param tableName + * @param entity + * @param table + * @return + */ + public static List alterTable(final String tableName, final CassandraPersistentEntity entity, + final TableMetadata table) { + final List result = new ArrayList(); + + entity.doWithProperties(new PropertyHandler() { + public void doWithPersistentProperty(CassandraPersistentProperty prop) { + + String columnName = prop.getColumnName(); + DataType columnDataType = prop.getDataType(); + ColumnMetadata columnMetadata = table.getColumn(columnName.toLowerCase()); + + if (columnMetadata != null && columnDataType.equals(columnMetadata.getType())) { + return; + } + + final StringBuilder str = new StringBuilder(); + str.append("ALTER TABLE "); + str.append(tableName); + if (columnMetadata == null) { + str.append(" ADD "); + } else { + str.append(" ALTER "); + } + + str.append(columnName); + str.append(' '); + + if (columnMetadata != null) { + str.append("TYPE "); + } + + str.append(toCQL(columnDataType)); + + str.append(';'); + result.add(str.toString()); + + } + }); + + return result; + } + + /** + * Generates a Query Object for an insert + * + * @param keyspaceName + * @param tableName + * @param objectToSave + * @param entity + * @param optionsByName + * + * @return The Query 
object to run with session.execute(); + * @throws EntityWriterException + */ + public static Query toInsertQuery(String keyspaceName, String tableName, final Object objectToSave, + QueryOptions options, EntityWriter entityWriter) throws EntityWriterException { + + final Insert q = QueryBuilder.insertInto(keyspaceName, tableName); + + /* + * Write properties + */ + entityWriter.write(objectToSave, q); + + /* + * Add Query Options + */ + CassandraTemplate.addQueryOptions(q, options); + + /* + * Add TTL to Insert object + */ + if (options != null && options.getTtl() != null) { + q.using(QueryBuilder.ttl(options.getTtl())); + } + + return q; + + } + + /** + * Generates a Query Object for an Update + * + * @param keyspaceName + * @param tableName + * @param objectToSave + * @param entity + * @param optionsByName + * + * @return The Query object to run with session.execute(); + * @throws EntityWriterException + */ + public static Query toUpdateQuery(String keyspaceName, String tableName, final Object objectToSave, + QueryOptions options, EntityWriter entityWriter) throws EntityWriterException { + + final Update q = QueryBuilder.update(keyspaceName, tableName); + + /* + * Write properties + */ + entityWriter.write(objectToSave, q); + + /* + * Add Query Options + */ + CassandraTemplate.addQueryOptions(q, options); + + /* + * Add TTL to Insert object + */ + if (options != null && options.getTtl() != null) { + q.using(QueryBuilder.ttl(options.getTtl())); + } + + return q; + + } + + /** + * Generates a Batch Object for multiple Updates + * + * @param keyspaceName + * @param tableName + * @param objectsToSave + * @param entity + * @param optionsByName + * + * @return The Query object to run with session.execute(); + * @throws EntityWriterException + */ + public static Batch toUpdateBatchQuery(final String keyspaceName, final String tableName, + final List objectsToSave, QueryOptions options, EntityWriter entityWriter) + throws EntityWriterException { + + /* + * Return variable 
is a Batch statement + */ + final Batch b = QueryBuilder.batch(); + + for (final T objectToSave : objectsToSave) { + + b.add((Statement) toUpdateQuery(keyspaceName, tableName, objectToSave, options, entityWriter)); + + } + + /* + * Add Query Options + */ + CassandraTemplate.addQueryOptions(b, options); + + return b; + + } + + /** + * Generates a Batch Object for multiple inserts + * + * @param keyspaceName + * @param tableName + * @param objectsToSave + * @param entity + * @param optionsByName + * + * @return The Query object to run with session.execute(); + * @throws EntityWriterException + */ + public static Batch toInsertBatchQuery(final String keyspaceName, final String tableName, + final List objectsToSave, QueryOptions options, EntityWriter entityWriter) + throws EntityWriterException { + + /* + * Return variable is a Batch statement + */ + final Batch b = QueryBuilder.batch(); + + for (final T objectToSave : objectsToSave) { + + b.add((Statement) toInsertQuery(keyspaceName, tableName, objectToSave, options, entityWriter)); + + } + + /* + * Add Query Options + */ + CassandraTemplate.addQueryOptions(b, options); + + return b; + + } + + /** + * Create a Delete Query Object from an annotated POJO + * + * @param keyspace + * @param tableName + * @param objectToRemove + * @param entity + * @param optionsByName + * @return + * @throws EntityWriterException + */ + public static Query toDeleteQuery(String keyspace, String tableName, final Object objectToRemove, + QueryOptions options, EntityWriter entityWriter) throws EntityWriterException { + + final Delete.Selection ds = QueryBuilder.delete(); + final Delete q = ds.from(keyspace, tableName); + final Where w = q.where(); + + /* + * Write where condition to find by Id + */ + entityWriter.write(objectToRemove, w); + + CassandraTemplate.addQueryOptions(q, options); + + return q; + + } + + /** + * @param dataType + * @return + */ + public static String toCQL(DataType dataType) { + if 
(dataType.getTypeArguments().isEmpty()) { + return dataType.getName().name(); + } else { + StringBuilder str = new StringBuilder(); + str.append(dataType.getName().name()); + str.append('<'); + for (DataType argDataType : dataType.getTypeArguments()) { + if (str.charAt(str.length() - 1) != '<') { + str.append(','); + } + str.append(argDataType.getName().name()); + } + str.append('>'); + return str.toString(); + } + } + + /** + * @param tableName + * @return + */ + public static String dropTable(String tableName) { + + if (tableName == null) { + return null; + } + + StringBuilder str = new StringBuilder(); + str.append("DROP TABLE " + tableName + ";"); + return str.toString(); + } + + /** + * Create a Batch Query object for multiple deletes. + * + * @param keyspaceName + * @param tableName + * @param entities + * @param entity + * @param optionsByName + * + * @return + * @throws EntityWriterException + */ + public static Batch toDeleteBatchQuery(String keyspaceName, String tableName, List entities, + QueryOptions options, EntityWriter entityWriter) throws EntityWriterException { + + /* + * Return variable is a Batch statement + */ + final Batch b = QueryBuilder.batch(); + + for (final T objectToSave : entities) { + + b.add((Statement) toDeleteQuery(keyspaceName, tableName, objectToSave, options, entityWriter)); + + } + + CassandraTemplate.addQueryOptions(b, options); + + return b; + + } + +} diff --git a/spring-data-cassandra/src/main/resources/META-INF/spring.handlers b/spring-data-cassandra/src/main/resources/META-INF/spring.handlers new file mode 100644 index 000000000..8a812d6ba --- /dev/null +++ b/spring-data-cassandra/src/main/resources/META-INF/spring.handlers @@ -0,0 +1 @@ +http\://www.springframework.org/schema/data/cassandra=org.springframework.data.cassandra.config.CassandraNamespaceHandler diff --git a/spring-data-cassandra/src/main/resources/META-INF/spring.schemas b/spring-data-cassandra/src/main/resources/META-INF/spring.schemas new file mode 100644 
index 000000000..7cd18a56c --- /dev/null +++ b/spring-data-cassandra/src/main/resources/META-INF/spring.schemas @@ -0,0 +1,2 @@ +http\://www.springframework.org/schema/data/cassandra/spring-cassandra-1.0.xsd=org/springframework/data/cassandra/config/spring-cassandra-1.0.xsd +http\://www.springframework.org/schema/data/cassandra/spring-cassandra.xsd=org/springframework/data/cassandra/config/spring-cassandra-1.0.xsd \ No newline at end of file diff --git a/spring-data-cassandra/src/main/resources/META-INF/spring.tooling b/spring-data-cassandra/src/main/resources/META-INF/spring.tooling new file mode 100644 index 000000000..bdc47bdbb --- /dev/null +++ b/spring-data-cassandra/src/main/resources/META-INF/spring.tooling @@ -0,0 +1,4 @@ +# Tooling related information for the cassandra namespace +http\://www.springframework.org/schema/data/cassandra@name=Spring Data Cassandra Namespace +http\://www.springframework.org/schema/data/cassandra@prefix=cassandra +http\://www.springframework.org/schema/data/cassandra@icon=org/springframework/data/cassandra/config/spring-cassandra.gif diff --git a/spring-data-cassandra/src/main/resources/org/springframework/data/cassandra/config/spring-cassandra-1.0.xsd b/spring-data-cassandra/src/main/resources/org/springframework/data/cassandra/config/spring-cassandra-1.0.xsd new file mode 100644 index 000000000..44daecc74 --- /dev/null +++ b/spring-data-cassandra/src/main/resources/org/springframework/data/cassandra/config/spring-cassandra-1.0.xsd @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/spring-data-cassandra/src/main/resources/org/springframework/data/cassandra/config/spring-cassandra.gif b/spring-data-cassandra/src/main/resources/org/springframework/data/cassandra/config/spring-cassandra.gif new file mode 100644 index 000000000..20ed1f9a4 Binary files /dev/null and 
b/spring-data-cassandra/src/main/resources/org/springframework/data/cassandra/config/spring-cassandra.gif differ diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/config/CassandraNamespaceTests.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/config/CassandraNamespaceTests.java new file mode 100644 index 000000000..e27eda166 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/config/CassandraNamespaceTests.java @@ -0,0 +1,37 @@ +package org.springframework.data.cassandra.test.integration.config; + +import java.io.IOException; + +import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.thrift.transport.TTransportException; +import org.cassandraunit.utils.EmbeddedCassandraServerHelper; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.ApplicationContext; + +// @RunWith(SpringJUnit4ClassRunner.class) +// @ContextConfiguration +public class CassandraNamespaceTests { + + @Autowired + private ApplicationContext ctx; + + @BeforeClass + public static void startCassandra() throws IOException, TTransportException, ConfigurationException, + InterruptedException { + EmbeddedCassandraServerHelper.startEmbeddedCassandra("cassandra.yaml"); + } + + @After + public void clearCassandra() { + EmbeddedCassandraServerHelper.cleanEmbeddedCassandra(); + } + + @AfterClass + public static void stopCassandra() { + EmbeddedCassandraServerHelper.stopEmbeddedCassandra(); + } + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/config/DriverTests.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/config/DriverTests.java new file mode 100644 index 000000000..7ccbdf563 --- /dev/null +++ 
b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/config/DriverTests.java @@ -0,0 +1,50 @@ +package org.springframework.data.cassandra.test.integration.config; + +import java.io.IOException; + +import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.thrift.transport.TTransportException; +import org.cassandraunit.utils.EmbeddedCassandraServerHelper; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.datastax.driver.core.Cluster; +import com.datastax.driver.core.Session; + +public class DriverTests { + + @BeforeClass + public static void startCassandra() throws IOException, TTransportException, ConfigurationException, + InterruptedException { + EmbeddedCassandraServerHelper.startEmbeddedCassandra("cassandra.yaml"); + } + + @Test + public void test() throws Exception { + + Cluster.Builder builder = Cluster.builder().addContactPoint("127.0.0.1"); + + // builder.withCompression(ProtocolOptions.Compression.SNAPPY); + + Cluster cluster = builder.build(); + + Session session = cluster.connect(); + + session.shutdown(); + + cluster.shutdown(); + + } + + @After + public void clearCassandra() { + EmbeddedCassandraServerHelper.cleanEmbeddedCassandra(); + } + + @AfterClass + public static void stopCassandra() { + EmbeddedCassandraServerHelper.stopEmbeddedCassandra(); + } +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/config/TestConfig.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/config/TestConfig.java new file mode 100644 index 000000000..5cd565bfd --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/config/TestConfig.java @@ -0,0 +1,37 @@ +package org.springframework.data.cassandra.test.integration.config; + +import org.springframework.context.annotation.Bean; +import 
org.springframework.context.annotation.Configuration; +import org.springframework.data.cassandra.config.java.AbstractSpringDataCassandraConfiguration; +import org.springframework.data.cassandra.convert.CassandraConverter; +import org.springframework.data.cassandra.convert.MappingCassandraConverter; +import org.springframework.data.cassandra.core.CassandraDataOperations; +import org.springframework.data.cassandra.core.CassandraDataTemplate; +import org.springframework.data.cassandra.mapping.CassandraMappingContext; + +/** + * Setup any spring configuration for unit tests + * + * @author David Webb + * @author Matthew T. Adams + */ +@Configuration +public class TestConfig extends AbstractSpringDataCassandraConfiguration { + + public static final String keyspaceName = "test"; + + @Override + protected String getKeyspaceName() { + return keyspaceName; + } + + @Bean + public CassandraConverter cassandraConverter() { + return new MappingCassandraConverter(new CassandraMappingContext()); + } + + @Bean + public CassandraDataOperations cassandraDataTemplate() throws Exception { + return new CassandraDataTemplate(session().getObject(), converter(), keyspaceName); + } +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/mapping/BasicCassandraPersistentEntityIntegrationTests.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/mapping/BasicCassandraPersistentEntityIntegrationTests.java new file mode 100644 index 000000000..0112a2ae1 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/mapping/BasicCassandraPersistentEntityIntegrationTests.java @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2011 by the original author(s). + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.mapping; + +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertThat; +import static org.mockito.Mockito.when; + +import java.io.IOException; + +import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.thrift.transport.TTransportException; +import org.cassandraunit.utils.EmbeddedCassandraServerHelper; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import org.springframework.context.ApplicationContext; +import org.springframework.data.cassandra.mapping.BasicCassandraPersistentEntity; +import org.springframework.data.cassandra.mapping.Table; +import org.springframework.data.util.ClassTypeInformation; + +/** + * Integration tests for {@link BasicCassandraPersistentEntity}. 
+ * + * @author Alex Shvid + */ +@RunWith(MockitoJUnitRunner.class) +public class BasicCassandraPersistentEntityIntegrationTests { + + @Mock + ApplicationContext context; + + @BeforeClass + public static void startCassandra() throws IOException, TTransportException, ConfigurationException, + InterruptedException { + EmbeddedCassandraServerHelper.startEmbeddedCassandra("cassandra.yaml"); + } + + @Test + public void subclassInheritsAtDocumentAnnotation() { + + BasicCassandraPersistentEntity entity = new BasicCassandraPersistentEntity( + ClassTypeInformation.from(Notification.class)); + assertThat(entity.getTableName(), is("messages")); + } + + @Test + public void evaluatesSpELExpression() { + + BasicCassandraPersistentEntity entity = new BasicCassandraPersistentEntity( + ClassTypeInformation.from(Area.class)); + assertThat(entity.getTableName(), is("123")); + } + + @Test + public void collectionAllowsReferencingSpringBean() { + + MappingBean bean = new MappingBean(); + bean.userLine = "user_line"; + + when(context.getBean("mappingBean")).thenReturn(bean); + when(context.containsBean("mappingBean")).thenReturn(true); + + BasicCassandraPersistentEntity entity = new BasicCassandraPersistentEntity( + ClassTypeInformation.from(UserLine.class)); + entity.setApplicationContext(context); + + assertThat(entity.getTableName(), is("user_line")); + } + + @After + public void clearCassandra() { + EmbeddedCassandraServerHelper.cleanEmbeddedCassandra(); + } + + @AfterClass + public static void stopCassandra() { + EmbeddedCassandraServerHelper.stopEmbeddedCassandra(); + } + + @Table(name = "messages") + class Message { + + } + + class Notification extends Message { + + } + + @Table(name = "#{123}") + class Area { + + } + + @Table(name = "#{mappingBean.userLine}") + class UserLine { + + } + + class MappingBean { + + String userLine; + + public String getUserLine() { + return userLine; + } + } + +} diff --git 
a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/mapping/BasicCassandraPersistentPropertyIntegrationTests.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/mapping/BasicCassandraPersistentPropertyIntegrationTests.java new file mode 100644 index 000000000..db6f31658 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/mapping/BasicCassandraPersistentPropertyIntegrationTests.java @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2011 by the original author(s). + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.test.integration.mapping; + +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; + +import java.lang.reflect.Field; +import java.util.Date; + +import org.junit.Before; +import org.junit.Test; +import org.springframework.data.cassandra.mapping.BasicCassandraPersistentEntity; +import org.springframework.data.cassandra.mapping.BasicCassandraPersistentProperty; +import org.springframework.data.cassandra.mapping.CassandraPersistentEntity; +import org.springframework.data.cassandra.mapping.CassandraPersistentProperty; +import org.springframework.data.cassandra.mapping.CassandraSimpleTypeHolder; +import org.springframework.data.cassandra.mapping.Column; +import org.springframework.data.cassandra.mapping.PrimaryKey; +import org.springframework.data.util.ClassTypeInformation; +import org.springframework.util.ReflectionUtils; + +/** + * Integration test for {@link BasicCassandraPersistentProperty}. 
+ * + * @author Alex Shvid + */ +public class BasicCassandraPersistentPropertyIntegrationTests { + + static class Timeline { + + @PrimaryKey + String id; + + Date time; + + @Column("message") + String text; + + } + + CassandraPersistentEntity entity; + + @Before + public void setup() { + entity = new BasicCassandraPersistentEntity(ClassTypeInformation.from(Timeline.class)); + } + + @Test + public void usesAnnotatedColumnName() { + + Field field = ReflectionUtils.findField(Timeline.class, "text"); + assertThat(getPropertyFor(field).getColumnName(), is("message")); + } + + @Test + public void checksIdProperty() { + Field field = ReflectionUtils.findField(Timeline.class, "id"); + CassandraPersistentProperty property = getPropertyFor(field); + assertTrue(property.isIdProperty()); + } + + @Test + public void returnsPropertyNameForUnannotatedProperty() { + Field field = ReflectionUtils.findField(Timeline.class, "time"); + assertThat(getPropertyFor(field).getColumnName(), is("time")); + } + + private CassandraPersistentProperty getPropertyFor(Field field) { + return new BasicCassandraPersistentProperty(field, null, entity, new CassandraSimpleTypeHolder()); + } +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/repository/UserRepository.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/repository/UserRepository.java new file mode 100644 index 000000000..99256f7f3 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/repository/UserRepository.java @@ -0,0 +1,29 @@ +/* + * Copyright 2011 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.repository; + +import org.springframework.data.cassandra.repository.CassandraRepository; +import org.springframework.data.cassandra.test.integration.table.User; + +/** + * Sample repository managing {@link User} entities. + * + * @author Alex Shvid + * + */ +public interface UserRepository extends CassandraRepository { + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/repository/UserRepositoryIntegrationTests.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/repository/UserRepositoryIntegrationTests.java new file mode 100644 index 000000000..18005b797 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/repository/UserRepositoryIntegrationTests.java @@ -0,0 +1,164 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.test.integration.repository; + +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.junit.Assert.assertThat; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.thrift.transport.TTransportException; +import org.cassandraunit.utils.EmbeddedCassandraServerHelper; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.data.cassandra.core.CassandraDataOperations; +import org.springframework.data.cassandra.test.integration.table.User; + +import com.google.common.collect.Lists; + +/** + * Base class for tests for {@link UserRepository}. + * + * @author Alex Shvid + * + */ +// @ContextConfiguration +// @RunWith(SpringJUnit4ClassRunner.class) +public class UserRepositoryIntegrationTests { + + @Autowired + protected UserRepository repository; + + @Autowired + protected CassandraDataOperations dataOperations; + + User tom, bob, alice, scott; + + List all; + + @BeforeClass + public static void startCassandra() throws IOException, TTransportException, ConfigurationException, + InterruptedException { + EmbeddedCassandraServerHelper.startEmbeddedCassandra("cassandra.yaml"); + } + + @Before + public void setUp() throws InterruptedException { + + repository.deleteAll(); + + tom = new User(); + tom.setUsername("tom"); + tom.setFirstName("Tom"); + tom.setLastName("Ron"); + tom.setPassword("123"); + tom.setPlace("SF"); + + bob = new User(); + bob.setUsername("bob"); + bob.setFirstName("Bob"); + bob.setLastName("White"); + bob.setPassword("555"); + bob.setPlace("NY"); + + alice = new User(); + alice.setUsername("alice"); + 
alice.setFirstName("Alice"); + alice.setLastName("Red"); + alice.setPassword("777"); + alice.setPlace("LA"); + + scott = new User(); + scott.setUsername("scott"); + scott.setFirstName("Scott"); + scott.setLastName("Van"); + scott.setPassword("444"); + scott.setPlace("Boston"); + + all = dataOperations.insert(Arrays.asList(tom, bob, alice, scott)); + } + + // @Test + public void findsUserById() throws Exception { + + User user = repository.findOne(bob.getUsername()); + Assert.assertNotNull(user); + assertEquals(bob, user); + + } + + // @Test + public void findsAll() throws Exception { + List result = Lists.newArrayList(repository.findAll()); + assertThat(result.size(), is(all.size())); + assertThat(result.containsAll(all), is(true)); + + } + + // @Test + public void findsAllWithGivenIds() { + + Iterable result = repository.findAll(Arrays.asList(bob.getUsername(), tom.getUsername())); + assertThat(result, hasItems(bob, tom)); + assertThat(result, not(hasItems(alice, scott))); + } + + // @Test + public void deletesUserCorrectly() throws Exception { + + repository.delete(tom); + + List result = Lists.newArrayList(repository.findAll()); + + assertThat(result.size(), is(all.size() - 1)); + assertThat(result, not(hasItem(tom))); + } + + // @Test + public void deletesUserByIdCorrectly() { + + repository.delete(tom.getUsername().toString()); + + List result = Lists.newArrayList(repository.findAll()); + + assertThat(result.size(), is(all.size() - 1)); + assertThat(result, not(hasItem(tom))); + } + + @AfterClass + public static void stopCassandra() { + EmbeddedCassandraServerHelper.cleanEmbeddedCassandra(); + EmbeddedCassandraServerHelper.stopEmbeddedCassandra(); + } + + private static void assertEquals(User user1, User user2) { + Assert.assertEquals(user1.getUsername(), user2.getUsername()); + Assert.assertEquals(user1.getFirstName(), user2.getFirstName()); + Assert.assertEquals(user1.getLastName(), user2.getLastName()); + Assert.assertEquals(user1.getPlace(), 
user2.getPlace()); + Assert.assertEquals(user1.getPassword(), user2.getPassword()); + } + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Book.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Book.java new file mode 100644 index 000000000..e647f9da1 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Book.java @@ -0,0 +1,104 @@ +/* + * Copyright 2011-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.table; + +import org.springframework.data.cassandra.mapping.PrimaryKey; +import org.springframework.data.cassandra.mapping.Table; + +/** + * Test POJO + * + * @author David Webb + * + */ +@Table(name = "book") +public class Book { + + @PrimaryKey + private String isbn; + + private String title; + private String author; + private int pages; + + /** + * @return Returns the isbn. + */ + public String getIsbn() { + return isbn; + } + + /** + * @param isbn The isbn to set. + */ + public void setIsbn(String isbn) { + this.isbn = isbn; + } + + /** + * @return Returns the title. + */ + public String getTitle() { + return title; + } + + /** + * @param title The title to set. + */ + public void setTitle(String title) { + this.title = title; + } + + /** + * @return Returns the author. 
+ */ + public String getAuthor() { + return author; + } + + /** + * @param author The author to set. + */ + public void setAuthor(String author) { + this.author = author; + } + + /** + * @return Returns the pages. + */ + public int getPages() { + return pages; + } + + /** + * @param pages The pages to set. + */ + public void setPages(int pages) { + this.pages = pages; + } + + /* (non-Javadoc) + * @see java.lang.Object#toString() + */ + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("isbn -> " + isbn).append("\n"); + sb.append("tile -> " + title).append("\n"); + sb.append("author -> " + author).append("\n"); + sb.append("pages -> " + pages).append("\n"); + return sb.toString(); + } +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Comment.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Comment.java new file mode 100644 index 000000000..b7b02b3d8 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Comment.java @@ -0,0 +1,93 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.test.integration.table; + +import java.util.Date; +import java.util.Set; + +import org.springframework.data.cassandra.mapping.PrimaryKey; +import org.springframework.data.cassandra.mapping.CassandraType; +import org.springframework.data.cassandra.mapping.Table; + +import com.datastax.driver.core.DataType; + +/** + * This is an example of dynamic table (wide row). PartitionKey (former RowId) is pk.author. ClusteredColumn (former + * Column Id) is pk.time + * + * @author Alex Shvid + */ +@Table(name = "comments") +public class Comment { + + /* + * Primary Key + */ + @PrimaryKey + private CommentPK pk; + + private String text; + + @CassandraType(type = DataType.Name.SET, typeArguments = { DataType.Name.TEXT }) + private Set likes; + + /* + * Reference to the Post + */ + private String postAuthor; + private Date postTime; + + public CommentPK getPk() { + return pk; + } + + public void setPk(CommentPK pk) { + this.pk = pk; + } + + public String getText() { + return text; + } + + public void setText(String text) { + this.text = text; + } + + public Set getLikes() { + return likes; + } + + public void setLikes(Set likes) { + this.likes = likes; + } + + public String getPostAuthor() { + return postAuthor; + } + + public void setPostAuthor(String postAuthor) { + this.postAuthor = postAuthor; + } + + public Date getPostTime() { + return postTime; + } + + public void setPostTime(Date postTime) { + this.postTime = postTime; + } + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/CommentPK.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/CommentPK.java new file mode 100644 index 000000000..6ebe1ad8f --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/CommentPK.java @@ -0,0 +1,65 @@ +/* + * Copyright 2010-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.table; + +import java.util.Date; + +import org.springframework.cassandra.core.PrimaryKeyType; +import org.springframework.data.cassandra.mapping.CompositePrimaryKey; +import org.springframework.data.cassandra.mapping.PrimaryKeyColumn; +import org.springframework.data.cassandra.mapping.CassandraType; + +import com.datastax.driver.core.DataType; + +/** + * This is an example of dynamic table (wide row) that creates each time new column with timestamp. 
+ * + * @author Alex Shvid + */ + +@CompositePrimaryKey +public class CommentPK { + + /* + * Row ID + */ + @PrimaryKeyColumn(ordinal = 0, type = PrimaryKeyType.PARTITIONED) + private String author; + + /* + * Clustered Column + */ + @PrimaryKeyColumn(ordinal = 1) + @CassandraType(type = DataType.Name.TIMESTAMP) + private Date time; + + public String getAuthor() { + return author; + } + + public void setAuthor(String author) { + this.author = author; + } + + public Date getTime() { + return time; + } + + public void setTime(Date time) { + this.time = time; + } + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/LogEntry.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/LogEntry.java new file mode 100644 index 000000000..d2650f536 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/LogEntry.java @@ -0,0 +1,84 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.table; + +import java.util.Date; + +import org.springframework.data.cassandra.mapping.PrimaryKey; +import org.springframework.data.cassandra.mapping.Table; + +/** + * This is an example of the LogEntry static table, where all fields are columns in Cassandra row. 
+ * + * + * @author Alex Shvid + */ +@Table(name = "log_entry") +public class LogEntry { + + /* + * Primary Key + */ + @PrimaryKey + private Date logDate; + + private String hostname; + + private String logData; + + /** + * @return Returns the logDate. + */ + public Date getLogDate() { + return logDate; + } + + /** + * @param logDate The logDate to set. + */ + public void setLogDate(Date logDate) { + this.logDate = logDate; + } + + /** + * @return Returns the hostname. + */ + public String getHostname() { + return hostname; + } + + /** + * @param hostname The hostname to set. + */ + public void setHostname(String hostname) { + this.hostname = hostname; + } + + /** + * @return Returns the logData. + */ + public String getLogData() { + return logData; + } + + /** + * @param logData The logData to set. + */ + public void setLogData(String logData) { + this.logData = logData; + } + +} \ No newline at end of file diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Notification.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Notification.java new file mode 100644 index 000000000..5b6ed07d7 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Notification.java @@ -0,0 +1,92 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.test.integration.table; + +import java.util.Date; + +import org.springframework.data.annotation.Id; +import org.springframework.data.cassandra.mapping.Indexed; +import org.springframework.data.cassandra.mapping.Table; + +/** + * This is an example of dynamic table that creates each time new column with Notification timestamp. + * + * By default it is active Notification until user deactivate it. This table uses index on the field active to access in + * WHERE cause only for active notifications. + * + * @author Alex Shvid + */ +@Table(name = "notifications") +public class Notification { + + /* + * Primary Key + */ + @Id + private NotificationPK pk; + + @Indexed + private boolean active; + + /* + * Reference data + */ + + private String type; // comment, post + private String refAuthor; + private Date refTime; + + public NotificationPK getPk() { + return pk; + } + + public void setPk(NotificationPK pk) { + this.pk = pk; + } + + public boolean isActive() { + return active; + } + + public void setActive(boolean active) { + this.active = active; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getRefAuthor() { + return refAuthor; + } + + public void setRefAuthor(String refAuthor) { + this.refAuthor = refAuthor; + } + + public Date getRefTime() { + return refTime; + } + + public void setRefTime(Date refTime) { + this.refTime = refTime; + } + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/NotificationPK.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/NotificationPK.java new file mode 100644 index 000000000..884a73f40 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/NotificationPK.java @@ -0,0 +1,67 @@ +/* + * Copyright 2010-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.table; + +import java.util.Date; + +import org.springframework.cassandra.core.PrimaryKeyType; +import org.springframework.data.cassandra.mapping.CompositePrimaryKey; +import org.springframework.data.cassandra.mapping.PrimaryKeyColumn; +import org.springframework.data.cassandra.mapping.CassandraType; + +import com.datastax.driver.core.DataType; + +/** + * This is an example of dynamic table that creates each time new column with Notification timestamp. + * + * By default it is active Notification until user deactivate it. This table uses index on the field active to access in + * WHERE cause only for active notifications. 
+ * + * @author Alex Shvid + */ +@CompositePrimaryKey +public class NotificationPK { + + /* + * Row ID + */ + @PrimaryKeyColumn(ordinal = 0, type = PrimaryKeyType.PARTITIONED) + private String username; + + /* + * Clustered Column + */ + @PrimaryKeyColumn(ordinal = 1) + @CassandraType(type = DataType.Name.TIMESTAMP) + private Date time; + + public String getUsername() { + return username; + } + + public void setUsername(String username) { + this.username = username; + } + + public Date getTime() { + return time; + } + + public void setTime(Date time) { + this.time = time; + } + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Post.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Post.java new file mode 100644 index 000000000..c4d9317ff --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Post.java @@ -0,0 +1,107 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.table; + +import java.util.Date; +import java.util.Map; +import java.util.Set; + +import org.springframework.data.annotation.Id; +import org.springframework.data.cassandra.mapping.Table; + +/** + * This is an example of dynamic table that creates each time new column with Post timestamp. 
+ * + * It is possible to use a static table for posts and identify them by PostId(UUID), but in this case we need to use + * MapReduce for Big Data to find posts for particular user, so it is better to have index (userId) -> index (post time) + * architecture. It helps a lot to build eventually a search index for the particular user. + * + * @author Alex Shvid + */ +@Table(name = "posts") +public class Post { + + /* + * Primary Key + */ + @Id + private PostPK pk; + + private String type; // status, share + + private String text; + private Set resources; + private Map comments; + private Set likes; + private Set followers; + + public PostPK getPk() { + return pk; + } + + public void setPk(PostPK pk) { + this.pk = pk; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getText() { + return text; + } + + public void setText(String text) { + this.text = text; + } + + public Set getResources() { + return resources; + } + + public void setResources(Set resources) { + this.resources = resources; + } + + public Map getComments() { + return comments; + } + + public void setComments(Map comments) { + this.comments = comments; + } + + public Set getLikes() { + return likes; + } + + public void setLikes(Set likes) { + this.likes = likes; + } + + public Set getFollowers() { + return followers; + } + + public void setFollowers(Set followers) { + this.followers = followers; + } + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/PostPK.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/PostPK.java new file mode 100644 index 000000000..b6f7e3630 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/PostPK.java @@ -0,0 +1,65 @@ +/* + * Copyright 2010-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.table; + +import java.util.Date; + +import org.springframework.cassandra.core.PrimaryKeyType; +import org.springframework.data.cassandra.mapping.CompositePrimaryKey; +import org.springframework.data.cassandra.mapping.PrimaryKeyColumn; + +/** + * This is an example of dynamic table that creates each time new column with Post timestamp. + * + * It is possible to use a static table for posts and identify them by PostId(UUID), but in this case we need to use + * MapReduce for Big Data to find posts for particular user, so it is better to have index (userId) -> index (post time) + * architecture. It helps a lot to build eventually a search index for the particular user. 
+ * + * @author Alex Shvid + */ + +@CompositePrimaryKey +public class PostPK { + + /* + * Row ID + */ + @PrimaryKeyColumn(ordinal = 0, type = PrimaryKeyType.PARTITIONED) + private String author; + + /* + * Clustered Column + */ + @PrimaryKeyColumn(ordinal = 1) + private Date time; + + public String getAuthor() { + return author; + } + + public void setAuthor(String author) { + this.author = author; + } + + public Date getTime() { + return time; + } + + public void setTime(Date time) { + this.time = time; + } + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Timeline.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Timeline.java new file mode 100644 index 000000000..813f6a444 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/Timeline.java @@ -0,0 +1,71 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.table; + +import java.util.Date; + +import org.springframework.data.annotation.Id; +import org.springframework.data.cassandra.mapping.Table; + +/** + * This is an example of the users timeline dynamic table, where all columns are dynamically created by @ColumnId field + * value. The rest fields are places in Cassandra value. 
+ * + * Timeline entity is used to store user's status updates that it follows in the site. Timeline always ordered by @ColumnId + * field and we can retrieve last top status updates by using limits. + * + * @author Alex Shvid + */ +@Table(name = "timeline") +public class Timeline { + + /* + * Row ID + */ + @Id + private TimelinePK pk; + + /* + * Reference to the post by author and postUID + */ + private String author; + private Date postTime; + + public TimelinePK getPk() { + return pk; + } + + public void setPk(TimelinePK pk) { + this.pk = pk; + } + + public String getAuthor() { + return author; + } + + public void setAuthor(String author) { + this.author = author; + } + + public Date getPostTime() { + return postTime; + } + + public void setPostTime(Date postTime) { + this.postTime = postTime; + } + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/TimelinePK.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/TimelinePK.java new file mode 100644 index 000000000..8a54f2191 --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/TimelinePK.java @@ -0,0 +1,65 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.springframework.data.cassandra.test.integration.table; + +import java.util.Date; + +import org.springframework.cassandra.core.PrimaryKeyType; +import org.springframework.data.cassandra.mapping.CompositePrimaryKey; +import org.springframework.data.cassandra.mapping.PrimaryKeyColumn; + +/** + * This is an example of the users timeline dynamic table, where all columns are dynamically created by @ColumnId field + * value. The rest fields are places in Cassandra value. + * + * Timeline entity is used to store user's status updates that it follows in the site. Timeline always ordered by @ColumnId + * field and we can retrieve last top status updates by using limits. + * + * @author Alex Shvid + */ + +@CompositePrimaryKey +public class TimelinePK { + + /* + * Row ID + */ + @PrimaryKeyColumn(ordinal = 0, type = PrimaryKeyType.PARTITIONED) + private String username; + + /* + * Clustered Column + */ + @PrimaryKeyColumn(ordinal = 1) + private Date time; + + public String getUsername() { + return username; + } + + public void setUsername(String username) { + this.username = username; + } + + public Date getTime() { + return time; + } + + public void setTime(Date time) { + this.time = time; + } + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/User.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/User.java new file mode 100644 index 000000000..3bb91aeff --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/table/User.java @@ -0,0 +1,183 @@ +/* + * Copyright 2010-2013 the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.table; + +import java.util.Set; + +import org.springframework.data.annotation.Id; +import org.springframework.data.cassandra.mapping.Indexed; +import org.springframework.data.cassandra.mapping.Table; + +/** + * This is an example of the Users status table, where all fields are columns in Cassandra row. Some fields can be + * Set,List,Map like emails. + * + * User contains base information related for separate user, like names, additional information, emails, following + * users, friends. + * + * @author Alex Shvid + */ +@Table(name = "users") +public class User { + + /* + * Primary Row ID + */ + @Id + private String username; + + /* + * Public information + */ + private String firstName; + private String lastName; + + /* + * Secondary index, used only on fields with common information, + * not effective on email, username + */ + @Indexed + private String place; + + /* + * User emails + */ + private Set emails; + + /* + * Password + */ + private String password; + + /* + * Birth Year + */ + private int birthYear; + + /* + * Following other users in userline + */ + private Set following; + + /* + * Friends of the user + */ + private Set friends; + + public String getUsername() { + return username; + } + + public void setUsername(String username) { + this.username = username; + } + + public String getFirstName() { + return firstName; + } + + public void setFirstName(String firstName) { + this.firstName = firstName; + } + + public String getLastName() { + return lastName; + } + + public 
void setLastName(String lastName) { + this.lastName = lastName; + } + + public String getPlace() { + return place; + } + + public void setPlace(String place) { + this.place = place; + } + + public Set getEmails() { + return emails; + } + + public void setEmails(Set emails) { + this.emails = emails; + } + + public String getPassword() { + return password; + } + + public void setPassword(String password) { + this.password = password; + } + + public Set getFollowing() { + return following; + } + + public void setFollowing(Set following) { + this.following = following; + } + + public Set getFriends() { + return friends; + } + + public void setFriends(Set friends) { + this.friends = friends; + } + + /** + * @return Returns the birthYear. + */ + public int getBirthYear() { + return birthYear; + } + + /** + * @param birthYear The birthYear to set. + */ + public void setBirthYear(int birthYear) { + this.birthYear = birthYear; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((username == null) ? 0 : username.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + User other = (User) obj; + if (username == null) { + if (other.username != null) + return false; + } else if (!username.equals(other.username)) + return false; + return true; + } + +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/template/CassandraAdminTest.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/template/CassandraAdminTest.java new file mode 100644 index 000000000..ff1b8f81e --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/template/CassandraAdminTest.java @@ -0,0 +1,106 @@ +/* + * Copyright 2011-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.template; + +import java.io.IOException; + +import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.thrift.transport.TTransportException; +import org.cassandraunit.DataLoader; +import org.cassandraunit.dataset.yaml.ClassPathYamlDataSet; +import org.cassandraunit.utils.EmbeddedCassandraServerHelper; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.cassandra.core.CassandraOperations; +import org.springframework.context.ApplicationContext; +import org.springframework.data.cassandra.test.integration.config.TestConfig; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.test.context.support.AnnotationConfigContextLoader; + +/** + * @author David Webb + * + */ +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(classes = { TestConfig.class }, loader = AnnotationConfigContextLoader.class) +public class CassandraAdminTest { + + @Autowired + private CassandraOperations cassandraTemplate; + + @Mock + 
ApplicationContext context; + + private static Logger log = LoggerFactory.getLogger(CassandraAdminTest.class); + + @BeforeClass + public static void startCassandra() throws IOException, TTransportException, ConfigurationException, + InterruptedException { + EmbeddedCassandraServerHelper.startEmbeddedCassandra("cassandra.yaml"); + + /* + * Load data file to creat the test keyspace before we init the template + */ + DataLoader dataLoader = new DataLoader("Test Cluster", "localhost:9160"); + dataLoader.load(new ClassPathYamlDataSet("cassandra-keyspace.yaml")); + + } + + @Before + public void setupKeyspace() { + + /* + * Load data file to creat the test keyspace before we init the template + */ + DataLoader dataLoader = new DataLoader("Test Cluster", "localhost:9160"); + dataLoader.load(new ClassPathYamlDataSet("cassandra-keyspace.yaml")); + + } + + @Test + public void alterTableTest() { + + // cassandraTemplate.alterTable(UserAlter.class); + + } + + @Test + public void dropTableTest() { + + // cassandraTemplate.dropTable(User.class); + // cassandraTemplate.dropTable("comments"); + + } + + @After + public void clearCassandra() { + EmbeddedCassandraServerHelper.cleanEmbeddedCassandra(); + } + + @AfterClass + public static void stopCassandra() { + EmbeddedCassandraServerHelper.cleanEmbeddedCassandra(); + } +} diff --git a/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/template/CassandraDataOperationsTest.java b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/template/CassandraDataOperationsTest.java new file mode 100644 index 000000000..bc4bff66c --- /dev/null +++ b/spring-data-cassandra/src/test/java/org/springframework/data/cassandra/test/integration/template/CassandraDataOperationsTest.java @@ -0,0 +1,697 @@ +/* + * Copyright 2011-2013 the original author or authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.springframework.data.cassandra.test.integration.template; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; + +import org.apache.cassandra.exceptions.ConfigurationException; +import org.apache.thrift.transport.TTransportException; +import org.cassandraunit.CassandraCQLUnit; +import org.cassandraunit.DataLoader; +import org.cassandraunit.dataset.cql.ClassPathCQLDataSet; +import org.cassandraunit.dataset.yaml.ClassPathYamlDataSet; +import org.cassandraunit.utils.EmbeddedCassandraServerHelper; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.cassandra.core.ConsistencyLevel; +import org.springframework.cassandra.core.QueryOptions; +import org.springframework.cassandra.core.RetryPolicy; +import org.springframework.data.cassandra.core.CassandraDataOperations; +import org.springframework.data.cassandra.test.integration.config.TestConfig; +import org.springframework.data.cassandra.test.integration.table.Book; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import 
org.springframework.test.context.support.AnnotationConfigContextLoader; + +import com.datastax.driver.core.querybuilder.QueryBuilder; +import com.datastax.driver.core.querybuilder.Select; + +/** + * Unit Tests for CassandraTemplate + * + * @author David Webb + * + */ +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(classes = { TestConfig.class }, loader = AnnotationConfigContextLoader.class) +public class CassandraDataOperationsTest { + + @Autowired + private CassandraDataOperations cassandraDataTemplate; + + private static Logger log = LoggerFactory.getLogger(CassandraDataOperationsTest.class); + + private final static String CASSANDRA_CONFIG = "cassandra.yaml"; + private final static String KEYSPACE_NAME = "test"; + private final static String CASSANDRA_HOST = "localhost"; + private final static int CASSANDRA_NATIVE_PORT = 9042; + private final static int CASSANDRA_THRIFT_PORT = 9160; + + @Rule + public CassandraCQLUnit cassandraCQLUnit = new CassandraCQLUnit(new ClassPathCQLDataSet("cql-dataload.cql", + KEYSPACE_NAME), CASSANDRA_CONFIG, CASSANDRA_HOST, CASSANDRA_NATIVE_PORT); + + @BeforeClass + public static void startCassandra() throws IOException, TTransportException, ConfigurationException, + InterruptedException { + + EmbeddedCassandraServerHelper.startEmbeddedCassandra(CASSANDRA_CONFIG); + + /* + * Load data file to creat the test keyspace before we init the template + */ + DataLoader dataLoader = new DataLoader("Test Cluster", CASSANDRA_HOST + ":" + CASSANDRA_THRIFT_PORT); + dataLoader.load(new ClassPathYamlDataSet("cassandra-keyspace.yaml")); + } + + @Test + public void insertTest() { + + /* + * Test Single Insert with entity + */ + Book b1 = new Book(); + b1.setIsbn("123456-1"); + b1.setTitle("Spring Data Cassandra Guide"); + b1.setAuthor("Cassandra Guru"); + b1.setPages(521); + + cassandraDataTemplate.insert(b1); + + Book b2 = new Book(); + b2.setIsbn("123456-2"); + b2.setTitle("Spring Data Cassandra Guide"); + b2.setAuthor("Cassandra 
Guru"); + b2.setPages(521); + + cassandraDataTemplate.insert(b2, "book_alt"); + + /* + * Test Single Insert with entity + */ + Book b3 = new Book(); + b3.setIsbn("123456-3"); + b3.setTitle("Spring Data Cassandra Guide"); + b3.setAuthor("Cassandra Guru"); + b3.setPages(265); + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + cassandraDataTemplate.insert(b3, "book", options); + + /* + * Test Single Insert with entity + */ + Book b5 = new Book(); + b5.setIsbn("123456-5"); + b5.setTitle("Spring Data Cassandra Guide"); + b5.setAuthor("Cassandra Guru"); + b5.setPages(265); + + cassandraDataTemplate.insert(b5, options); + + } + + @Test + public void insertAsynchronouslyTest() { + + /* + * Test Single Insert with entity + */ + Book b1 = new Book(); + b1.setIsbn("123456-1"); + b1.setTitle("Spring Data Cassandra Guide"); + b1.setAuthor("Cassandra Guru"); + b1.setPages(521); + + cassandraDataTemplate.insertAsynchronously(b1); + + Book b2 = new Book(); + b2.setIsbn("123456-2"); + b2.setTitle("Spring Data Cassandra Guide"); + b2.setAuthor("Cassandra Guru"); + b2.setPages(521); + + cassandraDataTemplate.insertAsynchronously(b2, "book_alt"); + + /* + * Test Single Insert with entity + */ + Book b3 = new Book(); + b3.setIsbn("123456-3"); + b3.setTitle("Spring Data Cassandra Guide"); + b3.setAuthor("Cassandra Guru"); + b3.setPages(265); + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + cassandraDataTemplate.insertAsynchronously(b3, "book", options); + + /* + * Test Single Insert with entity + */ + Book b4 = new Book(); + b4.setIsbn("123456-4"); + b4.setTitle("Spring Data Cassandra Guide"); + b4.setAuthor("Cassandra Guru"); + b4.setPages(465); + + /* + * Test Single Insert with entity + */ + Book b5 = new Book(); + b5.setIsbn("123456-5"); + 
b5.setTitle("Spring Data Cassandra Guide"); + b5.setAuthor("Cassandra Guru"); + b5.setPages(265); + + cassandraDataTemplate.insertAsynchronously(b5, options); + + } + + @Test + public void insertBatchTest() { + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + List books = null; + + books = getBookList(20); + + cassandraDataTemplate.insert(books); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, "book_alt"); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, "book", options); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, options); + + } + + @Test + public void insertBatchAsynchronouslyTest() { + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + List books = null; + + books = getBookList(20); + + cassandraDataTemplate.insertAsynchronously(books); + + books = getBookList(20); + + cassandraDataTemplate.insertAsynchronously(books, "book_alt"); + + books = getBookList(20); + + cassandraDataTemplate.insertAsynchronously(books, "book", options); + + books = getBookList(20); + + cassandraDataTemplate.insertAsynchronously(books, options); + + } + + /** + * @return + */ + private List getBookList(int numBooks) { + + List books = new ArrayList(); + + Book b = null; + for (int i = 0; i < numBooks; i++) { + b = new Book(); + b.setIsbn(UUID.randomUUID().toString()); + b.setTitle("Spring Data Cassandra Guide"); + b.setAuthor("Cassandra Guru"); + b.setPages(i * 10 + 5); + books.add(b); + } + + return books; + } + + @Test + public void updateTest() { + + insertTest(); + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + /* + * Test Single Insert with entity + */ + Book b1 = new 
Book(); + b1.setIsbn("123456-1"); + b1.setTitle("Spring Data Cassandra Book"); + b1.setAuthor("Cassandra Guru"); + b1.setPages(521); + + cassandraDataTemplate.update(b1); + + Book b2 = new Book(); + b2.setIsbn("123456-2"); + b2.setTitle("Spring Data Cassandra Book"); + b2.setAuthor("Cassandra Guru"); + b2.setPages(521); + + cassandraDataTemplate.update(b2, "book_alt"); + + /* + * Test Single Insert with entity + */ + Book b3 = new Book(); + b3.setIsbn("123456-3"); + b3.setTitle("Spring Data Cassandra Book"); + b3.setAuthor("Cassandra Guru"); + b3.setPages(265); + + cassandraDataTemplate.update(b3, "book", options); + + /* + * Test Single Insert with entity + */ + Book b5 = new Book(); + b5.setIsbn("123456-5"); + b5.setTitle("Spring Data Cassandra Book"); + b5.setAuthor("Cassandra Guru"); + b5.setPages(265); + + cassandraDataTemplate.update(b5, options); + + } + + @Test + public void updateAsynchronouslyTest() { + + insertTest(); + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + /* + * Test Single Insert with entity + */ + Book b1 = new Book(); + b1.setIsbn("123456-1"); + b1.setTitle("Spring Data Cassandra Book"); + b1.setAuthor("Cassandra Guru"); + b1.setPages(521); + + cassandraDataTemplate.updateAsynchronously(b1); + + Book b2 = new Book(); + b2.setIsbn("123456-2"); + b2.setTitle("Spring Data Cassandra Book"); + b2.setAuthor("Cassandra Guru"); + b2.setPages(521); + + cassandraDataTemplate.updateAsynchronously(b2, "book_alt"); + + /* + * Test Single Insert with entity + */ + Book b3 = new Book(); + b3.setIsbn("123456-3"); + b3.setTitle("Spring Data Cassandra Book"); + b3.setAuthor("Cassandra Guru"); + b3.setPages(265); + + cassandraDataTemplate.updateAsynchronously(b3, "book", options); + + /* + * Test Single Insert with entity + */ + Book b5 = new Book(); + b5.setIsbn("123456-5"); + b5.setTitle("Spring Data Cassandra Book"); + 
b5.setAuthor("Cassandra Guru"); + b5.setPages(265); + + cassandraDataTemplate.updateAsynchronously(b5, options); + + } + + @Test + public void updateBatchTest() { + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + List books = null; + + books = getBookList(20); + + cassandraDataTemplate.insert(books); + + alterBooks(books); + + cassandraDataTemplate.update(books); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, "book_alt"); + + alterBooks(books); + + cassandraDataTemplate.update(books, "book_alt"); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, "book", options); + + alterBooks(books); + + cassandraDataTemplate.update(books, "book", options); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, options); + + alterBooks(books); + + cassandraDataTemplate.update(books, options); + + } + + @Test + public void updateBatchAsynchronouslyTest() { + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + List books = null; + + books = getBookList(20); + + cassandraDataTemplate.insert(books); + + alterBooks(books); + + cassandraDataTemplate.updateAsynchronously(books); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, "book_alt"); + + alterBooks(books); + + cassandraDataTemplate.updateAsynchronously(books, "book_alt"); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, "book", options); + + alterBooks(books); + + cassandraDataTemplate.updateAsynchronously(books, "book", options); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, options); + + alterBooks(books); + + cassandraDataTemplate.updateAsynchronously(books, options); + + } + + /** + * @param books + */ + private void alterBooks(List books) { + + for (Book b : books) { + 
b.setAuthor("Ernest Hemmingway"); + b.setTitle("The Old Man and the Sea"); + b.setPages(115); + } + } + + @Test + public void deleteTest() { + + insertTest(); + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + /* + * Test Single Insert with entity + */ + Book b1 = new Book(); + b1.setIsbn("123456-1"); + + cassandraDataTemplate.delete(b1); + + Book b2 = new Book(); + b2.setIsbn("123456-2"); + + cassandraDataTemplate.delete(b2, "book_alt"); + + /* + * Test Single Insert with entity + */ + Book b3 = new Book(); + b3.setIsbn("123456-3"); + + cassandraDataTemplate.delete(b3, "book", options); + + /* + * Test Single Insert with entity + */ + Book b5 = new Book(); + b5.setIsbn("123456-5"); + + cassandraDataTemplate.delete(b5, options); + + } + + @Test + public void deleteAsynchronouslyTest() { + + insertTest(); + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + /* + * Test Single Insert with entity + */ + Book b1 = new Book(); + b1.setIsbn("123456-1"); + + cassandraDataTemplate.deleteAsynchronously(b1); + + Book b2 = new Book(); + b2.setIsbn("123456-2"); + + cassandraDataTemplate.deleteAsynchronously(b2, "book_alt"); + + /* + * Test Single Insert with entity + */ + Book b3 = new Book(); + b3.setIsbn("123456-3"); + + cassandraDataTemplate.deleteAsynchronously(b3, "book", options); + + /* + * Test Single Insert with entity + */ + Book b5 = new Book(); + b5.setIsbn("123456-5"); + + cassandraDataTemplate.deleteAsynchronously(b5, options); + + } + + @Test + public void deleteBatchTest() { + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + List books = null; + + books = getBookList(20); + + cassandraDataTemplate.insert(books); + + 
cassandraDataTemplate.delete(books); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, "book_alt"); + + cassandraDataTemplate.delete(books, "book_alt"); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, "book", options); + + cassandraDataTemplate.delete(books, "book", options); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, options); + + cassandraDataTemplate.delete(books, options); + + } + + @Test + public void deleteBatchAsynchronouslyTest() { + + QueryOptions options = new QueryOptions(); + options.setConsistencyLevel(ConsistencyLevel.ONE); + options.setRetryPolicy(RetryPolicy.DOWNGRADING_CONSISTENCY); + + List books = null; + + books = getBookList(20); + + cassandraDataTemplate.insert(books); + + cassandraDataTemplate.deleteAsynchronously(books); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, "book_alt"); + + cassandraDataTemplate.deleteAsynchronously(books, "book_alt"); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, "book", options); + + cassandraDataTemplate.deleteAsynchronously(books, "book", options); + + books = getBookList(20); + + cassandraDataTemplate.insert(books, options); + + cassandraDataTemplate.deleteAsynchronously(books, options); + + } + + @Test + public void selectOneTest() { + + /* + * Test Single Insert with entity + */ + Book b1 = new Book(); + b1.setIsbn("123456-1"); + b1.setTitle("Spring Data Cassandra Guide"); + b1.setAuthor("Cassandra Guru"); + b1.setPages(521); + + cassandraDataTemplate.insert(b1); + + Select select = QueryBuilder.select().all().from("book"); + select.where(QueryBuilder.eq("isbn", "123456-1")); + + Book b = cassandraDataTemplate.selectOne(select, Book.class); + + log.info("SingleSelect Book Title -> " + b.getTitle()); + log.info("SingleSelect Book Author -> " + b.getAuthor()); + + Assert.assertEquals(b.getTitle(), "Spring Data Cassandra Guide"); + Assert.assertEquals(b.getAuthor(), "Cassandra Guru"); + + } + + @Test + 
public void selectTest() { + + List books = getBookList(20); + + cassandraDataTemplate.insert(books); + + Select select = QueryBuilder.select().all().from("book"); + + List b = cassandraDataTemplate.select(select, Book.class); + + log.info("Book Count -> " + b.size()); + + Assert.assertEquals(b.size(), 20); + + } + + @Test + public void selectCountTest() { + + List books = getBookList(20); + + cassandraDataTemplate.insert(books); + + Select select = QueryBuilder.select().countAll().from("book"); + + Long count = cassandraDataTemplate.count(select); + + log.info("Book Count -> " + count); + + Assert.assertEquals(count, new Long(20)); + + } + + @After + public void clearCassandra() { + EmbeddedCassandraServerHelper.cleanEmbeddedCassandra(); + + } + + @AfterClass + public static void stopCassandra() { + EmbeddedCassandraServerHelper.stopEmbeddedCassandra(); + } +} diff --git a/spring-data-cassandra/src/test/resources/META-INF/beans.xml b/spring-data-cassandra/src/test/resources/META-INF/beans.xml new file mode 100644 index 000000000..73ae3a251 --- /dev/null +++ b/spring-data-cassandra/src/test/resources/META-INF/beans.xml @@ -0,0 +1,6 @@ + + + + diff --git a/spring-data-cassandra/src/test/resources/META-INF/cassandra-named-queries.properties b/spring-data-cassandra/src/test/resources/META-INF/cassandra-named-queries.properties new file mode 100644 index 000000000..f8e240021 --- /dev/null +++ b/spring-data-cassandra/src/test/resources/META-INF/cassandra-named-queries.properties @@ -0,0 +1 @@ +User.findByNamedQuery=SELECT firstName FROM table WHERE firstName=?0 diff --git a/spring-data-cassandra/src/test/resources/cassandra-keyspace.yaml b/spring-data-cassandra/src/test/resources/cassandra-keyspace.yaml new file mode 100644 index 000000000..a0e13da6e --- /dev/null +++ b/spring-data-cassandra/src/test/resources/cassandra-keyspace.yaml @@ -0,0 +1,3 @@ +name: test +replicationFactor: 1 +strategy: org.apache.cassandra.locator.SimpleStrategy \ No newline at end of file diff 
--git a/spring-data-cassandra/src/test/resources/cassandra.yaml b/spring-data-cassandra/src/test/resources/cassandra.yaml new file mode 100644 index 000000000..82fcfc5ad --- /dev/null +++ b/spring-data-cassandra/src/test/resources/cassandra.yaml @@ -0,0 +1,690 @@ +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. +cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +# num_tokens: 256 + +# If you haven't specified num_tokens, or have set it to the default of 1 then +# you should always specify InitialToken when setting up a production +# cluster for the first time, and often when adding capacity later. +# The principle is that each node should be given an equal slice of +# the token ring; see http://wiki.apache.org/cassandra/Operations +# for more details. +# +# If blank, Cassandra will request a token bisecting the range of +# the heaviest-loaded existing node. If there is no load information +# available, such as is the case with a new cluster, it will pick +# a random token, which will lead to hot spots. 
+initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours +# throttle in KBs per second, per delivery thread +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# The following setting populates the page cache on memtable flush and compaction +# WARNING: Enable this setting only when the whole node's data fits in memory. +# Defaults to: false +# populate_io_cache_on_flush: false + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +authenticator: org.apache.cassandra.auth.AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. 
+authorizer: org.apache.cassandra.auth.AllowAllAuthorizer + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +# permissions_validity_in_ms: 2000 + +# The partitioner is responsible for distributing rows (by key) across +# nodes in the cluster. Any IPartitioner may be used, including your +# own as long as it is on the classpath. Out of the box, Cassandra +# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner +# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}. +# +# - RandomPartitioner distributes rows across the cluster evenly by md5. +# This is the default prior to 1.2 and is retained for compatibility. +# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128 +# Hash Function instead of md5. When in doubt, this is the best option. +# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows +# scanning rows in key order, but the ordering can generate hot spots +# for sequential insertion workloads. +# - OrderPreservingPartitioner is an obsolete form of BOP, that stores +# - keys in a less-efficient format and only works with keys that are +# UTF8-encoded Strings. +# - CollatingOPP collates according to EN,US rules rather than lexical byte +# ordering. Use this as an example if you need custom collation. +# +# See http://wiki.apache.org/cassandra/Operations for more on +# partitioners and token selection. +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. Cassandra +# will spread data evenly across them, subject to the granularity of +# the configured compaction strategy. 
+data_file_directories: + - target/embeddedCassandra/data + +# commit log +commitlog_directory: target/embeddedCassandra/commitlog + +# policy for data disk failures: +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# can still be inspected via JMX. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must contain the entire row, +# so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the key cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Maximum size of the row cache in memory. +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is 0, to disable row caching. 
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The provider for the row cache to use.
+#
+# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
+#
+# SerializingCacheProvider serialises the contents of the row and stores
+# it in native memory, i.e., off the JVM Heap. Serialized rows take
+# significantly less memory than "live" rows in the JVM, so you can cache
+# more rows in a given memory footprint. And storing the cache off-heap
+# means you can use smaller heap sizes, reducing the impact of GC pauses.
+# Note however that when a row is requested from the row cache, it must be
+# deserialized into the heap for use.
+#
+# It is also valid to specify the fully-qualified class name to a class
+# that implements org.apache.cassandra.cache.IRowCacheProvider.
+#
+# Defaults to SerializingCacheProvider
+row_cache_provider: SerializingCacheProvider
+
+# saved caches
+saved_caches_directory: target/embeddedCassandra/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 50 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. +# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: ",," + - seeds: "127.0.0.1" + +# emergency pressure valve: each time heap usage after a full (CMS) +# garbage collection is above this fraction of the max, Cassandra will +# flush the largest memtables. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +# +# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY: +# it is most effective under light to moderate load, or read-heavy +# workloads; under truly massive write load, it will often be too +# little, too late. 
+flush_largest_memtables_at: 0.75 + +# emergency pressure valve #2: the first time heap usage after a full +# (CMS) garbage collection is above this fraction of the max, +# Cassandra will reduce cache maximum _capacity_ to the given fraction +# of the current _size_. Should usually be set substantially above +# flush_largest_memtables_at, since that will have less long-term +# impact on the system. +# +# Set to 1.0 to disable. Setting this lower than +# CMSInitiatingOccupancyFraction is not likely to be useful. +reduce_cache_sizes_at: 0.85 +reduce_cache_capacity_to: 0.6 + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 + +# Total memory to use for memtables. Cassandra will flush the largest +# memtable when this much memory is used. +# If omitted, Cassandra will set it to 1/3 of the heap. +# memtable_total_space_in_mb: 2048 + +# Total space to use for commitlogs. Since commitlog segments are +# mmapped, and hence use up address space, the default size is 32 +# on 32-bit JVMs, and 1024 on 64-bit JVMs. +# +# If space gets above this value (it will round up to the next nearest +# segment multiple), Cassandra will flush every dirty CF in the oldest +# segment and remove it. So a small total commitlog space will tend +# to cause more flush activity on less-active columnfamilies. +# commitlog_total_space_in_mb: 4096 + +# This sets the amount of memtable flush writer threads. 
These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. If you have a large heap and many data directories, +# you can increase this value for better flush performance. +# By default this will be set to the amount of data directories defined. +#memtable_flush_writers: 1 + +# the number of full memtables to allow pending flush, that is, +# waiting for a writer thread. At a minimum, this should be set to +# the maximum number of secondary indexes created on a single CF. +memtable_flush_queue_size: 4 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSDs; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +ssl_storage_port: 7001 + +# Address to bind to and tell other Cassandra nodes to connect to. You +# _must_ change this if you want multiple nodes to be able to +# communicate! +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing _if_ the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting this to 0.0.0.0 is always wrong. +listen_address: localhost + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +# broadcast_address: 1.2.3.4 + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + +# Whether to start the native transport server. 
+# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +start_native_transport: true +# port for the CQL native transport to listen for clients on +native_transport_port: 9042 +# The minimum and maximum threads for handling requests when the native +# transport is used. They are similar to rpc_min_threads and rpc_max_threads, +# though the defaults differ slightly. +# native_transport_min_threads: 16 +# native_transport_max_threads: 128 + +# Whether to start the thrift rpc server. +start_rpc: true + +# The address to bind the Thrift RPC service to -- clients connect +# here. Unlike ListenAddress above, you _can_ specify 0.0.0.0 here if +# you want Thrift to listen on all interfaces. +# +# Leaving this blank has the same effect it does for ListenAddress, +# (i.e. it will be based on the configured hostname of the node). +rpc_address: localhost +# port for Thrift to listen for clients on +rpc_port: 9160 + +# enable or disable keepalive on rpc connections +rpc_keepalive: true + +# Cassandra provides three out-of-the-box options for the RPC Server: +# +# sync -> One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. 
+# +# Alternatively, can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_thread to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provides no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. +# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# Uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it it is defined by net.ipv4.tcp_wmem +# See: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_wmem +# and: man tcp +# internode_send_buff_size_in_bytes: +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum field length). +thrift_framed_transport_size_in_mb: 15 + +# The max length of a thrift message, including all fields and +# internal thrift overhead. +thrift_max_message_length_in_mb: 16 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. 
Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Add column indexes to a row after its contents reach this size. +# Increase if your column values are large, or if you have a very large +# number of columns. The competing causes are, Cassandra has to +# deserialize this much of the row to read a single column, so you want +# it to be small - at least if you do many partial-row reads - but all +# the index data is read for each access, so you don't want to generate +# that wastefully either. +column_index_size_in_kb: 64 + +# Size limit for rows being compacted in memory. Larger rows will spill +# over to disk and use a slower two-pass compaction process. A message +# will be logged specifying the row key. +in_memory_compaction_limit_in_mb: 64 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long running compactions. The default is usually +# fine and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the number of cores. +# Uncomment to make compaction mono-threaded, the pre-0.8 default. +#concurrent_compactors: 1 + +# Multi-threaded compaction. When enabled, each compaction will use +# up to one thread per core, plus one thread per sstable being merged. 
+# This is usually only useful for SSD-based hardware: otherwise, +# your concern is usually to get compaction to do LESS i/o (see: +# compaction_throughput_mb_per_sec), not more. +multithreaded_compaction: false + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this accounts for all types +# of compaction, including validation compaction. +compaction_throughput_mb_per_sec: 16 + +# Track cached row keys during compaction, and re-cache their new +# positions in the compacted sstable. Disable if you use really large +# key caches. +compaction_preheat_key_cache: true + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 10000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 10000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) 
+truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 10000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts. If disabled, Cassandra will assume the request +# was forwarded to the replica instantly by the coordinator +# +# Warning: before enabling this property make sure ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Enable socket timeout for streaming operation. +# When a timeout occurs during streaming, streaming is retried from the start +# of the current file. This _can_ involve re-streaming an important amount of +# data, so you should avoid setting the value too low. +# Default value is 0, which never times out streams. +# streaming_socket_timeout_in_ms: 0 + +# phi value that must be reached for a host to be marked down. +# most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, +# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS +# ARE PLACED. +# +# Out of the box, Cassandra provides +# - SimpleSnitch: +# Treats Strategy order as proximity. This improves cache locality +# when disabling read repair, which can further improve throughput. +# Only appropriate for single-datacenter deployments. 
+# - PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# - GossipingPropertyFileSnitch +# The rack and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via gossip. If +# cassandra-topology.properties exists, it is used as a fallback, allowing +# migration from the PropertyFileSnitch. +# - RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's +# IP address, respectively. Unless this happens to match your +# deployment conventions (as it did Facebook's), this is best used +# as an example of writing a custom Snitch class. +# - Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# - Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.) +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. 
+endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# NoScheduler - Has no options +# RoundRobin +# - throttle_limit -- The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. 
+# - default_weight -- default_weight is optional and allows for +# overriding the default which is 1. +# - weights -- Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# index_interval controls the sampling of entries from the primary +# row index in terms of space versus time. The larger the interval, +# the smaller and less effective the sampling will be. In technical +# terms, the interval corresponds to the number of index entries that +# are skipped between taking each sample. All the sampled entries +# must fit in memory. Generally, a value between 128 and 512 here +# coupled with a large key cache size on CFs results in the best trade +# offs. This value is not often changed, however if you have many +# very small rows (many to an OS page), then increasing this will +# often lower memory usage without an impact on performance. +index_interval: 128 + +# Enable or disable inter-node encryption +# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that +# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher +# suite for authentication, key exchange and encryption of the actual data transfers. +# NOTE: No custom encryption options are enabled at the moment +# The available internode options are : all, none, dc, rack +# +# If set to dc cassandra will encrypt the traffic between the DCs +# If set to rack cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. 
For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set trustore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. +# can be: all - all traffic is compressed +# dc - traffic between different datacenters is compressed +# none - nothing is compressed. +internode_compression: all + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. 
+# inter_dc_tcp_nodelay: true diff --git a/spring-data-cassandra/src/test/resources/cassandraOperationsTest-cql-dataload.cql b/spring-data-cassandra/src/test/resources/cassandraOperationsTest-cql-dataload.cql new file mode 100644 index 000000000..239ae3e25 --- /dev/null +++ b/spring-data-cassandra/src/test/resources/cassandraOperationsTest-cql-dataload.cql @@ -0,0 +1,3 @@ +create table book (isbn text, title text, author text, pages int, PRIMARY KEY (isbn)); +create table book_alt (isbn text, title text, author text, pages int, PRIMARY KEY (isbn)); +insert into book (isbn, title, author, pages) values ('999999999', 'Book of Nines', 'Nine Nine', 999); \ No newline at end of file diff --git a/spring-data-cassandra/src/test/resources/cql-dataload.cql b/spring-data-cassandra/src/test/resources/cql-dataload.cql new file mode 100644 index 000000000..e38d18d36 --- /dev/null +++ b/spring-data-cassandra/src/test/resources/cql-dataload.cql @@ -0,0 +1,3 @@ +create table book (isbn text, title text, author text, pages int, PRIMARY KEY (isbn)); +create table book_alt (isbn text, title text, author text, pages int, PRIMARY KEY (isbn)); +/*insert into book (isbn, title, author, pages) values ('999999999', 'Book of Nines', 'Nine Nine', 999);*/ \ No newline at end of file diff --git a/spring-data-cassandra/src/test/resources/log4j-embedded-cassandra.properties b/spring-data-cassandra/src/test/resources/log4j-embedded-cassandra.properties new file mode 100644 index 000000000..3984b4c0c --- /dev/null +++ b/spring-data-cassandra/src/test/resources/log4j-embedded-cassandra.properties @@ -0,0 +1,6 @@ +log4j.rootLogger=WARN, stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d [%t] %-5p %c - %m%n +log4j.logger.org.springframework.cassandra=INFO +log4j.logger.org.springframework.data.cassandra=INFO diff --git a/spring-data-cassandra/src/test/resources/log4j.properties 
b/spring-data-cassandra/src/test/resources/log4j.properties new file mode 100644 index 000000000..3984b4c0c --- /dev/null +++ b/spring-data-cassandra/src/test/resources/log4j.properties @@ -0,0 +1,6 @@ +log4j.rootLogger=WARN, stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d [%t] %-5p %c - %m%n +log4j.logger.org.springframework.cassandra=INFO +log4j.logger.org.springframework.data.cassandra=INFO diff --git a/spring-data-cassandra/src/test/resources/logback.xml b/spring-data-cassandra/src/test/resources/logback.xml new file mode 100644 index 000000000..38a367981 --- /dev/null +++ b/spring-data-cassandra/src/test/resources/logback.xml @@ -0,0 +1,19 @@ + + + + + + + %d %5p | %t | %-55logger{55} | %m | %n + + + + + + + + + + + + \ No newline at end of file diff --git a/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/config/CassandraNamespaceTests-context.xml b/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/config/CassandraNamespaceTests-context.xml new file mode 100644 index 000000000..56723073e --- /dev/null +++ b/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/config/CassandraNamespaceTests-context.xml @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/config/cassandra.properties b/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/config/cassandra.properties new file mode 100644 index 000000000..6a0dd3197 --- /dev/null +++ b/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/config/cassandra.properties @@ -0,0 +1,7 @@ +cassandra.contactPoints=localhost +cassandra.port=9042 
+cassandra.keyspace=TestKS123 + + + + diff --git a/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/repository/UserRepositoryIntegrationTests-context.xml b/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/repository/UserRepositoryIntegrationTests-context.xml new file mode 100644 index 000000000..3601a030b --- /dev/null +++ b/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/repository/UserRepositoryIntegrationTests-context.xml @@ -0,0 +1,76 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/repository/cassandra.properties b/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/repository/cassandra.properties new file mode 100644 index 000000000..6a0dd3197 --- /dev/null +++ b/spring-data-cassandra/src/test/resources/org/springframework/data/cassandra/test/integration/repository/cassandra.properties @@ -0,0 +1,7 @@ +cassandra.contactPoints=localhost +cassandra.port=9042 +cassandra.keyspace=TestKS123 + + + + diff --git a/spring-data-cassandra/template.mf b/spring-data-cassandra/template.mf new file mode 100644 index 000000000..053c579cc --- /dev/null +++ b/spring-data-cassandra/template.mf @@ -0,0 +1,31 @@ +Bundle-SymbolicName: org.springframework.data.cassandra +Bundle-Name: Spring Data Cassandra +Bundle-Vendor: Spring Data Cassandra Community +Bundle-ManifestVersion: 2 +Import-Package: + sun.reflect;version="0";resolution:=optional +Import-Template: + org.springframework.beans.*;version="[3.1.0, 4.0.0)", + org.springframework.cache.*;version="[3.1.0, 4.0.0)", + org.springframework.context.*;version="[3.1.0, 4.0.0)", + org.springframework.core.*;version="[3.1.0, 4.0.0)", + org.springframework.dao.*;version="[3.1.0, 4.0.0)", + 
org.springframework.scheduling.*;resolution:="optional";version="[3.1.0, 4.0.0)", + org.springframework.util.*;version="[3.1.0, 4.0.0)", + org.springframework.oxm.*;resolution:="optional";version="[3.1.0, 4.0.0)", + org.springframework.transaction.support.*;version="[3.1.0, 4.0.0)", + org.springframework.data.*;version="[1.5.0, 2.0.0)", + org.springframework.expression.*;version="[3.1.0, 4.0.0)", + org.springframework.cassandra.*;version="[1.0.0,2.0.0)", + org.aopalliance.*;version="[1.0.0, 2.0.0)";resolution:=optional, + org.apache.commons.logging.*;version="[1.1.1, 2.0.0)", + org.w3c.dom.*;version="0", + javax.xml.transform.*;resolution:="optional";version="0", + com.datastax.driver.core.*;resolution:="optional";version="[0.1.0, 1.0.0)", + org.apache.cassandra.db.marshal.*;version="[1.2.0, 1.3.0)", + org.slf4j.*;version="[1.5.0, 1.8.0)", + org.idevlab.rjc.*;resolution:="optional";version="[0.6.4, 0.6.4]", + org.apache.commons.pool.impl.*;resolution:="optional";version="[1.0.0, 3.0.0)", + org.codehaus.jackson.*;resolution:="optional";version="[1.6, 2.0.0)", + org.apache.commons.beanutils.*;resolution:="optional";version=1.8.5, + com.google.common.*;resolution:="optional";version="[11.0.0, 20.0.0)" \ No newline at end of file