Modified all tests to be run against a live database, no more fixtures

1 parent 07a1e11 · commit 79da65d6a67fb1209f0a79d7389743b04594befc · @kreynolds committed Nov 1, 2011
Showing with 777 additions and 859 deletions.
  1. +1 −0 .gitignore
  2. +35 −34 Rakefile
  3. +3 −7 lib/cassandra-cql/database.rb
  4. +21 −5 lib/cassandra-cql/result.rb
  5. +5 −9 lib/cassandra-cql/row.rb
  6. +2 −4 lib/cassandra-cql/schema.rb
  7. +4 −17 lib/cassandra-cql/statement.rb
  8. +1 −1 lib/cassandra-cql/version.rb
  9. +27 −38 spec/column_family_spec.rb
  10. +15 −19 spec/{roundtrip_comparator_spec.rb → comparator_spec.rb}
  11. 0 { → spec}/conf/0.8/cassandra.in.sh
  12. 0 { → spec}/conf/0.8/cassandra.yaml
  13. 0 { → spec}/conf/0.8/log4j-server.properties
  14. 0 { → spec}/conf/0.8/schema.txt
  15. +41 −0 spec/conf/1.0/cassandra.in.sh
  16. +416 −0 spec/conf/1.0/cassandra.yaml
  17. +40 −0 spec/conf/1.0/log4j-server.properties
  18. +10 −0 spec/conf/1.0/schema.txt
  19. +0 −33 spec/fixtures/result_for_sparse_columns.yaml
  20. +0 −16 spec/fixtures/result_for_standard_counter.yaml
  21. +0 −28 spec/fixtures/result_for_standard_with_long_validation.yaml
  22. +0 −65 spec/fixtures/result_for_standard_with_validations.yaml
  23. +0 −17 spec/fixtures/result_for_timeuuid.yaml
  24. +0 −2 spec/fixtures/result_for_void_operations.yaml
  25. +0 −26 spec/fixtures/standard_column_family.yaml
  26. +0 −26 spec/fixtures/standard_counter.yaml
  27. +0 −26 spec/fixtures/standard_with_long_validation.yaml
  28. +0 −40 spec/fixtures/standard_with_validations.yaml
  29. +0 −27 spec/fixtures/super_column_family.yaml
  30. +0 −164 spec/fixtures/system_schema.yaml
  31. +57 −79 spec/result_spec.rb
  32. +20 −49 spec/row_spec.rb
  33. +15 −19 spec/{roundtrip_rowkey_spec.rb → rowkey_spec.rb}
  34. +12 −14 spec/schema_spec.rb
  35. +10 −0 spec/spec_helper.rb
  36. +27 −75 spec/statement_spec.rb
  37. +15 −19 spec/{roundtrip_validation_spec.rb → validation_spec.rb}
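Every spec below now obtains its connection from a new `setup_cassandra_connection` helper in spec/spec_helper.rb (+10 lines; its body is not shown in this diff). A minimal sketch of what it plausibly does, assuming it mirrors the `before(:all)` block it replaces in comparator_spec.rb and recreates the test keyspace so each example starts clean:

```ruby
# Hypothetical reconstruction -- the actual helper body is not part of this diff.
def setup_cassandra_connection
  connection = CassandraCQL::Database.new(["127.0.0.1:9160"], {}, :retries => 2, :timeout => 1)
  # Drop and recreate the keyspace so every example runs against a clean slate
  if connection.keyspaces.map(&:name).include?("CassandraCQLTestKeyspace")
    connection.execute("DROP KEYSPACE CassandraCQLTestKeyspace")
  end
  connection.execute("CREATE KEYSPACE CassandraCQLTestKeyspace WITH strategy_class='org.apache.cassandra.locator.SimpleStrategy' AND strategy_options:replication_factor=1")
  connection.execute("USE CassandraCQLTestKeyspace")
  connection
end
```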
1 .gitignore
@@ -6,3 +6,4 @@ coverage
.rspec-tm
doc
Gemfile.lock
+tmp
69 Rakefile
@@ -4,6 +4,16 @@ Bundler::GemHelper.install_tasks
require 'rake'
require 'rspec/core'
require 'rspec/core/rake_task'
+
+CassandraBinaries = {
+ '0.8' => 'http://archive.apache.org/dist/cassandra/0.8.4/apache-cassandra-0.8.4-bin.tar.gz',
+ '1.0' => 'http://archive.apache.org/dist/cassandra/1.0.1/apache-cassandra-1.0.1-bin.tar.gz',
+}
+
+CASSANDRA_VERSION = ENV['CASSANDRA_VERSION'] || '1.0'
+CASSANDRA_HOME = File.dirname(__FILE__) + '/tmp'
+CASSANDRA_PIDFILE = ENV['CASSANDRA_PIDFILE'] || "#{CASSANDRA_HOME}/cassandra.pid"
+
RSpec::Core::RakeTask.new(:spec) do |spec|
spec.pattern = FileList['spec/**/*_spec.rb']
end
@@ -14,15 +24,21 @@ RSpec::Core::RakeTask.new(:rcov) do |spec|
spec.rcov_opts = "--exclude 'spec/*'"
end
-task :default => :spec
-
-CassandraBinaries = {
- '0.8' => 'http://archive.apache.org/dist/cassandra/0.8.4/apache-cassandra-0.8.4-bin.tar.gz'
-}
+desc "Download Cassandra and run specs against it"
+task :spec_with_server do
+ Rake::Task["cassandra:clean"].invoke
+ Rake::Task["cassandra:start"].invoke
+ error = nil
+ begin
+ Rake::Task["spec"].invoke
+ rescue
+ error = $!
+ end
+ Rake::Task["cassandra:stop"].invoke
+ raise error if error
+end
-CASSANDRA_HOME = ENV['CASSANDRA_HOME'] || "#{ENV['HOME']}/cassandra"
-CASSANDRA_VERSION = ENV['CASSANDRA_VERSION'] || '0.8'
-CASSANDRA_PIDFILE = ENV['CASSANDRA_PIDFILE'] || "#{CASSANDRA_HOME}/cassandra.pid"
+task :default => :spec
def setup_cassandra_version(version = CASSANDRA_VERSION)
FileUtils.mkdir_p CASSANDRA_HOME
@@ -31,7 +47,7 @@ def setup_cassandra_version(version = CASSANDRA_VERSION)
unless File.exists?(File.join(destination_directory, 'bin','cassandra'))
download_source = CassandraBinaries[CASSANDRA_VERSION]
- download_destination = File.join("/tmp", File.basename(download_source))
+ download_destination = File.join(CASSANDRA_HOME, File.basename(download_source))
untar_directory = File.join(CASSANDRA_HOME, File.basename(download_source,'-bin.tar.gz'))
puts "downloading cassandra"
@@ -45,9 +61,9 @@ end
def setup_environment
env = ""
if !ENV["CASSANDRA_INCLUDE"]
- env << "CASSANDRA_INCLUDE=#{File.expand_path(Dir.pwd)}/conf/#{CASSANDRA_VERSION}/cassandra.in.sh "
+ env << "CASSANDRA_INCLUDE=#{File.expand_path(Dir.pwd)}/spec/conf/#{CASSANDRA_VERSION}/cassandra.in.sh "
env << "CASSANDRA_HOME=#{CASSANDRA_HOME}/cassandra-#{CASSANDRA_VERSION} "
- env << "CASSANDRA_CONF=#{File.expand_path(Dir.pwd)}/conf/#{CASSANDRA_VERSION}"
+ env << "CASSANDRA_CONF=#{File.expand_path(Dir.pwd)}/spec/conf/#{CASSANDRA_VERSION}"
else
env << "CASSANDRA_INCLUDE=#{ENV['CASSANDRA_INCLUDE']} "
env << "CASSANDRA_HOME=#{ENV['CASSANDRA_HOME']} "
@@ -85,6 +101,8 @@ namespace :cassandra do
Dir.chdir(File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}")) do
sh("env #{env} bin/cassandra #{'-f' unless args.daemonize} -p #{CASSANDRA_PIDFILE}")
end
+ $stdout.puts "Sleeping for 8 seconds to wait for Cassandra to start ..."
+ sleep(8)
end
desc "Stop Cassandra"
@@ -93,6 +111,12 @@ namespace :cassandra do
env = setup_environment
sh("kill $(cat #{CASSANDRA_PIDFILE})")
end
+
+ desc "Delete all data files in #{CASSANDRA_HOME}"
+ task :clean do
+ sh("rm -rf #{File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}", 'data')}")
+ end
+
end
desc "Start Cassandra"
@@ -122,28 +146,5 @@ task :java do
end
end
-namespace :data do
- desc "Reset test data"
- task :reset do
- puts "Resetting test data"
- sh("rm -rf #{File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}", 'data')}")
- end
-
- desc "Load test data structures."
- task :load do
- schema_path = "#{File.expand_path(Dir.pwd)}/conf/#{CASSANDRA_VERSION}/schema.txt"
- puts "Loading test data structures."
- Dir.chdir(File.join(CASSANDRA_HOME, "cassandra-#{CASSANDRA_VERSION}")) do
- begin
- sh("bin/cassandra-cli --host localhost --batch < #{schema_path}")
- rescue
- puts "Schema already loaded."
- end
- end
- end
-end
-
-#task :spec => 'data:load'
-
require 'yard'
YARD::Rake::YardocTask.new
10 lib/cassandra-cql/database.rb
@@ -83,13 +83,9 @@ def keyspaces
@connection.describe_keyspaces.map { |keyspace| Schema.new(keyspace) }
end
- def update_schema!
- if @keyspace.nil?
- @schema = nil
- else
- # TODO: This should be replaced with a CQL call that doesn't exist yet
- @schema = Schema.new(@connection.describe_keyspace(@keyspace))
- end
+ def schema
+ # TODO: This should be replaced with a CQL call that doesn't exist yet
+ Schema.new(@connection.describe_keyspace(@keyspace))
end
end
end
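The cached-schema API (`update_schema!` plus an `@schema` ivar) is gone: `schema` now builds a fresh `Schema` from `describe_keyspace` on every call, so nothing has to remember to invalidate it after DDL. A usage sketch, assuming a connection from the helper sketched above:

```ruby
db = setup_cassandra_connection
db.execute("USE system")
db.schema.column_families.keys  # a fresh describe_keyspace round-trip on each call
```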
26 lib/cassandra-cql/result.rb
@@ -4,12 +4,28 @@ class InvalidResultType < Exception; end
class InvalidCursor < Exception; end
end
+ class ResultSchema
+ attr_reader :names, :values
+
+ def initialize(schema)
+ # When https://issues.apache.org/jira/browse/CASSANDRA-3436 is resolved, there will be no need to split/last
+ @names = Hash.new(schema.default_name_type.split(".").last)
+ schema.name_types.each_pair { |key, type|
+ @names[key] = type.split(".").last
+ }
+ @values = Hash.new(schema.default_value_type.split(".").last)
+ schema.value_types.each_pair { |key, type|
+ @values[key] = type.split(".").last
+ }
+ end
+ end
+
class Result
- attr_reader :result, :column_family, :cursor
+ attr_reader :result, :schema, :cursor
- def initialize(result, column_family=nil)
- @result, @column_family = result, column_family
- @column_family = @column_family.dup unless @column_family.nil?
+ def initialize(result)
+ @result = result
+ @schema = ResultSchema.new(result.schema) if rows?
@cursor = 0
end
@@ -40,7 +56,7 @@ def fetch_row
when CassandraCQL::Thrift::CqlResultType::ROWS
return nil if @cursor >= rows
- row = Row.new(@result.rows[@cursor], @column_family)
+ row = Row.new(@result.rows[@cursor], @schema)
@cursor += 1
return row
when CassandraCQL::Thrift::CqlResultType::VOID
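`ResultSchema` leans on Ruby's `Hash.new(default)`: columns with explicit metadata get their own type, and any other column name falls back to the column family's default, both already stripped to the short marshal class name. A self-contained sketch of that behavior, stubbing the Thrift schema object with an `OpenStruct`:

```ruby
require 'ostruct'

thrift_schema = OpenStruct.new(
  :default_name_type  => "org.apache.cassandra.db.marshal.UTF8Type",
  :name_types         => { "id" => "org.apache.cassandra.db.marshal.LongType" },
  :default_value_type => "org.apache.cassandra.db.marshal.BytesType",
  :value_types        => {}
)

rs = CassandraCQL::ResultSchema.new(thrift_schema)
rs.names["id"]       # => "LongType"  (explicit column metadata)
rs.names["missing"]  # => "UTF8Type"  (Hash.new default kicks in)
rs.values["missing"] # => "BytesType"
```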
14 lib/cassandra-cql/row.rb
@@ -2,8 +2,8 @@ module CassandraCQL
class Row
attr_reader :row
- def initialize(row, column_family)
- @row, @column_family = row, column_family
+ def initialize(row, schema)
+ @row, @schema = row, schema
end
def [](obj)
@@ -15,16 +15,12 @@ def [](obj)
def column_names
@names ||= @row.columns.map do |column|
- if column.name == @column_family.key_alias
- column.name
- else
- ColumnFamily.cast(column.name, @column_family.comparator_type)
- end
+ ColumnFamily.cast(column.name, @schema.names[column.name])
end
end
def column_values
- @values ||= @row.columns.map { |column| ColumnFamily.cast(column.value, @column_family.columns[column.name]) }
+ @values ||= @row.columns.map { |column| ColumnFamily.cast(column.value, @schema.values[column.name]) }
end
def columns
@@ -41,7 +37,7 @@ def to_hash
end
def key
- ColumnFamily.cast(@row.key, @column_family.key_validation_class)
+ ColumnFamily.cast(@row.key, @schema.values[@row.key])
end
end
end
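With the schema carried on the `Result`, each `Row` casts names and values without ever touching a `ColumnFamily` definition. Roughly, assuming a handle from the helper above and one of the column families the comparator specs create:

```ruby
result = db.execute("SELECT * FROM comparator_cf_ascii")
while row = result.fetch_row
  row.column_names   # each name cast via @schema.names[column.name]
  row.column_values  # each value cast via @schema.values[column.name]
  row.to_hash        # both zipped together
end
```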
6 lib/cassandra-cql/schema.rb
@@ -62,10 +62,8 @@ def columns
def self.cast(value, type)
return nil if value.nil?
- # 3x faster than split
- klass = type[type.rindex('.')+1..-1]
- if CassandraCQL::Types.const_defined?(klass)
- CassandraCQL::Types.const_get(klass).cast(value)
+ if CassandraCQL::Types.const_defined?(type)
+ CassandraCQL::Types.const_get(type).cast(value)
else
CassandraCQL::Types::AbstractType.cast(value)
end
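Because the long-to-short name split now happens once in `ResultSchema`, `cast` takes the bare marshal class name directly. Mirroring the updated specs:

```ruby
number = 2**33
packed = [number >> 32, number].pack("N*")
CassandraCQL::ColumnFamily.cast(packed, "LongType")  # => 8589934592
CassandraCQL::ColumnFamily.cast(nil, "LongType")     # => nil; nils short-circuit
```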
21 lib/cassandra-cql/statement.rb
@@ -7,9 +7,7 @@ class UnescapableObject < Exception; end
class Statement
KS_CHANGE_RE = /^use (\w+)/i
- SCHEMA_CHANGE_RE = /\s*(create|drop|alter)\s+(\w+)/i
KS_DROP_RE = /^drop keyspace (\w+)/i
- COLFAM_RE = /\s*select.*from\s+'?(\w+)/i
attr_reader :statement
@@ -23,30 +21,19 @@ def prepare(statement)
end
def execute(bind_vars=[], options={})
- column_family = nil
- if @statement =~ COLFAM_RE
- column_family = @handle.schema.column_families[$1].dup
- end
-
if options[:compression]
- res = Result.new(@handle.execute_cql_query(Utility.compress(self.class.sanitize(@statement, bind_vars)), CassandraCQL::Thrift::Compression::GZIP), column_family)
+ res = Result.new(@handle.execute_cql_query(Utility.compress(self.class.sanitize(@statement, bind_vars)), CassandraCQL::Thrift::Compression::GZIP))
else
- res = Result.new(@handle.execute_cql_query(self.class.sanitize(@statement, bind_vars), CassandraCQL::Thrift::Compression::NONE), column_family)
+ res = Result.new(@handle.execute_cql_query(self.class.sanitize(@statement, bind_vars), CassandraCQL::Thrift::Compression::NONE))
end
# Change our keyspace if required
if @statement =~ KS_CHANGE_RE
@handle.keyspace = $1
+ elsif @statement =~ KS_DROP_RE
+ @handle.keyspace = nil
end
- # If we are dropping a keyspace, we should set it to nil
- @handle.keyspace = nil if @statement =~ KS_DROP_RE
-
- # Update the schema if it has changed
- if @statement =~ KS_CHANGE_RE or @statement =~ SCHEMA_CHANGE_RE or @statement =~ KS_DROP_RE
- @handle.update_schema!
- end
-
# We let ints be fetched for now because they'll probably be deprecated later
if res.void?
nil
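Keyspace bookkeeping collapses into a single `if`/`elsif`: a `USE` records the new keyspace on the handle, a `DROP KEYSPACE` clears it, and the schema-refresh regexes disappear entirely because `Database#schema` (above) is now recomputed on demand. In effect:

```ruby
db.execute("USE CassandraCQLTestKeyspace")            # handle.keyspace = "CassandraCQLTestKeyspace"
db.execute("DROP KEYSPACE CassandraCQLTestKeyspace")  # handle.keyspace = nil
```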
2 lib/cassandra-cql/version.rb
@@ -1,3 +1,3 @@
module CassandraCQL
- VERSION = "1.1.1"
+ VERSION = "1.0.1"
end
65 spec/column_family_spec.rb
@@ -2,16 +2,21 @@
include CassandraCQL
describe "ColumnFamily class" do
- let(:standard_column_family) { ColumnFamily.new(yaml_fixture(:standard_column_family)) }
- let(:super_column_family) { ColumnFamily.new(yaml_fixture(:super_column_family)) }
+ before(:each) do
+ @connection = setup_cassandra_connection
+ @connection.execute("USE system")
+ @super_column_family = @connection.schema.column_families["HintsColumnFamily"]
+ @standard_column_family = @connection.schema.column_families["NodeIdInfo"]
+ end
context "initialize" do
it "should set a cf_def" do
- super_column_family.cf_def.should_not be_nil
+ @super_column_family.cf_def.should_not be_nil
+ @standard_column_family.cf_def.should_not be_nil
end
it "should have some common attributes" do
- [standard_column_family, super_column_family].each do |column|
+ [@standard_column_family, @super_column_family].each do |column|
column.name.should_not be_nil
column.id.should_not be_nil
column.column_type.should_not be_nil
@@ -20,97 +25,81 @@
it "should super method_missing" do
expect {
- standard_column_family.this_method_does_not_exist
+ @standard_column_family.this_method_does_not_exist
}.to raise_error NoMethodError
expect {
- super_column_family.this_method_does_not_exist
+ @super_column_family.this_method_does_not_exist
}.to raise_error NoMethodError
end
end
context "with a standard column family" do
it "should be standard" do
- standard_column_family.type.should eq("Standard")
- standard_column_family.standard?.should be_true
- standard_column_family.super?.should_not be_true
+ @standard_column_family.super?.should be_false
+ @standard_column_family.standard?.should be_true
+ @standard_column_family.type.should eq("Standard")
end
end
context "with a super column family" do
it "should be super" do
- super_column_family.type.should eq("Super")
- super_column_family.standard?.should_not be_true
- super_column_family.super?.should be_true
+ @super_column_family.super?.should be_true
+ @super_column_family.standard?.should be_false
+ @super_column_family.type.should eq("Super")
end
end
context "when calling self.cast" do
it "should turn UUID bytes into a UUID object" do
uuid = UUID.new
- ColumnFamily.cast(uuid.bytes, "org.apache.cassandra.db.marshal.TimeUUIDType").should eq(uuid)
+ ColumnFamily.cast(uuid.bytes, "TimeUUIDType").should eq(uuid)
end
it "should turn a UUID bytes into a UUID object" do
uuid = UUID.new
- ColumnFamily.cast(uuid.bytes, "org.apache.cassandra.db.marshal.UUIDType").should eq(uuid)
+ ColumnFamily.cast(uuid.bytes, "UUIDType").should eq(uuid)
end
it "should turn a packed long into a number" do
number = 2**33
packed = [number >> 32, number].pack("N*")
- ColumnFamily.cast(packed, "org.apache.cassandra.db.marshal.LongType").should eq(number)
- ColumnFamily.cast(packed, "org.apache.cassandra.db.marshal.CounterColumnType").should eq(number)
+ ColumnFamily.cast(packed, "LongType").should eq(number)
+ ColumnFamily.cast(packed, "CounterColumnType").should eq(number)
end
it "should turn a packed negative long into a negative number" do
number = -2**33
packed = [number >> 32, number].pack("N*")
- ColumnFamily.cast(packed, "org.apache.cassandra.db.marshal.LongType").should eq(number)
- ColumnFamily.cast(packed, "org.apache.cassandra.db.marshal.CounterColumnType").should eq(number)
+ ColumnFamily.cast(packed, "LongType").should eq(number)
+ ColumnFamily.cast(packed, "CounterColumnType").should eq(number)
end
it "should call to_s with AsciiType" do
obj = double("String")
obj.stub(:to_s) { "string" }
obj.should_receive(:to_s)
- ColumnFamily.cast(obj, "org.apache.cassandra.db.marshal.AsciiType")
+ ColumnFamily.cast(obj, "AsciiType")
end
it "should call to_s with UTF8Type" do
obj = double("String")
obj.stub(:to_s) { "string" }
obj.should_receive(:to_s)
- ColumnFamily.cast(obj, "org.apache.cassandra.db.marshal.UTF8Type")
+ ColumnFamily.cast(obj, "UTF8Type")
end
it "should return self with BytesType" do
obj = Object.new
- ColumnFamily.cast(obj, "org.apache.cassandra.db.marshal.BytesType").object_id.should eq(obj.object_id)
+ ColumnFamily.cast(obj, "BytesType").object_id.should eq(obj.object_id)
end
it "should return nil for all types of nil" do
%w(TimeUUIDType UUIDType LongType IntegerType
UTF8Type AsciiType CounterColumnType).each do |type|
- ColumnFamily.cast(nil, "org.apache.cassandra.db.marshal.#{type}").should eq(nil)
+ ColumnFamily.cast(nil, type).should eq(nil)
end
end
end
-
- context "validations classes" do
- let(:column_family) { ColumnFamily.new(yaml_fixture(:standard_with_validations)) }
- it "should have a hash of column_names and validations" do
- column_family.columns.should be_kind_of(Hash)
- end
-
- it "should have a default validation class" do
- column_family.columns.default.should eq(column_family.cf_def.default_validation_class)
- end
-
- it "should have a validation class for the key" do
- column_family.columns.has_key?(column_family.key_alias).should be_true
- column_family.columns[column_family.key_alias].should eq(column_family.key_validation_class)
- end
- end
end
34 spec/roundtrip_comparator_spec.rb → spec/comparator_spec.rb
@@ -3,12 +3,8 @@
include CassandraCQL
describe "Comparator Roundtrip tests" do
- before(:all) do
- @connection = CassandraCQL::Database.new(["127.0.0.1:9160"], {}, :retries => 2, :timeout => 1) rescue false
- if !@connection.keyspaces.map(&:name).include?("CassandraCQLTestKeyspace")
- @connection.execute("CREATE KEYSPACE CassandraCQLTestKeyspace WITH strategy_class='org.apache.cassandra.locator.SimpleStrategy' AND strategy_options:replication_factor=1")
- end
- @connection.execute("USE CassandraCQLTestKeyspace")
+ before(:each) do
+ @connection = setup_cassandra_connection
end
def create_and_fetch_column(column_family, name)
@@ -25,7 +21,7 @@ def create_column_family(name, comparator_type)
context "with ascii comparator" do
let(:cf_name) { "comparator_cf_ascii" }
- before(:all) { create_column_family(cf_name, 'AsciiType') }
+ before(:each) { create_column_family(cf_name, 'AsciiType') }
it "should return an ascii string" do
create_and_fetch_column(cf_name, "test string").should eq("test string")
@@ -34,7 +30,7 @@ def create_column_family(name, comparator_type)
context "with bigint comparator" do
let(:cf_name) { "comparator_cf_bigint" }
- before(:all) { create_column_family(cf_name, 'LongType') }
+ before(:each) { create_column_family(cf_name, 'LongType') }
def test_for_value(value)
create_and_fetch_column(cf_name, value).should eq(value)
@@ -60,7 +56,7 @@ def test_for_value(value)
context "with blob comparator" do
let(:cf_name) { "comparator_cf_blob" }
- before(:all) { create_column_family(cf_name, 'BytesType') }
+ before(:each) { create_column_family(cf_name, 'BytesType') }
it "should return a blob" do
bytes = "binary\x00"
@@ -71,7 +67,7 @@ def test_for_value(value)
context "with boolean comparator" do
let(:cf_name) { "comparator_cf_boolean" }
- before(:all) { create_column_family(cf_name, 'BooleanType') }
+ before(:each) { create_column_family(cf_name, 'BooleanType') }
it "should return true" do
create_and_fetch_column(cf_name, true).should be_true
@@ -85,7 +81,7 @@ def test_for_value(value)
context "with decimal comparator" do
let(:cf_name) { "comparator_cf_decimal" }
- before(:all) { create_column_family(cf_name, 'DecimalType') }
+ before(:each) { create_column_family(cf_name, 'DecimalType') }
def test_for_value(value)
create_and_fetch_column(cf_name, value).should eq(value)
@@ -103,7 +99,7 @@ def test_for_value(value)
context "with double comparator" do
let(:cf_name) { "comparator_cf_double" }
- before(:all) { create_column_family(cf_name, 'DoubleType') }
+ before(:each) { create_column_family(cf_name, 'DoubleType') }
def test_for_value(value)
create_and_fetch_column(cf_name, value).should be_within(0.1).of(value)
@@ -121,7 +117,7 @@ def test_for_value(value)
context "with float comparator" do
let(:cf_name) { "comparator_cf_float" }
- before(:all) { create_column_family(cf_name, 'FloatType') }
+ before(:each) { create_column_family(cf_name, 'FloatType') }
def test_for_value(value)
create_and_fetch_column(cf_name, value*-1).should eq(value*-1)
@@ -137,7 +133,7 @@ def test_for_value(value)
context "with int comparator" do
let(:cf_name) { "comparator_cf_int" }
- before(:all) { create_column_family(cf_name, 'Int32Type') }
+ before(:each) { create_column_family(cf_name, 'Int32Type') }
def test_for_value(value)
create_and_fetch_column(cf_name, value).should eq(value)
@@ -160,7 +156,7 @@ def test_for_value(value)
context "with text comparator" do
let(:cf_name) { "comparator_cf_text" }
- before(:all) { create_column_family(cf_name, 'UTF8Type') }
+ before(:each) { create_column_family(cf_name, 'UTF8Type') }
it "should return a non-multibyte string" do
create_and_fetch_column(cf_name, "snark").should eq("snark")
@@ -173,7 +169,7 @@ def test_for_value(value)
context "with timestamp comparator" do
let(:cf_name) { "comparator_cf_timestamp" }
- before(:all) { create_column_family(cf_name, 'TimeUUIDType') }
+ before(:each) { create_column_family(cf_name, 'TimeUUIDType') }
it "should return a timestamp" do
uuid = UUID.new
@@ -183,7 +179,7 @@ def test_for_value(value)
context "with uuid comparator" do
let(:cf_name) { "comparator_cf_uuid" }
- before(:all) { create_column_family(cf_name, 'UUIDType') }
+ before(:each) { create_column_family(cf_name, 'UUIDType') }
it "should return a uuid" do
uuid = UUID.new
@@ -193,7 +189,7 @@ def test_for_value(value)
context "with varchar comparator" do
let(:cf_name) { "comparator_cf_varchar" }
- before(:all) { create_column_family(cf_name, 'UTF8Type') }
+ before(:each) { create_column_family(cf_name, 'UTF8Type') }
it "should return a non-multibyte string" do
create_and_fetch_column(cf_name, "snark").should eq("snark")
@@ -206,7 +202,7 @@ def test_for_value(value)
context "with varint comparator" do
let(:cf_name) { "comparator_cf_varint" }
- before(:all) { create_column_family(cf_name, 'IntegerType') }
+ before(:each) { create_column_family(cf_name, 'IntegerType') }
def test_for_value(value)
create_and_fetch_column(cf_name, value).should eq(value)
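The two helpers these examples call are defined earlier in the file, with their bodies elided from the hunk; only the signatures appear. A hypothetical reconstruction, assuming plain CQL 2 statements against the live server:

```ruby
# Hypothetical -- only the method signatures are confirmed by this diff.
def create_column_family(name, comparator_type)
  @connection.execute("CREATE COLUMNFAMILY #{name} (id text PRIMARY KEY) WITH comparator=#{comparator_type}")
end

def create_and_fetch_column(column_family, name)
  @connection.execute("INSERT INTO #{column_family} (id, ?) VALUES (?, ?)", name, "test", "test")
  row = @connection.execute("SELECT ? FROM #{column_family} WHERE id=?", name, "test").fetch
  row.column_names.last  # the comparator round-trips through the column name
end
```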
0 conf/0.8/cassandra.in.sh → spec/conf/0.8/cassandra.in.sh
File renamed without changes.
0 conf/0.8/cassandra.yaml → spec/conf/0.8/cassandra.yaml
File renamed without changes.
0 conf/0.8/log4j-server.properties → spec/conf/0.8/log4j-server.properties
File renamed without changes.
0 conf/0.8/schema.txt → spec/conf/0.8/schema.txt
File renamed without changes.
41 spec/conf/1.0/cassandra.in.sh
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "x$CASSANDRA_HOME" = "x" ]; then
+ CASSANDRA_HOME=`dirname $0`/..
+fi
+
+# The directory where Cassandra's configs live (required)
+if [ "x$CASSANDRA_CONF" = "x" ]; then
+ CASSANDRA_CONF=$CASSANDRA_HOME/conf
+fi
+
+# This can be the path to a jar file, or a directory containing the
+# compiled classes. NOTE: This isn't needed by the startup script,
+# it's just used here in constructing the classpath.
+cassandra_bin=$CASSANDRA_HOME/build/classes/main
+cassandra_bin=$cassandra_bin:$CASSANDRA_HOME/build/classes/thrift
+#cassandra_bin=$cassandra_home/build/cassandra.jar
+
+# JAVA_HOME can optionally be set here
+#JAVA_HOME=/usr/local/jdk6
+
+# The java classpath (required)
+CLASSPATH=$CASSANDRA_CONF:$cassandra_bin
+
+for jar in $CASSANDRA_HOME/lib/*.jar; do
+ CLASSPATH=$CLASSPATH:$jar
+done
416 spec/conf/1.0/cassandra.yaml
@@ -0,0 +1,416 @@
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: 'Test Cluster'
+
+# You should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node. If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, hints will be dropped.
+max_hint_window_in_ms: 3600000 # one hour
+# Sleep this long after delivering each row or row fragment
+hinted_handoff_throttle_delay_in_ms: 50
+
+# authentication backend, implementing IAuthenticator; used to identify users
+authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
+
+# authorization backend, implementing IAuthority; used to limit access/provide permissions
+authority: org.apache.cassandra.auth.AllowAllAuthority
+
+# The partitioner is responsible for distributing rows (by key) across
+# nodes in the cluster. Any IPartitioner may be used, including your
+# own as long as it is on the classpath. Out of the box, Cassandra
+# provides org.apache.cassandra.dht.RandomPartitioner
+# org.apache.cassandra.dht.ByteOrderedPartitioner,
+# org.apache.cassandra.dht.OrderPreservingPartitioner (deprecated),
+# and org.apache.cassandra.dht.CollatingOrderPreservingPartitioner
+# (deprecated).
+#
+# - RandomPartitioner distributes rows across the cluster evenly by md5.
+# When in doubt, this is the best option.
+# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
+# scanning rows in key order, but the ordering can generate hot spots
+# for sequential insertion workloads.
+# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
+# - keys in a less-efficient format and only works with keys that are
+# UTF8-encoded Strings.
+# - CollatingOPP collates according to EN,US rules rather than lexical byte
+# ordering. Use this as an example if you need custom collation.
+#
+# See http://wiki.apache.org/cassandra/Operations for more on
+# partitioners and token selection.
+partitioner: org.apache.cassandra.dht.RandomPartitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+ - data/data
+
+# commit log
+commitlog_directory: data/commitlog
+
+# saved caches
+saved_caches_directory: data/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+ # Ex: "<ip1>,<ip2>,<ip3>"
+ - seeds: "127.0.0.1"
+
+# emergency pressure valve: each time heap usage after a full (CMS)
+# garbage collection is above this fraction of the max, Cassandra will
+# flush the largest memtables.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+#
+# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
+# it is most effective under light to moderate load, or read-heavy
+# workloads; under truly massive write load, it will often be too
+# little, too late.
+flush_largest_memtables_at: 0.75
+
+# emergency pressure valve #2: the first time heap usage after a full
+# (CMS) garbage collection is above this fraction of the max,
+# Cassandra will reduce cache maximum _capacity_ to the given fraction
+# of the current _size_. Should usually be set substantially above
+# flush_largest_memtables_at, since that will have less long-term
+# impact on the system.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+reduce_cache_sizes_at: 0.85
+reduce_cache_capacity_to: 0.6
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+
+# Total memory to use for memtables. Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/3 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs.
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the amount of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the amount of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread. At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Buffer size to use when performing contiguous column slices.
+# Increase this to the size of the column slices you typically perform
+sliced_buffer_size_in_kb: 64
+
+# TCP port, for commands and data
+storage_port: 7000
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: localhost
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+#
+# Leaving this blank has the same effect it does for ListenAddress,
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: localhost
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides three options for the RPC Server:
+#
+# sync -> One connection per thread in the rpc pool (see below).
+# For a very large number of clients, memory will be your limiting
+# factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
+# Connection pooling is very, very strongly recommended.
+#
+# async -> Nonblocking server implementation with one thread to serve
+# rpc connections. This is not recommended for high throughput use
+# cases. Async has been tested to be about 50% slower than sync
+# or hsha and is deprecated: it will be removed in the next major release.
+#
+# hsha -> Stands for "half synchronous, half asynchronous." The rpc thread pool
+# (see below) is used to manage requests, but the threads are multiplexed
+# across the different clients.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max|thread to set request pool size.
+# You would primarily set max for the sync server to safeguard against
+# misbehaved clients; if you do hit the max, Cassandra will block until one
+# disconnects before accepting more. The defaults for sync are min of 16 and max
+# unlimited.
+#
+# For the Hsha server, the min and max both default to quadruple the number of
+# CPU cores.
+#
+# This configuration is ignored by the async server.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+# 0 disables TFramedTransport in favor of TSocket. This option
+# is deprecated; we strongly recommend using Framed mode.
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# Keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns. The competing causes are, Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory. Larger rows will spill
+# over to disk and use a slower two-pass compaction process. A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compactions. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# This setting has no effect on LeveledCompactionStrategy.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable. Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
+# Time to wait for a reply from other nodes before failing the command
+rpc_timeout_in_ms: 10000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch, which will let Cassandra know enough
+# about your network topology to route requests efficiently.
+# Out of the box, Cassandra provides
+# - org.apache.cassandra.locator.SimpleSnitch:
+# Treats Strategy order as proximity. This improves cache locality
+# when disabling read repair, which can further improve throughput.
+# - org.apache.cassandra.locator.RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's
+# IP address, respectively
+# org.apache.cassandra.locator.PropertyFileSnitch:
+# - Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+# - throttle_limit -- The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# - default_weight -- default_weight is optional and allows for
+# overriding the default which is 1.
+# - weights -- Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primary
+# row index in terms of space versus time. The larger the interval,
+# the smaller and less effective the sampling will be. In technical
+# terms, the interval corresponds to the number of index entries that
+# are skipped between taking each sample. All the sampled entries
+# must fit in memory. Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best trade
+# offs. This value is not often changed, however if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without an impact on performance.
+index_interval: 128
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
40 spec/conf/1.0/log4j-server.properties
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# for production, you should probably set pattern to %c instead of %l.
+# (%l is slower.)
+
+# output messages into a rolling log file as well as stdout
+log4j.rootLogger=DEBUG,stdout,R
+
+# stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
+
+# rolling log file
+log4j.appender.R=org.apache.log4j.RollingFileAppender
+log4j.appender.R.maxFileSize=20MB
+log4j.appender.R.maxBackupIndex=50
+log4j.appender.R.layout=org.apache.log4j.PatternLayout
+log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
+# Edit the next line to point to your logs directory
+log4j.appender.R.File=data/logs/system.log
+
+# Application logging options
+#log4j.logger.org.apache.cassandra=DEBUG
+#log4j.logger.org.apache.cassandra.db=DEBUG
+#log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
10 spec/conf/1.0/schema.txt
@@ -0,0 +1,10 @@
+create keyspace TypeConversions with
+ placement_strategy = 'org.apache.cassandra.locator.LocalStrategy' AND
+ strategy_options = [{replication_factor:1}];
+use TypeConversions;
+create column family UUIDColumnConversion with comparator = TimeUUIDType;
+create column family SuperUUID with comparator = TimeUUIDType and column_type = Super;
+create column family IntegerConversion with comparator = 'IntegerType';
+create column family LongConversion with comparator = 'LongType';
+create column family CounterConversion with comparator = 'UTF8Type' and
+ default_validation_class = CounterColumnType;
33 spec/fixtures/result_for_sparse_columns.yaml
@@ -1,33 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::CqlResult
-rows:
-- !ruby/object:CassandraCQL::Thrift::CqlRow
- columns:
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col1
- timestamp: 1316558825675
- value: val1
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col2
- timestamp: 1316558825675
- value: val2
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col3
- timestamp: 1316558825675
- value: val3
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col4
- key: row_key
-- !ruby/object:CassandraCQL::Thrift::CqlRow
- columns:
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col1
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col2
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col3
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col4
- timestamp: 1316558825683
- value: val4
- key: row_key1
-type: 1
16 spec/fixtures/result_for_standard_counter.yaml
@@ -1,16 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::CqlResult
-rows:
-- !ruby/object:CassandraCQL::Thrift::CqlRow
- columns:
- - !ruby/object:CassandraCQL::Thrift::Column
- name: KEY
- timestamp: -1
- value: row_key
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col_counter
- timestamp: 1316563572627
- value: !binary |
- AAAAAAAAAAE=
-
- key: row_key
-type: 1
28 spec/fixtures/result_for_standard_with_long_validation.yaml
@@ -1,28 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::CqlResult
-rows:
-- !ruby/object:CassandraCQL::Thrift::CqlRow
- columns:
- - !ruby/object:CassandraCQL::Thrift::Column
- name: KEY
- timestamp: -1
- value: row_key
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col1
- timestamp: 1316560402904
- value: !binary |
- AAAAAAAAAAE=
-
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col2
- timestamp: 1316560402904
- value: !binary |
- AAAAAAAAAAI=
-
- - !ruby/object:CassandraCQL::Thrift::Column
- name: col3
- timestamp: 1316560402904
- value: !binary |
- AAAAAAAAAAM=
-
- key: row_key
-type: 1
65 spec/fixtures/result_for_standard_with_validations.yaml
@@ -1,65 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::CqlResult
-rows:
-- !ruby/object:CassandraCQL::Thrift::CqlRow
- columns:
- - !ruby/object:CassandraCQL::Thrift::Column
- name: id
- timestamp: -1
- value: !binary |
- /vI=
-
- - !ruby/object:CassandraCQL::Thrift::Column
- name: created_at
- timestamp: 1313352911996
- value: !binary |
- HVjV1sayEeCUwDPvrI9RHg==
-
- - !ruby/object:CassandraCQL::Thrift::Column
- name: default_column
- timestamp: 1313353037633
- value: some other default value
- - !ruby/object:CassandraCQL::Thrift::Column
- name: name
- timestamp: 1313352960214
- value: Kelley
- - !ruby/object:CassandraCQL::Thrift::Column
- name: serial
- timestamp: 1313352941406
- value: !binary |
- AAAAAAAAW6A=
-
- key: !binary |
- /vI=
-
-- !ruby/object:CassandraCQL::Thrift::CqlRow
- columns:
- - !ruby/object:CassandraCQL::Thrift::Column
- name: id
- timestamp: -1
- value: !binary |
- /vE=
-
- - !ruby/object:CassandraCQL::Thrift::Column
- name: created_at
- timestamp: 1313352903358
- value: !binary |
- GDK0AMayEeCTDOF8opXpJg==
-
- - !ruby/object:CassandraCQL::Thrift::Column
- name: default_column
- timestamp: 1313353031433
- value: some default value
- - !ruby/object:CassandraCQL::Thrift::Column
- name: name
- timestamp: 1313352968205
- value: Kevin
- - !ruby/object:CassandraCQL::Thrift::Column
- name: serial
- timestamp: 1313352934397
- value: !binary |
- AAAAAAAAMDk=
-
- key: !binary |
- /vE=
-
-type: 1
17 spec/fixtures/result_for_timeuuid.yaml
@@ -1,17 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::CqlResult
-rows:
-- !ruby/object:CassandraCQL::Thrift::CqlRow
- columns:
- - !ruby/object:CassandraCQL::Thrift::Column
- name: KEY
- timestamp: -1
- value: incinerators-councils
- - !ruby/object:CassandraCQL::Thrift::Column
- name: !binary |
- tAeA9KciEeCFVRy7npxYoA==
-
- timestamp: 1313700445148
- value: Olympiad-bayonet
- key: incinerators-councils
-
-type: 1
2 spec/fixtures/result_for_void_operations.yaml
@@ -1,2 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::CqlResult
-type: 2
26 spec/fixtures/standard_column_family.yaml
@@ -1,26 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::CfDef
- column_metadata: []
-
- column_type: Standard
- comment: nodeId and their metadata
- comparator_type: org.apache.cassandra.db.marshal.TimeUUIDType
- default_validation_class: org.apache.cassandra.db.marshal.BytesType
- gc_grace_seconds: 0
- id: 6
- key_alias: KEY
- key_cache_save_period_in_seconds: 0
- key_cache_size: 0.01
- key_validation_class: org.apache.cassandra.db.marshal.BytesType
- keyspace: system
- max_compaction_threshold: 32
- memtable_flush_after_mins: 1440
- memtable_operations_in_millions: 0.0375
- memtable_throughput_in_mb: 8
- merge_shards_chance: 0.0
- min_compaction_threshold: 4
- name: NodeIdInfo
- read_repair_chance: 0.0
- replicate_on_write: true
- row_cache_provider: org.apache.cassandra.cache.ConcurrentLinkedHashCacheProvider
- row_cache_save_period_in_seconds: 0
- row_cache_size: 0.0
26 spec/fixtures/standard_counter.yaml
@@ -1,26 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::CfDef
-column_metadata: []
-
-column_type: Standard
-comment: ""
-comparator_type: org.apache.cassandra.db.marshal.AsciiType
-default_validation_class: org.apache.cassandra.db.marshal.CounterColumnType
-gc_grace_seconds: 864000
-id: 1006
-key_alias: KEY
-key_cache_save_period_in_seconds: 14400
-key_cache_size: 200000.0
-key_validation_class: org.apache.cassandra.db.marshal.UTF8Type
-keyspace: Keyspace1
-max_compaction_threshold: 32
-memtable_flush_after_mins: 1440
-memtable_operations_in_millions: 0.290625
-memtable_throughput_in_mb: 62
-merge_shards_chance: 0.0
-min_compaction_threshold: 4
-name: Foo
-read_repair_chance: 1.0
-replicate_on_write: true
-row_cache_provider: org.apache.cassandra.cache.ConcurrentLinkedHashCacheProvider
-row_cache_save_period_in_seconds: 0
-row_cache_size: 0.0
26 spec/fixtures/standard_with_long_validation.yaml
@@ -1,26 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::CfDef
-column_metadata: []
-
-column_type: Standard
-comment: ""
-comparator_type: org.apache.cassandra.db.marshal.UTF8Type
-default_validation_class: org.apache.cassandra.db.marshal.LongType
-gc_grace_seconds: 864000
-id: 1006
-key_alias: KEY
-key_cache_save_period_in_seconds: 14400
-key_cache_size: 200000.0
-key_validation_class: org.apache.cassandra.db.marshal.AsciiType
-keyspace: Keyspace1
-max_compaction_threshold: 32
-memtable_flush_after_mins: 1440
-memtable_operations_in_millions: 0.290625
-memtable_throughput_in_mb: 62
-merge_shards_chance: 0.0
-min_compaction_threshold: 4
-name: Foo
-read_repair_chance: 1.0
-replicate_on_write: true
-row_cache_provider: org.apache.cassandra.cache.ConcurrentLinkedHashCacheProvider
-row_cache_save_period_in_seconds: 0
-row_cache_size: 0.0
40 spec/fixtures/standard_with_validations.yaml
@@ -1,40 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::CfDef
- column_metadata:
- - !ruby/object:CassandraCQL::Thrift::ColumnDef
- name: bytes
- validation_class: org.apache.cassandra.db.marshal.BytesType
- - !ruby/object:CassandraCQL::Thrift::ColumnDef
- name: created_at
- validation_class: org.apache.cassandra.db.marshal.TimeUUIDType
- - !ruby/object:CassandraCQL::Thrift::ColumnDef
- name: serial
- validation_class: org.apache.cassandra.db.marshal.LongType
- - !ruby/object:CassandraCQL::Thrift::ColumnDef
- name: number
- validation_class: org.apache.cassandra.db.marshal.LongType
- - !ruby/object:CassandraCQL::Thrift::ColumnDef
- name: name
- validation_class: org.apache.cassandra.db.marshal.UTF8Type
- column_type: Standard
- comment: ""
- comparator_type: org.apache.cassandra.db.marshal.UTF8Type
- default_validation_class: org.apache.cassandra.db.marshal.UTF8Type
- gc_grace_seconds: 864000
- id: 1001
- key_alias: id
- key_cache_save_period_in_seconds: 14400
- key_cache_size: 200000.0
- key_validation_class: org.apache.cassandra.db.marshal.BytesType
- keyspace: Keyspace1
- max_compaction_threshold: 32
- memtable_flush_after_mins: 1440
- memtable_operations_in_millions: 0.290625
- memtable_throughput_in_mb: 62
- merge_shards_chance: 0.0
- min_compaction_threshold: 4
- name: Foo
- read_repair_chance: 1.0
- replicate_on_write: true
- row_cache_provider: org.apache.cassandra.cache.ConcurrentLinkedHashCacheProvider
- row_cache_save_period_in_seconds: 0
- row_cache_size: 0.0
27 spec/fixtures/super_column_family.yaml
@@ -1,27 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::CfDef
- column_metadata: []
-
- column_type: Super
- comment: hinted handoff data
- comparator_type: org.apache.cassandra.db.marshal.BytesType
- default_validation_class: org.apache.cassandra.db.marshal.BytesType
- gc_grace_seconds: 0
- id: 1
- key_alias: KEY
- key_cache_save_period_in_seconds: 0
- key_cache_size: 0.01
- key_validation_class: org.apache.cassandra.db.marshal.BytesType
- keyspace: system
- max_compaction_threshold: 32
- memtable_flush_after_mins: 1440
- memtable_operations_in_millions: 0.15
- memtable_throughput_in_mb: 32
- merge_shards_chance: 0.0
- min_compaction_threshold: 4
- name: HintsColumnFamily
- read_repair_chance: 0.0
- replicate_on_write: true
- row_cache_provider: org.apache.cassandra.cache.ConcurrentLinkedHashCacheProvider
- row_cache_save_period_in_seconds: 0
- row_cache_size: 0.0
- subcomparator_type: org.apache.cassandra.db.marshal.BytesType
164 spec/fixtures/system_schema.yaml
@@ -1,164 +0,0 @@
---- !ruby/object:CassandraCQL::Thrift::KsDef
-cf_defs:
-- !ruby/object:CassandraCQL::Thrift::CfDef
- column_metadata: []
-
- column_type: Standard
- comment: nodeId and their metadata
- comparator_type: org.apache.cassandra.db.marshal.TimeUUIDType
- default_validation_class: org.apache.cassandra.db.marshal.BytesType
- gc_grace_seconds: 0
- id: 6
- key_alias: KEY
- key_cache_save_period_in_seconds: 0
- key_cache_size: 0.01
- key_validation_class: org.apache.cassandra.db.marshal.BytesType
- keyspace: system
- max_compaction_threshold: 32
- memtable_flush_after_mins: 1440
- memtable_operations_in_millions: 0.0375
- memtable_throughput_in_mb: 8
- merge_shards_chance: 0.0
- min_compaction_threshold: 4
- name: NodeIdInfo
- read_repair_chance: 0.0
- replicate_on_write: true
- row_cache_provider: org.apache.cassandra.cache.ConcurrentLinkedHashCacheProvider
- row_cache_save_period_in_seconds: 0
- row_cache_size: 0.0
-- !ruby/object:CassandraCQL::Thrift::CfDef
- column_metadata: []
-
- column_type: Standard
- comment: indexes that have been completed
- comparator_type: org.apache.cassandra.db.marshal.UTF8Type
- default_validation_class: org.apache.cassandra.db.marshal.BytesType
- gc_grace_seconds: 0
- id: 5
- key_alias: KEY
- key_cache_save_period_in_seconds: 0
- key_cache_size: 0.01
- key_validation_class: org.apache.cassandra.db.marshal.BytesType
- keyspace: system
- max_compaction_threshold: 32
- memtable_flush_after_mins: 1440
- memtable_operations_in_millions: 0.0375
- memtable_throughput_in_mb: 8
- merge_shards_chance: 0.0
- min_compaction_threshold: 4
- name: IndexInfo
- read_repair_chance: 0.0
- replicate_on_write: true
- row_cache_provider: org.apache.cassandra.cache.ConcurrentLinkedHashCacheProvider
- row_cache_save_period_in_seconds: 0
- row_cache_size: 0.0
-- !ruby/object:CassandraCQL::Thrift::CfDef
- column_metadata: []
-
- column_type: Standard
- comment: current state of the schema
- comparator_type: org.apache.cassandra.db.marshal.UTF8Type
- default_validation_class: org.apache.cassandra.db.marshal.BytesType
- gc_grace_seconds: 0
- id: 3
- key_alias: KEY
- key_cache_save_period_in_seconds: 0
- key_cache_size: 0.01
- key_validation_class: org.apache.cassandra.db.marshal.BytesType
- keyspace: system
- max_compaction_threshold: 32
- memtable_flush_after_mins: 1440
- memtable_operations_in_millions: 0.0375
- memtable_throughput_in_mb: 8
- merge_shards_chance: 0.0
- min_compaction_threshold: 4
- name: Schema
- read_repair_chance: 0.0
- replicate_on_write: true
- row_cache_provider: org.apache.cassandra.cache.ConcurrentLinkedHashCacheProvider
- row_cache_save_period_in_seconds: 0
- row_cache_size: 0.0
-- !ruby/object:CassandraCQL::Thrift::CfDef
- column_metadata: []
-
- column_type: Standard
- comment: individual schema mutations
- comparator_type: org.apache.cassandra.db.marshal.TimeUUIDType
- default_validation_class: org.apache.cassandra.db.marshal.BytesType
- gc_grace_seconds: 0
- id: 2
- key_alias: KEY
- key_cache_save_period_in_seconds: 0
- key_cache_size: 0.01
- key_validation_class: org.apache.cassandra.db.marshal.BytesType
- keyspace: system
- max_compaction_threshold: 32
- memtable_flush_after_mins: 1440
- memtable_operations_in_millions: 0.0375
- memtable_throughput_in_mb: 8
- merge_shards_chance: 0.0
- min_compaction_threshold: 4
- name: Migrations
- read_repair_chance: 0.0
- replicate_on_write: true
- row_cache_provider: org.apache.cassandra.cache.ConcurrentLinkedHashCacheProvider
- row_cache_save_period_in_seconds: 0
- row_cache_size: 0.0
-- !ruby/object:CassandraCQL::Thrift::CfDef
- column_metadata: []
-
- column_type: Standard
- comment: persistent metadata for the local node
- comparator_type: org.apache.cassandra.db.marshal.BytesType
- default_validation_class: org.apache.cassandra.db.marshal.BytesType
- gc_grace_seconds: 0
- id: 0
- key_alias: KEY
- key_cache_save_period_in_seconds: 0
- key_cache_size: 0.01
- key_validation_class: org.apache.cassandra.db.marshal.BytesType
- keyspace: system
- max_compaction_threshold: 32
- memtable_flush_after_mins: 1440
- memtable_operations_in_millions: 0.0375
- memtable_throughput_in_mb: 8
- merge_shards_chance: 0.0
- min_compaction_threshold: 4
- name: LocationInfo
- read_repair_chance: 0.0
- replicate_on_write: true
- row_cache_provider: org.apache.cassandra.cache.ConcurrentLinkedHashCacheProvider
- row_cache_save_period_in_seconds: 0
- row_cache_size: 0.0
-- !ruby/object:CassandraCQL::Thrift::CfDef
- column_metadata: []
-
- column_type: Super
- comment: hinted handoff data
- comparator_type: org.apache.cassandra.db.marshal.BytesType
- default_validation_class: org.apache.cassandra.db.marshal.BytesType
- gc_grace_seconds: 0
- id: 1
- key_alias: KEY
- key_cache_save_period_in_seconds: 0
- key_cache_size: 0.01
- key_validation_class: org.apache.cassandra.db.marshal.BytesType
- keyspace: system
- max_compaction_threshold: 32
- memtable_flush_after_mins: 1440
- memtable_operations_in_millions: 0.15
- memtable_throughput_in_mb: 32
- merge_shards_chance: 0.0
- min_compaction_threshold: 4
- name: HintsColumnFamily
- read_repair_chance: 0.0
- replicate_on_write: true
- row_cache_provider: org.apache.cassandra.cache.ConcurrentLinkedHashCacheProvider
- row_cache_save_period_in_seconds: 0
- row_cache_size: 0.0
- subcomparator_type: org.apache.cassandra.db.marshal.BytesType
-name: system
-replication_factor: 1
-strategy_class: org.apache.cassandra.locator.LocalStrategy
-strategy_options:
- replication_factor: "1"
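
With this fixture gone, the same keyspace metadata comes from a running node. A minimal sketch of reading it live (not part of this commit), assuming a node on 127.0.0.1:9160 and the connection style used in the specs below:

require 'cassandra-cql'

db = CassandraCQL::Database.new(["127.0.0.1:9160"], {})
db.execute("USE system")
# Column family names now come straight from the server's schema
# instead of the serialized KsDef this fixture carried.
db.schema.column_family_names.each { |name| puts name }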
View
136 spec/result_spec.rb
@@ -2,45 +2,29 @@
include CassandraCQL
describe "void results" do
- let(:column_family) { ColumnFamily.new(yaml_fixture(:standard_with_validations)) }
- let(:cql_result) { yaml_fixture(:result_for_void_operations) }
- let(:result) { Result.new(cql_result, column_family) }
- it "should return true only for void?" do
- result.void?.should be_true
- result.rows?.should be_false
- result.int?.should be_false
+ before(:each) do
+ @connection = setup_cassandra_connection
end
-end
-describe "long validation" do
- let(:column_family) { ColumnFamily.new(yaml_fixture(:standard_with_long_validation)) }
- let(:cql_result) { yaml_fixture(:result_for_standard_with_long_validation) }
- let(:result) { Result.new(cql_result, column_family) }
- it "should return UTF8 column_names and Fixnum values" do
- result.fetch do |row|
- row.column_names.should eq(['KEY', 'col1', 'col2', 'col3'])
- row.column_values.should eq(['row_key', 1, 2, 3])
- end
+ it "should return nil" do
+ @connection.execute("USE system").should be_nil
end
end
-describe "counter validation" do
- let(:column_family) { ColumnFamily.new(yaml_fixture(:standard_counter)) }
- let(:cql_result) { yaml_fixture(:result_for_standard_counter) }
- let(:result) { Result.new(cql_result, column_family) }
- it "should return UTF8 column_names and Fixnum values" do
- result.fetch do |row|
- row.column_names.should eq(['KEY', 'col_counter'])
- row.column_values.should eq(['row_key', 1])
+describe "sparse row results" do
+ before(:each) do
+ @connection = setup_cassandra_connection
+ if !@connection.schema.column_family_names.include?('sparse_results')
+ @connection.execute("CREATE COLUMNFAMILY sparse_results (id varchar PRIMARY KEY)")
+ else
+ @connection.execute("TRUNCATE sparse_results")
end
end
-end
-describe "sparse row results" do
- let(:column_family) { ColumnFamily.new(yaml_fixture(:standard_with_validations)) }
- let(:cql_result) { yaml_fixture(:result_for_sparse_columns) }
- let(:result) { Result.new(cql_result, column_family) }
it "should should be handled properly" do
+ @connection.execute("INSERT INTO sparse_results (id, col1, col2, col3) VALUES (?, ?, ?, ?)", 'key1', 'val1', 'val2', 'val3').should be_nil
+ @connection.execute("INSERT INTO sparse_results (id, col4, col5, col6) VALUES (?, ?, ?, ?)", 'key2', 'val4', 'val5', 'val6').should be_nil
+ result = @connection.execute("SELECT col1, col2, col3, col4 FROM sparse_results")
result.rows.should eq(2)
  # First row should have 3 columns set, one nil
row = result.fetch
@@ -57,105 +41,99 @@
end
describe "row results" do
- let(:column_family) { ColumnFamily.new(yaml_fixture(:standard_with_validations)) }
- let(:cql_result) { yaml_fixture(:result_for_standard_with_validations) }
- let(:result) { Result.new(cql_result, column_family) }
+ before(:each) do
+ @connection = setup_cassandra_connection
+ @connection.execute("INSERT INTO sparse_results (id, col1, col2, col3) VALUES (?, ?, ?, ?)", 'key1', 'val1', 'val2', 'val3').should be_nil
+ @connection.execute("INSERT INTO sparse_results (id, col4, col5, col6) VALUES (?, ?, ?, ?)", 'key2', 'val4', 'val5', 'val6').should be_nil
+ @result = @connection.execute("SELECT col1, col2, col3, col4 FROM sparse_results")
+ end
it "should return true only for rows?" do
- result.void?.should be_false
- result.rows?.should be_true
- result.int?.should be_false
+ @result.void?.should be_false
+ @result.rows?.should be_true
+ @result.int?.should be_false
end
it "should have two rows" do
- result.rows.should eq(2)
+ @result.rows.should eq(2)
end
context "initialize" do
it "should have a cursor set to 0" do
- result.instance_variable_get(:@cursor).should eq(0)
+ @result.instance_variable_get(:@cursor).should eq(0)
end
- it "should have a duplicate of the column_family" do
- result.instance_variable_get(:@column_family).cf_def.should eq(column_family.cf_def)
- end
-
- it "should have a duplicate of the column_family" do
- result.instance_variable_get(:@column_family).should_not eq(column_family)
- result.instance_variable_get(:@column_family).cf_def.should eq(column_family.cf_def)
- end
-
it "should have a result" do
- result.instance_variable_get(:@result).should be_kind_of(CassandraCQL::Thrift::CqlResult)
+ @result.instance_variable_get(:@result).should be_kind_of(CassandraCQL::Thrift::CqlResult)
end
end
context "setting the cursor" do
it "should set the cursor" do
expect {
- result.cursor = 15
+ @result.cursor = 15
}.to_not raise_error
- result.instance_variable_get(:@cursor).should eq(15)
+ @result.instance_variable_get(:@cursor).should eq(15)
end
it "should not set the cursor" do
expect {
- result.cursor = Object
+ @result.cursor = Object
}.to raise_error(CassandraCQL::Error::InvalidCursor)
end
end
context "fetching a single row" do
it "should return a row object twice then nil" do
- result.fetch_row.should be_kind_of(Row)
- result.instance_variable_get(:@cursor).should eq(1)
+ @result.fetch_row.should be_kind_of(Row)
+ @result.instance_variable_get(:@cursor).should eq(1)
- result.fetch_row.should be_kind_of(Row)
- result.instance_variable_get(:@cursor).should eq(2)
+ @result.fetch_row.should be_kind_of(Row)
+ @result.instance_variable_get(:@cursor).should eq(2)
- result.fetch_row.should be_nil
- result.instance_variable_get(:@cursor).should eq(2)
+ @result.fetch_row.should be_nil
+ @result.instance_variable_get(:@cursor).should eq(2)
end
end
context "resetting cursor should fetch the same row" do
it "should return the same row" do
- result.instance_variable_get(:@cursor).should eq(0)
- arr = result.fetch_array
- result.cursor = 0
- arr.should eq(result.fetch_array)
+ @result.instance_variable_get(:@cursor).should eq(0)
+ arr = @result.fetch_array
+ @result.cursor = 0
+ arr.should eq(@result.fetch_array)
end
end
context "fetch without a block" do
it "should return a row twice then nil" do
- result.fetch.should be_kind_of(Row)
- result.instance_variable_get(:@cursor).should eq(1)
+ @result.fetch.should be_kind_of(Row)
+ @result.instance_variable_get(:@cursor).should eq(1)
- result.fetch.should be_kind_of(Row)
- result.instance_variable_get(:@cursor).should eq(2)
+ @result.fetch.should be_kind_of(Row)
+ @result.instance_variable_get(:@cursor).should eq(2)
- result.fetch.should be_nil
- result.instance_variable_get(:@cursor).should eq(2)
+ @result.fetch.should be_nil
+ @result.instance_variable_get(:@cursor).should eq(2)
end
end
context "fetch with a block" do
it "fetched count should equal the number of rows" do
counter = 0
- result.fetch do |row|
+ @result.fetch do |row|
counter += 1
row.should be_kind_of(Row)
end
- counter.should eq(result.rows)
+ counter.should eq(@result.rows)
end
end
context "fetch_array without a block" do
it "should return a row as an array" do
- row = result.fetch
- result.cursor = 0
- arr = result.fetch_array
+ row = @result.fetch
+ @result.cursor = 0
+ arr = @result.fetch_array
arr.should be_kind_of(Array)
arr.should eq(row.column_values)
end
@@ -164,19 +142,19 @@
context "fetch_array_with a block" do
it "fetched count should equal the number of rows" do
counter = 0
- result.fetch_array do |arr|
+ @result.fetch_array do |arr|
counter += 1
arr.should be_kind_of(Array)
end
- counter.should eq(result.rows)
+ counter.should eq(@result.rows)
end
end
context "fetch_hash without a block" do
it "should return a hash" do
- row = result.fetch
- result.cursor = 0
- hash = result.fetch_hash
+ row = @result.fetch
+ @result.cursor = 0
+ hash = @result.fetch_hash
hash.should be_kind_of(Hash)
hash.should eq(row.to_hash)
end
@@ -185,11 +163,11 @@
context "fetch_hash_with a block" do
it "should iterate rows() times and return hashes" do
counter = 0
- result.fetch_hash do |hash|
+ @result.fetch_hash do |hash|
counter += 1
hash.should be_kind_of(Hash)
end
- counter.should eq(result.rows)
+ counter.should eq(@result.rows)
end
end
end
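
In use, the cursor behaviour these specs pin down looks like this (a sketch, assuming the sparse_results column family created above):

result = @connection.execute("SELECT col1, col2, col3, col4 FROM sparse_results")
result.rows             # => 2
result.fetch            # => Row; advances @cursor from 0 to 1
result.fetch_array      # the next row as an Array of column values
result.cursor = 0       # rewind and re-read from the first row
result.fetch_hash       # the first row as a column-name => value Hash
result.cursor = Object  # raises CassandraCQL::Error::InvalidCursor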
View
69 spec/row_spec.rb
@@ -3,81 +3,52 @@
describe "basic methods" do
- let(:column_family) { ColumnFamily.new(yaml_fixture(:standard_with_validations)) }
- let(:cql_result_row) { yaml_fixture(:result_for_standard_with_validations).rows[0] }
- let(:row) { Row.new(cql_result_row, column_family) }
-
- let(:cf_time_uuid_comp) { ColumnFamily.new(yaml_fixture(:standard_column_family)) }
- let(:cql_result_time_uuid_row) { yaml_fixture(:result_for_timeuuid).rows[0] }
- let(:row_time_uuid) { Row.new(cql_result_time_uuid_row, cf_time_uuid_comp) }
-
- context "initialize" do
- it "should set row and column_family" do
- row.row.should eq(cql_result_row)
- row.instance_variable_get(:@column_family).should eq(column_family)
+ before(:each) do
+ @connection = setup_cassandra_connection
+ if @connection.schema.column_family_names.include?('basic_methods')
+ @connection.execute("DROP COLUMNFAMILY basic_methods")
end
+ @connection.execute("CREATE COLUMNFAMILY basic_methods (id varchar PRIMARY KEY, created_at uuid, default_column varchar, name varchar, serial int)")
+
+ @connection.execute("INSERT INTO basic_methods (id, created_at, name, serial, default_column) VALUES (?, ?, ?, ?, ?)", 'test', Time.new, 'name', 12345, 'snork')
+ @row = @connection.execute("SELECT * FROM basic_methods WHERE id=?", "test").fetch
end
-
+
context "column_names" do
it "should return a list of column names" do
- row.column_names.sort.should eq(["created_at", "default_column", "id", "name", "serial"].sort)
+ @row.column_names.sort.should eq(["created_at", "default_column", "id", "name", "serial"].sort)
end
end
context "column_values" do
it "should return a list of column values as Ruby objects" do
- row.column_values.should be_kind_of(Array)
- row.column_values.size.should eq(row.column_names.size)
+ @row.column_values.should be_kind_of(Array)
+ @row.column_values.size.should eq(@row.column_names.size)
end
end
-
- context "checking types" do
- it "should return a UUID for created_at" do
- row['created_at'].should be_kind_of(UUID)
- end
-
- it "should return a Fixnum for serial" do
- row['serial'].should be_kind_of(Fixnum)
- end
-
- it "should return a String for name" do
- row['name'].should be_kind_of(String)
- end
-
- it "should return a String for id" do
- row['id'].should be_kind_of(String)
- end
-
- it "should return a String for default_column" do
- row['default_column'].should be_kind_of(String)
- end
-
- it "should not crash when getting the row key name from column names" do
- lambda { row_time_uuid.column_names }.should_not raise_error
- end
- end
-
+
context "columns" do
it "should equal the number of columns" do
- row.columns.should eq(cql_result_row.columns.size)
+ @row.column_names.size.should eq(@row.column_values.size)
+ @row.columns.should eq(@row.column_names.size)
end
end
context "key" do
it "should return the cql_result row key" do
- row.key.should eq(cql_result_row.key)
+ @row.key.should eq(@row.row.key)
end
end
- context "checking coersion" do
+ context "checking casting" do
it "should return column_values for to_a" do
- row.to_a.should eq(row.column_values)
+ @row.to_a.should eq(@row.column_values)
end
it "should return a hash for to_hash" do
- h = row.to_hash
+ h = @row.to_hash
h.should be_kind_of(Hash)
- h.keys.sort.should eq(row.column_names.sort)
+ h.keys.sort.should eq(@row.column_names.sort)
end
end
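
Taken together, the Row accessors exercised here, as a usage sketch against the basic_methods column family created in before(:each):

row = @connection.execute("SELECT * FROM basic_methods WHERE id=?", "test").fetch
row.column_names    # ["created_at", "default_column", "id", "name", "serial"]; order not guaranteed, so the spec sorts
row.column_values   # typecast Ruby objects, positionally matching column_names
row.columns         # number of columns, equal to column_names.size
row['serial']       # single value by column name; => 12345 here
row.to_a            # same as column_values
row.to_hash         # Hash of column name => value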
View
34 spec/roundtrip_rowkey_spec.rb → spec/rowkey_spec.rb
@@ -3,12 +3,8 @@
include CassandraCQL
describe "Validation Roundtrip tests" do
- before(:all) do
- @connection = CassandraCQL::Database.new(["127.0.0.1:9160"], {}, :retries => 20, :timeout => 1) rescue false
- if !@connection.keyspaces.map(&:name).include?("CassandraCQLTestKeyspace")
- @connection.execute("CREATE KEYSPACE CassandraCQLTestKeyspace WITH strategy_class='org.apache.cassandra.locator.SimpleStrategy' AND strategy_options:replication_factor=1")
- end
- @connection.execute("USE CassandraCQLTestKeyspace")
+ before(:each) do
+ @connection = setup_cassandra_connection
end
def create_and_fetch_column(column_family, row_key)
@@ -25,7 +21,7 @@ def create_column_family(name, test_row_key_type)
context "with ascii row_key_validation" do
let(:cf_name) { "row_key_validation_cf_ascii" }
- before(:all) { create_column_family(cf_name, 'ascii') }
+ before(:each) { create_column_family(cf_name, 'ascii') }
it "should return an ascii string" do
create_and_fetch_column(cf_name, "test string").should eq("test string")
@@ -34,7 +30,7 @@ def create_column_family(name, test_row_key_type)
context "with bigint row_key_validation" do
let(:cf_name) { "row_key_validation_cf_bigint" }
- before(:all) { create_column_family(cf_name, 'bigint') }
+ before(:each) { create_column_family(cf_name, 'bigint') }
def test_for_value(value)
create_and_fetch_column(cf_name, value).should eq(value)
@@ -60,7 +56,7 @@ def test_for_value(value)
context "with blob row_key_validation" do
let(:cf_name) { "row_key_validation_cf_blob" }
- before(:all) { create_column_family(cf_name, 'blob') }
+ before(:each) { create_column_family(cf_name, 'blob') }
it "should return a blob" do
bytes = "binary\x00"
@@ -71,7 +67,7 @@ def test_for_value(value)
context "with boolean row_key_validation" do
let(:cf_name) { "row_key_validation_cf_boolean" }
- before(:all) { create_column_family(cf_name, 'boolean') }
+ before(:each) { create_column_family(cf_name, 'boolean') }
it "should return true" do
create_and_fetch_column(cf_name, true).should be_true
@@ -84,7 +80,7 @@ def test_for_value(value)
context "with decimal row_key_validation" do
let(:cf_name) { "row_key_validation_cf_decimal" }
- before(:all) { create_column_family(cf_name, 'decimal') }
+ before(:each) { create_column_family(cf_name, 'decimal') }
def test_for_value(value)
create_and_fetch_column(cf_name, value*-1).should eq(value*-1)
@@ -101,7 +97,7 @@ def test_for_value(value)
context "with double row_key_validation" do
let(:cf_name) { "row_key_validation_cf_double" }
- before(:all) { create_column_family(cf_name, 'double') }
+ before(:each) { create_column_family(cf_name, 'double') }
def test_for_value(value)
create_and_fetch_column(cf_name, value).should be_within(0.1).of(value)
@@ -119,7 +115,7 @@ def test_for_value(value)
context "with float row_key_validation" do
let(:cf_name) { "row_key_validation_cf_float" }
- before(:all) { create_column_family(cf_name, 'float') }
+ before(:each) { create_column_family(cf_name, 'float') }
def test_for_value(value)
create_and_fetch_column(cf_name, value*-1).should eq(value*-1)
@@ -135,7 +131,7 @@ def test_for_value(value)
context "with int row_key_validation" do
let(:cf_name) { "row_key_validation_cf_int" }
- before(:all) { create_column_family(cf_name, 'int') }
+ before(:each) { create_column_family(cf_name, 'int') }
def test_for_value(value)
create_and_fetch_column(cf_name, value).should eq(value)
@@ -158,7 +154,7 @@ def test_for_value(value)
context "with text row_key_validation" do
let(:cf_name) { "row_key_validation_cf_text" }
- before(:all) { create_column_family(cf_name, 'varchar') }
+ before(:each) { create_column_family(cf_name, 'varchar') }
it "should return a non-multibyte string" do
create_and_fetch_column(cf_name, "snark").should eq("snark")
@@ -171,7 +167,7 @@ def test_for_value(value)
context "with timestamp row_key_validation" do
let(:cf_name) { "row_key_validation_cf_timestamp" }
- before(:all) { create_column_family(cf_name, 'timestamp') }
+ before(:each) { create_column_family(cf_name, 'timestamp') }
it "should return a timestamp" do
uuid = UUID.new