Merge branch 'master' of git@gitorious.org:strokedb/mainline

* 'master' of git@gitorious.org:strokedb/mainline:
  storage-5 fixed
  ROADMAP is actually a roadmap now
  Minor gem generation tweaking
  Authors' emails updated
  Minor README update
  Very minor strokedb.rb reorganization
  More obsolete code removed; benchmarks' requires fixed
  stores/chunk.rb is no longer needed
  require of strokedb.rb works again
  Minor Document#find tweaking
  Add strokedb dir to $LOAD_PATH so require "/path/to/strokedb/lib/strokedb" works
  Last slight adjustment to the rake tasks relating to rcov and cruise
  renamed bin/stroke to bin/strokedb
  Rake 'sup' task now shows a textual coverage summary instead of opening a browser.
  Added a sup task
  fix examples after reorganization
Commit a6dc62ad5820dd2f44fe9caef49ed6c82623e052 (2 parents: 8d62fa4 + 1e4284c), committed by Oleg Andreev
3 .gitignore
@@ -10,4 +10,5 @@ TAGS
.*.strokedb
examples/.*.strokedb
examples/strokewiki/.*.strokedb
-pkg
+pkg
+strokedb.gemspec
25 README
@@ -1,8 +1,13 @@
-
StrokeDB is a distributed document-oriented database engine.
Main features are complete decentralization, JSON object format,
metadocuments, integration with runtime (it is just a ruby library).
+
+= Starting points
+
+One of the most important concepts of StrokeDB is a StrokeDB::Document.
+
+
= Installing StrokeDB
=== Requirements
@@ -32,21 +37,9 @@ or
=== <i>(Optional) Running test suite</i>
- $ cd strokedb/strokedb-ruby
- $ rake ci
- $ rake jci # for jruby, jruby should be in PATH
+ $ rake sup
-
-= Starting points
-
-One of the most important concepts of StrokeDB is a StrokeDB::Document.
-
-
-= Some benchmarks
-
- $ rake bench
-
=AUTHORS
-* Yurii Rashkovskii <yrashk@issuesdone.com>
-* Oleg Andreev <oleganza@gmail.com>
+* Yurii Rashkovskii <yrashk@idbns.com>
+* Oleg Andreev <oleganza@idbns.com>
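
Since the README now points to StrokeDB::Document as the main starting point, here is a minimal sketch of that idea, pieced together from the API used elsewhere in this commit (Config.build, Document.create!, Document.find); the slot names and the store path are purely illustrative:

  require 'strokedb'
  include StrokeDB

  # Build a default file-backed store (the path here is arbitrary).
  Config.build :default => true, :base_path => '.example.strokedb'

  doc = Document.create! :title => 'Hello, StrokeDB'  # create and persist a document
  Document.find(doc.uuid)                             # fetch it back by UUID
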
11 Rakefile
@@ -8,7 +8,7 @@ Echoe.taskify do
Dir['task/**/*.task'].each {|t| load t}
namespace :echoe do
- Echoe.new('StrokeDB', StrokeDB::VERSION) do |g|
+ Echoe.new('strokedb', StrokeDB::VERSION) do |g|
g.author = ['Yurii Rashkovskii', 'Oleg Andreev']
g.email = ['strokedb@googlegroups.com']
g.summary = 'embeddable, distributed, document-based database'
@@ -25,7 +25,7 @@ Echoe.taskify do
g.manifest_name = 'meta/MANIFEST'
g.ignore_pattern = /(^\.git|^.DS_Store$|^meta|^test\/storages|^examples\/(.*).strokedb|^bugs)/
- g.executable_pattern = 'bin/stroke'
+ g.executable_pattern = 'bin/strokedb'
end
desc 'tests packaged files to ensure they are all present'
@@ -40,15 +40,18 @@ Echoe.taskify do
task :magic => [:clean, :manifest, :install]
end
+ desc 'Check what\'s up in this mug'
+ task :sup => [:'rcov:run', :'rcov:verify']
+
# Developers: Run this before commiting!
desc 'Check everything over before commiting!'
- task :aok => [:'rcov:verbose', :'rcov:verify_verbose', :'rcov:open',
+ task :aok => [:'rcov:verbose', :'rcov:strict', :'rcov:open',
:'rdoc:html', :'rdoc:open',
:'ditz:stage', :'ditz:html', :'ditz:todo', :'ditz:status', :'ditz:html:open']
end
# desc 'Run by CruiseControl.rb during continuous integration'
-task :cruise => [:'rcov:run', :'rcov:verify', :'ditz:html', :'rdoc:html']
+task :cruise => [:'ditz:html', :'rdoc:html', :'rcov:bw', :'rcov:verify']
# By default, we just list the tasks.
task :default => :list
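
As wired up above, the new tasks are run from the project root; actual output depends on the local rspec/rcov setup, so this is only an illustration:

  $ rake sup    # rcov:run + rcov:verify -- specs with a textual coverage summary
  $ rake aok    # fuller pre-commit check: verbose specs, strict coverage, rdoc and ditz reports
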
0 bin/stroke → bin/strokedb
File renamed without changes.
12 bugs/issue-fa64e74e54d1dd7a895939fb8bd41a899fe746ce.yaml
@@ -32,8 +32,8 @@ type: :bugfix
component: Storage
release: v0.0.3
reporter: Yurii Rashkovskii <yrashk@idbns.com>
-status: :unstarted
-disposition:
+status: :closed
+disposition: :fixed
creation_time: 2008-04-17 21:28:49.887863 Z
references: []
@@ -47,3 +47,11 @@ log_events:
- Yurii Rashkovskii <yrashk@idbns.com>
- commented
- The main problem is that it fails randomly (not each time you run specs)
+- - 2008-04-19 10:49:12.179365 Z
+ - Yurii Rashkovskii <yrashk@idbns.com>
+ - commented
+ - Spec is located at spec/lib/strokedb/sync/store_sync_spec.rb now
+- - 2008-04-20 13:10:54.668785 Z
+ - Yurii Rashkovskii <yrashk@idbns.com>
+ - closed issue with disposition fixed
+ - That appears to be a spec bug (store was not synced, and autosync was trying to sync everything on files already engaged in the next subsequent example)
2 examples/movies.rb
@@ -1,4 +1,4 @@
-require File.dirname(__FILE__) + '/../strokedb'
+require File.dirname(__FILE__) + '/../lib/strokedb'
$KCODE = 'u'
2 examples/movies2.rb
@@ -1,4 +1,4 @@
-require File.dirname(__FILE__) + '/../strokedb'
+require File.dirname(__FILE__) + '/../lib/strokedb'
$KCODE = 'u'
3 examples/strokewiki/wiki.rb
@@ -1,8 +1,7 @@
+require File.dirname(__FILE__) + '/../../lib/strokedb'
require 'rubygems'
require 'ramaze'
require 'redcloth'
-require File.dirname(__FILE__) + '/../../strokedb'
-
# strokewiki will run in port 7000 and uses WEBRick by default
# you can change it uncommenting these four lines below.
2 examples/todo.rb
@@ -1,5 +1,5 @@
#! /usr/bin/env ruby
-$:.unshift File.dirname(__FILE__) + "/.."
+$:.unshift File.dirname(__FILE__) + "/../lib"
require "strokedb"
StrokeDB::Config.build :default => true, :base_path => '.todo.strokedb'
5 lib/strokedb.rb
@@ -1,4 +1,5 @@
require 'rubygems'
+$LOAD_PATH.unshift File.expand_path(File.dirname(__FILE__))
$LOAD_PATH.unshift( File.expand_path(File.join(File.dirname(__FILE__), 'strokedb')) ).uniq!
require 'strokedb/core_ext'
@@ -19,7 +20,7 @@ module StrokeDB
# Coverage threshold - bump this float anytime your changes increase the spec coverage
# DO NOT LOWER THIS NUMBER. EVER.
- COVERAGE = 88.1
+ COVERAGE = 91.9
# UUID regexp (like 1e3d02cc-0769-4bd8-9113-e033b246b013)
UUID_RE = /([a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})/
@@ -27,11 +28,11 @@ module StrokeDB
# document version regexp
VERSION_RE = UUID_RE
- RAW_NIL_UUID = "\x00" * 16
# following are special UUIDs used by StrokeDB
# so called Nil UUID, should be used as special UUID for Meta meta
NIL_UUID = "00000000-0000-0000-0000-000000000000"
+ RAW_NIL_UUID = "\x00" * 16
# UUID used for DeletedDocument meta
DELETED_DOCUMENT_UUID = 'e5e0ef20-e10f-4269-bff3-3040a90e194e'
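
With lib/ itself now pushed onto $LOAD_PATH, the library can be required straight from a checkout as well as from the installed gem; a small sketch using the hypothetical checkout path from the commit message:

  require '/path/to/strokedb/lib/strokedb'   # works because lib/ is added to $LOAD_PATH above

  # or, equivalently, from a script that knows where the checkout lives:
  $LOAD_PATH.unshift '/path/to/strokedb/lib'
  require 'strokedb'
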
87 lib/strokedb/data_structures/simple_skiplist.rb
@@ -348,90 +348,3 @@ def random_level
end
end
-
-if __FILE__ == $0
-
- require 'benchmark'
- include StrokeDB
-
- puts "Serialization techniques"
-
- len = 2_000
- array = (1..len).map{ [rand(len).to_s]*2 }
- biglist = SimpleSkiplist.from_a(array)
- dumped = biglist.marshal_dump
-
- Benchmark.bm(17) do |x|
- # First technique: to_a/from_a
- GC.start
- x.report("SimpleSkiplist#to_a ") do
- biglist.to_a
- biglist.to_a
- biglist.to_a
- biglist.to_a
- biglist.to_a
- end
- GC.start
- x.report("SimpleSkiplist.from_a ") do
- SimpleSkiplist.from_a(array)
- SimpleSkiplist.from_a(array)
- SimpleSkiplist.from_a(array)
- SimpleSkiplist.from_a(array)
- SimpleSkiplist.from_a(array)
- end
-
- # Another technique: Marshal.dump
- GC.start
- x.report("SimpleSkiplist#marshal_dump ") do
- biglist.marshal_dump
- biglist.marshal_dump
- biglist.marshal_dump
- biglist.marshal_dump
- biglist.marshal_dump
- end
- GC.start
- x.report("SimpleSkiplist#marshal_load ") do
- SimpleSkiplist.allocate.marshal_load(dumped.dup)
- SimpleSkiplist.allocate.marshal_load(dumped.dup)
- SimpleSkiplist.allocate.marshal_load(dumped.dup)
- SimpleSkiplist.allocate.marshal_load(dumped.dup)
- SimpleSkiplist.allocate.marshal_load(dumped.dup)
- end
- end
-
- puts
- puts "Find/insert techniques"
- Benchmark.bm(32) do |x|
- langs = [:C] if RUBY_PLATFORM !~ /java/
- langs = [:Java] if RUBY_PLATFORM =~ /java/
- SimpleSkiplist.with_optimizations(langs) do |lang|
- GC.start
- x.report("SimpleSkiplist#find #{lang}".ljust(32)) do
- 100.times do
- key = rand(len).to_s
- biglist.find(key)
- biglist.find(key)
- biglist.find(key)
- biglist.find(key)
- biglist.find(key)
- end
- end
- GC.start
- x.report("SimpleSkiplist#insert #{lang}".ljust(32)) do
- 100.times do
- key = rand(len).to_s
- biglist.insert(key, key)
- key = rand(len).to_s
- biglist.insert(key, key)
- key = rand(len).to_s
- biglist.insert(key, key)
- key = rand(len).to_s
- biglist.insert(key, key)
- key = rand(len).to_s
- biglist.insert(key, key)
- end
- end
- end
- end
-end
-
4 lib/strokedb/document.rb
@@ -352,7 +352,7 @@ def self.find(*args)
if (txns = Thread.current[:strokedb_transactions]) && !txns.nil? && !txns.empty?
store = txns.last
else
- if args.empty? || args.first.is_a?(String) || args.first.is_a?(Hash)
+ if args.empty? || args.first.is_a?(String) || args.first.is_a?(Hash) || args.first.nil?
store = StrokeDB.default_store
else
store = args.shift
@@ -366,7 +366,7 @@ def self.find(*args)
when Hash
store.search(query)
else
- raise TypeError
+ raise ArgumentError, "use UUID or query to find document(s)"
end
end
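
A short sketch of how Document.find behaves after this change; the UUID and hash forms come straight from the code and specs in this commit, while the variable names and the :title slot are illustrative only:

  Document.find(uuid)                   # UUID string: looked up in the default store
  Document.find(:title => 'Hello')      # Hash: handed to the store's search
  Document.find(store, :uuid => uuid)   # an explicit store may still be passed first
  Document.find([])                     # anything else now raises ArgumentError instead of TypeError
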
53 lib/strokedb/index.rb
@@ -1,55 +1,2 @@
module StrokeDB
- module Index
-
- #
- # H index is built over data's SHA-256 and offset pairs
- #
- class H
-
- def initialize(options = {})
- @options = options.stringify_keys
- @skiplist = FixedLengthSkiplistVolume.new(:path => File.join(@options['path'],'hindexvol'),
- :key_length => 64, :value_length => 4, :capacity => 100000)
- @cache = {}
- end
-
- def insert(data,offset)
- @skiplist.insert(Util.sha(data),[offset].pack('N'))
- end
-
- def find(data)
- if result = @skiplist.find(Util.sha(data))
- result.unpack('N').first
- else
- nil
- end
- end
-
-
-
- end
-
- #
- # IL index is built over atomic objects
- #
- class IL
- def initialize(dv)
- @skiplist = SimpleSkiplist.new
- @datavolume = dv
- end
- def insert(label, value, offset)
- @skiplist.insert(key(label,value), offset)
- end
- def find(label, value)
- @skiplist.find(key(label,value))
- end
-
- private
-
- def key(label,value)
- @datavolume.insert!(label) + @datavolume.insert!(value)
- end
- end
-
- end
end
1 lib/strokedb/stores.rb
@@ -1,6 +1,5 @@
require 'store'
require 'stores/chainable_storage'
-require 'stores/chunk'
require 'stores/file_storage'
require 'stores/memory_storage'
require 'stores/remote_store'
119 lib/strokedb/stores/chunk.rb
@@ -1,119 +0,0 @@
-module StrokeDB
- class Chunk
- attr_accessor :skiplist, :next_chunk, :prev_chunk, :uuid, :cut_level, :timestamp
- attr_accessor :next_chunk_uuid
- attr_accessor :store_uuid
- def initialize(cut_level)
- @skiplist, @cut_level = Skiplist.new({}, nil, cut_level), cut_level
- end
-
- def insert(uuid, raw_doc, __cheaters_level = nil, __timestamp = nil)
- @uuid ||= uuid
- __cheaters_level ||= $DEBUG_CHEATERS_LEVEL
- a, new_list = skiplist.insert(uuid, raw_doc, __cheaters_level, __timestamp)
- if new_list
- tmp = Chunk.new(@cut_level)
- tmp.skiplist = new_list
- tmp.next_chunk = @next_chunk if @next_chunk
- @next_chunk = tmp
- @next_chunk.uuid = uuid
- end
- [self, @next_chunk]
- end
-
- def delete(uuid)
- skiplist.delete(uuid)
- end
-
- def find(uuid, default = nil)
- skiplist.find(uuid, default)
- end
-
- def find_node(uuid)
- skiplist.find_node(uuid)
- end
-
- def find_nearest(uuid, default = nil)
- skiplist.find_nearest(uuid, default)
- end
-
- # Finds next node across separate chunks
- def find_next_node(node)
- chunk = self
- node2 = node.next
- if node2.is_a?(Skiplist::TailNode)
- chunk = chunk.next_chunk
- unless chunk.nil?
- node2 = chunk.first_node
- else
- node2 = nil
- end
- end
- node2
- end
-
-
- def first_uuid
- skiplist.first_node.key
- end
-
- def first_node
- skiplist.first_node
- end
-
- def size
- skiplist.size
- end
-
- def each(&block)
- skiplist.each &block
- end
-
- # Raw format
-
- # TODO: lazify
- def self.from_raw(raw)
- chunk = Chunk.new(raw['cut_level'])
- chunk.uuid = raw['uuid']
- chunk.next_chunk_uuid = raw['next_uuid']
- chunk.timestamp = raw['timestamp']
- chunk.store_uuid = raw['store_uuid']
- chunk.skiplist.raw_insert(raw['nodes']) do |rn|
- [rn['key'], rn['value'], rn['forward'].size, rn['timestamp']]
- end
- yield(chunk) if block_given?
- chunk
- end
-
- def to_raw
- # enumerate nodes
- skiplist.each_with_index do |node, i|
- node._serialized_index = i
- end
-
- # now we know keys' positions right in the nodes
- nodes = skiplist.map do |node|
- {
- 'key' => node.key,
- 'forward' => node.forward.map{|n| n._serialized_index || 0 },
- 'value' => node.value,
- 'timestamp' => node.timestamp
- }
- end
- {
- 'nodes' => nodes,
- 'cut_level' => @cut_level,
- 'uuid' => @uuid,
- # TODO: may not be needed
- 'next_uuid' => next_chunk ? next_chunk.uuid : nil,
- 'timestamp' => @timestamp,
- 'store_uuid' => @store_uuid
- }
- end
-
- def eql?(chunk)
- chunk.uuid == @uuid && chunk.skiplist.eql?(@skiplist)
- end
-
- end
-end
1 lib/strokedb/util.rb
@@ -38,7 +38,6 @@ def catch_circular_reference(value,name = 'StrokeDB.reference_stack')
require 'util/lazy_mapping_array'
require 'util/lazy_mapping_hash'
require 'util/serialization'
-require 'util/trigger_partition'
require 'util/uuid'
require 'util/xml'
require 'util/java_util' if RUBY_PLATFORM =~ /java/
136 lib/strokedb/util/trigger_partition.rb
@@ -1,136 +0,0 @@
-module Enumerable
- class TriggerPartitionContext
- def initialize(enum, &block)
- @enum = enum
- @cont = block
- end
- def fill(&block)
- @fill = block
- self
- end
- def emit
- partitions = []
- cont = @cont
- fill = @fill
- p = @enum.inject(nil) do |part, elem|
- if part && cont.call(part, elem)
- fill.call(part, elem)
- part
- else
- partitions << part if part
- yield(elem)
- end
- end
- partitions << p if p
- partitions
- end
- end
- def trigger_partition(&block)
- TriggerPartitionContext.new(self, &block)
- end
-
- class TriggerPartitions
- def self.partition(list)
- partitions = []
- p = list.inject(nil) do |part, elem|
- if part && continue?(part, elem)
- fill(part, elem)
- part
- else
- partitions << part if part
- emit(elem)
- end
- end
- partitions << p if p
- partitions
- end
- def self.continue?(p, e)
- true
- end
- def self.emit(e)
- [e]
- end
- def self.fill(p, e)
- p << e
- end
- end
-end
-
-if __FILE__ == $0
- arr = [1,2,3,4,5, -1, -4, -3, 5, 6, 7, 8, -6, -7]
- parr = arr.trigger_partition do |partition, element|
- partition[0] > 0 && element > 0 || partition[0] < 0 && element < 0
- end.fill do |p, e|
- p << e
- end.emit do |e|
- [e]
- end
-
- p arr
- p parr
-
- # Class might be faster
- class SignPartitions < Enumerable::TriggerPartitions
- def self.continue?(partition, element)
- partition[0] > 0 && element > 0 || partition[0] < 0 && element < 0
- end
- end
-
- p Enumerable::TriggerPartitions.partition(arr)
- p SignPartitions.partition(arr)
-
- require 'benchmark'
- include Benchmark
- n = 1000
- bm(32) do |x|
- x.report("#{n} times:" ) do
- n.times do
- arr.trigger_partition do |partition, element|
- partition[0] > 0 && element > 0 || partition[0] < 0 && element < 0
- end.fill do |p, e|
- p << e
- end.emit do |e|
- [e]
- end
- end
- end
- arrL = arr*28
- x.report("#{n} times (x28 larger data):" ) do
- n.times do
- arrL.trigger_partition do |partition, element|
- partition[0] > 0 && element > 0 || partition[0] < 0 && element < 0
- end.fill do |p, e|
- p << e
- end.emit do |e|
- [e]
- end
- end
- end
- # 35% faster
- x.report("#{n} times (SignPartitions):" ) do
- (n/5).times do
- SignPartitions.partition(arrL)
- SignPartitions.partition(arrL)
- SignPartitions.partition(arrL)
- SignPartitions.partition(arrL)
- SignPartitions.partition(arrL)
- end
- end
- # + 17% faster (relative to SignPartitions)
- x.report("#{n} times (raw code):" ) do
- n.times do
- parts = []
- p = arrL.inject(nil) do |partition, element|
- if partition && (partition[0] > 0 && element > 0 || partition[0] < 0 && element < 0)
- partition << element
- partition
- else
- parts << partition if partition
- [element]
- end
- end
- parts << p if p
- end
- end
- end
-end
9 meta/MANIFEST
@@ -1,4 +1,4 @@
-bin/stroke
+bin/strokedb
examples/movies.rb
examples/movies2.rb
examples/strokewiki/README
@@ -40,7 +40,6 @@ lib/strokedb/document.rb
lib/strokedb/index.rb
lib/strokedb/store.rb
lib/strokedb/stores/chainable_storage.rb
-lib/strokedb/stores/chunk.rb
lib/strokedb/stores/file_storage.rb
lib/strokedb/stores/inverted_list_file_storage.rb
lib/strokedb/stores/memory_storage.rb
@@ -64,7 +63,6 @@ lib/strokedb/util/lazy_array.rb
lib/strokedb/util/lazy_mapping_array.rb
lib/strokedb/util/lazy_mapping_hash.rb
lib/strokedb/util/serialization.rb
-lib/strokedb/util/trigger_partition.rb
lib/strokedb/util/uuid.rb
lib/strokedb/util/xml.rb
lib/strokedb/util.rb
@@ -76,13 +74,14 @@ lib/strokedb/volumes/map_volume.rb
lib/strokedb/volumes/skiplist_volume.rb
lib/strokedb/volumes.rb
lib/strokedb.rb
-MANIFEST
README
+script/console
spec/integration/remote_store_spec.rb
spec/integration/search_spec.rb
spec/integration/spec_helper.rb
spec/lib/spec_helper.rb
spec/lib/strokedb/config_spec.rb
+spec/lib/strokedb/core_ext/blank_spec.rb
spec/lib/strokedb/core_ext/spec_helper.rb
spec/lib/strokedb/core_ext/string_spec.rb
spec/lib/strokedb/core_ext/symbol_spec.rb
@@ -117,6 +116,7 @@ spec/lib/strokedb/sync/stroke_diff/hash_spec.rb
spec/lib/strokedb/sync/stroke_diff/scalar_spec.rb
spec/lib/strokedb/sync/stroke_diff/spec_helper.rb
spec/lib/strokedb/sync/stroke_diff/string_spec.rb
+spec/lib/strokedb/util/inflect_spec.rb
spec/lib/strokedb/util/lazy_array_spec.rb
spec/lib/strokedb/util/lazy_mapping_array_spec.rb
spec/lib/strokedb/util/lazy_mapping_hash_spec.rb
@@ -142,3 +142,4 @@ task/rcov.task
task/rdoc.task
task/rspec.task
vendor/java_inline.rb
+meta/MANIFEST
64 meta/ROADMAP
@@ -1,49 +1,21 @@
-TODO aka ROADMAP
+ROADMAP
+=======
-0.1 (April-May 2008)
-~~~~~~~~~~~~~~~~~~
-[.] Implement new storages
- [X] Drop capacity from MapVolume (by splitting to two files, bitmap and data)
- [.] Add multirecords to MapVolume (elastic MapVolume)
- [.] Implement 160-bit data pointers
- [ ] ...
-[ ] Implement transactions
- [?] Memory-based storage with permanent log
-[ ] Implement complex queries
- [ ] Improve inverted list index
- [ ] Improve PointQuery (I really don't like it). Or drop it
- [ ] Optimize prefixes
- [ ] Range/set query support
- [ ] Faster intersection
- [ ] Custom boolean operation on results
- [ ] Assoc's indexing
- [ ] Sub-attributes indexing
- [ ] Create efficient implementation
- [ ] Consider using Ambition API
- [?] Add sorting primitives for search functionality
-[ ] Improve spec coverage
- [ ] Spec SS#autosync
- [.] Spec RemoteStore::DRb
-[ ] Refactor some document-related code
- [.] Refactor Document#meta
- [ ] Refactor Meta#document, fix bugs
- [.] Refactor callback stuff in Document
- [.] Refactor associations
-[.] Improve validations
- [X] collect errors and add #valid? and #errors, #errors_on methods (seems to be quite important)
- [ ] add more options to validates_presence_of
- [X] add validates_type_of
- [X] add validates_uniqueness_of
-[ ] add created_at to Document by default? (in addition to lamport ;) (Timestampable meta?)
+Please note that more specific issues for each release are managed by ditz, consider running
-Later
-~~~~~
+ $ ditz todo v<release> # like v0.0.3
-[ ] Improve performance
- [ ] It seems that b895a99034f6a1d36d4300f97b705fc4d1df5899 caused a slow down
- [ ] New uuid-based versions caused some slow down, too
-[ ] Improve Diff
- [ ] Get diff strategy from either 'to' or 'from' (as per discussion with oleganza)
-[ ] Investigate performance issues
- [ ] Read/Write benchmarks showing dependency of data size
- [?] Pool new UUIDs
+Next Minor Release (0.0.3)
+==========================
+
+* More stable/mature storages
+* More complete transactions implementation
+* Better spec coverage
+* More documentation
+* Finish reorganizing project's file structure
+
+Next Major Release (0.1)
+========================
+
+* New improved extensible search subsystem
+* Faster storages
1 meta/benchmarks/archive_volume.rb
@@ -1,3 +1,4 @@
+$:.unshift File.dirname(__FILE__) + "/../../lib"
require 'strokedb'
include StrokeDB
1 meta/benchmarks/big_database.rb
@@ -1,3 +1,4 @@
+$:.unshift File.dirname(__FILE__) + "/../../lib"
require 'strokedb'
include StrokeDB
1 meta/benchmarks/instance_variables.rb
@@ -1,3 +1,4 @@
+$:.unshift File.dirname(__FILE__) + "/../../lib"
require 'benchmark'
include Benchmark
2 meta/benchmarks/inverted_list.rb
@@ -1,3 +1,5 @@
+$:.unshift File.dirname(__FILE__) + "/../../lib"
+
require 'strokedb'
include StrokeDB
4 meta/benchmarks/lazy_array.rb
@@ -1,4 +1,6 @@
-require File.dirname(__FILE__) + '/../strokedb'
+$:.unshift File.dirname(__FILE__) + "/../../lib"
+
+require 'strokedb'
NewLazyArray = StrokeDB::LazyArray
2 meta/benchmarks/map_volume.rb
@@ -1,3 +1,5 @@
+$:.unshift File.dirname(__FILE__) + "/../../lib"
+
require 'strokedb'
include StrokeDB
2 meta/benchmarks/read_write.rb
@@ -1,3 +1,5 @@
+$:.unshift File.dirname(__FILE__) + "/../../lib"
+
require 'strokedb'
include StrokeDB
2 meta/benchmarks/serialization.rb
@@ -1,3 +1,5 @@
+$:.unshift File.dirname(__FILE__) + "/../../lib"
+
require 'strokedb'
include StrokeDB
84 meta/benchmarks/simple_skiplist_serialization.rb
@@ -0,0 +1,84 @@
+$:.unshift File.dirname(__FILE__) + "/../../lib"
+require 'strokedb'
+require 'benchmark'
+include StrokeDB
+
+puts "Serialization techniques"
+
+len = 2_000
+array = (1..len).map{ [rand(len).to_s]*2 }
+biglist = SimpleSkiplist.from_a(array)
+dumped = biglist.marshal_dump
+
+Benchmark.bm(17) do |x|
+ # First technique: to_a/from_a
+ GC.start
+ x.report("SimpleSkiplist#to_a ") do
+ biglist.to_a
+ biglist.to_a
+ biglist.to_a
+ biglist.to_a
+ biglist.to_a
+ end
+ GC.start
+ x.report("SimpleSkiplist.from_a ") do
+ SimpleSkiplist.from_a(array)
+ SimpleSkiplist.from_a(array)
+ SimpleSkiplist.from_a(array)
+ SimpleSkiplist.from_a(array)
+ SimpleSkiplist.from_a(array)
+ end
+
+ # Another technique: Marshal.dump
+ GC.start
+ x.report("SimpleSkiplist#marshal_dump ") do
+ biglist.marshal_dump
+ biglist.marshal_dump
+ biglist.marshal_dump
+ biglist.marshal_dump
+ biglist.marshal_dump
+ end
+ GC.start
+ x.report("SimpleSkiplist#marshal_load ") do
+ SimpleSkiplist.allocate.marshal_load(dumped.dup)
+ SimpleSkiplist.allocate.marshal_load(dumped.dup)
+ SimpleSkiplist.allocate.marshal_load(dumped.dup)
+ SimpleSkiplist.allocate.marshal_load(dumped.dup)
+ SimpleSkiplist.allocate.marshal_load(dumped.dup)
+ end
+end
+
+puts
+puts "Find/insert techniques"
+Benchmark.bm(32) do |x|
+ langs = [:C] if RUBY_PLATFORM !~ /java/
+ langs = [:Java] if RUBY_PLATFORM =~ /java/
+ SimpleSkiplist.with_optimizations(langs) do |lang|
+ GC.start
+ x.report("SimpleSkiplist#find #{lang}".ljust(32)) do
+ 100.times do
+ key = rand(len).to_s
+ biglist.find(key)
+ biglist.find(key)
+ biglist.find(key)
+ biglist.find(key)
+ biglist.find(key)
+ end
+ end
+ GC.start
+ x.report("SimpleSkiplist#insert #{lang}".ljust(32)) do
+ 100.times do
+ key = rand(len).to_s
+ biglist.insert(key, key)
+ key = rand(len).to_s
+ biglist.insert(key, key)
+ key = rand(len).to_s
+ biglist.insert(key, key)
+ key = rand(len).to_s
+ biglist.insert(key, key)
+ key = rand(len).to_s
+ biglist.insert(key, key)
+ end
+ end
+ end
+end
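
Because the relocated benchmark now fixes up $LOAD_PATH itself, it should run standalone from a checkout:

  $ ruby meta/benchmarks/simple_skiplist_serialization.rb
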
2 meta/benchmarks/skiplist_volume.rb
@@ -1,3 +1,5 @@
+$:.unshift File.dirname(__FILE__) + "/../../lib"
+
require 'strokedb'
include StrokeDB
2 script/console
@@ -1,2 +1,2 @@
#!/bin/sh
-bin/stroke $*
+bin/strokedb $*
6 spec/lib/strokedb/document/document_spec.rb
@@ -19,9 +19,11 @@
Document.find(@store, :uuid => @document.uuid).should == [@document]
end
- it "should raise TypeError when find with wrong argument" do
+ it "should raise ArgumentError when invoking #find with wrong argument" do
@document = Document.create!
- lambda { Document.find([]) }.should raise_error(TypeError)
+ [ [], nil, 1 ].each do |arg|
+ lambda { Document.find(arg) }.should raise_error(ArgumentError)
+ end
end
end
2 spec/lib/strokedb/sync/store_sync_spec.rb
@@ -12,6 +12,8 @@
end
after(:each) do
+ @store.stop_autosync!
+ @another_store.stop_autosync!
FileUtils.rm_rf TEMP_STORAGES + '/store_sync'
FileUtils.rm_rf TEMP_STORAGES + '/store_sync_another'
end
21 task/rcov.task
@@ -1,30 +1,45 @@
namespace :rcov do
+ # Runs specs, runs rcov and textual summary
+ Spec::Rake::SpecTask.new(:bw) do |t|
+ t.spec_files = Dir['spec/**/*_spec.rb'].sort
+ t.libs = ['lib', 'server/lib' ]
+ t.rcov = true
+ # t.rcov_opts = ['--exclude-only', '".*"', '--include-file', '^app,^lib']
+ t.rcov_opts = ['--text-summary', '--exclude-only', '"spec\/,^\/"']
+ t.rcov_dir = :meta / :coverage
+ end
+
+ # Runs specs, runs rcov, with color and textual summary
Spec::Rake::SpecTask.new(:run) do |t|
+ t.spec_opts = ["--colour"]
t.spec_files = Dir['spec/**/*_spec.rb'].sort
t.libs = ['lib', 'server/lib' ]
t.rcov = true
# t.rcov_opts = ['--exclude-only', '".*"', '--include-file', '^app,^lib']
- t.rcov_opts = ['--exclude-only', '"spec\/,^\/"']
+ t.rcov_opts = ['--text-summary', '--exclude-only', '"spec\/,^\/"']
t.rcov_dir = :meta / :coverage
end
+ # Runs verbose specs, runs rcov, with color and textual summary
Spec::Rake::SpecTask.new(:verbose) do |t|
t.spec_opts = ["--format", "specdoc", "--colour"]
t.spec_files = Dir['spec/**/*_spec.rb'].sort
t.libs = ['lib', 'server/lib' ]
t.rcov = true
# t.rcov_opts = ['--exclude-only', '".*"', '--include-file', '^app,^lib']
- t.rcov_opts = ['--exclude-only', '"spec\/,^\/"']
+ t.rcov_opts = ['--text-summary', '--exclude-only', '"spec\/,^\/"']
t.rcov_dir = :meta / :coverage
end
+ # Verify coverage
RCov::VerifyTask.new(:verify) do |t|
t.threshold = StrokeDB::COVERAGE
t.index_html = :meta / :coverage / 'index.html'
t.require_exact_threshold = false
end
- RCov::VerifyTask.new(:verify_verbose) do |t|
+ # Verify coverage, strictly
+ RCov::VerifyTask.new(:strict) do |t|
t.threshold = StrokeDB::COVERAGE
t.index_html = :meta / :coverage / 'index.html'
end
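
For reference, how these tasks are meant to be invoked; the task names are exactly those defined above, and the behaviour notes are inferred from their options:

  $ rake rcov:bw       # specs plus a plain-text coverage summary, no colour (used by the cruise task)
  $ rake rcov:run      # the same, with colour
  $ rake rcov:verify   # fails if coverage falls below StrokeDB::COVERAGE
  $ rake rcov:strict   # requires coverage to match the threshold exactly, so COVERAGE must be bumped as it rises
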
