diff --git a/README b/README index 97520990b2..669609667a 100644 --- a/README +++ b/README @@ -1,68 +1,124 @@ -= about ServerSide +== about Sequel -ServerSide is an HTTP server framework designed to be as fast as possible, and -as easy as possible to use. ServerSide includes a full-featured HTTP server, a -controller-view system and a bunch of other tools to easily create servers and -clusters of servers. +Sequel is an ORM framework for Ruby. Sequel provides thread safety, connection pooling, and a DSL for constructing queries and table schemas. + +== Sequel vs. ActiveRecord + +Sequel offers the following advantages over ActiveRecord: + +* Better performance with large tables: unlike ActiveRecord, Sequel does not load the entire resultset into memory, but fetches each record separately and implements an Enumerable interface. +* Easy construction of queries using a DSL. +* Using model classes is possible, but not mandatory. == Resources -* {Project page}[http://code.google.com/p/serverside/] -* {Source code}[http://serverside.googlecode.com/svn/] -* {Bug tracking}[http://code.google.com/p/serverside/issues/list] -* {RubyForge page}[http://rubyforge.org/projects/serverside/] +* {Project page}[http://code.google.com/p/ruby-sequel/] +* {Source code}[http://ruby-sequel.googlecode.com/svn/] +* {Bug tracking}[http://code.google.com/p/ruby-sequel/issues/list] +* {RubyForge page}[http://rubyforge.org/projects/sequel/] To check out the source code: - svn co http://serverside.googlecode.com/svn/trunk + svn co http://ruby-sequel.googlecode.com/svn/trunk == Installation - sudo gem install serverside + sudo gem install sequel + +== A Short Tutorial + +=== Connecting to a database + +There are two ways to create a connection to a database. 
The easier way is to provide a connection URL: + + DB = Sequel.connect("postgres://postgres:postgres@localhost:5432/my_db") + +You can also specify optional parameters, such as the connection pool size: + + DB = Sequel.connect("postgres://postgres:postgres@localhost:5432/my_db", + :max_connections => 10) -== Usage +The second, more verbose, way is to create an instance of a database class: -Once you have the ServerSide gem installed, you can use the serverside -script to control servers. For example: + DB = Sequel::Postgres::Database.new(:database => 'my_db', :host => 'localhost', + :port => 5432) - serverside start . +=== Creating Datasets -will start an HTTP server on port 8000, serving the content of the working -directory. You can stop the server by running serverside stop . +Dataset is the primary means through which records are retrieved and manipulated. You can create a blank dataset by using the query method: -To run the server without forking, use the 'serve' command: + dataset = DB.query - serverside serve . +Or by using the from methods: -== Serving ERb Templates + posts = DB.from(:posts) -ServerSide can render ERb[http://www.ruby-doc.org/stdlib/libdoc/erb/rdoc/] -templates in a fashion similar to PHP. You can store templates in .rhtml files, -and ServerSide takes care of all the rest. ServerSide is also smart enough to -allow you to use nice looking URL's with your templates, and automatically adds -the .rhtml extension if the file is there. - -== Serving Dynamic Content +You can also use the equivalent shorthand: -By default ServerSide serves static files, but you can change the behavior by -creating custom {routing rules}[classes/ServerSide/Connection/Router.html]. -Here's a simple routing rule: + posts = DB[:posts] - ServerSide::Router.route(:path => '/hello/:name') { - send_response(200, 'text', "Hello #{@parameters[:name]}!") - } +Note: the dataset will only fetch records when you explicitly ask for them, as will be shown below. 
Datasets can be manipulated to filter through records, change record order and even join tables, as will also be shown below. -The ServerSide framework also lets you route requests based on any attribute of -incoming requests, such as host name, path, URL parameters etc. +=== Retrieving Records -To run your custom rules, you can either put them in a file called serverside.rb, -or tell serverside to explicitly load a specific file: +You can retrieve records by using the all method: - serverside start ~/myapp/myapp.rb + posts.all -== Running a Cluster of Servers +The all method returns an array of hashes, where each hash corresponds to a record. + +You can also iterate through records one at a time: + + posts.each {|row| p row} + +Or perform more advanced stuff: + + posts.map(:id) + posts.inject({}) {|h, r| h[r[:id]] = r[:name]} + +You can also retrieve the first record in a dataset: + + posts.first + +If the dataset is ordered, you can also ask for the last record: + + posts.order(:stamp).last + +=== Filtering Records + +The simplest way to filter records is to provide a hash of values to match: + + my_posts = posts.filter(:category => 'ruby', :author => 'david') + +You can also specify ranges: + + my_posts = posts.filter(:stamp => 2.weeks.ago..1.week.ago) + +Some adapters will also let you specify Regexps: + + my_posts = posts.filter(:category => /ruby/i) + +You can also use an inverse filter: + + my_posts = posts.exclude(:category => /ruby/i) + +You can then retrieve the records by using any of the retrieval methods: + + my_posts.each {|row| p row} + +You can also specify a custom WHERE clause: + + posts.filter('(stamp < ?) AND (author <> ?)', 3.days.ago, author_name) + +=== Counting Records + + posts.count + +=== Ordering Records + + posts.order(:stamp) + +You can also specify descending order -ServerSide makes it easy to control a cluster of servers. 
Just supply a range of -ports instead of a single port: + posts.order(:stamp.DESC) - serverside -p 8000..8009 start . diff --git a/lib/sequel/connection_pool.rb b/lib/sequel/connection_pool.rb new file mode 100644 index 0000000000..14be73c2bc --- /dev/null +++ b/lib/sequel/connection_pool.rb @@ -0,0 +1,65 @@ +require 'thread' + +module ServerSide + class ConnectionPool + attr_reader :max_size, :mutex, :conn_maker + attr_reader :available_connections, :allocated, :created_count + + def initialize(max_size = 4, &block) + @max_size = max_size + @mutex = Mutex.new + @conn_maker = block + + @available_connections = [] + @allocated = {} + @created_count = 0 + end + + def size + @created_count + end + + def hold + t = Thread.current + if (conn = owned_connection(t)) + return yield(conn) + end + while !(conn = acquire(t)) + sleep 0.001 + end + begin + yield conn + ensure + release(t) + end + end + + def owned_connection(thread) + @mutex.synchronize {@allocated[thread]} + end + + def acquire(thread) + @mutex.synchronize do + @allocated[thread] ||= available + end + end + + def available + @available_connections.pop || make_new + end + + def make_new + if @created_count < @max_size + @created_count += 1 + @conn_maker.call + end + end + + def release(thread) + @mutex.synchronize do + @available_connections << @allocated[thread] + @allocated.delete(thread) + end + end + end +end diff --git a/lib/sequel/database.rb b/lib/sequel/database.rb new file mode 100644 index 0000000000..444677d4a5 --- /dev/null +++ b/lib/sequel/database.rb @@ -0,0 +1,93 @@ +require 'uri' + +require File.join(File.dirname(__FILE__), 'schema') + +module ServerSide + class Database + def initialize(opts = {}) + @opts = opts + end + + # Some convenience methods + + # Returns a new dataset with the from method invoked. + def from(*args); query.from(*args); end + + # Returns a new dataset with the select method invoked. 
+ def select(*args); query.select(*args); end + + # returns a new dataset with the from parameter set. For example, + # + # db[:posts].each {|p| puts p[:title]} + def [](table) + query.from(table) + end + + # Returns a literal SQL representation of a value. This method is usually + # overriden in descendants. + def literal(v) + case v + when String: "'%s'" % v + else v.to_s + end + end + + # Creates a table. + def create_table(name, columns = nil, indexes = nil, &block) + if block + schema = Schema.new + schema.create_table(name, &block) + schema.create(self) + else + execute Schema.create_table_sql(name, columns, indexes) + end + end + + # Drops a table. + def drop_table(name) + execute Schema.drop_table_sql(name) + end + + # Performs a brute-force check for the existance of a table. This method is + # usually overriden in descendants. + def table_exists?(name) + from(name).count + true + rescue + false + end + + @@adapters = Hash.new + + # Sets the adapter scheme for the database class. + def self.set_adapter_scheme(scheme) + @@adapters[scheme.to_sym] = self + end + + # Converts a uri to an options hash. 
+ def self.uri_to_options(uri) + { + :user => uri.user, + :password => uri.password, + :host => uri.host, + :port => uri.port, + :database => (uri.path =~ /\/(.*)/) && ($1) + } + end + + def self.connect(conn_string) + uri = URI.parse(conn_string) + c = @@adapters[uri.scheme.to_sym] + raise "Invalid database scheme" unless c + c.new(c.uri_to_options(uri)) + end + end +end + +class Time + SQL_FORMAT = "TIMESTAMP '%Y-%m-%d %H:%M:%S'".freeze + + def to_sql_timestamp + strftime(SQL_FORMAT) + end +end diff --git a/lib/sequel/dataset.rb b/lib/sequel/dataset.rb new file mode 100644 index 0000000000..bcc750b786 --- /dev/null +++ b/lib/sequel/dataset.rb @@ -0,0 +1,315 @@ +module ServerSide + class Dataset + include Enumerable + + attr_reader :db + attr_accessor :record_class + + def initialize(db, opts = {}, record_class = nil) + @db = db + @opts = opts || {} + @record_class = record_class + end + + def dup_merge(opts) + self.class.new(@db, @opts.merge(opts), @record_class) + end + + AS_REGEXP = /(.*)___(.*)/.freeze + AS_FORMAT = "%s AS %s".freeze + DOUBLE_UNDERSCORE = '__'.freeze + PERIOD = '.'.freeze + + # sql helpers + def field_name(field) + field.is_a?(Symbol) ? field.to_field_name : field + end + + QUALIFIED_REGEXP = /(.*)\.(.*)/.freeze + QUALIFIED_FORMAT = "%s.%s".freeze + + def qualified_field_name(field, table) + fn = field_name(field) + fn = QUALIFIED_FORMAT % [table, fn] unless fn =~ QUALIFIED_REGEXP + end + + WILDCARD = '*'.freeze + COMMA_SEPARATOR = ", ".freeze + + def field_list(fields) + case fields + when Array: + if fields.empty? 
+ WILDCARD + else + fields.map {|i| field_name(i)}.join(COMMA_SEPARATOR) + end + when Symbol: + fields.to_field_name + else + fields + end + end + + def source_list(source) + case source + when Array: source.join(COMMA_SEPARATOR) + else source + end + end + + def literal(v) + case v + when String: "'%s'" % v + else v.to_s + end + end + + AND_SEPARATOR = " AND ".freeze + EQUAL_COND = "(%s = %s)".freeze + + def where_equal_condition(left, right) + EQUAL_COND % [field_name(left), literal(right)] + end + + def where_list(where) + case where + when Hash: + where.map {|kv| where_equal_condition(kv[0], kv[1])}.join(AND_SEPARATOR) + when Array: + fmt = where.shift + fmt.gsub('?') {|i| literal(where.shift)} + else + where + end + end + + def join_cond_list(cond, join_table) + cond.map do |kv| + EQUAL_COND % [ + qualified_field_name(kv[0], join_table), + qualified_field_name(kv[1], @opts[:from])] + end.join(AND_SEPARATOR) + end + + # DSL constructors + def from(source) + dup_merge(:from => source) + end + + def select(*fields) + fields = fields.first if fields.size == 1 + dup_merge(:select => fields) + end + + def order(*order) + dup_merge(:order => order) + end + + DESC_ORDER_REGEXP = /(.*)\sDESC/.freeze + + def reverse_order(order) + order.map do |f| + if f.to_s =~ DESC_ORDER_REGEXP + $1 + else + f.DESC + end + end + end + + def where(*where) + if where.size == 1 + where = where.first + if @opts[:where] && @opts[:where].is_a?(Hash) && where.is_a?(Hash) + where = @opts[:where].merge(where) + end + end + dup_merge(:where => where) + end + + LEFT_OUTER_JOIN = 'LEFT OUTER JOIN'.freeze + INNER_JOIN = 'INNER JOIN'.freeze + RIGHT_OUTER_JOIN = 'RIGHT OUTER JOIN'.freeze + FULL_OUTER_JOIN = 'FULL OUTER JOIN'.freeze + + def join(table, cond) + dup_merge(:join_type => LEFT_OUTER_JOIN, :join_table => table, + :join_cond => cond) + end + + alias_method :filter, :where + + def from!(source) + @sql = nil + @opts[:from] = source + self + end + + def select!(*fields) + @sql = nil + fields = 
fields.first if fields.size == 1 + @opts[:select] = fields + self + end + + alias_method :all, :to_a + + alias_method :enum_map, :map + + def map(field_name = nil, &block) + if block + enum_map(&block) + elsif field_name + enum_map {|r| r[field_name]} + else + [] + end + end + + def hash_column(key_column, value_column) + inject({}) do |m, r| + m[r[key_column]] = r[value_column] + m + end + end + + SELECT = "SELECT %s FROM %s".freeze + LIMIT = " LIMIT %s".freeze + ORDER = " ORDER BY %s".freeze + WHERE = " WHERE %s".freeze + JOIN_CLAUSE = " %s %s ON %s".freeze + + EMPTY = ''.freeze + + SPACE = ' '.freeze + + def select_sql(opts = nil) + opts = opts ? @opts.merge(opts) : @opts + + fields = opts[:select] + select_fields = fields ? field_list(fields) : WILDCARD + select_source = source_list(opts[:from]) + sql = SELECT % [select_fields, select_source] + + if join_type = opts[:join_type] + join_table = opts[:join_table] + join_cond = join_cond_list(opts[:join_cond], join_table) + sql << (JOIN_CLAUSE % [join_type, join_table, join_cond]) + end + + if where = opts[:where] + sql << (WHERE % where_list(where)) + end + + if order = opts[:order] + sql << (ORDER % order.join(COMMA_SEPARATOR)) + end + + if limit = opts[:limit] + sql << (LIMIT % limit) + end + + sql + end + + INSERT = "INSERT INTO %s (%s) VALUES (%s)".freeze + INSERT_EMPTY = "INSERT INTO %s DEFAULT VALUES".freeze + + def insert_sql(values, opts = nil) + opts = opts ? @opts.merge(opts) : @opts + + if values.nil? || values.empty? + INSERT_EMPTY % opts[:from] + else + field_list = [] + value_list = [] + values.each do |k, v| + field_list << k + value_list << literal(v) + end + + INSERT % [ + opts[:from], + field_list.join(COMMA_SEPARATOR), + value_list.join(COMMA_SEPARATOR)] + end + end + + UPDATE = "UPDATE %s SET %s".freeze + SET_FORMAT = "%s = %s".freeze + + def update_sql(values, opts = nil) + opts = opts ? @opts.merge(opts) : @opts + + set_list = values.map {|kv| SET_FORMAT % [kv[0], literal(kv[1])]}. 
+ join(COMMA_SEPARATOR) + update_clause = UPDATE % [opts[:from], set_list] + + where = opts[:where] + where_clause = where ? WHERE % where_list(where) : EMPTY + + [update_clause, where_clause].join(SPACE) + end + + DELETE = "DELETE FROM %s".freeze + + def delete_sql(opts = nil) + opts = opts ? @opts.merge(opts) : @opts + + delete_source = opts[:from] + + where = opts[:where] + where_clause = where ? WHERE % where_list(where) : EMPTY + + [DELETE % delete_source, where_clause].join(SPACE) + end + + COUNT = "COUNT(*)".freeze + SELECT_COUNT = {:select => COUNT, :order => nil}.freeze + + def count_sql(opts = nil) + select_sql(opts ? opts.merge(SELECT_COUNT) : SELECT_COUNT) + end + + # aggregates + def min(field) + select(field.MIN).first[:min] + end + + def max(field) + select(field.MAX).first[:max] + end + end +end + +class Symbol + def DESC + "#{to_s} DESC" + end + + def AS(target) + "#{field_name} AS #{target}" + end + + def MIN; "MIN(#{to_field_name})"; end + def MAX; "MAX(#{to_field_name})"; end + + AS_REGEXP = /(.*)___(.*)/.freeze + AS_FORMAT = "%s AS %s".freeze + DOUBLE_UNDERSCORE = '__'.freeze + PERIOD = '.'.freeze + + def to_field_name + s = to_s + if s =~ AS_REGEXP + s = AS_FORMAT % [$1, $2] + end + s.split(DOUBLE_UNDERSCORE).join(PERIOD) + end + + def ALL + "#{to_s}.*" + end +end + diff --git a/lib/sequel/model.rb b/lib/sequel/model.rb new file mode 100644 index 0000000000..8443c009a3 --- /dev/null +++ b/lib/sequel/model.rb @@ -0,0 +1,237 @@ +#require 'rubygems' +require 'metaid' + +module ServerSide + class Model + @@db = nil + + def self.db; @@db; end + def self.db=(db); @@db = db; end + + def self.table_name; @table_name; end + def self.set_table_name(t); @table_name = t; end + + def self.dataset + return @dataset if @dataset + if !table_name + raise RuntimeError, "Table name not specified for class #{self}." + elsif !db + raise RuntimeError, "No database connected." 
+ end + @dataset = db[table_name] + @dataset.record_class = self + @dataset + end + def self.set_dataset(ds); @dataset = ds; @dataset.record_class = self; end + + def self.cache_by(column, expiration) + @cache_column = column + + prefix = "#{name}.#{column}." + define_method(:cache_key) do + prefix + @values[column].to_s + end + + define_method("find_by_#{column}".to_sym) do |arg| + key = cache_key + rec = CACHE[key] + if !rec + rec = find(column => arg) + CACHE.set(key, rec, expiration) + end + rec + end + + alias_method :delete, :delete_and_invalidate_cache + alias_method :set, :set_and_update_cache + end + + def self.cache_column + @cache_column + end + + def self.primary_key; @primary_key ||= :id; end + def self.set_primary_key(k); @primary_key = k; end + + def self.schema(name = nil, &block) + name ||= table_name + @schema = Schema::Generator.new(name, &block) + set_table_name name + if @schema.primary_key_name + set_primary_key @schema.primary_key_name + end + end + + def self.table_exists? + db.table_exists?(table_name) + end + + def self.create_table + db.execute get_schema.create_sql + end + + def self.drop_table + db.execute get_schema.drop_sql + end + + def self.recreate_table + drop_table if table_exists? + create_table + end + + def self.get_schema + @schema + end + + ONE_TO_ONE_PROC = "proc {i = @values[:%s]; %s[i] if i}".freeze + ID_POSTFIX = "_id".freeze + FROM_DATASET = "db[%s]".freeze + + def self.one_to_one(name, opts) + klass = opts[:class] ? opts[:class] : (FROM_DATASET % name.inspect) + key = opts[:key] || (name.to_s + ID_POSTFIX) + define_method name, &eval(ONE_TO_ONE_PROC % [key, klass]) + end + + ONE_TO_MANY_PROC = "proc {%s.filter(:%s => @pkey)}".freeze + ONE_TO_MANY_ORDER_PROC = "proc {%s.filter(:%s => @pkey).order(%s)}".freeze + def self.one_to_many(name, opts) + klass = opts[:class] ? opts[:class] : + (FROM_DATASET % (opts[:table] || name.inspect)) + key = opts[:on] + order = opts[:order] + define_method name, &eval( + (order ? 
ONE_TO_MANY_ORDER_PROC : ONE_TO_MANY_PROC) % + [klass, key, order.inspect] + ) + end + + def self.get_hooks(key) + @hooks ||= {} + @hooks[key] ||= [] + end + + def self.has_hooks?(key) + !get_hooks(key).empty? + end + + def run_hooks(key) + self.class.get_hooks(key).each {|h| instance_eval(&h)} + end + + def self.before_delete(&block) + get_hooks(:before_delete).unshift(block) + end + + def self.after_create(&block) + get_hooks(:after_create) << block + end + + ############################################################################ + + attr_reader :values, :pkey + + def model + self.class + end + + def primary_key + model.primary_key + end + + def initialize(values) + @values = values + @pkey = values[self.class.primary_key] + end + + def exists? + model.filter(primary_key => @pkey).count == 1 + end + + def refresh + record = self.class.find(primary_key => @pkey) + record ? (@values = record.values) : + (raise RuntimeError, "Record not found") + self + end + + def self.find(cond) + dataset.filter(cond).first # || (raise RuntimeError, "Record not found.") + end + + def self.each(&block); dataset.each(&block); end + def self.all; dataset.all; end + def self.filter(*arg); dataset.filter(*arg); end + def self.first; dataset.first; end + def self.count; dataset.count; end + def self.map(column); dataset.map(column); end + def self.hash_column(column); dataset.hash_column(primary_key, column); end + def self.join(*args); dataset.join(*args); end + def self.lock(mode, &block); dataset.lock(mode, &block); end + def self.delete_all + if has_hooks?(:before_delete) + db.transaction {dataset.all.each {|r| r.delete}} + else + dataset.delete + end + end + + def self.[](key) + find key.is_a?(Hash) ? 
key : {primary_key => key} + end + + def self.create(values = nil) + db.transaction do + obj = find(primary_key => dataset.insert(values)) + obj.run_hooks(:after_create) + obj + end + end + + def delete + db.transaction do + run_hooks(:before_delete) + model.dataset.filter(primary_key => @pkey).delete + end + end + + FIND_BY_REGEXP = /^find_by_(.*)/.freeze + FILTER_BY_REGEXP = /^filter_by_(.*)/.freeze + + def self.method_missing(m, *args) + Thread.exclusive do + method_name = m.to_s + if method_name =~ FIND_BY_REGEXP + c = $1 + meta_def(method_name) {|arg| find(c => arg)} + send(m, *args) if respond_to?(m) + elsif method_name =~ FILTER_BY_REGEXP + c = $1 + meta_def(method_name) {|arg| filter(c => arg)} + send(m, *args) if respond_to?(m) + else + super + end + end + end + + def db; @@db; end + + def [](field); @values[field]; end + + def ==(obj) + (obj.class == model) && (obj.pkey == @pkey) + end + + def set(values) + model.dataset.filter(primary_key => @pkey).update(values) + @values.merge!(values) + end + end + + def self.Model(table_name) + Class.new(ServerSide::Model) do + meta_def(:inherited) {|c| c.set_table_name(table_name)} + end + end +end diff --git a/lib/sequel/postgres.rb b/lib/sequel/postgres.rb new file mode 100644 index 0000000000..68d61cce33 --- /dev/null +++ b/lib/sequel/postgres.rb @@ -0,0 +1,396 @@ +require 'postgres' + +class PGconn + # the pure-ruby postgres adapter does not have a quote method. + unless methods.include?('quote') + def self.quote(obj) + case obj + when true: 't' + when false: 'f' + when nil: 'NULL' + when String: "'#{obj}'" + else obj.to_s + end + end + end + + def connected? + status == PGconn::CONNECTION_OK + end + + SQL_BEGIN = 'BEGIN'.freeze + SQL_COMMIT = 'COMMIT'.freeze + SQL_ROLLBACK = 'ROLLBACK'.freeze + + def execute(sql) + begin + ServerSide.info(sql) + async_exec(sql) + rescue PGError => e + unless connected? 
+ ServerSide.warn('Reconnecting to Postgres server') + reset + async_exec(sql) + else + p sql + p e + raise e + end + end + end + + attr_reader :transaction_in_progress + + def transaction + if @transaction_in_progress + return yield + end + ServerSide.info('BEGIN') + async_exec(SQL_BEGIN) + begin + @transaction_in_progress = true + result = yield + ServerSide.info('COMMIT') + async_exec(SQL_COMMIT) + result + rescue => e + ServerSide.info('ROLLBACK') + async_exec(SQL_ROLLBACK) + raise e + ensure + @transaction_in_progress = nil + end + end +end + +class String + def postgres_to_bool + if self == 't' + true + elsif self == 'f' + false + else + nil + end + end + + TIME_REGEXP = /(\d{4})-(\d{2})-(\d{2})\s(\d{2}):(\d{2}):(\d{2})/ + + def postgres_to_time + if self =~ TIME_REGEXP + Time.local($1.to_i, $2.to_i, $3.to_i, $4.to_i, $5.to_i, $6.to_i) + else + nil + end + end +end + +module ServerSide + module Postgres + PG_TYPES = { + 16 => :postgres_to_bool, + 20 => :to_i, + 21 => :to_i, + 22 => :to_i, + 23 => :to_i, + 700 => :to_f, + 701 => :to_f, + 1114 => :postgres_to_time + } + + class Database < ServerSide::Database + set_adapter_scheme :postgres + + attr_reader :pool + + def initialize(opts = {}) + super + @pool = ServerSide::ConnectionPool.new(@opts[:max_connections] || 4) do + PGconn.connect( + @opts[:host] || 'localhost', + @opts[:port] || 5432, + '', '', + @opts[:database] || 'reality_development', + @opts[:user] || 'postgres', + @opts[:password]) + end + end + + + def query(opts = nil) + Postgres::Dataset.new(self, opts) + end + + RELATION_QUERY = {:from => :pg_class, :select => :relname}.freeze + RELATION_FILTER = "(relkind = 'r') AND (relname !~ '^pg|sql')".freeze + SYSTEM_TABLE_REGEXP = /^pg|sql/.freeze + + + def tables + query(RELATION_QUERY).filter(RELATION_FILTER).map(:relname) + end + + def locks + query.from("pg_class, pg_locks"). + select("pg_class.relname, pg_locks.*"). 
+ filter("pg_class.relfilenode=pg_locks.relation") + end + + def execute(sql) + @pool.hold {|conn| conn.execute(sql)} + end + + def execute_and_forget(sql) + @pool.hold {|conn| conn.execute(sql).clear} + end + + def synchronize(&block) + @pool.hold(&block) + end + + def transaction(&block) + @pool.hold {|conn| conn.transaction(&block)} + end + + def table_exists?(name) + from(:pg_class).filter(:relname => name, :relkind => 'r').count > 0 + end + end + + class Dataset < ServerSide::Dataset + attr_reader :result, :fields + + def literal(v) + case v + when Time: v.to_sql_timestamp + when Symbol: PGconn.quote(v.to_s) + when Array: v.empty? ? EMPTY_ARRAY : v.join(COMMA_SEPARATOR) + else + PGconn.quote(v) + end + end + + LIKE = '%s ~ %s'.freeze + LIKE_CI = '%s ~* %s'.freeze + + IN_ARRAY = '%s IN (%s)'.freeze + EMPTY_ARRAY = 'NULL'.freeze + + def where_equal_condition(left, right) + case right + when Regexp: + (right.casefold? ? LIKE_CI : LIKE) % + [field_name(left), PGconn.quote(right.source)] + when Array: + IN_ARRAY % [field_name(left), literal(right)] + else + super + end + end + + def each(opts = nil, &block) + query_each(select_sql(opts), true, &block) + self + end + + LIMIT_1 = {:limit => 1}.freeze + + def first(opts = nil) + opts = opts ? opts.merge(LIMIT_1) : LIMIT_1 + query_first(select_sql(opts), true) + end + + def last(opts = nil) + raise RuntimeError, 'No order specified' unless + @opts[:order] || (opts && opts[:order]) + + opts = {:order => reverse_order(@opts[:order])}. + merge(opts ? opts.merge(LIMIT_1) : LIMIT_1) + + query_first(select_sql(opts), true) + end + + FOR_UPDATE = ' FOR UPDATE'.freeze + FOR_SHARE = ' FOR SHARE'.freeze + + def select_sql(opts = nil) + row_lock_mode = opts ? 
opts[:lock] : @opts[:lock] + sql = super + case row_lock_mode + when :update : sql << FOR_UPDATE + when :share : sql << FOR_SHARE + end + sql + end + + def for_update + dup_merge(:lock => :update) + end + + def for_share + dup_merge(:lock => :share) + end + + EXPLAIN = 'EXPLAIN '.freeze + QUERY_PLAN = 'QUERY PLAN'.to_sym + + def explain(opts = nil) + analysis = [] + query_each(select_sql(EXPLAIN + select_sql(opts))) do |r| + analysis << r[QUERY_PLAN] + end + analysis.join("\r\n") + end + + LOCK = 'LOCK TABLE %s IN %s MODE;'.freeze + + ACCESS_SHARE = 'ACCESS SHARE'.freeze + ROW_SHARE = 'ROW SHARE'.freeze + ROW_EXCLUSIVE = 'ROW EXCLUSIVE'.freeze + SHARE_UPDATE_EXCLUSIVE = 'SHARE UPDATE EXCLUSIVE'.freeze + SHARE = 'SHARE'.freeze + SHARE_ROW_EXCLUSIVE = 'SHARE ROW EXCLUSIVE'.freeze + EXCLUSIVE = 'EXCLUSIVE'.freeze + ACCESS_EXCLUSIVE = 'ACCESS EXCLUSIVE'.freeze + + # Locks the table with the specified mode. + def lock(mode, &block) + sql = LOCK % [@opts[:from], mode] + @db.synchronize do + if block # perform locking inside a transaction and yield to block + @db.transaction {@db.execute_and_forget(sql); yield} + else + @db.execute_and_forget(sql) # lock without a transaction + self + end + end + end + + def count(opts = nil) + query_single_value(count_sql(opts)).to_i + end + + SELECT_LASTVAL = ';SELECT lastval()'.freeze + + def insert(values = nil, opts = nil) + @db.execute_and_forget(insert_sql(values, opts)) + query_single_value(SELECT_LASTVAL).to_i + end + + def update(values, opts = nil) + @db.synchronize do + result = @db.execute(update_sql(values)) + begin + affected = result.cmdtuples + ensure + result.clear + end + affected + end + end + + def delete(opts = nil) + @db.synchronize do + result = @db.execute(delete_sql(opts)) + begin + affected = result.cmdtuples + ensure + result.clear + end + affected + end + end + + def query_all(sql, use_record_class = false) + @db.synchronize do + result = @db.execute(sql) + begin + conv = row_converter(result, 
use_record_class) + all = [] + result.each {|r| all << conv[r]} + ensure + result.clear + end + all + end + end + + def query_each(sql, use_record_class = false) + @db.synchronize do + result = @db.execute(sql) + begin + conv = row_converter(result, use_record_class) + result.each {|r| yield conv[r]} + ensure + result.clear + end + end + end + + def query_first(sql, use_record_class = false) + @db.synchronize do + result = @db.execute(sql) + begin + row = nil + conv = row_converter(result, use_record_class) + result.each {|r| row = conv.call(r)} + ensure + result.clear + end + row + end + end + + def query_single_value(sql) + @db.synchronize do + result = @db.execute(sql) + begin + value = result.getvalue(0, 0) + ensure + result.clear + end + value + end + end + + COMMA = ','.freeze + + @@converters_mutex = Mutex.new + @@converters = {} + + def row_converter(result, use_record_class) + fields = result.fields.map {|s| s.to_sym} + types = (0..(result.num_fields - 1)).map {|idx| result.type(idx)} + klass = use_record_class ? @record_class : nil + + # create result signature and memoize the converter + sig = fields.join(COMMA) + types.join(COMMA) + klass.to_s + @@converters_mutex.synchronize do + @@converters[sig] ||= compile_converter(fields, types, klass) + end + end + + CONVERT = "lambda {|r| {%s}}".freeze + CONVERT_RECORD_CLASS = "lambda {|r| %2$s.new(%1$s)}".freeze + + CONVERT_FIELD = '%s => r[%d]'.freeze + CONVERT_FIELD_TRANSLATE = '%s => ((t = r[%d]) ? t.%s : nil)'.freeze + + def compile_converter(fields, types, klass) + used_fields = [] + kvs = [] + fields.each_with_index do |field, idx| + next if used_fields.include?(field) + used_fields << field + + translate_fn = PG_TYPES[types[idx]] + kvs << (translate_fn ? CONVERT_FIELD_TRANSLATE : CONVERT_FIELD) % + [field.inspect, idx, translate_fn] + end + s = (klass ? 
CONVERT_RECORD_CLASS : CONVERT) % + [kvs.join(COMMA), klass] + eval(s) + end + end + end +end diff --git a/lib/sequel/schema.rb b/lib/sequel/schema.rb new file mode 100644 index 0000000000..d18b75e903 --- /dev/null +++ b/lib/sequel/schema.rb @@ -0,0 +1,163 @@ +require 'rubygems' +require 'postgres' + +module ServerSide + class Schema + COMMA_SEPARATOR = ', '.freeze + COLUMN_DEF = '%s %s'.freeze + UNIQUE = ' UNIQUE'.freeze + NOT_NULL = ' NOT NULL'.freeze + DEFAULT = ' DEFAULT %s'.freeze + PRIMARY_KEY = ' PRIMARY KEY'.freeze + REFERENCES = ' REFERENCES %s'.freeze + ON_DELETE = ' ON DELETE %s'.freeze + + RESTRICT = 'RESTRICT'.freeze + CASCADE = 'CASCADE'.freeze + NO_ACTION = 'NO ACTION'.freeze + SET_NULL = 'SET NULL'.freeze + SET_DEFAULT = 'SET DEFAULT'.freeze + + TYPES = Hash.new {|h, k| k} + TYPES[:double] = 'double precision' + + def self.on_delete_action(action) + case action + when :restrict: RESTRICT + when :cascade: CASCADE + when :set_null: SET_NULL + when :set_default: SET_DEFAULT + else NO_ACTION + end + end + + def self.column_definition(column) + c = COLUMN_DEF % [column[:name], TYPES[column[:type]]] + c << UNIQUE if column[:unique] + c << NOT_NULL if column[:null] == false + c << DEFAULT % PGconn.quote(column[:default]) if column.include?(:default) + c << PRIMARY_KEY if column[:primary_key] + c << REFERENCES % column[:table] if column[:table] + c << ON_DELETE % on_delete_action(column[:on_delete]) if + column[:on_delete] + c + end + + def self.create_table_column_list(columns) + columns.map {|c| column_definition(c)}.join(COMMA_SEPARATOR) + end + + CREATE_INDEX = 'CREATE INDEX %s ON %s (%s);'.freeze + CREATE_UNIQUE_INDEX = 'CREATE UNIQUE INDEX %s ON %s (%s);'.freeze + INDEX_NAME = '%s_%s_index'.freeze + UNDERSCORE = '_'.freeze + + def self.index_definition(table_name, index) + fields = index[:columns].join(COMMA_SEPARATOR) + index_name = index[:name] || INDEX_NAME % + [table_name, index[:columns].join(UNDERSCORE)] + (index[:unique] ? 
CREATE_UNIQUE_INDEX : CREATE_INDEX) % + [index_name, table_name, fields] + end + + def self.create_indexes_sql(table_name, indexes) + indexes.map {|i| index_definition(table_name, i)}.join + end + + CREATE_TABLE = "CREATE TABLE %s (%s);".freeze + + def self.create_table_sql(name, columns, indexes = nil) + sql = CREATE_TABLE % [name, create_table_column_list(columns)] + sql << create_indexes_sql(name, indexes) if indexes && !indexes.empty? + sql + end + + DROP_TABLE = "DROP TABLE %s CASCADE;".freeze + + def self.drop_table_sql(name) + DROP_TABLE % name + end + + class Generator + attr_reader :table_name + + def initialize(table_name, &block) + @table_name = table_name + @primary_key = {:name => :id, :type => :serial, :primary_key => true} + @columns = [] + @indexes = [] + instance_eval(&block) + end + + def primary_key(name, type = nil, opts = nil) + @primary_key = { + :name => name, + :type => type || :serial, + :primary_key => true + }.merge(opts || {}) + end + + def primary_key_name + @primary_key && @primary_key[:name] + end + + def column(name, type, opts = nil) + @columns << {:name => name, :type => type}.merge(opts || {}) + end + + def foreign_key(name, opts) + @columns << {:name => name, :type => :integer}.merge(opts || {}) + end + + def has_column?(name) + @columns.each {|c| return true if c[:name] == name} + false + end + + def index(columns, opts = nil) + columns = [columns] unless columns.is_a?(Array) + @indexes << {:columns => columns}.merge(opts || {}) + end + + def create_sql + if @primary_key && !has_column?(@primary_key[:name]) + @columns.unshift(@primary_key) + end + Schema.create_table_sql(@table_name, @columns, @indexes) + end + + def drop_sql + Schema.drop_table_sql(@table_name) + end + end + + attr_reader :instructions + + def initialize(&block) + @instructions = [] + instance_eval(&block) if block + end + + def create_table(table_name, &block) + @instructions << Generator.new(table_name, &block) + end + + def create(db) + @instructions.each do 
|s| + db.execute(s.create_sql) + end + end + + def drop(db) + @instructions.reverse_each do |s| + db.execute(s.drop_sql) if db.table_exists?(s.table_name) + end + end + + def recreate(db) + drop(db) + create(db) + end + end +end + diff --git a/lib/sequel/sqlite.rb b/lib/sequel/sqlite.rb new file mode 100644 index 0000000000..5720d3bacc --- /dev/null +++ b/lib/sequel/sqlite.rb @@ -0,0 +1,111 @@ +require 'sqlite3' +require 'metaid' + +module ServerSide + module SQLite + class Database < ServerSide::Database + attr_reader :pool + + def initialize(opts = {}) + super + @pool = ServerSide::ConnectionPool.new(@opts[:max_connections] || 4) do + db = SQLite3::Database.new(@opts[:database]) + db.type_translation = true + db + end + end + + def query(opts = nil) + SQLite::Dataset.new(self, opts) + end + + def tables + # return a list of tables + end + + def execute(sql) + @pool.hold {|conn| conn.execute(sql)} + end + + def execute_insert(sql) + @pool.hold {|conn| conn.execute(sql); conn.last_insert_row_id} + end + + def single_value(sql) + @pool.hold {|conn| conn.get_first_value(sql)} + end + + def result_set(sql, record_class, &block) + @pool.hold do |conn| + conn.query(sql) do |result| + columns = result.columns + column_count = columns.size + result.each do |values| + row = {} + column_count.times {|i| row[columns[i].to_sym] = values[i]} + block.call(record_class ? record_class.new(row) : row) + end + end + end + end + + def synchronize(&block) + @pool.hold(&block) + end + + def transaction(&block) + @pool.hold {|conn| conn.transaction(&block)} + end + + def table_exists?(name) + end + end + + class Dataset < ServerSide::Dataset + def each(opts = nil, &block) + @db.result_set(select_sql(opts), @record_class, &block) + self + end + + LIMIT_1 = {:limit => 1}.freeze + + def first(opts = nil) + opts = opts ? 
opts.merge(LIMIT_1) : LIMIT_1 + @db.result_set(select_sql(opts), @record_class) {|r| return r} + end + + def last(opts = nil) + raise RuntimeError, 'No order specified' unless + @opts[:order] || (opts && opts[:order]) + + opts = {:order => reverse_order(@opts[:order])}. + merge(opts ? opts.merge(LIMIT_1) : LIMIT_1) + @db.result_set(select_sql(opts), @record_class) {|r| return r} + end + + def count(opts = nil) + @db.single_value(count_sql(opts)).to_i + end + + def insert(values = nil, opts = nil) + @db.synchronize do + @db.execute_insert insert_sql(values, opts) + end + end + + def update(values, opts = nil) + @db.synchronize do + @db.execute update_sql(values, opts) + end + self + end + + def delete(opts = nil) + @db.synchronize do + @db.execute delete_sql(opts) + end + self + end + end + end +end