Merge branch 'feature/1.0.9'
jedld committed Dec 26, 2020
2 parents 64d7943 + d41d2ee commit f056661
Showing 29 changed files with 2,940 additions and 2,880 deletions.
14 changes: 14 additions & 0 deletions lib/tensor_stream/evaluator/base_evaluator.rb
@@ -228,11 +228,25 @@ def self.evaluators

   def self.register_evaluator(klass, name, index = 0)
     @evaluators ||= {}
+    @storage_managers ||= {}
     @evaluators[name] = {name: name, class: klass, index: index}
+    @storage_managers[klass] = klass.get_storage_manager
   end
 
   def self.default_evaluators
     evaluators.values.sort_by { |v| v[:index] }.reverse.map { |v| v[:class] }
   end
+
+  def self.clear_storages(graph)
+    @storage_managers.values.each { |manager| manager.clear_variables(graph) }
+  end
+
+  def self.read_variable(graph, name)
+    @storage_managers.values.each do |manager|
+      return manager.read_value(graph, name) if manager.exists?(graph, name)
+    end
+
+    nil
+  end
 end
 end
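
The storage-manager registry added here decouples variable storage from individual evaluators: each evaluator class is expected to expose a get_storage_manager class method at registration time, and read_variable falls through every registered manager until one reports the variable as existing, returning nil otherwise. A minimal sketch of that contract, assuming the usual TensorStream::Evaluator::BaseEvaluator namespace and a hypothetical MyEvaluator (illustrative names, not part of this commit):

    # Hypothetical evaluator wiring: register_evaluator stores the manager
    # returned by get_storage_manager, keyed by the evaluator class.
    class MyEvaluator < TensorStream::Evaluator::BaseEvaluator
      def self.get_storage_manager
        TensorStream::RubyStorageManager.current_storage_manager
      end
    end

    TensorStream::Evaluator::BaseEvaluator.register_evaluator(MyEvaluator, :my_evaluator, 1)

    # Fall-through read: the first manager holding the variable for this
    # graph wins; nil if none of them do.
    TensorStream::Evaluator::BaseEvaluator.read_variable(graph, "w1")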
1 change: 1 addition & 0 deletions lib/tensor_stream/evaluator/evaluator.rb
@@ -1,5 +1,6 @@
require "tensor_stream/evaluator/ruby_evaluator"
require "tensor_stream/evaluator/buffer"
require "tensor_stream/evaluator/evaluator_utils"

module TensorStream
module Evaluator
20 changes: 20 additions & 0 deletions lib/tensor_stream/evaluator/evaluator_utils.rb
@@ -0,0 +1,20 @@
module TensorStream
  class EvaluatorUtils
    extend TensorStream::StringHelper

    def self.get_evaluator_classes(evaluators)
      @evaluator_classes ||= if evaluators.is_a?(Array)
        if evaluators.empty?
          TensorStream::Evaluator.default_evaluators
        else
          evaluators.collect { |name| Object.const_get("TensorStream::Evaluator::#{camelize(name.to_s)}") }
        end
      elsif evaluators.nil?
        TensorStream::Evaluator.default_evaluators
      else
        [Object.const_get("TensorStream::Evaluator::#{camelize(evaluators.to_s)}")]
      end
      @evaluator_classes
    end
  end
end
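
EvaluatorUtils centralizes name-to-class resolution for evaluator selection: nil or an empty array falls back to Evaluator.default_evaluators, while names are camelized and looked up as constants under TensorStream::Evaluator. Note that @evaluator_classes ||= caches the first resolution for the life of the process, so later calls with a different argument return the originally cached list. A usage sketch (return values are illustrative):

    # nil or [] resolve to the defaults, typically the Ruby evaluator.
    TensorStream::EvaluatorUtils.get_evaluator_classes(nil)
    # => [TensorStream::Evaluator::RubyEvaluator]

    # A name such as :ruby_evaluator is camelized to "RubyEvaluator"
    # and fetched with Object.const_get.
    TensorStream::EvaluatorUtils.get_evaluator_classes([:ruby_evaluator])
    # => [TensorStream::Evaluator::RubyEvaluator]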
54 changes: 25 additions & 29 deletions lib/tensor_stream/evaluator/ruby/nn_ops.rb
@@ -7,42 +7,39 @@ def self.included(klass)
           target_var, learning_rate, delta = inputs
           assign = tensor.inputs[0] || tensor
 
-          assign.container = process_vector_math_op(tensor, target_var, delta, context) { |t, u| t - u * learning_rate }
-          assign.container
+          var_assign_value(assign, process_vector_math_op(tensor, target_var, delta, context) { |t, u| t - u * learning_rate })
         end
 
         register_op :apply_momentum do |_context, tensor, inputs|
           target_var, momentum_var, learning_rate, grad, momentum = inputs
           assign = tensor.inputs[0] || tensor
           assign_acc = tensor.inputs[1]
-          assign_acc.container = multi_array_op(->(t, u) { t * momentum + u }, momentum_var, grad)
-          assign.container = if tensor.options[:use_nesterov]
-            multi_array_op(->(v, g, acc) { v - (g * learning_rate + acc * momentum * learning_rate) }, target_var, grad, momentum_var)
-          else
-            multi_array_op(->(v, acc) { v - acc * learning_rate }, target_var, momentum_var)
-          end
-
-          assign.container
+          var_assign_value(assign_acc, multi_array_op(->(t, u) { t * momentum + u }, momentum_var, grad))
+          var = if tensor.options[:use_nesterov]
+            multi_array_op(->(v, g, acc) { v - (g * learning_rate + acc * momentum * learning_rate) }, target_var, grad, momentum_var)
+          else
+            multi_array_op(->(v, acc) { v - acc * learning_rate }, target_var, momentum_var)
+          end
+          var_assign_value(assign, var)
         end
 
         register_op :apply_adadelta do |_context, tensor, inputs|
           target_var, accum, accum_update, lr, rho, epsilon, grad = inputs
           assign = tensor.inputs[0] || tensor
           assign_acc = tensor.inputs[1]
           assign_acc_update = tensor.inputs[2]
-          assign_acc.container = multi_array_op(->(acc_t, grad_t) { acc_t * rho + (grad_t * grad_t) * (1.0 - rho) }, accum, grad)
-          update = multi_array_op(->(acc_update_t, acc_t, grad_t) { Math.sqrt(acc_update_t + epsilon) * (1.0 / Math.sqrt(acc_t + epsilon)) * grad_t }, accum_update, assign_acc.container, grad)
-          assign.container = multi_array_op(->(v, u) { v - (u * lr) }, target_var, update)
-          assign_acc_update.container = multi_array_op(->(acc_update_t, u) { acc_update_t * rho + (u * u) * (1.0 - rho) }, accum_update, update)
+          acc_val = var_assign_value(assign_acc, multi_array_op(->(acc_t, grad_t) { acc_t * rho + (grad_t * grad_t) * (1.0 - rho) }, accum, grad))
+          update = multi_array_op(->(acc_update_t, acc_t, grad_t) { Math.sqrt(acc_update_t + epsilon) * (1.0 / Math.sqrt(acc_t + epsilon)) * grad_t }, accum_update, acc_val, grad)
+          result = var_assign_value(assign, multi_array_op(->(v, u) { v - (u * lr) }, target_var, update))
+          var_assign_value(assign_acc_update, multi_array_op(->(acc_update_t, u) { acc_update_t * rho + (u * u) * (1.0 - rho) }, accum_update, update))
 
-          assign.container
+          result
         end
 
         register_op :apply_adagrad do |_context, tensor, inputs|
           target_var, accum, lr, grad = inputs
           assign = tensor.inputs[0] || tensor
-          assign.container = multi_array_op(->(v, a, g) { v - (g * lr * (1.0 / Math.sqrt(a))) }, target_var, accum, grad)
-          assign.container
+          var_assign_value(assign, multi_array_op(->(v, a, g) { v - (g * lr * (1.0 / Math.sqrt(a))) }, target_var, accum, grad))
         end
 
         register_op :apply_adam do |_context, tensor, inputs|
@@ -52,20 +49,19 @@ def self.included(klass)
           assign_m = tensor.inputs[1]
           assign_v = tensor.inputs[2]
 
-          assign_m.container = multi_array_op(->(u_d, g) { u_d + (g - u_d) * (1.0 - beta1_t) }, m, grad)
-          assign_v.container = multi_array_op(->(u_d, v_d) { u_d + (v_d**2 - u_d) * (1.0 - beta2_t) }, v, grad)
-          assign.container = multi_array_op(->(t, m_d, v_d) { t - ((m_d * alpha) / (Math.sqrt(v_d) + epsilon_t)) }, target_var, assign_m.container, assign_v.container)
-          assign.container
+          m_val = var_assign_value(assign_m, multi_array_op(->(u_d, g) { u_d + (g - u_d) * (1.0 - beta1_t) }, m, grad))
+          v_val = var_assign_value(assign_v, multi_array_op(->(u_d, v_d) { u_d + (v_d**2 - u_d) * (1.0 - beta2_t) }, v, grad))
+          var_assign_value(assign, multi_array_op(->(t, m_d, v_d) { t - ((m_d * alpha) / (Math.sqrt(v_d) + epsilon_t)) }, target_var, m_val, v_val))
         end
 
         register_op :apply_rms_prop do |_context, tensor, inputs|
           var, ms, mom, lr, rho, momentum, epsilon, grad = inputs
           assign = tensor.inputs[0]
           assign_ms = tensor.inputs[1]
           assign_mom = tensor.inputs[2]
-          assign_ms.container = multi_array_op(->(g, m) { m + (g * g - m) * (1.0 - rho) }, grad, ms)
-          assign_mom.container = multi_array_op(->(mom_t, g, m) { mom_t * momentum + (g * lr) / Math.sqrt(m + epsilon) }, mom, grad, assign_ms.container)
-          assign.container = multi_array_op(->(v, m) { v - m }, var, assign_mom.container)
+          ms_val = var_assign_value(assign_ms, multi_array_op(->(g, m) { m + (g * g - m) * (1.0 - rho) }, grad, ms))
+          mom_val = var_assign_value(assign_mom, multi_array_op(->(mom_t, g, m) { mom_t * momentum + (g * lr) / Math.sqrt(m + epsilon) }, mom, grad, ms_val))
+          var_assign_value(assign, multi_array_op(->(v, m) { v - m }, var, mom_val))
         end
 
         register_op :apply_centered_rms_prop do |_context, tensor, inputs|
@@ -75,11 +71,11 @@ def self.included(klass)
           assign_ms = tensor.inputs[2]
           assign_mom = tensor.inputs[3]
 
-          assign_ms.container = multi_array_op(->(g, m) { m + (g * g - m) * (1.0 - rho) }, grad, ms)
-          assign_mg.container = multi_array_op(->(g, mg_t) { (g - mg_t) * (1.0 - rho) }, grad, mg)
-          denom = multi_array_op(->(s, mg_t) { (s - mg_t * mg_t) + epsilon }, assign_ms.container, mg)
-          assign_mom.container = multi_array_op(->(mom_t, g, d) { mom_t * momentum + (g * lr) / Math.sqrt(d) }, mom, grad, denom)
-          assign.container = multi_array_op(->(v, m) { v - m }, var, assign_mom.container)
+          val_ms = var_assign_value(assign_ms, multi_array_op(->(g, m) { m + (g * g - m) * (1.0 - rho) }, grad, ms))
+          var_assign_value(assign_mg, multi_array_op(->(g, mg_t) { (g - mg_t) * (1.0 - rho) }, grad, mg))
+          denom = multi_array_op(->(s, mg_t) { (s - mg_t * mg_t) + epsilon }, val_ms, mg)
+          val_mom = var_assign_value(assign_mom, multi_array_op(->(mom_t, g, d) { mom_t * momentum + (g * lr) / Math.sqrt(d) }, mom, grad, denom))
+          var_assign_value(assign, multi_array_op(->(v, m) { v - m }, var, val_mom))
         end
 
         register_op %i[softmax_cross_entropy_with_logits_v2 softmax_cross_entropy_with_logits] do |_context, tensor, inputs|
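
Throughout these optimizer ops the refactor replaces direct writes to assign.container with var_assign_value, routing updates through the storage manager and returning the written value so it can feed later steps (as apply_adadelta does with acc_val). The helper itself is outside this hunk; a plausible sketch of its shape, inferred from the call sites here and the RubyStorageManager API below (hypothetical, not the commit's actual implementation):

    # Assumed helper: resolve the variable name from the tensor's options,
    # write through the storage manager, and return the value for chaining.
    def var_assign_value(tensor, value)
      var_name = tensor.options[:var_name]
      raise "no var_name set for tensor" if var_name.nil?

      storage_manager.assign_value(tensor.graph, var_name, value)
      value
    end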
40 changes: 40 additions & 0 deletions lib/tensor_stream/evaluator/ruby/storage_manager.rb
@@ -0,0 +1,40 @@
module TensorStream
  class RubyStorageManager
    def self.current_storage_manager
      @storage_manager ||= RubyStorageManager.new
    end

    def initialize
      @variables = {}
    end

    def exists?(graph, name)
      return false unless @variables.key?(graph.object_id)

      @variables[graph.object_id].key?(name.to_sym)
    end

    def create_variable(graph, name, value)
      raise "no name specified" if name.nil?

      @variables[graph.object_id] ||= {}
      @variables[graph.object_id][name.to_sym] = value
    end

    def assign_value(graph, name, value)
      raise "no name specified" if name.nil?

      @variables[graph.object_id] ||= {}
      @variables[graph.object_id][name.to_sym] = value
    end

    def read_value(graph, name)
      raise "no name specified" if name.nil?

      @variables[graph.object_id][name.to_sym]
    end

    def clear_variables(graph)
      @variables[graph.object_id] = {}
    end
  end
end
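
RubyStorageManager is a process-wide singleton that buckets variable values per graph, keyed by graph.object_id, so two graphs with identically named variables cannot collide. A minimal round trip against the API above, assuming graph is any graph object used consistently as the key:

    manager = TensorStream::RubyStorageManager.current_storage_manager

    manager.assign_value(graph, :w1, [[1.0, 2.0]])
    manager.exists?(graph, :w1)    # => true
    manager.read_value(graph, :w1) # => [[1.0, 2.0]]

    manager.clear_variables(graph) # wipes only this graph's bucket
    manager.exists?(graph, :w1)    # => false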
74 changes: 74 additions & 0 deletions lib/tensor_stream/evaluator/ruby/variable_ops.rb
@@ -0,0 +1,74 @@
module TensorStream
  ## Collection of variable-related ops
  module VariableOps
    def self.included(klass)
      klass.class_eval do
        register_op :variable_v2 do |_context, tensor, _inputs|
          value = var_read_value(tensor)
          raise "variable #{tensor.options[:var_name]} not initialized" if value.nil?

          value
        end

        register_op :assign do |_context, tensor, inputs|
          var_assign_value(tensor, inputs[0])
        end

        register_op :assign_add, no_eval: true do |_context, tensor, inputs|
          current_val = var_read_value(tensor)
          raise "variable #{tensor.options[:var_name]} not initialized" if current_val.nil?

          eval_a, eval_b = broadcast(current_val, inputs[0])
          result = multi_array_op(->(var, val) { var + val }, eval_a, eval_b)
          var_assign_value(tensor, result)
        end

        register_op :assign_sub do |_context, tensor, inputs|
          current_val = var_read_value(tensor)
          raise "variable #{tensor.options[:var_name]} not initialized" if current_val.nil?

          eval_a, eval_b = broadcast(current_val, inputs[0])
          result = multi_array_op(->(var, val) { var - val }, eval_a, eval_b)
          var_assign_value(tensor, result)
        end

        register_op :save_ts do |_context, tensor, inputs|
          outputfile = inputs[0]
          inputs = tensor.inputs.dup

          inputs.shift
          variables = {}
          inputs.each do |savable|
            val = var_read_value(savable)

            packed_data = Zlib::Deflate.deflate(TensorStream::Packer.pack(val, savable.data_type))
            variables[savable.options[:var_name]] = {
              "shape" => shape_eval(val),
              "data" => Base64.strict_encode64(packed_data),
            }
          end

          File.write(outputfile, {"variables" => variables}.to_yaml)
          nil
        end

        register_op :restore_ts do |_context, tensor, inputs|
          inputs = inputs.dup
          filename = inputs.shift
          tensor_names = inputs

          input_dump = YAML.safe_load(File.read(filename), [Symbol])
          vars = tensor.graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
          vars.select! { |v| input_dump["variables"].key?(v.name) && tensor_names.include?(v.name) }
          vars.each do |variable|
            data = TensorStream::Packer.unpack(Zlib::Inflate.inflate(Base64.decode64(input_dump["variables"][variable.name]["data"])), variable.data_type)
            shape = input_dump["variables"][variable.name]["shape"]
            variable.buffer = nil
            var_assign_value(variable, TensorShape.reshape(data, shape))
          end

          nil
        end
      end
    end
  end
end
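
The :save_ts and :restore_ts ops give the Ruby evaluator a plain-YAML checkpoint format: each variable is stored under its var_name with its shape and its packed values, Zlib-deflated and then Base64-encoded. Reconstructed from the code above, a saved file looks roughly like the comment below, and one entry can be decoded by hand along the same path :restore_ts takes ("model.ts", the variable name, and the data string are all illustrative):

    # ---
    # variables:
    #   w1:
    #     shape: [2, 2]
    #     data: eJxjYGAAAAAEAAE=   # Base64 of Zlib-deflated packed values
    require "yaml"
    require "zlib"
    require "base64"

    dump = YAML.safe_load(File.read("model.ts"), [Symbol])
    entry = dump["variables"]["w1"]
    packed = Zlib::Inflate.inflate(Base64.decode64(entry["data"]))
    # TensorStream::Packer.unpack(packed, :float32) yields the flat values,
    # and TensorShape.reshape(values, entry["shape"]) restores the nesting.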
