From a65cdf15f0d585399b8f48eed568208a67fb8563 Mon Sep 17 00:00:00 2001
From: sujimichi
Date: Sun, 27 May 2012 16:47:57 +0100
Subject: [PATCH] added fitness caching to GA. why had I not done that before!

---
 bootcamp.rb | 12 ++++++------
 darwin.rb   | 39 ++++++++++++++++++++++++++++++++++++---
 player.rb   | 42 +++++++++++++++++++----------------------
 3 files changed, 61 insertions(+), 32 deletions(-)

diff --git a/bootcamp.rb b/bootcamp.rb
index a5a7f82..be1a243 100644
--- a/bootcamp.rb
+++ b/bootcamp.rb
@@ -245,13 +245,12 @@ def initialize n_layers = 2
     @target_score = 842
     set_config_for n_layers
     reset_high_score
+    @fitness_cache = {}
 
-    @ga =MGA.new(:generations => 5000, :mutation_rate => 2, :gene_length => @gene_length, :fitness => Proc.new{|genome, gen|
+    @ga =MGA.new(:generations => 5000, :mutation_rate => 2, :gene_length => @gene_length, :cache_fitness => true, :fitness => Proc.new{|genome, gen|
       puts "#{gen}\n"
-
       genome_file = "./genome"
-      File.open(genome_file,'w'){|f| f.write( genome.join(",") )}
-
+      File.open(genome_file,'w'){|f| f.write( genome.join(",") )}
       score_sum = 0
       threads = []
       levels = [1,2,3,4,5,6,7,8,9]
@@ -266,6 +265,7 @@ def initialize n_layers = 2
       end
       threads.each{|t| t.join}
       score_sum = levels.map{|i| instance_variable_get("@ans#{i}")}.compact.sum
+
       puts "\n\t==Summed Score #{score_sum}"
       remark_on score_sum
       puts "."
@@ -290,7 +290,7 @@ def initialize n_layers =2
     rootdir = "/home/sujimichi/coding/lab/rubywarrior"
 
-    @ga =MGA.new(:generations => 5000, :mutation_rate => 2, :gene_length => @gene_length, :fitness => Proc.new{|genome, gen|
+    @ga =MGA.new(:generations => 5000, :mutation_rate => 2, :gene_length => @gene_length, :cache_fitness => true, :fitness => Proc.new{|genome, gen|
      puts "#{gen}\n"
 
      Dir.chdir(rootdir)
@@ -299,7 +299,7 @@ def initialize n_layers =2
      puts "\n\n"
 
      threads = []
-      levels.sort_by{rand}.each do |lvl|
+      levels.each do |lvl|
        Dir.chdir("#{rootdir}/level#{lvl}bot-beginner")
        File.open("./genome", 'w'){|f| f.write( genome.join(",") )} #write the genome to file which Player will use
diff --git a/darwin.rb b/darwin.rb
index 2e8678d..e68be9f 100644
--- a/darwin.rb
+++ b/darwin.rb
@@ -1,6 +1,8 @@
 #Micro Genetic Algorithm - slight variation on https://github.com/Sujimichi/micro_ga
 class MGA
-  attr_accessor :population, :generations, :mutation_rate, :cross_over_rate, :current_generation, :popsize
+  require 'digest'
+
+  attr_accessor :population, :generations, :mutation_rate, :cross_over_rate, :current_generation, :popsize, :scores
   def initialize args = {}
     @popsize = args[:popsize] || 30 #Number of members (genomes) in the population
     @gene_length = args[:gene_length] || 10 #Number of bit (genes) in a genome
@@ -11,6 +13,7 @@ def initialize args = {}
     @fitness_function = args[:fitness] || Proc.new{|genome| genome.inject{|i,j| i+j} } #init fitness function or use simple max ones
     @current_generation = 0
     @scores = {}
+    @cache_fitness = args[:cache_fitness] || false
   end
 
   def evolve generations = @generations
@@ -29,14 +32,44 @@ def pos_mutate n
     n + (rand - 0.5) #plus or minus small value. || (n-1).abs #for binary mutation; 1 -> 0, 0 -> 1
   end
 
   def fitness genome
-    @fitness_function.call(genome, @current_generation)
+    return @fitness_function.call(genome, @current_generation) unless @cache_fitness #return fitness as normal if caching is off
+    @scores[genome] = @fitness_function.call(genome, @current_generation) unless @scores[genome] #update cache if value not present
+    @scores[genome] #return cached value
   end
+
   def ordered_population
     population.sort_by{|member| fitness(member)}.reverse
   end
+
   def best
     ordered_population.first
   end
-
 end
+
+=begin
+def cache_test
+
+  f = Proc.new{|genome| print'.';sleep(0.05); genome.inject{|i,j| i+j} }
+  pop = Array.new(30){ Array.new(10){ 0 } }
+  g1 = MGA.new(:cache_fitness => false, :generations => 5000, :fitness => f)
+  g2 = MGA.new(:cache_fitness => true,  :generations => 5000, :fitness => f)
+  g1.population = pop
+  g2.population = pop
+
+  ave1 = g1.population.map{|g| g1.fitness g}.inject{|i,j| i+j} / g1.population.size
+  ave2 = g2.population.map{|g| g2.fitness g}.inject{|i,j| i+j} / g2.population.size
+  puts [ave1, ave2].inspect
+
+  t1_1 = Time.now;g1.evolve; t1_2 = Time.now;
+  t2_1 = Time.now;g2.evolve; t2_2 = Time.now;
+  t1 = t1_2 - t1_1
+  t2 = t2_2 - t2_1
+
+  ave1 = g1.population.map{|g| g1.fitness g}.inject{|i,j| i+j} / g1.population.size
+  ave2 = g2.population.map{|g| g2.fitness g}.inject{|i,j| i+j} / g2.population.size
+  puts [ave1, ave2].inspect
+  puts [t1, t2].inspect
+
+
+end
+=end
diff --git a/player.rb b/player.rb
index 709c5aa..ba3977c 100644
--- a/player.rb
+++ b/player.rb
@@ -3,21 +3,17 @@ class Player
   def initialize
     genome = File.open("./genome", "r"){|f| f.readlines}.join.split(",").map{|s| s.to_f} #Read genome from file.
-    nodes = {:in => 16, :inner => 6, :out => 8} #nodes = {:in => 15, :inner => 8, :inner2 => 8, :out => 5} || #nodes = {:in => 15, :out => 5}
-    @brain = Brains::R2D2.new(nodes, genome)
+    nodes = {:in => 16, :inner => 6, :out => 8} #3layernodes = {:in => 15, :inner => 8, :inner2 => 8, :out => 5} || #1layernodes = {:in => 15, :out => 5}
+    @brain = Brains::R2D2.new(nodes, genome) #Initialize warrior's brain (neural net)
   end
 
   def play_turn(warrior)
     @previous_health ||= 20
-
-    #Sense world and present as an array of inputs for NN
-    inputs = input_array_for(warrior)
-
-    #send inputs to neural network and interpret its output as :action and :impulse
-    action, impulse = @brain.act_on(inputs)
-    puts [inputs, action, impulse].inspect
+    inputs = input_array_for(warrior) #Sense world and present as an array of inputs for NN
+    action, impulse = @brain.act_on(inputs) #send inputs to neural network and interpret its output as :action and :impulse
+    puts [inputs, action, impulse].inspect #what's on its mind?
 
-    #send 'impulse' and 'action' from brain to warrior. done inside rescue as brain may request actions the body can't yet do, like rest! in the eariler levels.
+    #send 'action' and 'impulse' from brain to warrior. done inside rescue as brain may request actions the body can't yet do, like rest! in the earlier levels.
     #no need to program which actions are allowed, evolution will work it out for itself. Yes creationists, this shit actually works!
     #Once evolved the brain will 'know' what its body is capable of and the rescue should not be needed.
     begin
@@ -30,16 +26,18 @@ def play_turn(warrior)
 
   #sense the world and return info as an array of inputs for the NN
   def input_array_for warrior
-    dirs = [:left, :forward, :right, :backward]
-    things = [:wall, :enemy, :captive]#, :stairs, :ticking, :golem]
+    dirs = [:left, :forward, :right, :backward] #directions in which things can be
+    things = [:wall, :enemy, :captive] #types of things there can be
     vis_scale = [0, 0.6, 0.3] #used to scale the values returned by :look.
-    if warrior.respond_to?(:feel)
-      inputs = things.map do |thing|
-        dirs.map do |dir|
-          v = (warrior.feel(dir).send("#{thing}?").eql?(true) ? 1 : 0)
-          if warrior.respond_to?(:look)
-            look = warrior.look(dir)
+    if warrior.respond_to?(:feel)
+      can_look = warrior.respond_to?(:look)
+      inputs = things.map do |thing| #for each of the things
+        dirs.map do |dir|            #in each of the directions
+          v = (warrior.feel(dir).send("#{thing}?").eql?(true) ? 1 : 0) #test if that thing is there, returning 1 for true else 0
+          if can_look                #if warrior can also look
+            look = warrior.look(dir) #look in direction
+            #reduce to a single val from given 3 ie [0,1,1] => [0, 0.6, 0.3] => [0.6]
             v = v + look.map{|l| (l.send("#{thing}?").eql?(true) ? 1 : 0) * vis_scale[look.index(l)] }.max
           end
           v
@@ -48,9 +46,8 @@ def input_array_for warrior
     else
       #in the first level the warrior has less sensory input than a sea sponge. No sensory input means no neural activity.
      #So when warrior does not respond to :feel it 'imagines' that its in an empty corridor!
-      inputs = [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+      inputs = [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] #inputs for empty corridor.
     end
-
     #give the NN sense of whether it is armed or not.
     inputs << (warrior.respond_to?(:shoot!) ? 1 : 0)
 
@@ -59,8 +56,7 @@ def input_array_for warrior
     w_health = warrior.respond_to?(:health) ? warrior.health : 20
     inputs << (1 - 1.0/20 * w_health).round(2)
     inputs << ((@previous_health > w_health) ? 1 : 0) #sense of health dropping
-    inputs << 1 #representational bias. yeah, I should prob explain this! its REALLY important!
-
-    inputs.flatten
+    inputs << 1 #representational bias. yeah, I should prob explain this! it's REALLY important!
+    inputs.flatten #return array of values.
   end
 end
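
A minimal sketch of how the new :cache_fitness option behaves, based only on the MGA code in the darwin.rb hunks above. The counting proc, the genome values and the require_relative path are illustrative assumptions, not part of this commit. With caching on, repeated #fitness calls for an identical genome run the fitness proc once and then read @scores; a mutated genome is a different array, hence a different hash key, so it still gets a fresh evaluation. Note the cache keys on the genome array itself; the added require 'digest' is not used by the cache yet.

require_relative 'darwin'  #assumes this script sits next to darwin.rb

calls = 0
slow_fitness = Proc.new{|genome, gen| calls += 1; genome.inject{|i,j| i+j} } #stand-in for a costly rubywarrior run

ga = MGA.new(:gene_length => 10, :cache_fitness => true, :fitness => slow_fitness)

genome = Array.new(10){ 1 }
ga.fitness(genome)              #first call evaluates the proc and stores the score in @scores
ga.fitness(genome)              #second call returns the cached value, proc is not re-run
puts calls                      # => 1
puts ga.scores[genome].inspect  # => 10

ga.fitness(Array.new(10){ 0 })  #a different genome is a new hash key, so it is evaluated
puts calls                      # => 2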