cleaning up old files that babel has generalized support for

commit 2828d944c8f5dda5043c788cfcfd93a0e6c466fc 1 parent 9fc3d25
shatterednirvana authored
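
This commit removes the Neptune job helpers (Erlang, Go, R, cewSSA, DFSP, SSA), their tests, supporting scripts, and a patch file, since babel now provides generalized support for those job types, and it decouples AppController/lib/helperfunctions.rb from the Djinn class: calls to Djinn.log_debug become Kernel.puts, and calls to Djinn.log_run go through the module's own shell helper. A minimal sketch of the substitution pattern, using hypothetical names (HelperSketch, wait_message, setup_dir) and assuming the shell helper is a thin wrapper around backticks:

    # Illustrative sketch only; not the project's actual HelperFunctions module.
    module HelperSketch
      # Hypothetical stand-in for the module's shell helper; assumed to wrap backticks.
      def self.shell(cmd)
        `#{cmd}`
      end

      def self.wait_message(ip, port)
        # was: Djinn.log_debug("Waiting on #{ip}:#{port} to be open (currently closed).")
        Kernel.puts("Waiting on #{ip}:#{port} to be open (currently closed).")
      end

      def self.setup_dir(tar_dir)
        # was: Djinn.log_run("mkdir -p #{tar_dir}")
        shell("mkdir -p #{tar_dir}")
      end
    end

    HelperSketch.wait_message("127.0.0.1", 8080)
    HelperSketch.setup_dir("/tmp/example-app")

The net effect is that helperfunctions.rb no longer requires 'djinn' and handles its own logging and shell calls.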
159 AppController/lib/helperfunctions.rb
@@ -15,10 +15,6 @@
require 'user_app_client'
-$:.unshift File.join(File.dirname(__FILE__), "..")
-require 'djinn'
-
-
# BadConfigurationExceptions represent an exception that can be thrown by the
# AppController or any other library it uses, if a method receives inputs
# it isn't expecting.
@@ -35,11 +31,8 @@ module HelperFunctions
VER_NUM = "1.5"
-
- GOROOT = "#{APPSCALE_HOME}/AppServer/goroot/"
-
-
- GOBIN = "#{GOROOT}/bin/"
+
+ APPSCALE_HOME = ENV['APPSCALE_HOME']
# The maximum amount of time, in seconds, that we are willing to wait for
@@ -163,7 +156,7 @@ def self.sleep_until_port_is_open(ip, port, use_ssl=DONT_USE_SSL)
sleep_time *= 2
end
- Djinn.log_debug("Waiting on #{ip}:#{port} to be open (currently closed).")
+ Kernel.puts("Waiting on #{ip}:#{port} to be open (currently closed).")
}
end
@@ -178,7 +171,7 @@ def self.sleep_until_port_is_closed(ip, port, use_ssl=DONT_USE_SSL)
sleep_time *= 2
end
- Djinn.log_debug("Waiting on #{ip}:#{port} to be closed (currently open).")
+ Kernel.puts("Waiting on #{ip}:#{port} to be closed (currently open).")
}
end
@@ -209,7 +202,7 @@ def self.is_port_open?(ip, port, use_ssl=DONT_USE_SSL)
end
def self.run_remote_command(ip, command, public_key_loc, want_output)
- Djinn.log_debug("ip is [#{ip}], command is [#{command}], public key is [#{public_key_loc}], want output? [#{want_output}]")
+ Kernel.puts("ip is [#{ip}], command is [#{command}], public key is [#{public_key_loc}], want output? [#{want_output}]")
public_key_loc = File.expand_path(public_key_loc)
remote_cmd = "ssh -i #{public_key_loc} -o StrictHostkeyChecking=no root@#{ip} '#{command} "
@@ -221,7 +214,7 @@ def self.run_remote_command(ip, command, public_key_loc, want_output)
remote_cmd << "> /dev/null &' &"
end
- Djinn.log_debug("Running [#{remote_cmd}]")
+ Kernel.puts("Running [#{remote_cmd}]")
if want_output
return `#{remote_cmd}`
@@ -235,9 +228,9 @@ def self.scp_file(local_file_loc, remote_file_loc, target_ip, private_key_loc)
private_key_loc = File.expand_path(private_key_loc)
`chmod 0600 #{private_key_loc}`
local_file_loc = File.expand_path(local_file_loc)
- retval_file = File.expand_path("#{APPSCALE_HOME}/.appscale/retval-#{rand()}")
+ retval_file = "/etc/appscale/retval-#{Kernel.rand()}"
cmd = "scp -i #{private_key_loc} -o StrictHostkeyChecking=no 2>&1 #{local_file_loc} root@#{target_ip}:#{remote_file_loc}; echo $? > #{retval_file}"
- #Djinn.log_debug(cmd)
+ #Kernel.puts(cmd)
scp_result = `#{cmd}`
loop {
@@ -250,7 +243,7 @@ def self.scp_file(local_file_loc, remote_file_loc, target_ip, private_key_loc)
fails = 0
loop {
break if retval == "0"
- Djinn.log_debug("\n\n[#{cmd}] returned #{retval} instead of 0 as expected. Will try to copy again momentarily...")
+ Kernel.puts("\n\n[#{cmd}] returned #{retval} instead of 0 as expected. Will try to copy again momentarily...")
fails += 1
abort("SCP failed") if fails >= 5
sleep(2)
@@ -258,7 +251,7 @@ def self.scp_file(local_file_loc, remote_file_loc, target_ip, private_key_loc)
retval = (File.open(retval_file) { |f| f.read }).chomp
}
- #Djinn.log_debug(scp_result)
+ #Kernel.puts(scp_result)
`rm -fv #{retval_file}`
end
@@ -307,15 +300,15 @@ def self.setup_app(app_name, untar=true)
tar_path = "#{tar_dir}#{app_name}.tar.gz"
#Kernel.system "rm -rf #{tar_dir}"
- Djinn.log_run("mkdir -p #{tar_dir}")
- Djinn.log_run("mkdir -p #{meta_dir}/log")
- Djinn.log_run("cp #{APPSCALE_HOME}/AppLoadBalancer/public/404.html #{meta_dir}")
- Djinn.log_run("touch #{meta_dir}/log/server.log")
+ self.shell("mkdir -p #{tar_dir}")
+ self.shell("mkdir -p #{meta_dir}/log")
+ self.shell("cp #{APPSCALE_HOME}/AppLoadBalancer/public/404.html #{meta_dir}")
+ self.shell("touch #{meta_dir}/log/server.log")
#tar_file = File.open(tar_path, "w")
#decoded_tar = Base64.decode64(encoded_app_tar)
#tar_file.write(decoded_tar)
#tar_file.close
- Djinn.log_run("tar --file #{tar_path} --force-local -C #{tar_dir} -zx") if untar
+ self.shell("tar --file #{tar_path} --force-local -C #{tar_dir} -zx") if untar
end
# Returns pid if successful, -1 if not
@@ -328,11 +321,11 @@ def self.run_app(app_name, port, db_location, public_ip, private_ip, app_version
if app_language == "python"
if File.exist?("/var/apps/#{app_name}/app/app.yaml") == false
- Djinn.log_debug("The #{app_name} application was missing a app.yaml")
+ Kernel.puts("The #{app_name} application was missing a app.yaml")
return -1
end
- Djinn.log_debug("saw a python app coming through")
+ Kernel.puts("saw a python app coming through")
env_vars['MY_IP_ADDRESS'] = public_ip
env_vars['MY_PORT'] = port
env_vars['APPNAME'] = app_name
@@ -359,11 +352,11 @@ def self.run_app(app_name, port, db_location, public_ip, private_ip, app_version
stop_cmd = "ps ax | grep #{start_cmd} | grep -v grep | awk '{ print $1 }' | xargs -d '\n' kill -9"
elsif app_language == "java"
if File.exist?("/var/apps/#{app_name}/app/war/WEB-INF/web.xml") == false and File.exist?("/var/apps/#{app_name}/app/app.yaml") == false
- Djinn.log_debug("The #{app_name} application was missing a web.xml or app.yaml file")
+ Kernel.puts("The #{app_name} application was missing a web.xml or app.yaml file")
return -1
end
- Djinn.log_debug("saw a java app coming through")
+ Kernel.puts("saw a java app coming through")
`cp #{APPSCALE_HOME}/AppServer_Java/appengine-java-sdk-repacked/lib/user/*.jar /var/apps/#{app_name}/app/war/WEB-INF/lib/`
`cp #{APPSCALE_HOME}/AppServer_Java/appengine-java-sdk-repacked/lib/user/orm/*.jar /var/apps/#{app_name}/app/war/WEB-INF/lib/`
start_cmd = ["cd #{APPSCALE_HOME}/AppServer_Java &&",
@@ -382,7 +375,7 @@ def self.run_app(app_name, port, db_location, public_ip, private_ip, app_version
].join(' ')
stop_cmd = "ps ax | grep #{start_cmd} | grep -v grep | awk '{ print $1 }' | xargs -d '\n' kill -9"
else
- Djinn.log_debug("Currently we only support python, go, and java applications, not #{app_language}.")
+ Kernel.puts("Currently we only support python, go, and java applications, not #{app_language}.")
end
env_vars['APPSCALE_HOME'] = APPSCALE_HOME
@@ -391,7 +384,7 @@ def self.run_app(app_name, port, db_location, public_ip, private_ip, app_version
HelperFunctions.sleep_until_port_is_open(HelperFunctions.local_ip, port)
pid = `lsof -t -i :#{port}`
- Djinn.log_debug("Started app #{app_name} with pid #{pid}")
+ Kernel.puts("Started app #{app_name} with pid #{pid}")
return pid
end
@@ -402,9 +395,9 @@ def self.run_app(app_name, port, db_location, public_ip, private_ip, app_version
def self.stop_app(app_name, port)
watch = "appscale-" + app_name + "-" + port.to_s
GodInterface.stop(watch)
- Djinn.log_debug("Stopped #{watch} process via god.")
+ Kernel.puts("Stopped #{watch} process via god.")
GodInterface.remove(watch)
- Djinn.log_debug("Stopped watching #{watch} via god.")
+ Kernel.puts("Stopped watching #{watch} via god.")
end
@@ -423,7 +416,7 @@ def self.convert_fqdn_to_ip(host)
ip = `dig #{host} +short`.chomp
if ip.empty?
- Djinn.log_debug("couldn't use dig to resolve [#{host}]")
+ Kernel.puts("couldn't use dig to resolve [#{host}]")
abort("Couldn't convert #{host} to an IP address. Result of dig was \n#{ip}")
end
@@ -442,8 +435,8 @@ def self.get_ips(ips)
end
}
- Djinn.log_debug("Reported Public IPs: [#{reported_public.join(', ')}]")
- Djinn.log_debug("Reported Private IPs: [#{reported_private.join(', ')}]")
+ Kernel.puts("Reported Public IPs: [#{reported_public.join(', ')}]")
+ Kernel.puts("Reported Private IPs: [#{reported_private.join(', ')}]")
actual_public = []
actual_private = []
@@ -468,7 +461,7 @@ def self.get_ips(ips)
# this can happen if the private ip doesn't resolve
# which can happen in hybrid environments: euca boxes wont be
# able to resolve ec2 private ips, and vice-versa in euca-managed-mode
- Djinn.log_debug("rescued! failed to convert #{actual_private[index]} to public")
+ Kernel.puts("rescued! failed to convert #{actual_private[index]} to public")
actual_private[index] = actual_public[index]
end
}
@@ -488,8 +481,8 @@ def self.get_public_ips(ips)
end
}
- Djinn.log_debug("Reported Public IPs: [#{reported_public.join(', ')}]")
- Djinn.log_debug("Reported Private IPs: [#{reported_private.join(', ')}]")
+ Kernel.puts("Reported Public IPs: [#{reported_public.join(', ')}]")
+ Kernel.puts("Reported Private IPs: [#{reported_private.join(', ')}]")
public_ips = []
reported_public.each_index { |index|
@@ -522,7 +515,7 @@ def self.get_optimal_spot_price(instance_type)
average /= prices.length
plus_twenty = average * 1.20
- Djinn.log_debug("The average spot instance price for a #{instance_type} " +
+ Kernel.puts("The average spot instance price for a #{instance_type} " +
"machine is $#{average}, and 20% more is $#{plus_twenty}")
return plus_twenty
end
@@ -545,12 +538,12 @@ def self.set_creds_in_env(creds, cloud_num)
ENV['EC2_PRIVATE_KEY'] = "#{cloud_keys_dir}/mykey.pem"
ENV['EC2_CERT'] = "#{cloud_keys_dir}/mycert.pem"
- Djinn.log_debug("Setting private key to #{cloud_keys_dir}/mykey.pem, cert to #{cloud_keys_dir}/mycert.pem")
+ Kernel.puts("Setting private key to #{cloud_keys_dir}/mykey.pem, cert to #{cloud_keys_dir}/mycert.pem")
end
def self.spawn_hybrid_vms(creds, nodes)
info = "Spawning hybrid vms with creds #{self.obscure_creds(creds).inspect} and nodes #{nodes.inspect}"
- Djinn.log_debug(info)
+ Kernel.puts(info)
cloud_info = []
@@ -586,13 +579,13 @@ def self.spawn_hybrid_vms(creds, nodes)
this_cloud_info = self.spawn_vms(num_of_vms, jobs_needed, machine,
instance_type, keyname, cloud_type, cloud, group)
- Djinn.log_debug("Cloud#{cloud_num} reports the following info: #{this_cloud_info.join(', ')}")
+ Kernel.puts("Cloud#{cloud_num} reports the following info: #{this_cloud_info.join(', ')}")
cloud_info += this_cloud_info
cloud_num += 1
}
- Djinn.log_debug("Hybrid cloud spawning reports the following info: #{cloud_info.join(', ')}")
+ Kernel.puts("Hybrid cloud spawning reports the following info: #{cloud_info.join(', ')}")
return cloud_info
end
@@ -605,27 +598,27 @@ def self.spawn_vms(num_of_vms_to_spawn, job, image_id, instance_type, keyname,
return [] if num_of_vms_to_spawn < 1
ssh_key = File.expand_path("#{APPSCALE_HOME}/.appscale/keys/#{cloud}/#{keyname}.key")
- Djinn.log_debug("About to spawn VMs, expecting to find a key at #{ssh_key}")
+ Kernel.puts("About to spawn VMs, expecting to find a key at #{ssh_key}")
self.log_obscured_env
new_cloud = !File.exists?(ssh_key)
if new_cloud # need to create security group and key
- Djinn.log_debug("Creating keys/security group for #{cloud}")
+ Kernel.puts("Creating keys/security group for #{cloud}")
self.generate_ssh_key(ssh_key, keyname, infrastructure)
self.create_appscale_security_group(infrastructure, group)
else
- Djinn.log_debug("Not creating keys/security group for #{cloud}")
+ Kernel.puts("Not creating keys/security group for #{cloud}")
end
instance_ids_up = []
public_up_already = []
private_up_already = []
- Djinn.log_debug("[#{num_of_vms_to_spawn}] [#{job}] [#{image_id}] [#{instance_type}] [#{keyname}] [#{infrastructure}] [#{cloud}] [#{group}] [#{spot}]")
- Djinn.log_debug("EC2_URL = [#{ENV['EC2_URL']}]")
+ Kernel.puts("[#{num_of_vms_to_spawn}] [#{job}] [#{image_id}] [#{instance_type}] [#{keyname}] [#{infrastructure}] [#{cloud}] [#{group}] [#{spot}]")
+ Kernel.puts("EC2_URL = [#{ENV['EC2_URL']}]")
loop { # need to make sure ec2 doesn't return an error message here
describe_instances = `#{infrastructure}-describe-instances 2>&1`
- Djinn.log_debug("describe-instances says [#{describe_instances}]")
+ Kernel.puts("describe-instances says [#{describe_instances}]")
all_ip_addrs = describe_instances.scan(/\s+(#{IP_OR_FQDN})\s+(#{IP_OR_FQDN})\s+running\s+#{keyname}\s/).flatten
instance_ids_up = describe_instances.scan(/INSTANCE\s+(i-\w+)/).flatten
public_up_already, private_up_already = HelperFunctions.get_ips(all_ip_addrs)
@@ -642,22 +635,22 @@ def self.spawn_vms(num_of_vms_to_spawn, job, image_id, instance_type, keyname,
end
loop {
- Djinn.log_debug(command_to_run)
+ Kernel.puts(command_to_run)
run_instances = `#{command_to_run} 2>&1`
- Djinn.log_debug("run_instances says [#{run_instances}]")
+ Kernel.puts("run_instances says [#{run_instances}]")
if run_instances =~ /Please try again later./
- Djinn.log_debug("Error with run_instances: #{run_instances}. Will try again in a moment.")
+ Kernel.puts("Error with run_instances: #{run_instances}. Will try again in a moment.")
elsif run_instances =~ /try --addressing private/
- Djinn.log_debug("Need to retry with addressing private. Will try again in a moment.")
+ Kernel.puts("Need to retry with addressing private. Will try again in a moment.")
command_to_run << " --addressing private"
elsif run_instances =~ /PROBLEM/
- Djinn.log_debug("Error: #{run_instances}")
+ Kernel.puts("Error: #{run_instances}")
abort("Saw the following error message from EC2 tools. Please resolve the issue and try again:\n#{run_instances}")
else
- Djinn.log_debug("Run instances message sent successfully. Waiting for the image to start up.")
+ Kernel.puts("Run instances message sent successfully. Waiting for the image to start up.")
break
end
- Djinn.log_debug("sleepy time")
+ Kernel.puts("sleepy time")
sleep(5)
}
@@ -671,15 +664,15 @@ def self.spawn_vms(num_of_vms_to_spawn, job, image_id, instance_type, keyname,
end_time = Time.now + MAX_VM_CREATION_TIME
while (now = Time.now) < end_time
describe_instances = `#{infrastructure}-describe-instances`
- Djinn.log_debug("[#{Time.now}] #{end_time - now} seconds left...")
- Djinn.log_debug(describe_instances)
+ Kernel.puts("[#{Time.now}] #{end_time - now} seconds left...")
+ Kernel.puts(describe_instances)
# TODO: match on instance id
#if describe_instances =~ /terminated\s+#{keyname}\s+/
# terminated_message = "An instance was unexpectedly terminated. " +
# "Please contact your cloud administrator to determine why " +
# "and try again. \n#{describe_instances}"
- # Djinn.log_debug(terminated_message)
+ # Kernel.puts(terminated_message)
# abort(terminated_message)
#end
@@ -702,8 +695,8 @@ def self.spawn_vms(num_of_vms_to_spawn, job, image_id, instance_type, keyname,
potential_dead_ips.each_index { |index|
if potential_dead_ips[index] == "0.0.0.0"
instance_to_term = instance_ids[index]
- Djinn.log_debug("Instance #{instance_to_term} failed to get a public IP address and is being terminated.")
- Djinn.log_run("#{infrastructure}-terminate-instances #{instance_to_term}")
+ Kernel.puts("Instance #{instance_to_term} failed to get a public IP address and is being terminated.")
+ self.shell("#{infrastructure}-terminate-instances #{instance_to_term}")
end
}
end
@@ -726,10 +719,10 @@ def self.spawn_vms(num_of_vms_to_spawn, job, image_id, instance_type, keyname,
total_time = end_time - start_time
if spot
- Djinn.log_debug("TIMING: It took #{total_time} seconds to spawn " +
+ Kernel.puts("TIMING: It took #{total_time} seconds to spawn " +
"#{num_of_vms_to_spawn} spot instances")
else
- Djinn.log_debug("TIMING: It took #{total_time} seconds to spawn " +
+ Kernel.puts("TIMING: It took #{total_time} seconds to spawn " +
"#{num_of_vms_to_spawn} regular instances")
end
@@ -741,8 +734,8 @@ def self.generate_ssh_key(outputLocation, name, infrastructure)
loop {
ec2_output = `#{infrastructure}-add-keypair #{name} 2>&1`
break if ec2_output.include?("BEGIN RSA PRIVATE KEY")
- Djinn.log_debug("Trying again. Saw this from #{infrastructure}-add-keypair: #{ec2_output}")
- Djinn.log_run("#{infrastructure}-delete-keypair #{name} 2>&1")
+ Kernel.puts("Trying again. Saw this from #{infrastructure}-add-keypair: #{ec2_output}")
+ self.shell("#{infrastructure}-delete-keypair #{name} 2>&1")
}
# output is the ssh private key prepended with info we don't need
@@ -767,10 +760,10 @@ def self.generate_ssh_key(outputLocation, name, infrastructure)
end
def self.create_appscale_security_group(infrastructure, group)
- Djinn.log_run("#{infrastructure}-add-group #{group} -d appscale 2>&1")
- Djinn.log_run("#{infrastructure}-authorize #{group} -p 1-65535 -P udp 2>&1")
- Djinn.log_run("#{infrastructure}-authorize #{group} -p 1-65535 -P tcp 2>&1")
- Djinn.log_run("#{infrastructure}-authorize #{group} -s 0.0.0.0/0 -P icmp -t -1:-1 2>&1")
+ self.shell("#{infrastructure}-add-group #{group} -d appscale 2>&1")
+ self.shell("#{infrastructure}-authorize #{group} -p 1-65535 -P udp 2>&1")
+ self.shell("#{infrastructure}-authorize #{group} -p 1-65535 -P tcp 2>&1")
+ self.shell("#{infrastructure}-authorize #{group} -s 0.0.0.0/0 -P icmp -t -1:-1 2>&1")
end
def self.terminate_vms(nodes, infrastructure)
@@ -780,7 +773,7 @@ def self.terminate_vms(nodes, infrastructure)
instances << instance_id
}
- Djinn.log_run("#{infrastructure}-terminate-instances #{instances.join(' ')}")
+ self.shell("#{infrastructure}-terminate-instances #{instances.join(' ')}")
end
def self.terminate_hybrid_vms(creds)
@@ -796,7 +789,7 @@ def self.terminate_hybrid_vms(creds)
self.set_creds_in_env(creds, cloud_num)
keyname = creds["keyname"]
- Djinn.log_debug("Killing Cloud#{cloud_num}'s machines, of type #{cloud_type} and with keyname #{keyname}")
+ Kernel.puts("Killing Cloud#{cloud_num}'s machines, of type #{cloud_type} and with keyname #{keyname}")
self.terminate_all_vms(cloud_type, keyname)
cloud_num += 1
@@ -808,11 +801,11 @@ def self.terminate_all_vms(infrastructure, keyname)
self.log_obscured_env
desc_instances = `#{infrastructure}-describe-instances`
instances = desc_instances.scan(/INSTANCE\s+(i-\w+)\s+[\w\-\s\.]+#{keyname}/).flatten
- Djinn.log_run(`#{infrastructure}-terminate-instances #{instances.join(' ')}`)
+ self.shell(`#{infrastructure}-terminate-instances #{instances.join(' ')}`)
end
def self.get_hybrid_ips(creds)
- Djinn.log_debug("creds are #{self.obscure_creds(creds).inspect}")
+ Kernel.puts("creds are #{self.obscure_creds(creds).inspect}")
public_ips = []
private_ips = []
@@ -828,14 +821,14 @@ def self.get_hybrid_ips(creds)
self.set_creds_in_env(creds, cloud_num)
this_pub, this_priv = self.get_cloud_ips(cloud_type, keyname)
- Djinn.log_debug("CLOUD#{cloud_num} reports public ips [#{this_pub.join(', ')}] and private ips [#{this_priv.join(', ')}]")
+ Kernel.puts("CLOUD#{cloud_num} reports public ips [#{this_pub.join(', ')}] and private ips [#{this_priv.join(', ')}]")
public_ips = public_ips + this_pub
private_ips = private_ips + this_priv
cloud_num += 1
}
- Djinn.log_debug("all public ips are [#{public_ips.join(', ')}] and private ips [#{private_ips.join(', ')}]")
+ Kernel.puts("all public ips are [#{public_ips.join(', ')}] and private ips [#{private_ips.join(', ')}]")
return public_ips, private_ips
end
@@ -845,14 +838,14 @@ def self.get_cloud_ips(infrastructure, keyname)
describe_instances = ""
loop {
describe_instances = `#{infrastructure}-describe-instances 2>&1`
- Djinn.log_debug("[oi!] #{describe_instances}")
+ Kernel.puts("[oi!] #{describe_instances}")
break unless describe_instances =~ /Message replay detected./
sleep(10)
}
running_machine_regex = /\s+(#{IP_OR_FQDN})\s+(#{IP_OR_FQDN})\s+running\s+#{keyname}\s/
all_ip_addrs = describe_instances.scan(running_machine_regex).flatten
- Djinn.log_debug("[oi!] all ips are [#{all_ip_addrs.join(', ')}]")
+ Kernel.puts("[oi!] all ips are [#{all_ip_addrs.join(', ')}]")
public_ips, private_ips = HelperFunctions.get_ips(all_ip_addrs)
return public_ips, private_ips
end
@@ -930,7 +923,7 @@ def self.parse_static_data app_name
begin
tree = YAML.load_file(File.join(untar_dir,"app.yaml"))
rescue Errno::ENOENT => e
- Djinn.log_debug("Failed to load YAML file to parse static data")
+ Kernel.puts("Failed to load YAML file to parse static data")
return []
end
@@ -961,7 +954,7 @@ def self.parse_static_data app_name
# This is for bug https://bugs.launchpad.net/appscale/+bug/800539
# this is a temp fix
if handler["url"] == "/"
- Djinn.log_debug("Remapped path from / to temp_fix for application #{app_name}")
+ Kernel.puts("Remapped path from / to temp_fix for application #{app_name}")
handler["url"] = "/temp_fix"
end
cache_static_dir_path = File.join(cache_path,handler["static_dir"])
@@ -979,7 +972,7 @@ def self.parse_static_data app_name
# This is for bug https://bugs.launchpad.net/appscale/+bug/800539
# this is a temp fix
if handler["url"] == "/"
- Djinn.log_debug("Remapped path from / to temp_fix for application #{app_name}")
+ Kernel.puts("Remapped path from / to temp_fix for application #{app_name}")
handler["url"] = "/temp_fix"
end
# Need to convert all \1 into $1 so that nginx understands it
@@ -1080,22 +1073,22 @@ def self.does_image_have_location?(ip, location, key)
def self.ensure_image_is_appscale(ip, key)
if self.does_image_have_location?(ip, "/etc/appscale", key)
- Djinn.log_debug("Image at #{ip} is an AppScale image.")
+ Kernel.puts("Image at #{ip} is an AppScale image.")
else
fail_msg = "The image at #{ip} is not an AppScale image." +
" Please install AppScale on it and try again."
- Djinn.log_debug(fail_msg)
+ Kernel.puts(fail_msg)
abort(fail_msg)
end
end
def self.ensure_db_is_supported(ip, db, key)
if self.does_image_have_location?(ip, "/etc/appscale/#{VER_NUM}/#{db}", key)
- Djinn.log_debug("Image at #{ip} supports #{db}.")
+ Kernel.puts("Image at #{ip} supports #{db}.")
else
fail_msg = "The image at #{ip} does not have support for #{db}." +
" Please install support for this database and try again."
- Djinn.log_debug(fail_msg)
+ Kernel.puts(fail_msg)
abort(fail_msg)
end
end
@@ -1145,7 +1138,7 @@ def self.log_obscured_env()
end
}
- Djinn.log_debug(env)
+ Kernel.puts(env)
end
def self.get_num_cpus()
320 AppController/test/tc_datastore_factory.rb
@@ -1,320 +0,0 @@
-# Programmer: Chris Bunch
-
-$:.unshift File.join(File.dirname(__FILE__), "..", "lib")
-require 'datastore_factory'
-require 'repo'
-
-
-require 'rubygems'
-require 'flexmock/test_unit'
-
-
-class TestDatastore < Test::Unit::TestCase
-
-
- def setup
- @repo = flexmock(Repo)
- @repo.should_receive(:get_public_ip).and_return("127.0.0.1")
-
- @secret = "baz"
- @helperfunctions = flexmock(HelperFunctions)
- @helperfunctions.should_receive(:sleep_until_port_is_open).and_return()
- @helperfunctions.should_receive(:read_file).
- with("/etc/appscale/secret.key", true).and_return(@secret)
-
- @djinn = flexmock(Djinn)
- @djinn.should_receive(:log_debug).and_return()
-
- @datastore_appscale = DatastoreFactory.get_datastore(
- DatastoreRepoOnAppScale::NAME, {})
-
- @s3_creds = {'@EC2_ACCESS_KEY' => "baz", '@EC2_SECRET_KEY' => "boo",
- '@S3_URL' => "bar"}
- end
-
-
- def test_repo_is_abstract
- assert_raises(NotImplementedError) { DatastoreRepo.new({}) }
- end
-
-
- def test_repo_get_output
- @helperfunctions.should_receive(:write_file).with("/baz.txt", "output").and_return()
-
- http = flexmock(Net::HTTP)
- http.should_receive(:post_form).and_return(flexmock(:body => "base64data"))
-
- base64 = flexmock(Base64)
- base64.should_receive(:decode64).with("base64data").and_return("output")
-
- expected = nil
- actual = @datastore_appscale.get_output_and_save_to_fs("/repo/baz.txt", "/baz.txt")
- assert_equal(expected, actual)
- end
-
-
- def test_repo_on_appscale
- expected = "127.0.0.1:#{Repo::SERVER_PORT}"
- actual = @datastore_appscale.host
- assert_equal(expected, actual)
- end
-
-
- def test_repo_appscale_get_acl
- http = flexmock(Net::HTTP)
- http.should_receive(:post_form).and_return(flexmock(:body => "base64data"))
-
- base64 = flexmock(Base64)
- base64.should_receive(:decode64).with("base64data").and_return("private")
-
- expected = "private"
- actual = @datastore_appscale.get_acl("/baz")
- assert_equal(expected, actual)
- end
-
-
- def test_repo_appscale_set_acl_success
- http = flexmock(Net::HTTP)
- http.should_receive(:post_form).and_return(flexmock(:body => "success"))
-
- expected = true
- actual = @datastore_appscale.set_acl("/baz", "private")
- assert_equal(expected, actual)
- end
-
-
- def test_repo_appscale_set_acl_failure
- http = flexmock(Net::HTTP)
- http.should_receive(:post_form).and_return(flexmock(:body => "failure"))
-
- expected = false
- actual = @datastore_appscale.set_acl("/baz", "private")
- assert_equal(expected, actual)
- end
-
-
- def test_repo_appscale_file_exists
- http = flexmock(Net::HTTP)
- http.should_receive(:post_form).and_return(flexmock(:body => "true"))
-
- expected = true
- actual = @datastore_appscale.does_file_exist?("/baz")
- assert_equal(expected, actual)
- end
-
-
- def test_repo_appscale_file_does_not_exist
- http = flexmock(Net::HTTP)
- http.should_receive(:post_form).and_return(flexmock(:body => "false"))
-
- expected = false
- actual = @datastore_appscale.does_file_exist?("/baz")
- assert_equal(expected, actual)
- end
-
-
- def test_repo_appscale_upload_dir_w_one_file
- # So we will choose to upload /baz, which is a directory with one
- # file in it: /baz/boo.txt
-
- file = flexmock(File)
- file.should_receive(:directory?).with("/baz").and_return(true)
- file.should_receive(:directory?).with("/baz/boo.txt").and_return(false)
-
- @helperfunctions.should_receive(:shell).with("ls /baz").and_return("boo.txt\n")
- @helperfunctions.should_receive(:read_file).with("/baz/boo.txt", false).and_return("")
-
- http = flexmock(Net::HTTP)
- http.should_receive(:post_form).and_return(flexmock(:body => "success"))
-
- expected = true
- actual = @datastore_appscale.write_remote_file_from_local_file("/repo/baz", "/baz")
- assert_equal(expected, actual)
- end
-
-
- def test_repo_appscale_upload_dir_w_two_files
- # So this is similar to the last test, but here, uploading one file will
- # fail, which should cause the entire operation to fail
-
- file = flexmock(File)
- file.should_receive(:directory?).with("/baz").and_return(true)
- file.should_receive(:directory?).with("/baz/boo.txt").and_return(false)
- file.should_receive(:directory?).with("/baz/boo2.txt").and_return(false)
-
- @helperfunctions.should_receive(:shell).with("ls /baz").and_return("boo.txt\nboo2.txt\n")
- @helperfunctions.should_receive(:read_file).with("/baz/boo.txt", false).and_return("")
- @helperfunctions.should_receive(:read_file).with("/baz/boo2.txt", false).and_return("")
-
- flexmock(DatastoreRepoOnAppScale).new_instances { |instance|
- instance.should_receive(:do_http_post_for_set).with("/repo/baz/boo.txt", :output, "").and_return(true)
- instance.should_receive(:do_http_post_for_set).with("/repo/baz/boo2.txt", :output, "").and_return(false)
- }
-
- d = DatastoreFactory.get_datastore(DatastoreRepoOnAppScale::NAME, {})
-
- expected = false
- actual = d.write_remote_file_from_local_file("/repo/baz", "/baz")
- assert_equal(expected, actual)
-
- end
-
-
- def test_repo_appscale_upload_file_from_string
- http = flexmock(Net::HTTP)
- http.should_receive(:post_form).and_return(flexmock(:body => "success"))
-
- expected = true
- actual = @datastore_appscale.write_remote_file_from_string("/repo/baz", "boo")
- assert_equal(expected, actual)
- end
-
-
- def test_repo_on_app_engine
- assert_raises(BadConfigurationException) {
- DatastoreFactory.get_datastore(DatastoreRepoOnAppEngine::NAME, "")
- }
-
- assert_raises(BadConfigurationException) {
- DatastoreFactory.get_datastore(DatastoreRepoOnAppEngine::NAME, {})
- }
-
- assert_raises(BadConfigurationException) {
- creds = {'@appid' => 'baz'}
- DatastoreFactory.get_datastore(DatastoreRepoOnAppEngine::NAME, creds)
- }
- end
-
-
- def test_s3_validation
- assert_raises(BadConfigurationException) {
- DatastoreFactory.get_datastore(DatastoreS3::NAME, "")
- }
-
- assert_raises(BadConfigurationException) {
- DatastoreS3.new("")
- }
-
- assert_raises(BadConfigurationException) {
- DatastoreFactory.get_datastore(DatastoreS3::NAME, {})
- }
-
- assert_raises(BadConfigurationException) {
- creds = {'@EC2_ACCESS_KEY' => "baz"}
- DatastoreFactory.get_datastore(DatastoreS3::NAME, creds)
- }
-
- assert_raises(BadConfigurationException) {
- creds = {'@EC2_ACCESS_KEY' => "baz", '@EC2_SECRET_KEY' => "boo"}
- DatastoreFactory.get_datastore(DatastoreS3::NAME, creds)
- }
-
- creds = {'@EC2_ACCESS_KEY' => "baz", '@EC2_SECRET_KEY' => "boo",
- '@S3_URL' => "bar"}
-
- s3 = flexmock(RightAws::S3Interface)
- s3.should_receive(:new).with("baz", "boo").and_return()
-
- d = DatastoreFactory.get_datastore(DatastoreS3::NAME, creds)
- assert_equal("baz", d.EC2_ACCESS_KEY)
- assert_equal("boo", d.EC2_SECRET_KEY)
- assert_equal("bar", d.S3_URL)
- end
-
-
- def test_s3_upload_dir_w_one_file
- file = flexmock(File)
- file.should_receive(:directory?).with("/baz").and_return(true)
- file.should_receive(:directory?).with("/baz/boo.txt").and_return(false)
- file.should_receive(:open).with("/baz/boo.txt").and_return("OPEN FILE 1")
-
- @helperfunctions.should_receive(:shell).with("ls /baz").and_return("boo.txt\n")
- @helperfunctions.should_receive(:read_file).with("/baz/boo.txt", false).and_return("")
-
- flexmock(RightAws::S3Interface).new_instances { |instance|
- instance.should_receive(:put).with("bucket", "baz/boo.txt", "OPEN FILE 1").and_return(true)
- }
-
- d = DatastoreFactory.get_datastore(DatastoreS3::NAME, @s3_creds)
-
- expected = true
- actual = d.write_remote_file_from_local_file("/bucket/baz", "/baz")
- assert_equal(expected, actual)
- end
-
-
- def test_s3_upload_dir_w_two_files
- # So this is similar to the last test, but here, uploading one file will
- # fail, which should cause the entire operation to fail
-
- file = flexmock(File)
- file.should_receive(:directory?).with("/baz").and_return(true)
- file.should_receive(:directory?).with("/baz/boo.txt").and_return(false)
- file.should_receive(:directory?).with("/baz/boo2.txt").and_return(false)
- file.should_receive(:open).with("/baz/boo.txt").and_return("OPEN FILE 1")
- file.should_receive(:open).with("/baz/boo2.txt").and_return("OPEN FILE 2")
-
- @helperfunctions.should_receive(:shell).with("ls /baz").and_return("boo.txt\nboo2.txt\n")
- @helperfunctions.should_receive(:read_file).with("/baz/boo.txt", false).and_return("")
- @helperfunctions.should_receive(:read_file).with("/baz/boo2.txt", false).and_return("")
-
- flexmock(RightAws::S3Interface).new_instances { |instance|
- instance.should_receive(:put).with("bucket", "baz/boo.txt", "OPEN FILE 1").and_return(true)
- instance.should_receive(:put).with("bucket", "baz/boo2.txt", "OPEN FILE 2").and_return(false)
- }
-
- d = DatastoreFactory.get_datastore(DatastoreS3::NAME, @s3_creds)
-
- expected = false
- actual = d.write_remote_file_from_local_file("/bucket/baz", "/baz")
- assert_equal(expected, actual)
- end
-
-
- def test_s3_file_exists_bucket_not_found
- flexmock(RightAws::S3Interface).new_instances { |instance|
- instance.should_receive(:list_all_my_buckets).and_return([])
- }
-
- d = DatastoreFactory.get_datastore(DatastoreS3::NAME, @s3_creds)
-
- expected = false
- actual = d.does_file_exist?("/baz")
- assert_equal(expected, actual)
- end
-
-
- def test_s3_file_exists_bucket_found_but_not_file
- flexmock(RightAws::S3Interface).new_instances { |instance|
- instance.should_receive(:list_all_my_buckets).and_return([{:name => "baz"}])
- instance.should_receive(:get_acl).with("baz", "boo.txt").and_raise(RightAws::AwsError)
- }
-
- d = DatastoreFactory.get_datastore(DatastoreS3::NAME, @s3_creds)
-
- expected = false
- actual = d.does_file_exist?("/baz/boo.txt")
- assert_equal(expected, actual)
- end
-
-
- def test_s3_file_exists_bucket_and_file_found
- flexmock(RightAws::S3Interface).new_instances { |instance|
- instance.should_receive(:list_all_my_buckets).and_return([{:name => "baz"}])
- instance.should_receive(:get_acl).with("baz", "boo.txt").and_return()
- }
-
- d = DatastoreFactory.get_datastore(DatastoreS3::NAME, @s3_creds)
-
- expected = true
- actual = d.does_file_exist?("/baz/boo.txt")
- assert_equal(expected, actual)
- end
-
-
- def test_bad_datastore
- assert_raises(NotImplementedError) {
- DatastoreFactory.get_datastore("definitely not supported", {})
- }
- end
-end
9 AppController/test/ts_all.rb
@@ -4,18 +4,9 @@
$:.unshift File.join(File.dirname(__FILE__))
# AppController library tests
require 'tc_infrastructure_manager_client'
-require 'tc_datastore_factory'
require 'tc_repo'
require 'tc_zkinterface'
# AppController tests
require 'tc_djinn'
-
-
-# Neptune tests
-require 'tc_appscale_helper'
-require 'tc_cicero_helper'
-require 'tc_neptune_job_data'
-require 'tc_babel_helper'
-require 'tc_queue'
31 Neptune/average_probs.R
@@ -1,31 +0,0 @@
-#!/usr/bin/Rscript --vanilla
-
-args.vec <- commandArgs(TRUE)
-if(length(args.vec) < 1) { stop("Usage: average_probs.R FILE(S)(character)") }
-file.vec <- args.vec
-
-# Initialize vars
-nprobs <- 4
-prob.mat <- numeric(0)
-
-# Populate result matrix
-for(file in file.vec) {
- prob <- system(paste("tail -n1 ", file, sep=""), intern=T)
- prob <- unlist(strsplit(prob, split=" "))
- prob.mat <- rbind(prob.mat, as.numeric(prob[(length(prob)-nprobs+1):length(prob)]))
-}
-
-# Combine results
-N <- prob.mat[,1]
-m1 <- prob.mat[,3]*N
-sigma2 <- prob.mat[,4]^2*N
-m2 <- (sigma2+(m1/N)^2)*N
-
-M1overN <- sum(m1)/sum(N)
-M2overN <- sum(m2)/sum(N)
-Sigma2 <- M2overN - M1overN^2
-SE <- sqrt(Sigma2/sum(N))
-
-# Output results
-cat(paste(M1overN," +/- ",SE,"\n", sep=""))
-
71 Neptune/erlang_helper.rb
@@ -1,71 +0,0 @@
-#!/usr/bin/ruby
-# Programmer: Chris Bunch
-
-
-$:.unshift File.join(File.dirname(__FILE__), "..", "AppController")
-require 'djinn'
-
-
-$:.unshift File.join(File.dirname(__FILE__), "..", "AppController", "lib")
-require 'datastore_factory'
-
-
-ERLANG_OUTPUT = "/tmp/erlang_output"
-
-public
-
-
-def neptune_erlang_run_job(nodes, job_data, secret)
- return BAD_SECRET_MSG unless valid_secret?(secret)
- Djinn.log_debug("erlang - run")
-
- Thread.new {
- keyname = @creds['keyname']
- nodes = Djinn.convert_location_array_to_class(nodes, keyname)
-
- ENV['HOME'] = "/root"
-
- code = job_data['@code'].split(/\//)[-1]
-
- unless my_node.is_shadow?
- Djinn.log_run("rm -rfv /tmp/#{code}")
- end
- sleep(1)
-
- remote = job_data['@code']
- storage = job_data['@storage']
-
- datastore = DatastoreFactory.get_datastore(storage, job_data)
- datastore.get_output_and_save_to_fs(remote, "/tmp/#{code}")
-
- module_name = code.split(/\./)[0]
- Djinn.log_debug("got code #{code}, trying to run module #{module_name}")
- Djinn.log_run("chmod +x #{code}")
- Djinn.log_run("cd /tmp; erl -noshell -run #{module_name} main > #{ERLANG_OUTPUT}")
-
- datastore.write_remote_file_from_local_file(job_data['@output'], ERLANG_OUTPUT)
- remove_lock_file(job_data)
- Djinn.log_run("rm -rfv /tmp/#{code}")
- }
-
- return "OK"
-end
-
-private
-
-def start_erlang_master()
- Djinn.log_debug("#{my_node.private_ip} is starting erlang master")
-end
-
-def start_erlang_slave()
- Djinn.log_debug("#{my_node.private_ip} is starting erlang slave")
-end
-
-def stop_erlang_master()
- Djinn.log_debug("#{my_node.private_ip} is stopping erlang master")
-end
-
-def stop_erlang_slave()
- Djinn.log_debug("#{my_node.private_ip} is stopping erlang slave")
-end
-
68 Neptune/go_helper.rb
@@ -1,68 +0,0 @@
-#!/usr/bin/ruby
-# Programmer: Chris Bunch
-
-
-$:.unshift File.join(File.dirname(__FILE__), "..", "AppController")
-require 'djinn'
-
-
-$:.unshift File.join(File.dirname(__FILE__), "..", "AppController", "lib")
-require 'datastore_factory'
-
-
-public
-
-
-def neptune_go_run_job(nodes, job_data, secret)
- return BAD_SECRET_MSG unless valid_secret?(secret)
- Djinn.log_debug("go - run")
-
- Thread.new {
- keyname = @creds['keyname']
- nodes = Djinn.convert_location_array_to_class(nodes, keyname)
-
- #ENV['HOME'] = "/root"
- Djinn.log_debug("job data is #{job_data.inspect}")
-
- code = job_data['@code'].split(/\//)[-1]
-
- code_dir = "/tmp/go-#{rand()}/"
- code_loc = "#{code_dir}/#{code}"
- output_loc = "#{code_dir}/output.txt"
- FileUtils.mkdir_p(code_dir)
-
- remote = job_data['@code']
- storage = job_data['@storage']
-
- datastore = DatastoreFactory.get_datastore(storage, job_data)
- datastore.get_output_and_save_to_fs(remote, code_loc)
-
- Djinn.log_debug("got code #{code}, saved at #{code_loc}")
- Djinn.log_run("chmod +x #{code_loc}")
- Djinn.log_run("cd #{code_dir}; ./#{code} > #{output_loc}")
-
- datastore.write_remote_file_from_local_file(job_data['@output'], output_loc)
- remove_lock_file(job_data)
- }
-
- return "OK"
-end
-
-private
-
-def start_go_master()
- Djinn.log_debug("#{my_node.private_ip} is starting go master")
-end
-
-def start_go_slave()
- Djinn.log_debug("#{my_node.private_ip} is starting go slave")
-end
-
-def stop_go_master()
- Djinn.log_debug("#{my_node.private_ip} is stopping go master")
-end
-
-def stop_go_slave()
- Djinn.log_debug("#{my_node.private_ip} is stopping go slave")
-end
-
117 Neptune/lib/job_types/cewssa_helper.rb
@@ -1,117 +0,0 @@
-#!/usr/bin/ruby
-# Programmer: Chris Bunch
-# baz
-
-$:.unshift File.join(File.dirname(__FILE__), "..", "..")
-require 'neptune_manager'
-
-
-BIRTH_DEATH = "/usr/local/lib/R/site-library/cewSSA/data/birth_death.r"
-OUTPUT_HOME = "/usr/local/cewssa/data"
-OUR_CEWSSA_CODE = "#{APPSCALE_HOME}/Neptune/run_dwSSA.R"
-MERGE_SCRIPT = "#{APPSCALE_HOME}/Neptune/average_probs.R"
-
-public
-
-def neptune_cewssa_run_job(nodes, job_data, secret)
- Djinn.log_debug("cewssa - pre-run")
- return BAD_SECRET_MSG unless valid_secret?(secret)
-
- #message = validate_environment(job_data, secret)
- #return message unless message == "no error"
-
- Djinn.log_debug("cewssa - run")
-
- Thread.new {
-
- start_time = Time.now
-
- keyname = @creds['keyname']
- nodes = Djinn.convert_location_array_to_class(nodes, keyname)
- sims = neptune_get_ssa_num_simulations(nodes, job_data)
-
- threads = []
- at = 0
-
- random_numbers = neptune_get_ssa_seed_vals(nodes.length)
-
- nodes.each_with_index { |node, i|
- threads << Thread.new {
- ip = node.private_ip
- ssh_key = node.ssh_key
- start = at
- fin = at + sims[i]
- at = fin
- remote_del_command = "rm -rf #{OUTPUT_HOME}/*"
- Djinn.log_run("ssh -i #{ssh_key} -o StrictHostkeyChecking=no root@#{ip} '#{remote_del_command}'")
-
- iterations = fin - start # don't need to add one here
- seed = random_numbers[i]
-
- output_file = "#{OUTPUT_HOME}/data#{i}.txt"
- remote_run_command = "#{OUR_CEWSSA_CODE} #{BIRTH_DEATH} #{iterations} #{seed} 1.454 0.686 > #{output_file}"
- Djinn.log_run("ssh -i #{ssh_key} -o StrictHostkeyChecking=no root@#{ip} '#{remote_run_command}'")
- }
- }
-
- Djinn.log_debug("cewssa - joining threads")
-
- threads.each { |t| t.join }
-
- Djinn.log_debug("cewssa - retrieving run data")
-
- nodes.each { |node|
- ip = node.private_ip
- ssh_key = node.ssh_key
- remote_cp_command = "scp -i #{ssh_key} -o StrictHostkeyChecking=no root@#{ip}:#{OUTPUT_HOME}/data* #{OUTPUT_HOME}/"
- Djinn.log_run(remote_cp_command)
- }
-
- Djinn.log_debug("cewssa - collecting stats")
-
- collect_stats = "#{MERGE_SCRIPT} #{OUTPUT_HOME}/data* > #{OUTPUT_HOME}/finalresult.txt"
- Djinn.log_run(collect_stats)
-
- fin_time = Time.now
- total = fin_time - start_time
- Djinn.log_debug("cewssa - done!")
- Djinn.log_debug("TIMING: Took #{total} seconds.")
-
- shadow = get_shadow
- shadow_ip = shadow.private_ip
- shadow_key = shadow.ssh_key
-
- final_result = "#{OUTPUT_HOME}/finalresult.txt"
- HelperFunctions.scp_file(final_result, final_result, shadow_ip, shadow_key)
-
- neptune_write_job_output(job_data, final_result)
-
- remove_lock_file(job_data)
- }
-
- return "OK"
-end
-
-private
-
-def neptune_cewssa_get_output(job_data)
- return OUTPUT_HOME
-end
-
-def start_cewssa_master()
- Djinn.log_debug("#{my_node.private_ip} is starting cewssa master")
-end
-
-def start_cewssa_slave()
- Djinn.log_debug("#{my_node.private_ip} is starting cewssa slave")
-end
-
-def stop_cewssa_master()
- Djinn.log_debug("#{my_node.private_ip} is stopping cewssa master")
- # tell the shadow we're done running cewssa jobs
-end
-
-def stop_cewssa_slave()
- Djinn.log_debug("#{my_node.private_ip} is stopping cewssa slave")
-end
-
106 Neptune/lib/job_types/dfsp_helper.rb
@@ -1,106 +0,0 @@
-#!/usr/bin/ruby
-# Programmer: Chris Bunch
-
-
-$:.unshift File.join(File.dirname(__FILE__), "..", "..")
-require 'neptune_manager'
-
-
-DFSP_HOME = "/usr/local/dfsp"
-
-public
-
-def neptune_dfsp_run_job(nodes, job_data, secret)
- return BAD_SECRET_MSG unless valid_secret?(secret)
- Djinn.log_debug("dfsp - run")
-
- Thread.new {
- start_time = Time.now
-
- keyname = @creds['keyname']
- nodes = Djinn.convert_location_array_to_class(nodes, keyname)
- sims = neptune_get_ssa_num_simulations(nodes, job_data)
-
- threads = []
- at = 0
- nodes.each_with_index { |node, i|
- threads << Thread.new {
- ip = node.private_ip
- ssh_key = node.ssh_key
- start = at
- fin = at + sims[i]
- at = fin
- remote_del_command = "rm -rf #{DFSP_HOME}/data*"
- remote_run_command = "cd #{DFSP_HOME}; ./multi_run.pl #{start} #{fin}"
- Djinn.log_run("ssh -i #{ssh_key} -o StrictHostkeyChecking=no root@#{ip} '#{remote_del_command}'")
- Djinn.log_run("ssh -i #{ssh_key} -o StrictHostkeyChecking=no root@#{ip} '#{remote_run_command}'")
- }
- }
-
- Djinn.log_debug("dfsp - joining threads")
-
- threads.each { |t| t.join }
-
- Djinn.log_debug("dfsp - retrieving run data")
-
- nodes.each { |node|
- ip = node.private_ip
- ssh_key = node.ssh_key
- remote_cp_command = "scp -i #{ssh_key} -o StrictHostkeyChecking=no root@#{ip}:#{DFSP_HOME}/data* #{DFSP_HOME}/"
- Djinn.log_run(remote_cp_command)
- }
-
- Djinn.log_debug("dfsp - collecting stats")
-
- collect_stats = "cd #{DFSP_HOME}/; ./collect_stats.pl #{at} >out 2>err"
- Djinn.log_run(collect_stats)
-
- fin_time = Time.now
- total = fin_time - start_time
- Djinn.log_debug("dfsp - done!")
- Djinn.log_debug("TIMING: Took #{total} seconds.")
-
- shadow = get_shadow
- shadow_ip = shadow.private_ip
- shadow_key = shadow.ssh_key
-
- out = "#{DFSP_HOME}/out"
- HelperFunctions.scp_file(out, out, shadow_ip, shadow_key)
-
- err = "#{DFSP_HOME}/err"
- HelperFunctions.scp_file(err, err, shadow_ip, shadow_key)
-
- data = "#{DFSP_HOME}/data*"
- HelperFunctions.scp_file(data, DFSP_HOME, shadow_ip, shadow_key)
-
- neptune_write_job_output(job_data, out)
-
- remove_lock_file(job_data)
- }
-
- return "OK"
-end
-
-private
-
-def neptune_dfsp_get_output(job_data)
- return DFSP_HOME
-end
-
-def start_dfsp_master()
- Djinn.log_debug("#{my_node.private_ip} is starting dfsp master")
-end
-
-def start_dfsp_slave()
- Djinn.log_debug("#{my_node.private_ip} is starting dfsp slave")
-end
-
-def stop_dfsp_master()
- Djinn.log_debug("#{my_node.private_ip} is stopping dfsp master")
-end
-
-def stop_dfsp_slave()
- Djinn.log_debug("#{my_node.private_ip} is stopping dfsp slave")
-end
-
-
281 Neptune/lib/job_types/ssa_helper.rb
@@ -1,281 +0,0 @@
-#!/usr/bin/ruby
-# Programmer: Chris Bunch
-
-
-$:.unshift File.join(File.dirname(__FILE__), "..", "..")
-require 'neptune_manager'
-
-
-$:.unshift File.join(File.dirname(__FILE__), "..", "AppController", "lib")
-require 'datastore_factory'
-
-
-MULTICORE = true
-SSA_HOME = "/usr/local/StochKit2.0/"
-
-
-IS_FILE = true
-NOT_A_FILE = false
-
-
-public
-
-
-def neptune_ssa_run_job(nodes, job_data, secret)
- return BAD_SECRET_MSG unless valid_secret?(secret)
- Djinn.log_debug("ssa - run")
-
- Thread.new {
- start_time = Time.now
- total_compute_time = 0
- total_storage_time = 0
- total_slowest_path = 0
- c_times = []
- s_times = []
-
- Djinn.log_debug("job data is #{job_data.inspect}")
- keyname = @creds['keyname']
-
- nodes = Djinn.convert_location_array_to_class(nodes, keyname)
-
- sims = neptune_get_ssa_num_simulations(nodes, job_data)
-
- working_dir = "/tmp/ssa-#{rand(10000)}"
- FileUtils.mkdir_p(working_dir)
-
- tar = working_dir + "/" + File.basename(job_data['@tar'])
-
- Djinn.log_debug("tar is #{job_data['@tar']}")
- Djinn.log_debug("working dir is #{working_dir}")
-
- remote = job_data['@tar']
-
- datastore = DatastoreFactory.get_datastore(job_data['@storage'], job_data)
- datastore.get_output_and_save_to_fs(remote, tar)
-
- neptune_uncompress_file(tar)
-
- num_sims = job_data["@trajectories"] || job_data["@simulations"]
-
- param_num = job_data['@param_num']
-
- threads = []
- node_times = []
- at = 0
- nodes.each_with_index { |node, i|
- threads << Thread.new {
- node_times[i] = 0
-
- ip = node.private_ip
- ssh_key = node.ssh_key
- start = at
- fin = at + sims[i]
- at = fin
- fin -= 1
- Djinn.log_debug("This node will run trajectories #{start} to #{fin}")
-
- code_path = "#{working_dir}/code/run.sh"
- Djinn.log_run("chmod +x #{code_path}")
- exec = "bash #{code_path}"
-
- input = "#{working_dir}/code/#{job_data['@input']}"
-
- unless ip == HelperFunctions.local_ip
- Djinn.log_run("scp -r -i #{ssh_key} -o StrictHostkeyChecking=no #{working_dir} root@#{ip}:#{working_dir}")
- end
-
- trajectories = fin - start + 1
-
- if MULTICORE
- cores = HelperFunctions.get_num_cpus()
- else
- cores = 1
- end
-
- done = 0
- loop {
- trajectories_left = trajectories - done
- Djinn.log_debug("Need to run #{trajectories_left} more trajectories on #{cores} cores")
- break if trajectories_left.zero?
- need_to_run = [trajectories_left, cores].min
-
- Djinn.log_debug("Running #{need_to_run} trajectories")
- core_threads = []
- current_times = []
- need_to_run.times { |j|
- core_threads << Thread.new {
- my_trajectory = start+done+j
- Djinn.log_debug("Thread #{j} is running trajectory #{my_trajectory}")
- output = File.expand_path("#{working_dir}/output-#{my_trajectory}")
-
- # run the computation, remembering to place StochKit in the user's PATH
- path = "PATH=$PATH:#{SSA_HOME}"
- run_command = "#{path} #{exec} #{input} #{output} #{my_trajectory} #{param_num}"
-
- start_compute = Time.now
- Djinn.log_run("ssh -i #{ssh_key} -o StrictHostkeyChecking=no root@#{ip} '#{run_command}'")
- end_compute = Time.now
- c_time = end_compute - start_compute
- total_compute_time += c_time
- c_times << c_time
-
- # copy the output back to this box - in the future we can do merges here
- # or in the future we can just have the node upload to s3
- start_storage = Time.now
- unless HelperFunctions.local_ip == ip
- remote_cp_command = "scp -r -i #{ssh_key} -o StrictHostkeyChecking=no root@#{ip}:#{output} #{output}"
- Djinn.log_run(remote_cp_command)
-
- end
-
- remote_location = "#{job_data['@output']}/output-#{my_trajectory}"
- datastore.write_remote_file_from_local_file(remote_location, output)
- end_storage = Time.now
- s_time = end_storage - start_storage
- total_storage_time += s_time
- s_times << s_time
-
- node_times[i] += (c_time + s_time)
-
- # remove our output - we can't want the disk to fill up
- remove_cmd = "rm -rf #{output}"
- Djinn.log_run("ssh -i #{ssh_key} -o StrictHostkeyChecking=no root@#{ip} '#{remove_cmd}'")
- Djinn.log_run(remove_cmd)
- }
- }
-
- core_threads.each { |c| c.join }
-
- done += need_to_run
- Djinn.log_debug("Done running #{need_to_run} trajectories, #{trajectories - done} to go")
- }
- }
- }
-
- Djinn.log_debug("ssa - joining threads")
-
- threads.each { |t| t.join }
-
- # clean up after ourselves
- Djinn.log_run("rm -rf #{working_dir}")
-
- fin_time = Time.now
- total = fin_time - start_time
- total_slowest_path = node_times.max
- total_overhead_time = total - total_slowest_path
-
- timing_info = <<BAZ
- TIMING: total execution time is #{total} seconds.
- TIMING: total compute time is #{total_compute_time} seconds.
- TIMING: total storage time is #{total_storage_time} seconds.
- TIMING: slowest path time is #{total_slowest_path} seconds.
- TIMING: overhead time is #{total_overhead_time} seconds.
- TIMING: average compute time is #{average(c_times)} seconds.
- TIMING: stddev compute time is #{standard_deviation(c_times)} seconds.
- TIMING: average storage time is #{average(s_times)} seconds.
- TIMING: stddev storage time is #{standard_deviation(s_times)} seconds.
- RAW_DATA: node times are: [#{node_times.join(', ')}]
- RAW_DATA: compute times are: [#{c_times.join(', ')}]
- RAW_DATA: storage times are: [#{s_times.join(', ')}]
-BAZ
-
- Djinn.log_debug(timing_info)
-
- remote_location = "#{job_data['@output']}/timing_info.txt"
- datastore.write_remote_file_from_string(remote_location, timing_info)
-
- remove_lock_file(job_data)
- }
-
- return "OK"
-end
-
-private
-
-def neptune_ssa_get_output(job_data)
- return SSA_HOME
-end
-
-def start_ssa_master()
- Djinn.log_debug("#{my_node.private_ip} is starting ssa master")
-end
-
-def start_ssa_slave()
- Djinn.log_debug("#{my_node.private_ip} is starting ssa slave")
-end
-
-def stop_ssa_master()
- Djinn.log_debug("#{my_node.private_ip} is stopping ssa master")
-end
-
-def stop_ssa_slave()
- Djinn.log_debug("#{my_node.private_ip} is stopping ssa slave")
-end
-
-def neptune_get_ssa_seed_vals(num_vals)
- random_numbers = []
- loop {
- possible_rand = rand(32000)
- unless random_numbers.include?(possible_rand)
- random_numbers << possible_rand
- end
- break if num_vals == random_numbers.length
- }
-
- return random_numbers
-end
-
-def neptune_get_ssa_num_simulations(nodes, job_data)
- num_nodes = nodes.length
- num_sims = job_data["@trajectories"] || job_data["@simulations"]
- sims_per_node = num_sims / num_nodes
-
- Djinn.log_debug("num nodes = #{num_nodes}")
- Djinn.log_debug("num_sims = #{num_sims}")
- Djinn.log_debug("sims_per_node = #{sims_per_node}")
-
- # set up how many simulations each node
- # should run by divying it up equally
- # any remainder can be assigned to an
- # arbitrary node
-
- sims = [sims_per_node] * num_nodes
- remainder = num_sims % num_nodes
- sims[-1] += remainder
-
- Djinn.log_debug("sims = #{sims.join(', ')}")
- Djinn.log_debug("remainder = #{remainder}")
-
- return sims
-end
-
-def average(population)
- total = 0.0
- n = 0
-
- population.each { |val|
- total += val
- n += 1
- }
-
- return total / n
-end
-
-def variance(population)
- n = 0
- mean = 0.0
-
- sum = population.reduce(0.0) do |sum, x|
- n += 1
- delta = x - mean
- mean += delta / n
-
- sum + delta * (x - mean)
- end
-
- sum / n
-end
-
-def standard_deviation(population)
- Math.sqrt(variance(population))
-end
33 Neptune/patch/mpi-install.patch
@@ -1,33 +0,0 @@
-*** ./src/pm/Makefile.in.org 2010-06-25 14:37:03.000000000 -0700
---- ./src/pm/Makefile.in 2010-06-25 14:37:44.000000000 -0700
-***************
-*** 127,140 ****
- all-postamble:
- if [ -n "@pm_name@" -a -d "@pm_name@" ] ; then \
- build_prefix=`cd ../.. && pwd` && cd @pm_name@ && \
-! ${MAKE} prefix=$$build_prefix exec_prefix=$$build_prefix bindir=$$build_prefix/bin mpich2-build-install ; \
- fi
-
- # Add util to distclean if possible
- distclean-local:
- if [ -n "@pm_name@" -a -d "@pm_name@" ] ; then \
- build_prefix=`cd ../.. && pwd` && cd @pm_name@ && \
-! ${MAKE} prefix=$$build_prefix exec_prefix=$$build_prefix bindir=$$build_prefix/bin mpich2-build-uninstall ; \
- fi
- if [ -s util/Makefile ] ; then (cd util && ${MAKE} distclean) ; fi
-
---- 127,140 ----
- all-postamble:
- if [ -n "@pm_name@" -a -d "@pm_name@" ] ; then \
- build_prefix=`cd ../.. && pwd` && cd @pm_name@ && \
-! ${MAKE} mpich2-build-install ; \
- fi
-
- # Add util to distclean if possible
- distclean-local:
- if [ -n "@pm_name@" -a -d "@pm_name@" ] ; then \
- build_prefix=`cd ../.. && pwd` && cd @pm_name@ && \
-! ${MAKE} mpich2-build-uninstall ; \
- fi
- if [ -s util/Makefile ] ; then (cd util && ${MAKE} distclean) ; fi
-
66 Neptune/r_helper.rb
@@ -1,66 +0,0 @@
-#!/usr/bin/ruby
-# Programmer: Chris Bunch
-
-
-$:.unshift File.join(File.dirname(__FILE__), "..", "AppController")
-require 'djinn'
-
-
-$:.unshift File.join(File.dirname(__FILE__), "..", "AppController", "lib")
-require 'datastore_factory'
-
-
-public
-
-def neptune_r_run_job(nodes, job_data, secret)
- return BAD_SECRET_MSG unless valid_secret?(secret)
- Djinn.log_debug("r - run")
-
- Thread.new {
- keyname = @creds['keyname']
- nodes = Djinn.convert_location_array_to_class(nodes, keyname)
-
- Djinn.log_debug("job data is #{job_data.inspect}")
-
- code = job_data['@code'].split(/\//)[-1]
-
- code_dir = "/tmp/r-#{rand()}/"
- code_loc = "#{code_dir}/#{code}"
- output_loc = "#{code_dir}/output.txt"
- FileUtils.mkdir_p(code_dir)
-
- remote = job_data['@code']
- storage = job_data['@storage']
-
- datastore = DatastoreFactory.get_datastore(storage, job_data)
- datastore.get_output_and_save_to_fs(remote, code_loc)
-
- Djinn.log_debug("got code #{code}, saved at #{code_loc}")
- Djinn.log_run("chmod +x #{code_loc}")
- Djinn.log_run("Rscript --vanilla #{code_loc} > #{output_loc}")
-
- datastore.write_remote_file_from_local_file(job_data['@output'], output_loc)
- remove_lock_file(job_data)
- }
-
- return "OK"
-end
-
-private
-
-def start_r_master()
- Djinn.log_debug("#{my_node.private_ip} is starting r master")
-end
-
-def start_r_slave()
- Djinn.log_debug("#{my_node.private_ip} is starting r slave")
-end
-
-def stop_r_master()
- Djinn.log_debug("#{my_node.private_ip} is stopping r master")
-end
-
-def stop_r_slave()
- Djinn.log_debug("#{my_node.private_ip} is stopping r slave")
-end
-
13 Neptune/run_dwSSA.R
@@ -1,13 +0,0 @@
-#!/usr/bin/Rscript --vanilla
-
-args.vec <- commandArgs(TRUE)
-if(length(args.vec) < 4) { stop("Usage: run_dwSSA.R MODEL(character) N(numeric) SEED(numeric) GAMMAS(numeric)") }
-model <- args.vec[1]
-N <- as.numeric(args.vec[2])
-seed <- as.numeric(args.vec[3])
-gamma <- as.numeric(args.vec[4:length(args.vec)])
-
-library(cewSSA)
-system.time(p <- dwSSA(model=model, gamma=gamma, N=N, seed=seed))
-p
-