
Commit

Merge branch 'master' of https://github.com/AppScale/appscale into prepare-pq-select
cdonati committed Apr 26, 2017
2 parents 80d58b6 + d134b20 commit bf51b25
Showing 125 changed files with 1,564 additions and 2,270 deletions.
718 changes: 352 additions & 366 deletions AppController/djinn.rb

Large diffs are not rendered by default.

14 changes: 0 additions & 14 deletions AppController/get_app.py

This file was deleted.

4 changes: 2 additions & 2 deletions AppController/lib/ejabberd.rb
@@ -281,7 +281,7 @@ def self.write_config_file(my_private_ip)
{access, c2s},
{shaper, c2s_shaper},
{max_stanza_size, 65536},
starttls, {certfile, "/etc/ejabberd/ejabberd.pem"}
starttls, {certfile, "#{Djinn::APPSCALE_CONFIG_DIR}/ejabberd.pem"}
]},
{5269, ejabberd_s2s_in, [
@@ -298,7 +298,7 @@ def self.write_config_file(my_private_ip)
]}.
{s2s_use_starttls, true}.
{s2s_certfile, "/etc/ejabberd/ejabberd.pem"}.
{s2s_certfile, "#{Djinn::APPSCALE_CONFIG_DIR}/ejabberd.pem"}.
%%% ==============
%%% AUTHENTICATION
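
Both certfile hunks replace the hardcoded /etc/ejabberd path with the shared constant. Assuming Djinn::APPSCALE_CONFIG_DIR resolves to "/etc/appscale" (an assumption; the constant is defined in djinn.rb), the rendered config line becomes:

  {s2s_certfile, "/etc/appscale/ejabberd.pem"}.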
6 changes: 1 addition & 5 deletions AppController/lib/groomer_service.rb
@@ -15,26 +15,22 @@ module GroomerService
# Starts the Groomer Service on this machine. We don't want to monitor
# it ourselves, so just tell monit to start it and watch it.
def self.start()
-    groomer = self.scriptname()
+    groomer = self.scriptname
start_cmd = "/usr/bin/python2 #{groomer}"
stop_cmd = "/usr/bin/python2 #{APPSCALE_HOME}/scripts/stop_service.py " +
"#{groomer} /usr/bin/python"
MonitInterface.start(:groomer_service, start_cmd, stop_cmd, nil, {},
start_cmd, MAX_MEM, nil, nil)
-    MonitInterface.start_file(:groomer_file_check,
-      "/var/log/appscale/groomer_service.log", stop_cmd, "12")
end

# Stops the groomer service running on this machine. Since it's
# managed by monit, just tell monit to shut it down.
def self.stop()
MonitInterface.stop(:groomer_service)
-    MonitInterface.stop(:groomer_file_check)
end

def self.scriptname()
return `which appscale-groomer-service`.chomp
end

end

31 changes: 14 additions & 17 deletions AppController/lib/haproxy.rb
@@ -89,6 +89,10 @@ module HAProxy
HAPROXY_ERROR_PREFIX = "No such"


+  # The number of seconds HAProxy should wait for a server response.
+  HAPROXY_SERVER_TIMEOUT = 600


def self.start()
start_cmd = "/usr/sbin/service haproxy start"
stop_cmd = "/usr/sbin/service haproxy stop"
@@ -165,7 +169,8 @@ def self.create_tq_server_config(server_ips, my_ip, listen_port)
# name : the name of the server
def self.create_app_config(servers, my_private_ip, listen_port, name)
config = "# Create a load balancer for the #{name} application\n"
config << "listen #{name} #{my_private_ip}:#{listen_port}\n"
config << "listen #{name}\n"
config << " bind #{my_private_ip}:#{listen_port}\n"
servers.each do |server|
config << HAProxy.server_config(name, "#{server['ip']}:#{server['port']}") + "\n"
end
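
Newer HAProxy releases deprecate specifying the bind address directly on the listen line, which is why the address moves to a separate bind directive here and in update_app_config below. A hedged sketch of the block this method now builds (app name, addresses, and port are illustrative; the exact server lines come from HAProxy.server_config):

  # Create a load balancer for the guestbook application
  listen guestbook
    bind 10.0.0.1:8080
    server ...   (one line per AppServer, emitted by HAProxy.server_config)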
@@ -241,7 +246,8 @@ def self.update_app_config(private_ip, app_name, listen_port, appservers)
end

config = "# Create a load balancer for the app #{app_name} \n"
config << "listen #{full_app_name} #{private_ip}:#{listen_port} \n"
config << "listen #{full_app_name}\n"
config << " bind #{private_ip}:#{listen_port}\n"
config << servers.join("\n")

config_path = File.join(SITES_ENABLED_PATH,
@@ -314,14 +320,6 @@ def self.initialize_config()
# Log details about HTTP requests
#option httplog
-    # Abort request if client closes its output channel while waiting for the
-    # request. HAProxy documentation has a long explanation for this option.
-    option abortonclose
-    # Check if a "Connection: close" header is already set in each direction,
-    # and will add one if missing.
-    option httpclose
# If sending a request fails, try to send it to another, 3 times
# before aborting the request
retries 3
@@ -330,15 +328,14 @@ def self.initialize_config()
# any Mongrel, not just the one that started the session
option redispatch
-    # Timeout a request if the client did not read any data for 600 seconds
-    timeout client 600000
+    # Time to wait for a connection attempt to a server.
+    timeout connect 5000ms
-    # Timeout a request if Mongrel does not accept a connection for 600 seconds
-    timeout connect 600000
+    # The maximum inactivity time allowed for a client.
+    timeout client 50000ms
-    # Timeout a request if Mongrel does not accept the data on the connection,
-    # or does not send a response back in 10 minutes.
-    timeout server 600000
+    # The maximum inactivity time allowed for a server.
+    timeout server #{HAPROXY_SERVER_TIMEOUT}s
# Enable the statistics page
stats enable
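
Reading the three settings together: with HAPROXY_SERVER_TIMEOUT = 600, the rendered haproxy.cfg fragment is the sketch below. Connection attempts now fail after 5 seconds and idle clients after 50 seconds; only the server response keeps the old 10-minute budget (600000 ms == 600 s):

  timeout connect 5000ms
  timeout client 50000ms
  timeout server 600s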
11 changes: 9 additions & 2 deletions AppController/lib/helperfunctions.rb
@@ -583,13 +583,20 @@ def self.local_ip()
# private IP address from its private FQDN is to use dig. This method
# attempts to resolve IPs in that method, deferring to other methods if that
# fails.
+  #
+  # Args:
+  #   host: the String containing the IP or hostname.
+  # Returns:
+  #   A String with the IP address.
+  # Raises:
+  #   AppScaleException: if host cannot be translated to an IP.
def self.convert_fqdn_to_ip(host)
return host if host =~ /#{IP_REGEX}/

ip = `dig #{host} +short`.chomp
if ip.empty?
Djinn.log_debug("couldn't use dig to resolve [#{host}]")
self.log_and_crash("Couldn't convert #{host} to an IP address. Result of dig was \n#{ip}")
Djinn.log_warn("Couldn't use dig to resolve #{host}.")
raise AppScaleException.new("Couldn't convert #{host}: result of dig was \n#{ip}.")
end

return ip
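
A hedged usage sketch of the new behavior (host names and addresses are illustrative):

  HelperFunctions.convert_fqdn_to_ip('10.0.0.5')
  # => "10.0.0.5" (already matches IP_REGEX, so it is returned as-is)
  HelperFunctions.convert_fqdn_to_ip('db-node.internal')
  # => e.g. "10.0.0.7", whatever `dig db-node.internal +short` prints
  # Raises AppScaleException instead of crashing when dig returns nothing.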
24 changes: 14 additions & 10 deletions AppController/lib/infrastructure_manager_client.rb
@@ -176,7 +176,20 @@ def terminate_instances(options, instance_ids)
end


-  def spawn_vms(num_vms, options, job, disks)
+  # Create new VMs.
+  #
+  # Args:
+  #   num_vms: the number of VMs to create.
+  #   options: a hash containing information needed by the agent
+  #     (credentials, etc.).
+  #   jobs: an Array containing the roles for each VM to be created.
+  #   disks: an Array specifying the disks to be associated with the VMs
+  #     (if any; it can be nil).
+  #
+  # Returns:
+  #   An Array containing the nodes' information, suitable to be converted
+  #   into Node.
+  def spawn_vms(num_vms, options, jobs, disks)
parameters = get_parameters_from_credentials(options)
parameters['num_vms'] = num_vms.to_s
parameters['cloud'] = 'cloud1'
@@ -204,15 +217,6 @@ def spawn_vms(num_vms, options, job, disks)
Kernel.sleep(10)
}

-    # now, turn this info back into the format we normally use
-    jobs = []
-    if job.is_a?(String)
-      # We only got one job, so just repeat it for each one of the nodes
-      jobs = Array.new(size=vm_info['public_ips'].length, obj=job)
-    else
-      jobs = job
-    end

# ip:job:instance-id
instances_created = []
vm_info['public_ips'].each_index { |index|
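
Since the String-handling branch above is gone, callers must now pass jobs as an already-expanded Array with one role per requested VM. A hedged call sketch (client construction elided; role names and counts are illustrative):

  # imc is an InfrastructureManagerClient; options holds the agent credentials.
  node_info = imc.spawn_vms(2, options, ['appengine', 'appengine'], nil)
  # => Array of "ip:job:instance-id" strings, ready to be turned into Nodes.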
16 changes: 0 additions & 16 deletions AppController/lib/monit_interface.rb
@@ -56,22 +56,6 @@ def self.start(watch, start_cmd, stop_cmd, ports, env_vars, match_cmd, mem,
self.run_cmd("#{MONIT} start -g #{watch}")
end

-  def self.start_file(watch, path, action, hours=12)
-    contents = <<BOO
-check file #{watch} path "#{path}" every 2 cycles
-group #{watch}
-if timestamp > 12 hours then exec "#{action}"
-BOO
-    monit_file = "#{MONIT_CONFIG}/appscale-#{watch}.cfg"
-    HelperFunctions.write_file(monit_file, contents)
-    self.run_cmd('service monit reload', true)
-
-    Djinn.log_info("Watching file #{path} for #{watch}" +
-      " with exec action [#{action}]")
-
-    self.run_cmd('#{MONIT} start -g #{watch}')
-  end

def self.restart(watch)
self.run_cmd("#{MONIT} restart -g #{watch}")
end
128 changes: 7 additions & 121 deletions AppController/lib/zkinterface.rb
@@ -213,8 +213,8 @@ def self.get_appcontroller_lock()
end

info = self.run_zookeeper_operation {
-      @@zk.create(:path => APPCONTROLLER_LOCK_PATH,
-        :ephemeral => EPHEMERAL, :data => JSON.dump(@@client_ip))
+      @@zk.create(:path => APPCONTROLLER_LOCK_PATH, :ephemeral => EPHEMERAL,
+        :data => @@client_ip)
}
if info[:rc].zero?
return true
@@ -431,8 +431,8 @@ def self.is_node_done_loading?(ip)
end

begin
-      json_contents = self.get(loading_file)
-      return JSON.load(json_contents)
+      contents = self.get(loading_file)
+      return contents == "true"
rescue FailedZooKeeperOperationException
return false
end
@@ -454,8 +454,9 @@ def self.set_live_node_ephemeral_link(ip)
# node is done loading (if they have finished starting/stopping roles), or is
# not done loading (if they have roles they need to start or stop).
def self.set_done_loading(ip, val)
-    return self.set("#{APPCONTROLLER_NODE_PATH}/#{ip}/done_loading",
-      JSON.dump(val), NOT_EPHEMERAL)
+    zk_value = val ? "true" : "false"
+    return self.set("#{APPCONTROLLER_NODE_PATH}/#{ip}/done_loading",
+      zk_value, NOT_EPHEMERAL)
end
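
With this change the done_loading node stores the literal strings "true" and "false" rather than a JSON dump, matching the string comparison in is_node_done_loading? above. A hedged sketch (the IP is illustrative):

  ZKInterface.set_done_loading('10.0.0.2', true)   # writes the string "true"
  ZKInterface.is_node_done_loading?('10.0.0.2')    # => true iff the node holds "true"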


@@ -477,121 +478,6 @@ def self.set_job_data_for_ip(ip, job_data)
end


-  # Adds the specified role to the given node in ZooKeeper. A node can call this
-  # function to add a role to another node, and the other node should take on
-  # this role, or a node can call this function to let others know that it is
-  # taking on a new role.
-  # Callers should acquire the ZK Lock before calling this function.
-  # roles should be an Array of Strings, where each String is a role to add
-  # node should be a DjinnJobData representing the node that we want to add
-  # the roles to
-  def self.add_roles_to_node(roles, node, keyname)
-    old_job_data = self.get_job_data_for_ip(node.private_ip)
-    new_node = DjinnJobData.new(old_job_data, keyname)
-    new_node.add_roles(roles.join(":"))
-    self.set_job_data_for_ip(node.private_ip, new_node.to_hash())
-    self.set_done_loading(node.private_ip, false)
-    self.update_ips_timestamp()
-  end
-
-
-  # Removes the specified roles from the given node in ZooKeeper. A node can
-  # call this function to remove roles from another node, and the other node
-  # should take on this role, or a node can call this function to let others
-  # know that it is stopping existing roles.
-  # Callers should acquire the ZK Lock before calling this function.
-  # roles should be an Array of Strings, where each String is a role to remove
-  # node should be a DjinnJobData representing the node that we want to remove
-  # the roles from
-  def self.remove_roles_from_node(roles, node, keyname)
-    old_job_data = self.get_job_data_for_ip(node.private_ip)
-    new_node = DjinnJobData.new(old_job_data, keyname)
-    new_node.remove_roles(roles.join(":"))
-    self.set_job_data_for_ip(node.private_ip, new_node.to_hash())
-    self.set_done_loading(node.private_ip, false)
-    self.update_ips_timestamp()
-  end
-
-
-  # Asks ZooKeeper for all of the scaling requests (e.g., scale up or scale
-  # down) for the given application.
-  #
-  # Args:
-  #   appid: A String that names the application whose scaling requests we
-  #     wish to query.
-  # Returns:
-  #   An Array of Strings, where each String is a request to either add or
-  #   remove AppServers for this application. If no requests have been made
-  #   for this application, an empty Array is returned.
-  def self.get_scaling_requests_for_app(appid)
-    path = "#{SCALING_DECISION_PATH}/#{appid}"
-    requestors = self.get_children(path)
-    scaling_requests = []
-    requestors.each { |ip|
-      scaling_requests << self.get("#{path}/#{ip}")
-    }
-    return scaling_requests
-  end
-
-
-  # Erases all requests to scale AppServers up or down for the named
-  # application.
-  #
-  # Args:
-  #   appid: A String that names the application whose scaling requests we
-  #     wish to erase.
-  def self.clear_scaling_requests_for_app(appid)
-    path = "#{SCALING_DECISION_PATH}/#{appid}"
-    requests = self.get_children(path)
-    requests.each { |request|
-      self.delete("#{path}/#{request}")
-    }
-  end
-
-
-  # Writes a node in ZooKeeper indicating that the named application needs
-  # additional AppServers running to serve the amount of traffic currently
-  # accessing the caller's machine.
-  #
-  # Args:
-  #   appid: A String that names the application that should be scaled up.
-  #   ip: A String that names the IP address of the machine that is requesting
-  #     more AppServers for this application.
-  # Returns:
-  #   true if the request was successfully made, and false otherwise.
-  def self.request_scale_up_for_app(appid, ip)
-    return self.request_scaling_for_app(appid, ip, :scale_up)
-  end
-
-
-  # Writes a node in ZooKeeper indicating that the named application needs
-  # less AppServers running to serve the amount of traffic currently
-  # accessing the caller's machine.
-  #
-  # Args:
-  #   appid: A String that names the application that should be scaled down.
-  #   ip: A String that names the IP address of the machine that is requesting
-  #     less AppServers for this application.
-  # Returns:
-  #   true if the request was successfully made, and false otherwise.
-  def self.request_scale_down_for_app(appid, ip)
-    return self.request_scaling_for_app(appid, ip, :scale_down)
-  end
-
-
-  def self.request_scaling_for_app(appid, ip, decision)
-    begin
-      path = "#{SCALING_DECISION_PATH}/#{appid}/#{ip}"
-      self.set(SCALING_DECISION_PATH, DUMMY_DATA, NOT_EPHEMERAL)
-      self.set("#{SCALING_DECISION_PATH}/#{appid}", DUMMY_DATA, NOT_EPHEMERAL)
-      self.set(path, decision.to_s, NOT_EPHEMERAL)
-      return true
-    rescue FailedZooKeeperOperationException
-      return false
-    end
-  end


private


2 changes: 1 addition & 1 deletion AppController/scripts/appcontroller
@@ -25,7 +25,7 @@ do_start()
{
if [ ! -e $SECRET_FILE ]; then
log_begin_msg "AppScale not configured: not starting."
-        exit 0
+        exit 1
fi

# If we start from boot, we need to clear the monit state. The
