Commit 15c03b2: optimize result merger

tycooon committed Apr 24, 2021 (1 parent: ddccb3c)
Showing 2 changed files with 10 additions and 5 deletions.
lib/simplecov/result_merger.rb (8 additions, 3 deletions)
@@ -32,11 +32,16 @@ def merge_results(*file_paths, ignore_timeout: false)
       # of data. Reading them all in easily produces Gigabytes of memory consumption which
       # we want to avoid.
 
-      results = file_paths.map { |path| valid_results(path, ignore_timeout: ignore_timeout) }
-      merge_coverage(results)
+      file_paths = file_paths.dup
+      initial_result = merge_file_results(file_paths.shift, ignore_timeout: ignore_timeout)
+
+      file_paths.reduce(initial_result) do |memo, path|
+        file_result = merge_file_results(path, ignore_timeout: ignore_timeout)
+        merge_coverage([memo, file_result])
+      end
     end
 
-    def valid_results(file_path, ignore_timeout: false)
+    def merge_file_results(file_path, ignore_timeout:)
       raw_results = parse_file(file_path)
       results = Result.from_hash(raw_results)
       merge_valid_results(results, ignore_timeout: ignore_timeout)
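Why this change reduces memory: the old implementation parsed every resultset file into an array before merging, so all N parsed results were resident at once; the new one folds files in one at a time with reduce, so only the running merge and the most recently parsed result are live. (The rename from valid_results to merge_file_results also makes ignore_timeout a required keyword, since the caller always passes it explicitly.) A minimal sketch of the pattern, where parse_result and merge_two are hypothetical stand-ins for SimpleCov's parsing and merge_coverage, not its actual API:

    # Hypothetical helpers for illustration only.
    def parse_result(path)
      # Pretend this returns a large parsed coverage hash.
      { path => File.read(path).bytesize }
    end

    def merge_two(a, b)
      a.merge(b) # stand-in for merge_coverage
    end

    paths = Dir["tmp/resultsets/*.json"]

    # Before: O(n) parsed results resident at once.
    # results = paths.map { |p| parse_result(p) }
    # merged  = results.reduce { |memo, r| merge_two(memo, r) }

    # After: at most two results resident at any point.
    paths = paths.dup
    initial = parse_result(paths.shift)
    merged = paths.reduce(initial) do |memo, path|
      merge_two(memo, parse_result(path))
    end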
lib/simplecov/result_serialization.rb (2 additions, 2 deletions)
@@ -24,7 +24,7 @@ def deserialize(hash) # rubocop:disable Metrics/MethodLength
       hash.map do |command_name, data|
         coverage = {}
 
-        data["coverage"].each do |file_name, file_data|
+        data.fetch("coverage").each do |file_name, file_data|
           parsed_file_data = {}
 
           file_data = {lines: file_data} if file_data.is_a?(Array)
@@ -39,7 +39,7 @@ def deserialize(hash) # rubocop:disable Metrics/MethodLength
 
         result = SimpleCov::Result.new(coverage)
         result.command_name = command_name
-        result.created_at = Time.at(data["timestamp"])
+        result.created_at = Time.at(data.fetch("timestamp"))
         result
       end
     end
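The serialization change swaps Hash#[] for Hash#fetch: with #[], a malformed resultset silently yields nil and fails later, far from the cause (Time.at(nil) raises a bare TypeError; nil.each raises NoMethodError), while #fetch raises a KeyError naming the missing key at the point of access. A quick illustration of this standard Ruby behavior:

    data = { "timestamp" => 1_619_222_400 }

    data["coverage"]        # => nil (the failure surfaces later, elsewhere)
    data.fetch("timestamp") # => 1619222400

    begin
      data.fetch("coverage")
    rescue KeyError => e
      puts e.message        # => key not found: "coverage"
    end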
