Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
112 changes: 100 additions & 12 deletions reframe/frontend/dependency.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
import itertools

import reframe as rfm
import reframe.utility as util
from reframe.core.exceptions import DependencyError


Expand Down Expand Up @@ -50,7 +51,6 @@ def resolve_dep(target, from_map, *args):

graph = {}
for c in cases:
graph[c] = c.deps
cname = c.check.name
pname = c.partition.fullname
ename = c.environ.name
Expand All @@ -70,6 +70,8 @@ def resolve_dep(target, from_map, *args):
c.deps.append(resolve_dep(c, cases_revmap,
tname, pname, te))

graph[c] = util.OrderedSet(c.deps)

return graph


Expand All @@ -78,6 +80,19 @@ def print_deps(graph):
print(c, '->', deps)


def _reduce_deps(graph):
    """Reduce a test case dependency graph to a test-only graph.

    All test cases of the same test are collapsed into a single node named
    after the test, and their dependencies are merged into a single
    ordered set of test names.
    """
    reduced = {}
    for case, case_deps in graph.items():
        name = case.check.name
        dep_names = util.OrderedSet(d.check.name for d in case_deps)
        if name in reduced:
            # Merge dependencies coming from other cases of the same test
            reduced[name] |= dep_names
        else:
            reduced[name] = dep_names

    return reduced


def validate_deps(graph):
"""Validate dependency graph."""

Expand All @@ -87,16 +102,7 @@ def validate_deps(graph):
# (t0, e1) -> (t1, e1)
# (t1, e0) -> (t0, e0)
#
# This reduction step will result in a graph description with duplicate
# entries in the adjacency list; this is not a problem, cos they will be
# filtered out during the DFS traversal below.
test_graph = {}
for case, deps in graph.items():
test_deps = [d.check.name for d in deps]
try:
test_graph[case.check.name] += test_deps
except KeyError:
test_graph[case.check.name] = test_deps
test_graph = _reduce_deps(graph)

# Check for cyclic dependencies in the test name graph
visited = set()
Expand All @@ -112,7 +118,7 @@ def validate_deps(graph):
while path and path[-1] != parent:
path.pop()

adjacent = reversed(test_graph[node])
adjacent = test_graph[node]
path.append(node)
for n in adjacent:
if n in path:
Expand All @@ -126,3 +132,85 @@ def validate_deps(graph):
visited.add(node)

sources -= visited


def _reverse_deps(graph):
    """Reverse the edges of ``graph``.

    Every node of ``graph`` appears as a key of the result, even if no
    other node depends on it.
    """
    rev = {}
    for node, adjacent in graph.items():
        # Ensure the node itself is present, even with no incoming edges
        rev.setdefault(node, util.OrderedSet())
        for dep in adjacent:
            rev.setdefault(dep, util.OrderedSet()).add(node)

    return rev


def toposort(graph):
    """Topologically sort the test cases of ``graph``.

    :arg graph: a mapping of test cases to the collection of test cases
        they depend on (as built by the dependency resolution above).
    :returns: a list with the test cases of ``graph`` ordered such that
        every test case appears after the test cases it depends on.
    """
    # NOTES on implementation:
    #
    # 1. This function assumes a directed acyclic graph.
    # 2. The purpose of this function is to topologically sort the test cases,
    #    not only the tests. However, since we do not allow cycles between
    #    tests in any case (even if this could be classified a
    #    pseudo-dependency), we first do a topological sort of the tests and we
    #    subsequently sort the test cases by partition and by programming
    #    environment.
    # 3. To achieve this 3-step sorting with a single sort operations, we rank
    #    the test cases by associating them with an integer key based on the
    #    result of the topological sort of the tests and by choosing an
    #    arbitrary ordering of the partitions and the programming environment.

    test_deps = _reduce_deps(graph)
    rev_deps = _reverse_deps(test_deps)

    # We do a BFS traversal from each root; roots are the tests that depend
    # on nothing.  `visited[r]` records, in visit order, the tests reached
    # from root `r`.
    visited = {}
    roots = set(t for t, deps in test_deps.items() if not deps)
    for r in roots:
        unvisited = util.OrderedSet([r])
        visited[r] = util.OrderedSet()
        while unvisited:
            # Next node is one whose all dependencies are already visited
            # FIXME: This makes sorting's complexity O(V^2)
            node = None
            for n in unvisited:
                if test_deps[n] <= visited[r]:
                    node = n
                    break

            # If node is None, graph has a cycle and this is a bug; this
            # function assumes acyclic graphs only
            assert node is not None

            unvisited.remove(node)
            adjacent = rev_deps[node]
            # NOTE(review): `n not in visited` tests membership against the
            # keys of the per-root `visited` dict (i.e., the roots), not
            # against `visited[r]`.  Re-enqueueing an already visited node
            # appears harmless, since `visited[r].add()` is idempotent --
            # confirm this is the intent.
            unvisited |= util.OrderedSet(
                n for n in adjacent if n not in visited
            )
            visited[r].add(node)

    # Combine all individual sequences into a single one
    ordered_tests = util.OrderedSet()
    for tests in visited.values():
        ordered_tests |= tests

    # Get all partitions and programming environments from test cases
    partitions = util.OrderedSet()
    environs = util.OrderedSet()
    for c in graph.keys():
        partitions.add(c.partition.fullname)
        environs.add(c.environ.name)

    # Rank test cases; we first need to calculate the base for the rank number.
    # The `+ 1` guarantees that the partition and environment "digits" of the
    # rank can never overflow into the test's "digit".
    base = max(len(partitions), len(environs)) + 1
    ranks = {}
    for i, test in enumerate(ordered_tests):
        for j, part in enumerate(partitions):
            for k, env in enumerate(environs):
                ranks[test, part, env] = i*base**2 + j*base + k

    return sorted(graph.keys(),
                  key=lambda x: ranks[x.check.name,
                                      x.partition.fullname, x.environ.name])
190 changes: 190 additions & 0 deletions reframe/utility/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import abc
import collections
import functools
import importlib
import importlib.util
import itertools
Expand Down Expand Up @@ -243,6 +244,195 @@ def __missing__(self, key):
raise KeyError(str(key))


class OrderedSet(collections.abc.MutableSet):
    """A set that remembers the insertion order of its elements.

    It is implemented on top of the keys of an :class:`OrderedDict`, whose
    values are all set to ``None``.

    All four ordering comparisons (``<``, ``<=``, ``>``, ``>=``) are
    defined explicitly rather than derived with
    :func:`functools.total_ordering`: subset/superset relations form only a
    *partial* order -- two sets may be incomparable (e.g. ``{1}`` and
    ``{2}``) -- so deriving ``<=`` as ``not >`` would yield wrong results.
    """

    def __init__(self, *args):
        # We need to allow construction without arguments
        if not args:
            iterable = []
        elif len(args) == 1:
            iterable = args[0]
        else:
            # We use the exact same error message as for the built-in set;
            # the format arguments must be a tuple, otherwise the `%`
            # operator itself raises a TypeError about the format string
            raise TypeError('%s expected at most 1 arguments, got %s' %
                            (type(self).__name__, len(args)))

        if not isinstance(iterable, collections.abc.Iterable):
            raise TypeError("'%s' object is not iterable" %
                            type(iterable).__name__)

        # We implement an ordered set through the keys of an OrderedDict;
        # its values are all set to None
        self.__data = collections.OrderedDict(
            itertools.zip_longest(iterable, [], fillvalue=None)
        )

    def __repr__(self):
        vals = self.__data.keys()
        if not vals:
            return type(self).__name__ + '()'
        else:
            return '{' + ', '.join(str(v) for v in vals) + '}'

    # Container i/face
    def __contains__(self, item):
        return item in self.__data

    def __iter__(self):
        return iter(self.__data)

    def __len__(self):
        return len(self.__data)

    # Set i/face
    #
    # Note on the complexity of the operators
    #
    # In every case below we first construct a set from the internal ordered
    # dictionary's keys and then apply the operator. This step's complexity is
    # O(len(self.__data.keys())). Since the complexity of the standard set
    # operators are at the order of magnitude of the lengths of the operands
    # (ranging from O(min(len(a), len(b))) to O(len(a) + len(b))), this step
    # does not change the complexity class; it just changes the constant
    # factor.
    #
    def __eq__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        return set(self.__data.keys()) == other

    def __lt__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        return set(self.__data.keys()) < other

    def __le__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        return set(self.__data.keys()) <= other

    def __gt__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        return set(self.__data.keys()) > other

    def __ge__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        return set(self.__data.keys()) >= other

    def __and__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        return set(self.__data.keys()) & other

    def __or__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        return set(self.__data.keys()) | other

    def __sub__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        return set(self.__data.keys()) - other

    def __xor__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        return set(self.__data.keys()) ^ other

    def isdisjoint(self, other):
        if not isinstance(other, collections.abc.Set):
            # Raise instead of returning `NotImplemented`: the latter is
            # only meaningful as a return value of binary dunder methods
            # and is truthy, so returning it here would silently report
            # the sets as disjoint
            raise TypeError("isdisjoint() argument must be a set, not '%s'" %
                            type(other).__name__)

        return set(self.__data.keys()).isdisjoint(other)

    def issubset(self, other):
        return self <= other

    def issuperset(self, other):
        return self >= other

    def symmetric_difference(self, other):
        return self ^ other

    def union(self, *others):
        ret = type(self)(self)
        for s in others:
            ret |= s

        return ret

    def intersection(self, *others):
        ret = type(self)(self)
        for s in others:
            ret &= s

        return ret

    def difference(self, *others):
        ret = type(self)(self)
        for s in others:
            ret -= s

        return ret

    # MutableSet i/face

    def add(self, elem):
        self.__data[elem] = None

    def remove(self, elem):
        del self.__data[elem]

    def discard(self, elem):
        try:
            self.remove(elem)
        except KeyError:
            pass

    def pop(self):
        # OrderedDict.popitem() pops in LIFO order
        return self.__data.popitem()[0]

    def clear(self):
        self.__data.clear()

    def __ior__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        for e in other:
            self.add(e)

        return self

    def __iand__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        discard_list = [e for e in self if e not in other]
        for e in discard_list:
            self.discard(e)

        return self

    def __isub__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        for e in other:
            self.discard(e)

        return self

    def __ixor__(self, other):
        if not isinstance(other, collections.abc.Set):
            return NotImplemented

        discard_list = [e for e in self if e in other]
        for e in discard_list:
            self.discard(e)

        return self

    # Other functions
    def __reversed__(self):
        return reversed(self.__data.keys())


class SequenceView(collections.abc.Sequence):
"""A read-only view of a sequence."""

Expand Down
Loading