diff --git a/pacman/operations/algorithms_metadata_schema.xsd b/pacman/operations/algorithms_metadata_schema.xsd index f784e7322..a1fc871bb 100644 --- a/pacman/operations/algorithms_metadata_schema.xsd +++ b/pacman/operations/algorithms_metadata_schema.xsd @@ -18,7 +18,10 @@ - + + + + diff --git a/pacman/operations/placer_algorithms/one_to_one_placer.py b/pacman/operations/placer_algorithms/one_to_one_placer.py index c7128faad..459c020df 100644 --- a/pacman/operations/placer_algorithms/one_to_one_placer.py +++ b/pacman/operations/placer_algorithms/one_to_one_placer.py @@ -5,7 +5,8 @@ from pacman.operations.placer_algorithms import RadialPlacer from pacman.utilities.utility_objs import ResourceTracker from pacman.utilities.algorithm_utilities.placer_algorithm_utilities import ( - get_same_chip_vertex_groups, get_vertices_on_same_chip, group_vertices) + create_vertices_groups, get_same_chip_vertex_groups, + get_vertices_on_same_chip) from pacman.model.constraints.placer_constraints import ( SameChipAsConstraint, ChipAndCoreConstraint, RadialPlacementFromChipConstraint) @@ -88,31 +89,51 @@ class OneToOnePlacer(RadialPlacer): def __call__(self, machine_graph, machine): + # Iterate over vertices and generate placements + # +3 covers check_constraints, get_same_chip_vertex_groups and + # create_vertices_groups + progress = ProgressBar( + machine_graph.n_vertices + 3, "Placing graph vertices") # check that the algorithm can handle the constraints self._check_constraints( machine_graph.vertices, additional_placement_constraints={SameChipAsConstraint}) - + progress.update() # Get which vertices must be placed on the same chip as another vertex same_chip_vertex_groups = get_same_chip_vertex_groups(machine_graph) + progress.update() # Work out the vertices that should be on the same chip by one-to-one # connectivity - one_to_one_groups = group_vertices( - machine_graph.vertices, functools.partial( - _find_one_to_one_vertices, graph=machine_graph)) + one_to_one_groups = 
create_vertices_groups( + machine_graph.vertices, + functools.partial(_find_one_to_one_vertices, graph=machine_graph)) + progress.update() return self._do_allocation( - one_to_one_groups, same_chip_vertex_groups, machine, machine_graph) + one_to_one_groups, same_chip_vertex_groups, machine, + machine_graph, progress) def _do_allocation( self, one_to_one_groups, same_chip_vertex_groups, - machine, machine_graph): + machine, machine_graph, progress): + """ + + :param one_to_one_groups: + Groups of vertices that would be nice on the same chip + :type one_to_one_groups: + list(set(vertex)) + :param same_chip_vertex_groups: + Mapping of Vertex to the Vertices that must be on the same Chip + :type same_chip_vertex_groups: + dict(vertex, collection(vertex)) + :param machine: + :param machine_graph: + :param progress: + :return: + """ placements = Placements() - # Iterate over vertices and generate placements - progress = ProgressBar( - machine_graph.n_vertices, "Placing graph vertices") resource_tracker = ResourceTracker( machine, self._generate_radial_chips(machine)) all_vertices_placed = set() @@ -142,50 +163,33 @@ def _do_allocation( vertex, placements, resource_tracker, same_chip_vertex_groups, all_vertices_placed, progress) - # iterate over the remaining unconstrained (or less constrained) - # vertices - for vertex in unconstrained: - - # If the vertex has been placed, skip it - if vertex in all_vertices_placed: - continue - - # Find vertices that are one-to-one connected to this one - one_to_one_vertices = one_to_one_groups[vertex] - + for grouped_vertices in one_to_one_groups: # Get unallocated vertices and placements of allocated vertices unallocated = list() chips = list() - for vert in one_to_one_vertices: + for vert in grouped_vertices: if vert in all_vertices_placed: placement = placements.get_placement_of_vertex(vert) chips.append((placement.x, placement.y)) else: unallocated.append(vert) - # if too many one to ones to fit on a chip, allocate individually - if 
len(unallocated) > \ + if len(unallocated) <=\ resource_tracker.get_maximum_cores_available_on_a_chip(): - for vert in unallocated: - self._allocate_same_chip_as_group( - vert, placements, resource_tracker, - same_chip_vertex_groups, all_vertices_placed, - progress) - continue - - # Try to allocate all vertices to the same chip - success = self._allocate_one_to_one_group( - resource_tracker, unallocated, progress, placements, chips, - all_vertices_placed) - - if not success: - - # Something went wrong, try to allocate each individually - for vertex in progress.over(unallocated, False): - self._allocate_same_chip_as_group( - vertex, placements, resource_tracker, - same_chip_vertex_groups, all_vertices_placed, - progress) + # Try to allocate all vertices to the same chip + self._allocate_one_to_one_group( + resource_tracker, unallocated, progress, placements, chips, + all_vertices_placed) + # if too big or failed, go on to the other groups first + + # check all have been allocated; if not, do so now. 
+ for vertex in machine_graph.vertices: + if vertex not in all_vertices_placed: + self._allocate_same_chip_as_group( + vertex, placements, resource_tracker, + same_chip_vertex_groups, all_vertices_placed, + progress) + progress.end() return placements diff --git a/pacman/utilities/algorithm_utilities/placer_algorithm_utilities.py b/pacman/utilities/algorithm_utilities/placer_algorithm_utilities.py index 15ef151b3..5ec19759c 100644 --- a/pacman/utilities/algorithm_utilities/placer_algorithm_utilities.py +++ b/pacman/utilities/algorithm_utilities/placer_algorithm_utilities.py @@ -1,3 +1,4 @@ +import functools try: from collections.abc import OrderedDict except ImportError: @@ -8,7 +9,6 @@ RadialPlacementFromChipConstraint) from pacman.model.graphs.common.edge_traffic_type import EdgeTrafficType from pacman.utilities import VertexSorter, ConstraintOrder -import functools from pacman.model.graphs.abstract_virtual_vertex import AbstractVirtualVertex @@ -68,50 +68,55 @@ def group_vertices(vertices, same_group_as_function): A dictionary of vertex to list of vertices that are grouped with it """ - # Dict of vertex to list of vertices on same chip (repeated lists expected) + groups = create_vertices_groups(vertices, same_group_as_function) + # Dict of vertex to set of vertices on same chip (repeated lists expected) + # An empty set value indicates a set that is too big. same_chip_vertices = OrderedDict() + for group in groups: + for vertex in group: + same_chip_vertices[vertex] = group + for vertex in vertices: + if vertex not in same_chip_vertices: + same_chip_vertices[vertex] = {vertex} + return same_chip_vertices + + +def add_set(all_sets, new_set): + """ + Adds a new set into the list of sets, concatenating sets if required. + If the new set does not overlap any existing sets it is added. + + However if the new set overlaps one or more existing sets a super set is + created combining all the overlapping sets. 
+ Existing overlapping sets are removed and only the new super set is added. + + :param all_sets: List of non-overlapping sets + :param new_set: A new set which may or may not overlap the previous sets. + """ + + union = OrderedSet() + removes = [] + for a_set in all_sets: + intersection = new_set & a_set + if intersection: + removes.append(a_set) + union = union | a_set + union = union | new_set + for a_set in removes: + all_sets.remove(a_set) + all_sets.append(union) + return + + +def create_vertices_groups(vertices, same_group_as_function): + groups = list() for vertex in vertices: - # Find all vertices that should be grouped with this vertex same_chip_as_vertices = same_group_as_function(vertex) - if same_chip_as_vertices: - # Go through all the vertices that want to be on the same chip as - # the top level vertex - for same_as_chip_vertex in same_chip_as_vertices: - # Neither vertex has been seen - if (same_as_chip_vertex not in same_chip_vertices and - vertex not in same_chip_vertices): - # add both to a new group - group = {vertex, same_as_chip_vertex} - same_chip_vertices[vertex] = group - same_chip_vertices[same_as_chip_vertex] = group - - # Both vertices have been seen elsewhere - elif (same_as_chip_vertex in same_chip_vertices and - vertex in same_chip_vertices): - # merge their groups - group_1 = same_chip_vertices[vertex] - group_2 = same_chip_vertices[same_as_chip_vertex] - group_1.update(group_2) - for vert in group_1: - same_chip_vertices[vert] = group_1 - - # The current vertex has been seen elsewhere - elif vertex in same_chip_vertices: - # add the new vertex to the existing group - group = same_chip_vertices[vertex] - group.add(same_as_chip_vertex) - same_chip_vertices[same_as_chip_vertex] = group - - # The other vertex has been seen elsewhere - elif same_as_chip_vertex in same_chip_vertices: - # so add this vertex to the existing group - group = same_chip_vertices[same_as_chip_vertex] - group.add(vertex) - same_chip_vertices[vertex] = group - - 
else: - same_chip_vertices[vertex] = OrderedSet([vertex]) - - return same_chip_vertices + same_chip_as_vertices = OrderedSet(same_chip_as_vertices) + same_chip_as_vertices.add(vertex) + # Singletons are not interesting and are added later if needed + if len(same_chip_as_vertices) > 1: + add_set(groups, same_chip_as_vertices) + return groups diff --git a/unittests/utilities_tests/test_placer_algorithm_utilities.py b/unittests/utilities_tests/test_placer_algorithm_utilities.py new file mode 100644 index 000000000..194834b93 --- /dev/null +++ b/unittests/utilities_tests/test_placer_algorithm_utilities.py @@ -0,0 +1,42 @@ +import unittest +from pacman.utilities.algorithm_utilities.placer_algorithm_utilities import \ + add_set + + +class TestUtilities(unittest.TestCase): + + def test_add_join(self): + all_sets = list() + all_sets.append({1, 2}) + all_sets.append({3, 4}) + all_sets.append({5, 6}) + new_set = {2, 4} + add_set(all_sets, new_set) + self.assertEqual(2, len(all_sets)) + self.assertIn({1, 2, 3, 4}, all_sets) + self.assertIn({5, 6}, all_sets) + + def test_add_one(self): + all_sets = list() + all_sets.append({1, 2}) + all_sets.append({3, 4}) + all_sets.append({5, 6}) + new_set = {2, 7} + add_set(all_sets, new_set) + self.assertEqual(3, len(all_sets)) + self.assertIn({1, 2, 7}, all_sets) + self.assertIn({3, 4}, all_sets) + self.assertIn({5, 6}, all_sets) + + def test_add_new(self): + all_sets = list() + all_sets.append({1, 2}) + all_sets.append({3, 4}) + all_sets.append({5, 6}) + new_set = {8, 7} + add_set(all_sets, new_set) + self.assertEqual(4, len(all_sets)) + self.assertIn({1, 2}, all_sets) + self.assertIn({3, 4}, all_sets) + self.assertIn({5, 6}, all_sets) + self.assertIn({7, 8}, all_sets)