Merge pull request #507 from SpiNNakerManchester/too_big_too_place
Too big too place
Christian-B committed Apr 26, 2023
2 parents ee3bdce + 44f45db commit d91b630
Showing 6 changed files with 193 additions and 9 deletions.
30 changes: 30 additions & 0 deletions pacman/data/pacman_data_view.py
@@ -45,6 +45,8 @@ class _PacmanDataModel(object):
"_placements",
"_plan_n_timesteps",
"_precompressed",
"_monitor_cores",
"_monitor_sdram",
"_routing_infos",
"_routing_table_by_partition",
"_tags",
@@ -77,6 +79,8 @@ def _hard_reset(self):
self._graph.reset()
self._placements = None
self._precompressed = None
self._monitor_cores = 0
self._monitor_sdram = 0
self._uncompressed = None
self._routing_infos = None
self._routing_table_by_partition = None
@@ -506,3 +510,29 @@ def get_routing_table_by_partition(cls):
if cls.__pacman_data._routing_table_by_partition is None:
raise cls._exception("routing_table_by_partition")
return cls.__pacman_data._routing_table_by_partition

@classmethod
def get_monitor_cores(cls):
"""
The number of cores on every Chip reported to be used by monitors.
Ethernet Chips may have more.
Does not include the system core reserved by the machine itself
:rtype: int
"""
return cls.__pacman_data._monitor_cores

@classmethod
def get_monitor_sdram(cls):
"""
The amount of sdram on every Chip reported to be used by monitors.
Ethernet Chips may have more.
Does not include the system sdram reserved by the machine itself
:rtype: int
"""
return cls.__pacman_data._monitor_sdram
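For orientation, here is a minimal sketch (not part of the diff; the helper name is invented) of how these two getters are intended to be consumed: combined with the spinn_machine defaults they give the per-chip budget left for user vertices, which is the same arithmetic _check_could_fit performs later in this pull request.

from spinn_machine import Machine
from pacman.data import PacmanDataView


def user_budget_per_chip():
    """Cores and SDRAM left on a typical chip after system and monitor use."""
    core_budget = (
        Machine.DEFAULT_MAX_CORES_PER_CHIP - Machine.NON_USER_CORES -
        PacmanDataView.get_monitor_cores())
    sdram_budget = (
        Machine.DEFAULT_SDRAM_BYTES - PacmanDataView.get_monitor_sdram())
    return core_budget, sdram_budget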
18 changes: 18 additions & 0 deletions pacman/data/pacman_data_writer.py
@@ -172,3 +172,21 @@ def add_edge(cls, edge, outgoing_edge_partition_name):
raise PacmanConfigurationException(
"This call is only expected if requires mapping is True")
cls.__pacman_data._graph.add_edge(edge, outgoing_edge_partition_name)

def add_monitor_all_chips(self, vertex):
"""
Reports that a monitor has been added to every Chip.
Should be called once for each monitor added to all Chips.
Should not be called for Ethernet only monitors.
Only affect is to change the numbers reported by the get_monitor
methods
:param ~pacman.model.graphs.machine.MachineVertex vertex:
One of the Vertcies added to each Core
"""
self.__pacman_data._monitor_cores += 1
self.__pacman_data._monitor_sdram += \
vertex.sdram_required.get_total_sdram(self.get_plan_n_timestep())
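A short usage sketch (values illustrative; the calls mirror the unit tests added later in this diff): registering a monitor with variable SDRAM folds the per-timestep cost into the recorded total using the planned number of timesteps at the moment it is registered.

from pacman.data import PacmanDataView
from pacman.data.pacman_data_writer import PacmanDataWriter
from pacman.model.graphs.machine import SimpleMachineVertex
from pacman.model.resources import VariableSDRAM

writer = PacmanDataWriter.setup()     # test-style setup, as in the unit tests
writer.set_plan_n_timesteps(1000)

# A monitor needing 1024 bytes fixed plus 4 bytes per timestep on every chip
writer.add_monitor_all_chips(SimpleMachineVertex(VariableSDRAM(1024, 4)))

print(PacmanDataView.get_monitor_cores())   # 1
print(PacmanDataView.get_monitor_sdram())   # 1024 + 4 * 1000 = 5024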
6 changes: 6 additions & 0 deletions pacman/exceptions.py
@@ -62,6 +62,12 @@ class PacmanPlaceException(PacmanException):
"""


class PacmanTooBigToPlace(PacmanException):
    """
    Raised when what is requested to be placed on a single chip is too
    big to fit on any chip.
    """


class PacmanPruneException(PacmanException):
"""
Something went wrong with pruning.
41 changes: 39 additions & 2 deletions pacman/operations/placer_algorithms/application_placer.py
@@ -21,11 +21,13 @@
from spinn_utilities.ordered_set import OrderedSet
from spinn_utilities.progress_bar import ProgressBar

from spinn_machine import Machine

from pacman.data import PacmanDataView
from pacman.model.placements import Placements, Placement
from pacman.model.graphs import AbstractVirtual
from pacman.exceptions import (
PacmanPlaceException, PacmanConfigurationException)
PacmanPlaceException, PacmanConfigurationException, PacmanTooBigToPlace)

logger = FormatAdapter(logging.getLogger(__name__))

@@ -115,6 +117,7 @@ def place_application_graph(system_placements):
# fatal since the last space might have just been bound by
# existing placements, and there might be bigger spaces out
# there to use
_check_could_fit(app_vertex, vertices_to_place, sdram)
logger.debug(f"Failed, saving {chips_attempted}")
spaces.save_chips(chips_attempted)
chips_attempted.clear()
@@ -204,6 +207,40 @@ def _place_error(
f" Report written to {report_file}.")


def _check_could_fit(app_vertex, vertices_to_place, sdram):
    """
    Checks that the group of vertices intended for one chip could fit on
    at least one chip, raising PacmanTooBigToPlace if it never could.
    """
    max_sdram = (
        Machine.DEFAULT_SDRAM_BYTES - PacmanDataView.get_monitor_sdram())
    max_cores = (
        Machine.DEFAULT_MAX_CORES_PER_CHIP - Machine.NON_USER_CORES -
        PacmanDataView.get_monitor_cores())
    n_cores = len(vertices_to_place)
    if sdram <= max_sdram and n_cores <= max_cores:
        # should fit somewhere
        return
    message = (
        f"{app_vertex} will not fit on any possible Chip; "
        f"the reason is that {vertices_to_place} ")
    if sdram > max_sdram:
        message += f"requires {sdram} bytes but "
        if sdram > Machine.DEFAULT_SDRAM_BYTES:
            message += \
                f"a Chip only has {Machine.DEFAULT_SDRAM_BYTES} bytes. "
        else:
            message += \
                f"after monitors only {max_sdram} bytes are available. "
        message += "Lowering max_core_per_chip may resolve this."
        raise PacmanTooBigToPlace(message)
    if n_cores > Machine.DEFAULT_MAX_CORES_PER_CHIP:
        message += "is more vertices than the number of cores on a chip."
        raise PacmanTooBigToPlace(message)
    user_cores = Machine.DEFAULT_MAX_CORES_PER_CHIP - Machine.NON_USER_CORES
    if n_cores > user_cores:
        message += (
            f"is more vertices than the user cores ({user_cores}) "
            f"available on a Chip.")
        raise PacmanTooBigToPlace(message)
    message += (
        f"is more vertices than the {max_cores} cores available on a "
        f"Chip once {PacmanDataView.get_monitor_cores()} "
        f"are reserved for monitors.")
    raise PacmanTooBigToPlace(message)
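# Worked example of the arithmetic above (illustrative only, using the
# defaults exercised by the unit tests in this pull request:
# DEFAULT_MAX_CORES_PER_CHIP = 18 and NON_USER_CORES = 1). With one
# monitor registered via add_monitor_all_chips, max_cores = 18 - 1 - 1
# = 16, so a same-chip group of 17 vertices can never be placed and the
# final branch above raises PacmanTooBigToPlace.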


def _next_colour():
"""
Get the next (random) RGB colour to use for a vertex for placement drawings
@@ -518,7 +555,7 @@ def __init__(self, chip, used_processors, used_sdram):
self.cores = set(p.processor_id for p in chip.processors
if not p.is_monitor)
self.cores -= used_processors
self.sdram = chip.sdram.size - used_sdram
self.sdram = chip.sdram - used_sdram

@property
def x(self):
15 changes: 15 additions & 0 deletions unittests/data/test_data.py
@@ -24,6 +24,7 @@
from pacman.model.graphs.application import ApplicationEdge
from pacman.model.graphs.machine import SimpleMachineVertex
from pacman.model.placements import Placement, Placements
from pacman.model.resources import ConstantSDRAM, VariableSDRAM
from pacman.model.routing_info import RoutingInfo
from pacman.model.routing_table_by_partition import (
MulticastRoutingTableByPartition)
@@ -344,3 +345,17 @@ def test_add_requires_mapping(self):
self.assertTrue(PacmanDataView.get_requires_mapping())
writer.hard_reset()
self.assertTrue(PacmanDataView.get_requires_mapping())

def test_get_monitors(self):
writer = PacmanDataWriter.setup()
        writer.set_plan_n_timesteps(45)
self.assertEqual(0, PacmanDataView.get_monitor_cores())
self.assertEqual(0, PacmanDataView.get_monitor_sdram())
writer.add_monitor_all_chips(SimpleMachineVertex(ConstantSDRAM(200)))
self.assertEqual(1, PacmanDataView.get_monitor_cores())
self.assertEqual(200, PacmanDataView.get_monitor_sdram())
writer.add_monitor_all_chips(SimpleMachineVertex(
VariableSDRAM(100, 10)))
self.assertEqual(2, PacmanDataView.get_monitor_cores())
target = 200 + 100 + 10 * 45
self.assertEqual(target, PacmanDataView.get_monitor_sdram())
@@ -13,14 +13,16 @@
# limitations under the License.
import unittest

from spinn_machine import Machine
from spinn_machine.virtual_machine import virtual_machine
from pacman.data.pacman_data_writer import PacmanDataWriter
from pacman.exceptions import PacmanConfigurationException
from pacman.exceptions import (
PacmanConfigurationException, PacmanTooBigToPlace)
from pacman.model.partitioner_splitters.abstract_splitters import (
AbstractSplitterCommon)
from pacman.model.partitioner_splitters import SplitterFixedLegacy
from pacman.operations.placer_algorithms.application_placer import (
place_application_graph)
place_application_graph, _check_could_fit)
from pacman.model.graphs.machine import SimpleMachineVertex
from pacman.model.resources import ConstantSDRAM
from pacman.model.graphs.application import ApplicationVertex
@@ -32,22 +34,25 @@

class TestSplitter(AbstractSplitterCommon):

def __init__(self, n_groups, n_machine_vertices):
def __init__(self, n_groups, n_machine_vertices, sdram=0):
super().__init__()
self.__n_groups = n_groups
self.__n_machine_vertices = n_machine_vertices
self.__same_chip_groups = list()
self.__sdram = sdram

def create_machine_vertices(self, chip_counter):
for _ in range(self.__n_groups):
m_vertices = [
SimpleMachineVertex(
ConstantSDRAM(0), app_vertex=self._governed_app_vertex,
ConstantSDRAM(0),
app_vertex=self._governed_app_vertex,
label=f"{self._governed_app_vertex.label}_{i}")
for i in range(self.__n_machine_vertices)]
for m_vertex in m_vertices:
self._governed_app_vertex.remember_machine_vertex(m_vertex)
self.__same_chip_groups.append((m_vertices, ConstantSDRAM(0)))
self.__same_chip_groups.append(
(m_vertices, ConstantSDRAM(self.__sdram)))

def get_out_going_slices(self):
return None
@@ -81,9 +86,10 @@ def n_atoms(self):
return self.__n_atoms


def _make_vertices(writer, n_atoms, n_groups, n_machine_vertices, label):
def _make_vertices(
writer, n_atoms, n_groups, n_machine_vertices, label, sdram=0):
vertex = TestAppVertex(n_atoms, label)
vertex.splitter = TestSplitter(n_groups, n_machine_vertices)
vertex.splitter = TestSplitter(n_groups, n_machine_vertices, sdram)
writer.add_vertex(vertex)
vertex.splitter.create_machine_vertices(None)
return vertex
@@ -122,3 +128,75 @@ def test_application_placer_late_fixed():
except PacmanConfigurationException:
raise unittest.SkipTest(
"https://github.com/SpiNNakerManchester/PACMAN/issues/444")


def test_sdram_bigger_than_chip():
unittest_setup()
writer = PacmanDataWriter.mock()
_make_vertices(writer, 1, 1, 5, "big_app_vertex",
sdram=Machine.DEFAULT_SDRAM_BYTES + 24)
try:
place_application_graph(Placements())
        raise AssertionError("Error not raised")
except PacmanTooBigToPlace as ex:
assert ("a Chip only has" in str(ex))


def test_sdram_bigger_monitors():
unittest_setup()
writer = PacmanDataWriter.mock()
monitor = SimpleMachineVertex(
ConstantSDRAM(Machine.DEFAULT_SDRAM_BYTES // 2))
    # This is purely an info call, so the test calls _check_could_fit directly
writer.add_monitor_all_chips(monitor)
try:
        _check_could_fit("app_test", ["m_vertex"],
                         sdram=Machine.DEFAULT_SDRAM_BYTES // 2 + 5)
        raise AssertionError("Error not raised")
except PacmanTooBigToPlace as ex:
assert ("after monitors only" in str(ex))


def test_more_cores_than_chip():
unittest_setup()
writer = PacmanDataWriter.mock()
_make_vertices(writer, 1, 1, 19, "big_app_vertex")
try:
place_application_graph(Placements())
        raise AssertionError("Error not raised")
except PacmanTooBigToPlace as ex:
assert ("number of cores on a chip" in str(ex))


def test_more_cores_than_user():
unittest_setup()
writer = PacmanDataWriter.mock()
_make_vertices(writer, 1, 1, 18, "big_app_vertex")
try:
place_application_graph(Placements())
        raise AssertionError("Error not raised")
except PacmanTooBigToPlace as ex:
assert ("the user cores" in str(ex))


def test_more_cores_with_monitor():
unittest_setup()
writer = PacmanDataWriter.mock()
monitor = SimpleMachineVertex(ConstantSDRAM(4000))
    # This is purely an info call, so the test calls _check_could_fit directly
writer.add_monitor_all_chips(monitor)
    m_vertices = [f"m_v_{i}" for i in range(17)]
    try:
        _check_could_fit("app_test", m_vertices, 500000)
        raise AssertionError("Error not raised")
except PacmanTooBigToPlace as ex:
assert ("reserved for monitors" in str(ex))


def test_could_fit():
unittest_setup()
writer = PacmanDataWriter.mock()
monitor = SimpleMachineVertex(ConstantSDRAM(0))
writer.add_monitor_all_chips(monitor)
    m_vertices = [f"m_v_{i}" for i in range(16)]
    _check_could_fit("app_test", m_vertices, 500000)
