Skip to content

Commit

Permalink
Merge pull request #520 from SpiNNakerManchester/version
Browse files Browse the repository at this point in the history
Version
  • Loading branch information
rowleya committed Jul 28, 2023
2 parents 059c95a + 9620b4d commit ac0c091
Show file tree
Hide file tree
Showing 18 changed files with 119 additions and 60 deletions.
23 changes: 11 additions & 12 deletions pacman/operations/placer_algorithms/application_placer.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,6 @@
from spinn_utilities.ordered_set import OrderedSet
from spinn_utilities.progress_bar import ProgressBar

from spinn_machine import Machine

from pacman.data import PacmanDataView
from pacman.model.placements import Placements, Placement
from pacman.model.graphs import AbstractVirtual
Expand Down Expand Up @@ -138,7 +136,7 @@ def _place_error(
:param Placements system_placements:
:param PacmanPlaceException exception:
:param int plan_n_timesteps:
:param Machine machine:
:param ~spinn_machine.Machine machine:
:raises PacmanPlaceException:
"""
unplaceable = list()
Expand Down Expand Up @@ -224,10 +222,11 @@ def _check_could_fit(app_vertex, vertices_to_place, sdram):
:param int sdram:
:raises PacmanTooBigToPlace:
"""
version = PacmanDataView.get_machine_version()
max_sdram = (
Machine.DEFAULT_SDRAM_BYTES - PacmanDataView.get_monitor_sdram())
version.max_sdram_per_chip - PacmanDataView.get_monitor_sdram())
max_cores = (
Machine.DEFAULT_MAX_CORES_PER_CHIP - Machine.NON_USER_CORES -
version.max_cores_per_chip - version.n_non_user_cores -
PacmanDataView.get_monitor_cores())
n_cores = len(vertices_to_place)
if sdram <= max_sdram and n_cores <= max_cores:
Expand All @@ -238,16 +237,16 @@ def _check_could_fit(app_vertex, vertices_to_place, sdram):
f"the reason is that {vertices_to_place} ")
if sdram > max_sdram:
message += f"requires {sdram} bytes but "
if sdram > Machine.DEFAULT_SDRAM_BYTES:
message += f"a Chip only has {Machine.DEFAULT_SDRAM_BYTES} bytes "
if sdram > version.max_sdram_per_chip:
message += f"a Chip only has {version.max_sdram_per_chip} bytes "
else:
message += f"after monitors only {max_sdram} bytes are available "
message += "Lowering max_core_per_chip may resolve this."
raise PacmanTooBigToPlace(message)
if n_cores > Machine.DEFAULT_MAX_CORES_PER_CHIP:
if n_cores > version.max_cores_per_chip:
message += " is more vertices than the number of cores on a chip."
raise PacmanTooBigToPlace(message)
user_cores = Machine.DEFAULT_MAX_CORES_PER_CHIP - Machine.NON_USER_CORES
user_cores = version.max_cores_per_chip - version.n_non_user_cores
if n_cores > user_cores:
message += (
f"is more vertices than the user cores ({user_cores}) "
Expand Down Expand Up @@ -332,7 +331,7 @@ def _do_fixed_location(vertices, sdram, placements, machine, next_chip_space):
:param list(MachineVertex) vertices:
:param int sdram:
:param Placements placements:
:param Machine machine:
:param ~spinn_machine.Machine machine:
:param _ChipWithSpace next_chip_space:
:rtype: bool
:raise PacmanConfigurationException:
Expand Down Expand Up @@ -401,7 +400,7 @@ class _Spaces(object):

def __init__(self, machine, placements, plan_n_timesteps):
"""
:param Machine machine:
:param ~spinn_machine.Machine machine:
:param Placements placements:
:param int plan_n_timesteps:
"""
Expand Down Expand Up @@ -607,7 +606,7 @@ def __repr__(self):

def _chip_order(machine):
"""
:param Machine machine:
:param ~spinn_machine.Machine machine:
:rtype: iterable(Chip)
"""
s_x, s_y = get_config_str("Mapping", "placer_start_chip").split(",")
Expand Down
18 changes: 8 additions & 10 deletions pacman/operations/router_compressors/abstract_compressor.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,10 @@

from abc import abstractmethod
import logging

from spinn_utilities.config_holder import get_config_bool
from spinn_utilities.log import FormatAdapter
from spinn_utilities.progress_bar import ProgressBar
from spinn_machine import Machine
from pacman.data import PacmanDataView
from pacman.model.routing_tables import (
CompressedMulticastRoutingTable, MulticastRoutingTables)
Expand Down Expand Up @@ -70,7 +70,7 @@ def compress_tables(self, router_tables, progress):
"""
Compress all the unordered routing tables.
Tables whose initial size is smaller than target_length are not compressed
Tables whose initial size is smaller than global_target are not compressed
:param MulticastRoutingTables router_tables: Routing tables
:param ~spinn_utilities.progress_bar.ProgressBar progress:
Expand All @@ -80,14 +80,12 @@ def compress_tables(self, router_tables, progress):
:raises MinimisationFailedError: on failure
"""
compressed_tables = MulticastRoutingTables()
if get_config_bool(
"Mapping", "router_table_compress_as_far_as_possible"):
# Compress as much as possible
target_length = 0
else:
target_length = Machine.ROUTER_ENTRIES
as_needed = not (get_config_bool(
"Mapping", "router_table_compress_as_far_as_possible"))
for table in progress.over(router_tables.routing_tables):
if table.number_of_entries < target_length:
chip = PacmanDataView.get_chip_at(table.x, table.y)
target = chip.router.n_available_multicast_entries
if as_needed and table.number_of_entries <= target:
new_table = table
else:
compressed_table = self.compress_table(table)
Expand All @@ -97,7 +95,7 @@ def compress_tables(self, router_tables, progress):
for entry in compressed_table:
new_table.add_multicast_routing_entry(
entry.to_MulticastRoutingEntry())
if new_table.number_of_entries > Machine.ROUTER_ENTRIES:
if new_table.number_of_entries > target:
self._problems += (
f"(x:{new_table.x},y:{new_table.y})="
f"{new_table.number_of_entries} ")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from spinn_utilities.config_holder import get_config_bool
from spinn_machine import Machine
from pacman.operations.router_compressors import Entry
from pacman.exceptions import MinimisationFailedError
from .remove_default_routes import remove_default_routes
Expand All @@ -23,7 +21,7 @@


def minimise(
routing_table, use_timer_cut_off=False,
routing_table, target_length, use_timer_cut_off=False,
time_to_run_for_before_raising_exception=None):
"""
Reduce the size of a routing table by merging together entries where
Expand All @@ -45,6 +43,7 @@ def minimise(
:param list(Entry) routing_table:
Routing entries to be merged.
:param int target_length: The maximum number of entries to compress down to
:param bool use_timer_cut_off: flag for timing cut-off to be used.
:param time_to_run_for_before_raising_exception:
The time to run for in seconds before raising an exception
Expand All @@ -55,13 +54,6 @@ def minimise(
If the smallest table that can be produced is larger than
``target_length``.
"""
if get_config_bool(
"Mapping", "router_table_compress_as_far_as_possible"):
# Compress as much as possible
target_length = None
else:
target_length = Machine.ROUTER_ENTRIES

# Keep None values as that flags as much as possible
table, _ = ordered_covering(
routing_table=routing_table, target_length=target_length,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,9 @@
# limitations under the License.

import logging
from spinn_utilities.config_holder import get_config_bool
from spinn_utilities.log import FormatAdapter
from pacman.data import PacmanDataView
from pacman.operations.router_compressors import (AbstractCompressor, Entry)
from .ordered_covering import minimise

Expand Down Expand Up @@ -46,6 +48,14 @@ def compress_table(self, router_table):
:param UnCompressedMulticastRoutingTable router_table:
:rtype: list(Entry)
"""
if get_config_bool(
"Mapping", "router_table_compress_as_far_as_possible"):
# Compress as much as possible
target_length = None
else:
chip = PacmanDataView.get_chip_at(router_table.x, router_table.y)
target_length = chip.router.n_available_multicast_entries

# convert to rig inspired format
entries = list()

Expand All @@ -55,5 +65,5 @@ def compress_table(self, router_table):
entries.append(Entry.from_MulticastRoutingEntry(router_entry))

# compress the router entries
compressed_router_table_entries = minimise(entries)
compressed_router_table_entries = minimise(entries, target_length)
return compressed_router_table_entries
12 changes: 8 additions & 4 deletions pacman/operations/router_compressors/pair_compressor.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from spinn_machine import Machine
from pacman.data import PacmanDataView
from pacman.exceptions import PacmanElementAllocationException
from .abstract_compressor import AbstractCompressor
from .entry import Entry
Expand Down Expand Up @@ -43,7 +43,9 @@ def verify_lengths(compressed):
"""
problems = ""
for table in compressed:
if table.number_of_entries > Machine.ROUTER_ENTRIES:
chip = PacmanDataView.get_chip_at(table.x, table.y)
n_entries = chip.router.n_available_multicast_entries
if table.number_of_entries > n_entries:
problems += f"(x:{table.x},y:{table.y})={table.number_of_entries} "
if len(problems) > 0:
raise PacmanElementAllocationException(
Expand Down Expand Up @@ -385,8 +387,10 @@ def compress_table(self, router_table):
self._all_entries = []
self._routes_count = 0
# Imitate creating fixed size arrays
self._routes = Machine.ROUTER_ENTRIES * [None]
self._routes_frequency = Machine.ROUTER_ENTRIES * [None]
chip = PacmanDataView.get_chip_at(router_table.x, router_table.y)
n_routes = chip.router.n_available_multicast_entries
self._routes = n_routes * [None]
self._routes_frequency = n_routes * [None]

for entry in router_table.multicast_routing_entries:
self._all_entries.append(
Expand Down
11 changes: 7 additions & 4 deletions pacman/operations/router_compressors/ranged_compressor.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
from spinn_utilities.config_holder import get_config_bool
from spinn_utilities.log import FormatAdapter
from spinn_utilities.progress_bar import ProgressBar
from spinn_machine import Machine, MulticastRoutingEntry
from spinn_machine import MulticastRoutingEntry
from pacman.data import PacmanDataView
from pacman.model.routing_tables import (
CompressedMulticastRoutingTable, MulticastRoutingTables)
Expand All @@ -44,8 +44,9 @@ def range_compressor(accept_overflow=True):
compressed_tables = MulticastRoutingTables()
for table in progress.over(router_tables.routing_tables):
new_table = compressor.compress_table(table)
if (new_table.number_of_entries > Machine.ROUTER_ENTRIES and
not accept_overflow):
chip = PacmanDataView.get_chip_at(table.x, table.y)
target = chip.router.n_available_multicast_entries
if (new_table.number_of_entries > target and not accept_overflow):
raise MinimisationFailedError(
f"The routing table {table.x} {table.y} with "
f"{table.number_of_entries} entries after compression "
Expand Down Expand Up @@ -87,7 +88,9 @@ def compress_table(self, uncompressed):
# Check you need to compress
if not get_config_bool(
"Mapping", "router_table_compress_as_far_as_possible"):
if uncompressed.number_of_entries < Machine.ROUTER_ENTRIES:
chip = PacmanDataView.get_chip_at(uncompressed.x, uncompressed.y)
target = chip.router.n_available_multicast_entries
if uncompressed.number_of_entries < target:
return uncompressed

# Step 1 get the entries and make sure they are sorted by key
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
# limitations under the License.
import unittest

from spinn_machine import Machine
from spinn_utilities.config_holder import set_config
from spinn_machine.virtual_machine import virtual_machine
from pacman.data.pacman_data_writer import PacmanDataWriter
from pacman.exceptions import (
Expand Down Expand Up @@ -96,6 +96,7 @@ def _make_vertices(

def test_application_placer():
unittest_setup()
set_config("Machine", "version", 5)
writer = PacmanDataWriter.mock()
# fixed early works as this vertex is looked at first
fixed = SimpleTestVertex(10, "FIXED", max_atoms_per_core=1)
Expand All @@ -111,6 +112,7 @@ def test_application_placer():

def test_application_placer_late_fixed():
unittest_setup()
set_config("Machine", "version", 5)
writer = PacmanDataWriter.mock()
for i in range(56):
_make_vertices(writer, 1000, 14, 5, f"app_vertex_{i}")
Expand All @@ -131,9 +133,11 @@ def test_application_placer_late_fixed():

def test_sdram_bigger_than_chip():
unittest_setup()
set_config("Machine", "version", 5)
writer = PacmanDataWriter.mock()
max_sdram = writer.get_machine_version().max_sdram_per_chip
_make_vertices(writer, 1, 1, 5, "big_app_vertex",
sdram=Machine.DEFAULT_SDRAM_BYTES + 24)
sdram=max_sdram + 24)
try:
place_application_graph(Placements())
raise AssertionError("Error not raise")
Expand All @@ -143,21 +147,22 @@ def test_sdram_bigger_than_chip():

def test_sdram_bigger_monitors():
unittest_setup()
set_config("Machine", "version", 5)
writer = PacmanDataWriter.mock()
monitor = SimpleMachineVertex(
ConstantSDRAM(Machine.DEFAULT_SDRAM_BYTES // 2))
max_sdram = writer.get_machine_version().max_sdram_per_chip
monitor = SimpleMachineVertex(ConstantSDRAM(max_sdram // 2))
# This is purely an info call so test check directly
writer.add_monitor_all_chips(monitor)
try:
_check_could_fit("app_test", ["m_vertex]"],
sdram=Machine.DEFAULT_SDRAM_BYTES // 2 + 5)
_check_could_fit("app_test", ["m_vertex]"], sdram=max_sdram // 2 + 5)
raise AssertionError("Error not raise")
except PacmanTooBigToPlace as ex:
assert ("after monitors only" in str(ex))


def test_more_cores_than_chip():
unittest_setup()
set_config("Machine", "version", 5)
writer = PacmanDataWriter.mock()
_make_vertices(writer, 1, 1, 19, "big_app_vertex")
try:
Expand All @@ -169,6 +174,7 @@ def test_more_cores_than_chip():

def test_more_cores_than_user():
unittest_setup()
set_config("Machine", "version", 5)
writer = PacmanDataWriter.mock()
_make_vertices(writer, 1, 1, 18, "big_app_vertex")
try:
Expand All @@ -180,6 +186,7 @@ def test_more_cores_than_user():

def test_more_cores_with_monitor():
unittest_setup()
set_config("Machine", "version", 5)
writer = PacmanDataWriter.mock()
monitor = SimpleMachineVertex(ConstantSDRAM(4000))
# This is purely an info call so test check directly
Expand All @@ -194,6 +201,7 @@ def test_more_cores_with_monitor():

def test_could_fit():
unittest_setup()
set_config("Machine", "version", 5)
writer = PacmanDataWriter.mock()
monitor = SimpleMachineVertex(ConstantSDRAM(0))
writer.add_monitor_all_chips(monitor)
Expand Down

0 comments on commit ac0c091

Please sign in to comment.