diff --git a/.coveragerc b/.coveragerc index e9d8cd7..a3bc7eb 100644 --- a/.coveragerc +++ b/.coveragerc @@ -14,5 +14,4 @@ exclude_lines = # Only check coverage for source files include = - cachesimulator/simulator.py - cachesimulator/table.py + cachesimulator/*.py diff --git a/LICENSE.txt b/LICENSE.txt index 28bdd8b..2a4a546 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2015-2016 Caleb Evans +Copyright (c) 2015-2018 Caleb Evans Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 3f9e3c9..45e4893 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,19 @@ # Cache Simulator -*Copyright 2015-2016 Caleb Evans* +*Copyright 2015-2018 Caleb Evans* *Released under the MIT license* [![Build Status](https://travis-ci.org/caleb531/cache-simulator.svg?branch=master)](https://travis-ci.org/caleb531/cache-simulator) [![Coverage Status](https://coveralls.io/repos/caleb531/cache-simulator/badge.svg?branch=master)](https://coveralls.io/r/caleb531/cache-simulator?branch=master) -This program simulates a processor cache for the MIPS instruction set architecture. It can simulate all three fundamental caching schemes: direct-mapped, *n*-way set associative, and fully associative. +This program simulates a processor cache for the MIPS instruction set +architecture. It can simulate all three fundamental caching schemes: +direct-mapped, *n*-way set associative, and fully associative. -The program must be run from the command line and requires Python 3 to run. Executing the program will run the simulation and print an ASCII table containing the details for each supplied word address, as well as the final contents of the cache. +The program must be run from the command line and requires Python 3.4+ to run. +Executing the program will run the simulation and print an ASCII table +containing the details for each supplied word address, as well as the final +contents of the cache. To see example input and output, see `examples.txt`. @@ -30,13 +35,17 @@ The size of the cache in words (recall that one word is four bytes in MIPS). #### --word-addrs -One or more word addresses (separated by spaces), where each word address is a base-10 positive integer. +One or more word addresses (separated by spaces), where each word address is a +base-10 positive integer. ### Optional parameters #### --num-blocks-per-set -The program internally represents all cache schemes using a set associative cache. A value of `1` for this parameter (the default) implies a direct-mapped cache. A value other than `1` implies either a set associative *or* fully associative cache. +The program internally represents all cache schemes using a set associative +cache. A value of `1` for this parameter (the default) implies a direct-mapped +cache. A value other than `1` implies either a set associative *or* fully +associative cache. #### --num-words-per-block @@ -44,8 +53,12 @@ The number of words to store for each block in the cache; the default value is ` #### --num-addr-bits -The number of bits used to represent each given word address; this value is reflected in the *BinAddr* column in the reference table. If omitted, the default value is the number of bits needed to represent the largest of the given word addresses. +The number of bits used to represent each given word address; this value is +reflected in the *BinAddr* column in the reference table. 
If omitted, the +default value is the number of bits needed to represent the largest of the given +word addresses. #### --replacement-policy -The replacement policy to use for the cache. Accepted values are `lru` (Least Recently Used; the default) and `mru` (Most Recently Used). +The replacement policy to use for the cache. Accepted values are `lru` (Least +Recently Used; the default) and `mru` (Most Recently Used). diff --git a/cachesimulator/__main__.py b/cachesimulator/__main__.py new file mode 100644 index 0000000..2e336ce --- /dev/null +++ b/cachesimulator/__main__.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 + +import argparse + +from cachesimulator.simulator import Simulator + + +# Parse command-line arguments passed to the program +def parse_cli_args(): + + parser = argparse.ArgumentParser() + + parser.add_argument( + '--cache-size', + type=int, + required=True, + help='the size of the cache in words') + + parser.add_argument( + '--num-blocks-per-set', + type=int, + default=1, + help='the number of blocks per set') + + parser.add_argument( + '--num-words-per-block', + type=int, + default=1, + help='the number of words per block') + + parser.add_argument( + '--word-addrs', + nargs='+', + type=int, + required=True, + help='one or more base-10 word addresses') + + parser.add_argument( + '--num-addr-bits', + type=int, + default=1, + help='the number of bits in each given word address') + + parser.add_argument( + '--replacement-policy', + choices=('lru', 'mru'), + default='lru', + # Ignore argument case (e.g. "mru" and "MRU" are equivalent) + type=str.lower, + help='the cache replacement policy (LRU or MRU)') + + return parser.parse_args() + + +def main(): + + cli_args = parse_cli_args() + sim = Simulator() + sim.run_simulation(**vars(cli_args)) + + +if __name__ == '__main__': + main() diff --git a/cachesimulator/bin_addr.py b/cachesimulator/bin_addr.py new file mode 100644 index 0000000..e4cc3cc --- /dev/null +++ b/cachesimulator/bin_addr.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 + + +class BinaryAddress(str): + + # Retrieves the binary address of a certain length for a base-10 word + # address; we must define __new__ instead of __init__ because the class we + # are inheriting from (str) is an immutable data type + def __new__(cls, bin_addr=None, word_addr=None, num_addr_bits=0): + + if word_addr is not None: + return super().__new__( + cls, bin(word_addr)[2:].zfill(num_addr_bits)) + else: + return super().__new__(cls, bin_addr) + + @classmethod + def prettify(cls, bin_addr, min_bits_per_group): + + mid = len(bin_addr) // 2 + + if mid < min_bits_per_group: + # Return binary string immediately if bisecting the binary string + # produces a substring which is too short + return bin_addr + else: + # Otherwise, bisect binary string and separate halves with a space + left = cls.prettify(bin_addr[:mid], min_bits_per_group) + right = cls.prettify(bin_addr[mid:], min_bits_per_group) + return ' '.join((left, right)) + + # Retrieves the tag used to distinguish cache entries with the same index + def get_tag(self, num_tag_bits): + + end = num_tag_bits + tag = self[:end] + if len(tag) != 0: + return tag + else: + return None + + # Retrieves the index used to group blocks in the cache + def get_index(self, num_offset_bits, num_index_bits): + + start = len(self) - num_offset_bits - num_index_bits + end = len(self) - num_offset_bits + index = self[start:end] + if len(index) != 0: + return index + else: + return None + + # Retrieves the word offset used to select a word in the data pointed to by + # 
the given binary address + def get_offset(self, num_offset_bits): + + start = len(self) - num_offset_bits + offset = self[start:] + if len(offset) != 0: + return offset + else: + return None diff --git a/cachesimulator/cache.py b/cachesimulator/cache.py new file mode 100644 index 0000000..1b80cbb --- /dev/null +++ b/cachesimulator/cache.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 + +from cachesimulator.bin_addr import BinaryAddress +from cachesimulator.reference import ReferenceCacheStatus +from cachesimulator.word_addr import WordAddress + + +class Cache(dict): + + # Initializes the reference cache with a fixed number of sets + def __init__(self, cache=None, num_sets=None, num_index_bits=0): + + # A list of recently ordered addresses, ordered from least-recently + # used to most + self.recently_used_addrs = [] + + if cache is not None: + self.update(cache) + else: + for i in range(num_sets): + index = BinaryAddress( + word_addr=WordAddress(i), num_addr_bits=num_index_bits) + self[index] = [] + + # Every time we see an address, place it at the top of the + # list of recently-seen addresses + def mark_ref_as_last_seen(self, ref): + + # The index and tag (not the offset) uniquely identify each address + addr_id = (ref.index, ref.tag) + if addr_id in self.recently_used_addrs: + self.recently_used_addrs.remove(addr_id) + self.recently_used_addrs.append(addr_id) + + # Returns True if a block at the given index and tag exists in the cache, + # indicating a hit; returns False otherwise, indicating a miss + def is_hit(self, addr_index, addr_tag): + + # Ensure that indexless fully associative caches are accessed correctly + if addr_index is None: + blocks = self['0'] + elif addr_index in self: + blocks = self[addr_index] + else: + return False + + for block in blocks: + if block['tag'] == addr_tag: + return True + + return False + + # Adds the given entry to the cache at the given index + def set_block(self, replacement_policy, + num_blocks_per_set, addr_index, new_entry): + + # Place all cache entries in a single set if cache is fully associative + if addr_index is None: + blocks = self['0'] + else: + blocks = self[addr_index] + # Replace MRU or LRU entry if number of blocks in set exceeds the limit + if len(blocks) == num_blocks_per_set: + # Iterate through the recently-used entries in reverse order for + # MRU + if replacement_policy == 'mru': + recently_used_addrs = reversed(self.recently_used_addrs) + else: + recently_used_addrs = self.recently_used_addrs + # Replace the first matching entry with the entry to add + for recent_index, recent_tag in recently_used_addrs: + for i, block in enumerate(blocks): + if (recent_index == addr_index and + block['tag'] == recent_tag): + blocks[i] = new_entry + return + else: + blocks.append(new_entry) + + # Simulate the cache by reading the given address references into it + def read_refs(self, num_blocks_per_set, + num_words_per_block, replacement_policy, refs): + + for ref in refs: + self.mark_ref_as_last_seen(ref) + + # Record if the reference is already in the cache or not + if self.is_hit(ref.index, ref.tag): + # Give emphasis to hits in contrast to misses + ref.cache_status = ReferenceCacheStatus.hit + else: + ref.cache_status = ReferenceCacheStatus.miss + self.set_block( + replacement_policy=replacement_policy, + num_blocks_per_set=num_blocks_per_set, + addr_index=ref.index, + new_entry=ref.get_cache_entry(num_words_per_block)) diff --git a/cachesimulator/reference.py b/cachesimulator/reference.py new file mode 100644 index 0000000..f5bdab2 --- 
/dev/null +++ b/cachesimulator/reference.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 + +from collections import OrderedDict +from enum import Enum + +from cachesimulator.bin_addr import BinaryAddress +from cachesimulator.word_addr import WordAddress + + +# An address reference consisting of the address and all of its components +class Reference(object): + + def __init__(self, word_addr, num_addr_bits, + num_offset_bits, num_index_bits, num_tag_bits): + self.word_addr = WordAddress(word_addr) + self.bin_addr = BinaryAddress( + word_addr=self.word_addr, num_addr_bits=num_addr_bits) + self.offset = self.bin_addr.get_offset(num_offset_bits) + self.index = self.bin_addr.get_index(num_offset_bits, num_index_bits) + self.tag = self.bin_addr.get_tag(num_tag_bits) + self.cache_status = None + + def __str__(self): + return str(OrderedDict(sorted(self.__dict__.items()))) + + __repr__ = __str__ + + # Return a lightweight entry to store in the cache + def get_cache_entry(self, num_words_per_block): + return { + 'tag': self.tag, + 'data': self.word_addr.get_consecutive_words( + num_words_per_block) + } + + +# An enum representing the cache status of a reference (i.e. hit or miss) +class ReferenceCacheStatus(Enum): + + miss = 0 + hit = 1 + + # Define how reference statuses are displayed in simulation results + def __str__(self): + if self.value == ReferenceCacheStatus.hit.value: + return 'HIT' + else: + return 'miss' + + __repr__ = __str__ diff --git a/cachesimulator/simulator.py b/cachesimulator/simulator.py old mode 100755 new mode 100644 index ea124ae..20c8059 --- a/cachesimulator/simulator.py +++ b/cachesimulator/simulator.py @@ -1,11 +1,12 @@ #!/usr/bin/env python3 -import argparse import math import shutil -from enum import Enum -from cachesimulator.table import Table +from cachesimulator.bin_addr import BinaryAddress +from cachesimulator.cache import Cache +from cachesimulator.reference import Reference +from cachesimulator.table import Table # The names of all reference table columns REF_COL_NAMES = ('WordAddr', 'BinAddr', 'Tag', 'Index', 'Offset', 'Hit/Miss') @@ -15,364 +16,111 @@ DEFAULT_TABLE_WIDTH = 80 -# Retrieves the binary address of a certain length for a base-10 word address -def get_bin_addr(word_addr, num_addr_bits=None): - - # Strip the '0b' prefix included in the binary string returned by bin() - bin_addr = bin(word_addr)[2:] - if num_addr_bits is None: - return bin_addr - else: - # Pad binary address with zeroes if too short - bin_addr = bin_addr.zfill(num_addr_bits) - return bin_addr - - -# Formats the given binary address by inserting spaces to improve readability -def prettify_bin_addr(bin_addr, min_bits_per_group): - - mid = len(bin_addr) // 2 - - if mid < min_bits_per_group: - # Return binary string immediately if bisecting the binary string - # produces a substring which is too short - return bin_addr - else: - # Otherwise, bisect binary string and separate halves with a space - left = prettify_bin_addr(bin_addr[:mid], min_bits_per_group) - right = prettify_bin_addr(bin_addr[mid:], min_bits_per_group) - return ' '.join((left, right)) - - -# Retrieves the tag used to distinguish cache entries with the same index -def get_tag(bin_addr, num_tag_bits): - - end = num_tag_bits - tag = bin_addr[:end] - if len(tag) != 0: - return tag - else: - return None - - -# Retrieves the index used to group blocks in the cache -def get_index(bin_addr, num_offset_bits, num_index_bits): - - start = len(bin_addr) - num_offset_bits - num_index_bits - end = len(bin_addr) - num_offset_bits - index = 
bin_addr[start:end] - if len(index) != 0: - return index - else: - return None - - -# Retrieves the word offset used to select a word in the data pointed to by the -# given binary address -def get_offset(bin_addr, num_offset_bits): - - start = len(bin_addr) - num_offset_bits - offset = bin_addr[start:] - if len(offset) != 0: - return offset - else: - return None - - -# Retrieves all consecutive words for the given word address (including itself) -def get_consecutive_words(word_addr, num_words_per_block): - - offset = word_addr % num_words_per_block - return [(word_addr - offset + i) for i in range(num_words_per_block)] - - -# An enum representing the cache status of a reference (i.e. hit or miss) -class RefStatus(Enum): - - miss = 0 - hit = 1 - - # Define how reference statuses are displayed in simulation results - def __str__(self): - if self.value == RefStatus.hit.value: - return 'HIT' - else: - return 'miss' - - -# An address reference consisting of the address and all of its components -class Reference(object): - - def __init__(self, word_addr, num_addr_bits, - num_offset_bits, num_index_bits, num_tag_bits): - self.word_addr = word_addr - self.bin_addr = get_bin_addr(self.word_addr, num_addr_bits) - self.offset = get_offset(self.bin_addr, num_offset_bits) - self.index = get_index(self.bin_addr, num_offset_bits, num_index_bits) - self.tag = get_tag(self.bin_addr, num_tag_bits) - - -# Returns True if a block at the given index and tag exists in the cache, -# indicating a hit; returns False otherwise, indicating a miss -def is_hit(cache, addr_index, addr_tag): - - # Ensure that indexless fully associative caches are accessed correctly - if addr_index is None: - blocks = cache['0'] - elif addr_index in cache: - blocks = cache[addr_index] - else: - return False - - for block in blocks: - if block['tag'] == addr_tag: - return True - - return False - - -# Adds the given entry to the cache at the given index -def set_block(cache, recently_used_addrs, replacement_policy, - num_blocks_per_set, addr_index, new_entry): - - # Place all cache entries in a single set if cache is fully associative - if addr_index is None: - blocks = cache['0'] - else: - blocks = cache[addr_index] - # Replace MRU or LRU entry if number of blocks in set exceeds the limit - if len(blocks) == num_blocks_per_set: - # Iterate through the recently-used entries in reverse order for MRU - if replacement_policy == 'mru': - recently_used_addrs = reversed(recently_used_addrs) - # Replace the first matching entry with the entry to add - for recent_index, recent_tag in recently_used_addrs: - for i, block in enumerate(blocks): - if recent_index == addr_index and block['tag'] == recent_tag: - blocks[i] = new_entry - return - else: - blocks.append(new_entry) - - -# Retrieves a list of address references for use by simulator -def get_addr_refs(word_addrs, num_addr_bits, - num_offset_bits, num_index_bits, num_tag_bits): - - refs = [] - for word_addr in word_addrs: - - ref = Reference( - word_addr, num_addr_bits, num_offset_bits, - num_index_bits, num_tag_bits) - refs.append(ref) - - return refs - - -# Initializes the reference cache with a fixed number of sets -def create_cache(num_sets, num_index_bits): - - cache = {} - for i in range(num_sets): - index = get_bin_addr(i, num_index_bits) - cache[index] = [] - return cache - - -# Simulate the cache by reading the given address references into it -def read_refs_into_cache(num_sets, num_blocks_per_set, num_index_bits, - num_words_per_block, replacement_policy, refs): - - cache = 
create_cache(num_sets, num_index_bits) - - recently_used_addrs = [] - ref_statuses = [] - - for ref in refs: - - # The index and tag (not the offset) uniquely identify each address - addr_id = (ref.index, ref.tag) - # Add every retrieved address to the list of recently-used addresses - if addr_id in recently_used_addrs: - recently_used_addrs.remove(addr_id) - recently_used_addrs.append(addr_id) - - # Determine the Hit/Miss value for this address to display in the table - if is_hit(cache, ref.index, ref.tag): - # Give emphasis to hits in contrast to misses - ref_status = RefStatus.hit - else: - ref_status = RefStatus.miss - # Create entry dictionary containing tag and data for this address - entry = { - 'tag': ref.tag, - 'data': get_consecutive_words( - ref.word_addr, num_words_per_block) - } - set_block( - cache=cache, - recently_used_addrs=recently_used_addrs, - replacement_policy=replacement_policy, - num_blocks_per_set=num_blocks_per_set, - addr_index=ref.index, - new_entry=entry) - - ref_statuses.append(ref_status) - - return cache, ref_statuses - - -# Displays details for each address reference, including its hit/miss status -def display_addr_refs(refs, ref_statuses, table_width): - - table = Table( - num_cols=len(REF_COL_NAMES), width=table_width, alignment='right') - table.header[:] = REF_COL_NAMES - - for ref, ref_status in zip(refs, ref_statuses): - - if ref.tag is not None: - ref_tag = ref.tag - else: - ref_tag = 'n/a' - - if ref.index is not None: - ref_index = ref.index - else: - ref_index = 'n/a' - - if ref.offset is not None: - ref_offset = ref.offset - else: - ref_offset = 'n/a' - - # Display data for each address as a row in the table - table.rows.append(( - ref.word_addr, - prettify_bin_addr(ref.bin_addr, MIN_BITS_PER_GROUP), - prettify_bin_addr(ref_tag, MIN_BITS_PER_GROUP), - prettify_bin_addr(ref_index, MIN_BITS_PER_GROUP), - prettify_bin_addr(ref_offset, MIN_BITS_PER_GROUP), - ref_status)) - - print(table) - - -# Displays the contents of the given cache as nicely-formatted table -def display_cache(cache, table_width): - - table = Table( - num_cols=len(cache), width=table_width, alignment='center') - table.title = 'Cache' - - cache_set_names = sorted(cache.keys()) - # A cache containing only one set is considered a fully associative cache - if len(cache) != 1: - # Display set names in table header if cache is not fully associative - table.header[:] = cache_set_names - - # Add to table the cache entries for each block - table.rows.append([]) - for index in cache_set_names: - blocks = cache[index] - table.rows[0].append( - ' '.join(','.join(map(str, entry['data'])) for entry in blocks)) - - print(table) - - -# Run the entire cache simulation -def run_simulation(num_blocks_per_set, num_words_per_block, cache_size, - replacement_policy, num_addr_bits, word_addrs): - - num_blocks = cache_size // num_words_per_block - num_sets = num_blocks // num_blocks_per_set - - # Ensure that the number of bits used to represent each address is always - # large enough to represent the largest address - num_addr_bits = max(num_addr_bits, int(math.log2(max(word_addrs))) + 1) - - num_offset_bits = int(math.log2(num_words_per_block)) - num_index_bits = int(math.log2(num_sets)) - num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits - - refs = get_addr_refs( - word_addrs, num_addr_bits, - num_offset_bits, num_index_bits, num_tag_bits) - - cache, ref_statuses = read_refs_into_cache( - num_sets, num_blocks_per_set, num_index_bits, - num_words_per_block, replacement_policy, refs) - - # 
The character-width of all displayed tables - # Attempt to fit table to terminal width, otherwise use default of 80 - table_width = max((shutil.get_terminal_size( - (DEFAULT_TABLE_WIDTH, 20)).columns, DEFAULT_TABLE_WIDTH)) - - print() - display_addr_refs(refs, ref_statuses, table_width) - print() - display_cache(cache, table_width) - print() - - -# Parse command-line arguments passed to the program -def parse_cli_args(): - - parser = argparse.ArgumentParser() - - parser.add_argument( - '--cache-size', - type=int, - required=True, - help='the size of the cache in words') - - parser.add_argument( - '--num-blocks-per-set', - type=int, - default=1, - help='the number of blocks per set') - - parser.add_argument( - '--num-words-per-block', - type=int, - default=1, - help='the number of words per block') - - parser.add_argument( - '--word-addrs', - nargs='+', - type=int, - required=True, - help='one or more base-10 word addresses') - - parser.add_argument( - '--num-addr-bits', - type=int, - default=1, - help='the number of bits in each given word address') - - parser.add_argument( - '--replacement-policy', - choices=('lru', 'mru'), - default='lru', - help='the cache replacement policy (LRU or MRU)') - - return parser.parse_args() - - -def main(): - - cli_args = parse_cli_args() - run_simulation( - num_blocks_per_set=cli_args.num_blocks_per_set, - num_words_per_block=cli_args.num_words_per_block, - cache_size=cli_args.cache_size, - replacement_policy=cli_args.replacement_policy, - num_addr_bits=cli_args.num_addr_bits, - word_addrs=cli_args.word_addrs) - - -if __name__ == '__main__': - main() +class Simulator(object): + + # Retrieves a list of address references for use by simulator + def get_addr_refs(self, word_addrs, num_addr_bits, + num_offset_bits, num_index_bits, num_tag_bits): + + return [Reference( + word_addr, num_addr_bits, num_offset_bits, + num_index_bits, num_tag_bits) for word_addr in word_addrs] + + # Displays details for each address reference, including its hit/miss + # status + def display_addr_refs(self, refs, table_width): + + table = Table( + num_cols=len(REF_COL_NAMES), width=table_width, alignment='right') + table.header[:] = REF_COL_NAMES + + for ref in refs: + + if ref.tag is not None: + ref_tag = ref.tag + else: + ref_tag = 'n/a' + + if ref.index is not None: + ref_index = ref.index + else: + ref_index = 'n/a' + + if ref.offset is not None: + ref_offset = ref.offset + else: + ref_offset = 'n/a' + + # Display data for each address as a row in the table + table.rows.append(( + ref.word_addr, + BinaryAddress.prettify(ref.bin_addr, MIN_BITS_PER_GROUP), + BinaryAddress.prettify(ref_tag, MIN_BITS_PER_GROUP), + BinaryAddress.prettify(ref_index, MIN_BITS_PER_GROUP), + BinaryAddress.prettify(ref_offset, MIN_BITS_PER_GROUP), + ref.cache_status)) + + print(table) + + # Displays the contents of the given cache as nicely-formatted table + def display_cache(self, cache, table_width): + + table = Table( + num_cols=len(cache), width=table_width, alignment='center') + table.title = 'Cache' + + cache_set_names = sorted(cache.keys()) + # A cache containing only one set is considered a fully associative + # cache + if len(cache) != 1: + # Display set names in table header if cache is not fully + # associative + table.header[:] = cache_set_names + + # Add to table the cache entries for each block + table.rows.append([]) + for index in cache_set_names: + blocks = cache[index] + table.rows[0].append(' '.join( + ','.join(map(str, entry['data'])) for entry in blocks)) + + print(table) + + # Run 
the entire cache simulation + def run_simulation(self, num_blocks_per_set, num_words_per_block, + cache_size, replacement_policy, num_addr_bits, + word_addrs): + + num_blocks = cache_size // num_words_per_block + num_sets = num_blocks // num_blocks_per_set + + # Ensure that the number of bits used to represent each address is + # always large enough to represent the largest address + num_addr_bits = max(num_addr_bits, int(math.log2(max(word_addrs))) + 1) + + num_offset_bits = int(math.log2(num_words_per_block)) + num_index_bits = int(math.log2(num_sets)) + num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits + + refs = self.get_addr_refs( + word_addrs, num_addr_bits, + num_offset_bits, num_index_bits, num_tag_bits) + + cache = Cache( + num_sets=num_sets, + num_index_bits=num_index_bits) + + cache.read_refs( + num_blocks_per_set, num_words_per_block, + replacement_policy, refs) + + # The character-width of all displayed tables + # Attempt to fit table to terminal width, otherwise use default of 80 + table_width = max((shutil.get_terminal_size( + (DEFAULT_TABLE_WIDTH, None)).columns, DEFAULT_TABLE_WIDTH)) + + print() + self.display_addr_refs(refs, table_width) + print() + self.display_cache(cache, table_width) + print() diff --git a/cachesimulator/table.py b/cachesimulator/table.py index 3569b45..2992074 100644 --- a/cachesimulator/table.py +++ b/cachesimulator/table.py @@ -43,6 +43,6 @@ def __str__(self): table_strs.append(self.get_separator()) for row in self.rows: - table_strs.append(cell_format_str.format(*row)) + table_strs.append(cell_format_str.format(*map(str, row))) return '\n'.join(table_strs) diff --git a/cachesimulator/word_addr.py b/cachesimulator/word_addr.py new file mode 100644 index 0000000..a57bc16 --- /dev/null +++ b/cachesimulator/word_addr.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python3 + + +class WordAddress(int): + + # Retrieves all consecutive words for the given word address (including + # itself) + def get_consecutive_words(self, num_words_per_block): + + offset = self % num_words_per_block + return [(self - offset + i) for i in range(num_words_per_block)] diff --git a/requirements.txt b/requirements.txt index 63470c3..3290f3c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,16 +1,16 @@ -appdirs==1.4.3 -colorama==0.3.5 -coverage==4.0.3 -flake8==2.5.4 -mando==0.3.3 -mccabe==0.4.0 +colorama==0.3.9 +coverage==4.5.1 +flake8==3.5.0 +flake8-isort==2.4 +flake8-polyfill==1.0.2 +isort==4.3.4 +mando==0.6.4 +mccabe==0.6.1 nose==1.3.7 -packaging==16.8 -pep8==1.7.0 -pyflakes==1.0.0 -pypandoc==1.4 -pyparsing==2.2.0 -python-termstyle==0.1.10 -radon==1.2.2 -rednose==0.4.3 -six==1.10.0 +pycodestyle==2.3.1 +pyflakes==1.6.0 +radon==2.2.0 +rednose==1.3.0 +six==1.11.0 +termstyle==0.1.11 +testfixtures==5.4.0 diff --git a/setup.py b/setup.py index b41bd33..9647464 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ def get_long_description(): # Use pandoc to create reStructuredText README if possible import pypandoc return pypandoc.convert('README.md', 'rst') - except: + except Exception: return None @@ -28,7 +28,7 @@ def get_long_description(): install_requires=[], entry_points={ 'console_scripts': [ - 'cache-simulator=cachesimulator.simulator:main' + 'cache-simulator=cachesimulator.__main__:main' ] } ) diff --git a/tests/test_compliance.py b/tests/test_compliance.py index 2e9ce3e..5464458 100644 --- a/tests/test_compliance.py +++ b/tests/test_compliance.py @@ -1,15 +1,16 @@ #!/usr/bin/env python3 import glob + import nose.tools as nose -import pep8 +import 
pycodestyle import radon.complexity as radon def test_pep8(): file_paths = glob.iglob('*/*.py') for file_path in file_paths: - style_guide = pep8.StyleGuide(quiet=True) + style_guide = pycodestyle.StyleGuide(quiet=True) total_errors = style_guide.input_file(file_path) test_pep8.__doc__ = '{} should comply with PEP 8'.format(file_path) fail_msg = '{} does not comply with PEP 8'.format(file_path) diff --git a/tests/test_simulator_display.py b/tests/test_simulator_display.py index 266c44a..ce8fb7d 100644 --- a/tests/test_simulator_display.py +++ b/tests/test_simulator_display.py @@ -2,23 +2,31 @@ import contextlib import io + import nose.tools as nose -import cachesimulator.simulator as sim +from cachesimulator.simulator import Simulator WORD_ADDRS = [43, 14, 253, 186] TABLE_WIDTH = 80 +def apply_cache_statuses_to_refs(cache_statuses, refs): + + for cache_status, ref in zip(cache_statuses, refs): + ref.cache_status = cache_status + + def test_display_addr_refs(): """should display table of address references""" + sim = Simulator() refs = sim.get_addr_refs( word_addrs=WORD_ADDRS, num_addr_bits=8, num_tag_bits=5, num_index_bits=2, num_offset_bits=1) - ref_statuses = ['miss', 'miss', 'HIT', 'miss'] + apply_cache_statuses_to_refs(['miss', 'miss', 'HIT', 'miss'], refs) out = io.StringIO() with contextlib.redirect_stdout(out): - sim.display_addr_refs(refs, ref_statuses, table_width=TABLE_WIDTH) + sim.display_addr_refs(refs, table_width=TABLE_WIDTH) table_output = out.getvalue() num_cols = 6 col_width = TABLE_WIDTH // num_cols @@ -37,13 +45,14 @@ def test_display_addr_refs(): def test_display_addr_refs_no_tag(): """should display n/a for tag when there are no tag bits""" + sim = Simulator() refs = sim.get_addr_refs( word_addrs=WORD_ADDRS, num_addr_bits=2, num_tag_bits=0, num_index_bits=1, num_offset_bits=1) - ref_statuses = ['miss', 'miss', 'miss', 'miss'] + apply_cache_statuses_to_refs(['miss', 'miss', 'miss', 'miss'], refs) out = io.StringIO() with contextlib.redirect_stdout(out): - sim.display_addr_refs(refs, ref_statuses, table_width=TABLE_WIDTH) + sim.display_addr_refs(refs, table_width=TABLE_WIDTH) table_output = out.getvalue() nose.assert_regexp_matches( table_output, r'\s*{}\s*{}\s*{}'.format( @@ -52,13 +61,14 @@ def test_display_addr_refs_no_tag(): def test_display_addr_refs_no_index(): """should display n/a for index when there are no index bits""" + sim = Simulator() refs = sim.get_addr_refs( word_addrs=WORD_ADDRS, num_addr_bits=8, num_tag_bits=7, num_index_bits=0, num_offset_bits=1) - ref_statuses = ['miss', 'miss', 'miss', 'miss'] + apply_cache_statuses_to_refs(['miss', 'miss', 'miss', 'miss'], refs) out = io.StringIO() with contextlib.redirect_stdout(out): - sim.display_addr_refs(refs, ref_statuses, table_width=TABLE_WIDTH) + sim.display_addr_refs(refs, table_width=TABLE_WIDTH) table_output = out.getvalue() nose.assert_regexp_matches( table_output, r'\s*{}\s*{}\s*{}'.format( @@ -67,13 +77,14 @@ def test_display_addr_refs_no_index(): def test_display_addr_refs_no_offset(): """should display n/a for offset when there are no offset bits""" + sim = Simulator() refs = sim.get_addr_refs( word_addrs=WORD_ADDRS, num_addr_bits=8, num_tag_bits=4, num_index_bits=4, num_offset_bits=0) - ref_statuses = ['miss'] * 12 + apply_cache_statuses_to_refs(['miss'] * 12, refs) out = io.StringIO() with contextlib.redirect_stdout(out): - sim.display_addr_refs(refs, ref_statuses, table_width=TABLE_WIDTH) + sim.display_addr_refs(refs, table_width=TABLE_WIDTH) table_output = out.getvalue() 
nose.assert_regexp_matches( table_output, r'\s*{}\s*{}\s*{}'.format( @@ -82,6 +93,7 @@ def test_display_addr_refs_no_offset(): def test_display_cache(): """should display table for direct-mapped/set associative cache""" + sim = Simulator() out = io.StringIO() with contextlib.redirect_stdout(out): sim.display_cache({ @@ -114,6 +126,7 @@ def test_display_cache(): def test_display_cache_fully_assoc(): """should correctly display table for fully associative cache""" + sim = Simulator() out = io.StringIO() with contextlib.redirect_stdout(out): sim.display_cache({ diff --git a/tests/test_simulator_hit.py b/tests/test_simulator_hit.py index bf72408..f2f65b7 100644 --- a/tests/test_simulator_hit.py +++ b/tests/test_simulator_hit.py @@ -1,34 +1,36 @@ #!/usr/bin/env python3 import nose.tools as nose -import cachesimulator.simulator as sim + +from cachesimulator.cache import Cache +from cachesimulator.reference import ReferenceCacheStatus def test_ref_status_str(): - """RefStatus enum members should display correct string values""" - nose.assert_equal(str(sim.RefStatus.hit), 'HIT') - nose.assert_equal(str(sim.RefStatus.miss), 'miss') + """cache status enum members should display correct string values""" + nose.assert_equal(str(ReferenceCacheStatus.hit), 'HIT') + nose.assert_equal(str(ReferenceCacheStatus.miss), 'miss') class TestIsHit(object): """is_hit should behave correctly in all cases""" def __init__(self): - self.cache = { + self.cache = Cache({ '010': [{ 'tag': '1011', 'data': [180, 181] }] - } + }) def test_is_hit_true(self): """is_hit should return True if index and tag exist in cache""" - nose.assert_true(sim.is_hit(self.cache, '010', '1011')) + nose.assert_true(self.cache.is_hit('010', '1011')) def test_is_hit_false_index_mismatch(self): """is_hit should return False if index does not exist in cache""" - nose.assert_false(sim.is_hit(self.cache, '011', '1011')) + nose.assert_false(self.cache.is_hit('011', '1011')) def test_is_hit_false_tag_mismatch(self): """is_hit should return False if tag does not exist in cache""" - nose.assert_false(sim.is_hit(self.cache, '010', '1010')) + nose.assert_false(self.cache.is_hit('010', '1010')) diff --git a/tests/test_simulator_main.py b/tests/test_simulator_main.py index 562ce1c..db2d0db 100644 --- a/tests/test_simulator_main.py +++ b/tests/test_simulator_main.py @@ -2,19 +2,21 @@ import contextlib import io -import nose.tools as nose -import cachesimulator.simulator as sim from unittest.mock import patch +import nose.tools as nose + +import cachesimulator.__main__ as main + @patch('sys.argv', [ - sim.__file__, '--cache-size', '4', '--num-blocks-per-set', '1', + main.__file__, '--cache-size', '4', '--num-blocks-per-set', '1', '--num-words-per-block', '1', '--word-addrs', '0', '8', '0', '6', '8']) def test_main(): """main function should produce some output""" out = io.StringIO() with contextlib.redirect_stdout(out): - sim.main() + main.main() main_output = out.getvalue() nose.assert_regexp_matches(main_output, r'\bWordAddr\b') nose.assert_regexp_matches(main_output, r'\b0110\b') diff --git a/tests/test_simulator_refs.py b/tests/test_simulator_refs.py index 6a84fb5..c710002 100644 --- a/tests/test_simulator_refs.py +++ b/tests/test_simulator_refs.py @@ -1,12 +1,18 @@ #!/usr/bin/env python3 +from collections import OrderedDict + import nose.tools as nose -import cachesimulator.simulator as sim + +from cachesimulator.cache import Cache +from cachesimulator.reference import Reference, ReferenceCacheStatus +from cachesimulator.simulator import Simulator def 
test_get_addr_refs(): """get_addr_refs should return correct reference data""" word_addrs = [3, 180, 44, 253] + sim = Simulator() refs = sim.get_addr_refs( word_addrs=word_addrs, num_addr_bits=8, num_tag_bits=4, num_index_bits=3, num_offset_bits=1) @@ -24,21 +30,24 @@ class TestReadRefs(object): WORD_ADDRS = [3, 180, 43, 2, 191, 88, 190, 14, 181, 44, 186, 253] - def get_hits(self, ref_statuses): + def get_hits(self, refs): """retrieves all indices where hits occur in a list of ref statuses""" return { - i for i, status in enumerate(ref_statuses) if status.value == 1} + i for i, ref in enumerate(refs) + if ref.cache_status == ReferenceCacheStatus.hit} def test_read_refs_into_cache_direct_mapped_lru(self): """read_refs_into_cache should work for direct-mapped LRU cache""" word_addrs = [0, 8, 0, 6, 8] + sim = Simulator() refs = sim.get_addr_refs( word_addrs=word_addrs, num_addr_bits=4, num_tag_bits=2, num_index_bits=2, num_offset_bits=0) - cache, ref_statuses = sim.read_refs_into_cache( - refs=refs, num_sets=4, num_blocks_per_set=1, - num_words_per_block=1, num_index_bits=2, replacement_policy='lru') - nose.assert_dict_equal(cache, { + cache = Cache(num_sets=4, num_index_bits=2) + cache.read_refs( + refs=refs, num_blocks_per_set=1, + num_words_per_block=1, replacement_policy='lru') + nose.assert_equal(cache, { '00': [ {'tag': '10', 'data': [8]} ], @@ -48,17 +57,19 @@ def test_read_refs_into_cache_direct_mapped_lru(self): ], '11': [] }) - nose.assert_set_equal(self.get_hits(ref_statuses), set()) + nose.assert_equal(self.get_hits(refs), set()) def test_read_refs_into_cache_set_associative_lru(self): """read_refs_into_cache should work for set associative LRU cache""" + sim = Simulator() refs = sim.get_addr_refs( word_addrs=TestReadRefs.WORD_ADDRS, num_addr_bits=8, num_tag_bits=5, num_index_bits=2, num_offset_bits=1) - cache, ref_statuses = sim.read_refs_into_cache( - refs=refs, num_sets=4, num_blocks_per_set=3, - num_words_per_block=2, num_index_bits=2, replacement_policy='lru') - nose.assert_dict_equal(cache, { + cache = Cache(num_sets=4, num_index_bits=2) + cache.read_refs( + refs=refs, num_blocks_per_set=3, + num_words_per_block=2, replacement_policy='lru') + nose.assert_equal(cache, { '00': [ {'tag': '01011', 'data': [88, 89]} ], @@ -77,17 +88,19 @@ def test_read_refs_into_cache_set_associative_lru(self): {'tag': '00001', 'data': [14, 15]}, ] }) - nose.assert_set_equal(self.get_hits(ref_statuses), {3, 6, 8}) + nose.assert_equal(self.get_hits(refs), {3, 6, 8}) def test_read_refs_into_cache_fully_associative_lru(self): """read_refs_into_cache should work for fully associative LRU cache""" + sim = Simulator() refs = sim.get_addr_refs( word_addrs=TestReadRefs.WORD_ADDRS, num_addr_bits=8, num_tag_bits=7, num_index_bits=0, num_offset_bits=1) - cache, ref_statuses = sim.read_refs_into_cache( - refs=refs, num_sets=1, num_blocks_per_set=4, - num_words_per_block=2, num_index_bits=0, replacement_policy='lru') - nose.assert_dict_equal(cache, { + cache = Cache(num_sets=1, num_index_bits=0) + cache.read_refs( + refs=refs, num_blocks_per_set=4, + num_words_per_block=2, replacement_policy='lru') + nose.assert_equal(cache, { '0': [ {'tag': '1011010', 'data': [180, 181]}, {'tag': '0010110', 'data': [44, 45]}, @@ -95,22 +108,31 @@ def test_read_refs_into_cache_fully_associative_lru(self): {'tag': '1011101', 'data': [186, 187]} ] }) - nose.assert_set_equal(self.get_hits(ref_statuses), {3, 6}) + nose.assert_equal(self.get_hits(refs), {3, 6}) def test_read_refs_into_cache_fully_associative_mru(self): 
"""read_refs_into_cache should work for fully associative MRU cache""" + sim = Simulator() refs = sim.get_addr_refs( word_addrs=TestReadRefs.WORD_ADDRS, num_addr_bits=8, num_tag_bits=7, num_index_bits=0, num_offset_bits=1) - cache, ref_statuses = sim.read_refs_into_cache( - refs=refs, num_sets=1, num_blocks_per_set=4, - num_words_per_block=2, num_index_bits=0, replacement_policy='mru') - nose.assert_dict_equal(cache, { + cache = Cache(num_sets=1, num_index_bits=0) + cache.read_refs( + refs=refs, num_blocks_per_set=4, + num_words_per_block=2, replacement_policy='mru') + nose.assert_equal(cache, Cache({ '0': [ {'tag': '0000001', 'data': [2, 3]}, {'tag': '1111110', 'data': [252, 253]}, {'tag': '0010101', 'data': [42, 43]}, {'tag': '0000111', 'data': [14, 15]} ] - }) - nose.assert_set_equal(self.get_hits(ref_statuses), {3, 8}) + })) + nose.assert_equal(self.get_hits(refs), {3, 8}) + + +def test_get_ref_str(): + """should return string representation of Reference""" + ref = Reference(word_addr=180, num_addr_bits=8, + num_tag_bits=4, num_index_bits=3, num_offset_bits=1) + nose.assert_equal(str(ref), str(OrderedDict(sorted(ref.__dict__.items())))) diff --git a/tests/test_simulator_set_block.py b/tests/test_simulator_set_block.py index 92c6c65..04d6f3b 100644 --- a/tests/test_simulator_set_block.py +++ b/tests/test_simulator_set_block.py @@ -1,22 +1,24 @@ #!/usr/bin/env python3 import copy + import nose.tools as nose -import cachesimulator.simulator as sim + +from cachesimulator.cache import Cache class TestSetBlock(object): """set_block should behave correctly in all cases""" def reset(self): - self.cache = { + self.cache = Cache({ '010': [ {'tag': '1000'}, {'tag': '1100'}, {'tag': '1101'}, {'tag': '1110'} ] - } + }) self.recently_used_addrs = [ ('100', '1100'), ('010', '1101'), @@ -28,28 +30,26 @@ def test_empty_set(self): """set_block should add new block if index set is empty""" self.reset() self.cache['010'][:] = [] - sim.set_block( - cache=self.cache, - recently_used_addrs=[], + self.cache.recently_used_addrs = [] + self.cache.set_block( replacement_policy='lru', num_blocks_per_set=4, addr_index='010', new_entry=self.new_entry) - nose.assert_dict_equal(self.cache, { + nose.assert_equal(self.cache, { '010': [{'tag': '1111'}] }) def test_lru_replacement(self): """set_block should perform LRU replacement as needed""" self.reset() - sim.set_block( - cache=self.cache, - recently_used_addrs=self.recently_used_addrs, + self.cache.recently_used_addrs = self.recently_used_addrs + self.cache.set_block( replacement_policy='lru', num_blocks_per_set=4, addr_index='010', new_entry=self.new_entry) - nose.assert_dict_equal(self.cache, { + nose.assert_equal(self.cache, { '010': [ {'tag': '1000'}, {'tag': '1100'}, @@ -61,14 +61,13 @@ def test_lru_replacement(self): def test_mru_replacement(self): """set_block should optionally perform MRU replacement as needed""" self.reset() - sim.set_block( - cache=self.cache, - recently_used_addrs=self.recently_used_addrs, + self.cache.recently_used_addrs = self.recently_used_addrs + self.cache.set_block( replacement_policy='mru', num_blocks_per_set=4, addr_index='010', new_entry=self.new_entry) - nose.assert_dict_equal(self.cache, { + nose.assert_equal(self.cache, { '010': [ {'tag': '1000'}, {'tag': '1100'}, @@ -81,11 +80,11 @@ def test_no_replacement(self): """set_block should not perform replacement if there are no recents""" self.reset() original_cache = copy.deepcopy(self.cache) - sim.set_block( - cache=self.cache, - recently_used_addrs=[], + 
self.cache.recently_used_addrs = [] + self.cache.set_block( replacement_policy='lru', num_blocks_per_set=4, addr_index='010', new_entry=self.new_entry) - nose.assert_dict_equal(self.cache, original_cache) + nose.assert_is_not(self.cache, original_cache) + nose.assert_equal(self.cache, original_cache) diff --git a/tests/test_simulator_utility.py b/tests/test_simulator_utility.py index 94f3ea2..0146de8 100644 --- a/tests/test_simulator_utility.py +++ b/tests/test_simulator_utility.py @@ -1,113 +1,115 @@ #!/usr/bin/env python3 import nose.tools as nose -import cachesimulator.simulator as sim + +from cachesimulator.bin_addr import BinaryAddress +from cachesimulator.word_addr import WordAddress def test_get_bin_addr_unpadded(): """get_bin_addr should return unpadded binary address of word address""" nose.assert_equal( - sim.get_bin_addr(180), + BinaryAddress(word_addr=WordAddress(180)), '10110100') def test_get_bin_addr_padded(): """get_bin_addr should return padded binary address of word address""" nose.assert_equal( - sim.get_bin_addr(44, num_addr_bits=8), + BinaryAddress(word_addr=WordAddress(44), num_addr_bits=8), '00101100') def test_prettify_bin_addr_16_bit(): """prettify_bin_addr should prettify 8-bit string into groups of 3""" nose.assert_equal( - sim.prettify_bin_addr('1010101110101011', min_bits_per_group=3), + BinaryAddress.prettify('1010101110101011', min_bits_per_group=3), '1010 1011 1010 1011') def test_prettify_bin_addr_8_bit(): """prettify_bin_addr should prettify 8-bit string into groups of 3""" nose.assert_equal( - sim.prettify_bin_addr('10101011', min_bits_per_group=3), + BinaryAddress.prettify('10101011', min_bits_per_group=3), '1010 1011') def test_prettify_bin_addr_7_bit(): """prettify_bin_addr should prettify 7-bit string into groups of 3""" nose.assert_equal( - sim.prettify_bin_addr('1011010', min_bits_per_group=3), + BinaryAddress.prettify('1011010', min_bits_per_group=3), '101 1010') def test_prettify_bin_addr_6_bit(): """prettify_bin_addr should prettify 6-bit string into groups of 3""" nose.assert_equal( - sim.prettify_bin_addr('101011', min_bits_per_group=3), + BinaryAddress.prettify('101011', min_bits_per_group=3), '101 011') def test_prettify_bin_addr_5_bit(): """prettify_bin_addr should prettify 5-bit string into groups of 3""" nose.assert_equal( - sim.prettify_bin_addr('10110', min_bits_per_group=3), + BinaryAddress.prettify('10110', min_bits_per_group=3), '10110') def test_get_tag_5_bit(): """get_tag should return correct 5 tag bits for an address""" nose.assert_equal( - sim.get_tag('10110100', num_tag_bits=5), + BinaryAddress('10110100').get_tag(num_tag_bits=5), '10110') def test_get_tag_0_bit(): """get_tag should return None if no bits are allocated to a tag""" nose.assert_is_none( - sim.get_tag('10110100', num_tag_bits=0)) + BinaryAddress('10110100').get_tag(num_tag_bits=0)) def test_get_index_2_bit(): """get_index should return correct 2 index bits for an address""" nose.assert_equal( - sim.get_index('11111101', num_offset_bits=1, num_index_bits=2), - '10') + BinaryAddress('11111101').get_index( + num_offset_bits=1, num_index_bits=2), '10') def test_get_index_0_bit(): """get_index should return None if no bits are allocated to an index""" nose.assert_is_none( - sim.get_index('11111111', num_offset_bits=1, num_index_bits=0)) + BinaryAddress('11111111').get_index( + num_offset_bits=1, num_index_bits=0)) def test_get_offset_2_bit(): """get_offset should return correct 2 offset bits for an address""" nose.assert_equal( - sim.get_offset('11111101', 
num_offset_bits=2), - '01') + BinaryAddress('11111101').get_offset(num_offset_bits=2), '01') def test_get_offset_0_bit(): """get_offset should return None if no bits are allocated to an offset""" nose.assert_is_none( - sim.get_offset('10110100', num_offset_bits=0)) + BinaryAddress('10110100').get_offset(num_offset_bits=0)) def test_get_consecutive_words_1_word(): """get_consecutive_words should return same word for 1-word blocks""" - nose.assert_list_equal( - sim.get_consecutive_words(23, num_words_per_block=1), + nose.assert_equal( + WordAddress(23).get_consecutive_words(num_words_per_block=1), [23]) def test_get_consecutive_words_2_word(): """get_consecutive_words should return correct words for 2-word blocks""" - nose.assert_list_equal( - sim.get_consecutive_words(22, num_words_per_block=2), + nose.assert_equal( + WordAddress(22).get_consecutive_words(num_words_per_block=2), [22, 23]) def test_get_consecutive_words_4_word(): """get_consecutive_words should return correct words for 4-word blocks""" - nose.assert_list_equal( - sim.get_consecutive_words(21, num_words_per_block=4), + nose.assert_equal( + WordAddress(21).get_consecutive_words(num_words_per_block=4), [20, 21, 22, 23]) diff --git a/tests/test_table.py b/tests/test_table.py index 3c37bfe..18ff515 100644 --- a/tests/test_table.py +++ b/tests/test_table.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import nose.tools as nose + from cachesimulator.table import Table