This repository has been archived by the owner on Jul 4, 2023. It is now read-only.

Commit

Merge pull request #60 from mbuhidar/develop
Bump version to 0.1.3 for PyPI release.
mbuhidar committed Mar 14, 2022
2 parents c83be67 + 26654c6 commit 50cf6be
Showing 8 changed files with 43 additions and 52 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -106,8 +106,10 @@ celerybeat.pid
 # Environments
 .env
 .venv
+.rtt_venv
 env/
 venv/
+rtt_venv/
 ENV/
 env.bak/
 venv.bak/
11 changes: 0 additions & 11 deletions conftest.py

This file was deleted.
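The deleted conftest.py is not reproduced in this diff, but the test changes below remove comments reading "char_maps is defined in a fixture in conftest.py", so it presumably held a fixture roughly like the hypothetical sketch below, made redundant once debug_tokenize.py imports char_maps itself:

# Hypothetical reconstruction of the deleted fixture; the actual file
# contents are not part of this diff.
import pytest

from src.debug_tokenize import char_maps as char_maps_module


@pytest.fixture
def char_maps():
    # Hand each test the Commodore/magazine character-map module directly.
    return char_maps_module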

14 changes: 7 additions & 7 deletions requirements.txt
@@ -1,8 +1,8 @@
-attrs==21.2.0
+attrs==21.4.0
 iniconfig==1.1.1
-packaging==20.9
-pluggy==0.13.1
-py==1.10.0
-pyparsing==2.4.7
-pytest==6.2.4
-toml==0.10.2
+packaging==21.3
+pluggy==1.0.0
+py==1.11.0
+pyparsing==3.0.7
+pytest==7.1.0
+tomli==2.0.1
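One swap here is more than a version bump: toml is replaced by tomli, the TOML parser that pytest 7 itself uses to read pyproject.toml on Pythons without a built-in tomllib. A minimal sketch of the API difference, assuming a pyproject.toml exists in the working directory:

# tomli parses TOML only from binary-mode file objects, unlike the older
# toml package, which also accepted paths and text-mode files.
import tomli

with open("pyproject.toml", "rb") as f:  # "rb" is required by tomli
    config = tomli.load(f)
print(config.get("tool", {}))  # e.g. tool-specific configuration tables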
2 changes: 1 addition & 1 deletion setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = retro_typein_tools
-version = 0.1.2
+version = 0.1.3
 description = Debug and conversion tool for 1980s magazine type-in programs
 long_description = file: README.md
 long_description_content_type = text/markdown
1 change: 1 addition & 0 deletions setup.py
@@ -0,0 +1 @@
+from setuptools import setup; setup()
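This one-line setup.py is a compatibility shim: called with no arguments, setuptools' setup() falls back to the declarative [metadata] and [options] sections of setup.cfg shown above, and the shim keeps legacy invocations such as editable installs with older pip working. An expanded equivalent, for illustration only:

# Spelled-out form of the shim added above: setup() with no keyword
# arguments reads name, version (0.1.3), description, and the rest
# from setup.cfg.
from setuptools import setup

setup()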
File renamed without changes.
22 changes: 11 additions & 11 deletions src/debug_tokenize/debug_tokenize.py
@@ -11,10 +11,14 @@
 import sys
 import math
 
+
+# import char_maps.py: Module containing Commodore to magazine conversion maps
 try:
-    from src.debug_tokenize import char_maps
-except ImportError:
     import char_maps
+except ImportError:
+    # case for executing pytest
+    from debug_tokenize import char_maps
 
+
 def parse_args(argv):
     """Parses command line inputs and generate command line interface and
@@ -176,14 +180,12 @@ def check_line_number_seq(lines_list):
         sys.exit(1)
 
 
-def ahoy_lines_list(lines_list, char_maps):
+def ahoy_lines_list(lines_list):
     """For each line in the program, convert Ahoy special characters to Petcat
     special characters.
 
     Args:
         lines_list (list): List of lines (str) in program.
-        char_maps (module): Module containing conversion maps between various
-            Commodore and magazine formats.
 
     Returns:
         new_lines (list): List of new lines (str) after special characters are
@@ -296,7 +298,7 @@ def scan_manager(ln):
     bytestr = []
 
     while ln:
-        (byte, ln) = scan(ln, char_maps, tokenize=not (in_quotes or in_remark))
+        (byte, ln) = scan(ln, tokenize=not (in_quotes or in_remark))
         # if byte is not None:
         bytestr.append(byte)
         if byte == ord('"'):
@@ -309,15 +311,13 @@

 # scan each line segement and convert to tokenized bytes.
 # returns byte and remaining line segment
-def scan(ln, char_maps, tokenize=True):
+def scan(ln, tokenize=True):
     """Scan beginning of each line for BASIC keywords, petcat special
     characters, or ascii characters, convert to tokenized bytes, and
     return remaining line segment after converted characters are removed
 
     Args:
         ln (str): Text of each line segment to parse and convert
-        char_maps (module): Module containing conversion maps between various
-            Commodore and magazine formats.
         tokenize (bool): Flag to indicate if start of line segment should be
             tokenized (False if line segment start is within quotes or after
             a REM statement)
@@ -529,7 +529,7 @@ def main(argv=None, width=None):
     # Create lines list while checking for loose brackets/braces and converting
     # to common special character codes in braces
     if args.source[0][:4] == 'ahoy':
-        lines_list = ahoy_lines_list(lines_list, char_maps)
+        lines_list = ahoy_lines_list(lines_list)
         line_no = split_line_num(lines_list[1])[0]
         # handle loose brace error returned from ahoy_lines_list()
         if lines_list[0] is None:
@@ -586,7 +586,7 @@ def main(argv=None, width=None):
         print('Line Checksums:\n')
         if not width:
             width = get_terminal_size()[0]
-        print_checksums(ahoy_checksums, width) #, get_terminal_size()[0])
+        print_checksums(ahoy_checksums, width)
 
 
 if __name__ == '__main__':
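The common thread in the hunks above: char_maps is now resolved once at import time by the try/except fallback, so ahoy_lines_list() and scan() no longer take it as a parameter. A minimal usage sketch of the slimmed signatures; the scan() case is copied from the test table below, the rest is assumption:

# Sketch assuming the installed package layout from this commit.
from debug_tokenize.debug_tokenize import ahoy_lines_list, scan

# ahoy_lines_list(lines_list) now takes only the program lines; the
# char_maps module is found via the import fallback shown above.

# scan() consumes the start of a line segment and returns (byte, rest);
# scan_manager() passes tokenize=False inside quotes or after a REM.
byte, rest = scan('{s ep}start mower', tokenize=True)
assert (byte, rest) == (169, 'start mower')  # expected pair from the tests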
43 changes: 21 additions & 22 deletions tests/debug_tokenize_test.py
@@ -1,20 +1,20 @@
-import pytest
 from io import StringIO
+import pytest
 
-from src.debug_tokenize.debug_tokenize import parse_args, \
-                                              read_file, \
-                                              check_line_number_seq, \
-                                              ahoy_lines_list, \
-                                              split_line_num, \
-                                              scan, \
-                                              scan_manager, \
-                                              write_binary, \
-                                              ahoy1_checksum, \
-                                              ahoy2_checksum, \
-                                              ahoy3_checksum, \
-                                              print_checksums, \
-                                              confirm_overwrite, \
-                                              main
+from debug_tokenize.debug_tokenize import (parse_args,
+                                           read_file,
+                                           check_line_number_seq,
+                                           ahoy_lines_list,
+                                           split_line_num,
+                                           scan,
+                                           scan_manager,
+                                           write_binary,
+                                           ahoy1_checksum,
+                                           ahoy2_checksum,
+                                           ahoy3_checksum,
+                                           print_checksums,
+                                           confirm_overwrite,
+                                           main)
 
 
 @pytest.mark.parametrize(
@@ -166,15 +166,14 @@ def test_check_line_number_seq_bad(capsys, lines_list, term_capture):
         ['print"****44444{brn}"']),
     ],
 )
-# char_maps is defined in a fixture in conftest.py
-def test_ahoy_lines_list(lines_list, new_lines, char_maps):
+def test_ahoy_lines_list(lines_list, new_lines):
     """
     Unit test to check that function ahoy_lines_list() replaces ahoy special
     character codes with petcat special character codes in each line of the
     program. Also checks for loose braces and prompt an error message and
     program exit.
     """
-    assert ahoy_lines_list(lines_list, char_maps) == new_lines
+    assert ahoy_lines_list(lines_list) == new_lines
 
 
 @pytest.mark.parametrize(
@@ -274,15 +273,14 @@ def test_scan_manager(ln, bytestr):
         ('{s ep}start mower', True, 169, 'start mower'),
     ],
 )
-# char_maps is defined in a fixture in conftest.py
-def test_scan(ln, tokenize, byte, remaining_line, char_maps):
+def test_scan(ln, tokenize, byte, remaining_line):
     """
     Unit test to check that function scan() is properly converting the start
     of each passed in line to a tokenized byte for BASIC keywords, petcat
     special characters, and alphanumeric characters.
     """
 
-    assert scan(ln, char_maps, tokenize) == (byte, remaining_line)
+    assert scan(ln, tokenize) == (byte, remaining_line)
 
 
 @pytest.mark.parametrize(
@@ -492,7 +490,8 @@ def test_main(tmp_path, capsys, source, lines_list, term):
 )
 
 
-def test_main(tmp_path, capsys, monkeypatch, user_entry, source, lines_list, term):
+def test_main(tmp_path, capsys, monkeypatch, user_entry, source,
+              lines_list, term):
     """
     End to end test to check that function main() is propery generating the
     correct output for a given command line input.
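With the conftest.py fixture gone, each parametrized test is self-contained. A condensed sketch of the pattern these tests now follow, reusing a case from the table above:

import pytest

from debug_tokenize.debug_tokenize import scan


@pytest.mark.parametrize(
    "ln, tokenize, byte, remaining_line",
    [("{s ep}start mower", True, 169, "start mower")],  # case from this suite
)
def test_scan_condensed(ln, tokenize, byte, remaining_line):
    # No char_maps parameter: scan() resolves the module internally now.
    assert scan(ln, tokenize) == (byte, remaining_line)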
