Commit
Fix black formatting issues
not-na committed May 4, 2024
1 parent 2bc3ec3 commit 393d8ff
Showing 7 changed files with 140 additions and 26 deletions.
98 changes: 97 additions & 1 deletion poetry.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions pyproject.toml
@@ -13,6 +13,7 @@ poethepoet = "^0.25.1"
 [tool.poetry.group.dev.dependencies]
 pytest = "^8.1.1"
 pytest-cov = "^5.0.0"
+black = "^24.4.2"
 
 [tool.poe.tasks]
 test = "pytest"
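black only enters the project as a dev dependency here; the commit does not wire it into the existing poe task list. A minimal sketch of how such a task could sit next to the existing test task is shown below — the format task name and the plain "black ." invocation are assumptions, not part of this commit:

[tool.poe.tasks]
test = "pytest"
# Hypothetical task (not part of this commit): reformat the project with black
format = "black ."

With a task like this, poetry run poe format should apply the same formatting that the diffs below show black producing.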
4 changes: 2 additions & 2 deletions tests/conftest.py
@@ -5,12 +5,12 @@
 from tests.crowdanki import load_deck_from_file
 
 
-DECK_PATH = Path(__file__).parent.parent / 'decks' / "Chinesisch" / "deck.json"
+DECK_PATH = Path(__file__).parent.parent / "decks" / "Chinesisch" / "deck.json"
 
 _deck = load_deck_from_file(DECK_PATH)
 
 
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
 def deck():
     return _deck

25 changes: 12 additions & 13 deletions tests/crowdanki.py
@@ -14,42 +14,41 @@ def load_deck_from_file(filename: Path) -> AnkiDeck:

     # Create basic deck info
     deck = AnkiDeck(
-        name=data['name'],
+        name=data["name"],
         notes=[],
-        media_files=data['media_files'],
+        media_files=data["media_files"],
         notemodels={},
         base_dir=filename.parent,
     )
 
     # Load all note models
-    for nmodeldat in data['note_models']:
-        ctype = CardType(UUID(nmodeldat['crowdanki_uuid']))
+    for nmodeldat in data["note_models"]:
+        ctype = CardType(UUID(nmodeldat["crowdanki_uuid"]))
         nmodel = AnkiNoteModel(
-            name=nmodeldat['name'],
-            uuid=UUID(nmodeldat['crowdanki_uuid']),
+            name=nmodeldat["name"],
+            uuid=UUID(nmodeldat["crowdanki_uuid"]),
             ctype=ctype,
-            fields=[fdat["name"] for fdat in nmodeldat['flds']],
+            fields=[fdat["name"] for fdat in nmodeldat["flds"]],
         )
         deck.notemodels[nmodel.uuid] = nmodel
 
     # Load notes themselves
-    for ndat in data['notes']:
-        assert UUID(ndat["note_model_uuid"]) in deck.notemodels, f"Unknown note model {ndat['note_model_uuid']} for card {ndat['guid']}"
+    for ndat in data["notes"]:
+        assert (
+            UUID(ndat["note_model_uuid"]) in deck.notemodels
+        ), f"Unknown note model {ndat['note_model_uuid']} for card {ndat['guid']}"
         nmodel = deck.notemodels[UUID(ndat["note_model_uuid"])]
 
         note = AnkiNote(
             guid=ndat["guid"],
             cardtype_uuid=UUID(ndat["note_model_uuid"]),
             cardtype=nmodel.ctype,
             tags=ndat["tags"],
-            fields={
-                nmodel.fields[i]: v for i, v in enumerate(ndat["fields"])
-            },
+            fields={nmodel.fields[i]: v for i, v in enumerate(ndat["fields"])},
         )
 
         assert len(note.fields) == len(nmodel.fields)
 
         deck.notes.append(note)
 
     return deck

1 change: 0 additions & 1 deletion tests/model.py
@@ -48,4 +48,3 @@ class AnkiDeck:
     media_files: List[str]
 
     notemodels: Dict[UUID, AnkiNoteModel]
-
1 change: 0 additions & 1 deletion tests/test_basic.py
@@ -18,4 +18,3 @@ def test_pronunciation(deck: AnkiDeck, note: AnkiNote):

     assert audiofile in deck.media_files
     assert (deck.base_dir / "media" / audiofile).exists()
-
36 changes: 28 additions & 8 deletions tests/test_zeichenliste.py
@@ -23,7 +23,10 @@
     "eh_8": ("白本市心贵黑红钱件裤块来买衣服每店儿百错物", "双超购价色试售货便宜"),
     "eh_9": ("东西南远边铁方近前后离米先行走左右", "租站钟附联系楼往银局交通平"),
     "eh_10": ("喜欢火车共汽还些当然坐旅游自骑馆第", "厅参观暑假景它需颜利船览爬拍"),
-    "eh_11": ("足网球比赛想希望过会泳男能候场性别休其踢雪乒乓", "体育格队兰赢跳舞闲卷冲浪滑蹦极"),
+    "eh_11": (
+        "足网球比赛想希望过会泳男能候场性别休其踢雪乒乓",
+        "体育格队兰赢跳舞闲卷冲浪滑蹦极",
+    ),
     "eh_12": ("城从算回飞机玩或觉得历史千说应该只主意习", "实加坡兵俑"),
 }

@@ -48,7 +51,7 @@ def test_zeichenliste_plausible():
     for level, zl in ZEICHENLISTEN.items():
         for unit, chars in zl.items():
             assert " " not in chars
-            for ch in chars[0]+chars[1]:
+            for ch in chars[0] + chars[1]:
                 assert ord(ch) >= 0x2E80  # Start of first CJK unicode block


@@ -59,7 +62,9 @@ def test_mithanzi_field_not_empty(note: AnkiNote):
     if not req_zl and not req_readonly:
         return  # Not relevant for us
 
-    assert note.fields["MitHanzi"] != "", f"Note {note} requires hanzi, but field is not set"
+    assert (
+        note.fields["MitHanzi"] != ""
+    ), f"Note {note} requires hanzi, but field is not set"
 
 
 def test_zeichenliste_exists(deck: AnkiDeck):
@@ -90,8 +95,17 @@ def test_zeichenliste_exists(deck: AnkiDeck):
         assert len(levels) == 1, f"{note} should only have one level tag"
         cur_level = levels[0]
 
-        n_s = list(map(lambda udat: udat[0], filter(lambda udat: udat[1] == cur_level and udat[2] == cur_unit, all_units)))
-        assert len(n_s) == 1, f"{note} level and unit could not be found in unit list {all_units}"
+        n_s = list(
+            map(
+                lambda udat: udat[0],
+                filter(
+                    lambda udat: udat[1] == cur_level and udat[2] == cur_unit, all_units
+                ),
+            )
+        )
+        assert (
+            len(n_s) == 1
+        ), f"{note} level and unit could not be found in unit list {all_units}"
         cur_n = n_s[0]
 
         new_remaining = {}
@@ -118,10 +132,16 @@ def test_zeichenliste_exists(deck: AnkiDeck):
             chars.update(readonly)
 
         if (len(remaining) - len(missing)) != 0:
-            print(f"Also, {len(remaining)-len(missing)} optional unit(s) are missing characters: ")
-            for unit, (mandatory, readonly) in {k: v for k, v in remaining.items() if (k[1], k[2]) in OPTIONAL_UNITS}.items():
+            print(
+                f"Also, {len(remaining)-len(missing)} optional unit(s) are missing characters: "
+            )
+            for unit, (mandatory, readonly) in {
+                k: v for k, v in remaining.items() if (k[1], k[2]) in OPTIONAL_UNITS
+            }.items():
                 print(f"Unit {unit}: mandatory: '{mandatory}', readonly: {readonly}'")
                 chars.update(mandatory)
                 chars.update(readonly)
 
-        pytest.fail(f"Missing a total of {len(chars)} characters in {len(missing)} units: {missing}\nCharacters: {''.join(chars)}")
+        pytest.fail(
+            f"Missing a total of {len(chars)} characters in {len(missing)} units: {missing}\nCharacters: {''.join(chars)}"
+        )
