Use pytest instead of nose
cdown committed Jan 2, 2020
1 parent d5f215b commit 2a661fb
Showing 4 changed files with 47 additions and 60 deletions.
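The changes below follow one pattern: nose's imported assertion helpers (eq_, assert_true, assert_raises, and friends) become plain assert statements and pytest idioms, and the nose runner is swapped for pytest in tox.ini. A minimal, illustrative sketch of that mapping, assuming pytest >= 4 — it is not part of the commit, and divide is a made-up stand-in:

import pytest


def divide(a, b):
    # Made-up stand-in so the sketch is self-contained and runnable.
    return a / b


def test_division_pytest_style():
    # nose: eq_(divide(6, 3), 2)  ->  a bare assert; pytest's assertion
    # rewriting shows both sides on failure.
    assert divide(6, 3) == 2

    # nose: assert_true(divide(1, 2) < 1)  ->  again just assert.
    assert divide(1, 2) < 1

    # nose: assert_raises(ZeroDivisionError, divide, 1, 0)
    # pytest: a context manager that exposes the raised exception on .value.
    with pytest.raises(ZeroDivisionError) as excinfo:
        divide(1, 0)
    assert "division" in str(excinfo.value)
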
16 changes: 7 additions & 9 deletions srt_tools/tests/test_srt_tools.py
@@ -4,8 +4,6 @@
import subprocess
import sys
import tempfile
from nose.tools import assert_true
from parameterized import parameterized
from shlex import quote


@@ -27,7 +25,7 @@ def run_srt_util(cmd, shell=False, encoding="utf-8-sig"):
new_path = f.read().strip().split("=", 1)[1]
extra_env = {"PATH": new_path}

env = {"PYTHONPATH": ".", "SystemRoot": "C:\Windows"}
env = {"PYTHONPATH": ".", "SystemRoot": r"C:\Windows"}
env.update(extra_env)

raw_out = subprocess.check_output(cmd, shell=shell, env=env)
@@ -79,13 +77,13 @@ def assert_supports_all_io_methods(cmd, exclude_output=False, exclude_stdin=Fals
shell=True,
encoding="gb2312",
)
assert_true(len(set(outputs)) == 1, repr(outputs))
assert len(set(outputs)) == 1, repr(outputs)
finally:
os.remove(out_file)


@parameterized(
[
def test_tools_support():
matrix = [
(["srt-normalise"], False),
(["srt-fixed-timeshift", "--seconds", "5"], False),
(
@@ -109,6 +107,6 @@ def assert_supports_all_io_methods(cmd, exclude_output=False, exclude_stdin=Fals
# Need to sort out time/thread issues
# (('srt-play'), True),
]
)
def test_tools_support(args, exclude_output=False, exclude_stdin=False):
assert_supports_all_io_methods(args, exclude_output, exclude_stdin)

for args in matrix:
assert_supports_all_io_methods(*args)
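The @parameterized decorator above is replaced with a plain loop over a matrix inside a single test. An alternative that keeps one test id per command would be pytest.mark.parametrize; the sketch below is illustrative only (not what the commit does), and its assert_supports_all_io_methods is a hypothetical stand-in for the real helper:

import pytest


def assert_supports_all_io_methods(cmd, exclude_output=False, exclude_stdin=False):
    # Hypothetical stand-in for the helper defined earlier in this file, so
    # that the sketch runs on its own.
    assert isinstance(cmd, list) and cmd


@pytest.mark.parametrize(
    "cmd,exclude_output",
    [
        (["srt-normalise"], False),
        (["srt-fixed-timeshift", "--seconds", "5"], False),
    ],
)
def test_tools_support_parametrized(cmd, exclude_output):
    assert_supports_all_io_methods(cmd, exclude_output=exclude_output)
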
3 changes: 2 additions & 1 deletion tests/requirements.txt
@@ -1,2 +1,3 @@
nose>=1,<2
pytest>=4,<5
pytest-cov>=2,<3
hypothesis>=4,<5
83 changes: 36 additions & 47 deletions tests/test_srt.py
@@ -2,32 +2,23 @@

from __future__ import unicode_literals
from datetime import timedelta
import collections
import functools
import os
import string
from io import StringIO

import pytest
from hypothesis import given, settings, HealthCheck, assume
import hypothesis.strategies as st
from nose.tools import (
eq_ as eq,
assert_not_equal as neq,
assert_raises,
assert_false,
assert_true,
assert_in,
assert_count_equal,
)

import srt

SUPPRESSED_CHECKS = [HealthCheck.too_slow]

settings.register_profile("base", settings(suppress_health_check=SUPPRESSED_CHECKS))
settings.register_profile(
"base", settings(suppress_health_check=SUPPRESSED_CHECKS),
)
settings.register_profile(
"release", settings(max_examples=1000, suppress_health_check=SUPPRESSED_CHECKS),
"release", settings(max_examples=1000, suppress_health_check=SUPPRESSED_CHECKS)
)
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "base"))

@@ -62,12 +53,12 @@ def subs_eq(got, expected, any_order=False):
Compare Subtitle objects using vars() so that differences are easy to
identify.
"""
got_vars = [vars(sub) for sub in got]
expected_vars = [vars(sub) for sub in expected]
got_vars = [frozenset(vars(sub).items()) for sub in got]
expected_vars = [frozenset(vars(sub).items()) for sub in expected]
if any_order:
assert_count_equal(got_vars, expected_vars)
assert collections.Counter(got_vars) == collections.Counter(expected_vars)
else:
eq(got_vars, expected_vars)
assert got_vars == expected_vars


def timedeltas(min_value=0, max_value=TIMEDELTA_MAX_DAYS):
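The subs_eq change above also swaps nose's assert_count_equal for collections.Counter. Since vars() returns plain dicts, which are unhashable and so cannot be counted directly, each one is first frozen into a frozenset of its items. A minimal sketch of the idea, with made-up data:

import collections

got = [{"index": 1, "content": "a"}, {"index": 2, "content": "b"}]
expected = [{"index": 2, "content": "b"}, {"index": 1, "content": "a"}]

# collections.Counter(got) would raise "TypeError: unhashable type: 'dict'",
# so each dict is converted to a hashable frozenset of its items first.
got_keys = [frozenset(d.items()) for d in got]
expected_keys = [frozenset(d.items()) for d in expected]

# Order-insensitive comparison, equivalent to nose's assert_count_equal.
assert collections.Counter(got_keys) == collections.Counter(expected_keys)
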
@@ -215,33 +206,33 @@ def test_compose_and_parse_strict_mode(content):

# Strict mode should remove blank lines in content, leading, and trailing
# newlines.
assert_false(parsed_strict.content.startswith("\n"))
assert_false(parsed_strict.content.endswith("\n"))
assert_false("\n\n" in parsed_strict.content)
assert not parsed_strict.content.startswith("\n")
assert not parsed_strict.content.endswith("\n")
assert "\n\n" not in parsed_strict.content

# When strict mode is false, no processing should be applied to the
# content (other than \r\n becoming \n).
eq(parsed_unstrict.content, sub.content.replace("\r\n", "\n"))
assert parsed_unstrict.content == sub.content.replace("\r\n", "\n")


@given(st.integers(min_value=1, max_value=TIMEDELTA_MAX_DAYS))
def test_timedelta_to_srt_timestamp_can_go_over_24_hours(days):
srt_timestamp = srt.timedelta_to_srt_timestamp(timedelta(days=days))
srt_timestamp_hours = int(srt_timestamp.split(":")[0])
eq(srt_timestamp_hours, days * HOURS_IN_DAY)
assert srt_timestamp_hours == days * HOURS_IN_DAY


@given(subtitles())
def test_subtitle_equality(sub_1):
sub_2 = srt.Subtitle(**vars(sub_1))
eq(sub_1, sub_2)
assert sub_1 == sub_2


@given(subtitles())
def test_subtitle_inequality(sub_1):
sub_2 = srt.Subtitle(**vars(sub_1))
sub_2.index += 1
neq(sub_1, sub_2)
assert sub_1 != sub_2


@given(subtitles())
@@ -255,8 +246,8 @@ def test_subtitle_from_scratch_equality(subtitle):

subs_eq([sub_1], [sub_2])
# In case subs_eq and eq disagree for some reason
eq(sub_1, sub_2)
eq(hash(sub_1), hash(sub_2))
assert sub_1 == sub_2
assert hash(sub_1) == hash(sub_2)


@given(st.lists(subtitles()))
@@ -317,9 +308,8 @@ def test_subs_missing_content_removed(content_subs, contentless_subs, contentles
# The subtitles should be reindexed starting at start_index, excluding
# contentless subs
default_start_index = 1
eq(
[sub.index for sub in composed_subs],
list(range(default_start_index, default_start_index + len(composed_subs))),
assert [sub.index for sub in composed_subs] == list(
range(default_start_index, default_start_index + len(composed_subs))
)


@@ -353,14 +343,13 @@ def test_sort_and_reindex(input_subs, start_index):
)

# The subtitles should be reindexed starting at start_index
eq(
[sub.index for sub in reindexed_subs],
list(range(start_index, start_index + len(input_subs))),
assert [sub.index for sub in reindexed_subs] == list(
range(start_index, start_index + len(input_subs))
)

# The subtitles should be sorted by start time
expected_sorting = sorted(input_subs, key=lambda sub: sub.start)
eq(reindexed_subs, expected_sorting)
assert reindexed_subs == expected_sorting


@given(st.lists(subtitles()))
@@ -374,7 +363,7 @@ def test_sort_and_reindex_no_skip(input_subs):
reindexed_subs = list(srt.sort_and_reindex(input_subs, skip=False))

# Nothing should have been skipped
eq(len(reindexed_subs), len(input_subs))
assert len(reindexed_subs) == len(input_subs)


@given(st.lists(subtitles(), min_size=1))
@@ -387,7 +376,7 @@ def test_sort_and_reindex_same_start_time_uses_end(input_subs):

# The subtitles should be sorted by end time when start time is the same
expected_sorting = sorted(input_subs, key=lambda sub: sub.end)
eq(reindexed_subs, expected_sorting)
assert reindexed_subs == expected_sorting


@given(st.lists(subtitles(), min_size=1), st.integers(min_value=0))
@@ -411,10 +400,10 @@ def test_sort_and_reindex_not_in_place_matches(input_subs, start_index):
subs_eq(not_in_place_output, in_place_output)

# Not in place sort_and_reindex should have created new subs
assert_false(any(id(sub) in nip_ids for sub in not_in_place_output))
assert not any(id(sub) in nip_ids for sub in not_in_place_output)

# In place sort_and_reindex should be reusing the same subs
assert_true(all(id(sub) in ip_ids for sub in in_place_output))
assert all(id(sub) in ip_ids for sub in in_place_output)


@given(
@@ -435,7 +424,7 @@ def test_parser_noncontiguous(subs, fake_idx, garbage, fake_timedelta):
"\n\n", "\n\n%d\n%s %s" % (fake_idx, srt_timestamp, garbage)
)

with assert_raises(srt.SRTParseError):
with pytest.raises(srt.SRTParseError):
list(srt.parse(composed))


@@ -455,7 +444,7 @@ def test_parser_noncontiguous_leading(subs, garbage):
# checks
composed = garbage + srt.compose(subs)

with assert_raises(srt.SRTParseError):
with pytest.raises(srt.SRTParseError):
list(srt.parse(composed))


@@ -472,16 +461,16 @@ def test_parser_didnt_match_to_end_raises(subs, fake_idx, garbage, fake_timedelt
srt_blocks.append(garbage)
composed = "".join(srt_blocks)

with assert_raises(srt.SRTParseError) as thrown_exc:
with pytest.raises(srt.SRTParseError) as thrown_exc:
list(srt.parse(composed))

# Since we will consume as many \n as needed until we meet the lookahead
# assertion, leading newlines in `garbage` will be stripped.
garbage_stripped = garbage.lstrip("\n")

eq(garbage_stripped, thrown_exc.exception.unmatched_content)
eq(len(composed) - len(garbage_stripped), thrown_exc.exception.expected_start)
eq(len(composed), thrown_exc.exception.actual_start)
assert garbage_stripped == thrown_exc.value.unmatched_content
assert len(composed) - len(garbage_stripped) == thrown_exc.value.expected_start
assert len(composed) == thrown_exc.value.actual_start


@given(st.lists(subtitles()))
@@ -522,8 +511,8 @@ def test_parser_can_parse_with_fullwidth_delimiter(subs):
def test_repr_doesnt_crash(sub):
# Not much we can do here, but we should make sure __repr__ doesn't crash
# or anything and it does at least vaguely look like what we want
assert_in("Subtitle", repr(sub))
assert_in(str(sub.index), repr(sub))
assert "Subtitle" in repr(sub)
assert str(sub.index) in repr(sub)


@given(st.lists(subtitles()))
@@ -561,14 +550,14 @@ def test_compose_and_parse_strict_custom_eol(input_subs, eol):
@given(equivalent_timestamps())
def test_equal_timestamps_despite_different_fields_parsed_as_equal(timestamps):
ts1, ts2 = timestamps
eq(srt.srt_timestamp_to_timedelta(ts1), srt.srt_timestamp_to_timedelta(ts2))
assert srt.srt_timestamp_to_timedelta(ts1) == srt.srt_timestamp_to_timedelta(ts2)


@given(timedeltas())
def test_bad_timestamp_format_raises(ts):
ts = srt.timedelta_to_srt_timestamp(ts)
ts = ts.replace(":", "t", 1)
with assert_raises(srt.TimestampParseError):
with pytest.raises(srt.TimestampParseError):
srt.srt_timestamp_to_timedelta(ts)


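One behavioural detail in the test_srt.py changes above: nose's assert_raises context manager exposes the raised exception as .exception, while pytest.raises exposes it as .value, which is why test_parser_didnt_match_to_end_raises now reads thrown_exc.value. A minimal sketch, with a made-up exception class standing in for srt.SRTParseError:

import pytest


class FakeParseError(Exception):
    # Hypothetical stand-in for srt.SRTParseError.
    def __init__(self, message, unmatched_content):
        super().__init__(message)
        self.unmatched_content = unmatched_content


def test_exception_attributes_live_on_value():
    with pytest.raises(FakeParseError) as thrown_exc:
        raise FakeParseError("could not parse", unmatched_content="garbage")

    # pytest: the exception instance is on .value (nose used .exception).
    assert thrown_exc.value.unmatched_content == "garbage"
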
5 changes: 2 additions & 3 deletions tox.ini
@@ -7,7 +7,7 @@ deps =
-rsrt_tools/tests/requirements.txt
commands =
{basepython} --version
nosetests -vv --processes=-1 --process-timeout=600
pytest
setenv=
release: HYPOTHESIS_PROFILE=release

@@ -18,8 +18,7 @@
coveralls
commands =
coverage erase
# Can't run in parallel because of https://github.com/nedbat/coveragepy/issues/883
nosetests -v --with-coverage --cover-branches --cover-min-percentage=100 --cover-package=srt
pytest --cov=srt --cov-branch --cov-fail-under=100
coveralls

[testenv:pylint]
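For reference, the pytest-cov flags above map roughly onto the old nose coverage options: --cov=srt to --cover-package=srt, --cov-branch to --cover-branches, and --cov-fail-under=100 to --cover-min-percentage=100. A minimal sketch of driving the same run from Python instead of tox (illustrative only; assumes pytest and pytest-cov are installed):

import sys

import pytest

if __name__ == "__main__":
    # Mirrors the tox command: pytest --cov=srt --cov-branch --cov-fail-under=100
    sys.exit(pytest.main(["--cov=srt", "--cov-branch", "--cov-fail-under=100"]))
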
