Skip to content

Commit

Permalink
Merge pull request #29 from Erotemic/feature/homogenous_heuristic
Browse files Browse the repository at this point in the history
Implement homogeneous heuristic
  • Loading branch information
Erotemic committed Jan 13, 2023
2 parents e71b7be + 643dc7b commit 41fd395
Show file tree
Hide file tree
Showing 5 changed files with 284 additions and 54 deletions.
6 changes: 6 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,14 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm

## [Version: 1.1.0] - Unreleased

### Added
* New argument `homogeneous`, which can be set to True to enable a speed
optimization. Defaults to `auto`, which uses a heuristic to determine a good
setting in most cases.

### Fixed
* `time_thresh` is now respected when calculating dynamic display updates
* Fixed an issue where sometimes the final message would not display.


## [Version: 1.0.1] - Released 2022-10-07
Expand Down
6 changes: 6 additions & 0 deletions README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,12 @@ ProgIter lets you measure and print the progress of an iterative process. This
can be done either via an iterable interface or using the manual API. Using the
iterable interface is most common.

ProgIter is unthreaded, which differentiates it from tqdm and rich. Those are
great threaded progress indicators, but they come with different tradeoffs.
Single-threaded progress gives you synchronous, uncluttered logging, increased
stability, and — unintuitively — speed. Meanwhile, threaded progress bars are
more responsive and can look prettier (unless you try to log stdout to disk).

.. image:: https://i.imgur.com/HoJJYzd.gif
:height: 300px
:align: left
Expand Down
128 changes: 128 additions & 0 deletions dev/bench_check.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,128 @@
import ubelt as ub
import progiter
import timerit


def basic_benchmark():
    """
    Run the simplest benchmark where we iterate over nothing and compare the
    slowdown of using a progress iterator versus doing nothing.

    Prints a table of mean / min loop times for each variant along with its
    overhead relative to the bare-loop baseline.
    """
    N = 10_000

    ti = timerit.Timerit(500, bestof=10, verbose=2)

    # Baseline: a bare loop with no progress reporting at all.
    for timer in ti.reset('baseline'):
        for i in range(N):
            ...

    for timer in ti.reset('old progiter'):
        for i in ub.ProgIter(range(N)):
            ...

    for timer in ti.reset('new progiter, enabled=False'):
        for i in progiter.ProgIter(range(N), enabled=False):
            ...

    for timer in ti.reset('new progiter, homogeneous=True'):
        for i in progiter.ProgIter(range(N), homogeneous=True):
            ...

    for timer in ti.reset('new progiter, homogeneous=auto'):
        for i in progiter.ProgIter(range(N), homogeneous='auto'):
            ...

    for timer in ti.reset('new progiter, homogeneous=False'):
        for i in progiter.ProgIter(range(N), homogeneous=False):
            ...

    import tqdm
    for timer in ti.reset('tqdm'):
        for i in tqdm.tqdm(range(N)):
            ...

    # rich drives its display through a Live context; update the task
    # manually each iteration so the per-item update cost is measured.
    from rich.live import Live
    from rich.progress import Progress as richProgress
    for timer in ti.reset('rich.progress'):
        prog_manager = richProgress()
        task_id = prog_manager.add_task(description='', total=N)
        live_context = Live(prog_manager)
        with live_context:
            for i in range(N):
                prog_manager.update(task_id, advance=1)

    # Summarize: absolute times plus overhead relative to the baseline.
    import pandas as pd
    df = pd.DataFrame.from_dict(ti.rankings['mean'], orient='index', columns=['mean'])
    df.loc[list(ti.rankings['min'].keys()), 'min'] = list(ti.rankings['min'].values())
    df['mean_rel_overhead'] = df['mean'] / df.loc['baseline', 'mean']
    df['min_rel_overhead'] = df['min'] / df.loc['baseline', 'min']
    print(df.to_string())


def other_tests():
    """
    Secondary micro-benchmarks.

    First compares the old (ubelt) vs new (progiter) ProgIter at fixed
    display frequencies, then ranks the per-call cost of the stdlib clock
    functions (relevant for choosing the cheapest timer to poll in the
    progress loop).
    """
    N = 100

    # Fixed-frequency comparison: old vs new implementation.
    with ub.Timer(label='new fixed freq=10'):
        for i in progiter.ProgIter(range(N), freq=10, adjust=False):
            pass

    with ub.Timer(label='old fixed freq=10'):
        for i in ub.ProgIter(range(N), freq=10, adjust=False):
            pass

    with ub.Timer(label='new fixed freq=1'):
        for i in progiter.ProgIter(range(N), freq=1, adjust=False):
            pass

    with ub.Timer(label='old fixed freq=1'):
        for i in ub.ProgIter(range(N), freq=1, adjust=False):
            pass

    # Rank the cost of each stdlib clock. The calls are written out
    # literally (not via getattr) so the measured expression is exactly
    # what the progress loop would execute.
    import time
    ti = timerit.Timerit(100000, bestof=10, verbose=2)

    for timer in ti.reset('time.process_time()'):
        with timer:
            time.process_time()

    for timer in ti.reset('time.process_time_ns()'):
        with timer:
            time.process_time_ns()

    for timer in ti.reset('time.time()'):
        with timer:
            time.time()

    for timer in ti.reset('time.time_ns()'):
        with timer:
            time.time_ns()

    for timer in ti.reset('time.perf_counter()'):
        with timer:
            time.perf_counter()

    for timer in ti.reset('time.perf_counter_ns()'):
        with timer:
            time.perf_counter_ns()

    for timer in ti.reset('time.thread_time()'):
        with timer:
            time.thread_time()

    for timer in ti.reset('time.monotonic()'):
        with timer:
            time.monotonic()

    for timer in ti.reset('time.monotonic_ns()'):
        with timer:
            time.monotonic_ns()

    print('ti.rankings = {}'.format(ub.repr2(ti.rankings, nl=2, align=':', precision=8)))

if __name__ == '__main__':
    # CommandLine:
    #     python ~/code/progiter/dev/bench_check.py
    basic_benchmark()

0 comments on commit 41fd395

Please sign in to comment.