
Commit

Merge branch 'release/0.5.8'
cnheider committed Feb 4, 2021
2 parents 4f21148 + 66d988e commit b35a735
Showing 59 changed files with 4,370 additions and 4,370 deletions.
12 changes: 6 additions & 6 deletions benchmarks/benchmark_func.py
@@ -7,9 +7,9 @@


def benchmark_func(func, times=100000):
    start = time.time()
    result = None
    for _ in range(times):
        result = func()
    end = time.time()
    return end - start, result
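
For reference, a minimal sketch of how this helper might be called; the workload and the import path are illustrative assumptions, not part of the diff:

from benchmark_func import benchmark_func  # import path assumed


def tiny_workload():
    # Deliberately cheap body so that call overhead dominates the measurement.
    return sum(range(100))


elapsed, last_result = benchmark_func(tiny_workload, times=10_000)
print(f"tiny_workload: {elapsed} seconds, last result {last_result}")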
82 changes: 43 additions & 39 deletions benchmarks/pqp_benchmark.py
@@ -6,53 +6,57 @@


class Zeroes(PooledQueueTask):
    def call(self, batch_size, *args, tensor_size=(9, 9, 9, 9), **kwargs):
        batch = [(numpy.zeros(tensor_size), i) for i in range(batch_size)]
        imgs = numpy.array([i[0] for i in batch], dtype=numpy.float32)
        ground_truth = numpy.array([i[1] for i in batch], dtype=numpy.float32)
        return (imgs, ground_truth)


Lamb = lambda a, tensor_size: f"{a, tensor_size}"


def Func(a, tensor_size):
    return f"{a, tensor_size}"


def pqp_benchmark():
    task = Zeroes()
    # task = Lamb  # Error: can't be pickled
    # task = Func
    batch_size = 16
    tensor_size = (9, 9, 9, 9, 9)
    wait_time = 0.1
    samples = 100

    df = PooledQueueProcessor(
        task,
        args=[batch_size],
        kwargs={"tensor_size": tensor_size},
        max_queue_size=samples,
    )

    def get():
        return df.get()

    def wait_get():
        time.sleep(wait_time)
        return df.get()

    def generate():
        return task(batch_size, tensor_size=tensor_size)

    def wait_generate():
        time.sleep(wait_time)
        return task(batch_size, tensor_size=tensor_size)

    for func, discount in zip(
        (get, wait_get, generate, wait_generate),
        (0, samples * wait_time, 0, samples * wait_time),
    ):
        t, res = benchmark_func(func, samples)
        print(f"{func.__name__}: {t - discount} seconds")


if __name__ == "__main__":
    pqp_benchmark()
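
For the two wait_* variants the discount term backs out the deliberate sleeps: with samples = 100 and wait_time = 0.1, the reported time is reduced by 100 × 0.1 = 10 seconds, so all four printed figures approximate the cost of obtaining a batch alone. A minimal sketch of driving the processor directly, reusing only names visible in this file (the constructor arguments below and the behaviour of get() are assumptions, not part of the diff):

# Sketch only: reuses Zeroes and PooledQueueProcessor as defined/imported in pqp_benchmark.py above.
processor = PooledQueueProcessor(
    Zeroes(),
    args=[16],
    kwargs={"tensor_size": (9, 9, 9, 9)},
    max_queue_size=8,
)
# get() is assumed to hand back the task's return value, i.e. (imgs, ground_truth).
imgs, ground_truth = processor.get()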
66 changes: 33 additions & 33 deletions benchmarks/returns_benchmark.py
@@ -6,49 +6,49 @@


def returns_benchmark():
    a = 1
    b = 2
    c = 3

    RandomABC = namedtuple("RandomABC", ("a", "b", "c"))

    def implicit_return():
        return a, b, c

    def list_return():
        return [a, b, c]

    def tuple_return():
        return (a, b, c)

    def dict_return():
        return {"a": a, "b": b, "c": c}

    def sorcery_return():
        return sorcery.dict_of(a, b, c)

    def nod_return():
        return NOD(a=a, b=b, c=c)

    def inferred_return():
        return NOD.nod_of(a, b, c)

    def namedtuple_return():
        return RandomABC(a, b, c)

    for func in (
        implicit_return,
        list_return,
        tuple_return,
        dict_return,
        namedtuple_return,
        nod_return,
        sorcery_return,
        inferred_return,
    ):
        t, res = benchmark_func(func)
        print(f"{func.__name__}: {t} seconds, {res}")


if __name__ == "__main__":
    returns_benchmark()
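
The styles being compared differ mainly in how the caller consumes the result; a small self-contained illustration of two of them (names and values here are hypothetical, not from the benchmark):

from collections import namedtuple

RandomABC = namedtuple("RandomABC", ("a", "b", "c"))


def tuple_style():
    return 1, 2, 3


def namedtuple_style():
    return RandomABC(1, 2, 3)


a, b, c = tuple_style()  # positional unpacking only
record = namedtuple_style()  # unpacking or named access: record.a, record.b, record.c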
116 changes: 65 additions & 51 deletions docs/source/conf.py
@@ -21,7 +21,7 @@
import sys
from pathlib import Path

PACKAGE_ROOT = Path(__file__).parent.parent.parent
sys.path.insert(0, str(PACKAGE_ROOT.absolute()))

from warg import PROJECT_AUTHOR, PROJECT_NAME, PROJECT_YEAR, PROJECT_VERSION
@@ -51,7 +51,7 @@
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.graphviz",
]
]

napoleon_use_ivar = True

@@ -63,7 +63,7 @@
#
# source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
source_suffix = {".rst":"restructuredtext", ".txt":"markdown", ".md":"markdown"}
source_suffix = {".rst": "restructuredtext", ".txt": "markdown", ".md": "markdown"}

# source_parsers = {
# '.md': CommonMarkParser,
@@ -142,20 +142,28 @@
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        f"{PROJECT_NAME}.tex",
        f"{PROJECT_NAME} Documentation",
        PROJECT_AUTHOR,
        "manual",
    )
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, PROJECT_NAME, f"{PROJECT_NAME} Documentation", [PROJECT_AUTHOR], 1)
]

# -- Options for Texinfo output -------------------------------------------

@@ -171,8 +179,8 @@
        PROJECT_NAME,
        "One line description of project.",
        "Miscellaneous",
    )
]

# -- Options for Epub output ----------------------------------------------

@@ -196,9 +204,9 @@

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python":("https://docs.python.org/", None),
"python": ("https://docs.python.org/", None),
"numpy": ("http://docs.scipy.org/doc/numpy/", None),
}
}

# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
@@ -209,46 +217,52 @@


def patched_make_field(self, types, domain, items, **kw):
    # `kw` catches `env=None` needed for newer sphinx while maintaining
    # backwards compatibility when passed along further down!
    # #type: (List, unicode, Tuple) -> nodes.field
    def handle_item(fieldarg, content):
        par = nodes.paragraph()
        par += addnodes.literal_strong("", fieldarg)  # Patch: this line added
        # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
        #                            addnodes.literal_strong))
        if fieldarg in types:
            par += nodes.Text(" (")
            # NOTE: using .pop() here to prevent a single type node to be
            # inserted twice into the doctree, which leads to
            # inconsistencies later when references are resolved
            fieldtype = types.pop(fieldarg)
            if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                typename = "".join(n.astext() for n in fieldtype)
                typename = typename.replace("int", "python:int")
                typename = typename.replace("long", "python:long")
                typename = typename.replace("float", "python:float")
                typename = typename.replace("type", "python:type")
                par.extend(
                    self.make_xrefs(
                        self.typerolename,
                        domain,
                        typename,
                        addnodes.literal_emphasis,
                        **kw,
                    )
                )
            else:
                par += fieldtype
            par += nodes.Text(")")
        par += nodes.Text(" -- ")
        par += content
        return par

    field_name = nodes.field_name("", self.label)
    if len(items) == 1 and self.can_collapse:
        field_arg, content = items[0]
        body_node = handle_item(field_arg, content)
    else:
        body_node = self.list_type()
        for field_arg, content in items:
            body_node += nodes.list_item("", handle_item(field_arg, content))
    field_body = nodes.field_body("", body_node)
    return nodes.field("", field_name, field_body)


TypedField.make_field = patched_make_field
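
For context, the patch changes how typed field lists such as :ivar: entries are rendered: the field name is emitted as strong literal text instead of being turned into a cross-reference that usually dangles. A hypothetical docstring of the kind affected:

class Example:
    """Container documented with instance-variable fields.

    :ivar count: number of processed items.
    :vartype count: int
    """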
4 changes: 4 additions & 0 deletions requirements_tests.txt
@@ -0,0 +1,4 @@
pytest>=4.3.0
pytest-cov>=2.6.1
tox
#draugr
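
These pins would typically be installed before running the suite, e.g. with `pip install -r requirements_tests.txt` (usage assumed; the diff only adds the file).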
6 changes: 3 additions & 3 deletions samples/__init__.py
@@ -1,8 +1,8 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 11-12-2020
"""