1 change: 1 addition & 0 deletions .gitignore
@@ -190,3 +190,4 @@ integration_tests/structs_03
integration_tests/structs_03.c
integration_tests/expr_08
integration_tests/expr_08.c
benchmark/generated_code/*
18 changes: 18 additions & 0 deletions benchmark/benchmark.toml
@@ -0,0 +1,18 @@
# Possible comparisons:
#   parser: compare parsing time
#   generate: if true, "filename" is a generating script in benchmark/generating_scripts/;
#             if false, it is the file under benchmark/ to compare directly.

[[benchmark]]
filename = 'test_math.py'
parser = true
generate = false

[[benchmark]]
filename = 'long_statement.py'
parser = true
generate = true

[[benchmark]]
filename = 'very_long_statement.py'
parser = true
generate = true
7 changes: 7 additions & 0 deletions benchmark/generating_scripts/long_statement.py
@@ -0,0 +1,7 @@
#!/usr/bin/python
N = 100
s1 = "(a*z+3+2*x + 3*y - x/(z**2-4) - x**(y**z))"
s = s1
for n in range(N):
    s = s + " * " + s1
print(s)
7 changes: 7 additions & 0 deletions benchmark/generating_scripts/very_long_statement.py
@@ -0,0 +1,7 @@
#!/usr/bin/python
N = 1000
s1 = "(a*z+3+2*x + 3*y - x/(z**2-4) - x**(y**z))"
s = s1
for n in range(N):
    s = s + " * " + s1
print(s)
150 changes: 150 additions & 0 deletions benchmark/test_math.py
@@ -0,0 +1,150 @@

from math import (factorial, isqrt, perm, comb, degrees, radians, exp, pow,
                  ldexp, fabs, gcd, lcm, floor, ceil, remainder, expm1, fmod, log1p)
from ltypes import i32, i64, f64

eps: f64
eps = 1e-12

def test_factorial_1():
    i: i32
    i = factorial(10)
    assert i == 3628800


def test_comb():
    i: i32
    i = comb(10, 2)
    assert i == 45


def test_perm():
    i: i32
    i = perm(5, 2)
    assert i == 20


def test_isqrt():
    i: i32
    i = isqrt(15)
    assert i == 3


def test_degrees():
    i: f64
    i = degrees(32.2)
    assert abs(i - 1844.924100321251) < eps


def test_radians():
    i: f64
    i = radians(100.1)
    assert abs(i - 1.7470745812463238) < eps


def test_exp():
    i: f64
    i = exp(2.34)
    assert abs(i - 10.381236562731843) < eps


def test_pow():
    i: f64
    i = pow(2.4, 4.3)
    assert abs(i - 43.14280115650323) < eps


def test_ldexp():
    i: f64
    i = ldexp(23.3, 2)
    assert abs(i - 93.2) < eps


def test_fabs():
    i: f64
    j: f64
    eps: f64
    eps = 1e-12
    i = fabs(10.3)
    j = fabs(-10.3)
    assert abs(i - j) < eps


def test_gcd():
    i: i32
    i = gcd(10, 4)
    assert i == 2
    i = gcd(21, 14)
    assert i == 7
    i = gcd(21, -12)
    assert i == 3


def test_lcm():
    i: i32
    i = lcm(10, 4)
    assert i == 20
    i = lcm(21, 14)
    assert i == 42
    i = lcm(21, -12)
    assert i == 84


def test_floor():
    i: i64
    i = floor(10.02)
    assert i == 10
    i = floor(-13)
    assert i == -13
    i = floor(-13.31)
    assert i == -14


def test_ceil():
    i: i64
    i = ceil(10.02)
    assert i == 11
    i = ceil(-13)
    assert i == -13
    i = ceil(-13.31)
    assert i == -13


def test_remainder():
    assert remainder(9.0, 3.0) == 0.0
    assert remainder(12.0, 5.0) == 2.0
    assert remainder(13.0, 5.0) == -2.0


def test_fmod():
    assert fmod(20.5, 2.5) == 0.5
    assert fmod(-20.5, 2.5) == -0.5


def test_log1p():
    assert abs(log1p(1.0) - 0.69314718055994529) < eps


def test_expm1():
    assert abs(expm1(1.0) - 1.71828182845904509) < eps


def check():
    test_factorial_1()
    test_comb()
    test_isqrt()
    test_perm()
    test_degrees()
    test_radians()
    test_exp()
    test_pow()
    test_fabs()
    test_ldexp()
    test_gcd()
    test_lcm()
    test_floor()
    test_ceil()
    test_remainder()
    test_fmod()
    test_expm1()
    test_log1p()

check()
2 changes: 2 additions & 0 deletions environment.yml
@@ -14,3 +14,5 @@ dependencies:
  - setuptools
  - toml
  - zlib
  - tabulate
  - termgraph
158 changes: 158 additions & 0 deletions run_benchmark.py
@@ -0,0 +1,158 @@
#!/usr/bin/env python
import argparse
import ast
import os
import re
import subprocess
from timeit import default_timer as clock
import sys
from io import StringIO
import toml
from tabulate import tabulate

# When you add an option to implemented_benchmarks, you must also add a class
# that gets the corresponding values from CPython and LPython (see the
# illustrative sketch after the Parser class below).
implemented_benchmarks = [
    "parser"
]


class Parser:
    @classmethod
    def get_lpython_result(cls, file_path):
        lpython_run = subprocess.Popen("lpython --new-parser --time-report " + file_path, shell=True,
                                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout = lpython_run.communicate()[0].decode('utf-8')
            parsing_value = re.search(r"\bParsing: .*ms\b", stdout)[0]
            parsing_value = parsing_value.replace("Parsing: ", '')
            parsing_value = parsing_value.replace('ms', '')
            parsing_value = float(parsing_value)
        except Exception as e:
            parsing_value = None
            print(e)

        return parsing_value

    @classmethod
    def get_cpython_result(cls, file_path):
        with open(file_path) as f:
            source = f.read()
        t1 = clock()
        ast.parse(source, type_comments=True)
        t2 = clock()
        return float(t2 - t1) * 1000
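
# Illustrative sketch, not part of this diff: every name listed in
# implemented_benchmarks is expected to come with a class shaped like Parser
# above, exposing get_lpython_result/get_cpython_result classmethods that
# return a time in ms or None. The stage name "Tokenizer", the "Tokenizing:"
# line assumed in lpython's --time-report output, and the use of CPython's
# tokenize module are assumptions made only to show that shape.
class Tokenizer:
    @classmethod
    def get_lpython_result(cls, file_path):
        run = subprocess.Popen("lpython --new-parser --time-report " + file_path, shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout = run.communicate()[0].decode('utf-8')
        match = re.search(r"\bTokenizing: .*ms\b", stdout)
        if match is None:
            return None
        return float(match[0].replace("Tokenizing: ", "").replace("ms", ""))

    @classmethod
    def get_cpython_result(cls, file_path):
        import io
        import tokenize
        with open(file_path, 'rb') as f:
            source = f.read()
        t1 = clock()
        list(tokenize.tokenize(io.BytesIO(source).readline))
        t2 = clock()
        return float(t2 - t1) * 1000
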

class Generator:
    @classmethod
    def generate_code(cls, generating_script_file):
        old_stdout = sys.stdout
        redirected_output = sys.stdout = StringIO()
        try:
            exec(open(generating_script_file).read())
        finally:
            sys.stdout = old_stdout
        generated_code_str = redirected_output.getvalue()
        generated_code_file_name = os.path.join('benchmark', 'generated_code', os.path.split(generating_script_file)[-1])
        with open(generated_code_file_name, 'w') as generated_code_file:
            generated_code_file.write(generated_code_str)
        return generated_code_file_name



class Graph:

    def __init__(self, option_name, plots):
        self.option_name = option_name
        self.plots = plots

    def get_plotting(self):
        with open(os.path.join('benchmark', 'report_file.dat'), 'w') as report_file:
            for plot in self.plots:
                report_file.write(f"{plot[0]},{plot[1]},{plot[2]}\n")
        termgraph_command = subprocess.run(
            "termgraph %s --space-between" % (os.path.join('benchmark', 'report_file.dat')), shell=True,
            capture_output=True)
        res = termgraph_command.stdout.decode('utf-8')
        res = "LPython is the first row, Python is the second one.\nTime in ms.\n" + res
        return res

    def get_numerical(self):
        return tabulate(self.plots, headers=['File Name', 'LPython', 'Python'])
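
# For reference, report_file.dat is written as plain "label,value,value" rows
# that termgraph reads directly; the numbers below are made-up placeholders:
#   long_statement.py,3.1,1.2
#   very_long_statement.py,30.5,11.8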



if __name__ == '__main__':
    os.environ["PATH"] = os.path.join(os.getcwd(), "src", "bin") + os.pathsep + os.environ["PATH"]
    app = argparse.ArgumentParser(description="LPython benchmark")
    app.add_argument("-n", "--numerical", action="store_true", help="show results as a numerical table")
    app.add_argument("-p", "--plots", action="store_true", help="show results as a bar graph")
    app.add_argument("-c", "--compare", action="store", nargs='+',
                     help=f"stages to compare; currently implemented: {implemented_benchmarks}")
    args = app.parse_args()

    # Show both views by default; restrict the output only when exactly one of
    # -p/-n is passed.
    show_graph = args.plots or not args.numerical
    show_numerical = args.numerical or not args.plots
    compare_stages = args.compare
    if compare_stages is None:
        compare_stages = implemented_benchmarks[:]

    files = toml.load("./benchmark/benchmark.toml")
    comparison_map: dict = {}
    for option in implemented_benchmarks:
        comparison_map[option] = dict()
        comparison_map[option][0] = []  # basic files
        comparison_map[option][1] = []  # generated files

    for file in files['benchmark']:
        for option in compare_stages:
            if file.get(option, False):
                is_generated = int(file.get('generate', False))
                comparison_map[option][is_generated].append(file["filename"])

    for option, files_list in comparison_map.items():
        basic_files_list = files_list[0]
        generated_files_list = files_list[1]
        if generated_files_list == [] and basic_files_list == []:
            pref = "\033["
            reset = f"{pref}0m"
            print(f"{pref}1;31mThere are no files for this comparison option ({option}){reset}")
        else:
            compare_result = []

            if option == 'parser':
                for filename in basic_files_list:
                    basic_code_file = os.path.join('benchmark', filename)
                    cpython = Parser.get_cpython_result(basic_code_file)
                    lpython = Parser.get_lpython_result(basic_code_file)
                    if cpython is not None and lpython is not None:
                        compare_result.append((filename, lpython, cpython))

                for filename in generated_files_list:
                    generating_script_file = os.path.join('benchmark', 'generating_scripts', filename)
                    try:
                        generated_code_file = Generator.generate_code(generating_script_file)
                    except Exception as e:
                        print(e)
                        continue
                    cpython = Parser.get_cpython_result(generated_code_file)
                    lpython = Parser.get_lpython_result(generated_code_file)
                    if cpython is not None and lpython is not None:
                        compare_result.append((filename, lpython, cpython))
            # other comparison stages would be handled here

            if show_graph or show_numerical:
                graph = Graph(option, compare_result)

                pref = "\033["
                reset = f"{pref}0m"
                print(f'\t\t {pref}1;32m{option} comparison {reset}')
                if show_graph:
                    print(graph.get_plotting())

                if show_numerical:
                    print(graph.get_numerical())
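
# Usage sketch (assumptions: the repository root is the current working
# directory, and a built lpython binary is available under src/bin/ as the
# PATH setup above expects):
#   python run_benchmark.py                   # graph + table for every implemented stage
#   python run_benchmark.py -n -c parser      # numerical table only, parser stage only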