DBMSBenchmarker: Silent mode for reading evaluations
perdelt committed Feb 19, 2024
1 parent e0f7dfb commit 3eb160f
Showing 3 changed files with 15 additions and 10 deletions.
10 changes: 6 additions & 4 deletions dbmsbenchmarker/benchmarker.py
@@ -1570,17 +1570,19 @@ def runBenchmarks(self):
         self.generateReportsAll()
         # stop logging multiprocessing
         mp.log_to_stderr(logging.ERROR)
-    def readResultfolder(self):
+    def readResultfolder(self, silent=False):
         """
         Reads data of previous benchmark from folder.
 
+        :param silent: No output of status
         :return: returns nothing
         """
-        print("Read results")
+        if not silent:
+            print("Read results")
         self.clearBenchmarks()
         # read from stored results
         self.logger.debug("Read from "+self.path)
-        self.reporterStore.readProtocol()
+        self.reporterStore.readProtocol(silent)
         for numQuery,q in enumerate(self.queries):
             query = tools.query(q)
             loaded = self.reporterStore.load(query, numQuery+1, [self.timerExecution, self.timerTransfer, self.timerConnect])
@@ -1904,7 +1906,7 @@ class inspector(benchmarker):
     def __init__(self, result_path, code, anonymize=False, silent=False):
         benchmarker.__init__(self,result_path=result_path+"/"+str(code), anonymize=anonymize)
         self.getConfig()
-        self.readResultfolder()
+        self.readResultfolder(silent=silent)
         if not silent:
             print("Connections:")
         for c in self.connections:
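The inspector constructor now forwards its silent flag to readResultfolder(). A minimal usage sketch follows, assuming the inspector class shown above in dbmsbenchmarker/benchmarker.py is importable from that module; the folder name "results" and the run code "1234567890" are hypothetical placeholders.

    # Read a stored benchmark run without any status output.
    # "results" and "1234567890" are placeholders for an existing
    # result folder and the code of a run inside it.
    from dbmsbenchmarker.benchmarker import inspector

    # silent=True suppresses the "Read results" message and the
    # "Connections:" listing during construction.
    evaluate = inspector(result_path="results", code="1234567890", silent=True)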
12 changes: 7 additions & 5 deletions dbmsbenchmarker/evaluator.py
@@ -39,23 +39,24 @@ class evaluator():
     """
     Class for generating evaluation cube.
     """
-    def __init__(self, benchmarker, load=False, force=False):
+    def __init__(self, benchmarker, load=False, force=False, silent=False):
         """
         Construct a new 'evaluator' object.
 
         :param benchmarker: Object of benchmarker containing information about queries, connections and benchmark times
+        :param silent: No output of status
         :return: returns nothing
         """
         self.benchmarker = benchmarker
         if force:
             evaluator.evaluation = {}
         if len(evaluator.evaluation) == 0:
             if load:
-                self.load()
+                self.load(silent)
             else:
                 evaluator.evaluation = self.generate()
                 # force to use stored format
-                self.load()
+                self.load(silent)
     def get_evaluation(self):
         return evaluator.evaluation
     def generate(self):
@@ -414,8 +415,9 @@ def hasInitScript(c):
         with open(filename, 'w') as f:
             json.dump(evaluation, f)
         return evaluation
-    def load(self):
-        print("Load Evaluation")
+    def load(self, silent=False):
+        if not silent:
+            print("Load Evaluation")
         filename = self.benchmarker.path+'/evaluation.json'
         with open(filename,'r') as f:
             evaluator.evaluation = json.load(f)
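The same flag is available when building or reloading the evaluation cube. A minimal sketch under the assumption that "benchmarks" is an already constructed benchmarker (or inspector) instance whose result folder contains an evaluation.json:

    # Build the evaluation cube without the "Load Evaluation" message.
    # "benchmarks" is assumed to be an existing benchmarker/inspector instance.
    from dbmsbenchmarker.evaluator import evaluator

    # force=True resets any cached cube, load=True reads evaluation.json,
    # silent=True suppresses the status print during loading.
    ev = evaluator(benchmarks, load=True, force=True, silent=True)
    evaluation = ev.get_evaluation()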
3 changes: 2 additions & 1 deletion dbmsbenchmarker/reporter.py
@@ -179,10 +179,11 @@ def writeProtocol(self):
         filename = self.benchmarker.path+'/protocol.json'
         with open(filename, 'w') as f:
             json.dump(self.benchmarker.protocol, f)
-    def readProtocol(self):
+    def readProtocol(self, silent=False):
         """
         Loads procol of benchmarker in JSON format.
 
+        :param silent: No output of status
         :return: returns nothing
         """
         try:
