From e9c0f3c3836f548835fa05a404d15fd168747541 Mon Sep 17 00:00:00 2001
From: Patrick Erdelt
Date: Wed, 29 Jun 2022 16:09:06 +0200
Subject: [PATCH] V0.12.1 (#93)

* Prepare next release
* Inspector: workload dict without metrics and reporting
* Docs: JOSS intro
---
 dbmsbenchmarker/inspector.py | 13 +++++++------
 paper.md                     |  3 ++-
 setup.py                     |  2 +-
 3 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/dbmsbenchmarker/inspector.py b/dbmsbenchmarker/inspector.py
index 9857e089..db3297f9 100644
--- a/dbmsbenchmarker/inspector.py
+++ b/dbmsbenchmarker/inspector.py
@@ -25,6 +25,7 @@ from colour import Color
 from numpy import nan
 from datetime import datetime, timezone
+import copy
 
 from dbmsbenchmarker import benchmarker, tools, evaluator, monitor
 
 
@@ -144,6 +145,11 @@ def load_experiment(self, code, anonymize=None, load=True):
         self.benchmarks.computeTimerRun()
         self.benchmarks.computeTimerSession()
         self.e = evaluator.evaluator(self.benchmarks, load=load, force=True)
+        self.workload = copy.deepcopy(self.e.evaluation['general'])
+        # remove metrics
+        del(self.workload['loadingmetrics'])
+        del(self.workload['streamingmetrics'])
+        del(self.workload['reporting'])
     def get_experiment_list_queries(self):
         # list of successful queries
         return self.benchmarks.listQueries()
@@ -260,12 +266,7 @@ def get_experiment_query_properties(self, numQuery=None):
         return self.e.evaluation['query']
     def get_experiment_workload_properties(self):
         # dict of workload properties
-        workload = self.e.evaluation['general']
-        # remove metrics
-        del(workload['loadingmetrics'])
-        del(workload['streamingmetrics'])
-        del(workload['reporting'])
-        return workload
+        return self.workload
     #def get_measures(self, numQuery, timer, warmup=0, cooldown=0):
     def get_timer(self, numQuery, timer, warmup=0, cooldown=0):
         # dataframe of dbms x measures
diff --git a/paper.md b/paper.md
index e44bb04d..f2023447 100644
--- a/paper.md
+++ b/paper.md
@@ -34,7 +34,8 @@ See the [homepage](https://github.com/Beuth-Erdelt/DBMS-Benchmarker) and the [do
 
 # Statement of Need
 
-There are a variety of (relational) database management systems (DBMS) and a lot of products.
+Benchmarking of database management systems (DBMS) is an active research area.
+There are a variety of DBMS and a lot of products.
 The types thereof can be divided into for example row-wise, column-wise, in-memory, distributed and GPU-enhanced.
 All of these products have unique characteristics, special use cases, advantages and disadvantages and their justification.
 In order to be able to verify and ensure the performance measurement, we want to be able to create and repeat benchmarking scenarios.
diff --git a/setup.py b/setup.py
index 56a486b1..dde13798 100644
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,7 @@
 
 setuptools.setup(
     name="dbmsbenchmarker",
-    version="0.11.22",
+    version="0.12.1",
     author="Patrick Erdelt",
     author_email="perdelt@beuth-hochschule.de",
     description="DBMS-Benchmarker is a Python-based application-level blackbox benchmark tool for Database Management Systems (DBMS). It connects to a given list of DBMS (via JDBC) and runs a given list of parametrized and randomized (SQL) benchmark queries. Evaluations are available via Python interface, in reports and at an interactive multi-dimensional dashboard.",
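
Background on the inspector change: before this patch, get_experiment_workload_properties() deleted the 'loadingmetrics', 'streamingmetrics' and 'reporting' keys from the live self.e.evaluation['general'] dict on every call, so the metrics were lost for any other consumer and a second call raised KeyError on the already-deleted keys. The patch strips the keys once in load_experiment(), working on a deep copy. A minimal usage sketch, assuming the documented inspector entry point inspector.inspector(resultfolder); the result folder path and experiment code below are hypothetical placeholders, not taken from this patch:

    from dbmsbenchmarker import inspector

    # Hypothetical result folder and experiment code from an earlier benchmark run.
    evaluate = inspector.inspector("/path/to/results")
    evaluate.load_experiment("1234567890")

    # Before this patch, the second call raised KeyError because the first one
    # had already deleted the metric keys from the shared evaluation dict.
    workload = evaluate.get_experiment_workload_properties()
    workload = evaluate.get_experiment_workload_properties()  # now safe to repeat

    # The full evaluation, metrics included, remains intact:
    assert 'loadingmetrics' in evaluate.e.evaluation['general']

A shallow dict copy would already shield the top-level del() calls from the original; copy.deepcopy() additionally lets callers mutate nested entries of the returned dict without corrupting self.e.evaluation.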