Skip to content

Commit

Permalink
Improve report
Browse files Browse the repository at this point in the history
  • Loading branch information
bdelbosc committed Sep 2, 2011
1 parent 0fd07d3 commit a5e7ae6
Show file tree
Hide file tree
Showing 5 changed files with 99 additions and 40 deletions.
47 changes: 33 additions & 14 deletions README.txt
Expand Up @@ -4,25 +4,49 @@ benchbase

NAME
----
benchbase - Store JMeter or FunkLoad benchmark results into a database.
Try to do something useful with the data.
benchbase - Store and manage JMeter or FunkLoad benchmark results.
Produces detailed reports.

SYNOPSIS
-------
USAGE
-----

::
benchbase [--version] [--logfile=LOGFILE] [--database=DATABASE] COMMAND [OPTIONS] [ARGUMENT]

COMMANDS
~~~~~~~~~

list
List the imported benchmark in the database.

info BID
Give more information about the benchmark with the bid number (benchmark identifier).

import [--jmeter|--funkload|--comment] FILE
Import the benchmark result into the database. Output the BID number.

report --output REPORT_DIR BID
  Generate the report for the imported benchmark.

benchbase ....
EXAMPLES
~~~~~~~~~

benchbase list
List of imported benchmarks.

benchbase import -m"Tir 42" jmeter-2010.xml
Import a JMeter benchmark result file.

benchbase report 12 -o /tmp/report-tir43
Build the report of benchmark bid 12 into /tmp/report-tir43 directory

DESCRIPTION
-----------


REQUIRES
--------

benchbase requires `gnuplot <http://www.gnuplot.info/>`_.
Benchbase requires `gnuplot <http://www.gnuplot.info/>`_ and sqlite3; on Debian/Ubuntu::

sudo aptitude install sqlite3 gnuplot


INSTALLATION
Expand All @@ -32,8 +56,3 @@ INSTALLATION
sudo easy_install benchbase


EXAMPLES
--------

benchbase -h
Gives you the available options.
14 changes: 9 additions & 5 deletions TODO.txt
Expand Up @@ -2,19 +2,18 @@
#+TITLE: benchbase to do list
#+AUTHOR: Benoit Delbosc

* TODO add summary with min/max/avg
* TODO enhance with percentile + stddev
* TODO list should display start and duration date
* TODO command: delete bid
* TODO command: update comment
benchbase comment -m"bla" 1
* TODO command: delete bid
* TODO update the README.txt
* TODO funkload import
* TODO monitoring report ?
- gc -> pb with start date?
- sar
- misc csv input ?
* TODO misc metadata
* TODO add misc file
* TODO command add metadata
* TODO command add misc file
as blob

btrack add-file -m"comment" [-g GROUP] foo.bar
Expand All @@ -23,6 +22,11 @@
|-----+----------+---------|
| | | |
btrack get-file BID [filename]
* DONE add summary with min/max/avg
CLOSED: [2011-09-02 ven. 15:04]
:LOGBOOK:
- State "DONE" from "TODO" [2011-09-02 ven. 15:04]
:END:
* DONE align all chart start/end
CLOSED: [2011-09-02 ven. 12:13]
:LOGBOOK:
Expand Down
67 changes: 51 additions & 16 deletions benchbase/benchbase.py
Expand Up @@ -20,14 +20,32 @@
directories=[pkg_resources.resource_filename('benchbase', '/templates')],
)

USAGE = """benchbase list|import|report
USAGE = """benchbase [--version] [--logfile=LOGFILE] [--database=DATABASE] COMMAND [OPTIONS] [ARGUMENT]
benchbase list: list existing bench results
COMMANDS:
benchbase import [option] bench-result-file.xml
import options: -j -f -m
list
List the imported benchmark in the database.
benchbase report <BID>
info BID
Give more information about the benchmark with the bid number (benchmark identifier).
import [--jmeter|--funkload|--comment] FILE
Import the benchmark result into the database. Output the BID number.
report --output REPORT_DIR BID
Generate the report for the imported benchmark
EXAMPLES:
benchbase list
List of imported benchmarks.
benchbase import -m"Tir 42" jmeter-2010.xml
Import a JMeter benchmark result file.
benchbase report 12 -o /tmp/report-tir43
Build the report of benchmark bid 12 into /tmp/report-tir43 directory
"""

Expand Down Expand Up @@ -187,10 +205,10 @@ def initializeDb(options):

def listBenchmarks(db):
c = db.cursor()
c.execute('SELECT ROWID, date, generator, comment FROM bench')
print "%5s %-19s %8s %s" % ('bid', 'imported', 'from', 'comment')
c.execute('SELECT ROWID, date, generator, filename, comment FROM bench')
print "%5s %-19s %-8s %-30s %s" % ('bid', 'Imported', 'Tool', 'Filename', 'Comment')
for row in c:
print "%5d %19s %8s %s" % (row[0], row[1][:19], row[2], row[3])
print "%5d %19s %-8s %-30s %s" % (row[0], row[1][:19], row[2], os.path.basename(row[3]), row[4])
c.close()


Expand All @@ -214,7 +232,7 @@ def alreadyImported(self, md5, filename):

def registerBench(self, md5, filename):
c = self.db.cursor()
t = (md5, filename, datetime.datetime.now(), self.options.comment, 'jmeter')
t = (md5, filename, datetime.datetime.now(), self.options.comment, 'JMeter')
c.execute("INSERT INTO bench (md5sum, filename, date, comment, generator) VALUES (?, ?, ?, ?, ?)", t)

t = (md5, )
Expand Down Expand Up @@ -265,20 +283,34 @@ def doImport(self, filename):
def getInfo(self, bid):
t = (bid, )
c = self.db.cursor()
c.execute("SELECT date, comment, generator, filename FROM bench WHERE ROWID = ?", t)
try:
imported, comment, generator, filename = c.fetchone()
except TypeError:
logging.error('Invalid bid: %s' % bid)
raise ValueError('Invalid bid: %s' % bid)
c.execute("SELECT COUNT(stamp), datetime(MIN(stamp), 'unixepoch', 'localtime')"
", time(MAX(stamp), 'unixepoch', 'localtime') FROM sample WHERE bid = ?", t)
count, start, end = c.fetchone()
c.execute("SELECT COUNT(stamp) FROM sample WHERE bid = ? AND success = 0", t)
error = c.fetchone()[0]
c.execute("SELECT DISTINCT(lb) FROM sample WHERE bid = ?", t)
samples = [row[0] for row in c]
c.execute("SELECT date, comment FROM bench WHERE ROWID = ?", t)
imported, comment = c.fetchone()
sampleNames = [row[0] for row in c]
c.execute("SELECT MAX(na), MAX(stamp) - MIN(stamp), AVG(t), MAX(t), MIN(t) FROM sample WHERE bid = ?", t)
maxThread, duration, avgt, maxt, mint = c.fetchone()
return {'bid': bid, 'count': count, 'start': start, 'end': end,
samples = {}
for sample in sampleNames:
t = (bid, sample)
c.execute("SELECT AVG(t), MAX(t), MIN(t), COUNT(t) FROM sample WHERE bid = ? AND lb = ?", t)
row = c.fetchone()
samples[sample] = {'avgt': row[0] / 1000., 'maxt': row[1] / 1000., 'mint': row[2] / 1000.,
'count': row[3], 'duration': duration}
c.execute("SELECT COUNT(t) FROM sample WHERE bid = ? AND lb = ? AND success = 0", t)
samples[sample]['error'] = c.fetchone()[0]
return {'bid': bid, 'count': count, 'start': start, 'end': end, 'filename': os.path.basename(filename),
'error': error, 'samples': samples, 'imported': imported[:19], 'comment': comment,
'maxThread': maxThread, 'duration': duration, 'avgt': avgt / 1000., 'maxt': maxt / 1000., 'mint': mint / 1000.}
'maxThread': maxThread, 'duration': duration, 'avgt': avgt / 1000.,
'maxt': maxt / 1000., 'mint': mint / 1000., 'generator': generator}

def buildReport(self, bid):
output_dir = self.options.output
Expand All @@ -290,7 +322,7 @@ def buildReport(self, bid):
'start': info['start'][11:19],
'end': info['end'],
'bid': bid}
samples = info.get('samples') + ['global', ]
samples = info['samples'].keys() + ['global', ]
for sample in samples:
params['sample'] = sample
params['filter'] = " AND lb = '%s' " % sample
Expand All @@ -304,14 +336,14 @@ def buildReport(self, bid):
f.write(script)
f.close()
gnuplot(script_path)
params = self.getInfo(bid)
report = render_template('report.mako', **info)
rst_path = os.path.join(output_dir, "index.rst")
f = open(rst_path, 'w')
f.write(report)
f.close()
html_path = os.path.join(output_dir, "index.html")
generateHtml(rst_path, html_path, output_dir)
logging.info('Report generated: ' + html_path)


def initLogging(options):
Expand Down Expand Up @@ -389,6 +421,9 @@ def main():
if len(args) != 3:
parser.error('Missing bid')
return
if not options.output:
parser.error('Missing --output option')
return
db = initializeDb(options)
jm = Jmeter(db, options)
jm.buildReport(args[2])
Expand Down
9 changes: 5 additions & 4 deletions benchbase/templates/report.mako
Expand Up @@ -14,19 +14,20 @@ Bench configuration
* Launched: ${start}, end: ${end}
* Duration: ${duration}s
* Maximum number of threads: ${maxThread}
* Load test tool: ${generator}
* Imported into benchbase: ${imported}

* From file: ${filename}

Bench summary
---------------

=============== ========== ========== ============ ============= =========== ============ ====================
 Sample name      Samples    Failure    Success Rate Average Time  Min Time    Max Time     Average Throughput
 Sample name      Samples    Failures   Success Rate Average Time  Min Time    Max Time     Average Throughput
--------------- ---------- ---------- ------------ ------------- ----------- ------------ --------------------
${"ALL %10d %10d %11.2f%% %11.3fs %10.3fs %12.3fs %7.3f" % (count, error, (100. - (error * 100. / count)), avgt, mint, maxt, count / (1.0 * duration))}
=============== ========== ========== ============ ============= =========== ============ ====================
% for sample in samples:
${"%-15s" % sample} ${"%10d" % count} ${"%10d" % error} ${"%9.2f" % (100. - (error * 100. / count))}%
% for name, sample in samples.items():
${"%-15s %10d %10d %11.2f%% %11.3fs %10.3fs %12.3fs %7.3f" % (name, sample['count'], sample['error'], (100. - (sample['error'] * 100. / sample['count'])), sample['avgt'], sample['mint'], sample['maxt'], sample['count'] / (1.0 * sample['duration']))}
% endfor
=============== ========== ========== ============ ============= =========== ============ ====================

Expand Down
2 changes: 1 addition & 1 deletion setup.py
Expand Up @@ -33,7 +33,7 @@
download_url="http://pypi.python.org/packages/source/t/benchbase/benchbase-%s.tar.gz" % __version__,
packages = find_packages(),
license='GPL',
keywords='monitoring csv chart png gnuplot',
keywords='benchmark jmeter funkload report chart',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
Expand Down

0 comments on commit a5e7ae6

Please sign in to comment.