
Basic benchmark app. Very unfinished.

1 parent 26ce169 commit 42fa76ccc15db212268f0dce15e3f671f4c992db @n1mmy n1mmy committed Oct 31, 2012
@@ -0,0 +1 @@
+local
@@ -0,0 +1,8 @@
+# Meteor packages used by this project, one per line.
+#
+# 'meteor add' and 'meteor remove' will edit this file for you,
+# but you can also edit it by hand.
+
+insecure
+preserve-inputs
+bootstrap
@@ -0,0 +1,58 @@
+// Parameters for simulation:
+//
+// Each document is randomly placed in a collection, with a random
+// 'bucket' field. Clients sub to 1 bucket in each collection.
+//
+// - numCollections
+// how many collections to spread the documents over
+// - numBuckets
+// number of buckets per collection.
+//
+// - initialDocuments: Initial documents added by the server. Probably
+//   not usefully combined with maxAgeSeconds.
+//
+// - maxAgeSeconds: How long to leave documents in the database, in
+//   seconds. This, combined with the various rates, determines the
+//   steady-state database size (see the sizing note at the end of this
+//   comment). Falsy to disable.
+//
+// Per-client action rates:
+// - insertsPerSecond
+// - updatesPerSecond
+// - removesPerSecond
+//
+// - documentSize: bytes of randomness per document.
+//   XXX make this a random distribution?
+// - documentNumFields: how many fields of randomness per document.
+//
+// XXX also max documents? (count and remove N)
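+//
+// Rough steady-state sizing (a back-of-the-envelope estimate, not a
+// measurement): with C connected clients each inserting
+// insertsPerSecond documents, and documents expiring after
+// maxAgeSeconds, expect on the order of
+//   C * insertsPerSecond * maxAgeSeconds
+// documents in total, spread across numCollections collections.
+// removesPerSecond pushes this a bit lower.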
+
+SCENARIOS = {
+
+ default: {
+ numCollections: 1,
+ numBuckets: 3,
+ initialDocuments: 1,
+ maxAgeSeconds: 60,
+ insertsPerSecond: 1,
+ updatesPerSecond: 1,
+ removesPerSecond: 0.1,
+ documentSize: 1024,
+ documentNumFields: 8
+ },
+
+ nodata: {
+ numCollections: 1,
+ numBuckets: 1,
+ initialDocuments: 0
+ },
+
+ bigdata: {
+ numCollections: 1,
+ numBuckets: 1,
+ initialDocuments: 1024,
+ updatesPerSecond: 1,
+ documentSize: 10240,
+ documentNumFields: 16
+ }
+
+};
@@ -0,0 +1 @@
+/* CSS declarations go here */
@@ -0,0 +1,23 @@
+<head>
+ <title>benchmark</title>
+</head>
+
+<body>
+ {{> status}}
+
+ {{> params}}
+</body>
+
+<template name="status">
+ <p>Status: {{status}}</p>
+ <p>Update Rate: {{updateRate}}</p>
+</template>
+
+<template name="params">
+ <dl>
+ {{#each params}}
+ <dt>{{key}}</dt><dd>{{value}}</dd>
+ {{/each}}
+ </dl>
+</template>
+
@@ -0,0 +1,168 @@
+// Pick which scenario we run. Pass the 'SCENARIO' environment variable
+// to change this. See 'benchmark-scenarios.js' for the list of
+// scenarios.
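+//
+// For example (assuming the meteor tool passes the environment through
+// to the server process):
+//
+//   SCENARIO=bigdata meteor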
+
+if (Meteor.isServer) {
+ if (process.env.SCENARIO)
+ __meteor_runtime_config__.SCENARIO = process.env.SCENARIO;
+ else
+ __meteor_runtime_config__.SCENARIO = 'default';
+}
+var PARAMS = SCENARIOS[__meteor_runtime_config__.SCENARIO];
+
+
+//////////////////////////////
+// Helper Functions
+//////////////////////////////
+
+var random = function (n) {
+ return Math.floor(Math.random() * n);
+};
+
+var randomChars =
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'.split('');
+var randomString = function (length) {
+ // XXX make more efficient
+ var ret = '';
+ _.times(length, function () {
+ ret += randomChars[random(randomChars.length)];
+ });
+ return ret;
+};
+
+var pickCollection = function () {
+ return Collections[random(Collections.length)];
+};
+
+var generateDoc = function () {
+ var ret = {};
+ ret.bucket = random(PARAMS.numBuckets);
+ // XXX trusting client clock is wrong!!
+ ret.when = +(new Date);
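+  // Split documentSize bytes of random data evenly across
+  // documentNumFields fields.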
+ _.times(PARAMS.documentNumFields, function (n) {
+ ret['Field' + n] = randomString(PARAMS.documentSize/PARAMS.documentNumFields);
+ });
+
+ return ret;
+};
+
+
+//////////////////////////////
+// Data
+//////////////////////////////
+
+
+var Collections = [];
+_.times(PARAMS.numCollections, function (n) {
+ Collections.push(new Meteor.Collection("Collection" + n));
+});
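+// This file runs on both client and server, so both sides end up with
+// the same set of collections: Collection0 .. Collection(numCollections-1).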
+
+
+if (Meteor.isServer) {
+ Meteor.startup(function () {
+ // clear all the collections.
+ _.each(Collections, function (C) {
+ C.remove({});
+ });
+
+ // insert initial docs
+ _.times(PARAMS.initialDocuments, function () {
+ pickCollection().insert(generateDoc());
+ });
+ });
+
+ if (PARAMS.maxAgeSeconds) {
+ Meteor.setInterval(function () {
+ var when = +(new Date) - PARAMS.maxAgeSeconds*1000;
+ _.each(Collections, function (C) {
+ C.remove({when: {$lt: when}});
+ });
+ // Clear out 5% of the DB each time, steady state. XXX parameterize?
+ }, 1000*PARAMS.maxAgeSeconds / 20);
+ }
+
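+  // Publish the documents in one bucket of one collection; the client
+  // picks the collection index and the bucket when it subscribes.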
+ Meteor.publish("data", function (collection, bucket) {
+ var C = Collections[collection];
+ return C.find({bucket: bucket});
+ });
+
+}
+
+
+
+if (Meteor.isClient) {
+  // sub to one random bucket in each collection
+ _.times(PARAMS.numCollections, function (n) {
+ Meteor.subscribe("data", n, random(PARAMS.numBuckets));
+ });
+
+ // templates
+ Template.params.params = function () {
+ return _.map(PARAMS, function (v, k) {
+ return {key: k, value: v};
+ });
+ };
+
+ Template.status.status = function () {
+ return Meteor.status().status;
+ };
+
+ Template.status.updateRate = function () {
+ return Session.get('updateRate') + ", " + Session.get('updateAvg');
+ };
+
+ // do stuff periodically
+
+ if (PARAMS.insertsPerSecond) {
+ Meteor.setInterval(function () {
+ pickCollection().insert(generateDoc());
+ }, 1000 / PARAMS.insertsPerSecond);
+ }
+
+ if (PARAMS.removesPerSecond) {
+ Meteor.setInterval(function () {
+ var C = pickCollection();
+ var docs = C.find({}).fetch();
+ var doc = docs[random(docs.length)];
+ if (doc)
+ C.remove(doc._id);
+ }, 1000 / PARAMS.removesPerSecond);
+ }
+
+ if (PARAMS.updatesPerSecond) {
+ Meteor.setInterval(function () {
+ var C = pickCollection();
+ var docs = C.find({}).fetch();
+ var doc = docs[random(docs.length)];
+ if (doc) {
+ var field = 'Field' + random(PARAMS.documentNumFields);
+      var modifier = {};
+      modifier[field] =
+        randomString(PARAMS.documentSize/PARAMS.documentNumFields);
+      C.update(doc._id, {$set: modifier});
+ }
+ }, 1000 / PARAMS.updatesPerSecond);
+ }
+
+
+  // XXX Very rough per-client update rate. We need to measure this
+  // better; ideally, on the server, we could get the global update rate.
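+  //
+  // updateCount counts every added/changed/removed event seen by this
+  // client's observers. Once a second it is recorded as updateRate (the
+  // last second's count) and updateAvg (the mean of the last 10
+  // samples), which the 'status' template displays.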
+ var updateCount = 0;
+ var updateHistory = [];
+ var updateFunc = function () { updateCount += 1; };
+ _.each(Collections, function (C) {
+ C.find({}).observe({
+ added: updateFunc, changed: updateFunc, removed: updateFunc
+ });
+ });
+ Meteor.setInterval(function () {
+ updateHistory.push(updateCount);
+ if (updateHistory.length > 10)
+ updateHistory.shift();
+ Session.set('updateRate', updateCount);
+    Session.set('updateAvg', _.reduce(updateHistory, function (memo, num) {
+      return memo + num;
+    }, 0) / updateHistory.length);
+ updateCount = 0;
+ }, 1000);
+
+}
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+PORT=9000
+NUM_CLIENTS=10
+DURATION=120
+REPORT_INTERVAL=10
+
+set -e
+trap 'echo "FAILED. Killing: $(jobs -pr)" ; for pid in $(jobs -pr); do kill "$pid" ; done' EXIT
+
+PROJDIR=`dirname $0`
+cd "$PROJDIR"
+PROJDIR=`pwd`
+
+# clean up from previous runs
+# XXX this is gross!
+pkill -f "$PROJDIR/.meteor/local/db" || true
+../../../meteor reset || true
+
+# start the benchmark app
+../../../meteor --production --port $PORT &
+OUTER_PID=$!
+
+
+# start a bunch of phantomjs processes
+PHANTOMSCRIPT=`mktemp -t benchmark-XXXXXXXX`
+cat > "$PHANTOMSCRIPT" <<EOF
+var page = require('webpage').create();
+var url = 'http://localhost:$PORT';
+page.open(url);
+EOF
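+# Each phantomjs instance just loads the app page; the client-side
+# benchmark code then subscribes and starts its insert/update/remove
+# timers, generating load against the server.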
+for ((i = 0 ; i < $NUM_CLIENTS ; i++)) ; do
+ # sleep between each phantom start both to provide a smoother ramp
+ # to the benchmark and because otherwise their PRNGs get set to the
+ # same seed and you get duplicate key errors!
+ sleep 2
+ phantomjs "$PHANTOMSCRIPT" &
+done
+
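+# Report the server's CPU time every REPORT_INTERVAL seconds. The inner
+# main.js process (the app's node process) is a child of the meteor
+# tool started above, so filter the ps output by its parent pid.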
+ps -o cputime,ppid,args | grep " $OUTER_PID " | grep main.js || true
+for ((i = 0 ; i < $DURATION/$REPORT_INTERVAL ; i++)) ; do
+ sleep $REPORT_INTERVAL
+ ps -o cputime,ppid,args | grep " $OUTER_PID " | grep main.js || true
+done
+
+# print totals of all processes (outer, mongo, inner)
+echo
+echo TOTALS
+ps -o cputime,pid,ppid,args | grep " $OUTER_PID " | grep -v grep || true
+
+
+# cleanup
+trap - EXIT
+for pid in $(jobs -pr); do
+  # not sure why we need both, but it seems to help clean up rogue
+  # mongo and phantomjs processes.
+  kill -INT "$pid"
+  kill "$pid"
+done
+rm "$PHANTOMSCRIPT"
+
+
