
First commit

samalba committed Jun 22, 2012
0 parents commit e600ec17a366efade3fff8001fb3d7b314321cbc
Showing with 1,077 additions and 0 deletions.
  1. +46 −0 README.md
  2. +29 −0 app.js
  3. +18 −0 config.json
  4. +14 −0 config_dev.json
  5. +14 −0 config_test.json
  6. +160 −0 lib/master.js
  7. +111 −0 lib/memorymonitor.js
  8. +8 −0 lib/test.js
  9. +424 −0 lib/worker.js
  10. +25 −0 package.json
  11. +21 −0 static/error_400.html
  12. +20 −0 static/error_502.html
  13. +20 −0 static/error_504.html
  14. +20 −0 static/error_default.html
  15. +70 −0 test/proxy.js
  16. +77 −0 test/test_simple.js
46 README.md
@@ -0,0 +1,46 @@
+dotCloud proxy2
+===============
+
+What is it?
+-----------
+
+dotCloud proxy2 is a complete proxy solution based on the node-http-proxy
+library.
+
+Features:
+
+ * Multi-backend load balancing
+ * WebSocket support
+ * Custom HTML error pages
+ * Backend health checks
+ * Multiple workers (each worker can handle multiple clients)
+ * Add or remove a frontend or a backend while the proxy is running
+
+Configuration
+-------------
+
+dotCloud proxy2 uses a Redis server to manage its configuration (and to share
+its state across its workers). You can use the Redis server to change the
+configuration while the proxy is running, or simply to check the health state
+of a backend.
+
+### Configure a frontend
+
+The hostname is `www.dotcloud.com` and its vhost identifier is `mywebsite`:
+
+ $ redis-cli rpush frontend:www.dotcloud.com mywebsite
+ (integer) 1
+
+My frontend has two backends:
+
+ $ redis-cli rpush frontend:www.dotcloud.com http://1.2.3.4:80
+ (integer) 2
+ $ redis-cli rpush frontend:www.dotcloud.com http://1.2.3.5:80
+ (integer) 3
+
+Let's review the configuration:
+
+ $ redis-cli lrange frontend:www.dotcloud.com 0 -1
+ 1) "mywebsite"
+ 2) "http://1.2.3.4:80"
+ 2) "http://1.2.3.5:80"
29 app.js
@@ -0,0 +1,29 @@
+
+'use strict';
+
+var fs = require('fs'),
+ cluster = require('cluster'),
+ util = require('util'),
+ master = require('./lib/master'),
+ worker = require('./lib/worker');
+
+
+var config = (function (path) {
+ var data;
+
+ if (process.env.SETTINGS_FLAVOR !== undefined) {
+ path = path.replace(/\.json$/, '_' + process.env.SETTINGS_FLAVOR + '.json');
+ }
+ util.log('Loading config from ' + path);
+ data = fs.readFileSync(path);
+ return JSON.parse(data);
+}(__dirname + '/config.json'));
+
+if (cluster.isMaster) {
+ // Run the master
+ master(config);
+ util.log('Server is running. ' + JSON.stringify(config.server));
+} else {
+ // Run the worker
+ worker(config);
+}
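
A note on the config loader above: when `SETTINGS_FLAVOR` is set, the `.json` suffix is rewritten to `_<flavor>.json`, which is how the `config_dev.json` and `config_test.json` files in this commit get selected. Assuming the server is started with `node app.js`, usage looks like this:

    $ node app.js                        # loads config.json
    $ SETTINGS_FLAVOR=dev node app.js    # loads config_dev.json
    $ SETTINGS_FLAVOR=test node app.js   # loads config_test.json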
18 config.json
@@ -0,0 +1,18 @@
+{
+ "server": {
+ "accessLog": "/var/log/nginx/access.log",
+ "port": 80,
+ "workers": 10,
+ "maxSockets": 100,
+ "deadBackendTTL": 30,
+ "https": {
+ "port": 443,
+ "key": "/etc/ssl/ssl.key",
+ "cert": "/etc/ssl/ssl.crt"
+ }
+ },
+ "redis": {
+ "port": 6379,
+ "host": "127.0.0.1"
+ }
+}
14 config_dev.json
@@ -0,0 +1,14 @@
+{
+ "server": {
+ "accessLog": "/tmp/proxy2-access.log",
+ "port": 80,
+ "workers": 2,
+ "maxSockets": 100,
+ "deadBackendTTL": 5,
+ "debug": true
+ },
+ "redis": {
+ "port": 6379,
+ "host": "127.0.0.1"
+ }
+}
14 config_test.json
@@ -0,0 +1,14 @@
+{
+ "server": {
+ "accessLog": "/tmp/proxy2_access.log",
+ "port": 1080,
+ "workers": 2,
+ "maxSockets": 100,
+ "deadBackendTTL": 30,
+ "debug": true
+ },
+ "redis": {
+ "port": 6379,
+ "host": "127.0.0.1"
+ }
+}
160 lib/master.js
@@ -0,0 +1,160 @@
+
+'use strict';
+
+var fs = require('fs'),
+ cluster = require('cluster'),
+ events = require('events'),
+ util = require('util');
+
+var accessLog = function (self, path) {
+ var openStream = function () {
+ return fs.createWriteStream(path, {
+ flags: 'a+',
+ mode: 0x1A4 // 0644
+ });
+ },
+ stream = openStream(),
+ months = [
+ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'
+ ];
+
+ process.on('SIGUSR1', function () {
+ // Reload the Stream on signal
+ stream.end();
+ stream = openStream();
+ });
+ self.on('exit', function () {
+ stream.end();
+ });
+ // Format log
+ return function (data) {
+ var addDigit = function (n) {
+ if (n < 10) {
+ return '0' + n;
+ }
+ return n;
+ },
+ line = '',
+ date = new Date(data.currentTime);
+ // Remote addr
+ if (data.remoteAddr.slice(0, 2) !== '::') {
+ line += '::ffff:';
+ }
+ line += data.remoteAddr;
+ // Empty
+ line += ' - - ';
+ // Date
+ line += '[';
+ line += addDigit(date.getUTCDate());
+ line += '/';
+ line += months[date.getUTCMonth()];
+ line += '/';
+ line += date.getUTCFullYear();
+ line += ':';
+ line += addDigit(date.getUTCHours());
+ line += ':';
+ line += addDigit(date.getUTCMinutes());
+ line += ':';
+ line += addDigit(date.getUTCSeconds());
+ line += ' +0000] "';
+ // Request
+ line += data.method;
+ line += ' ';
+ line += data.url;
+ line += ' HTTP/';
+ line += data.httpVersion;
+ line += '" ';
+ // Status code
+ line += data.statusCode;
+ line += ' ';
+ // Bytes sent
+ //FIXME, sometimes we cannot read socketBytesWritten (maybe because of a websocket?)
+ line += data.socketBytesWritten || 0;
+ line += ' "';
+ // Referer
+ line += data.referer || '';
+ line += '" "';
+ // User-Agent
+ line += data.userAgent || '';
+ line += '" "';
+ // Virtual host
+ line += data.virtualHost;
+ line += '" ';
+ // Total time spent
+ line += (data.totalTimeSpent / 1000);
+ line += ' ';
+ // Backend time spent
+ line += (data.backendTimeSpent / 1000);
+ stream.write(line + '\n');
+ };
+};
+
+
+function Master(config) {
+ if (!(this instanceof Master)) {
+ return new Master(config);
+ }
+
+ accessLog = accessLog(this, config.server.accessLog);
+ this.spawnWorkers(config.server.workers);
+}
+
+Master.prototype = new events.EventEmitter();
+
+Master.prototype.spawnWorkers = function (number) {
+ var self = this,
+ workers = [],
+ spawnWorker,
+ onExit,
+ n;
+
+ spawnWorker = function () {
+ var worker = cluster.fork();
+ worker.on('message', function (message) {
+ // Gather the logs from the workers
+ if (message.type === 1) {
+ // normal log
+ util.log('(worker #' + message.from + ') ' + message.data);
+ } else if (message.type === 2) {
+ // access log
+ accessLog(message.data);
+ }
+ });
+ workers.push(worker);
+ };
+
+ // Spawn all workers
+ for (n = 0; n < number; n += 1) {
+ util.log('Spawning worker #' + n);
+ spawnWorker();
+ }
+
+ // When one worker is dead, let's respawn one
+ cluster.on('death', function (worker) {
+ var idx = workers.indexOf(worker),
+ pid;
+
+ pid = (worker.process === undefined) ? worker.pid : worker.process.pid;
+ if (idx >= 0) {
+ workers.splice(idx, 1);
+ }
+ util.log('worker (pid: ' + pid + ') died. Spawning a new one.');
+ spawnWorker();
+ });
+
+ // Set an exit handler
+ onExit = function () {
+ self.emit('exit');
+ util.log('Exiting, killing the workers');
+ workers.forEach(function (worker) {
+ process.kill((worker.process === undefined) ? worker.pid : worker.process.pid);
+ });
+ process.exit(0);
+ };
+ process.on('exit', onExit);
+ process.on('SIGINT', onExit);
+ process.on('SIGTERM', onExit);
+};
+
+module.exports = Master;
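
The master's message handler above defines a small protocol between workers and the master: `type: 1` carries a plain log line, `type: 2` carries the fields the `accessLog` formatter reads. Since `lib/worker.js` is not shown on this page, the worker-side sketch below is illustrative only; the field names mirror what `accessLog()` consumes and the values are made up:

    'use strict';

    var cluster = require('cluster');

    if (cluster.isWorker) {
        // Plain log line, printed by the master as "(worker #<from>) <data>"
        process.send({ type: 1, from: process.pid, data: 'worker started' });

        // Access-log entry, formatted by accessLog() in lib/master.js
        process.send({
            type: 2,
            data: {
                currentTime: Date.now(),
                remoteAddr: '1.2.3.4',
                method: 'GET',
                url: '/',
                httpVersion: '1.1',
                statusCode: 200,
                socketBytesWritten: 512,
                referer: '',
                userAgent: 'curl/7.24.0',
                virtualHost: 'www.dotcloud.com',
                totalTimeSpent: 12,  // milliseconds, logged as seconds
                backendTimeSpent: 8  // milliseconds, logged as seconds
            }
        });
    }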
111 lib/memorymonitor.js
@@ -0,0 +1,111 @@
+
+'use strict';
+
+/*
+ * This module monitors the RSS memory at a specific interval.
+ *
+ * If the memory threshold is reached, it will try to close all the attached
+ * servers so they stop accepting new connections. If the servers have not
+ * stopped after the graceful time period, the process will be forced to exit.
+ * Statistics are logged before exiting.
+ */
+
+function MemoryMonitor(options) {
+ var self = this;
+
+ if (!(this instanceof MemoryMonitor)) {
+ return new MemoryMonitor(options);
+ }
+
+ options = options || {};
+ options.memoryLimit = options.memoryLimit || 100; // 100MB
+ options.gracefulWait = options.gracefulWait || 30; // 30 seconds
+ options.checkInterval = options.checkInterval || 60; // 60 seconds
+ options.logHandler = options.logHandler || console.log;
+ this.options = options;
+ this._servers = [];
+ this._closing = 0;
+ this.stats = {
+ startTime: (new Date()).getTime(),
+ requests: 0,
+ connections: 0,
+ gracefullyExited: true
+ };
+ this.log = function (msg) {
+ this.options.logHandler('MemoryMonitor: ' + msg);
+ };
+ setInterval(function () {
+ self.tick();
+ }, this.options.checkInterval * 1000);
+}
+
+MemoryMonitor.prototype.tick = function () {
+ var memoryLimit = (this.options.memoryLimit * 1024 * 1024),
+ currentMemory = process.memoryUsage().rss;
+
+ if (currentMemory < memoryLimit) {
+ this.log('Memory usage is OK (' + Math.round(currentMemory / (1024 * 1024)) + 'MB)');
+ return;
+ }
+ // Limit reached, starting the exit phase
+ this.log('Memory limit exceeded (' + Math.round(currentMemory / (1024 * 1024)) + 'MB), exiting...');
+ this.exit();
+};
+
+MemoryMonitor.prototype.dumpStatistics = function () {
+ var uptime;
+
+ uptime = (new Date()).getTime() - this.stats.startTime;
+ this.log('=== Exceeded memory report ===');
+ this.log('Gracefully exited: ' + this.stats.gracefullyExited);
+ this.log('Uptime: ' + Math.round(uptime / 1000 / 60) + ' minutes');
+ this.log('Requests: ' + this.stats.requests);
+ this.log('Connections: ' + this.stats.connections);
+};
+
+MemoryMonitor.prototype.exit = function () {
+ var self = this,
+ n = this._servers.length;
+
+ this.log('Waiting for ' + n + ' server handlers...');
+ if (n === 0) {
+ self.dumpStatistics();
+ process.exit(1);
+ }
+ n -= 1;
+ while (n >= 0) {
+ this._closing += 1;
+ this._servers[n].close();
+ n -= 1;
+ }
+ this._servers = [];
+ setTimeout(function () {
+ self.log(self._closing + ' server handler(s) stuck, force exiting...');
+ self.stats.gracefullyExited = false;
+ self.dumpStatistics();
+ process.exit(1);
+ }, this.options.gracefulWait * 1000);
+};
+
+MemoryMonitor.prototype.addServer = function (server) {
+ var self = this;
+
+ server.on('request', function () {
+ self.stats.requests += 1;
+ });
+ server.on('connection', function () {
+ self.stats.connections += 1;
+ });
+ server.on('close', function () {
+ self._closing -= 1;
+ if (self._closing > 0) {
+ return;
+ }
+ // All servers closed, exiting the current process
+ self.dumpStatistics();
+ process.exit(1);
+ });
+ this._servers.push(server);
+};
+
+module.exports = MemoryMonitor;
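
For reference, a usage sketch of `MemoryMonitor` (illustrative, not taken from this commit; the port and request handler are arbitrary): create the monitor with its thresholds, then register each server with `addServer()` so the monitor can count requests and connections and drain the server when the RSS limit is exceeded.

    'use strict';

    var http = require('http'),
        MemoryMonitor = require('./lib/memorymonitor');

    var monitor = new MemoryMonitor({
        memoryLimit: 100,   // MB of RSS before draining starts
        gracefulWait: 30,   // seconds to wait for servers to close
        checkInterval: 60,  // seconds between RSS checks
        logHandler: console.log
    });

    var server = http.createServer(function (req, res) {
        res.end('ok\n');
    });

    // Counts this server's requests/connections and calls server.close()
    // once the memory limit is exceeded.
    monitor.addServer(server);
    server.listen(1080);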