Work in progress with tabs > spaces

commit da4e6c9b3c88b48779d26e35d21d1f4dba9215d8 1 parent 5e78d40
@3rd-Eden authored
Showing with 755 additions and 3,130 deletions.
  1. +1 −0  index.js
  2. +718 −0 lib/memcached.js
  3. +0 −2,387 lib/zip.js
  4. +0 −708 nMemcached.js
  5. +36 −35 package.json
1  index.js
@@ -0,0 +1 @@
+module.exports = require('./lib/memcached.js');
718 lib/memcached.js
@@ -0,0 +1,718 @@
+var EventEmitter = require('events').EventEmitter
+ , Stream = require('net').Stream
+ , Buffer = require('buffer').Buffer;
+
+var HashRing = require('./hashring').HashRing
+ , Connection = require('./connection')
+ , Utils = require('./utils')
+ , Manager = Connection.Manager
+ , IssueLog = Connection.IssueLog;
+
+// The constructor
+function Client(args, options){
+ var servers = []
+ , weights = {}
+ , key;
+
+ // Parse down the connection arguments
+ switch (Object.prototype.toString.call(args)){
+ case '[object String]':
+ servers.push(args);
+ break;
+ case '[object Object]':
+ weights = args;
+ servers = Object.keys(args);
+ break;
+ case '[object Array]':
+ default:
+ servers = args;
+ break;
+ }
+
+ if (!servers.length) throw new Error('No servers were supplied in the arguments');
+
+ // merge with global and user config
+ Utils.merge(this, Client.config);
+ Utils.merge(this, options);
+ EventEmitter.call(this);
+
+ this.servers = servers;
+ this.HashRing = new HashRing(servers, weights, this.algorithm);
+ this.connections = {};
+ this.issues = [];
+};
+
+// Allows users to configure the memcached globally or per memcached client
+Client.config = {
+ maxKeySize: 251 // max keysize allowed by Memcached
+, maxExpiration: 2592000 // max expiration duration allowed by Memcached
+, maxValue: 1048576 // max length of value allowed by Memcached
+
+, algorithm: 'crc32' // hashing algorithm that is used for key mapping
+
+, poolSize: 10 // maximum parallel connections
+, reconnect: 18000000 // if dead, attempt reconnect each xx ms
+, timeout: 5000 // after x ms the server should send a timeout if we can't connect
+, retries: 5 // amount of retries before the server is marked as dead
+, retry: 30000 // timeout between retries; all calls will be marked as cache misses
+, remove: false // remove the server if dead; if false, we will attempt to reconnect
+, redundancy: false // allows you to re-distribute the keys over x amount of servers
+
+, keyCompression: true // compress keys if they are too large (md5)
+};
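A minimal usage sketch of the constructor and these defaults (server addresses are hypothetical; index.js above exports this Client):

var Memcached = require('./index.js');

// a single server string, an array of servers, or an object with weights
var single = new Memcached('localhost:11211');
var cluster = new Memcached(['10.0.0.1:11211', '10.0.0.2:11211']);
var weighted = new Memcached({ '10.0.0.1:11211': 1, '10.0.0.2:11211': 2 });

// override defaults per client, or globally for every client created afterwards
var tuned = new Memcached('localhost:11211', { poolSize: 25, retries: 3 });
Memcached.config.poolSize = 25;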
+
+// There are some functions we don't want users to touch, so we scope them
+(function(nMemcached){
+ const LINEBREAK = '\r\n'
+ , NOREPLY = ' noreply'
+ , FLUSH = 1E3
+ , BUFFER = 1E2
+ , CONTINUE = 1E1
+ , FLAG_JSON = 1<<1
+ , FLAG_BINARY = 2<<1;
+
+ var memcached = nMemcached.prototype = new EventEmitter
+ , private = {}
+ , undefined;
+
+ // Creates or generates a new connection for the given server; the callback will receive the connection
+ // if the operation was successful
+ memcached.connect = function connect(server, callback){
+ // server is dead, bail out
+ if (server in this.issues && this.issues[server].failed) return callback(false, false);
+
+ // fetch from connection pool
+ if (server in this.connections) return this.connections[server].allocate(callback);
+
+ // No connection factory created yet, so we must build one
+ var serverTokens = /(.*):(\d+){1,}$/.exec(server).reverse()
+ , memcached = this;
+
+ // drop the full match from the exec result so only [port, hostname] remain
+ serverTokens.pop();
+
+ this.connections[server] = new Manager(server, this.poolSize, function(callback){
+ var S = new Stream
+ , Manager = this;
+
+ // config the Stream
+ S.setTimeout(memcached.timeout);
+ S.setNoDelay(true);
+ S.metaData = [];
+ S.responseBuffer = "";
+ S.bufferArray = [];
+ S.server = server;
+ S.tokens = serverTokens;
+
+ // Add the event listeners
+ Utils.fuse(S, {
+ connect: function streamConnect(){ callback(false, this) }
+ , close: function streamClose(){ Manager.remove(this) }
+ , error: function streamError(err){ memcached.connectionIssue(err, S, callback) }
+ , data: Utils.curry(memcached, private.buffer, S)
+ , timeout: function streamTimeout(){ Manager.remove(this) }
+ , end: S.end
+ });
+
+ // connect the net.Stream [port, hostname]
+ S.connect.apply(S, serverTokens);
+ return S;
+ });
+
+ // now that we have setup our connection factory we can allocate a new connection
+ this.connections[server].allocate(callback);
+ };
+
+ // Creates a multi stream, so it's easier to query against
+ // multiple memcached servers.
+ memcached.multi = function memcachedMulti(keys, callback){
+ var map = {}
+ , memcached = this
+ , servers
+ , i;
+
+ // gets all servers based on the supplied keys,
+ // or just gives all servers if we don't have keys
+ if (keys){
+ keys.forEach(function fetchMultipleServers(key){
+ var server = memcached.HashRing.getNode(key);
+ if (map[server]){
+ map[server].push(key);
+ } else {
+ map[server] = [key];
+ }
+ });
+ // store the servers
+ servers = Object.keys(map);
+ } else {
+ servers = this.servers;
+ }
+
+ i = servers.length;
+ while(i--){
+ callback.call(this, servers[i], map[servers[i]], i, servers.length);
+ }
+ };
+
+ // Executes the command on the net.Stream; if no server is supplied it will use the query.key to get
+ // the server from the HashRing
+ memcached.command = function memcachedCommand(queryCompiler, server){
+
+ // generate a regular query,
+ var query = queryCompiler()
+ , redundancy = this.redundancy && this.redundancy < this.servers.length
+ , queryRedundancy = query.redundancyEnabled
+ , memcached = this;
+
+ // validate the arguments
+ if (query.validate && !Utils.validateArg(query, this)) return;
+
+ // fetch servers
+ server = server ? server : redundancy && queryRedundancy ? (redundancy = this.HashRing.createRange(query.key, (this.redundancy + 1), true)).shift() : this.HashRing.getNode(query.key);
+
+ // check if the server is still alive
+ if(server in this.issues && this.issues[server].failed) return query.callback && query.callback(false, false);
+
+ this.connect(server, function allocateMemcachedConnection(error, S){
+ // check for issues
+ if (!S) return query.callback && query.callback(false, false);
+ if (error) return query.callback && query.callback(error);
+ if (S.readyState !== 'open') return query.callback && query.callback('Connection readyState is set to ' + S.readyState);
+
+ // used for request timing
+ query.start = +new Date;
+ S.metaData.push(query);
+ S.write(query.command + LINEBREAK);
+ });
+
+ // If we have redundancy enabled and the query is used for redundancy, then we are going to loop over
+ // the servers, check if we can reach them, and connect to the correct net connection.
+ // Because all redundancy queries are executed with "no reply", we do not need to store the callback
+ // as there will be no value to parse.
+ if (redundancy && queryRedundancy){
+ queryRedundancy = queryCompiler(queryRedundancy);
+ redundancy.forEach(function(server){
+ if (server in memcached.issues && memcached.issues[server].failed) return;
+
+ memcached.connect(server, function allocateMemcachedConnection(error, S){
+ if (!S || error || S.readyState !== 'open') return;
+ S.write(queryRedundancy.command + LINEBREAK);
+ });
+ })
+ }
+ };
+
+ // Logs all connection issues and hands them off, marking all requests as cache misses.
+ memcached.connectionIssue = function connectionIssue(error, S, callback){
+ // end connection and mark callback as cache miss
+ if (S && S.end) S.end();
+ if (callback) callback(false, false);
+
+ var issues
+ , server = S.server
+ , memcached = this;
+
+ // check for existing issue logs, or create a new log
+ if(server in this.issues){
+ issues = this.issues[server];
+ } else {
+ issues = this.issues[server] = new IssueLog({
+ server: server
+ , tokens: S.tokens
+ , reconnect: this.reconnect
+ , retries: this.retries
+ , retry: this.retry
+ , remove: this.remove
+ });
+
+ // proxy the events
+ Utils.fuse(issues, {
+ issue: function(details){ memcached.emit('issue', details) }
+ , failure: function(details){ memcached.emit('failure', details) }
+ , reconnecting: function(details){ memcached.emit('reconnecting', details) }
+ , reconnected: function(details){ memcached.emit('reconnect', details) }
+ , remove: function(details){
+ // emit event and remove servers
+ memcached.emit('remove', details);
+ memcached.connections[server].end();
+
+ if (this.failOverServers && this.failOverServers.length){
+ memcached.HashRing.replaceServer(server, this.failOverServers.shift());
+ } else {
+ memcached.HashRing.removeServer(server);
+ }
+ }
+ });
+ }
+
+ // log the issue
+ issues.log(error);
+ };
+
+ // Kills all active connections
+ memcached.end = function endMemcached(){
+ var memcached = this;
+ Object.keys(this.connections).forEach(function closeConnection(key){
+ memcached.connections[key].free(0)
+ });
+ };
+
+ // These do not need to be publicly available as it's one of the most important
+ // parts of the whole client, the parser commands:
+ private.parsers = {
+ // handle error responses
+ 'NOT_FOUND': function(tokens, dataSet, err){ return [CONTINUE, false] }
+ , 'NOT_STORED': function(tokens, dataSet, err){ return [CONTINUE, false] }
+ , 'ERROR': function(tokens, dataSet, err){ err.push('Received an ERROR response'); return [FLUSH, false] }
+ , 'CLIENT_ERROR': function(tokens, dataSet, err){ err.push(tokens.splice(1).join(' ')); return [BUFFER, false] }
+ , 'SERVER_ERROR': function(tokens, dataSet, err, queue, S, memcached){ memcached.connectionIssue(tokens.splice(1).join(' '), S); return [CONTINUE, false] }
+
+ // keyword based responses
+ , 'STORED': function(tokens, dataSet){ return [CONTINUE, true] }
+ , 'DELETED': function(tokens, dataSet){ return [CONTINUE, true] }
+ , 'OK': function(tokens, dataSet){ return [CONTINUE, true] }
+ , 'EXISTS': function(tokens, dataSet){ return [CONTINUE, false] }
+ , 'END': function(tokens, dataSet, err, queue){ if (!queue.length) queue.push(false); return [FLUSH, true] }
+
+ // value parsing:
+ , 'VALUE': function(tokens, dataSet, err, queue){
+ var key = tokens[1]
+ , flag = +tokens[2]
+ , expire = tokens[3]
+ , cas = tokens[4]
+ , multi = this.metaData[0] && this.metaData[0].multi || cas ? {} : false
+ , tmp;
+
+ switch (flag){
+ case FLAG_JSON:
+ dataSet = JSON.parse(dataSet);
+ break;
+ case FLAG_BINARY:
+ tmp = new Buffer(dataSet.length);
+ tmp.write(dataSet, 0, 'binary');
+ dataSet = tmp;
+ break;
+ }
+
+ // Add to queue as multiple get key key key key key returns multiple values
+ if (!multi){
+ queue.push(dataSet);
+ } else {
+ multi[key] = dataSet;
+ if (cas) multi.cas = cas;
+ queue.push(multi);
+ }
+
+ return [BUFFER, false]
+ }
+ , 'INCRDECR': function(tokens){ return [CONTINUE, +tokens[1]] }
+ , 'STAT': function(tokens, dataSet, err, queue){
+ queue.push([tokens[1], /^\d+$/.test(tokens[2]) ? +tokens[2] : tokens[2]]);
+ return [BUFFER, true]
+ }
+ , 'VERSION': function(tokens, dataSet){
+ var versionTokens = /(\d+)(?:\.)(\d+)(?:\.)(\d+)$/.exec(tokens.pop());
+
+ return [CONTINUE, {
+ server: this.server
+ , version: versionTokens[0]
+ , major: versionTokens[1] || 0
+ , minor: versionTokens[2] || 0
+ , bugfix: versionTokens[3] || 0
+ }];
+ }
+ , 'ITEM': function(tokens, dataSet, err, queue){
+ queue.push({
+ key: tokens[1]
+ , b: +tokens[2].substr(1)
+ , s: +tokens[4]
+ });
+ return [BUFFER, false]
+ }
+ };
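For orientation, a sketch (comments only, not part of the commit) of the parser contract used above, in terms of the constants defined at the top of this scope:

// every parser returns [action, result]:
// [CONTINUE, result] -> this response is complete, fire the pending query's callback with result
// [BUFFER, result]   -> more lines belong to this response, keep collecting into the queue
// [FLUSH, result]    -> flush everything queued so far to the pending query's callback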
+
+ // Parses down result sets
+ private.resultParsers = {
+ // combines the stats array, in to an object
+ 'stats': function(resultSet){
+ var response = {};
+
+ // add references to the retrieved server
+ response.server = this.server;
+
+ // Fill the object
+ resultSet.forEach(function(statSet){
+ response[statSet[0]] = statSet[1];
+ });
+
+ return response;
+ },
+
+ // 'stats settings' uses the same parse format as the regular stats
+ 'stats settings':function(){
+ return private.resultParsers.stats.apply(this, arguments);
+ },
+
+ // Group slabs by slab id
+ 'stats slabs': function(resultSet){
+ var response = {};
+
+ // add references to the retrieved server
+ response.server = this.server;
+
+ // Fill the object
+ resultSet.forEach(function(statSet){
+ var identifier = statSet[0].split(':');
+
+ if(!response[identifier[0]])
+ response[identifier[0]] = {};
+
+ response[identifier[0]][identifier[1]] = statSet[1];
+
+ });
+
+ return response;
+ },
+ 'stats items': function(resultSet){
+ var response = {};
+
+ // add references to the retrieved server
+ response.server = this.server;
+
+ // Fill the object
+ resultSet.forEach(function(statSet){
+ var identifier = statSet[0].split(':');
+
+ if(!response[identifier[1]])
+ response[identifier[1]] = {};
+
+ response[identifier[1]][identifier[2]] = statSet[1];
+
+ });
+
+ return response;
+ }
+ };
+
+ // Generates a RegExp that can be used to check if a chunk is a memcached response identifier
+ private.allCommands = new RegExp('^(?:' + Object.keys(private.parsers).join('|') + '|\\d' + ')');
+ private.bufferedCommands = new RegExp('^(?:' + Object.keys(private.parsers).join('|') + ')');
+
+ // When working with large chunks of responses, node chunks it into pieces. So we might have
+ // half responses. So we are going to buffer up the buffer and use our buffered buffer to query
+ // against. Also, when you execute a lot of .writes to the same stream, node will combine the responses
+ // into one response stream with no indication of where it cut the data. It can cut inside the value response,
+ // or even right in the middle of a linebreak, so we need to make sure the last piece in the buffer is a LINEBREAK,
+ // because that is all that is certain about the Memcached protocol: all responses end with one.
+ private.buffer = function BufferBuffer(S, BufferStream){
+ S.responseBuffer += BufferStream;
+
+ // only transform the data once we are sure, 100% sure, that we have a valid response ending
+ if(S.responseBuffer.substr(S.responseBuffer.length - 2) === LINEBREAK){
+ var chunks = S.responseBuffer.split(LINEBREAK);
+
+ S.responseBuffer = ""; // clear!
+ this.rawDataReceived(S, S.bufferArray = S.bufferArray.concat(chunks));
+ }
+ };
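A small sketch of why the trailing LINEBREAK check matters (hypothetical chunks):

var LINEBREAK = '\r\n';

// two TCP chunks that split a response mid-line
var first = 'VALUE foo 0 3' + LINEBREAK + 'ba'
  , second = 'r' + LINEBREAK + 'END' + LINEBREAK;

first.substr(first.length - 2) === LINEBREAK; // false -> keep buffering
(first + second).split(LINEBREAK);            // ['VALUE foo 0 3', 'bar', 'END', '']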
+
+ // The actual parser function that scans over the responseBuffer in search of Memcached response
+ // identifiers. Once we have found one, we will send it to the dedicated parser that will transform
+ // the data into a human readable format, deciding if we should queue it up or send it to a callback fn.
+ memcached.rawDataReceived = function rawDataReceived(S){
+ var queue = [], token, tokenSet, dataSet = '', resultSet, metaData, err = [], tmp;
+
+ while(S.bufferArray.length && private.allCommands.test(S.bufferArray[0])){
+
+ token = S.bufferArray.shift();
+ tokenSet = token.split(' ');
+
+ // special case for digit-only responses; these come from INCR and DECR
+ if(/\d+/.test(tokenSet[0]))
+ tokenSet.unshift('INCRDECR');
+
+ // special case for VALUE: it requires a second response line!
+ // add the token back and wait for the next response, we might be handling a
+ // rather large response here.
+ if(tokenSet[0] == 'VALUE' && S.bufferArray.indexOf('END') == -1){
+ return S.bufferArray.unshift(token);
+ }
+
+ // check for dedicated parser
+ if(private.parsers[tokenSet[0]]){
+
+ // fetch the response content
+ while(S.bufferArray.length){
+ if(private.bufferedCommands.test(S.bufferArray[0]))
+ break;
+
+ dataSet += S.bufferArray.shift();
+ }
+
+ resultSet = private.parsers[tokenSet[0]].call(S, tokenSet, dataSet || token, err, queue, this);
+
+ // check how we need to handle the resultSet response
+ switch(resultSet.shift()){
+ case BUFFER:
+ break;
+
+ case FLUSH:
+ metaData = S.metaData.shift();
+ resultSet = queue;
+
+ // if we have a callback, call it
+ if(metaData && metaData.callback){
+ metaData.execution = +new Date - metaData.start;
+ metaData.callback.call(
+ metaData, err.length ? err : err[0],
+
+ // see if optional parsing needs to be applied to make the result set more readable
+ private.resultParsers[metaData.type] ? private.resultParsers[metaData.type].call(S, resultSet, err) :
+ !Array.isArray(queue) || queue.length > 1 ? queue : queue[0]
+ );
+ }
+
+ queue.length = 0;
+ err.length = 0;
+ break;
+
+ case CONTINUE:
+ default:
+ metaData = S.metaData.shift();
+
+ if(metaData && metaData.callback){
+ metaData.execution = +new Date - metaData.start;
+ metaData.callback.call(metaData, err.length > 1 ? err : err[0], resultSet[0]);
+ }
+
+ err.length = 0;
+ break;
+ }
+ } else {
+ // handle unknown responses
+ metaData = S.metaData.shift();
+ if(metaData && metaData.callback){
+ metaData.execution = +new Date - metaData.start;
+ metaData.callback.call(metaData, 'Unknown response from the memcached server: "' + token + '"', false);
+ }
+ }
+
+ // cleanup
+ dataSet = '';
+ tokenSet = undefined;
+ metaData = undefined;
+
+ // check if we need to remove an empty item from the array, as splitting on \r\n might cause an empty
+ // item at the end.
+ if(S.bufferArray[0] === '')
+ S.bufferArray.shift();
+ };
+ };
+
+ // Small wrapper function that only reports errors when we have a callback
+ private.errorResponse = function errorResponse(error, callback){
+ if(typeof callback == 'function')
+ callback(error, false);
+
+ return false;
+ };
+
+ // This is where the actual Memcached API layer begins:
+ memcached.get = function get(key, callback){
+ if(Array.isArray(key))
+ return this.getMulti.apply(this, arguments);
+
+ this.command(function getCommand(noreply){ return {
+ key: key, callback: callback,
+
+ // validate the arguments
+ validate: [['key', String], ['callback', Function]],
+
+ // used for the query
+ type: 'get',
+ command: 'get ' + key
+ }});
+ };
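Usage sketch (client and key are hypothetical):

client.get('foo', function(err, result){
  // result is false on a cache miss, otherwise the stored value
});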
+
+ // the difference between get and gets is that gets also returns a cas value,
+ // and gets doesn't support multigets atm
+ memcached.gets = function gets(key, callback){
+ this.command(function getCommand(noreply){ return {
+ key: key, callback: callback,
+
+ // validate the arguments
+ validate: [['key', String], ['callback', Function]],
+
+ // used for the query
+ type: 'gets',
+ command: 'gets ' + key
+ }});
+ };
+
+ // Handles get requests for multiple keys
+ memcached.getMulti = function getMulti(keys, callback){
+ var memcached = this, responses = {}, errors = [], calls,
+ handle = function(err, results){
+ if(err) errors.push(err);
+ // add all responses to the array
+ (Array.isArray(results) ? results : [results]).forEach(function(value){ Utils.merge(responses, value) });
+
+ if(!--calls) callback(errors.length ? errors : false, responses);
+ };
+
+ this.multi(keys, function(server, key, index, totals){
+ if(!calls) calls = totals;
+
+ memcached.command(function getMultiCommand(noreply){ return {
+ callback: handle,
+
+ multi:true,
+ type: 'get',
+ command: 'get ' + key.join(' ')
+ }},
+ server
+ );
+ });
+ };
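Usage sketch for a multi get (hypothetical keys; passing an array to .get routes here):

client.get(['foo', 'bar'], function(errors, results){
  // results is a single object keyed by the requested keys, merged across servers
});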
+
+ // As nearly all commands use the same syntax we are going to proxy them all to this
+ // function to ease maintenance. This is possible because most set commands use the same
+ // syntax on the Memcached server. Some commands do not require a lifetime and a flag, but the
+ // memcached server is smart enough to ignore those.
+ private.setters = function setters(type, validate, key, value, lifetime, callback, cas){
+ var flag = 0,
+ memcached = this;
+
+ if(Buffer.isBuffer(value)){
+ flag = FLAG_BINARY;
+ value = value.toString('binary');
+ } else if(typeof value !== 'string'){
+ flag = FLAG_JSON;
+ value = JSON.stringify(value);
+ } else {
+ value = value.toString();
+ }
+
+ if(value.length > memcached.maxValue)
+ return private.errorResponse('The length of the value is greater than ' + memcached.maxValue, callback);
+
+ memcached.command(function settersCommand(noreply){ return {
+ key: key, callback: callback, lifetime: lifetime, value: value, cas: cas,
+
+ // validate the arguments
+ validate: validate,
+
+ type: type,
+ redundancyEnabled: true,
+ command: [type, key, flag, lifetime, Buffer.byteLength(value)].join(' ') +
+ (cas ? ' ' + cas : '') +
+ (noreply ? NOREPLY : '') +
+ LINEBREAK + value
+ }});
+ };
+
+ // Curry the function so we can tell our private set function the type
+ memcached.set = Utils.curry(false, private.setters, 'set', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]]);
+ memcached.replace = Utils.curry(false, private.setters, 'replace', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]]);
+ memcached.add = Utils.curry(false, private.setters, 'add', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]]);
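Usage sketch for the curried setters (hypothetical values; lifetime is in seconds):

// objects are flagged FLAG_JSON, Buffers FLAG_BINARY, strings are stored as-is
client.set('session:123', { user: 'john' }, 600, function(err, ok){
  // ok is true when the server replied STORED
});

client.add('lock:123', '1', 30, function(err, ok){
  // ok is false when the key already exists (NOT_STORED)
});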
+
+ memcached.cas = function checkandset(key, value, cas, lifetime, callback){
+ private.setters.call(this, 'cas', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]], key, value, lifetime, callback, cas);
+ };
+
+ memcached.append = function append(key, value, callback){
+ private.setters.call(this, 'append', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]], key, value, 0, callback);
+ };
+
+ memcached.prepend = function prepend(key, value, callback){
+ private.setters.call(this, 'prepend', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]], key, value, 0, callback);
+ };
+
+ // Small handler for incr and decr
+ private.incrdecr = function incrdecr(type, key, value, callback){
+ this.command(function incredecrCommand(noreply){ return {
+ key: key, callback: callback, value: value,
+
+ // validate the arguments
+ validate: [['key', String], ['value', Number], ['callback', Function]],
+
+ // used for the query
+ type: type,
+ redundancyEnabled: true,
+ command: [type, key, value].join(' ') +
+ (noreply ? NOREPLY : '')
+ }});
+ };
+
+ // Curry the function so we can tell our private incrdecr function the type
+ memcached.increment = memcached.incr = Utils.curry(false, private.incrdecr, 'incr');
+ memcached.decrement = memcached.decr = Utils.curry(false, private.incrdecr, 'decr');
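Usage sketch (hypothetical key):

client.incr('pageviews', 1, function(err, value){
  // value is the new number, or false when the key does not exist (NOT_FOUND)
});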
+
+ // Deletes the keys from the servers
+ memcached.del = function del(key, callback){
+ this.command(function deleteCommand(noreply){ return {
+ key: key, callback: callback,
+
+ // validate the arguments
+ validate: [['key', String], ['callback', Function]],
+
+ // used for the query
+ type: 'delete',
+ redundancyEnabled: true,
+ command: 'delete ' + key +
+ (noreply ? NOREPLY : '')
+ }});
+ };
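Usage sketch:

client.del('session:123', function(err, ok){
  // ok is true when the server replied DELETED
});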
+
+
+ // Small wrapper that handles single keyword commands such as flush_all, version and stats
+ private.singles = function singles(type, callback){
+ var memcached = this, responses = [], errors = [], calls,
+ handle = function(err, results){
+ if(err) errors.push(err);
+ if(results) responses = responses.concat(results);
+
+ // multi calls should ALWAYS return an array!
+ if(!--calls) callback(errors, responses);
+ };
+
+ this.multi(false, function(server, keys, index, totals){
+ if(!calls) calls = totals;
+
+ memcached.command(function singlesCommand(noreply){ return {
+ callback: handle,
+ type: type,
+ command: type
+ }},
+ server
+ );
+ });
+ };
+
+ // Curry the function so we can tell our private singles function the type
+ memcached.version = Utils.curry(false, private.singles, 'version');
+ memcached.flush = Utils.curry(false, private.singles, 'flush_all');
+ memcached.stats = Utils.curry(false, private.singles, 'stats');
+ memcached.settings = Utils.curry(false, private.singles, 'stats settings');
+ memcached.slabs = Utils.curry(false, private.singles, 'stats slabs');
+ memcached.items = Utils.curry(false, private.singles, 'stats items');
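Usage sketch; because these fan out to every server through .multi, the callbacks always receive arrays:

client.stats(function(errors, stats){
  // one stats object per server, each tagged with a .server property
});

client.version(function(errors, versions){
  // e.g. [{ server: 'localhost:11211', version: '1.4.5', major: '1', minor: '4', bugfix: '5' }]
});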
+
+ // You need to use the items dump to get the correct server and slab settings;
+ // see simple_cachedump.js for an example
+ memcached.cachedump = function cachedump(server, slabid, number, callback){
+ this.command(function cachedumpCommand(noreply){ return {
+ callback: callback,
+ number: number,
+ slabid: slabid,
+
+ // validate the arguments
+ validate: [['number', Number], ['slabid', Number], ['callback', Function]],
+
+ type: 'stats cachedump',
+ command: 'stats cachedump ' + slabid + ' ' + number
+ }},
+ server
+ );
+ };
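A sketch of pairing items with cachedump, in the spirit of the simple_cachedump.js example referenced above (hypothetical server and slab id):

client.items(function(errors, items){
  // each ITEM entry carries a slab id; feed it back into cachedump
  client.cachedump('localhost:11211', 1, 10, function(err, entries){
    // entries are objects of the form { key: ..., b: <bytes>, s: <expiration> }, one per ITEM line
  });
});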
+
+})(Client);
+
+module.exports = Client;
2,387 lib/zip.js
@@ -1,2387 +0,0 @@
-// The code for this is kinda messy, but it works.
-// If more native compression methods become available for Node.js, we will use those instead,
-// but for the time being, this will do.
-
-(function( expose ){
-/* Copyright (C) 1999 Masanao Izumo <iz@onicos.co.jp>
- * Version: 1.0.0.1
- * LastModified: Dec 25 1999
- */
-
-/* Interface:
- * data = zip_inflate(src);
- */
-
-/* constant parameters */
-var zip_WSIZE = 32768; // Sliding Window size
-var zip_STORED_BLOCK = 0;
-var zip_STATIC_TREES = 1;
-var zip_DYN_TREES = 2;
-
-/* for inflate */
-var zip_lbits = 9; // bits in base literal/length lookup table
-var zip_dbits = 6; // bits in base distance lookup table
-var zip_INBUFSIZ = 32768; // Input buffer size
-var zip_INBUF_EXTRA = 64; // Extra buffer
-
-/* variables (inflate) */
-var zip_slide;
-var zip_wp; // current position in slide
-var zip_fixed_tl = null; // inflate static
-var zip_fixed_td; // inflate static
-var zip_fixed_bl, fixed_bd; // inflate static
-var zip_bit_buf; // bit buffer
-var zip_bit_len; // bits in bit buffer
-var zip_method;
-var zip_eof;
-var zip_copy_leng;
-var zip_copy_dist;
-var zip_tl, zip_td; // literal/length and distance decoder tables
-var zip_bl, zip_bd; // number of bits decoded by tl and td
-
-var zip_inflate_data;
-var zip_inflate_pos;
-
-
-/* constant tables (inflate) */
-var zip_MASK_BITS = new Array(
- 0x0000,
- 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
- 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff);
-// Tables for deflate from PKZIP's appnote.txt.
-var zip_cplens = new Array( // Copy lengths for literal codes 257..285
- 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
- 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0);
-/* note: see note #13 above about the 258 in this list. */
-var zip_cplext = new Array( // Extra bits for literal codes 257..285
- 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
- 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99); // 99==invalid
-var zip_cpdist = new Array( // Copy offsets for distance codes 0..29
- 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
- 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
- 8193, 12289, 16385, 24577);
-var zip_cpdext = new Array( // Extra bits for distance codes
- 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
- 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
- 12, 12, 13, 13);
-var zip_border = new Array( // Order of the bit length code lengths
- 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15);
-/* objects (inflate) */
-
-function zip_HuftList() {
- this.next = null;
- this.list = null;
-}
-
-function zip_HuftNode() {
- this.e = 0; // number of extra bits or operation
- this.b = 0; // number of bits in this code or subcode
-
- // union
- this.n = 0; // literal, length base, or distance base
- this.t = null; // (zip_HuftNode) pointer to next level of table
-}
-
-function zip_HuftBuild(b, // code lengths in bits (all assumed <= BMAX)
- n, // number of codes (assumed <= N_MAX)
- s, // number of simple-valued codes (0..s-1)
- d, // list of base values for non-simple codes
- e, // list of extra bits for non-simple codes
- mm // maximum lookup bits
- ) {
- this.BMAX = 16; // maximum bit length of any code
- this.N_MAX = 288; // maximum number of codes in any set
- this.status = 0; // 0: success, 1: incomplete table, 2: bad input
- this.root = null; // (zip_HuftList) starting table
- this.m = 0; // maximum lookup bits, returns actual
-
-/* Given a list of code lengths and a maximum table size, make a set of
- tables to decode that set of codes. Return zero on success, one if
- the given code set is incomplete (the tables are still built in this
- case), two if the input is invalid (all zero length codes or an
- oversubscribed set of lengths), and three if not enough memory.
- The code with value 256 is special, and the tables are constructed
- so that no bits beyond that code are fetched when that code is
- decoded. */
- {
- var a; // counter for codes of length k
- var c = new Array(this.BMAX+1); // bit length count table
- var el; // length of EOB code (value 256)
- var f; // i repeats in table every f entries
- var g; // maximum code length
- var h; // table level
- var i; // counter, current code
- var j; // counter
- var k; // number of bits in current code
- var lx = new Array(this.BMAX+1); // stack of bits per table
- var p; // pointer into c[], b[], or v[]
- var pidx; // index of p
- var q; // (zip_HuftNode) points to current table
- var r = new zip_HuftNode(); // table entry for structure assignment
- var u = new Array(this.BMAX); // zip_HuftNode[BMAX][] table stack
- var v = new Array(this.N_MAX); // values in order of bit length
- var w;
- var x = new Array(this.BMAX+1);// bit offsets, then code stack
- var xp; // pointer into x or c
- var y; // number of dummy codes added
- var z; // number of entries in current table
- var o;
- var tail; // (zip_HuftList)
-
- tail = this.root = null;
- for(i = 0; i < c.length; i++)
- c[i] = 0;
- for(i = 0; i < lx.length; i++)
- lx[i] = 0;
- for(i = 0; i < u.length; i++)
- u[i] = null;
- for(i = 0; i < v.length; i++)
- v[i] = 0;
- for(i = 0; i < x.length; i++)
- x[i] = 0;
-
- // Generate counts for each bit length
- el = n > 256 ? b[256] : this.BMAX; // set length of EOB code, if any
- p = b; pidx = 0;
- i = n;
- do {
- c[p[pidx]]++; // assume all entries <= BMAX
- pidx++;
- } while(--i > 0);
- if(c[0] == n) { // null input--all zero length codes
- this.root = null;
- this.m = 0;
- this.status = 0;
- return;
- }
-
- // Find minimum and maximum length, bound *m by those
- for(j = 1; j <= this.BMAX; j++)
- if(c[j] != 0)
- break;
- k = j; // minimum code length
- if(mm < j)
- mm = j;
- for(i = this.BMAX; i != 0; i--)
- if(c[i] != 0)
- break;
- g = i; // maximum code length
- if(mm > i)
- mm = i;
-
- // Adjust last length count to fill out codes, if needed
- for(y = 1 << j; j < i; j++, y <<= 1)
- if((y -= c[j]) < 0) {
- this.status = 2; // bad input: more codes than bits
- this.m = mm;
- return;
- }
- if((y -= c[i]) < 0) {
- this.status = 2;
- this.m = mm;
- return;
- }
- c[i] += y;
-
- // Generate starting offsets into the value table for each length
- x[1] = j = 0;
- p = c;
- pidx = 1;
- xp = 2;
- while(--i > 0) // note that i == g from above
- x[xp++] = (j += p[pidx++]);
-
- // Make a table of values in order of bit lengths
- p = b; pidx = 0;
- i = 0;
- do {
- if((j = p[pidx++]) != 0)
- v[x[j]++] = i;
- } while(++i < n);
- n = x[g]; // set n to length of v
-
- // Generate the Huffman codes and for each, make the table entries
- x[0] = i = 0; // first Huffman code is zero
- p = v; pidx = 0; // grab values in bit order
- h = -1; // no tables yet--level -1
- w = lx[0] = 0; // no bits decoded yet
- q = null; // ditto
- z = 0; // ditto
-
- // go through the bit lengths (k already is bits in shortest code)
- for(; k <= g; k++) {
- a = c[k];
- while(a-- > 0) {
- // here i is the Huffman code of length k bits for value p[pidx]
- // make tables up to required level
- while(k > w + lx[1 + h]) {
- w += lx[1 + h]; // add bits already decoded
- h++;
-
- // compute minimum size table less than or equal to *m bits
- z = (z = g - w) > mm ? mm : z; // upper limit
- if((f = 1 << (j = k - w)) > a + 1) { // try a k-w bit table
- // too few codes for k-w bit table
- f -= a + 1; // deduct codes from patterns left
- xp = k;
- while(++j < z) { // try smaller tables up to z bits
- if((f <<= 1) <= c[++xp])
- break; // enough codes to use up j bits
- f -= c[xp]; // else deduct codes from patterns
- }
- }
- if(w + j > el && w < el)
- j = el - w; // make EOB code end at table
- z = 1 << j; // table entries for j-bit table
- lx[1 + h] = j; // set table size in stack
-
- // allocate and link in new table
- q = new Array(z);
- for(o = 0; o < z; o++) {
- q[o] = new zip_HuftNode();
- }
-
- if(tail == null)
- tail = this.root = new zip_HuftList();
- else
- tail = tail.next = new zip_HuftList();
- tail.next = null;
- tail.list = q;
- u[h] = q; // table starts after link
-
- /* connect to last table, if there is one */
- if(h > 0) {
- x[h] = i; // save pattern for backing up
- r.b = lx[h]; // bits to dump before this table
- r.e = 16 + j; // bits in this table
- r.t = q; // pointer to this table
- j = (i & ((1 << w) - 1)) >> (w - lx[h]);
- u[h-1][j].e = r.e;
- u[h-1][j].b = r.b;
- u[h-1][j].n = r.n;
- u[h-1][j].t = r.t;
- }
- }
-
- // set up table entry in r
- r.b = k - w;
- if(pidx >= n)
- r.e = 99; // out of values--invalid code
- else if(p[pidx] < s) {
- r.e = (p[pidx] < 256 ? 16 : 15); // 256 is end-of-block code
- r.n = p[pidx++]; // simple code is just the value
- } else {
- r.e = e[p[pidx] - s]; // non-simple--look up in lists
- r.n = d[p[pidx++] - s];
- }
-
- // fill code-like entries with r //
- f = 1 << (k - w);
- for(j = i >> w; j < z; j += f) {
- q[j].e = r.e;
- q[j].b = r.b;
- q[j].n = r.n;
- q[j].t = r.t;
- }
-
- // backwards increment the k-bit code i
- for(j = 1 << (k - 1); (i & j) != 0; j >>= 1)
- i ^= j;
- i ^= j;
-
- // backup over finished tables
- while((i & ((1 << w) - 1)) != x[h]) {
- w -= lx[h]; // don't need to update q
- h--;
- }
- }
- }
-
- /* return actual size of base table */
- this.m = lx[1];
-
- /* Return true (1) if we were given an incomplete table */
- this.status = ((y != 0 && g != 1) ? 1 : 0);
- } /* end of constructor */
-}
-
-
-/* routines (inflate) */
-
-function zip_GET_BYTE() {
- if(zip_inflate_data.length == zip_inflate_pos)
- return -1;
- return zip_inflate_data.charCodeAt(zip_inflate_pos++) & 0xff;
-}
-
-function zip_NEEDBITS(n) {
- while(zip_bit_len < n) {
- zip_bit_buf |= zip_GET_BYTE() << zip_bit_len;
- zip_bit_len += 8;
- }
-}
-
-function zip_GETBITS(n) {
- return zip_bit_buf & zip_MASK_BITS[n];
-}
-
-function zip_DUMPBITS(n) {
- zip_bit_buf >>= n;
- zip_bit_len -= n;
-}
-
-function zip_inflate_codes(buff, off, size) {
- /* inflate (decompress) the codes in a deflated (compressed) block.
- Return an error code or zero if it all goes ok. */
- var e; // table entry flag/number of extra bits
- var t; // (zip_HuftNode) pointer to table entry
- var n;
-
- if(size == 0)
- return 0;
-
- // inflate the coded data
- n = 0;
- for(;;) { // do until end of block
- zip_NEEDBITS(zip_bl);
- t = zip_tl.list[zip_GETBITS(zip_bl)];
- e = t.e;
- while(e > 16) {
- if(e == 99)
- return -1;
- zip_DUMPBITS(t.b);
- e -= 16;
- zip_NEEDBITS(e);
- t = t.t[zip_GETBITS(e)];
- e = t.e;
- }
- zip_DUMPBITS(t.b);
-
- if(e == 16) { // then it's a literal
- zip_wp &= zip_WSIZE - 1;
- buff[off + n++] = zip_slide[zip_wp++] = t.n;
- if(n == size)
- return size;
- continue;
- }
-
- // exit if end of block
- if(e == 15)
- break;
-
- // it's an EOB or a length
-
- // get length of block to copy
- zip_NEEDBITS(e);
- zip_copy_leng = t.n + zip_GETBITS(e);
- zip_DUMPBITS(e);
-
- // decode distance of block to copy
- zip_NEEDBITS(zip_bd);
- t = zip_td.list[zip_GETBITS(zip_bd)];
- e = t.e;
-
- while(e > 16) {
- if(e == 99)
- return -1;
- zip_DUMPBITS(t.b);
- e -= 16;
- zip_NEEDBITS(e);
- t = t.t[zip_GETBITS(e)];
- e = t.e;
- }
- zip_DUMPBITS(t.b);
- zip_NEEDBITS(e);
- zip_copy_dist = zip_wp - t.n - zip_GETBITS(e);
- zip_DUMPBITS(e);
-
- // do the copy
- while(zip_copy_leng > 0 && n < size) {
- zip_copy_leng--;
- zip_copy_dist &= zip_WSIZE - 1;
- zip_wp &= zip_WSIZE - 1;
- buff[off + n++] = zip_slide[zip_wp++]
- = zip_slide[zip_copy_dist++];
- }
-
- if(n == size)
- return size;
- }
-
- zip_method = -1; // done
- return n;
-}
-
-function zip_inflate_stored(buff, off, size) {
- /* "decompress" an inflated type 0 (stored) block. */
- var n;
-
- // go to byte boundary
- n = zip_bit_len & 7;
- zip_DUMPBITS(n);
-
- // get the length and its complement
- zip_NEEDBITS(16);
- n = zip_GETBITS(16);
- zip_DUMPBITS(16);
- zip_NEEDBITS(16);
- if(n != ((~zip_bit_buf) & 0xffff))
- return -1; // error in compressed data
- zip_DUMPBITS(16);
-
- // read and output the compressed data
- zip_copy_leng = n;
-
- n = 0;
- while(zip_copy_leng > 0 && n < size) {
- zip_copy_leng--;
- zip_wp &= zip_WSIZE - 1;
- zip_NEEDBITS(8);
- buff[off + n++] = zip_slide[zip_wp++] =
- zip_GETBITS(8);
- zip_DUMPBITS(8);
- }
-
- if(zip_copy_leng == 0)
- zip_method = -1; // done
- return n;
-}
-
-function zip_inflate_fixed(buff, off, size) {
- /* decompress an inflated type 1 (fixed Huffman codes) block. We should
- either replace this with a custom decoder, or at least precompute the
- Huffman tables. */
-
- // if first time, set up tables for fixed blocks
- if(zip_fixed_tl == null) {
- var i; // temporary variable
- var l = new Array(288); // length list for huft_build
- var h; // zip_HuftBuild
-
- // literal table
- for(i = 0; i < 144; i++)
- l[i] = 8;
- for(; i < 256; i++)
- l[i] = 9;
- for(; i < 280; i++)
- l[i] = 7;
- for(; i < 288; i++) // make a complete, but wrong code set
- l[i] = 8;
- zip_fixed_bl = 7;
-
- h = new zip_HuftBuild(l, 288, 257, zip_cplens, zip_cplext,
- zip_fixed_bl);
- if(h.status != 0) {
- alert("HufBuild error: "+h.status);
- return -1;
- }
- zip_fixed_tl = h.root;
- zip_fixed_bl = h.m;
-
- // distance table
- for(i = 0; i < 30; i++) // make an incomplete code set
- l[i] = 5;
- zip_fixed_bd = 5;
-
- h = new zip_HuftBuild(l, 30, 0, zip_cpdist, zip_cpdext, zip_fixed_bd);
- if(h.status > 1) {
- zip_fixed_tl = null;
- alert("HufBuild error: "+h.status);
- return -1;
- }
- zip_fixed_td = h.root;
- zip_fixed_bd = h.m;
- }
-
- zip_tl = zip_fixed_tl;
- zip_td = zip_fixed_td;
- zip_bl = zip_fixed_bl;
- zip_bd = zip_fixed_bd;
- return zip_inflate_codes(buff, off, size);
-}
-
-function zip_inflate_dynamic(buff, off, size) {
- // decompress an inflated type 2 (dynamic Huffman codes) block.
- var i; // temporary variables
- var j;
- var l; // last length
- var n; // number of lengths to get
- var t; // (zip_HuftNode) literal/length code table
- var nb; // number of bit length codes
- var nl; // number of literal/length codes
- var nd; // number of distance codes
- var ll = new Array(286+30); // literal/length and distance code lengths
- var h; // (zip_HuftBuild)
-
- for(i = 0; i < ll.length; i++)
- ll[i] = 0;
-
- // read in table lengths
- zip_NEEDBITS(5);
- nl = 257 + zip_GETBITS(5); // number of literal/length codes
- zip_DUMPBITS(5);
- zip_NEEDBITS(5);
- nd = 1 + zip_GETBITS(5); // number of distance codes
- zip_DUMPBITS(5);
- zip_NEEDBITS(4);
- nb = 4 + zip_GETBITS(4); // number of bit length codes
- zip_DUMPBITS(4);
- if(nl > 286 || nd > 30)
- return -1; // bad lengths
-
- // read in bit-length-code lengths
- for(j = 0; j < nb; j++)
- {
- zip_NEEDBITS(3);
- ll[zip_border[j]] = zip_GETBITS(3);
- zip_DUMPBITS(3);
- }
- for(; j < 19; j++)
- ll[zip_border[j]] = 0;
-
- // build decoding table for trees--single level, 7 bit lookup
- zip_bl = 7;
- h = new zip_HuftBuild(ll, 19, 19, null, null, zip_bl);
- if(h.status != 0)
- return -1; // incomplete code set
-
- zip_tl = h.root;
- zip_bl = h.m;
-
- // read in literal and distance code lengths
- n = nl + nd;
- i = l = 0;
- while(i < n) {
- zip_NEEDBITS(zip_bl);
- t = zip_tl.list[zip_GETBITS(zip_bl)];
- j = t.b;
- zip_DUMPBITS(j);
- j = t.n;
- if(j < 16) // length of code in bits (0..15)
- ll[i++] = l = j; // save last length in l
- else if(j == 16) { // repeat last length 3 to 6 times
- zip_NEEDBITS(2);
- j = 3 + zip_GETBITS(2);
- zip_DUMPBITS(2);
- if(i + j > n)
- return -1;
- while(j-- > 0)
- ll[i++] = l;
- } else if(j == 17) { // 3 to 10 zero length codes
- zip_NEEDBITS(3);
- j = 3 + zip_GETBITS(3);
- zip_DUMPBITS(3);
- if(i + j > n)
- return -1;
- while(j-- > 0)
- ll[i++] = 0;
- l = 0;
- } else { // j == 18: 11 to 138 zero length codes
- zip_NEEDBITS(7);
- j = 11 + zip_GETBITS(7);
- zip_DUMPBITS(7);
- if(i + j > n)
- return -1;
- while(j-- > 0)
- ll[i++] = 0;
- l = 0;
- }
- }
-
- // build the decoding tables for literal/length and distance codes
- zip_bl = zip_lbits;
- h = new zip_HuftBuild(ll, nl, 257, zip_cplens, zip_cplext, zip_bl);
- if(zip_bl == 0) // no literals or lengths
- h.status = 1;
- if(h.status != 0) {
- if(h.status == 1)
- ;// **incomplete literal tree**
- return -1; // incomplete code set
- }
- zip_tl = h.root;
- zip_bl = h.m;
-
- for(i = 0; i < nd; i++)
- ll[i] = ll[i + nl];
- zip_bd = zip_dbits;
- h = new zip_HuftBuild(ll, nd, 0, zip_cpdist, zip_cpdext, zip_bd);
- zip_td = h.root;
- zip_bd = h.m;
-
- if(zip_bd == 0 && nl > 257) { // lengths but no distances
- // **incomplete distance tree**
- return -1;
- }
-
- if(h.status == 1) {
- ;// **incomplete distance tree**
- }
- if(h.status != 0)
- return -1;
-
- // decompress until an end-of-block code
- return zip_inflate_codes(buff, off, size);
-}
-
-function zip_inflate_start() {
- var i;
-
- if(zip_slide == null)
- zip_slide = new Array(2 * zip_WSIZE);
- zip_wp = 0;
- zip_bit_buf = 0;
- zip_bit_len = 0;
- zip_method = -1;
- zip_eof = false;
- zip_copy_leng = zip_copy_dist = 0;
- zip_tl = null;
-}
-
-function zip_inflate_internal(buff, off, size) {
- // decompress an inflated entry
- var n, i;
-
- n = 0;
- while(n < size) {
- if(zip_eof && zip_method == -1)
- return n;
-
- if(zip_copy_leng > 0) {
- if(zip_method != zip_STORED_BLOCK) {
- // STATIC_TREES or DYN_TREES
- while(zip_copy_leng > 0 && n < size) {
- zip_copy_leng--;
- zip_copy_dist &= zip_WSIZE - 1;
- zip_wp &= zip_WSIZE - 1;
- buff[off + n++] = zip_slide[zip_wp++] =
- zip_slide[zip_copy_dist++];
- }
- } else {
- while(zip_copy_leng > 0 && n < size) {
- zip_copy_leng--;
- zip_wp &= zip_WSIZE - 1;
- zip_NEEDBITS(8);
- buff[off + n++] = zip_slide[zip_wp++] = zip_GETBITS(8);
- zip_DUMPBITS(8);
- }
- if(zip_copy_leng == 0)
- zip_method = -1; // done
- }
- if(n == size)
- return n;
- }
-
- if(zip_method == -1) {
- if(zip_eof)
- break;
-
- // read in last block bit
- zip_NEEDBITS(1);
- if(zip_GETBITS(1) != 0)
- zip_eof = true;
- zip_DUMPBITS(1);
-
- // read in block type
- zip_NEEDBITS(2);
- zip_method = zip_GETBITS(2);
- zip_DUMPBITS(2);
- zip_tl = null;
- zip_copy_leng = 0;
- }
-
- switch(zip_method) {
- case 0: // zip_STORED_BLOCK
- i = zip_inflate_stored(buff, off + n, size - n);
- break;
-
- case 1: // zip_STATIC_TREES
- if(zip_tl != null)
- i = zip_inflate_codes(buff, off + n, size - n);
- else
- i = zip_inflate_fixed(buff, off + n, size - n);
- break;
-
- case 2: // zip_DYN_TREES
- if(zip_tl != null)
- i = zip_inflate_codes(buff, off + n, size - n);
- else
- i = zip_inflate_dynamic(buff, off + n, size - n);
- break;
-
- default: // error
- i = -1;
- break;
- }
-
- if(i == -1) {
- if(zip_eof)
- return 0;
- return -1;
- }
- n += i;
- }
- return n;
-}
-
-/* for deflate */
-var zip_DEFAULT_LEVEL = 6;
-var zip_FULL_SEARCH = true;
-var zip_INBUFSIZ = 32768; // Input buffer size
-var zip_INBUF_EXTRA = 64; // Extra buffer
-var zip_OUTBUFSIZ = 1024 * 8;
-var zip_window_size = 2 * zip_WSIZE;
-var zip_MIN_MATCH = 3;
-var zip_MAX_MATCH = 258;
-var zip_BITS = 16;
-// for SMALL_MEM
-var zip_LIT_BUFSIZE = 0x2000;
-var zip_HASH_BITS = 13;
-// for MEDIUM_MEM
-// var zip_LIT_BUFSIZE = 0x4000;
-// var zip_HASH_BITS = 14;
-// for BIG_MEM
-// var zip_LIT_BUFSIZE = 0x8000;
-// var zip_HASH_BITS = 15;
-if(zip_LIT_BUFSIZE > zip_INBUFSIZ)
- alert("error: zip_INBUFSIZ is too small");
-if((zip_WSIZE<<1) > (1<<zip_BITS))
- alert("error: zip_WSIZE is too large");
-if(zip_HASH_BITS > zip_BITS-1)
- alert("error: zip_HASH_BITS is too large");
-if(zip_HASH_BITS < 8 || zip_MAX_MATCH != 258)
- alert("error: Code too clever");
-var zip_DIST_BUFSIZE = zip_LIT_BUFSIZE;
-var zip_HASH_SIZE = 1 << zip_HASH_BITS;
-var zip_HASH_MASK = zip_HASH_SIZE - 1;
-var zip_WMASK = zip_WSIZE - 1;
-var zip_NIL = 0; // Tail of hash chains
-var zip_TOO_FAR = 4096;
-var zip_MIN_LOOKAHEAD = zip_MAX_MATCH + zip_MIN_MATCH + 1;
-var zip_MAX_DIST = zip_WSIZE - zip_MIN_LOOKAHEAD;
-var zip_SMALLEST = 1;
-var zip_MAX_BITS = 15;
-var zip_MAX_BL_BITS = 7;
-var zip_LENGTH_CODES = 29;
-var zip_LITERALS =256;
-var zip_END_BLOCK = 256;
-var zip_L_CODES = zip_LITERALS + 1 + zip_LENGTH_CODES;
-var zip_D_CODES = 30;
-var zip_BL_CODES = 19;
-var zip_REP_3_6 = 16;
-var zip_REPZ_3_10 = 17;
-var zip_REPZ_11_138 = 18;
-var zip_HEAP_SIZE = 2 * zip_L_CODES + 1;
-var zip_H_SHIFT = parseInt((zip_HASH_BITS + zip_MIN_MATCH - 1) /
- zip_MIN_MATCH);
-
-/* variables */
-var zip_free_queue;
-var zip_qhead, zip_qtail;
-var zip_initflag;
-var zip_outbuf = null;
-var zip_outcnt, zip_outoff;
-var zip_complete;
-var zip_window;
-var zip_d_buf;
-var zip_l_buf;
-var zip_prev;
-var zip_bi_buf;
-var zip_bi_valid;
-var zip_block_start;
-var zip_ins_h;
-var zip_hash_head;
-var zip_prev_match;
-var zip_match_available;
-var zip_match_length;
-var zip_prev_length;
-var zip_strstart;
-var zip_match_start;
-var zip_eofile;
-var zip_lookahead;
-var zip_max_chain_length;
-var zip_max_lazy_match;
-var zip_compr_level;
-var zip_good_match;
-var zip_nice_match;
-var zip_dyn_ltree;
-var zip_dyn_dtree;
-var zip_static_ltree;
-var zip_static_dtree;
-var zip_bl_tree;
-var zip_l_desc;
-var zip_d_desc;
-var zip_bl_desc;
-var zip_bl_count;
-var zip_heap;
-var zip_heap_len;
-var zip_heap_max;
-var zip_depth;
-var zip_length_code;
-var zip_dist_code;
-var zip_base_length;
-var zip_base_dist;
-var zip_flag_buf;
-var zip_last_lit;
-var zip_last_dist;
-var zip_last_flags;
-var zip_flags;
-var zip_flag_bit;
-var zip_opt_len;
-var zip_static_len;
-var zip_deflate_data;
-var zip_deflate_pos;
-
-/* constant tables */
-var zip_extra_lbits = new Array(
- 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0);
-var zip_extra_dbits = new Array(
- 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13);
-var zip_extra_blbits = new Array(
- 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7);
-var zip_bl_order = new Array(
- 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15);
-var zip_configuration_table = new Array(
- new zip_DeflateConfiguration(0, 0, 0, 0),
- new zip_DeflateConfiguration(4, 4, 8, 4),
- new zip_DeflateConfiguration(4, 5, 16, 8),
- new zip_DeflateConfiguration(4, 6, 32, 32),
- new zip_DeflateConfiguration(4, 4, 16, 16),
- new zip_DeflateConfiguration(8, 16, 32, 32),
- new zip_DeflateConfiguration(8, 16, 128, 128),
- new zip_DeflateConfiguration(8, 32, 128, 256),
- new zip_DeflateConfiguration(32, 128, 258, 1024),
- new zip_DeflateConfiguration(32, 258, 258, 4096));
-
-/* objects (deflate) */
-
-function zip_DeflateCT() {
- this.fc = 0; // frequency count or bit string
- this.dl = 0; // father node in Huffman tree or length of bit string
-}
-
-function zip_DeflateTreeDesc() {
- this.dyn_tree = null; // the dynamic tree
- this.static_tree = null; // corresponding static tree or NULL
- this.extra_bits = null; // extra bits for each code or NULL
- this.extra_base = 0; // base index for extra_bits
- this.elems = 0; // max number of elements in the tree
- this.max_length = 0; // max bit length for the codes
- this.max_code = 0; // largest code with non zero frequency
-}
-
-/* Values for max_lazy_match, good_match and max_chain_length, depending on
- * the desired pack level (0..9). The values given below have been tuned to
- * exclude worst case performance for pathological files. Better values may be
- * found for specific files.
- */
-function zip_DeflateConfiguration(a, b, c, d) {
- this.good_length = a; // reduce lazy search above this match length
- this.max_lazy = b; // do not perform lazy search above this match length
- this.nice_length = c; // quit search above this match length
- this.max_chain = d;
-}
-
-function zip_DeflateBuffer() {
- this.next = null;
- this.len = 0;
- this.ptr = new Array(zip_OUTBUFSIZ);
- this.off = 0;
-}
-
-/* routines (deflate) */
-
-function zip_deflate_start(level) {
- var i;
-
- if(!level)
- level = zip_DEFAULT_LEVEL;
- else if(level < 1)
- level = 1;
- else if(level > 9)
- level = 9;
-
- zip_compr_level = level;
- zip_initflag = false;
- zip_eofile = false;
- if(zip_outbuf != null)
- return;
-
- zip_free_queue = zip_qhead = zip_qtail = null;
- zip_outbuf = new Array(zip_OUTBUFSIZ);
- zip_window = new Array(zip_window_size);
- zip_d_buf = new Array(zip_DIST_BUFSIZE);
- zip_l_buf = new Array(zip_INBUFSIZ + zip_INBUF_EXTRA);
- zip_prev = new Array(1 << zip_BITS);
- zip_dyn_ltree = new Array(zip_HEAP_SIZE);
- for(i = 0; i < zip_HEAP_SIZE; i++)
- zip_dyn_ltree[i] = new zip_DeflateCT();
- zip_dyn_dtree = new Array(2*zip_D_CODES+1);
- for(i = 0; i < 2*zip_D_CODES+1; i++)
- zip_dyn_dtree[i] = new zip_DeflateCT();
- zip_static_ltree = new Array(zip_L_CODES+2);
- for(i = 0; i < zip_L_CODES+2; i++)
- zip_static_ltree[i] = new zip_DeflateCT();
- zip_static_dtree = new Array(zip_D_CODES);
- for(i = 0; i < zip_D_CODES; i++)
- zip_static_dtree[i] = new zip_DeflateCT();
- zip_bl_tree = new Array(2*zip_BL_CODES+1);
- for(i = 0; i < 2*zip_BL_CODES+1; i++)
- zip_bl_tree[i] = new zip_DeflateCT();
- zip_l_desc = new zip_DeflateTreeDesc();
- zip_d_desc = new zip_DeflateTreeDesc();
- zip_bl_desc = new zip_DeflateTreeDesc();
- zip_bl_count = new Array(zip_MAX_BITS+1);
- zip_heap = new Array(2*zip_L_CODES+1);
- zip_depth = new Array(2*zip_L_CODES+1);
- zip_length_code = new Array(zip_MAX_MATCH-zip_MIN_MATCH+1);
- zip_dist_code = new Array(512);
- zip_base_length = new Array(zip_LENGTH_CODES);
- zip_base_dist = new Array(zip_D_CODES);
- zip_flag_buf = new Array(parseInt(zip_LIT_BUFSIZE / 8));
-}
-
-function zip_deflate_end() {
- zip_free_queue = zip_qhead = zip_qtail = null;
- zip_outbuf = null;
- zip_window = null;
- zip_d_buf = null;
- zip_l_buf = null;
- zip_prev = null;
- zip_dyn_ltree = null;
- zip_dyn_dtree = null;
- zip_static_ltree = null;
- zip_static_dtree = null;
- zip_bl_tree = null;
- zip_l_desc = null;
- zip_d_desc = null;
- zip_bl_desc = null;
- zip_bl_count = null;
- zip_heap = null;
- zip_depth = null;
- zip_length_code = null;
- zip_dist_code = null;
- zip_base_length = null;
- zip_base_dist = null;
- zip_flag_buf = null;
-}
-
-function zip_reuse_queue(p) {
- p.next = zip_free_queue;
- zip_free_queue = p;
-}
-
-function zip_new_queue() {
- var p;
-
- if(zip_free_queue != null)
- {
- p = zip_free_queue;
- zip_free_queue = zip_free_queue.next;
- }
- else
- p = new zip_DeflateBuffer();
- p.next = null;
- p.len = p.off = 0;
-
- return p;
-}
-
-function zip_head1(i) {
- return zip_prev[zip_WSIZE + i];
-}
-
-function zip_head2(i, val) {
- return zip_prev[zip_WSIZE + i] = val;
-}
-
-/* put_byte is used for the compressed output, put_ubyte for the
- * uncompressed output. However unlzw() uses window for its
- * suffix table instead of its output buffer, so it does not use put_ubyte
- * (to be cleaned up).
- */
-function zip_put_byte(c) {
- zip_outbuf[zip_outoff + zip_outcnt++] = c;
- if(zip_outoff + zip_outcnt == zip_OUTBUFSIZ)
- zip_qoutbuf();
-}
-
-/* Output a 16 bit value, lsb first */
-function zip_put_short(w) {
- w &= 0xffff;
- if(zip_outoff + zip_outcnt < zip_OUTBUFSIZ - 2) {
- zip_outbuf[zip_outoff + zip_outcnt++] = (w & 0xff);
- zip_outbuf[zip_outoff + zip_outcnt++] = (w >>> 8);
- } else {
- zip_put_byte(w & 0xff);
- zip_put_byte(w >>> 8);
- }
-}
-
-/* ==========================================================================
- * Insert string s in the dictionary and set match_head to the previous head
- * of the hash chain (the most recent string with same hash key). Return
- * the previous length of the hash chain.
- * IN assertion: all calls to INSERT_STRING are made with consecutive
- * input characters and the first MIN_MATCH bytes of s are valid
- * (except for the last MIN_MATCH-1 bytes of the input file).
- */
-function zip_INSERT_STRING() {
- zip_ins_h = ((zip_ins_h << zip_H_SHIFT)
- ^ (zip_window[zip_strstart + zip_MIN_MATCH - 1] & 0xff))
- & zip_HASH_MASK;
- zip_hash_head = zip_head1(zip_ins_h);
- zip_prev[zip_strstart & zip_WMASK] = zip_hash_head;
- zip_head2(zip_ins_h, zip_strstart);
-}
-
-/* Send a code of the given tree. c and tree must not have side effects */
-function zip_SEND_CODE(c, tree) {
- zip_send_bits(tree[c].fc, tree[c].dl);
-}
-
-/* Mapping from a distance to a distance code. dist is the distance - 1 and
- * must not have side effects. dist_code[256] and dist_code[257] are never
- * used.
- */
-function zip_D_CODE(dist) {
- return (dist < 256 ? zip_dist_code[dist]
- : zip_dist_code[256 + (dist>>7)]) & 0xff;
-}
-
-/* ==========================================================================
- * Compares two subtrees, using the tree depth as tie breaker when
- * the subtrees have equal frequency. This minimizes the worst case length.
- */
-function zip_SMALLER(tree, n, m) {
- return tree[n].fc < tree[m].fc ||
- (tree[n].fc == tree[m].fc && zip_depth[n] <= zip_depth[m]);
-}
-
-/* ==========================================================================
- * read string data
- */
-function zip_read_buff(buff, offset, n) {
- var i;
- for(i = 0; i < n && zip_deflate_pos < zip_deflate_data.length; i++)
- buff[offset + i] =
- zip_deflate_data.charCodeAt(zip_deflate_pos++) & 0xff;
- return i;
-}
-
-/* ==========================================================================
- * Initialize the "longest match" routines for a new file
- */
-function zip_lm_init() {
- var j;
-
- /* Initialize the hash table. */
- for(j = 0; j < zip_HASH_SIZE; j++)
-// zip_head2(j, zip_NIL);
- zip_prev[zip_WSIZE + j] = 0;
- /* prev will be initialized on the fly */
-
- /* Set the default configuration parameters:
- */
- zip_max_lazy_match = zip_configuration_table[zip_compr_level].max_lazy;
- zip_good_match = zip_configuration_table[zip_compr_level].good_length;
- if(!zip_FULL_SEARCH)
- zip_nice_match = zip_configuration_table[zip_compr_level].nice_length;
- zip_max_chain_length = zip_configuration_table[zip_compr_level].max_chain;
-
- zip_strstart = 0;
- zip_block_start = 0;
-
- zip_lookahead = zip_read_buff(zip_window, 0, 2 * zip_WSIZE);
- if(zip_lookahead <= 0) {
- zip_eofile = true;
- zip_lookahead = 0;
- return;
- }
- zip_eofile = false;
- /* Make sure that we always have enough lookahead. This is important
- * if input comes from a device such as a tty.
- */
- while(zip_lookahead < zip_MIN_LOOKAHEAD && !zip_eofile)
- zip_fill_window();
-
- /* If lookahead < MIN_MATCH, ins_h is garbage, but this is
- * not important since only literal bytes will be emitted.
- */
- zip_ins_h = 0;
- for(j = 0; j < zip_MIN_MATCH - 1; j++) {
-// UPDATE_HASH(ins_h, window[j]);
- zip_ins_h = ((zip_ins_h << zip_H_SHIFT) ^ (zip_window[j] & 0xff)) & zip_HASH_MASK;
- }
-}
-
-/* ==========================================================================
- * Set match_start to the longest match starting at the given string and
- * return its length. Matches shorter or equal to prev_length are discarded,
- * in which case the result is equal to prev_length and match_start is
- * garbage.
- * IN assertions: cur_match is the head of the hash chain for the current
- * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
- */
-function zip_longest_match(cur_match) {
- var chain_length = zip_max_chain_length; // max hash chain length
- var scanp = zip_strstart; // current string
- var matchp; // matched string
- var len; // length of current match
- var best_len = zip_prev_length; // best match length so far
-
- /* Stop when cur_match becomes <= limit. To simplify the code,
- * we prevent matches with the string of window index 0.
- */
- var limit = (zip_strstart > zip_MAX_DIST ? zip_strstart - zip_MAX_DIST : zip_NIL);
-
- var strendp = zip_strstart + zip_MAX_MATCH;
- var scan_end1 = zip_window[scanp + best_len - 1];
- var scan_end = zip_window[scanp + best_len];
-
- /* Do not waste too much time if we already have a good match: */
- if(zip_prev_length >= zip_good_match)
- chain_length >>= 2;
-
-// Assert(encoder->strstart <= window_size-MIN_LOOKAHEAD, "insufficient lookahead");
-
- do {
-// Assert(cur_match < encoder->strstart, "no future");
- matchp = cur_match;
-
- /* Skip to next match if the match length cannot increase
- * or if the match length is less than 2:
- */
- if(zip_window[matchp + best_len] != scan_end ||
- zip_window[matchp + best_len - 1] != scan_end1 ||
- zip_window[matchp] != zip_window[scanp] ||
- zip_window[++matchp] != zip_window[scanp + 1]) {
- continue;
- }
-
- /* The check at best_len-1 can be removed because it will be made
- * again later. (This heuristic is not always a win.)
- * It is not necessary to compare scan[2] and match[2] since they
- * are always equal when the other bytes match, given that
- * the hash keys are equal and that HASH_BITS >= 8.
- */
- scanp += 2;
- matchp++;
-
- /* We check for insufficient lookahead only every 8th comparison;
- * the 256th check will be made at strstart+258.
- */
- do {
- } while(zip_window[++scanp] == zip_window[++matchp] &&
- zip_window[++scanp] == zip_window[++matchp] &&
- zip_window[++scanp] == zip_window[++matchp] &&
- zip_window[++scanp] == zip_window[++matchp] &&
- zip_window[++scanp] == zip_window[++matchp] &&
- zip_window[++scanp] == zip_window[++matchp] &&
- zip_window[++scanp] == zip_window[++matchp] &&
- zip_window[++scanp] == zip_window[++matchp] &&
- scanp < strendp);
-
- len = zip_MAX_MATCH - (strendp - scanp);
- scanp = strendp - zip_MAX_MATCH;
-
- if(len > best_len) {
- zip_match_start = cur_match;
- best_len = len;
- if(zip_FULL_SEARCH) {
- if(len >= zip_MAX_MATCH) break;
- } else {
- if(len >= zip_nice_match) break;
- }
-
- scan_end1 = zip_window[scanp + best_len-1];
- scan_end = zip_window[scanp + best_len];
- }
- } while((cur_match = zip_prev[cur_match & zip_WMASK]) > limit
- && --chain_length != 0);
-
- return best_len;
-}
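-
-/* Rough illustration of the fast-reject test above: a candidate can only beat
- * best_len if it agrees with the scan at offsets best_len and best_len-1, so
- * those two bytes are cached in scan_end/scan_end1 and compared first. E.g.
- * with best_len = 4, a chain entry whose byte at offset 4 differs from
- * scan_end is skipped without any byte-by-byte comparison.
- */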
-
-/* ==========================================================================
- * Fill the window when the lookahead becomes insufficient.
- * Updates strstart and lookahead, and sets eofile if end of input file.
- * IN assertion: lookahead < MIN_LOOKAHEAD && strstart + lookahead > 0
- * OUT assertions: at least one byte has been read, or eofile is set;
- * file reads are performed for at least two bytes (required for the
- * translate_eol option).
- */
-function zip_fill_window() {
- var n, m;
-
- // Amount of free space at the end of the window.
- var more = zip_window_size - zip_lookahead - zip_strstart;
-
- /* If the window is almost full and there is insufficient lookahead,
- * move the upper half to the lower one to make room in the upper half.
- */
- if(more == -1) {
- /* Very unlikely, but possible on 16 bit machine if strstart == 0
- * and lookahead == 1 (input done one byte at time)
- */
- more--;
- } else if(zip_strstart >= zip_WSIZE + zip_MAX_DIST) {
- /* By the IN assertion, the window is not empty so we can't confuse
- * more == 0 with more == 64K on a 16 bit machine.
- */
-// Assert(window_size == (ulg)2*WSIZE, "no sliding with BIG_MEM");
-
-// System.arraycopy(window, WSIZE, window, 0, WSIZE);
- for(n = 0; n < zip_WSIZE; n++)
- zip_window[n] = zip_window[n + zip_WSIZE];
-
- zip_match_start -= zip_WSIZE;
- zip_strstart -= zip_WSIZE; /* we now have strstart >= MAX_DIST: */
- zip_block_start -= zip_WSIZE;
-
- for(n = 0; n < zip_HASH_SIZE; n++) {
- m = zip_head1(n);
- zip_head2(n, m >= zip_WSIZE ? m - zip_WSIZE : zip_NIL);
- }
- for(n = 0; n < zip_WSIZE; n++) {
- /* If n is not on any hash chain, prev[n] is garbage but
- * its value will never be used.
- */
- m = zip_prev[n];
- zip_prev[n] = (m >= zip_WSIZE ? m - zip_WSIZE : zip_NIL);
- }
- more += zip_WSIZE;
- }
- // At this point, more >= 2
- if(!zip_eofile) {
- n = zip_read_buff(zip_window, zip_strstart + zip_lookahead, more);
- if(n <= 0)
- zip_eofile = true;
- else
- zip_lookahead += n;
- }
-}
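-
-/* Worked example (assuming the usual WSIZE of 32768): once strstart reaches
- * WSIZE + MAX_DIST, the upper half of the window is copied down, so a
- * match_start of 40000 becomes 40000 - 32768 = 7232, and any head or prev
- * link that pointed below WSIZE is reset to NIL because the bytes it
- * referenced have been discarded.
- */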
-
-/* ==========================================================================
- * Processes a new input file and returns its compressed length. This
- * function does not perform lazy evaluation of matches and inserts
- * new strings in the dictionary only for unmatched strings or for short
- * matches. It is used only for the fast compression options.
- */
-function zip_deflate_fast() {
- while(zip_lookahead != 0 && zip_qhead == null) {
- var flush; // set if current block must be flushed
-
- /* Insert the string window[strstart .. strstart+2] in the
- * dictionary, and set hash_head to the head of the hash chain:
- */
- zip_INSERT_STRING();
-
- /* Find the longest match, discarding those <= prev_length.
- * At this point we have always match_length < MIN_MATCH
- */
- if(zip_hash_head != zip_NIL &&
- zip_strstart - zip_hash_head <= zip_MAX_DIST) {
- /* To simplify the code, we prevent matches with the string
- * of window index 0 (in particular we have to avoid a match
- * of the string with itself at the start of the input file).
- */
- zip_match_length = zip_longest_match(zip_hash_head);
- /* longest_match() sets match_start */
- if(zip_match_length > zip_lookahead)
- zip_match_length = zip_lookahead;
- }
- if(zip_match_length >= zip_MIN_MATCH) {
-// check_match(strstart, match_start, match_length);
-
- flush = zip_ct_tally(zip_strstart - zip_match_start,
- zip_match_length - zip_MIN_MATCH);
- zip_lookahead -= zip_match_length;
-
- /* Insert new strings in the hash table only if the match length
- * is not too large. This saves time but degrades compression.
- */
- if(zip_match_length <= zip_max_lazy_match) {
- zip_match_length--; // string at strstart already in hash table
- do {
- zip_strstart++;
- zip_INSERT_STRING();
- /* strstart never exceeds WSIZE-MAX_MATCH, so there are
- * always MIN_MATCH bytes ahead. If lookahead < MIN_MATCH
- * these bytes are garbage, but it does not matter since
- * the next lookahead bytes will be emitted as literals.
- */
- } while(--zip_match_length != 0);
- zip_strstart++;
- } else {
- zip_strstart += zip_match_length;
- zip_match_length = 0;
- zip_ins_h = zip_window[zip_strstart] & 0xff;
-// UPDATE_HASH(ins_h, window[strstart + 1]);
- zip_ins_h = ((zip_ins_h<<zip_H_SHIFT) ^ (zip_window[zip_strstart + 1] & 0xff)) & zip_HASH_MASK;
-
-//#if MIN_MATCH != 3
-// Call UPDATE_HASH() MIN_MATCH-3 more times
-//#endif
-
- }
- } else {
- /* No match, output a literal byte */
- flush = zip_ct_tally(0, zip_window[zip_strstart] & 0xff);
- zip_lookahead--;
- zip_strstart++;
- }
- if(flush) {
- zip_flush_block(0);
- zip_block_start = zip_strstart;
- }
-
- /* Make sure that we always have enough lookahead, except
- * at the end of the input file. We need MAX_MATCH bytes
- * for the next match, plus MIN_MATCH bytes to insert the
- * string following the next match.
- */
- while(zip_lookahead < zip_MIN_LOOKAHEAD && !zip_eofile)
- zip_fill_window();
- }
-}
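-
-/* Example trace (illustrative only): for input "abcabcabc", the first three
- * bytes come out as literals while their strings are inserted into the hash
- * table; at strstart = 3 the hash chain leads back to position 0 and
- * zip_longest_match() returns length 6, so one (distance 3, length 6) pair
- * covers the rest via zip_ct_tally(3, 6 - MIN_MATCH).
- */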
-
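-/* ==========================================================================
- * Same as zip_deflate_fast(), but achieves better compression. We use a
- * lazy evaluation for matches: a match is finally adopted only if there is
- * no better match at the next window position.
- */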
-function zip_deflate_better() {
- /* Process the input block. */
- while(zip_lookahead != 0 && zip_qhead == null) {
- /* Insert the string window[strstart .. strstart+2] in the
- * dictionary, and set hash_head to the head of the hash chain:
- */
- zip_INSERT_STRING();
-
- /* Find the longest match, discarding those <= prev_length.
- */
- zip_prev_length = zip_match_length;
- zip_prev_match = zip_match_start;
- zip_match_length = zip_MIN_MATCH - 1;
-
- if(zip_hash_head != zip_NIL &&
- zip_prev_length < zip_max_lazy_match &&
- zip_strstart - zip_hash_head <= zip_MAX_DIST) {
- /* To simplify the code, we prevent matches with the string
- * of window index 0 (in particular we have to avoid a match
- * of the string with itself at the start of the input file).
- */
- zip_match_length = zip_longest_match(zip_hash_head);
- /* longest_match() sets match_start */
- if(zip_match_length > zip_lookahead)
- zip_match_length = zip_lookahead;
-
- /* Ignore a length 3 match if it is too distant: */
- if(zip_match_length == zip_MIN_MATCH &&
- zip_strstart - zip_match_start > zip_TOO_FAR) {
- /* If prev_match is also MIN_MATCH, match_start is garbage
- * but we will ignore the current match anyway.
- */
- zip_match_length--;
- }
- }
- /* If there was a match at the previous step and the current
- * match is not better, output the previous match:
- */
- if(zip_prev_length >= zip_MIN_MATCH &&
- zip_match_length <= zip_prev_length) {
- var flush; // set if current block must be flushed
-
-// check_match(strstart - 1, prev_match, prev_length);
- flush = zip_ct_tally(zip_strstart - 1 - zip_prev_match,
- zip_prev_length - zip_MIN_MATCH);
-
- /* Insert in hash table all strings up to the end of the match.
- * strstart-1 and strstart are already inserted.
- */
- zip_lookahead -= zip_prev_length - 1;
- zip_prev_length -= 2;
- do {
- zip_strstart++;
- zip_INSERT_STRING();
- /* strstart never exceeds WSIZE-MAX_MATCH, so there are
- * always MIN_MATCH bytes ahead. If lookahead < MIN_MATCH
- * these bytes are garbage, but it does not matter since the
- * next lookahead bytes will always be emitted as literals.
- */
- } while(--zip_prev_length != 0);
- zip_match_available = 0;
- zip_match_length = zip_MIN_MATCH - 1;
- zip_strstart++;
- if(flush) {
- zip_flush_block(0);
- zip_block_start = zip_strstart;
- }
- } else if(zip_match_available != 0) {
- /* If there was no match at the previous position, output a
- * single literal. If there was a match but the current match
- * is longer, truncate the previous match to a single literal.
- */
- if(zip_ct_tally(0, zip_window[zip_strstart - 1] & 0xff)) {
- zip_flush_block(0);
- zip_block_start = zip_strstart;
- }
- zip_strstart++;
- zip_lookahead--;
- } else {
- /* There is no previous match to compare with, wait for
- * the next step to decide.
- */
- zip_match_available = 1;
- zip_strstart++;
- zip_lookahead--;
- }
-
- /* Make sure that we always have enough lookahead, except
- * at the end of the input file. We need MAX_MATCH bytes
- * for the next match, plus MIN_MATCH bytes to insert the
- * string following the next match.
- */
- while(zip_lookahead < zip_MIN_LOOKAHEAD && !zip_eofile)
- zip_fill_window();
- }
-}
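-
-/* Illustration of the lazy heuristic above: if position p yields a match of
- * length 3 but position p+1 yields one of length 5, the encoder emits the
- * byte at p as a single literal (the match_available path) and keeps the
- * longer match at p+1 as the candidate, committing to it only if position
- * p+2 offers nothing better.
- */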
-
-function zip_init_deflate() {
- if(zip_eofile)
- return;
- zip_bi_buf = 0;
- zip_bi_valid = 0;
- zip_ct_init();
- zip_lm_init();
-
- zip_qhead = null;
- zip_outcnt = 0;
- zip_outoff = 0;
-
- if(zip_compr_level <= 3)
- {
- zip_prev_length = zip_MIN_MATCH - 1;
- zip_match_length = 0;
- }
- else
- {
- zip_match_length = zip_MIN_MATCH - 1;
- zip_match_available = 0;
- }
-
- zip_complete = false;
-}
-
-/* ==========================================================================
- * Top-level deflate driver: initializes the encoder on first use, runs
- * zip_deflate_fast() or zip_deflate_better() depending on the compression
- * level, and copies up to buff_size bytes of compressed output into buff
- * starting at off. Returns the number of bytes written.
- */
-function zip_deflate_internal(buff, off, buff_size) {
- var n;
-
- if(!zip_initflag)
- {
- zip_init_deflate();
- zip_initflag = true;
- if(zip_lookahead == 0) { // empty
- zip_complete = true;
- return 0;
- }
- }
-
- if((n = zip_qcopy(buff, off, buff_size)) == buff_size)
- return buff_size;
-
- if(zip_complete)
- return n;
-
- if(zip_compr_level <= 3) // optimized for speed
- zip_deflate_fast();
- else
- zip_deflate_better();
- if(zip_lookahead == 0) {
- if(zip_match_available != 0)
- zip_ct_tally(0, zip_window[zip_strstart - 1] & 0xff);
- zip_flush_block(1);
- zip_complete = true;
- }
- return n + zip_qcopy(buff, n + off, buff_size - n);
-}
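-
-/* A hypothetical driver (sketch only; the 1024-byte buffer is arbitrary):
- * output is pulled in chunks until the encoder has nothing left to emit:
- *
- *   var buff = new Array(1024), n;
- *   while((n = zip_deflate_internal(buff, 0, buff.length)) > 0) {
- *     // consume buff[0..n-1]
- *   }
- */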
-
-function zip_qcopy(buff, off, buff_size) {
- var n, i, j;
-
- n = 0;
- while(zip_qhead != null && n < buff_size)
- {
- i = buff_size - n;
- if(i > zip_qhead.len)
- i = zip_qhead.len;
-// System.arraycopy(qhead.ptr, qhead.off, buff, off + n, i);
- for(j = 0; j < i; j++)
- buff[off + n + j] = zip_qhead.ptr[zip_qhead.off + j];
-
- zip_qhead.off += i;
- zip_qhead.len -= i;
- n += i;
- if(zip_qhead.len == 0) {
- var p;
- p = zip_qhead;
- zip_qhead = zip_qhead.next;
- zip_reuse_queue(p);
- }
- }
-
- if(n == buff_size)
- return n;
-
- if(zip_outoff < zip_outcnt) {
- i = buff_size - n;
- if(i > zip_outcnt - zip_outoff)
- i = zip_outcnt - zip_outoff;
- // System.arraycopy(outbuf, outoff, buff, off + n, i);
- for(j = 0; j < i; j++)
- buff[off + n + j] = zip_outbuf[zip_outoff + j];
- zip_outoff += i;
- n += i;
- if(zip_outcnt == zip_outoff)
- zip_outcnt = zip_outoff = 0;
- }
- return n;
-}
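-
-/* Note: output is drained in two stages, matching the order above: first the
- * linked list of pending blocks (zip_qhead), then the flat output buffer
- * (zip_outbuf) between zip_outoff and zip_outcnt.
- */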
-
-/* ==========================================================================
- * Allocate the match buffer, initialize the various tables and save the
- * location of the internal file attribute (ascii/binary) and method
- * (DEFLATE/STORE).
- */
-function zip_ct_init() {
- var n; // iterates over tree elements
- var bits; // bit counter
- var length; // length value
- var code; // code value
- var dist; // distance index
-
- if(zip_static_dtree[0].dl != 0) return; // ct_init already called
-
- zip_l_desc.dyn_tree = zip_dyn_ltree;
- zip_l_desc.static_tree = zip_static_ltree;
- zip_l_desc.extra_bits = zip_extra_lbits;
- zip_l_desc.extra_base = zip_LITERALS + 1;
- zip_l_desc.elems = zip_L_CODES;
- zip_l_desc.max_length = zip_MAX_BITS;
- zip_l_desc.max_code = 0;
-
- zip_d_desc.dyn_tree = zip_dyn_dtree;
- zip_d_desc.static_tree = zip_static_dtree;
- zip_d_desc.extra_bits = zip_extra_dbits;
- zip_d_desc.extra_base = 0;
- zip_d_desc.elems = zip_D_CODES;
- zip_d_desc.max_length = zip_MAX_BITS;
- zip_d_desc.max_code = 0;
-
- zip_bl_desc.dyn_tree = zip_bl_tree;
- zip_bl_desc.static_tree = null;
- zip_bl_desc.extra_bits = zip_extra_blbits;
- zip_bl_desc.extra_base = 0;
- zip_bl_desc.elems = zip_BL_CODES;
- zip_bl_desc.max_length = zip_MAX_BL_BITS;
- zip_bl_desc.max_code = 0;
-
- // Initialize the mapping length (0..255) -> length code (0..28)
- length = 0;
- for(code = 0; code < zip_LENGTH_CODES-1; code++) {
- zip_base_length[code] = length;
- for(n = 0; n < (1<<zip_extra_lbits[code]); n++)
- zip_length_code[length++] = code;
- }
- // Assert (length == 256, "ct_init: length != 256");
-
- /* Note that the length 255 (match length 258) can be represented
- * in two different ways: code 284 + 5 bits or code 285, so we
- * overwrite length_code[255] to use the best encoding:
- */
- zip_length_code[length-1] = code;
-
- /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
- dist = 0;
- for(code = 0 ; code < 16; code++) {
- zip_base_dist[code] = dist;
- for(n = 0; n < (1<<zip_extra_dbits[code]); n++) {
- zip_dist_code[dist++] = code;
- }
- }
- // Assert (dist == 256, "ct_init: dist != 256");
- dist >>= 7; // from now on, all distances are divided by 128
- for( ; code < zip_D_CODES; code++) {
- zip_base_dist[code] = dist << 7;
- for(n = 0; n < (1<<(zip_extra_dbits[code]-7)); n++)
- zip_dist_code[256 + dist++] = code;
- }
- // Assert (dist == 256, "ct_init: 256+dist != 512");
-
- // Construct the codes of the static literal tree
- for(bits = 0; bits <= zip_MAX_BITS; bits++)
- zip_bl_count[bits] = 0;
- n = 0;
- while(n <= 143) { zip_static_ltree[n++].dl = 8; zip_bl_count[8]++; }
- while(n <= 255) { zip_static_ltree[n++].dl = 9; zip_bl_count[9]++; }
- while(n <= 279) { zip_static_ltree[n++].dl = 7; zip_bl_count[7]++; }
- while(n <= 287) { zip_static_ltree[n++].dl = 8; zip_bl_count[8]++; }
- /* Codes 286 and 287 do not exist, but we must include them in the
- * tree construction to get a canonical Huffman tree (longest code
- * all ones)
- */
- zip_gen_codes(zip_static_ltree, zip_L_CODES + 1);
-
- /* The static distance tree is trivial: */
- for(n = 0; n < zip_D_CODES; n++) {
- zip_static_dtree[n].dl = 5;
- zip_static_dtree[n].fc = zip_bi_reverse(n, 5);
- }
-
- // Initialize the first block of the first file:
- zip_init_block();
-}
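-
-/* Example of the mappings built above (per the DEFLATE format): the shortest
- * match, length 3, maps to length_code[3 - MIN_MATCH] = length_code[0] = 0
- * with no extra bits; a match of length 13 maps to length_code[10] = 9,
- * whose single extra bit distinguishes lengths 13 and 14; a distance of 5
- * maps to dist_code[4] = 4, with one extra bit for distances 5-6.
- */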
-
-/* ==========================================================================
- * Initialize a new block.
- */
-function zip_init_block() {
- var n; // iterates over tree elements
-
- // Initialize the trees.
- for(n = 0; n < zip_L_CODES; n++) zip_dyn_ltree[n].fc = 0;
- for(n = 0; n < zip_D_CODES; n++) zip_dyn_dtree[n].fc = 0;
- for(n = 0; n < zip_BL_CODES; n++) zip_bl_tree[n].fc = 0;
-
- zip_dyn_ltree[zip_END_BLOCK].fc = 1;
- zip_opt_len = zip_static_len = 0;
- zip_last_lit = zip_last_dist = zip_last_flags = 0;
- zip_flags = 0;
- zip_flag_bit = 1;
-}
-
-/* ==========================================================================
- * Restore the heap property by moving down the tree starting at node k,
- * exchanging a node with the smallest of its two sons if necessary, stopping
- * when the heap property is re-established (each father smaller than its
- * two sons).
- */
-function zip_pqdownheap(
- tree, // the tree to restore
- k) { // node to move down
- var v = zip_heap[k];
- var j = k << 1; // left son of k
-
- while(j <= zip_heap_len) {
- // Set j to the smallest of the two sons:
- if(j < zip_heap_len &&
- zip_SMALLER(tree, zip_heap[j + 1], zip_heap[j]))
- j++;
-
- // Exit if v is smaller than both sons
- if(zip_SMALLER(tree, v, zip_heap[j]))
- break;
-
- // Exchange v with the smallest son
- zip_heap[k] = zip_heap[j];
- k = j;
-
- // And continue down the tree, setting j to the left son of k
- j <<= 1;
- }
- zip_heap[k] = v;
-}
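-
-/* Small example (frequencies illustrative): with heap = [_, A, B, C] where
- * freq(A)=9, freq(B)=3, freq(C)=5, sifting down from k = 1 compares the two
- * sons B and C, picks B as the smaller, swaps it into slot 1 and leaves A in
- * slot 2, so the least frequent node is again at the root.
- */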
-
-/* ==========================================================================
- * Compute the optimal bit lengths for a tree and update the total bit length
- * for the current block.
- * IN assertion: the fields freq and dad are set, heap[heap_max] and
- * above are the tree nodes sorted by increasing frequency.
- * OUT assertions: the field len is set to the optimal bit length, the
- * array bl_count contains the frequencies for each bit length.
- * The length opt_len is updated; static_len is also updated if stree is
- * not null.
- */
-function zip_gen_bitlen(desc) { // the tree descriptor
- var tree = desc.dyn_tree;
- var extra = desc.extra_bits;
- var base = desc.extra_base;
- var max_code = desc.max_code;
- var max_length = desc.max_length;
- var stree = desc.static_tree;
- var h; // heap index
- var n, m; // iterate over the tree elements
- var bits; // bit length
- var xbits; // extra bits
- var f; // frequency
- var overflow = 0; // number of elements with bit length too large
-
- for(bits = 0; bits <= zip_MAX_BITS; bits++)
- zip_bl_count[bits] = 0;
-
- /* In a first pass, compute the optimal bit lengths (which may
- * overflow in the case of the bit length tree).
- */
- tree[zip_heap[zip_heap_max]].dl = 0; // root of the heap
-
- for(h = zip_heap_max + 1; h < zip_HEAP_SIZE; h++) {
- n = zip_heap[h];
- bits = tree[tree[n].dl].dl + 1;
- if(bits > max_length) {
- bits = max_length;
- overflow++;
- }
- tree[n].dl = bits;
- // We overwrite tree[n].dl which is no longer needed
-
- if(n > max_code)
- continue; // not a leaf node
-
- zip_bl_count[bits]++;
- xbits = 0;
- if(n >= base)
- xbits = extra[n - base];
- f = tree[n].fc;
- zip_opt_len += f * (bits + xbits);
- if(stree != null)
- zip_static_len += f * (stree[n].dl + xbits);
- }
- if(overflow == 0)
- return;
-
- // This happens for example on obj2 and pic of the Calgary corpus
-
- // Find the first bit length which could increase:
- do {
- bits = max_length - 1;
- while(zip_bl_count[bits] == 0)
- bits--;
- zip_bl_count[bits]--; // move one leaf down the tree
- zip_bl_count[bits + 1] += 2; // move one overflow item as its brother
- zip_bl_count[max_length]--;
- /* The brother of the overflow item also moves one step up,
- * but this does not affect bl_count[max_length]
- */
- overflow -= 2;
- } while(overflow > 0);
-
- /* Now recompute all bit lengths, scanning in increasing frequency.
- * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
- * lengths instead of fixing only the wrong ones. This idea is taken
- * from 'ar' written by Haruhiko Okumura.)
- */
- for(bits = max_length; bits != 0; bits--) {
- n = zip_bl_count[bits];
- while(n != 0) {
- m = zip_heap[--h];
- if(m > max_code)
- continue;
- if(tree[m].dl != bits) {
- zip_opt_len += (bits - tree[m].dl) * tree[m].fc;
- tree[m].dl = bits; // dl holds the bit length; fc must keep the frequency
- }
- n--;
- }
- }
-}
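-
-/* Sketch of the repair loop above, restating the in-line comments: one leaf
- * at depth `bits` moves down a level and an overflow item becomes its new
- * brother at depth bits+1; the overflow item's old brother also moves up one
- * step, which is why each pass fixes two overflow items.
- */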
-
-/* ==========================================================================
- * Generate the codes for a given tree and bit counts (which need not be
- * optimal).
- * IN assertion: the array bl_count contains the bit length statistics for
- * the given tree and the field len is set for all tree elements.
- * OUT assertion: the field code is set for all tree elements of non
- * zero code length.
- */
-function zip_gen_codes(tree, // the tree to decorate
- max_code) { // largest code with non zero frequency
- var next_code = new Array(zip_MAX_BITS+1); // next code value for each bit length
- var code = 0; // running code value
- var bits; // bit index
- var n; // code index
-
- /* The distribution counts are first used to generate the code values
- * without bit reversal.
- */
- for(bits = 1; bits <= zip_MAX_BITS; bits++) {
- code = ((code + zip_bl_count[bits-1]) << 1);
- next_code[bits] = code;
- }
-
- /* Check that the bit counts in bl_count are consistent. The last code
- * must be all ones.
- */
-// Assert (code + encoder->bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
-// "inconsistent bit counts");
-// Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
-
- for(n = 0; n <= max_code; n++) {
- var len = tree[n].dl;
- if(len == 0)
- continue;
- // Now reverse the bits
- tree[n].fc = zip_bi_reverse(next_code[len]++, len);
-
-// Tracec(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
-// n, (isgraph(n) ? n : ' '), len, tree[n].fc, next_code[len]-1));
- }
-}
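-
-/* Worked example (three symbols with lengths 1, 2, 2): bl_count = {1:1, 2:2},
- * so next_code[1] = 0 and next_code[2] = (0 + 1) << 1 = 2. Assigning in
- * symbol order gives codes 0, 10 and 11, which zip_bi_reverse() then
- * bit-reverses for LSB-first emission.
- */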
-
-/* ==========================================================================
- * Construct one Huffman tree and assign the code bit strings and lengths.
- * Update the total bit length for the current block.
- * IN assertion: the field freq is set for all tree elements.
- * OUT assertions: the fields len and code are set to the optimal bit length
- * and corresponding code. The length opt_len is updated; static_len is
- * also updated if stree is not null. The field max_code is set.
- */
-function zip_build_tree(desc) { // the tree descriptor
- var tree = desc.dyn_tree;
- var stree = desc.static_tree;
- var elems = desc.elems;
- var n, m; // iterate over heap elements
- var max_code = -1; // largest code with non zero frequency
- var node = elems; // next internal node of the tree
-
- /* Construct the initial heap, with least frequent element in
- * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
- * heap[0] is not used.
- */
- zip_heap_len = 0;
- zip_heap_max = zip_HEAP_SIZE;
-
- for(n = 0; n < elems; n++) {
- if(tree[n].fc != 0) {
- zip_heap[++zip_heap_len] = max_code = n;
- zip_depth[n] = 0;
- } else
- tree[n].dl = 0;
- }
-
- /* The pkzip format requires that at least one distance code exists,
- * and that at least one bit should be sent even if there is only one
- * possible code. So to avoid special checks later on we force at least
- * two codes of non zero frequency.
- */
- while(zip_heap_len < 2) {
- var xnew = zip_heap[++zip_heap_len] = (max_code < 2 ? ++max_code : 0);
- tree[xnew].fc = 1;
- zip_depth[xnew] = 0;
- zip_opt_len--;
- if(stree != null)
- zip_static_len -= stree[xnew].dl;
- // xnew is 0 or 1 so it does not have extra bits
- }
- desc.max_code = max_code;
-
- /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
- * establish sub-heaps of increasing lengths:
- */
- for(n = zip_heap_len >> 1; n >= 1; n--)
- zip_pqdownheap(tree, n);
-
- /* Construct the Huffman tree by repeatedly combining the least two
- * frequent nodes.
- */
- do {
- n = zip_heap[zip_SMALLEST];
- zip_heap[zip_SMALLEST] = zip_heap[zip_heap_len--];
- zip_pqdownheap(tree, zip_SMALLEST);
-
- m = zip_heap[zip_SMALLEST]; // m = node of next least frequency
-
- // keep the nodes sorted by frequency
- zip_heap[--zip_heap_max] = n;
- zip_heap[--zip_heap_max] = m;
-
- // Create a new node father of n and m
- tree[node].fc = tree[n].fc + tree[m].fc;
-// depth[node] = (char)(MAX(depth[n], depth[m]) + 1);
- zip_depth[node] = (zip_depth[n] > zip_depth[m] ? zip_depth[n] : zip_depth[m]) + 1;
- tree[n].dl = tree[m].dl = node;
-
- // and insert the new node in the heap
- zip_heap[zip_SMALLEST] = node++;
- zip_pqdownheap(tree, zip_SMALLEST);
-
- } while(zip_heap_len >= 2);
-
- zip_heap[--zip_heap_max] = zip_heap[zip_SMALLEST];
-
- /* At this point, the fields freq and dad are set. We can now
- * generate the bit lengths.
- */