diff --git a/packages/pg-pool/README.md b/packages/pg-pool/README.md index 3ff657fe0..f3bb2d6be 100644 --- a/packages/pg-pool/README.md +++ b/packages/pg-pool/README.md @@ -1,9 +1,11 @@ # pg-pool + [![Build Status](https://travis-ci.org/brianc/node-pg-pool.svg?branch=master)](https://travis-ci.org/brianc/node-pg-pool) A connection pool for node-postgres ## install + ```sh npm i pg-pool pg ``` @@ -48,14 +50,15 @@ const pgNativePool = new Pool({ Client: PgNativeClient }) ``` ##### Note: + The Pool constructor does not support passing a Database URL as the parameter. To use pg-pool on heroku, for example, you need to parse the URL into a config object. Here is an example of how to parse a Database URL. ```js -const Pool = require('pg-pool'); +const Pool = require('pg-pool') const url = require('url') -const params = url.parse(process.env.DATABASE_URL); -const auth = params.auth.split(':'); +const params = url.parse(process.env.DATABASE_URL) +const auth = params.auth.split(':') const config = { user: auth[0], @@ -63,10 +66,10 @@ const config = { host: params.hostname, port: params.port, database: params.pathname.split('/')[1], - ssl: true -}; + ssl: true, +} -const pool = new Pool(config); +const pool = new Pool(config) /* Transforms, 'postgres://DBuser:secret@DBHost:#####/myDB', into @@ -79,7 +82,7 @@ const pool = new Pool(config); ssl: true } */ -``` +``` ### acquire clients with a promise @@ -87,15 +90,17 @@ pg-pool supports a fully promise-based api for acquiring clients ```js const pool = new Pool() -pool.connect().then(client => { - client.query('select $1::text as name', ['pg-pool']).then(res => { - client.release() - console.log('hello from', res.rows[0].name) - }) - .catch(e => { - client.release() - console.error('query error', e.message, e.stack) - }) +pool.connect().then((client) => { + client + .query('select $1::text as name', ['pg-pool']) + .then((res) => { + client.release() + console.log('hello from', res.rows[0].name) + }) + .catch((e) => { + 
client.release() + console.error('query error', e.message, e.stack) + }) }) ``` @@ -105,7 +110,7 @@ this ends up looking much nicer if you're using [co](https://github.com/tj/co) o ```js // with async/await -(async () => { +;(async () => { const pool = new Pool() const client = await pool.connect() try { @@ -114,10 +119,10 @@ this ends up looking much nicer if you're using [co](https://github.com/tj/co) o } finally { client.release() } -})().catch(e => console.error(e.message, e.stack)) +})().catch((e) => console.error(e.message, e.stack)) // with co -co(function * () { +co(function* () { const client = yield pool.connect() try { const result = yield client.query('select $1::text as name', ['brianc']) @@ -125,7 +130,7 @@ co(function * () { } finally { client.release() } -}).catch(e => console.error(e.message, e.stack)) +}).catch((e) => console.error(e.message, e.stack)) ``` ### your new favorite helper method @@ -148,14 +153,14 @@ pool.query('SELECT $1::text as name', ['brianc'], function (err, res) { }) ``` -__pro tip:__ unless you need to run a transaction (which requires a single client for multiple queries) or you +**pro tip:** unless you need to run a transaction (which requires a single client for multiple queries) or you have some other edge case like [streaming rows](https://github.com/brianc/node-pg-query-stream) or using a [cursor](https://github.com/brianc/node-pg-cursor) -you should almost always just use `pool.query`. Its easy, it does the right thing :tm:, and wont ever forget to return +you should almost always just use `pool.query`. Its easy, it does the right thing :tm:, and wont ever forget to return clients back to the pool after the query is done. ### drop-in backwards compatible -pg-pool still and will always support the traditional callback api for acquiring a client. This is the exact API node-postgres has shipped with for years: +pg-pool still and will always support the traditional callback api for acquiring a client. 
This is the exact API node-postgres has shipped with for years: ```js const pool = new Pool() @@ -175,7 +180,7 @@ pool.connect((err, client, done) => { ### shut it down When you are finished with the pool if all the clients are idle the pool will close them after `config.idleTimeoutMillis` and your app -will shutdown gracefully. If you don't want to wait for the timeout you can end the pool as follows: +will shutdown gracefully. If you don't want to wait for the timeout you can end the pool as follows: ```js const pool = new Pool() @@ -187,7 +192,7 @@ await pool.end() ### a note on instances -The pool should be a __long-lived object__ in your application. Generally you'll want to instantiate one pool when your app starts up and use the same instance of the pool throughout the lifetime of your application. If you are frequently creating a new pool within your code you likely don't have your pool initialization code in the correct place. Example: +The pool should be a **long-lived object** in your application. Generally you'll want to instantiate one pool when your app starts up and use the same instance of the pool throughout the lifetime of your application. If you are frequently creating a new pool within your code you likely don't have your pool initialization code in the correct place. Example: ```js // assume this is a file in your program at ./your-app/lib/db.js @@ -215,11 +220,11 @@ module.exports.connect = () => { ### events -Every instance of a `Pool` is an event emitter. These instances emit the following events: +Every instance of a `Pool` is an event emitter. These instances emit the following events: #### error -Emitted whenever an idle client in the pool encounters an error. This is common when your PostgreSQL server shuts down, reboots, or a network partition otherwise causes it to become unavailable while your pool has connected clients. +Emitted whenever an idle client in the pool encounters an error. 
This is common when your PostgreSQL server shuts down, reboots, or a network partition otherwise causes it to become unavailable while your pool has connected clients. Example: @@ -229,7 +234,7 @@ const pool = new Pool() // attach an error handler to the pool for when a connected, idle client // receives an error by being disconnected, etc -pool.on('error', function(error, client) { +pool.on('error', function (error, client) { // handle this in the same way you would treat process.on('uncaughtException') // it is supplied the error as well as the idle client which received the error }) @@ -237,7 +242,7 @@ pool.on('error', function(error, client) { #### connect -Fired whenever the pool creates a __new__ `pg.Client` instance and successfully connects it to the backend. +Fired whenever the pool creates a **new** `pg.Client` instance and successfully connects it to the backend. Example: @@ -247,20 +252,19 @@ const pool = new Pool() const count = 0 -pool.on('connect', client => { +pool.on('connect', (client) => { client.count = count++ }) pool .connect() - .then(client => { + .then((client) => { return client .query('SELECT $1::int AS "clientCount"', [client.count]) - .then(res => console.log(res.rows[0].clientCount)) // outputs 0 + .then((res) => console.log(res.rows[0].clientCount)) // outputs 0 .then(() => client) }) - .then(client => client.release()) - + .then((client) => client.release()) ``` #### acquire @@ -293,12 +297,11 @@ setTimeout(function () { console.log('connect count:', connectCount) // output: connect count: 10 console.log('acquire count:', acquireCount) // output: acquire count: 200 }, 100) - ``` ### environment variables -pg-pool & node-postgres support some of the same environment variables as `psql` supports. The most common are: +pg-pool & node-postgres support some of the same environment variables as `psql` supports. 
The most common are: ``` PGDATABASE=my_db @@ -308,40 +311,19 @@ PGPORT=5432 PGSSLMODE=require ``` -Usually I will export these into my local environment via a `.env` file with environment settings or export them in `~/.bash_profile` or something similar. This way I get configurability which works with both the postgres suite of tools (`psql`, `pg_dump`, `pg_restore`) and node, I can vary the environment variables locally and in production, and it supports the concept of a [12-factor app](http://12factor.net/) out of the box. - -## bring your own promise - -In versions of node `<=0.12.x` there is no native promise implementation available globally. You can polyfill the promise globally like this: - -```js -// first run `npm install promise-polyfill --save -if (typeof Promise == 'undefined') { - global.Promise = require('promise-polyfill') -} -``` - -You can use any other promise implementation you'd like. The pool also allows you to configure the promise implementation on a per-pool level: - -```js -const bluebirdPool = new Pool({ - Promise: require('bluebird') -}) -``` - -__please note:__ in node `<=0.12.x` the pool will throw if you do not provide a promise constructor in one of the two ways mentioned above. In node `>=4.0.0` the pool will use the native promise implementation by default; however, the two methods above still allow you to "bring your own." +Usually I will export these into my local environment via a `.env` file with environment settings or export them in `~/.bash_profile` or something similar. This way I get configurability which works with both the postgres suite of tools (`psql`, `pg_dump`, `pg_restore`) and node, I can vary the environment variables locally and in production, and it supports the concept of a [12-factor app](http://12factor.net/) out of the box. ## maxUses and read-replica autoscaling (e.g. 
AWS Aurora) The maxUses config option can help an application instance rebalance load against a replica set that has been auto-scaled after the connection pool is already full of healthy connections. -The mechanism here is that a connection is considered "expended" after it has been acquired and released `maxUses` number of times. Depending on the load on your system, this means there will be an approximate time in which any given connection will live, thus creating a window for rebalancing. +The mechanism here is that a connection is considered "expended" after it has been acquired and released `maxUses` number of times. Depending on the load on your system, this means there will be an approximate time in which any given connection will live, thus creating a window for rebalancing. -Imagine a scenario where you have 10 app instances providing an API running against a replica cluster of 3 that are accessed via a round-robin DNS entry. Each instance runs a connection pool size of 20. With an ambient load of 50 requests per second, the connection pool will likely fill up in a few minutes with healthy connections. +Imagine a scenario where you have 10 app instances providing an API running against a replica cluster of 3 that are accessed via a round-robin DNS entry. Each instance runs a connection pool size of 20. With an ambient load of 50 requests per second, the connection pool will likely fill up in a few minutes with healthy connections. -If you have weekly bursts of traffic which peak at 1,000 requests per second, you might want to grow your replicas to 10 during this period. Without setting `maxUses`, the new replicas will not be adopted by the app servers without an intervention -- namely, restarting each in turn in order to build up new connection pools that are balanced against all the replicas. 
Adding additional app server instances will help to some extent because they will adopt all the replicas in an even way, but the initial app servers will continue to focus additional load on the original replicas. +If you have weekly bursts of traffic which peak at 1,000 requests per second, you might want to grow your replicas to 10 during this period. Without setting `maxUses`, the new replicas will not be adopted by the app servers without an intervention -- namely, restarting each in turn in order to build up new connection pools that are balanced against all the replicas. Adding additional app server instances will help to some extent because they will adopt all the replicas in an even way, but the initial app servers will continue to focus additional load on the original replicas. -This is where the `maxUses` configuration option comes into play. Setting `maxUses` to 7500 will ensure that over a period of 30 minutes or so the new replicas will be adopted as the pre-existing connections are closed and replaced with new ones, thus creating a window for eventual balance. +This is where the `maxUses` configuration option comes into play. Setting `maxUses` to 7500 will ensure that over a period of 30 minutes or so the new replicas will be adopted as the pre-existing connections are closed and replaced with new ones, thus creating a window for eventual balance. You'll want to test based on your own scenarios, but one way to make a first guess at `maxUses` is to identify an acceptable window for rebalancing and then solve for the value: @@ -362,7 +344,7 @@ To run tests clone the repo, `npm i` in the working dir, and then run `npm test` ## contributions -I love contributions. Please make sure they have tests, and submit a PR. If you're not sure if the issue is worth it or will be accepted it never hurts to open an issue to begin the conversation. 
If you're interested in keeping up with node-postgres releated stuff, you can follow me on twitter at [@briancarlson](https://twitter.com/briancarlson) - I generally announce any noteworthy updates there. +I love contributions. Please make sure they have tests, and submit a PR. If you're not sure if the issue is worth it or will be accepted it never hurts to open an issue to begin the conversation. If you're interested in keeping up with node-postgres related stuff, you can follow me on twitter at [@briancarlson](https://twitter.com/briancarlson) - I generally announce any noteworthy updates there. ## license diff --git a/packages/pg-pool/test/bring-your-own-promise.js b/packages/pg-pool/test/bring-your-own-promise.js deleted file mode 100644 index e905ccc0b..000000000 --- a/packages/pg-pool/test/bring-your-own-promise.js +++ /dev/null @@ -1,42 +0,0 @@ -'use strict' -const co = require('co') -const expect = require('expect.js') - -const describe = require('mocha').describe -const it = require('mocha').it -const BluebirdPromise = require('bluebird') - -const Pool = require('../') - -const checkType = (promise) => { - expect(promise).to.be.a(BluebirdPromise) - return promise.catch((e) => undefined) -} - -describe('Bring your own promise', function () { - it( - 'uses supplied promise for operations', - co.wrap(function* () { - const pool = new Pool({ Promise: BluebirdPromise }) - const client1 = yield checkType(pool.connect()) - client1.release() - yield checkType(pool.query('SELECT NOW()')) - const client2 = yield checkType(pool.connect()) - // TODO - make sure pg supports BYOP as well - client2.release() - yield checkType(pool.end()) - }) - ) - - it( - 'uses promises in errors', - co.wrap(function* () { - const pool = new Pool({ Promise: BluebirdPromise, port: 48484 }) - yield checkType(pool.connect()) - yield checkType(pool.end()) - yield checkType(pool.connect()) - yield checkType(pool.query()) - yield checkType(pool.end()) - }) - ) -}) diff --git
a/packages/pg/lib/client.js b/packages/pg/lib/client.js index 8b44961d4..903db6c66 100644 --- a/packages/pg/lib/client.js +++ b/packages/pg/lib/client.js @@ -2,6 +2,7 @@ const EventEmitter = require('events').EventEmitter const utils = require('./utils') +const nodeUtils = require('node:util') const sasl = require('./crypto/sasl') const TypeOverrides = require('./type-overrides') @@ -11,6 +12,27 @@ const defaults = require('./defaults') const Connection = require('./connection') const crypto = require('./crypto/utils') +const activeQueryDeprecationNotice = nodeUtils.deprecate( + () => {}, + 'Client.activeQuery is deprecated and will be removed in a future version.' +) + +const queryQueueDeprecationNotice = nodeUtils.deprecate( + () => {}, + 'Client.queryQueue is deprecated and will be removed in a future version.' +) + +const pgPassDeprecationNotice = nodeUtils.deprecate( + () => {}, + 'pgpass support is deprecated and will be removed in a future version. ' + + 'You can provide an async function as the password property to the Client/Pool constructor that returns a password instead. Within this function you can call the pgpass module in your own code.' +) + +const byoPromiseDeprecationNotice = nodeUtils.deprecate( + () => {}, + 'Passing a custom Promise implementation to the Client/Pool constructor is deprecated and will be removed in a future version.' 
+) + class Client extends EventEmitter { constructor(config) { super() @@ -34,6 +56,9 @@ class Client extends EventEmitter { const c = config || {} + if (c.Promise) { + byoPromiseDeprecationNotice() + } this._Promise = c.Promise || global.Promise this._types = new TypeOverrides(c.types) this._ending = false @@ -42,6 +67,7 @@ class Client extends EventEmitter { this._connected = false this._connectionError = false this._queryable = true + this._activeQuery = null this.enableChannelBinding = Boolean(c.enableChannelBinding) // set true to use SCRAM-SHA-256-PLUS when offered this.connection = @@ -53,7 +79,7 @@ class Client extends EventEmitter { keepAliveInitialDelayMillis: c.keepAliveInitialDelayMillis || 0, encoding: this.connectionParameters.client_encoding || 'utf8', }) - this.queryQueue = [] + this._queryQueue = [] this.binary = c.binary || defaults.binary this.processID = null this.secretKey = null @@ -70,6 +96,20 @@ class Client extends EventEmitter { this._connectionTimeoutMillis = c.connectionTimeoutMillis || 0 } + get activeQuery() { + activeQueryDeprecationNotice() + return this._activeQuery + } + + set activeQuery(val) { + activeQueryDeprecationNotice() + this._activeQuery = val + } + + _getActiveQuery() { + return this._activeQuery + } + _errorAllQueries(err) { const enqueueError = (query) => { process.nextTick(() => { @@ -77,13 +117,14 @@ class Client extends EventEmitter { }) } - if (this.activeQuery) { - enqueueError(this.activeQuery) - this.activeQuery = null + const activeQuery = this._getActiveQuery() + if (activeQuery) { + enqueueError(activeQuery) + this._activeQuery = null } - this.queryQueue.forEach(enqueueError) - this.queryQueue.length = 0 + this._queryQueue.forEach(enqueueError) + this._queryQueue.length = 0 } _connect(callback) { @@ -203,9 +244,7 @@ class Client extends EventEmitter { con.on('notification', this._handleNotification.bind(this)) } - // TODO(bmc): deprecate pgpass "built in" integration since this.password can be a function - // 
it can be supplied by the user if required - this is a breaking change! - _checkPgPass(cb) { + _getPassword(cb) { const con = this.connection if (typeof this.password === 'function') { this._Promise @@ -233,6 +272,7 @@ class Client extends EventEmitter { const pgPass = require('pgpass') pgPass(this.connectionParameters, (pass) => { if (undefined !== pass) { + pgPassDeprecationNotice() this.connectionParameters.password = this.password = pass } cb() @@ -244,13 +284,13 @@ class Client extends EventEmitter { } _handleAuthCleartextPassword(msg) { - this._checkPgPass(() => { + this._getPassword(() => { this.connection.password(this.password) }) } _handleAuthMD5Password(msg) { - this._checkPgPass(async () => { + this._getPassword(async () => { try { const hashedPassword = await crypto.postgresMd5PasswordHash(this.user, this.password, msg.salt) this.connection.password(hashedPassword) @@ -261,7 +301,7 @@ class Client extends EventEmitter { } _handleAuthSASL(msg) { - this._checkPgPass(() => { + this._getPassword(() => { try { this.saslSession = sasl.startSession(msg.mechanisms, this.enableChannelBinding && this.connection.stream) this.connection.sendSASLInitialResponseMessage(this.saslSession.mechanism, this.saslSession.response) @@ -314,8 +354,8 @@ class Client extends EventEmitter { } this.emit('connect') } - const { activeQuery } = this - this.activeQuery = null + const activeQuery = this._getActiveQuery() + this._activeQuery = null this.readyForQuery = true if (activeQuery) { activeQuery.handleReadyForQuery(this.connection) @@ -355,49 +395,51 @@ class Client extends EventEmitter { if (this._connecting) { return this._handleErrorWhileConnecting(msg) } - const activeQuery = this.activeQuery + const activeQuery = this._getActiveQuery() if (!activeQuery) { this._handleErrorEvent(msg) return } - this.activeQuery = null + this._activeQuery = null activeQuery.handleError(msg, this.connection) } _handleRowDescription(msg) { // delegate rowDescription to active query - 
this.activeQuery.handleRowDescription(msg) + this._getActiveQuery().handleRowDescription(msg) } _handleDataRow(msg) { // delegate dataRow to active query - this.activeQuery.handleDataRow(msg) + this._getActiveQuery().handleDataRow(msg) } _handlePortalSuspended(msg) { // delegate portalSuspended to active query - this.activeQuery.handlePortalSuspended(this.connection) + this._getActiveQuery().handlePortalSuspended(this.connection) } _handleEmptyQuery(msg) { // delegate emptyQuery to active query - this.activeQuery.handleEmptyQuery(this.connection) + this._getActiveQuery().handleEmptyQuery(this.connection) } _handleCommandComplete(msg) { - if (this.activeQuery == null) { + const activeQuery = this._getActiveQuery() + if (activeQuery == null) { const error = new Error('Received unexpected commandComplete message from backend.') this._handleErrorEvent(error) return } // delegate commandComplete to active query - this.activeQuery.handleCommandComplete(msg, this.connection) + activeQuery.handleCommandComplete(msg, this.connection) } _handleParseComplete() { - if (this.activeQuery == null) { + const activeQuery = this._getActiveQuery() + if (activeQuery == null) { const error = new Error('Received unexpected parseComplete message from backend.') this._handleErrorEvent(error) return @@ -405,17 +447,17 @@ class Client extends EventEmitter { // if a prepared statement has a name and properly parses // we track that its already been executed so we don't parse // it again on the same client - if (this.activeQuery.name) { - this.connection.parsedStatements[this.activeQuery.name] = this.activeQuery.text + if (activeQuery.name) { + this.connection.parsedStatements[activeQuery.name] = activeQuery.text } } _handleCopyInResponse(msg) { - this.activeQuery.handleCopyInResponse(this.connection) + this._getActiveQuery().handleCopyInResponse(this.connection) } _handleCopyData(msg) { - this.activeQuery.handleCopyData(msg, this.connection) + this._getActiveQuery().handleCopyData(msg, 
this.connection) } _handleNotification(msg) { @@ -471,8 +513,8 @@ class Client extends EventEmitter { con.on('connect', function () { con.cancel(client.processID, client.secretKey) }) - } else if (client.queryQueue.indexOf(query) !== -1) { - client.queryQueue.splice(client.queryQueue.indexOf(query), 1) + } else if (client._queryQueue.indexOf(query) !== -1) { + client._queryQueue.splice(client._queryQueue.indexOf(query), 1) } } @@ -497,21 +539,22 @@ class Client extends EventEmitter { _pulseQueryQueue() { if (this.readyForQuery === true) { - this.activeQuery = this.queryQueue.shift() - if (this.activeQuery) { + this._activeQuery = this._queryQueue.shift() + const activeQuery = this._getActiveQuery() + if (activeQuery) { this.readyForQuery = false this.hasExecuted = true - const queryError = this.activeQuery.submit(this.connection) + const queryError = activeQuery.submit(this.connection) if (queryError) { process.nextTick(() => { - this.activeQuery.handleError(queryError, this.connection) + activeQuery.handleError(queryError, this.connection) this.readyForQuery = true this._pulseQueryQueue() }) } } else if (this.hasExecuted) { - this.activeQuery = null + this._activeQuery = null this.emit('drain') } } @@ -565,9 +608,9 @@ class Client extends EventEmitter { query.callback = () => {} // Remove from queue - const index = this.queryQueue.indexOf(query) + const index = this._queryQueue.indexOf(query) if (index > -1) { - this.queryQueue.splice(index, 1) + this._queryQueue.splice(index, 1) } this._pulseQueryQueue() @@ -601,7 +644,7 @@ class Client extends EventEmitter { return result } - this.queryQueue.push(query) + this._queryQueue.push(query) this._pulseQueryQueue() return result } @@ -626,7 +669,7 @@ class Client extends EventEmitter { } } - if (this.activeQuery || !this._queryable) { + if (this._getActiveQuery() || !this._queryable) { // if we have an active query we need to force a disconnect // on the socket - otherwise a hung query could block end forever 
this.connection.stream.destroy() @@ -642,6 +685,10 @@ class Client extends EventEmitter { }) } } + get queryQueue() { + queryQueueDeprecationNotice() + return this._queryQueue + } } // expose a Query constructor diff --git a/packages/pg/test/unit/client/stream-and-query-error-interaction-tests.js b/packages/pg/test/unit/client/stream-and-query-error-interaction-tests.js index 8a5e4656c..166cd9f35 100644 --- a/packages/pg/test/unit/client/stream-and-query-error-interaction-tests.js +++ b/packages/pg/test/unit/client/stream-and-query-error-interaction-tests.js @@ -31,8 +31,6 @@ suite.test('emits end when not in query', function () { client.connection.emit('connect') process.nextTick(function () { client.connection.emit('readyForQuery') - assert.equal(client.queryQueue.length, 0) - assert(client.activeQuery, 'client should have issued query') process.nextTick(function () { stream.emit('close') })