# -------------------------------------------
# Simple PostgreSQL Configuration File v. 10
# -------------------------------------------
# This file provides a simple configuration with the most common options
# which most users need to modify for running PostgreSQL in production,
# including extensive notes on how to set each of these. If your configuration
# needs are more specific, then use the standard postgresql.conf, or add
# additional configuration options to the bottom of this file.
#
# This file is re-read when you send a SIGHUP to the server, or on a full
# restart. Note that on a SIGHUP, simply commenting a setting back out is not
# enough to reset it to its default value; the last explicit value you set
# will still be in effect.
#
# AvRAM: Several of the formulas below ask for "AvRAM", which is short for
# "Available RAM". This refers to the amount of memory which is available for
# running PostgreSQL. On a dedicated PostgreSQL server, you can use the total
# system RAM, but on shared servers you need to estimate what portion of RAM
# is usually available for PostgreSQL.
#
# Each setting below lists one recommended starting setting, followed by
# several alternate settings which are commented out. If multiple settings
# are uncommented, the *last* one will take effect.
# CONNECTIONS AND SECURITY
# ========================
# listen_addresses
# ------------------------
# listen_addresses takes a list of network interfaces the Postmaster will
# listen on. The setting below, '*', listens on all interfaces, and is only
# appropriate for development servers and initial setup. Otherwise, it
# should be restricted to specific addresses. Note that most
# PostgreSQL access control settings are in the pg_hba.conf file.
listen_addresses = '*' # all interfaces
# listen_addresses = 'localhost' # unix sockets and loopback only
# listen_addresses = 'localhost,192.168.1.1' # local and one external interface
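# As an illustration (the network, user and auth method here are assumptions,
# not recommendations), a matching pg_hba.conf entry allowing SSL connections
# from one internal subnet might look like:
# hostssl  all  webapp  192.168.1.0/24  md5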
# max_connections
# ------------------------
# An integer setting a limit on the number of new connection processes which
# PostgreSQL will create. Should be set to the maximum number of connections
# which you expect to need at peak load. Note that each connection uses
# shared_buffer memory, as well as additional non-shared memory, so be careful
# not to run the system out of memory. In general, if you need more than 200
# connections, you should probably be making more use of connection pooling.
#
# Note that by default 3 connections are reserved for autovacuum and
# administration, and more may be used by replication.
max_connections = 100 # small server
# max_connections = 500 # web application database
# max_connections = 40 # data warehousing database
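# To see how close you are to the limit on a running server, compare the
# current session count against the configured maximum, for example:
# SELECT count(*) FROM pg_stat_activity;
# SHOW max_connections;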
# SSL
# ------------
#
# All PostgreSQL installations should use SSL connections whenever and
# wherever possible. This requires setting up SSL as part of your
# PostgreSQL installation; standard Linux and Windows packaging do this,
# usually including the configuration settings as well. Note that if
# SSL is not set up correctly and ssl = on, Postgres will not start.
# SSL file locations default to PGDATA unless given a full path.
ssl = on
# ssl_ca_file = 'server.ca'
# ssl_cert_file = 'server.crt'
# ssl_crl_file = 'server.crl'
# ssl_key_file = 'server.key'
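# If your packaging did not create a certificate, a self-signed one can be
# generated roughly as follows (the hostname is a placeholder; a CA-signed
# certificate is preferable for production):
# openssl req -new -x509 -days 365 -nodes -text -out server.crt \
#   -keyout server.key -subj "/CN=dbhost.example.com"
# chmod og-rwx server.key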
# MEMORY AND DISK
# =======================
# shared_buffers
# ------------------------
# A memory quantity defining PostgreSQL's "dedicated" RAM, which is used
# for connection control, active operations, and more. However, since
# PostgreSQL also needs free RAM for file system buffers, sorts and
# maintenance operations, it is not advisable to set shared_buffers to a
# majority of RAM.
#
# Note that shared_buffers over 2GB is only supported on 64-bit systems.
# At this time, it's unclear whether buffers larger than around 8GB are
# actually beneficial even on machines with very large amounts of RAM.
#
# The setting below is a formula. Calculate the resulting value, then
# uncomment it. Values should be expressed in kB, MB or GB.
# shared_buffers = ( AvRAM / 4 ) LIMIT 8GB
# shared_buffers = 512MB # basic 2GB web server
# shared_buffers = 8GB # 64-bit server with 512GB RAM
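# Worked example of the formula above: a dedicated server with 16GB of
# available RAM gives 16GB / 4 = 4GB, well under the 8GB cap.
# shared_buffers = 4GB # dedicated 16GB server (illustrative)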
# work_mem
# ------------------------
# This memory quantity sets the limit for the amount of non-shared RAM
# available for each query operation, including sorts and hashes. This limit
# acts as a primitive resource control, preventing the server from going
# into swap due to overallocation. Note that this is non-shared RAM per
# *operation*, which means large complex queries can use multiple times
# this amount. Also, work_mem is allocated in powers of two, so round
# down to the nearest binary step.
# The setting below is a formula. Calculate the resulting value, then
# uncomment it. Values should be expressed in kB, MB or GB. Note that
# it's seldom effective to set work_mem over 2GB, and that you cannot do
# so at all on 32-bit servers. If max_connections is set higher than the
# number of connections you actually expect to use at once, base the
# formula on the latter.
# Most web applications should use the formula below, because their
# queries often require no work_mem.
# work_mem = ( AvRAM / max_connections ) ROUND DOWN to 2^x
# work_mem = 2MB # for 2GB server with 700 connections
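# Worked example of the web formula: 2GB / 700 connections is roughly 2.9MB,
# which rounds down to the nearest power of two, 2MB, as in the example above.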
# Formula for most BI/DW applications, or others running many complex
# queries:
# work_mem = ( AvRAM / ( 2 * max_connections ) ) ROUND DOWN to 2^x
# work_mem = 128MB # DW server with 32GB RAM and 40 connections
# maintenance_work_mem
# -------------------------
# This memory value sets the limit for the amount that autovacuum,
# manual vacuum, bulk index build and other maintenance routines are
# permitted to use. Setting it to a moderately high value will increase
# the efficiency of manual vacuum and other operations. It has not been
# shown that values over 2GB speed up operations.
# The setting below is a formula. Calculate the resulting value, then
# uncomment it. Values should be expressed in kB, MB or GB.
# maintenance_work_mem = ( AvRAM / 8 ) LIMIT 2GB, ROUND DOWN to 2^x
# maintenance_work_mem = 2GB #DW server with 32GB RAM
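# Worked example: 32GB / 8 = 4GB, which the 2GB limit then caps at 2GB,
# matching the example above.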
# autovacuum_work_mem
# ----------------------
#
# This is the amount of RAM autovacuum has available for operations.
# This defaults to maintenance_work_mem, but with multiple autovacuum workers,
# it makes sense to set it to a different, lower value. Values over
# 1GB may be counter-productive.
# The setting below is a formula. Calculate the resulting value, then
# uncomment it. Values should be expressed in kB, MB or GB.
# autovacuum_work_mem = ( AvRAM / 16 ) LIMIT 1GB, ROUND DOWN to 2^x
# autovacuum_work_mem = 1GB #servers with 16GB or more
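# Worked example: 16GB / 16 = 1GB, which is also the suggested ceiling.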
# effective_cache_size
# --------------------------
# This memory setting tells the PostgreSQL query planner how much RAM
# is estimated to be available for caching data, in both shared_buffers and
# in the filesystem cache. This setting just helps the planner make good
# cost estimates; it does not actually allocate the memory.
# The default of 4GB works for a lot of users even if they have more
# RAM than that.
# The setting below is a formula. Calculate the resulting value, then
# uncomment it.
# effective_cache_size = ( AvRAM * 0.75 )
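# Worked example: a dedicated server with 32GB of RAM gives
# 32GB * 0.75 = 24GB.
# effective_cache_size = 24GB # dedicated 32GB server (illustrative)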
# random_page_cost
# -------------------
# Helps the query planner estimate the cost of seek operations on disk.
# The default, 4.0, is good for spinning HDDs, but is too high for
# SSDs, SANs, or many types of cloud storage.
# random_page_cost = 4.0 # if using HDDs
# random_page_cost = 1.5 # if using SSDs or high-end storage
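# If only some of your storage is fast, this can also be set per tablespace
# rather than globally; the tablespace name below is hypothetical:
# ALTER TABLESPACE fast_ssd SET (random_page_cost = 1.5);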
# SAFETY
# =======
# data_checksums
# ----------------
# This is not a setting that you can set at runtime. Instead, it is an
# initdb option to set when creating your database. Please set it; while
# data checksums do increase IO activity, you will be thankful for them
# when you eventually have data corruption due to hardware or storage
# issues. Checksums allow you to detect the corruption right after it
# happens, instead of much later when it's been copied to replicas and
# backups.
# initdb --data-checksums -D $PGDATA
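# To verify whether an existing cluster was initialized with checksums:
# SHOW data_checksums;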
# idle_in_transaction_session_timeout
# -----------------------------------
#
# Idle transactions (that is, open transactions that have been
# abandoned) cause an assortment of problems for PostgreSQL, both
# in terms of maintenance and query performance. In older versions
# of Postgres, many admins wrote cron jobs to kill these hourly.
# Now this is built in: sessions with abandoned transactions are
# terminated after the timeout. Make sure your application can
# handle the connection termination, though!
idle_in_transaction_session_timeout = 30min
# idle_in_transaction_session_timeout = 1min # if you know your workload well
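# To see which sessions would be affected before enabling this, you can
# list the currently abandoned transactions:
# SELECT pid, usename, xact_start FROM pg_stat_activity
#   WHERE state = 'idle in transaction';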
# statement_timeout
# -------------------
#
# Set this to prevent "run-on" queries which consume all resources on
# the system. The setting should be about ten times as long as the longest
# query you expect to legitimately run. Remember that this affects
# reporting queries and pg_dump as well, so it's generally better to set
# this as a property of the application database user.
# statement_timeout = 10*longest query
# statement_timeout = 10min #good starter setting for webapp
# better:
# ALTER ROLE webapp SET statement_timeout = '1min';
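# Conversely, roles that legitimately run long statements (backups, reports)
# can be exempted; the role name here is hypothetical:
# ALTER ROLE backup_user SET statement_timeout = 0;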
# temp_file_limit
# -------------------
#
# This limits the total amount of disk space a single process may use for
# temporary files (for sorts, hashes and merges) before its query gets
# cancelled. Setting it prevents a runaway query from effectively DOSing
# your server by running it out of disk space.
# However, this needs to be set relative to the size of your queries
# and your available disk space, so some suggested sizes for you to
# modify are below.
# temp_file_limit = 250MB # web DB on cloud hosting
# temp_file_limit = 1GB # mixed use server with 30GB free
# temp_file_limit = 25GB # analytics server with terabyte storage
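# To size this for your workload, check how much temporary file space your
# databases have actually been using:
# SELECT datname, temp_files, temp_bytes FROM pg_stat_database;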
# REPLICATION & ARCHIVING
# =======================
#
# Pretty much every PostgreSQL server uses replication, archiving, or both.
# Configuring replication can require changing several settings together.
# It also requires changes to pg_hba.conf, recovery.conf, and possibly
# creating a replication user. The groups below assume that you'll have
# the same settings on both master and standby, and only include
# non-default settings.
# Simple binary async streaming replication
# wal_keep_segments = 4 #up to 32 for very busy servers
# hot_standby_feedback = on
# track_commit_timestamp = on #for monitoring purposes
# max_wal_senders = 2xReplicas # if you expect more than 6 replicas
# max_replication_slots = 2xReplicas # if you expect more than 6 replicas
# Logical replication, for example for upgrades
# wal_level = logical
# wal_keep_segments = 4 #up to 32 for very busy servers
# hot_standby_feedback = off #on logical replica
# track_commit_timestamp = on #needed for some tools
# max_wal_senders = 2xReplicas # if you expect more than 6 replicas
# max_replication_slots = 2xReplicas # if you expect more than 6 replicas
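# Either setup also needs a replication role and a matching pg_hba.conf
# entry; the role name, password and network below are placeholders:
# CREATE ROLE replicator WITH REPLICATION LOGIN PASSWORD 'change-me';
# host  replication  replicator  192.168.1.0/24  md5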
# Archiving
# ----------
#
# For continuous backup, archiving needs to be enabled on the current
# master server. The archive command will be supplied by whatever utility
# you are using to manage backups (e.g. WAL-E, pgbackrest).
# archive_mode = on
# archive_command = '/path/to/archive/command'
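# For example, with pgbackrest the command takes roughly this form (the
# stanza name is a placeholder for your backup configuration):
# archive_command = 'pgbackrest --stanza=main archive-push %p'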
# log_destination & logging settings
# --------------------------
# This list value determines where PostgreSQL's logs are sent. What
# setting to use really depends on your server room setup and the
# production status and OS of your server.
#
# Note that there are several dozen settings on what and how often
# to log; these will not be covered in detail in this quick
# configuration file. Instead, several common combinations are
# given.
# Syslog setup for centralized monitoring
# log_destination = 'syslog'
# syslog_facility = 'LOCAL0' #local syslog
# syslog_ident = 'POSTGRES1' #or something more unique per server
# log_line_prefix = 'db=%d,user=%u,app=%a,client=%h '
# Windows
# log_destination = 'eventlog'
# Private PostgreSQL Log, rotates weekly
# Includes logging all queries over 100ms
# log_destination = 'stderr'
# logging_collector = on
# log_directory = '/path/to/log/dir'
# log_filename = 'postgresql-%a'
# log_truncate_on_rotation = on
# log_min_duration_statement = 100ms
# CSV logging for the pgbadger log analyzer, which
# summarizes all queries and their times.
# Warning: may generate many large log files, and
# affect performance due to IO activity.
# log_destination = 'csvlog'
# logging_collector = on
# log_directory = '/path/to/log/dir'
# log_min_duration_statement = 0
# log_rotation_size = 1GB
# log_checkpoints = on
# log_connections = on
# log_disconnections = on
# log_lock_waits = on
# deadlock_timeout = 100ms
# log_temp_files = 0
# log_autovacuum_min_duration = 0
# log_error_verbosity = default
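# A typical pgbadger invocation over the resulting CSV logs (paths are
# placeholders) might be:
# pgbadger /path/to/log/dir/*.csv -o /path/to/report.html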