diff --git a/cookbooks/delayed_job/README.markdown b/cookbooks/delayed_job/README.markdown new file mode 100644 index 000000000..e896c444b --- /dev/null +++ b/cookbooks/delayed_job/README.markdown @@ -0,0 +1,28 @@ +# Delayed Job + +This cookbook can serve as a good starting point for setting up Delayed Job support in your application. +In this recipe your Delayed Job workers will be set up to run under monit. The number of workers will +vary based on the size of the instance running Delayed Job. + +## Installation + +To install this, you will need to add the following to cookbooks/main/recipes/default.rb: + + require_recipe "delayed_job" + +Make sure this and any customizations to the recipe are committed to your own fork of this +repository. + +## Restarting your workers + +This recipe does NOT restart your workers. The reason for this is that shipping your application and +rebuilding your instances (i.e. running chef) are not always done at the same time. It is best to +restart your Delayed Job workers when you ship (deploy) your application code. To do this, add a +deploy hook to perform the following: + + sudo "monit -g dj_ restart all" + +Make sure to replace with the name of your application. You likely want to use the +after_restart hook for this. See our [Deploy Hook](http://docs.engineyard.com/appcloud/howtos/deployment/use-deploy-hooks-with-engine-yard-appcloud) documentation +for more information on using deploy hooks. 
+ diff --git a/cookbooks/delayed_job/recipes/default.rb b/cookbooks/delayed_job/recipes/default.rb index 150442dad..c567cf3f7 100644 --- a/cookbooks/delayed_job/recipes/default.rb +++ b/cookbooks/delayed_job/recipes/default.rb @@ -3,7 +3,7 @@ # Recipe:: default # -if ['solo', 'util'].include?(node[:instance_role]) && !node[:name].match(/^mongodb_/) +if node[:instance_role] == "solo" || (node[:instance_role] == "util" && node[:name] !~ /^(mongodb|redis|memcache)/) node[:applications].each do |app_name,data| # determine the number of workers to run based on instance size @@ -35,7 +35,7 @@ end execute "monit-reload-restart" do - command "sleep 30 && monit quit" + command "sleep 30 && monit reload" action :run end diff --git a/cookbooks/delayed_job/templates/default/dj.monitrc.erb b/cookbooks/delayed_job/templates/default/dj.monitrc.erb index 3df3212f0..266612fa8 100644 --- a/cookbooks/delayed_job/templates/default/dj.monitrc.erb +++ b/cookbooks/delayed_job/templates/default/dj.monitrc.erb @@ -1,6 +1,6 @@ check process <%= @worker_name %> with pidfile /var/run/engineyard/dj/<%= @app_name %>/dj_<%= @worker_name %>.pid - start program = "/engineyard/bin/dj <%= @app_name %> start <%= @framework_env %> <%= @worker_name %>" - stop program = "/engineyard/bin/dj <%= @app_name %> stop <%= @framework_env %> <%= @worker_name %>" + start program = "/engineyard/bin/dj <%= @app_name %> start <%= @framework_env %> <%= @worker_name %>" with timeout 60 seconds + stop program = "/engineyard/bin/dj <%= @app_name %> stop <%= @framework_env %> <%= @worker_name %>" with timeout 60 seconds if totalmem is greater than 300 MB then restart # eating up memory? 
group dj_<%= @app_name %> \ No newline at end of file diff --git a/cookbooks/exim/README.rdoc b/cookbooks/exim/README.rdoc index 4ac8dd077..a000b932c 100644 --- a/cookbooks/exim/README.rdoc +++ b/cookbooks/exim/README.rdoc @@ -1,52 +1,30 @@ -= WARNING: - -This Cookbook contains an that configures exim to SmartRelay to another SMTP server; as well as support SMTP Authentication so you can securely deploy a SMTP Server on Amazon EC2 to relay email via Sendgrid for example. - = DESCRIPTION: -This cookbook removes ssmtp, and installs Exim and moves Exim's configuration files to the EBS for usage. - -= USAGE: - -Modify main/recipes/default.rb and add - -exim_auth "auth" do - my_hostname = "mywebsite.com" - smtp_username = "user" - smtp_password = "password" - smtp_host = "smtp.sendgrid.com" -end - -= RECIPES: +This cookbook configures exim on each instance. -== default += SECURITY: -This Cookbook will create /data/exim on your EBS and then copy the Exim configuration files over to it if they are not there already, and then start exim. This allows you to modify and keep the exim.conf and the passwd file (if you choose to enable that) on the EBS which allows great customization. +Security concerns? If you are an AppCloud customer we do not open port 25 outside of your environment in the Security Groups. So no one outside of your environment can contact your exim server. -= /data/exim/passwd: += WARNING: -This is the 'passwd' file for SMTP Authentication (clients connecting to Exim to relay out to your sendgrid account for example), it's format is very specific. +Please keep your SMTP credentials private, do not ever post them in a public github repo. If you use this recipe please mark your repository as private and never post your credentials on github. -#{username}:#{cryptedpassword}:#{plaintextpassword} - -You need to use the sytem crypt() function to generate the cryptedpassword. 
Great example is passwd deploy and then grep deploy /etc/shadow - -= SMTP VARIABLES: - -== my_hostname - -The hostname you want Exim to think it is. You can set this to your domain name so email appears to come from the same domain as your web site. - -== smtp_username += USAGE: -The Username you use to authenticate against on the Smarthost. +Step 1) Modify main/recipes/default.rb to uncomment the exim_auth block like below with your credentials. -== smtp_password +exim_auth "auth" do + my_hostname "domain.com" + smtp_host "smtp.sendgrid.com" + username "username" + password "password" +end -The Password you use to authenticate on the Smarthost. +Then commit and ey recipes upload -e and then apply ey recipes apply -e -== smtp_host += NOTES: -The Smarthost IP or FQDN, (e.g. smtp.sendgrid.com) +There is no queue monitoring with this enabled. As ideally you would be sending directly to Sendgrid. -= NOTE: If your ey-cloud-recipes repo is private you can ignore my warnings, or if you host it on a private server. +Additionally it is not monitored under monit, as we have not found a reason to do so yet. You are more then welcome to add one if you'd like it's very trivial. diff --git a/cookbooks/exim/definitions/exim_auth.rb b/cookbooks/exim/definitions/exim_auth.rb index 157cf40f4..8bebe389f 100644 --- a/cookbooks/exim/definitions/exim_auth.rb +++ b/cookbooks/exim/definitions/exim_auth.rb @@ -2,15 +2,12 @@ # Cookbook Name:: exim_ey # # Copyright 2009, Engine Yard, Inc. 
-# -# All rights reserved - Do Not Redistribute -# define :exim_auth, :my_hostname => nil, :smtp_host => nil, :username => nil, :password => nil do - include_recipe "exim::default" + include_recipe "exim::auth" - template "/data/exim/exim.conf" do + template "/etc/exim/exim.conf" do cookbook "exim" source "exim.conf.erb" owner "root" @@ -19,4 +16,11 @@ backup 2 variables(:p => params) end + + execute "ensure-exim-is-running" do + command %Q{ + /etc/init.d/exim start + } + not_if "pgrep exim" + end end diff --git a/cookbooks/exim/recipes/auth.rb b/cookbooks/exim/recipes/auth.rb new file mode 100644 index 000000000..82d4c8e15 --- /dev/null +++ b/cookbooks/exim/recipes/auth.rb @@ -0,0 +1,34 @@ +# +# Cookbook Name:: exim +# Recipe:: default +# +# Configuration settings + +package "mail-mta/ssmtp" do + action :remove + ignore_failure true +end + +directory "/etc/ssmtp" do + action :create + owner "root" + group "root" + mode "0755" +end + +execute "touch /etc/ssmtp/ssmtp.conf" do + command "touch /etc/ssmtp/ssmtp.conf" +end + +package "mail-mta/exim" do + action :install +end + +execute "symlink ssmtp" do + command "ln -sfv /usr/sbin/exim /usr/sbin/ssmtp" + not_if { FileTest.exists?("/usr/sbin/ssmtp") } +end + +package "mail-client/mailx" do + action :install +end diff --git a/cookbooks/exim/recipes/default.rb b/cookbooks/exim/recipes/default.rb index cbaa3288c..e69de29bb 100644 --- a/cookbooks/exim/recipes/default.rb +++ b/cookbooks/exim/recipes/default.rb @@ -1,67 +0,0 @@ -# -# Cookbook Name:: exim -# Recipe:: default -# -# Configuration settings - - package "mail-mta/ssmtp" do - action :remove - ignore_failure true - end - - package "mail-mta/exim" do - action :install - end - - directory "/data/exim" do - action :create - owner "root" - group "root" - mode "0755" - end - - execute "copy dist-config over to shared" do - command "cp /etc/exim/exim.conf.dist /data/exim/exim.conf" - not_if { FileTest.exists?("/data/exim/exim.conf") } - only_if { 
FileTest.directory?("/data/exim") } - end - - execute "copy system_filter.exim" do - command "cp /etc/exim/system_filter.exim /data/exim/system_filter.exim" - not_if { FileTest.exists?("/data/exim/system_filter.exim") } - only_if { FileTest.directory?("/data/exim") } - end - - execute "copy auth_conf.sub" do - command "cp /etc/exim/auth_conf.sub /data/exim/auth_conf.sub" - not_if { FileTest.exists?("/data/exim/auth_conf.sub") } - only_if { FileTest.directory?("/data/exim") } - end - - execute "remove /etc/exim" do - command "rm -rf /etc/exim && sync" - only_if { FileTest.exists?("/data/exim/exim.conf") } - end - - execute "add_symlink" do - command "cd /;ln -sfv /data/exim /etc/exim && sync" - end - - package "mail-client/mailx" do - action :install - end - - execute "symlink ssmtp" do - command "cd /;ln -sfv /usr/sbin/exim /usr/sbin/ssmtp" - end - - execute "ssmtp fixes" do - command "mkdir -p /etc/ssmtp && touch /etc/ssmtp/ssmtp.conf" - end - - execute "ensure-exim-is-running" do - command %Q{ - /etc/init.d/exim start - } - not_if "pgrep exim" - end diff --git a/cookbooks/exim/templates/default/exim.conf.erb b/cookbooks/exim/templates/default/exim.conf.erb index aef8cfee4..f4d0aa2eb 100644 --- a/cookbooks/exim/templates/default/exim.conf.erb +++ b/cookbooks/exim/templates/default/exim.conf.erb @@ -1,163 +1,795 @@ -# Copyright 2009, Engine Yard, Inc. -# -# All rights reserved - Do Not Redistribute +# $Cambridge: exim/exim-src/src/configure.default,v 1.13 2007/06/26 11:21:36 ph10 Exp $ + +###################################################################### +# Runtime configuration file for Exim # +###################################################################### + + +# This is a default configuration file which will operate correctly in +# uncomplicated installations. Please see the manual for a complete list +# of all the runtime configuration options that can be included in a +# configuration file. There are many more than are mentioned here. 
The +# manual is in the file doc/spec.txt in the Exim distribution as a plain +# ASCII file. Other formats (PostScript, Texinfo, HTML, PDF) are available +# from the Exim ftp sites. The manual is also online at the Exim web sites. + + +# This file is divided into several parts, all but the first of which are +# headed by a line starting with the word "begin". Only those parts that +# are required need to be present. Blank lines, and lines starting with # +# are ignored. + + +########### IMPORTANT ########## IMPORTANT ########### IMPORTANT ########### +# # +# Whenever you change Exim's configuration file, you *must* remember to # +# HUP the Exim daemon, because it will not pick up the new configuration # +# until you do. However, any other Exim processes that are started, for # +# example, a process started by an MUA in order to send a message, will # +# see the new configuration as soon as it is in place. # +# # +# You do not need to HUP the daemon for changes in auxiliary files that # +# are referenced from this file. They are read every time they are used. # +# # +# It is usually a good idea to test a new configuration for syntactic # +# correctness before installing it (for example, by running the command # +# "exim -C /config/file.new -bV"). # +# # +########### IMPORTANT ########## IMPORTANT ########### IMPORTANT ########### + + + +###################################################################### +# MAIN CONFIGURATION SETTINGS # +###################################################################### + +# Specify your host's canonical name here. This should normally be the fully +# qualified "official" name of your host. If this option is not set, the +# uname() function is called to obtain the name. In many cases this does +# the right thing and you need not set anything explicitly. 
-domainlist local_domains = @ -domainlist relay_to_domains = -hostlist relay_from_hosts = 127.0.0.1 primary_hostname = <%= @p[:my_hostname] %> + +# The next three settings create two lists of domains and one list of hosts. +# These lists are referred to later in this configuration using the syntax +# +local_domains, +relay_to_domains, and +relay_from_hosts, respectively. They +# are all colon-separated lists: + +domainlist local_domains = domU-12-35-31-04-E1-81.compute-1.internal +domainlist relay_to_domains = domU-12-35-31-04-E1-81.compute-1.internal +hostlist relay_from_hosts = 127.0.0.1 : 10.0.0.0/8 + +# Most straightforward access control requirements can be obtained by +# appropriate settings of the above options. In more complicated situations, +# you may need to modify the Access Control Lists (ACLs) which appear later in +# this file. + +# The first setting specifies your local domains, for example: +# +# domainlist local_domains = my.first.domain : my.second.domain +# +# You can use "@" to mean "the name of the local host", as in the default +# setting above. This is the name that is specified by primary_hostname, +# as specified above (or defaulted). If you do not want to do any local +# deliveries, remove the "@" from the setting above. If you want to accept mail +# addressed to your host's literal IP address, for example, mail addressed to +# "user@[192.168.23.44]", you can add "@[]" as an item in the local domains +# list. You also need to uncomment "allow_domain_literals" below. This is not +# recommended for today's Internet. + +# The second setting specifies domains for which your host is an incoming relay. +# If you are not doing any relaying, you should leave the list empty. However, +# if your host is an MX backup or gateway of some kind for some domains, you +# must set relay_to_domains to match those domains. 
For example: +# +# domainlist relay_to_domains = *.myco.com : my.friend.org +# +# This will allow any host to relay through your host to those domains. +# See the section of the manual entitled "Control of relaying" for more +# information. + +# The third setting specifies hosts that can use your host as an outgoing relay +# to any other host on the Internet. Such a setting commonly refers to a +# complete local network as well as the localhost. For example: +# +# hostlist relay_from_hosts = 127.0.0.1 : 192.168.0.0/16 +# +# The "/16" is a bit mask (CIDR notation), not a number of hosts. Note that you +# have to include 127.0.0.1 if you want to allow processes on your host to send +# SMTP mail by using the loopback address. A number of MUAs use this method of +# sending mail. + +# All three of these lists may contain many different kinds of item, including +# wildcarded names, regular expressions, and file lookups. See the reference +# manual for details. The lists above are used in the access control lists for +# checking incoming messages. The names of these ACLs are defined here: + acl_smtp_rcpt = acl_check_rcpt acl_smtp_data = acl_check_data +# You should not change those settings until you understand how ACLs work. + + +# If you are running a version of Exim that was compiled with the content- +# scanning extension, you can cause incoming messages to be automatically +# scanned for viruses. You have to modify the configuration in two places to +# set this up. The first of them is here, where you define the interface to +# your scanner. This example is typical for ClamAV; see the manual for details +# of what to set for other virus scanners. The second modification is in the +# acl_check_data access control list (see below). + +# av_scanner = clamd:/tmp/clamd + + +# For spam scanning, there is a similar option that defines the interface to +# SpamAssassin. You do not need to set this if you are using the default, which +# is shown in this commented example. 
As for virus scanning, you must also +# modify the acl_check_data access control list to enable spam scanning. + +# spamd_address = 127.0.0.1 783 + + +# If Exim is compiled with support for TLS, you may want to enable the +# following options so that Exim allows clients to make encrypted +# connections. In the authenticators section below, there are template +# configurations for plaintext username/password authentication. This kind +# of authentication is only safe when used within a TLS connection, so the +# authenticators will only work if the following TLS settings are turned on +# as well. + +# Allow any client to use TLS. + +# tls_advertise_hosts = * + +# Specify the location of the Exim server's TLS certificate and private key. +# The private key must not be encrypted (password protected). You can put +# the certificate and private key in the same file, in which case you only +# need the first setting, or in separate files, in which case you need both +# options. + +# tls_certificate = /etc/ssl/exim.crt +# tls_privatekey = /etc/ssl/exim.pem + +# In order to support roaming users who wish to send email from anywhere, +# you may want to make Exim listen on other ports as well as port 25, in +# case these users need to send email from a network that blocks port 25. +# The standard port for this purpose is port 587, the "message submission" +# port. See RFC 4409 for details. Microsoft MUAs cannot be configured to +# talk the message submission protocol correctly, so if you need to support +# them you should also allow TLS-on-connect on the traditional but +# non-standard port 465. + +# daemon_smtp_ports = 25 : 465 : 587 +# tls_on_connect_ports = 465 + + +# Specify the domain you want to be added to all unqualified addresses +# here. An unqualified address is one that does not contain an "@" character +# followed by a domain. For example, "caesar@rome.example" is a fully qualified +# address, but the string "caesar" (i.e. 
just a login name) is an unqualified +# email address. Unqualified addresses are accepted only from local callers by +# default. See the recipient_unqualified_hosts option if you want to permit +# unqualified addresses from remote sources. If this option is not set, the +# primary_hostname value is used for qualification. + # qualify_domain = -never_users = nobody -# host_lookup = * +# If you want unqualified recipient addresses to be qualified with a different +# domain to unqualified sender addresses, specify the recipient domain here. +# If this option is not set, the qualify_domain value is used. + +# qualify_recipient = + + +# The following line must be uncommented if you want Exim to recognize +# addresses of the form "user@[10.11.12.13]" that is, with a "domain literal" +# (an IP address) instead of a named domain. The RFCs still require this form, +# but it makes little sense to permit mail to be sent to specific hosts by +# their IP address in the modern Internet. This ancient format has been used +# by those seeking to abuse hosts by using them for unwanted relaying. If you +# really do want to support domain literals, uncomment the following line, and +# see also the "domain_literal" router below. + +# allow_domain_literals + + +# No deliveries will ever be run under the uids of users specified by +# never_users (a colon-separated list). An attempt to do so causes a panic +# error to be logged, and the delivery to be deferred. This is a paranoic +# safety catch. There is an even stronger safety catch in the form of the +# FIXED_NEVER_USERS setting in the configuration for building Exim. The list of +# users that it specifies is built into the binary, and cannot be changed. The +# option below just adds additional users to the list. The default for +# FIXED_NEVER_USERS is "root", but just to be absolutely sure, the default here +# is also "root". 
+ +# Note that the default setting means you cannot deliver mail addressed to root +# as if it were a normal user. This isn't usually a problem, as most sites have +# an alias for root that redirects such mail to a human administrator. + +never_users = root -rfc1413_hosts = * -rfc1413_query_timeout = 2s + +# The setting below causes Exim to do a reverse DNS lookup on all incoming +# IP calls, in order to get the true host name. If you feel this is too +# expensive, you can specify the networks for which a lookup is done, or +# remove the setting entirely. + +host_lookup = * + + +# The settings below, which are actually the same as the defaults in the +# code, cause Exim to make RFC 1413 (ident) callbacks for all incoming SMTP +# calls. You can limit the hosts to which these calls are made, and/or change +# the timeout that is used. If you set the timeout to zero, all RFC 1413 calls +# are disabled. RFC 1413 calls are cheap and can provide useful information +# for tracing problem messages, but some hosts and firewalls have problems +# with them. This can result in a timeout instead of an immediate refused +# connection, leading to delays on starting up SMTP sessions. (The default was +# reduced from 30s to 5s for release 4.61.) + +#rfc1413_hosts = * +#rfc1413_query_timeout = 5s + + +# By default, Exim expects all envelope addresses to be fully qualified, that +# is, they must contain both a local part and a domain. If you want to accept +# unqualified addresses (just a local part) from certain hosts, you can specify +# these hosts by setting one or both of +# +# sender_unqualified_hosts = +# recipient_unqualified_hosts = +# +# to control sender and recipient addresses, respectively. When this is done, +# unqualified addresses are qualified using the settings of qualify_domain +# and/or qualify_recipient (see above). + + +# If you want Exim to support the "percent hack" for certain domains, +# uncomment the following line and provide a list of domains. 
The "percent +# hack" is the feature by which mail addressed to x%y@z (where z is one of +# the domains listed) is locally rerouted to x@y and sent on. If z is not one +# of the "percent hack" domains, x%y is treated as an ordinary local part. This +# hack is rarely needed nowadays; you should not enable it unless you are sure +# that you really need it. +# +# percent_hack_domains = +# +# As well as setting this option you will also need to remove the test +# for local parts containing % in the ACL definition below. + + +# When Exim can neither deliver a message nor return it to sender, it "freezes" +# the delivery error message (aka "bounce message"). There are also other +# circumstances in which messages get frozen. They will stay on the queue for +# ever unless one of the following options is set. + +# This option unfreezes frozen bounce messages after two days, tries +# once more to deliver them, and ignores any delivery failures. ignore_bounce_errors_after = 2d +# This option cancels (removes) frozen messages that are older than a week. + timeout_frozen_after = 7d -split_spool_directory = true -begin acl +# By default, messages that are waiting on Exim's queue are all held in a +# single directory called "input" which it itself within Exim's spool +# directory. (The default spool directory is specified when Exim is built, and +# is often /var/spool/exim/.) Exim works best when its queue is kept short, but +# there are circumstances where this is not always possible. If you uncomment +# the setting below, messages on the queue are held in 62 subdirectories of +# "input" instead of all in the same directory. The subdirectories are called +# 0, 1, ... A, B, ... a, b, ... z. This has two benefits: (1) If your file +# system degrades with many files in one directory, this is less likely to +# happen; (2) Exim can process the queue one subdirectory at a time instead of +# all at once, which can give better performance with large queues. 
-acl_check_rcpt: +# split_spool_directory = true - accept hosts = : - deny message = Restricted characters in address - domains = +local_domains - local_parts = ^[.] : ^.*[@%!/|] - deny message = Restricted characters in address - domains = !+local_domains - local_parts = ^[./|] : ^.*[@%!] : ^.*/\\.\\./ +###################################################################### +# ACL CONFIGURATION # +# Specifies access control lists for incoming SMTP mail # +###################################################################### -# accept local_parts = postmaster -# domains = +local_domains +begin acl - # Deny unless the sender address can be verified. +# This access control list is used for every RCPT command in an incoming +# SMTP message. The tests are run in order until the address is either +# accepted or denied. - require verify = sender +acl_check_rcpt: - accept hosts = +relay_from_hosts - control = submission + # Accept if the source is local SMTP (i.e. not over TCP/IP). We do this by + # testing for an empty sending host field. + + accept hosts = : + + ############################################################################# + # The following section of the ACL is concerned with local parts that contain + # @ or % or ! or / or | or dots in unusual places. + # + # The characters other than dots are rarely found in genuine local parts, but + # are often tried by people looking to circumvent relaying restrictions. + # Therefore, although they are valid in local parts, these rules lock them + # out, as a precaution. + # + # Empty components (two dots in a row) are not valid in RFC 2822, but Exim + # allows them because they have been encountered. (Consider local parts + # constructed as "firstinitial.secondinitial.familyname" when applied to + # someone like me, who has no second initial.) However, a local part starting + # with a dot or containing /../ can cause trouble if it is used as part of a + # file name (e.g. for a mailing list). 
This is also true for local parts that + # contain slashes. A pipe symbol can also be troublesome if the local part is + # incorporated unthinkingly into a shell command line. + # + # Two different rules are used. The first one is stricter, and is applied to + # messages that are addressed to one of the local domains handled by this + # host. The line "domains = +local_domains" restricts it to domains that are + # defined by the "domainlist local_domains" setting above. The rule blocks + # local parts that begin with a dot or contain @ % ! / or |. If you have + # local accounts that include these characters, you will have to modify this + # rule. + + deny message = Restricted characters in address + domains = +local_domains + local_parts = ^[.] : ^.*[@%!/|] + + # The second rule applies to all other domains, and is less strict. The line + # "domains = !+local_domains" restricts it to domains that are NOT defined by + # the "domainlist local_domains" setting above. The exclamation mark is a + # negating operator. This rule allows your own users to send outgoing + # messages to sites that use slashes and vertical bars in their local parts. + # It blocks local parts that begin with a dot, slash, or vertical bar, but + # allows these characters within the local part. However, the sequence /../ + # is barred. The use of @ % and ! is blocked, as before. The motivation here + # is to prevent your users (or your users' viruses) from mounting certain + # kinds of attack on remote sites. + + deny message = Restricted characters in address + domains = !+local_domains + local_parts = ^[./|] : ^.*[@%!] : ^.*/\\.\\./ + ############################################################################# + + # Accept mail to postmaster in any local domain, regardless of the source, + # and without verifying the sender. + + accept local_parts = postmaster + domains = +local_domains + + # Deny unless the sender address can be verified. 
+ + # Accept if the message comes from one of the hosts for which we are an + # outgoing relay. It is assumed that such hosts are most likely to be MUAs, + # so we set control=submission to make Exim treat the message as a + # submission. It will fix up various errors in the message, for example, the + # lack of a Date: header line. If you are actually relaying out out from + # MTAs, you may want to disable this. If you are handling both relaying from + # MTAs and submissions from MUAs you should probably split them into two + # lists, and handle them differently. + + # Recipient verification is omitted here, because in many cases the clients + # are dumb MUAs that don't cope well with SMTP error responses. If you are + # actually relaying out from MTAs, you should probably add recipient + # verification here. + + # Note that, by putting this test before any DNS black list checks, you will + # always accept from these hosts, even if they end up on a black list. The + # assumption is that they are your friends, and if they get onto a black + # list, it is a mistake. + + accept hosts = +relay_from_hosts + control = submission + + # Accept if the message arrived over an authenticated connection, from + # any host. Again, these messages are usually from MUAs, so recipient + # verification is omitted, and submission mode is set. And again, we do this + # check before any black list tests. + + accept authenticated = * + control = submission + + # Insist that any other recipient address that we accept is either in one of + # our local domains, or is in a domain for which we explicitly allow + # relaying. Any other domain is rejected as being unacceptable for relaying. + + require message = relay not permitted + domains = +local_domains : +relay_to_domains + + # We also require all accepted addresses to be verifiable. This check will + # do local part verification for local domains, but only check the domain + # for remote domains. 
The only way to check local parts for the remote + # relay domains is to use a callout (add /callout), but please read the + # documentation about callouts before doing this. + + ############################################################################# + # There are no default checks on DNS black lists because the domains that + # contain these lists are changing all the time. However, here are two + # examples of how you can get Exim to perform a DNS black list lookup at this + # point. The first one denies, whereas the second just warns. + # + # deny message = rejected because $sender_host_address is in a black list at $dnslist_domain\n$dnslist_text + # dnslists = black.list.example + # + # warn dnslists = black.list.example + # add_header = X-Warning: $sender_host_address is in a black list at $dnslist_domain + # log_message = found in $dnslist_domain + ############################################################################# + + ############################################################################# + # This check is commented out because it is recognized that not every + # sysadmin will want to do it. If you enable it, the check performs + # Client SMTP Authorization (csa) checks on the sending host. These checks + # do DNS lookups for SRV records. The CSA proposal is currently (May 2005) + # an Internet draft. You can, of course, add additional conditions to this + # ACL statement to restrict the CSA checks to certain hosts only. + # + # require verify = csa + ############################################################################# + + # At this point, the address has passed all the checks that have been + # configured, so we accept it unconditionally. + + accept + + +# This ACL is used after the contents of a message have been received. This +# is the ACL in which you can test a message's headers or body, and in +# particular, this is where you can invoke external virus or spam scanners. 
+# Some suggested ways of configuring these tests are shown below, commented +# out. Without any tests, this ACL accepts all messages. If you want to use +# such tests, you must ensure that Exim is compiled with the content-scanning +# extension (WITH_CONTENT_SCAN=yes in Local/Makefile). - accept authenticated = * - control = submission +acl_check_data: - require message = relay not permitted - domains = +local_domains : +relay_to_domains + # Deny if the message contains a virus. Before enabling this check, you + # must install a virus scanner and set the av_scanner option above. + # + # deny malware = * + # message = This message contains a virus ($malware_name). - require verify = recipient + # Add headers to a message if it is judged to be spam. Before enabling this, + # you must install SpamAssassin. You may also need to set the spamd_address + # option above. + # + # warn spam = nobody + # add_header = X-Spam_score: $spam_score\n\ + # X-Spam_score_int: $spam_score_int\n\ + # X-Spam_bar: $spam_bar\n\ + # X-Spam_report: $spam_report - accept + # Accept the message. + accept -acl_check_data: - accept -begin routers +###################################################################### +# ROUTERS CONFIGURATION # +# Specifies how addresses are handled # +###################################################################### +# THE ORDER IN WHICH THE ROUTERS ARE DEFINED IS IMPORTANT! # +# An address is passed to each router in turn until it is accepted. # +###################################################################### -send_via_smarthost: - driver = manualroute - domains = ! +local_domains - transport = smarthost_smtp - route_list = "* <%= @p[:smtp_host] %> byname" - host_find_failed = defer - no_more +begin routers +send_via_sendgrid: + driver = manualroute + domains = ! 
+local_domains + transport = sendgrid_smtp + route_list = "* <%= @p[:smtp_host] %> byname" + host_find_failed = defer + no_more + +# This router routes to remote hosts over SMTP by explicit IP address, +# when an email address is given in "domain literal" form, for example, +# . The RFCs require this facility. However, it is +# little-known these days, and has been exploited by evil people seeking +# to abuse SMTP relays. Consequently it is commented out in the default +# configuration. If you uncomment this router, you also need to uncomment +# allow_domain_literals above, so that Exim can recognize the syntax of +# domain literal addresses. + +# domain_literal: +# driver = ipliteral +# domains = ! +local_domains +# transport = remote_smtp + + +# This router routes addresses that are not in local domains by doing a DNS +# lookup on the domain name. The exclamation mark that appears in "domains = ! +# +local_domains" is a negating operator, that is, it can be read as "not". The +# recipient's domain must not be one of those defined by "domainlist +# local_domains" above for this router to be used. +# +# If the router is used, any domain that resolves to 0.0.0.0 or to a loopback +# interface address (127.0.0.0/8) is treated as if it had no DNS entry. Note +# that 0.0.0.0 is the same as 0.0.0.0/32, which is commonly treated as the +# local host inside the network stack. It is not 0.0.0.0/0, the default route. +# If the DNS lookup fails, no further routers are tried because of the no_more +# setting, and consequently the address is unrouteable. dnslookup: - driver = dnslookup - domains = ! +local_domains - transport = remote_smtp - ignore_target_hosts = 0.0.0.0 : 127.0.0.0/8 - no_more + driver = dnslookup + domains = ! 
+local_domains + transport = remote_smtp + ignore_target_hosts = 0.0.0.0 : 127.0.0.0/8 + no_more -system_aliases: - driver = redirect - allow_fail - allow_defer - data = ${lookup{$local_part}lsearch{/etc/mail/aliases}} - file_transport = address_file - pipe_transport = address_pipe +# The remaining routers handle addresses in the local domain(s), that is those +# domains that are defined by "domainlist local_domains" above. + + +# This router handles aliasing using a linearly searched alias file with the +# name /etc/mail/aliases. When this configuration is installed automatically, +# the name gets inserted into this file from whatever is set in Exim's +# build-time configuration. The default path is the traditional /etc/aliases. +# If you install this configuration by hand, you need to specify the correct +# path in the "data" setting below. +# +##### NB You must ensure that the alias file exists. It used to be the case +##### NB that every Unix had that file, because it was the Sendmail default. +##### NB These days, there are systems that don't have it. Your aliases +##### NB file should at least contain an alias for "postmaster". +# +# If any of your aliases expand to pipes or files, you will need to set +# up a user and a group for these deliveries to run under. You can do +# this by uncommenting the "user" option below (changing the user name +# as appropriate) and adding a "group" option if necessary. Alternatively, you +# can specify "user" on the transports that are used. Note that the transports +# listed below are the same as are used for .forward files; you might want +# to set up different ones for pipe and file deliveries from aliases. + +system_aliases: + driver = redirect + allow_fail + allow_defer + data = ${lookup{$local_part}lsearch{/etc/mail/aliases}} +# user = exim + file_transport = address_file + pipe_transport = address_pipe + + +# This router handles forwarding using traditional .forward files in users' +# home directories. 
If you want it also to allow mail filtering when a forward +# file starts with the string "# Exim filter" or "# Sieve filter", uncomment +# the "allow_filter" option. + +# If you want this router to treat local parts with suffixes introduced by "-" +# or "+" characters as if the suffixes did not exist, uncomment the two local_ +# part_suffix options. Then, for example, xxxx-foo@your.domain will be treated +# in the same way as xxxx@your.domain by this router. You probably want to make +# the same change to the localuser router. + +# The no_verify setting means that this router is skipped when Exim is +# verifying addresses. Similarly, no_expn means that this router is skipped if +# Exim is processing an EXPN command. + +# The check_ancestor option means that if the forward file generates an +# address that is an ancestor of the current one, the current one gets +# passed on instead. This covers the case where A is aliased to B and B +# has a .forward file pointing to A. + +# The three transports specified at the end are those that are used when +# forwarding generates a direct delivery to a file, or to a pipe, or sets +# up an auto-reply, respectively. userforward: - driver = redirect - check_local_user + driver = redirect + check_local_user # local_part_suffix = +* : -* # local_part_suffix_optional - file = $home/.forward + file = $home/.forward # allow_filter - no_verify - no_expn - check_ancestor - file_transport = address_file - pipe_transport = address_pipe - reply_transport = address_reply + no_verify + no_expn + check_ancestor + file_transport = address_file + pipe_transport = address_pipe + reply_transport = address_reply + + +# This router matches local user mailboxes. If the router fails, the error +# message is "Unknown user". +# If you want this router to treat local parts with suffixes introduced by "-" +# or "+" characters as if the suffixes did not exist, uncomment the two local_ +# part_suffix options. 
Then, for example, xxxx-foo@your.domain will be treated +# in the same way as xxxx@your.domain by this router. localuser: - driver = accept - check_local_user - domains = ! +local_domains + driver = accept + check_local_user # local_part_suffix = +* : -* # local_part_suffix_optional - transport = remote_smtp - cannot_route_message = Unknown user + transport = local_delivery + cannot_route_message = Unknown user + + + +###################################################################### +# TRANSPORTS CONFIGURATION # +###################################################################### +# ORDER DOES NOT MATTER # +# Only one appropriate transport is called for each delivery. # +###################################################################### + +# A transport is used only when referenced from a router that successfully +# handles an address. begin transports +sendgrid_smtp: + driver = smtp + hosts = <%= @p[:smtp_host] %> + hosts_require_auth = <%= @p[:smtp_host] %> + hosts_require_tls = <%= @p[:smtp_host] %> # This transport is used for delivering messages over SMTP connections. -smarthost_smtp: - driver = smtp - hosts = <%= @p[:smtp_host] %> - hosts_require_auth = <%= @p[:smtp_host] %> - hosts_require_tls = <%= @p[:smtp_host] %> - remote_smtp: - driver = smtp - hosts_try_auth = smarthost + driver = smtp + + +# This transport is used for local delivery to user mailboxes in traditional +# BSD mailbox format. By default it will be run under the uid and gid of the +# local user, and requires the sticky bit to be set on the /var/mail directory. +# Some systems use the alternative approach of running mail deliveries under a +# particular group instead of using the sticky bit. The commented options below +# show how this can be done. 
+ +local_delivery: + driver = appendfile +# file = /var/mail/$local_part + directory = /home/$local_part/.maildir + maildir_format + delivery_date_add + envelope_to_add + return_path_add +# group = mail +# mode = 0660 + + +# This transport is used for handling pipe deliveries generated by alias or +# .forward files. If the pipe generates any standard output, it is returned +# to the sender of the message as a delivery error. Set return_fail_output +# instead of return_output if you want this to happen only when the pipe fails +# to complete normally. You can set different transports for aliases and +# forwards if you want to - see the references to address_pipe in the routers +# section above. + +address_pipe: + driver = pipe + return_output + + +# This transport is used for handling deliveries directly to files that are +# generated by aliasing or forwarding. + +address_file: + driver = appendfile + delivery_date_add + envelope_to_add + return_path_add + + +# This transport is used for handling autoreplies generated by the filtering +# option of the userforward router. + +address_reply: + driver = autoreply + + + +###################################################################### +# RETRY CONFIGURATION # +###################################################################### + +begin retry + +# This single retry rule applies to all domains and all errors. It specifies +# retries every 15 minutes for 2 hours, then increasing retry intervals, +# starting at 1 hour and increasing each time by a factor of 1.5, up to 16 +# hours, then retries every 6 hours until 4 days have passed since the first +# failed delivery. + +# WARNING: If you do not have any retry rules at all (this section of the +# configuration is non-existent or empty), Exim will not do any retries of +# messages that fail to get delivered at the first attempt. The effect will +# be to treat temporary errors as permanent. 
Therefore, DO NOT remove this +# retry rule unless you really don't want any retries. + +# Address or Domain Error Retries +# ----------------- ----- ------- + +* * F,2h,15m; G,16h,1h,1.5; F,4d,6h + + + +###################################################################### +# REWRITE CONFIGURATION # +###################################################################### + +# There are no rewriting specifications in this default configuration file. + +begin rewrite + + + +###################################################################### +# AUTHENTICATION CONFIGURATION # +###################################################################### + +# The following authenticators support plaintext username/password +# authentication using the standard PLAIN mechanism and the traditional +# but non-standard LOGIN mechanism, with Exim acting as the server. +# PLAIN and LOGIN are enough to support most MUA software. +# +# These authenticators are not complete: you need to change the +# server_condition settings to specify how passwords are verified. +# They are set up to offer authentication to the client only if the +# connection is encrypted with TLS, so you also need to add support +# for TLS. See the global configuration options section at the start +# of this file for more about TLS. +# +# The default RCPT ACL checks for successful authentication, and will accept +# messages from authenticated users from anywhere on the Internet. begin authenticators -# this is for the SmartHost SMTP Authentication, we do PlainText Auth over TLS... 
-smarthost_login: - driver = plaintext - public_name = LOGIN - client_send = : <%= @p[:username] %> : <%= @p[:password] %> - -plain_server: -driver = plaintext -public_name = PLAIN -server_condition = "${if crypteq{$auth3}{${extract{1}{:}{${lookup{$auth2}lsearch{/etc/exim/passwd}{$value}{*:*}}}}}{1}{0}}" -server_set_id = $auth2 -server_prompts = : -server_advertise_condition = * - -cram_md5_server: -driver = cram_md5 -public_name = CRAM-MD5 -server_secret = ${extract{2}{:}{${lookup{$auth1}lsearch{/etc/exim/passwd}{$value}fail}}} -server_set_id = $auth1 - -login_server: -driver = plaintext -public_name = LOGIN -server_prompts = "Username:: : Password::" -server_condition = "${if crypteq{$auth2}{${extract{1}{:}{${lookup{$auth1}lsearch{/etc/exim/passwd}{$value}{*:*}}}}}{1}{0}}" -server_set_id = $auth1 -server_advertise_condition = * -# End of Exim configuration file +sendgrid_login: + driver = plaintext + public_name = LOGIN + client_send = : <%= @p[:username] %> : <%= @p[:password] %> + + +# PLAIN authentication has no server prompts. The client sends its +# credentials in one lump, containing an authorization ID (which we do not +# use), an authentication ID, and a password. The latter two appear as +# $auth2 and $auth3 in the configuration and should be checked against a +# valid username and password. In a real configuration you would typically +# use $auth2 as a lookup key, and compare $auth3 against the result of the +# lookup, perhaps using the crypteq{}{} condition. + +#PLAIN: +# driver = plaintext +# server_set_id = $auth2 +# server_prompts = : +# server_condition = Authentication is not yet configured +# server_advertise_condition = ${if def:tls_cipher } +# LOGIN authentication has traditional prompts and responses. There is no +# authorization ID in this mechanism, so unlike PLAIN the username and +# password are $auth1 and $auth2. Apart from that you can use the same +# server_condition setting for both authenticators. 
+ +#LOGIN: +# driver = plaintext +# server_set_id = $auth1 +# server_prompts = <| Username: | Password: +# server_condition = Authentication is not yet configured +# server_advertise_condition = ${if def:tls_cipher } + + +###################################################################### +# CONFIGURATION FOR local_scan() # +###################################################################### + +# If you have built Exim to include a local_scan() function that contains +# tables for private options, you can define those options here. Remember to +# uncomment the "begin" line. It is commented by default because it provokes +# an error with Exim binaries that are not built with LOCAL_SCAN_HAS_OPTIONS +# set in the Local/Makefile. + +# begin local_scan + + +# End of Exim configuration file diff --git a/cookbooks/logrotate/recipes/default.rb b/cookbooks/logrotate/recipes/default.rb index 129d76015..3f51924c7 100644 --- a/cookbooks/logrotate/recipes/default.rb +++ b/cookbooks/logrotate/recipes/default.rb @@ -8,6 +8,7 @@ owner "root" group "root" mode "0655" + backup 0 end cron "logrotate -f /etc/logrotate.d/nginx" do diff --git a/cookbooks/main/recipes/default.rb b/cookbooks/main/recipes/default.rb index b12f657be..ecdb3eb38 100644 --- a/cookbooks/main/recipes/default.rb +++ b/cookbooks/main/recipes/default.rb @@ -35,6 +35,12 @@ # require_recipe "sunspot" # #uncomment to run the exim recipe +#exim_auth "auth" do +# my_hostname "my_hostname.com" +# smtp_host "smtp.sendgrid.com" +# username "username" +# password "password" +#end #require_recipe "exim" #uncomment to run the exim::auth recipe @@ -57,4 +63,4 @@ #uncomment to include the eybackup_verbose recipe #require_recipe "eybackup_verbose" -require_recipe 'nginx' \ No newline at end of file +require_recipe 'nginx' diff --git a/cookbooks/redis/recipes/default.rb b/cookbooks/redis/recipes/default.rb index 68831ea70..c502f5f68 100644 --- a/cookbooks/redis/recipes/default.rb +++ b/cookbooks/redis/recipes/default.rb @@ 
-5,12 +5,17 @@ if ['util'].include?(node[:instance_role]) +execute "set_overcommit_memory" do + command "echo 1 > /proc/sys/vm/overcommit_memory" + action :run +end + enable_package "dev-db/redis" do - version "1.3.12_pre1" + version "2.0.2" end package "dev-db/redis" do - version "1.3.12_pre1" + version "2.0.2" action :install end diff --git a/cookbooks/redis/templates/default/redis.conf.erb b/cookbooks/redis/templates/default/redis.conf.erb index 0b895c346..e4deb08d4 100644 --- a/cookbooks/redis/templates/default/redis.conf.erb +++ b/cookbooks/redis/templates/default/redis.conf.erb @@ -1,20 +1,32 @@ # Redis configuration file example +# Note on units: when memory size is needed, it is possible to specifiy +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + # By default Redis does not run as a daemon. Use 'yes' if you need it. # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. daemonize yes -# When run as a daemon, Redis write a pid file in /var/run/redis.pid by default. -# You can specify a custom pid file location here. +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by +# default. You can specify a custom pid file location here. pidfile <%= @pidfile %> # Accept connections on the specified port, default is 6379 port <%= @port %> # If you want you can bind a single interface, if the bind option is not -# specified all the interfaces will listen for connections. +# specified all the interfaces will listen for incoming connections. 
# -# bind 127.0.0.1 +#bind 127.0.0.1 # Close the connection after a client is idle for N seconds (0 to disable) timeout <%= @timeout %> @@ -22,12 +34,13 @@ timeout <%= @timeout %> # Set server verbosity to 'debug' # it can be one of: # debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) # notice (moderately verbose, what you want in production probably) # warning (only very important / critical messages are logged) loglevel <%= @loglevel %> # Specify the log file name. Also 'stdout' can be used to force -# the demon to log on the standard output. Note that if you use standard +# Redis to log on the standard output. Note that if you use standard # output for logging but daemonize, logs will be sent to /dev/null logfile <%= @logfile %> @@ -49,6 +62,9 @@ databases 16 # after 900 sec (15 min) if at least 1 key changed # after 300 sec (5 min) if at least 10 keys changed # after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving at all commenting all the "save" lines. + save 900 1 save 300 10 save 60 10000 @@ -57,13 +73,19 @@ save 60 10000 # For default that's set to 'yes' as it's almost always a win. # If you want to save some CPU in the saving child set it to 'no' but # the dataset will likely be bigger if you have compressible values or keys. -# FOR 1.2.1 -#rdbcompression yes +rdbcompression yes # The filename where to dump the DB -dbfilename redis_state.rdb +dbfilename /db/redis/dump.rdb -# For default save/load DB in/from the working directory +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# Also the Append Only File will be created inside this directory. +# +# Note that you must specify a directory here, not a file name. 
dir <%= @basedir %> ################################# REPLICATION ################################# @@ -90,6 +112,10 @@ dir <%= @basedir %> # # This should stay commented out for backward compatibility and because most # people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. # # requirepass foobared @@ -97,7 +123,7 @@ dir <%= @basedir %> # Set the max number of connected clients at the same time. By default there # is no limit, and it's up to the number of file descriptors the Redis process -# is able to open. The special value '0' means no limts. +# is able to open. The special value '0' means no limits. # Once the limit is reached Redis will close all the new connections sending # an error 'max number of clients reached'. # @@ -129,7 +155,7 @@ dir <%= @basedir %> # happens this is the preferred way to run Redis. If instead you care a lot # about your data and don't want to that a single record can get lost you should # enable the append only mode: when this mode is enabled Redis will append -# every write operation received in the file appendonly.log. This file will +# every write operation received in the file appendonly.aof. This file will # be read on startup in order to rebuild the full dataset in memory. # # Note that you can have both the async dumps and the append only file if you @@ -137,13 +163,13 @@ dir <%= @basedir %> # Still if append only mode is enabled Redis will load the data from the # log file at startup ignoring the dump.rdb file. # -# The name of the append only file is "appendonly.log" -# # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append # log file in background when it gets too big. 
-# FOR 1.2.1 -#appendonly no +appendonly no + +# The name of the append only file (default: "appendonly.aof") +# appendfilename appendonly.aof # The fsync() call tells the Operating System to actually write data on disk # instead to wait for more data in the output buffer. Some OS will really flush @@ -155,17 +181,92 @@ dir <%= @basedir %> # always: fsync after every write to the append only log . Slow, Safest. # everysec: fsync only if one second passed since the last fsync. Compromise. # -# The default is "always" that's the safer of the options. It's up to you to -# understand if you can relax this to "everysec" that will fsync every second -# or to "no" that will let the operating system flush the output buffer when -# it want, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting). +# The default is "everysec" that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# If unsure, use "everysec". # appendfsync always -# FOR 1.2.1 -#appendfsync everysec +appendfsync everysec # appendfsync no +################################ VIRTUAL MEMORY ############################### + +# Virtual Memory allows Redis to work with datasets bigger than the actual +# amount of RAM needed to hold the whole dataset in memory. +# In order to do so very used keys are taken in memory while the other keys +# are swapped into a swap file, similarly to what operating systems do +# with memory pages. +# +# To enable VM just set 'vm-enabled' to yes, and set the following three +# VM parameters accordingly to your needs. 
+ +vm-enabled no +# vm-enabled yes + +# This is the path of the Redis swap file. As you can guess, swap files +# can't be shared by different Redis instances, so make sure to use a swap +# file for every redis process you are running. Redis will complain if the +# swap file is already in use. +# +# The best kind of storage for the Redis swap file (that's accessed at random) +# is a Solid State Disk (SSD). +# +# *** WARNING *** if you are using a shared hosting the default of putting +# the swap file under /tmp is not secure. Create a dir with access granted +# only to Redis user and configure Redis to create the swap file there. +vm-swap-file /mnt/redis.swap + +# vm-max-memory configures the VM to use at max the specified amount of +# RAM. Everything that deos not fit will be swapped on disk *if* possible, that +# is, if there is still enough contiguous space in the swap file. +# +# With vm-max-memory 0 the system will swap everything it can. Not a good +# default, just specify the max amount of RAM you can in bytes, but it's +# better to leave some margin. For instance specify an amount of RAM +# that's more or less between 60 and 80% of your free RAM. +vm-max-memory 0 + +# Redis swap files is split into pages. An object can be saved using multiple +# contiguous pages, but pages can't be shared between different objects. +# So if your page is too big, small objects swapped out on disk will waste +# a lot of space. If you page is too small, there is less space in the swap +# file (assuming you configured the same number of total swap file pages). +# +# If you use a lot of small objects, use a page size of 64 or 32 bytes. +# If you use a lot of big objects, use a bigger page size. +# If unsure, use the default :) +vm-page-size 32 + +# Number of total memory pages in the swap file. +# Given that the page table (a bitmap of free/used pages) is taken in memory, +# every 8 pages on disk will consume 1 byte of RAM. 
+# +# The total swap size is vm-page-size * vm-pages +# +# With the default of 32-bytes memory pages and 134217728 pages Redis will +# use a 4 GB swap file, that will use 16 MB of RAM for the page table. +# +# It's better to use the smallest acceptable value for your application, +# but the default is large in order to work in most conditions. +vm-pages 134217728 + +# Max number of VM I/O threads running at the same time. +# This threads are used to read/write data from/to swap file, since they +# also encode and decode objects from disk to memory or the reverse, a bigger +# number of threads can help with big objects even if they can't help with +# I/O itself as the physical device may not be able to couple with many +# reads/writes operations at the same time. +# +# The special value of 0 turn off threaded I/O and enables the blocking +# Virtual Memory implementation. +vm-max-threads 4 + ############################### ADVANCED CONFIG ############################### # Glue small output buffers together in order to send small replies in a @@ -173,17 +274,40 @@ dir <%= @basedir %> # in terms of number of queries per second. Use 'yes' if unsure. glueoutputbuf yes -# Use object sharing. Can save a lot of memory if you have many common -# string in your dataset, but performs lookups against the shared objects -# pool so it uses more CPU and can be a bit slower. Usually it's a good -# idea. +# Hashes are encoded in a special way (much more memory efficient) when they +# have at max a given numer of elements, and the biggest element does not +# exceed a given threshold. You can configure this limits with the following +# configuration directives. +hash-max-zipmap-entries 64 +hash-max-zipmap-value 512 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). 
The hash table implementation redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into an hash table +# that is rhashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# active rehashing the main dictionaries, freeing memory when possible. # -# When object sharing is enabled (shareobjects yes) you can use -# shareobjectspoolsize to control the size of the pool used in order to try -# object sharing. A bigger pool size will lead to better sharing capabilities. -# In general you want this value to be at least the double of the number of -# very common strings you have in your dataset. +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply form time to time +# to queries with 2 milliseconds delay. # -# WARNING: object sharing is experimental, don't enable this feature -# in production before of Redis 1.0-stable. Still please try this feature in -# your development environment so that we can test it better. +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all redis server but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. 
+# +# include /path/to/local.conf +# include /path/to/other.conf + diff --git a/cookbooks/solr/recipes/default.rb b/cookbooks/solr/recipes/default.rb index aaca2b180..913359764 100644 --- a/cookbooks/solr/recipes/default.rb +++ b/cookbooks/solr/recipes/default.rb @@ -5,7 +5,7 @@ # We specify what version we want below. solr_desiredversion = 1.4 if ['solo', 'util'].include?(node[:instance_role]) - if solr_desiredverison = 1.3 + if solr_desiredverison == 1.3 solr_file = "apache-solr-1.3.0.tgz" solr_dir = "apache-solr-1.3.0" solr_url = "http://mirror.its.uidaho.edu/pub/apache/lucene/solr/1.3.0/apache-solr-1.3.0.tgz" diff --git a/cookbooks/sphinx/README.md b/cookbooks/sphinx/README.md new file mode 100644 index 000000000..d494f6723 --- /dev/null +++ b/cookbooks/sphinx/README.md @@ -0,0 +1,59 @@ +ey-cloud-recipes/sphinx +======================== + +A chef recipe for enabling sphinx on the EY AppCloud. + +Dependencies +============ + +If you're using the ultrasphinx flavor in this recipe, you'll need to make sure +you install the chronic gem in your environment (this is not handled by the recipe). + +As previously mentioned, your application needs to have the appropriate plugin installed +already. + +For thinking_sphinx: + + script/plugin install git://github.com/freelancing-god/thinking-sphinx.git + +For ultrasphinx: + + script/plugin install git://github.com/fauna/ultrasphinx.git + +Also note that searchd won't actually start unless you've already specified indexes +in your application. + +Using it +======== + +Edit the recipe, changing the appropriate fields as annotated in recipes/default.rb. +Namely: + + * Add your application name. + * Uncomment the flavor you want to use (thinking_sphinx or ultrasphinx). + * Set the cron_interval to specify how frequently you want to reindex. 
+ +Add the following before_migrate.rb [deploy hooks](http://docs.engineyard.com/appcloud/howtos/deployment/use-deploy-hooks-with-engine-yard-appcloud): + + run "ln -nfs #{shared_path}/config/sphinx #{release_path}/config/sphinx" + run "ln -nfs #{shared_path}/config/sphinx.yml #{release_path}/config/sphinx.yml" + +By default, the recipe will install and run sphinx on all app instances. If you want to +use a dedicated utility instance, just set the "utility_name" variable to the name of +your utility instance. By default this is set to nil. + +Caveats +======== +If you have multiple app slaves or are installing to a dedicated utility instance, the it's +likely that the recipe run will fail on those instances the very first run because the database +migrations will not have run yet on your application master. If this occurs, simply deploy again +and the recipe should succeed the second time around. This should only occur going forward +if you set new indexes on fields that are in migrations that have to be run. + +Additional Resources +======== + +You can get additional information on sphinx configuration and setup here: + + * [thinking_sphinx](http://freelancing-god.github.com/ts/en/) + * [ultrasphinx](http://blog.evanweaver.com/files/doc/fauna/ultrasphinx/files/README.html) \ No newline at end of file diff --git a/cookbooks/sphinx/recipes/default.rb b/cookbooks/sphinx/recipes/default.rb index 9005de006..078d65723 100644 --- a/cookbooks/sphinx/recipes/default.rb +++ b/cookbooks/sphinx/recipes/default.rb @@ -10,110 +10,248 @@ #flavor = "thinking_sphinx" #flavor = "ultrasphinx" +# If you want to install on a specific utility instance rather than +# all application instances, uncomment and set the utility instance +# name here. Note that if you use a utility instance, your very first +# deploy may fail because the initial database migration will not have +# run by the time this executes on the utility instance. 
If that occurs +# just deploy again and the recipe should succeed. +utility_name = nil +# utility_name = "sphinx" + # If you want to have scheduled reindexes in cron, enter the minute # interval here. This is passed directly to cron via /, so you should # only use numbers between 1 - 59. # -# If you don't want scheduled reindexes, just leave this commented. -# -# Uncommenting this line as-is will reindex once every 10 minutes. -# cron_interval = 10 +# If you don't want scheduled reindexes, just leave this set to nil. +# Setting it equal to 10 would run the cron job every 10 minutes. +cron_interval = nil -if ['solo', 'app', 'app_master'].include?(node[:instance_role]) +if utility_name + if ['solo', 'app', 'app_master'].include?(node[:instance_role]) + run_for_app(appname) do |app_name, data| + ey_cloud_report "Sphinx" do + message "configuring #{flavor}" + end - # be sure to replace "app_name" with the name of your application. - run_for_app(appname) do |app_name, data| + directory "/data/#{app_name}/shared/config/sphinx" do + recursive true + owner node[:owner_name] + group node[:owner_name] + mode 0755 + end - ey_cloud_report "Sphinx" do - message "configuring #{flavor}" + template "/data/#{app_name}/shared/config/sphinx.yml" do + owner node[:owner_name] + group node[:owner_name] + mode 0644 + source "sphinx.yml.erb" + variables({ + :app_name => app_name, + :user => node[:owner_name], + :mem_limit => 32 + }) + end end + end - directory "/var/run/sphinx" do - owner node[:owner_name] - group node[:owner_name] - mode 0755 - end + if node[:name] == utility_name + run_for_app(appname) do |app_name, data| + ey_cloud_report "Sphinx" do + message "configuring #{flavor}" + end - directory "/var/log/engineyard/sphinx/#{app_name}" do - recursive true - owner node[:owner_name] - group node[:owner_name] - mode 0755 - end + directory "/var/run/sphinx" do + owner node[:owner_name] + group node[:owner_name] + mode 0755 + end - remote_file "/etc/logrotate.d/sphinx" do - owner 
"root" - group "root" - mode 0755 - source "sphinx.logrotate" - backup false - action :create - end + directory "/var/log/engineyard/sphinx/#{app_name}" do + recursive true + owner node[:owner_name] + group node[:owner_name] + mode 0755 + end - template "/etc/monit.d/sphinx.#{app_name}.monitrc" do - source "sphinx.monitrc.erb" - owner node[:owner_name] - group node[:owner_name] - mode 0644 - variables({ - :app_name => app_name, - :user => node[:owner_name], - :flavor => flavor - }) - end + directory "/data/#{app_name}/shared/config/sphinx" do + recursive true + owner node[:owner_name] + group node[:owner_name] + mode 0755 + end - template "/data/#{app_name}/shared/config/sphinx.yml" do - owner node[:owner_name] - group node[:owner_name] - mode 0644 - source "sphinx.yml.erb" - variables({ - :app_name => app_name, - :user => node[:owner_name], - :flavor => flavor.eql?("thinking_sphinx") ? "thinkingsphinx" : flavor, - :mem_limit => 32 - }) - end + remote_file "/etc/logrotate.d/sphinx" do + owner "root" + group "root" + mode 0755 + source "sphinx.logrotate" + backup false + action :create + end - execute "sphinx config" do - command "rake #{flavor}:configure" - user node[:owner_name] - environment({ - 'HOME' => "/home/#{node[:owner_name]}", - 'RAILS_ENV' => node[:environment][:framework_env] - }) - cwd "/data/#{app_name}/current" - end + template "/etc/monit.d/sphinx.#{app_name}.monitrc" do + source "sphinx.monitrc.erb" + owner node[:owner_name] + group node[:owner_name] + mode 0644 + variables({ + :app_name => app_name, + :user => node[:owner_name], + :flavor => flavor + }) + end - ey_cloud_report "indexing #{flavor}" do - message "indexing #{flavor}" - end + template "/data/#{app_name}/shared/config/sphinx.yml" do + owner node[:owner_name] + group node[:owner_name] + mode 0644 + source "sphinx.yml.erb" + variables({ + :app_name => app_name, + :user => node[:owner_name], + :mem_limit => 32 + }) + end - execute "#{flavor} index" do - command "rake #{flavor}:index" - 
user node[:owner_name] - environment({ - 'HOME' => "/home/#{node[:owner_name]}", - 'RAILS_ENV' => node[:environment][:framework_env] - }) - cwd "/data/#{app_name}/current" - end + execute "sphinx config" do + command "rake #{flavor}:configure" + user node[:owner_name] + environment({ + 'HOME' => "/home/#{node[:owner_name]}", + 'RAILS_ENV' => node[:environment][:framework_env] + }) + cwd "/data/#{app_name}/current" + end + + ey_cloud_report "indexing #{flavor}" do + message "indexing #{flavor}" + end - execute "monit quit" - - if cron_interval - cron "sphinx index" do - action :create - minute "*/#{cron_interval}" - hour '*' - day '*' - month '*' - weekday '*' - command "cd /data/#{app_name}/current && RAILS_ENV=#{node[:environment][:framework_env]} rake #{flavor}:index" + execute "#{flavor} index" do + command "rake #{flavor}:index" user node[:owner_name] + environment({ + 'HOME' => "/home/#{node[:owner_name]}", + 'RAILS_ENV' => node[:environment][:framework_env] + }) + cwd "/data/#{app_name}/current" end - end + execute "monit reload" + + if cron_interval + cron "sphinx index" do + action :create + minute "*/#{cron_interval}" + hour '*' + day '*' + month '*' + weekday '*' + command "cd /data/#{app_name}/current && RAILS_ENV=#{node[:environment][:framework_env]} rake #{flavor}:index" + user node[:owner_name] + end + end + end end +else + if ['solo', 'app', 'app_master'].include?(node[:instance_role]) + run_for_app(appname) do |app_name, data| + ey_cloud_report "Sphinx" do + message "configuring #{flavor}" + end + + directory "/var/run/sphinx" do + owner node[:owner_name] + group node[:owner_name] + mode 0755 + end + + directory "/var/log/engineyard/sphinx/#{app_name}" do + recursive true + owner node[:owner_name] + group node[:owner_name] + mode 0755 + end + directory "/data/#{app_name}/shared/config/sphinx" do + recursive true + owner node[:owner_name] + group node[:owner_name] + mode 0755 + end + + remote_file "/etc/logrotate.d/sphinx" do + owner "root" + group 
"root" + mode 0755 + source "sphinx.logrotate" + backup false + action :create + end + + template "/etc/monit.d/sphinx.#{app_name}.monitrc" do + source "sphinx.monitrc.erb" + owner node[:owner_name] + group node[:owner_name] + mode 0644 + variables({ + :app_name => app_name, + :user => node[:owner_name], + :flavor => flavor + }) + end + + template "/data/#{app_name}/shared/config/sphinx.yml" do + owner node[:owner_name] + group node[:owner_name] + mode 0644 + source "sphinx.yml.erb" + variables({ + :app_name => app_name, + :user => node[:owner_name], + :mem_limit => 32 + }) + end + + execute "sphinx config" do + command "rake #{flavor}:configure" + user node[:owner_name] + environment({ + 'HOME' => "/home/#{node[:owner_name]}", + 'RAILS_ENV' => node[:environment][:framework_env] + }) + cwd "/data/#{app_name}/current" + end + + ey_cloud_report "indexing #{flavor}" do + message "indexing #{flavor}" + end + + execute "#{flavor} index" do + command "rake #{flavor}:index" + user node[:owner_name] + environment({ + 'HOME' => "/home/#{node[:owner_name]}", + 'RAILS_ENV' => node[:environment][:framework_env] + }) + cwd "/data/#{app_name}/current" + end + + execute "monit reload" + + if cron_interval + cron "sphinx index" do + action :create + minute "*/#{cron_interval}" + hour '*' + day '*' + month '*' + weekday '*' + command "cd /data/#{app_name}/current && RAILS_ENV=#{node[:environment][:framework_env]} rake #{flavor}:index" + user node[:owner_name] + end + end + end + end end diff --git a/cookbooks/sphinx/templates/default/sphinx.yml.erb b/cookbooks/sphinx/templates/default/sphinx.yml.erb index 82fe6d2a6..60080c45e 100644 --- a/cookbooks/sphinx/templates/default/sphinx.yml.erb +++ b/cookbooks/sphinx/templates/default/sphinx.yml.erb @@ -3,8 +3,8 @@ query_log_file: /var/log/engineyard/sphinx/<%= @app_name %>/query.log pid_file: /var/run/sphinx/<%= @app_name %>.pid address: localhost - port: 3312 + port: 9312 mem_limit: <%= @mem_limit %> - config_file: /data/<%= @app_name 
%>/current/config/<%= @flavor %>/<%= @node[:environment][:framework_env] %>.sphinx.conf + config_file: /data/<%= @app_name %>/current/config/sphinx/<%= @node[:environment][:framework_env] %>.sphinx.conf searchd_file_path: /var/log/engineyard/sphinx/<%= @app_name %>/indexes diff --git a/cookbooks/varnish/templates/default/app.vcl.erb b/cookbooks/varnish/templates/default/app.vcl.erb index 0d808c72b..3d566df18 100644 --- a/cookbooks/varnish/templates/default/app.vcl.erb +++ b/cookbooks/varnish/templates/default/app.vcl.erb @@ -9,27 +9,91 @@ backend default { sub vcl_recv { set req.grace = 120s; +##### Exclude specific urls +# # This following section can be used to give Varnish regular expression to # compare the requested URL to. If there is a match, the requested item # will not be served from cache, nor cached by Varnish. The request will # be passed immediately to the backend. +# +##### +# # if (req.url ~ "/do_not_cache_me") { # return (pass); # } +# +### Sometimes you may want to remove incoming cookies, too, as +### normally unique cookies will prevent caching. Use this with the +### remove cookies code in vcl_fetch, too. +# +# else { +# # Get rid of cookies so that a normal cache lookup can be done. +# unset req.http.cookie; +# } +##### + +##### Normalize Accept-Encoding +# +# The Accept-Encoding header can be formatted multiple ways by different +# browsers, resulting in multiple identical copies of things in the cache. +# This normalizes it. +# +##### +# +# if (req.http.Accept-Encoding) { +# if (req.http.Accept-Encoding ~ "gzip") { +# set req.http.Accept-Encoding = "gzip"; +# } elsif (req.http.Accept-Encoding ~ "deflate") { +# set req.http.Accept-Encoding = "deflate"; +# } else { +# remove req.http.Accept-Encoding; +# } +# } +##### + } sub vcl_fetch { - # If a thread is fetching this object, set the grace period on it to - # 120 seconds so that stale content can be served from cache for that period.
+##### Grace period +# +# If a thread is fetching this object, set the grace period on it to +# 120 seconds so that stale content can be served from cache for that period, +# while waiting for the thread to return a new copy of the content. Change or +# eliminate this value as needed. +# +##### set obj.grace = 120s; - # If the normal TTL on the content is less than 10 seconds, set it to 10 seconds. - # Rails apps often come out of the box being cache-unfriendly. This is an easy - # workaround that forces them to be cached anyway. The right answer is to fix the - # app, though. - # if (obj.ttl < 120s) { - # set obj.ttl = 120s; - # } +##### Force caching +# +# If the normal TTL on the content is less than 120 seconds, set it to 120 seconds. +# Rails apps often come out of the box being cache-unfriendly. This is an easy +# workaround that forces them to be cached anyway. Be careful with this, though, as +# you may end up caching more than you want to. The right answer is to fix the +# app to use proper cache control headers for things that should be cached, but this +# is a quick work-around. +# +##### +# if (obj.ttl < 120s) { +# set obj.ttl = 120s; +# } + +##### Remove Cookies +# +# If there are requests that have to be able to pass cookies on, and/or cache +# based on cookies (private caches per user), then this and the code in vcl_recv +# will have to be adjusted. Refer to the Varnish docs for details, as needed. +# +##### +# +# if (req.url ~ "^/(application/dyn.js|login|signup|logout|.*/edit/|.*/delete/|.*/new|.*/users|.*/csvs)") { +# # No-oping here at the moment. The assumption is that since this stuff here is never to be cached, +# # it will be excluded from any other cookie manipulation, too. I'm leaving this block here, though, +# # in case someone might want to do something else with these requests. +# } else { +# # The Cookie Monster eats the cookies. 
+# unset obj.http.set-cookie; +# } } sub vcl_deliver { diff --git a/cookbooks/varnish/templates/default/varnishd.monitrc.erb b/cookbooks/varnish/templates/default/varnishd.monitrc.erb index b1c6115ed..114ed9f8b 100644 --- a/cookbooks/varnish/templates/default/varnishd.monitrc.erb +++ b/cookbooks/varnish/templates/default/varnishd.monitrc.erb @@ -1,4 +1,4 @@ check process varnish_80 with pidfile /var/run/varnish.80.pid start program = "/usr/sbin/varnishd -a :<%= @varnish_port %> -T 127.0.0.1:6082 -s <%= @cache %> -f /etc/varnish/app.vcl -P /var/run/varnish.80.pid -u nobody -g nobody -p obj_workspace=4096 -p sess_workspace=262144 -p listen_depth=2048 -p overflow_max=<%= @overflow_max %> -p ping_interval=2 -p log_hashstring=off -h classic,5000009 -p thread_pool_max=<%= @thread_pool_max %> -p lru_interval=60 -p esi_syntax=0x00000003 -p sess_timeout=10 -p thread_pools=<%= @thread_pools %> -p thread_pool_min=100 -p shm_workspace=32768 -p thread_pool_add_delay=1" - stop program = "/usr/bin/pkill varnish" + stop program = "/usr/bin/pkill -KILL varnish"