Permalink
Browse files

Created new varnish_frontend recipe to allow varnish to listen on port 80 in front of HAProxy.

This required an iptables trick to prevent failovers, and the addition of instances, from causing
issues in the cluster.
  • Loading branch information...
1 parent dec74fa commit de698cb47eb6271abe7fe443372406071c25943e @jamez01 jamez01 committed Feb 10, 2012
@@ -46,7 +46,7 @@
#uncomment to include the emacs recipe
#require_recipe "emacs"
-
+require_recipe "varnish_frontend"
#uncomment to include the eybackup_verbose recipe
#require_recipe "eybackup_verbose"
@@ -0,0 +1,137 @@
+#
+# Cookbook Name:: varnish
+# Recipe:: default
+#
+
+require 'etc'
+
+if ['solo','app_master', 'app'].include?(node[:instance_role])
+
+ # This needs to be in keywords: www-servers/varnish ~x86
+ # This makes sure that it is.
+
+ enable_package "www-servers/varnish" do
+ version '2.0.6'
+ end
+
+ package "www-servers/varnish" do
+ version '2.0.6'
+ action :install
+ end
+
+ ## Edit interface if needed
+ INTERFACE="eth0"
+
+ #####
+ #
+ # These are generic tuning parameters for each instance size. You may want to
+ # tune them if they prove inadequate.
+ #
+ #####
+
+ CACHE_DIR = '/var/lib/varnish'
+ size = `curl -s http://instance-data.ec2.internal/latest/meta-data/instance-type`
+ case size
+ when /m1.small/ # 1.7G RAM, 1 ECU, 32-bit, 1 core
+ THREAD_POOLS=1
+ THREAD_POOL_MAX=1000
+ OVERFLOW_MAX=2000
+ CACHE="malloc,1GB"
+ when /m1.large/ # 7.5G RAM, 4 ECU, 64-bit, 2 cores
+ THREAD_POOLS=2
+ THREAD_POOL_MAX=2000
+ OVERFLOW_MAX=4000
+ CACHE="malloc,1GB"
+ when /m1.xlarge/ # 15G RAM, 8 ECU, 64-bit, 4 cores
+ THREAD_POOLS=4
+ THREAD_POOL_MAX=4000
+ OVERFLOW_MAX=8000
+ CACHE="malloc,1GB"
+ when /c1.medium/ # 1.7G RAM, 5 ECU, 32-bit, 2 cores
+ THREAD_POOLS=2
+ THREAD_POOL_MAX=2000
+ OVERFLOW_MAX=4000
+ CACHE="malloc,1GB"
+ when /c1.xlarge/ # 7G RAM, 20 ECU, 64-bit, 8 cores
+ THREAD_POOLS=8
+ THREAD_POOL_MAX=8000 # This might be too much.
+ OVERFLOW_MAX=16000
+ CACHE="malloc,1GB"
+ when /m2.xlarge/ # 17.1G RAM, 6.5 ECU, 64-bit, 2 cores
+ THREAD_POOLS=2
+ THREAD_POOL_MAX=2000
+ OVERFLOW_MAX=4000
+ CACHE="malloc,1GB"
+ when /m2.2xlarge/ # 34.2G RAM, 13 ECU, 64-bit, 4 cores
+ THREAD_POOLS=4
+ THREAD_POOL_MAX=4000
+ OVERFLOW_MAX=8000
+ CACHE="malloc,1GB"
+ when /m2.4xlarge/ # 68.4G RAM, 26 ECU, 64-bit, 8 cores
+ THREAD_POOLS=8
+ THREAD_POOL_MAX=8000 # This might be too much.
+ OVERFLOW_MAX=16000
+ CACHE="malloc,1GB"
+ else # This shouldn't happen, but do something rational if it does.
+ THREAD_POOLS=1
+ THREAD_POOL_MAX=2000
+ OVERFLOW_MAX=2000
+ CACHE="malloc,1GB"
+ end
+
+ # Install the varnish monit file.
+ template '/usr/local/bin/varnishd_wrapper' do
+ mode 755
+ source 'varnishd_wrapper.erb'
+ variables({
+ :thread_pools => THREAD_POOLS,
+ :thread_pool_max => THREAD_POOL_MAX,
+ :overflow_max => OVERFLOW_MAX,
+ :cache => CACHE,
+ :varnish_port => 882
+ })
+ end
+
+ # Install MOTD to ensure support is aware of iptables hackery.
+ template '/etc/motd' do
+ mode 655
+ source 'motd.erb'
+ variables({ :interface => INTERFACE })
+ end
+
+ template '/etc/monit.d/varnishd.monitrc' do
+ owner node[:owner_name]
+ group node[:owner_name]
+ source 'varnishd.monitrc.erb'
+ end
+
+ # Install the app VCL file.
+ template '/etc/varnish/app.vcl' do
+ owner node[:owner_name]
+ group node[:owner_name]
+ source 'app.vcl.erb'
+ end
+
+ # Make sure the cache directory exists.
+ unless FileTest.exist? CACHE_DIR
+ user = Etc::getpwnam(node[:owner_name])
+ Dir.mkdir(CACHE_DIR)
+ File.chown(user.uid,user.gid,CACHE_DIR)
+ end
+
+ # Configure IPTables to redirect incomming traffic
+ # Start/restart varnish
+
+ execute "Configure iptables" do
+ command %Q{
+ iptables -t nat -F && iptables -t nat -A PREROUTING -p tcp --dport 80 -i #{INTERFACE} -j REDIRECT --to-ports 882 && /etc/init.d/iptables save
+ }
+ end
+
+ execute "Stop Varnish and bounce monit" do
+ command %Q{
+ sleep 20 ; pkill -9 monit && telinit q ; sleep 10 && monit
+ }
+ end
+
+end
@@ -0,0 +1,108 @@
+# Here is a simple default VCL file for your app. You will want to customize
+# this for your own needs.
+
+backend default {
+ .host = "127.0.0.1";
+ .port = "80";
+}
+
+sub vcl_recv {
+ set req.grace = 120s;
+
+##### Exclude specific urls
+#
+# This following section can be used to give Varnish regular expression to
+# compare the requested URL to. If there is a match, the requested item
+# will not be served from cache, nor cached by Varnish. The request will
+# be passed immediately to the backend.
+#
+#####
+#
+# if (req.url ~ "/do_not_cache_me") {
+# return (pass);
+# }
+#
+### Sometimes you may want to remove incoming cookies, tooxs, as
+### normally unique cookies will prevent caching. Use this with the
+### remove cookies code in vcl_fetch, too.
+#
+# else {
+# # Get rid of cookies so that a normal cache lookup can be done.
+# unset req.http.cookie;
+# }
+#####
+
+##### Normalize Accept-Encoding
+#
+# The Accept-Encoding header can be formatted multiple ways by different
+# browsers, resulting in multiple identical copies of things in the cache.
+# This normalizes it.
+#
+#####
+#
+# if (req.http.Accept-Encoding) {
+# if (req.http.Accept-Encoding ~ "gzip") {
+# set req.http.Accept-Encoding = "gzip";
+# } elsif (req.http.Accept-Encoding ~ "deflate") {
+# set req.http.Accept-Encoding = "deflate";
+# } else {
+# remove req.http.Accept-Encoding;
+# }
+# }
+#####
+
+}
+
+sub vcl_fetch {
+##### Grace period
+#
+# If a thread is fetching this object, set the grace period on it to
+# 120 seconds so that stale content can be served from cache for that period,
+# while waiting for the thread to return a new copy of the content. Change or
+# eliminate this value as needed.
+#
+#####
+ set obj.grace = 120s;
+
+##### Force caching
+#
+# If the normal TTL on the content is less than 120 seconds, set it to 120 seconds.
+# Rails apps often come out of the box being cache-unfriendly. This is an easy
+# workaround that forces them to be cached anyway. Be careful with this, though, as
+# you may end up caching more than you want to. The right answer is to fix the
+# app to use proper cache control headers for things that should be cached, but this
+# is a quick work-around.
+#
+#####
+# if (obj.ttl < 120s) {
+# set obj.ttl = 120s;
+# }
+
+##### Remove Cookies
+#
+# If there are requests that have to be able to pass cookies on, and/or cache
+# based on cookies (private caches per user), then this and the code in vcl_recv
+# will have to be adjusted. Refer to the Varnish docs for details, as needed.
+#
+#####
+#
+# if (req.url ~ "^/(application/dyn.js|login|signup|logout|.*/edit/|.*/delete/|.*/new|.*/users|.*/csvs)") {
+# # No-oping here at the moment. The assumption is that since this stuff here is never to be cached,
+# # it will be excluded from any other cookie manipulation, too. I'm leaving this block here, though,
+# # in case someone might want to do something else with these requests.
+# } else {
+# # The Cookie Monster eats the cookies.
+# unset obj.http.set-cookie;
+# }
+}
+
+sub vcl_deliver {
+ # This just inserts a diagnostic header to let us know if the content
+ # was served via a cache hit, or whether it was a miss.
+
+ if (obj.hits > 0) {
+ set resp.http.X-Cache = "HIT";
+ } else {
+ set resp.http.X-Cache = "MISS";
+ }
+}
@@ -0,0 +1,12 @@
+################
+ NOTICE
+###############
+This environment has been configured with varnish.
+
+Varnish listens IN FRONT of haproxy, although both applications listen on port 80.
+
+Currently iptables is being used to take incoming traffic from <%= @interface %>:80
+and redirect it to port 882. Varnish then connects to HAProxy via 127.0.0.1:80.
+
+If you can curl localhost:80 but cannot connect to port 80 remotely there is an issue
+with varnish. Varnish is currently monitored by monit.
@@ -0,0 +1,4 @@
+check process varnish_80
+ with pidfile /var/run/varnish.882.pid
+ start program = "/usr/local/bin/varnishd_wrapper"
+ stop program = "/usr/bin/killall varnish"
@@ -0,0 +1,2 @@
+#!/bin/bash
+/usr/sbin/varnishd -a 0.0.0.0:<%= @varnish_port %> -T 127.0.0.1:6082 -s <%= @cache %> -f /etc/varnish/app.vcl -P /var/run/varnish.882.pid -u nobody -g nobody -p obj_workspace=4096 -p sess_workspace=262144 -p listen_depth=2048 -p overflow_max=<%= @overflow_max %> -p ping_interval=2 -p log_hashstring=off -h classic,5000009 -p thread_pool_max=<%= @thread_pool_max %> -p lru_interval=60 -p esi_syntax=0x00000003 -p sess_timeout=10 -p thread_pools=<%= @thread_pools %> -p thread_pool_min=100 -p shm_workspace=32768 -p thread_pool_add_delay=1

0 comments on commit de698cb

Please sign in to comment.