1 # -*- encoding: binary -*-
8 # Implements a simple DSL for configuring a Unicorn server.
10 # Example (when used with the Unicorn config file):
12 # working_directory "/path/to/deploy/app/current"
13 # listen '/tmp/my_app.sock', :backlog => 1
14 # listen 9292, :tcp_nopush => true
16 # pid "/tmp/my_app.pid"
18 # # combine REE with "preload_app true" for memory savings
19 # # http://rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
21 # GC.respond_to?(:copy_on_write_friendly=) and
22 # GC.copy_on_write_friendly = true
24 # before_fork do |server, worker|
# # the following is highly recommended for Rails + "preload_app true"
26 # # as there's no need for the master process to hold a connection
27 # defined?(ActiveRecord::Base) and
28 # ActiveRecord::Base.connection.disconnect!
30 # # The following is only recommended for memory/DB-constrained
31 # # installations. It is not needed if your system can house
32 # # twice as many worker_processes as you have configured.
34 # # This allows a new master process to incrementally
35 # # phase out the old master process with SIGTTOU to avoid a
36 # # thundering herd (especially in the "preload_app false" case)
37 # # when doing a transparent upgrade. The last worker spawned
38 # # will then kill off the old master process with a SIGQUIT.
39 # old_pid = "#{server.config[:pid]}.oldbin"
40 # if old_pid != server.pid
42 # sig = (worker.nr + 1) >= server.worker_processes ? :QUIT : :TTOU
43 # Process.kill(sig, File.read(old_pid).to_i)
44 # rescue Errno::ENOENT, Errno::ESRCH
48 # # *optionally* throttle the master from forking too quickly by sleeping
52 # after_fork do |server, worker|
53 # # per-process listener ports for debugging/admin/migrations
54 # addr = "127.0.0.1:#{9293 + worker.nr}"
55 # server.listen(addr, :tries => -1, :delay => 5, :tcp_nopush => true)
57 # # the following is *required* for Rails + "preload_app true",
58 # defined?(ActiveRecord::Base) and
59 # ActiveRecord::Base.establish_connection
61 # # if preload_app is true, then you may also want to check and
62 # # restart any other shared sockets/descriptors such as Memcached,
63 # # and Redis. TokyoCabinet file handles are safe to reuse
64 # # between any number of forked children (assuming your kernel
65 # # correctly implements pread()/pwrite() system calls)
67 class Configurator < Struct.new(:set, :config_file)
# Default settings for Unicorn; merged into +set+ when the
# configurator is created with :use_defaults => true.
DEFAULTS = {
  :logger => Logger.new($stderr),
  :worker_processes => 1,
  # default hooks only log; real deployments usually override these
  :after_fork => lambda { |server, worker|
      server.logger.info("worker=#{worker.nr} spawned pid=#{$$}")
    },
  :before_fork => lambda { |server, worker|
      server.logger.info("worker=#{worker.nr} spawning...")
    },
  :before_exec => lambda { |server|
      server.logger.info("forked child re-executing...")
    },
  :preload_app => false,
}
# Builds the configurator. +defaults+ may carry the special keys
# :use_defaults (merge DEFAULTS into the settings hash) and
# :config_file (path of a Unicorn config file to evaluate); all
# remaining key/value pairs are applied by calling the matching
# DSL method. Unset keys are represented by the :unset sentinel.
def initialize(defaults = {}) #:nodoc:
  self.set = Hash.new(:unset)
  use_defaults = defaults.delete(:use_defaults)
  self.config_file = defaults.delete(:config_file)
  set.merge!(DEFAULTS) if use_defaults
  defaults.each { |key, value| self.send(key, value) }
  Hash === set[:listener_opts] or
      set[:listener_opts] = Hash.new { |hash, key| hash[key] = {} }
  Array === set[:listeners] or set[:listeners] = []
  reload
end

# (re-)evaluates the config file (if one was given) in the context
# of this object, then validates path-like settings.
def reload #:nodoc:
  instance_eval(File.read(config_file), config_file) if config_file

  # working_directory binds immediately (easier error checking that way),
  # now ensure any paths we changed are correctly set.
  [ :pid, :stderr_path, :stdout_path ].each do |var|
    String === (path = set[var]) or next
    path = File.expand_path(path)
    # Kernel#test(?w, ...) checks writability of the file or its directory
    test(?w, path) || test(?w, File.dirname(path)) or \
      raise ArgumentError, "directory for #{var}=#{path} not writable"
  end
end
# Pushes every configured value onto +server+ via its attribute
# writers. Keys still holding the :unset sentinel and keys listed
# in options[:skip] are left untouched.
def commit!(server, options = {}) #:nodoc:
  skip = options[:skip] || []
  set.each do |key, value|
    value == :unset and next
    skip.include?(key) and next
    server.__send__("#{key}=", value)
  end
end
# returns the raw value stored for the given configuration +key+
def [](key) # :nodoc:
  set[key]
end
# sets object to the +new+ Logger-like object. The new logger-like
# object must respond to the following methods:
#  +debug+, +info+, +warn+, +error+, +fatal+, +close+
def logger(new)
  %w(debug info warn error fatal close).each do |m|
    new.respond_to?(m) and next
    raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
  end

  set[:logger] = new
end
# sets after_fork hook to a given block. This block will be called by
# the worker after forking. The following is an example hook which adds
# a per-process listener to every worker:
#
#  after_fork do |server,worker|
#    # per-process listener ports for debugging/admin:
#    addr = "127.0.0.1:#{9293 + worker.nr}"
#
#    # the negative :tries parameter indicates we will retry forever
#    # waiting on the existing process to exit with a 5 second :delay
#    # Existing options for Unicorn::Configurator#listen such as
#    # :backlog, :rcvbuf, :sndbuf are available here as well.
#    server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
#
#    # drop permissions to "www-data" in the worker
#    # generally there's no reason to start Unicorn as a privileged user
#    # as it is not recommended to expose Unicorn to public clients.
#    worker.user('www-data', 'www-data') if Process.euid == 0
#  end
def after_fork(*args, &block)
  set_hook(:after_fork, block_given? ? block : args[0])
end
# sets before_fork to be a given Proc object. This Proc
# object will be called by the master process before forking
# each worker.
def before_fork(*args, &block)
  set_hook(:before_fork, block_given? ? block : args[0])
end
# sets the before_exec hook to a given Proc object. This
# Proc object will be called by the master process right
# before exec()-ing the new unicorn binary. This is useful
# for freeing certain OS resources that you do NOT wish to
# share with the reexeced child process.
# There is no corresponding after_exec hook (for obvious reasons).
def before_exec(*args, &block)
  # before_exec takes the server only, hence the required arity of 1
  set_hook(:before_exec, block_given? ? block : args[0], 1)
end
# sets the timeout of worker processes to +seconds+. Workers
# handling the request/app.call/response cycle taking longer than
# this time period will be forcibly killed (via SIGKILL). This
# timeout is enforced by the master process itself and not subject
# to the scheduling limitations by the worker process. Due to the
# low-complexity, low-overhead implementation, timeouts of less
# than 3.0 seconds can be considered inaccurate and unsafe.
#
# For running Unicorn behind nginx, it is recommended to set
# "fail_timeout=0" in your nginx configuration like this
# to have nginx always retry backends that may have had workers
# SIGKILL-ed due to timeouts.
#
#   # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
#   # on nginx upstream configuration:
#   upstream unicorn_backend {
#     # for UNIX domain socket setups:
#     server unix:/path/to/unicorn.sock fail_timeout=0;
#
#     # for TCP setups:
#     server 192.168.0.7:8080 fail_timeout=0;
#     server 192.168.0.8:8080 fail_timeout=0;
#     server 192.168.0.9:8080 fail_timeout=0;
#   }
def timeout(seconds)
  Numeric === seconds or raise ArgumentError,
                               "not numeric: timeout=#{seconds.inspect}"
  # sub-3s timeouts are unsafe given the low-overhead enforcement above
  seconds >= 3 or raise ArgumentError,
                        "too low: timeout=#{seconds.inspect}"
  set[:timeout] = seconds
end
# sets the current number of worker_processes to +nr+. Each worker
# process will serve exactly one client at a time. You can
# increment or decrement this value at runtime by sending SIGTTIN
# or SIGTTOU respectively to the master process without reloading
# the rest of your Unicorn configuration. See the SIGNALS document
# for more information.
def worker_processes(nr)
  Integer === nr or raise ArgumentError,
                          "not an integer: worker_processes=#{nr.inspect}"
  nr >= 0 or raise ArgumentError,
                   "not non-negative: worker_processes=#{nr.inspect}"
  set[:worker_processes] = nr
end
# sets listeners to the given +addresses+, replacing or augmenting the
# current set. This is for the global listener pool shared by all
# worker processes. For per-worker listeners, see the after_fork example.
# This is for internal API use only, do not use it in your Unicorn
# config file. Use listen instead.
def listeners(addresses) # :nodoc:
  Array === addresses or addresses = Array(addresses)
  addresses.map! { |addr| expand_addr(addr) }
  set[:listeners] = addresses
end
# adds an +address+ to the existing listener set.
#
# The following options may be specified (but are generally not needed):
#
# +:backlog+: this is the backlog of the listen() syscall.
#
#   Some operating systems allow negative values here to specify the
#   maximum allowable value. In most cases, this number is only a
#   recommendation and there are other OS-specific tunables and
#   variables that can affect this number. See the listen(2)
#   syscall documentation of your OS for the exact semantics of this.
#
#   If you are running unicorn on multiple machines, lowering this number
#   can help your load balancer detect when a machine is overloaded
#   and give requests to a different machine.
#
# +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
#
#   These correspond to the SO_RCVBUF and SO_SNDBUF settings which
#   can be set via the setsockopt(2) syscall. Some kernels
#   (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
#   there is no need (and it is sometimes detrimental) to specify them.
#
#   See the socket API documentation of your operating system
#   to determine the exact semantics of these settings and
#   other operating system-specific knobs where they can be
#   specified.
#
#   Defaults: operating system defaults
#
# +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets
#
#   This has no effect on UNIX sockets.
#
#   Default: operating system defaults (usually Nagle's algorithm enabled)
#
# +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
#
#   This will prevent partial TCP frames from being sent out.
#   Enabling +tcp_nopush+ is generally not needed or recommended as
#   controlling +tcp_nodelay+ already provides sufficient latency
#   reduction whereas Unicorn does not know when the best times are
#   for flushing corked sockets.
#
#   This has no effect on UNIX sockets.
#
# +:tries+: times to retry binding a socket if it is already in use
#
#   A negative number indicates we will retry indefinitely, this is
#   useful for migrations and upgrades when individual workers
#   are binding to different ports.
#
# +:delay+: seconds to wait between successive +tries+
#
#   Default: 0.5 seconds
def listen(address, opt = {})
  address = expand_addr(address)
  if String === address
    # validate integer-valued options
    [ :backlog, :sndbuf, :rcvbuf, :tries ].each do |key|
      value = opt[key] or next
      Integer === value or
        raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
    end
    # validate boolean-valued options (nil means "unset", so skip it)
    [ :tcp_nodelay, :tcp_nopush ].each do |key|
      (value = opt[key]).nil? and next
      TrueClass === value || FalseClass === value or
        raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
    end
    unless (value = opt[:delay]).nil?
      Numeric === value or
        raise ArgumentError, "not numeric: delay=#{value.inspect}"
    end
    set[:listener_opts][address].merge!(opt)
  end

  set[:listeners] << address
end
# sets the +path+ for the PID file of the unicorn master process
def pid(path)
  set_path(:pid, path)
end
# Enabling this preloads an application before forking worker
# processes. This allows memory savings when using a
# copy-on-write-friendly GC but can cause bad things to happen when
# resources like sockets are opened at load time by the master
# process and shared by multiple children. People enabling this are
# highly encouraged to look at the before_fork/after_fork hooks to
# properly close/reopen sockets. Files opened for logging do not
# have to be reopened as (unbuffered-in-userspace) files opened with
# the File::APPEND flag are written to atomically on UNIX.
#
# In addition to reloading the unicorn-specific config settings,
# SIGHUP will reload application code in the working
# directory/symlink when workers are gracefully restarted.
def preload_app(bool)
  case bool
  when TrueClass, FalseClass
    set[:preload_app] = bool
  else
    raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
  end
end
# Allow redirecting $stderr to a given path. Unlike doing this from
# the shell, this allows the unicorn process to know the path its
# writing to and rotate the file if it is used for logging. The
# file will be opened with the File::APPEND flag and writes
# synchronized to the kernel (but not necessarily to _disk_) so
# multiple processes can safely append to it.
def stderr_path(path)
  set_path(:stderr_path, path)
end
# Same as stderr_path, except for $stdout
def stdout_path(path)
  set_path(:stdout_path, path)
end
# sets the working directory for Unicorn. This ensures USR2 will
# start a new instance of Unicorn in this directory. This may be
# a symlink.
def working_directory(path)
  # just let chdir raise errors
  path = File.expand_path(path)
  Dir.chdir(path)
  # record the new cwd for re-exec and expose it via $PWD
  HttpServer::START_CTX[:cwd] = ENV["PWD"] = path
end
# expands "unix:path/to/foo" to a socket relative to the current path
# expands pathnames of sockets if relative to "~" or "~username"
# expands "*:port and ":port" to "0.0.0.0:port"
# non-String/non-Integer addresses are returned unmodified
def expand_addr(address) #:nodoc:
  return "0.0.0.0:#{address}" if Integer === address
  return address unless String === address

  case address
  when %r{\Aunix:(.*)\z}
    File.expand_path($1)
  when %r{\A~}
    File.expand_path(address)
  when %r{\A(?:\*:)?(\d+)\z}
    "0.0.0.0:#$1"
  when %r{\A(.*):(\d+)\z}
    # canonicalize the name via a sockaddr round-trip
    packed = Socket.pack_sockaddr_in($2.to_i, $1)
    Socket.unpack_sockaddr_in(packed).reverse!.join(':')
  else
    address
  end
end
# stores +path+ (a String or nil) under configuration key +var+;
# any other type raises ArgumentError.
def set_path(var, path) #:nodoc:
  case path
  when NilClass, String
    set[var] = path
  else
    raise ArgumentError
  end
end
401 def set_hook(var, my_proc, req_arity = 2) #:nodoc:
404 arity = my_proc.arity
405 (arity == req_arity) or \
407 "#{var}=#{my_proc.inspect} has invalid arity: " \
408 "#{arity} (need #{req_arity})"
410 my_proc = DEFAULTS[var]
412 raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"