1 # -*- encoding: binary -*-
8 # Implements a simple DSL for configuring a Unicorn server.
10 # See http://unicorn.bogomips.org/examples/unicorn.conf.rb and
11 # http://unicorn.bogomips.org/examples/unicorn.conf.minimal.rb
12 # example configuration files. An example config file for use with
13 # nginx is also available at
14 # http://unicorn.bogomips.org/examples/nginx.conf
# NOTE(review): inheriting directly from an anonymous Struct.new
# creates a throwaway intermediate parent class; modern style assigns
# the Struct to a constant first. Left as-is because changing the
# ancestry here could affect callers elsewhere in the project.
class Configurator < Struct.new(:set, :config_file, :after_reload)
17 # used to stash stuff for deferred processing of cli options in
18 # config.ru after "working_directory" is bound. Do not rely on
19 # this being around later on...
# Default settings for Unicorn
# NOTE(review): this excerpt shows only the interior of the DEFAULTS
# hash — the `DEFAULTS = {` opener and the closing delimiters of the
# hash and the lambdas are outside this view.
:logger => Logger.new($stderr),
:worker_processes => 1,
# the default lifecycle hooks only log; users replace them via the
# after_fork/before_fork/before_exec DSL methods below
:after_fork => lambda { |server, worker|
  server.logger.info("worker=#{worker.nr} spawned pid=#{$$}")
:before_fork => lambda { |server, worker|
  server.logger.info("worker=#{worker.nr} spawning...")
:before_exec => lambda { |server|
  server.logger.info("forked child re-executing...")
:preload_app => false,
# Builds a Configurator from +defaults+. The reserved keys
# :use_defaults, :config_file and :after_reload are extracted first;
# every remaining key is dispatched to the DSL method of the same
# name so it goes through normal validation.
def initialize(defaults = {}) #:nodoc:
  # :unset is a sentinel letting commit! distinguish "never
  # configured" from values that are legitimately nil or false
  self.set = Hash.new(:unset)
  use_defaults = defaults.delete(:use_defaults)
  self.config_file = defaults.delete(:config_file)
  # after_reload is only used by unicorn_rails, unsupported otherwise
  self.after_reload = defaults.delete(:after_reload)
  set.merge!(DEFAULTS) if use_defaults
  defaults.each { |key, value| self.send(key, value) }
  # block-form Hash.new gives each listener address its own opts hash;
  # Hash.new({}) would share one mutable default across all keys
  Hash === set[:listener_opts] or
    set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
  Array === set[:listeners] or set[:listeners] = []
  # NOTE(review): the method's closing `end` falls outside this excerpt
# NOTE(review): these lines are the body of #reload — the `def reload`
# line itself is outside this excerpt.
# re-evaluate the user's config file in the context of this object so
# the DSL methods above apply; the second arg gives eval a filename
# for readable backtraces
instance_eval(File.read(config_file), config_file) if config_file
# unicorn_rails creates dirs here after working_directory is bound
after_reload.call if after_reload
# working_directory binds immediately (easier error checking that way),
# now ensure any paths we changed are correctly set.
[ :pid, :stderr_path, :stdout_path ].each do |var|
  String === (path = set[var]) or next
  path = File.expand_path(path)
  # fail at config time rather than when the file is first opened
  File.writable?(path) || File.writable?(File.dirname(path)) or \
    raise ArgumentError, "directory for #{var}=#{path} not writable"
# Pushes every configured (non-:unset) option onto +server+ through
# its attribute writers. Keys listed in options[:skip] are left alone.
def commit!(server, options = {}) #:nodoc:
  skip = options[:skip] || []
  # ready_pipe must be handed over exactly once, so it is deleted from
  # RACKUP rather than read — a reload cannot re-deliver the same pipe
  if ready_pipe = RACKUP.delete(:ready_pipe)
    server.ready_pipe = ready_pipe
  set.each do |key, value|
    value == :unset and next
    skip.include?(key) and next
    server.__send__("#{key}=", value)
  # NOTE(review): the closing `end`s of the if/block/method are
  # outside this excerpt
91 # sets object to the +new+ Logger-like object. The new logger-like
92 # object must respond to the following methods:
93 # +debug+, +info+, +warn+, +error+, +fatal+
94 # The default Logger will log its output to the path specified
95 # by +stderr_path+. If you're running Unicorn daemonized, then
96 # you must specify a path to prevent error messages from going
# NOTE(review): interior of #logger(new) — the `def logger(new)` line
# and the final assignment are outside this excerpt. This duck-types
# the replacement logger instead of checking its class.
%w(debug info warn error fatal).each do |m|
  new.respond_to?(m) and next
  raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
107 # sets after_fork hook to a given block. This block will be called by
108 # the worker after forking. The following is an example hook which adds
109 # a per-process listener to every worker:
111 # after_fork do |server,worker|
112 # # per-process listener ports for debugging/admin:
113 # addr = "127.0.0.1:#{9293 + worker.nr}"
115 # # the negative :tries parameter indicates we will retry forever
116 # # waiting on the existing process to exit with a 5 second :delay
117 # # Existing options for Unicorn::Configurator#listen such as
118 # # :backlog, :rcvbuf, :sndbuf are available here as well.
119 # server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
121 # # drop permissions to "www-data" in the worker
# # generally there's no reason to start Unicorn as a privileged user
123 # # as it is not recommended to expose Unicorn to public clients.
124 # worker.user('www-data', 'www-data') if Process.euid == 0
# accepts either a block or an explicit Proc as args[0];
# set_hook validates the type and arity (2 args: server, worker)
def after_fork(*args, &block)
  set_hook(:after_fork, block_given? ? block : args[0])
# sets before_fork to be a given Proc object. This Proc
131 # object will be called by the master process before forking
# accepts either a block or an explicit Proc as args[0];
# set_hook validates the type and arity (2 args: server, worker)
def before_fork(*args, &block)
  set_hook(:before_fork, block_given? ? block : args[0])
137 # sets the before_exec hook to a given Proc object. This
138 # Proc object will be called by the master process right
139 # before exec()-ing the new unicorn binary. This is useful
140 # for freeing certain OS resources that you do NOT wish to
# share with the re-executed child process.
142 # There is no corresponding after_exec hook (for obvious reasons).
def before_exec(*args, &block)
  # req_arity is 1 here: only the server is passed before exec()
  set_hook(:before_exec, block_given? ? block : args[0], 1)
147 # sets the timeout of worker processes to +seconds+. Workers
148 # handling the request/app.call/response cycle taking longer than
149 # this time period will be forcibly killed (via SIGKILL). This
150 # timeout is enforced by the master process itself and not subject
# to the scheduling limitations by the worker process. Due to the
152 # low-complexity, low-overhead implementation, timeouts of less
153 # than 3.0 seconds can be considered inaccurate and unsafe.
155 # For running Unicorn behind nginx, it is recommended to set
# "fail_timeout=0" in your nginx configuration like this
157 # to have nginx always retry backends that may have had workers
158 # SIGKILL-ed due to timeouts.
160 # # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
161 # # on nginx upstream configuration:
162 # upstream unicorn_backend {
163 # # for UNIX domain socket setups:
164 # server unix:/path/to/unicorn.sock fail_timeout=0;
167 # server 192.168.0.7:8080 fail_timeout=0;
168 # server 192.168.0.8:8080 fail_timeout=0;
169 # server 192.168.0.9:8080 fail_timeout=0;
# NOTE(review): body of #timeout(seconds) — the `def` line is outside
# this excerpt. Validates and records the worker SIGKILL timeout.
Numeric === seconds or raise ArgumentError,
  "not numeric: timeout=#{seconds.inspect}"
# per the doc comment above, sub-3s timeouts are inaccurate/unsafe
# with the low-overhead master-side enforcement
seconds >= 3 or raise ArgumentError,
  "too low: timeout=#{seconds.inspect}"
set[:timeout] = seconds
179 # sets the current number of worker_processes to +nr+. Each worker
180 # process will serve exactly one client at a time. You can
181 # increment or decrement this value at runtime by sending SIGTTIN
182 # or SIGTTOU respectively to the master process without reloading
183 # the rest of your Unicorn configuration. See the SIGNALS document
184 # for more information.
def worker_processes(nr)
  # Integer === rejects Floats, strings and booleans outright
  Integer === nr or raise ArgumentError,
    "not an integer: worker_processes=#{nr.inspect}"
  nr >= 0 or raise ArgumentError,
    "not non-negative: worker_processes=#{nr.inspect}"
  set[:worker_processes] = nr
  # NOTE(review): closing `end` is outside this excerpt
193 # sets listeners to the given +addresses+, replacing or augmenting the
194 # current set. This is for the global listener pool shared by all
195 # worker processes. For per-worker listeners, see the after_fork example
196 # This is for internal API use only, do not use it in your Unicorn
197 # config file. Use listen instead.
def listeners(addresses) # :nodoc:
  Array === addresses or addresses = Array(addresses)
  # normalize every address (bare port, "*:port", "unix:path", "~")
  # through expand_addr before storing
  addresses.map! { |addr| expand_addr(addr) }
  set[:listeners] = addresses
204 # adds an +address+ to the existing listener set.
206 # The following options may be specified (but are generally not needed):
208 # +:backlog+: this is the backlog of the listen() syscall.
210 # Some operating systems allow negative values here to specify the
# maximum allowable value. In most cases, this number is only a
212 # recommendation and there are other OS-specific tunables and
213 # variables that can affect this number. See the listen(2)
214 # syscall documentation of your OS for the exact semantics of
217 # If you are running unicorn on multiple machines, lowering this number
218 # can help your load balancer detect when a machine is overloaded
219 # and give requests to a different machine.
223 # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
225 # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
226 # can be set via the setsockopt(2) syscall. Some kernels
227 # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
228 # there is no need (and it is sometimes detrimental) to specify them.
230 # See the socket API documentation of your operating system
231 # to determine the exact semantics of these settings and
232 # other operating system-specific knobs where they can be
235 # Defaults: operating system defaults
237 # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets
239 # This has no effect on UNIX sockets.
241 # Default: operating system defaults (usually Nagle's algorithm enabled)
243 # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
245 # This will prevent partial TCP frames from being sent out.
246 # Enabling +tcp_nopush+ is generally not needed or recommended as
247 # controlling +tcp_nodelay+ already provides sufficient latency
248 # reduction whereas Unicorn does not know when the best times are
249 # for flushing corked sockets.
251 # This has no effect on UNIX sockets.
253 # +:tries+: times to retry binding a socket if it is already in use
255 # A negative number indicates we will retry indefinitely, this is
256 # useful for migrations and upgrades when individual workers
257 # are binding to different ports.
261 # +:delay+: seconds to wait between successive +tries+
263 # Default: 0.5 seconds
265 # +:umask+: sets the file mode creation mask for UNIX sockets
267 # Typically UNIX domain sockets are created with more liberal
268 # file permissions than the rest of the application. By default,
269 # we create UNIX domain sockets to be readable and writable by
270 # all local users to give them the same accessibility as
271 # locally-bound TCP listeners.
273 # This has no effect on TCP listeners.
275 # Default: 0 (world read/writable)
# Validates per-listener options then appends +address+ to the
# listener set; see the option documentation in the comment block
# above for the supported keys.
def listen(address, opt = {})
  address = expand_addr(address)
  if String === address
    [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key|
      value = opt[key] or next
      # NOTE(review): the `Integer === value or` guard that precedes
      # this raise is missing from this excerpt
      raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
    [ :tcp_nodelay, :tcp_nopush ].each do |key|
      # nil means "not specified"; false is a valid explicit setting
      (value = opt[key]).nil? and next
      TrueClass === value || FalseClass === value or
        raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
    unless (value = opt[:delay]).nil?
      # NOTE(review): the `Numeric === value or` guard that precedes
      # this raise is missing from this excerpt
      raise ArgumentError, "not numeric: delay=#{value.inspect}"
    set[:listener_opts][address].merge!(opt)
# NOTE(review): the closing `end`s between the merge! and this append
# are missing from this excerpt
set[:listeners] << address
299 # sets the +path+ for the PID file of the unicorn master process
def pid(path)
  set_path(:pid, path)
end
302 # Enabling this preloads an application before forking worker
303 # processes. This allows memory savings when using a
304 # copy-on-write-friendly GC but can cause bad things to happen when
305 # resources like sockets are opened at load time by the master
306 # process and shared by multiple children. People enabling this are
307 # highly encouraged to look at the before_fork/after_fork hooks to
308 # properly close/reopen sockets. Files opened for logging do not
309 # have to be reopened as (unbuffered-in-userspace) files opened with
310 # the File::APPEND flag are written to atomically on UNIX.
312 # In addition to reloading the unicorn-specific config settings,
313 # SIGHUP will reload application code in the working
314 # directory/symlink when workers are gracefully restarted when
315 # preload_app=false (the default). As reloading the application
316 # sometimes requires RubyGems updates, +Gem.refresh+ is always
317 # called before the application is loaded (for RubyGems users).
319 # During deployments, care should _always_ be taken to ensure your
320 # applications are properly deployed and running. Using
321 # preload_app=false (the default) means you _must_ check if
322 # your application is responding properly after a deployment.
323 # Improperly deployed applications can go into a spawn loop
324 # if the application fails to load. While your children are
# in a spawn loop, it is possible to fix an application
326 # by properly deploying all required code and dependencies.
327 # Using preload_app=true means any application load error will
328 # cause the master process to exit with an error.
# NOTE(review): the `case bool` opener, the `else` line, and the
# closing `end`s of this method are missing from this excerpt.
def preload_app(bool)
  when TrueClass, FalseClass
    set[:preload_app] = bool
    raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
339 # Allow redirecting $stderr to a given path. Unlike doing this from
340 # the shell, this allows the unicorn process to know the path its
341 # writing to and rotate the file if it is used for logging. The
342 # file will be opened with the File::APPEND flag and writes
343 # synchronized to the kernel (but not necessarily to _disk_) so
344 # multiple processes can safely append to it.
346 # If you are daemonizing and using the default +logger+, it is important
347 # to specify this as errors will otherwise be lost to /dev/null.
# Some applications/libraries may also trigger warnings that go to
349 # stderr, and they will end up here.
def stderr_path(path)
  # set_path validates the type; path may be nil to unset
  set_path(:stderr_path, path)
354 # Same as stderr_path, except for $stdout. Not many Rack applications
355 # write to $stdout, but any that do will have their output written here.
# It is safe to point this to the same location as stderr_path.
357 # Like stderr_path, this defaults to /dev/null when daemonized.
def stdout_path(path)
  # set_path validates the type; path may be nil to unset
  set_path(:stdout_path, path)
362 # sets the working directory for Unicorn. This ensures SIGUSR2 will
363 # start a new instance of Unicorn in this directory. This may be
364 # a symlink, a common scenario for Capistrano users.
def working_directory(path)
  # just let chdir raise errors
  path = File.expand_path(path)
  # a relative config_file must remain readable relative to the new
  # working directory, otherwise later HUP/USR2 reloads would break
  # NOTE(review): the `if config_file &&` opener and the
  # `raise ArgumentError,` line around this check are missing from
  # this excerpt
  config_file[0] != ?/ &&
    ! File.readable?("#{path}/#{config_file}")
    "config_file=#{config_file} would not be accessible in" \
    " working_directory=#{path}"
  # record for SIGUSR2 re-exec; setting PWD preserves symlinked
  # deploy directories (the Capistrano scenario mentioned above)
  HttpServer::START_CTX[:cwd] = ENV["PWD"] = path
379 # Runs worker processes as the specified +user+ and +group+.
380 # The master process always stays running as the user who started it.
381 # This switch will occur after calling the after_fork hook, and only
382 # if the Worker#user method is not called in the after_fork hook
def user(user, group = nil)
  # raises ArgumentError on invalid user/group
  # NOTE(review): the matching Etc.getpwnam(user) check is missing
  # from this excerpt
  Etc.getgrnam(group) if group
  # stored only; the actual privilege drop happens in the worker
  # after the after_fork hook runs (see the doc comment above)
  set[:user] = [ user, group ]
390 # expands "unix:path/to/foo" to a socket relative to the current path
391 # expands pathnames of sockets if relative to "~" or "~username"
392 # expands "*:port and ":port" to "0.0.0.0:port"
def expand_addr(address) #:nodoc:
  # a bare Integer port binds to all interfaces
  return "0.0.0.0:#{address}" if Integer === address
  return address unless String === address
  # NOTE(review): the `case address` opener and parts of the branch
  # bodies are missing from this excerpt, so the pairing of lines to
  # branches below cannot be fully determined from this view
  when %r{\Aunix:(.*)\z}
    File.expand_path(address)
  when %r{\A(?:\*:)?(\d+)\z}
  when %r{\A(.*):(\d+)\z}
    # canonicalize the name
    packed = Socket.pack_sockaddr_in($2.to_i, $1)
    Socket.unpack_sockaddr_in(packed).reverse!.join(':')
# NOTE(review): only the opening of this helper is visible — the
# `case path` opener, the branch bodies and the closing `end`s are
# missing from this excerpt. Visible code shows it accepts nil or a
# String value for +var+.
def set_path(var, path) #:nodoc:
  when NilClass, String
# Stores a lifecycle hook after validating its arity.
# NOTE(review): the `case my_proc` dispatch lines and the
# `raise ArgumentError,` line following the arity check are missing
# from this excerpt.
def set_hook(var, my_proc, req_arity = 2) #:nodoc:
  arity = my_proc.arity
  (arity == req_arity) or \
    "#{var}=#{my_proc.inspect} has invalid arity: " \
    "#{arity} (need #{req_arity})"
  # a nil hook restores the logging default from DEFAULTS
  my_proc = DEFAULTS[var]
  raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
440 # this is called _after_ working_directory is bound. This only
441 # parses the embedded switches in .ru files
442 # (for "rackup" compatibility)
def parse_rackup_file # :nodoc:
  ru = RACKUP[:file] or return # we only return here in unit tests
  # :rails means use (old) Rails autodetect
  File.readable?('config.ru') or return
  File.readable?(ru) or
    raise ArgumentError, "rackup file (#{ru}) not readable"
  # it could be a .rb file, too, we don't parse those manually
  ru =~ /\.ru\z/ or return
  # rackup compatibility: parse "#\ <switches>" embedded on the first
  # comment line of the .ru file through the shared OptionParser
  /^#\\(.*)/ =~ File.read(ru) or return
  RACKUP[:optparse].parse!($1.split(/\s+/))
  # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery)
  # NOTE(review): eval reads locals back out of the optparse binding;
  # the binding argument to eval is missing from this excerpt. The
  # evaluated string is a fixed literal, not user input.
  host, port, set_listener, options, daemonize =
    eval("[ host, port, set_listener, options, daemonize ]",
  # XXX duplicate code from bin/unicorn{,_rails}
  set[:listeners] << "#{host}:#{port}" if set_listener
  # unicorn_rails wants a default pid path, (not plain 'unicorn')
  # NOTE(review): the lines defining `spid` (presumably from set[:pid])
  # and the surrounding `if daemonize` are missing from this excerpt
  pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset
  unless RACKUP[:daemonized]
    Unicorn::Launcher.daemonize!(options)
    # hand the ready_pipe over exactly once via delete
    RACKUP[:ready_pipe] = options.delete(:ready_pipe)