# -*- encoding: binary -*-

# Implements a simple DSL for configuring a Unicorn server.
#
# See http://unicorn.bogomips.org/examples/unicorn.conf.rb for an
# example config file.  An example config file for use with nginx is
# also available at http://unicorn.bogomips.org/examples/nginx.conf
13 class Configurator < Struct.new(:set, :config_file, :after_reload)
15 # Default settings for Unicorn
18 :logger => Logger.new($stderr),
19 :worker_processes => 1,
20 :after_fork => lambda { |server, worker|
21 server.logger.info("worker=#{worker.nr} spawned pid=#{$$}")
23 :before_fork => lambda { |server, worker|
24 server.logger.info("worker=#{worker.nr} spawning...")
26 :before_exec => lambda { |server|
27 server.logger.info("forked child re-executing...")
30 :preload_app => false,
33 def initialize(defaults = {}) #:nodoc:
34 self.set = Hash.new(:unset)
35 use_defaults = defaults.delete(:use_defaults)
36 self.config_file = defaults.delete(:config_file)
38 # after_reload is only used by unicorn_rails, unsupported otherwise
39 self.after_reload = defaults.delete(:after_reload)
41 set.merge!(DEFAULTS) if use_defaults
42 defaults.each { |key, value| self.send(key, value) }
43 Hash === set[:listener_opts] or
44 set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
45 Array === set[:listeners] or set[:listeners] = []
50 instance_eval(File.read(config_file), config_file) if config_file
52 # working_directory binds immediately (easier error checking that way),
53 # now ensure any paths we changed are correctly set.
54 [ :pid, :stderr_path, :stdout_path ].each do |var|
55 String === (path = set[var]) or next
56 path = File.expand_path(path)
57 test(?w, path) || test(?w, File.dirname(path)) or \
58 raise ArgumentError, "directory for #{var}=#{path} not writable"
61 # unicorn_rails creates dirs here after working_directory is bound
62 after_reload.call if after_reload
65 def commit!(server, options = {}) #:nodoc:
66 skip = options[:skip] || []
67 set.each do |key, value|
68 value == :unset and next
69 skip.include?(key) and next
70 server.__send__("#{key}=", value)
78 # sets object to the +new+ Logger-like object. The new logger-like
79 # object must respond to the following methods:
80 # +debug+, +info+, +warn+, +error+, +fatal+, +close+
82 %w(debug info warn error fatal close).each do |m|
83 new.respond_to?(m) and next
84 raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
90 # sets after_fork hook to a given block. This block will be called by
91 # the worker after forking. The following is an example hook which adds
92 # a per-process listener to every worker:
94 # after_fork do |server,worker|
95 # # per-process listener ports for debugging/admin:
96 # addr = "127.0.0.1:#{9293 + worker.nr}"
98 # # the negative :tries parameter indicates we will retry forever
99 # # waiting on the existing process to exit with a 5 second :delay
100 # # Existing options for Unicorn::Configurator#listen such as
101 # # :backlog, :rcvbuf, :sndbuf are available here as well.
102 # server.listen(addr, :tries => -1, :delay => 5, :backlog => 128)
104 # # drop permissions to "www-data" in the worker
105 # # generally there's no reason to start Unicorn as a priviledged user
106 # # as it is not recommended to expose Unicorn to public clients.
107 # worker.user('www-data', 'www-data') if Process.euid == 0
109 def after_fork(*args, &block)
110 set_hook(:after_fork, block_given? ? block : args[0])
113 # sets before_fork got be a given Proc object. This Proc
114 # object will be called by the master process before forking
116 def before_fork(*args, &block)
117 set_hook(:before_fork, block_given? ? block : args[0])
120 # sets the before_exec hook to a given Proc object. This
121 # Proc object will be called by the master process right
122 # before exec()-ing the new unicorn binary. This is useful
123 # for freeing certain OS resources that you do NOT wish to
124 # share with the reexeced child process.
125 # There is no corresponding after_exec hook (for obvious reasons).
126 def before_exec(*args, &block)
127 set_hook(:before_exec, block_given? ? block : args[0], 1)
130 # sets the timeout of worker processes to +seconds+. Workers
131 # handling the request/app.call/response cycle taking longer than
132 # this time period will be forcibly killed (via SIGKILL). This
133 # timeout is enforced by the master process itself and not subject
134 # to the scheduling limitations by the worker process. Due the
135 # low-complexity, low-overhead implementation, timeouts of less
136 # than 3.0 seconds can be considered inaccurate and unsafe.
138 # For running Unicorn behind nginx, it is recommended to set
139 # "fail_timeout=0" for in your nginx configuration like this
140 # to have nginx always retry backends that may have had workers
141 # SIGKILL-ed due to timeouts.
143 # # See http://wiki.nginx.org/NginxHttpUpstreamModule for more details
144 # # on nginx upstream configuration:
145 # upstream unicorn_backend {
146 # # for UNIX domain socket setups:
147 # server unix:/path/to/unicorn.sock fail_timeout=0;
150 # server 192.168.0.7:8080 fail_timeout=0;
151 # server 192.168.0.8:8080 fail_timeout=0;
152 # server 192.168.0.9:8080 fail_timeout=0;
155 Numeric === seconds or raise ArgumentError,
156 "not numeric: timeout=#{seconds.inspect}"
157 seconds >= 3 or raise ArgumentError,
158 "too low: timeout=#{seconds.inspect}"
159 set[:timeout] = seconds
162 # sets the current number of worker_processes to +nr+. Each worker
163 # process will serve exactly one client at a time. You can
164 # increment or decrement this value at runtime by sending SIGTTIN
165 # or SIGTTOU respectively to the master process without reloading
166 # the rest of your Unicorn configuration. See the SIGNALS document
167 # for more information.
168 def worker_processes(nr)
169 Integer === nr or raise ArgumentError,
170 "not an integer: worker_processes=#{nr.inspect}"
171 nr >= 0 or raise ArgumentError,
172 "not non-negative: worker_processes=#{nr.inspect}"
173 set[:worker_processes] = nr
176 # sets listeners to the given +addresses+, replacing or augmenting the
177 # current set. This is for the global listener pool shared by all
178 # worker processes. For per-worker listeners, see the after_fork example
179 # This is for internal API use only, do not use it in your Unicorn
180 # config file. Use listen instead.
181 def listeners(addresses) # :nodoc:
182 Array === addresses or addresses = Array(addresses)
183 addresses.map! { |addr| expand_addr(addr) }
184 set[:listeners] = addresses
187 # adds an +address+ to the existing listener set.
189 # The following options may be specified (but are generally not needed):
191 # +:backlog+: this is the backlog of the listen() syscall.
193 # Some operating systems allow negative values here to specify the
194 # maximum allowable value. In most cases, this number is only
195 # recommendation and there are other OS-specific tunables and
196 # variables that can affect this number. See the listen(2)
197 # syscall documentation of your OS for the exact semantics of
200 # If you are running unicorn on multiple machines, lowering this number
201 # can help your load balancer detect when a machine is overloaded
202 # and give requests to a different machine.
206 # +:rcvbuf+, +:sndbuf+: maximum receive and send buffer sizes of sockets
208 # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
209 # can be set via the setsockopt(2) syscall. Some kernels
210 # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
211 # there is no need (and it is sometimes detrimental) to specify them.
213 # See the socket API documentation of your operating system
214 # to determine the exact semantics of these settings and
215 # other operating system-specific knobs where they can be
218 # Defaults: operating system defaults
220 # +:tcp_nodelay+: disables Nagle's algorithm on TCP sockets
222 # This has no effect on UNIX sockets.
224 # Default: operating system defaults (usually Nagle's algorithm enabled)
226 # +:tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
228 # This will prevent partial TCP frames from being sent out.
229 # Enabling +tcp_nopush+ is generally not needed or recommended as
230 # controlling +tcp_nodelay+ already provides sufficient latency
231 # reduction whereas Unicorn does not know when the best times are
232 # for flushing corked sockets.
234 # This has no effect on UNIX sockets.
236 # +:tries+: times to retry binding a socket if it is already in use
238 # A negative number indicates we will retry indefinitely, this is
239 # useful for migrations and upgrades when individual workers
240 # are binding to different ports.
244 # +:delay+: seconds to wait between successive +tries+
246 # Default: 0.5 seconds
248 # +:umask+: sets the file mode creation mask for UNIX sockets
250 # Typically UNIX domain sockets are created with more liberal
251 # file permissions than the rest of the application. By default,
252 # we create UNIX domain sockets to be readable and writable by
253 # all local users to give them the same accessibility as
254 # locally-bound TCP listeners.
256 # This has no effect on TCP listeners.
258 # Default: 0 (world read/writable)
259 def listen(address, opt = {})
260 address = expand_addr(address)
261 if String === address
262 [ :umask, :backlog, :sndbuf, :rcvbuf, :tries ].each do |key|
263 value = opt[key] or next
265 raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
267 [ :tcp_nodelay, :tcp_nopush ].each do |key|
268 (value = opt[key]).nil? and next
269 TrueClass === value || FalseClass === value or
270 raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
272 unless (value = opt[:delay]).nil?
274 raise ArgumentError, "not numeric: delay=#{value.inspect}"
276 set[:listener_opts][address].merge!(opt)
279 set[:listeners] << address
282 # sets the +path+ for the PID file of the unicorn master process
283 def pid(path); set_path(:pid, path); end
285 # Enabling this preloads an application before forking worker
286 # processes. This allows memory savings when using a
287 # copy-on-write-friendly GC but can cause bad things to happen when
288 # resources like sockets are opened at load time by the master
289 # process and shared by multiple children. People enabling this are
290 # highly encouraged to look at the before_fork/after_fork hooks to
291 # properly close/reopen sockets. Files opened for logging do not
292 # have to be reopened as (unbuffered-in-userspace) files opened with
293 # the File::APPEND flag are written to atomically on UNIX.
295 # In addition to reloading the unicorn-specific config settings,
296 # SIGHUP will reload application code in the working
297 # directory/symlink when workers are gracefully restarted.
298 def preload_app(bool)
300 when TrueClass, FalseClass
301 set[:preload_app] = bool
303 raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
307 # Allow redirecting $stderr to a given path. Unlike doing this from
308 # the shell, this allows the unicorn process to know the path its
309 # writing to and rotate the file if it is used for logging. The
310 # file will be opened with the File::APPEND flag and writes
311 # synchronized to the kernel (but not necessarily to _disk_) so
312 # multiple processes can safely append to it.
313 def stderr_path(path)
314 set_path(:stderr_path, path)
317 # Same as stderr_path, except for $stdout
318 def stdout_path(path)
319 set_path(:stdout_path, path)
322 # sets the working directory for Unicorn. This ensures USR2 will
323 # start a new instance of Unicorn in this directory. This may be
325 def working_directory(path)
326 # just let chdir raise errors
327 path = File.expand_path(path)
329 config_file[0] != ?/ &&
330 ! test(?r, "#{path}/#{config_file}")
332 "config_file=#{config_file} would not be accessible in" \
333 " working_directory=#{path}"
336 HttpServer::START_CTX[:cwd] = ENV["PWD"] = path
339 # Runs worker processes as the specified +user+ and +group+.
340 # The master process always stays running as the user who started it.
341 # This switch will occur after calling the after_fork hook, and only
342 # if the Worker#user method is not called in the after_fork hook
343 def user(user, group = nil)
344 # raises ArgumentError on invalid user/group
346 Etc.getgrnam(group) if group
347 set[:user] = [ user, group ]
350 # expands "unix:path/to/foo" to a socket relative to the current path
351 # expands pathnames of sockets if relative to "~" or "~username"
352 # expands "*:port and ":port" to "0.0.0.0:port"
353 def expand_addr(address) #:nodoc
354 return "0.0.0.0:#{address}" if Integer === address
355 return address unless String === address
358 when %r{\Aunix:(.*)\z}
361 File.expand_path(address)
362 when %r{\A(?:\*:)?(\d+)\z}
364 when %r{\A(.*):(\d+)\z}
365 # canonicalize the name
366 packed = Socket.pack_sockaddr_in($2.to_i, $1)
367 Socket.unpack_sockaddr_in(packed).reverse!.join(':')
375 def set_path(var, path) #:nodoc:
377 when NilClass, String
384 def set_hook(var, my_proc, req_arity = 2) #:nodoc:
387 arity = my_proc.arity
388 (arity == req_arity) or \
390 "#{var}=#{my_proc.inspect} has invalid arity: " \
391 "#{arity} (need #{req_arity})"
393 my_proc = DEFAULTS[var]
395 raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"