# Implements a simple DSL for configuring a unicorn server.
#
# Example (when used with the unicorn config file):
#
#   listen '/tmp/my_app.sock', :backlog => 1
#   listen '0.0.0.0:9292'
#   pid "/tmp/my_app.pid"
#   after_fork do |server,worker|
#     server.listen("127.0.0.1:#{9293 + worker.nr}") rescue nil
#   end
17 class Configurator < Struct.new(:set, :config_file)
18 # The default logger writes its output to $stderr
19 DEFAULT_LOGGER = Logger.new($stderr)
21 # Default settings for Unicorn
24 :logger => DEFAULT_LOGGER,
25 :worker_processes => 1,
26 :after_fork => lambda { |server, worker|
27 server.logger.info("worker=#{worker.nr} spawned pid=#{$$}")
29 # per-process listener ports for debugging/admin:
30 # "rescue nil" statement is needed because USR2 will
31 # cause the master process to reexecute itself and the
32 # per-worker ports can be taken, necessitating another
33 # HUP after QUIT-ing the original master:
34 # server.listen("127.0.0.1:#{8081 + worker.nr}") rescue nil
36 :before_fork => lambda { |server, worker|
37 server.logger.info("worker=#{worker.nr} spawning...")
39 :before_exec => lambda { |server|
40 server.logger.info("forked child re-executing...")
43 :preload_app => false,
46 def initialize(defaults = {}) #:nodoc:
47 self.set = Hash.new(:unset)
48 use_defaults = defaults.delete(:use_defaults)
49 self.config_file = defaults.delete(:config_file)
50 set.merge!(DEFAULTS) if use_defaults
51 defaults.each { |key, value| self.send(key, value) }
56 instance_eval(File.read(config_file)) if config_file
59 def commit!(server, options = {}) #:nodoc:
60 skip = options[:skip] || []
61 set.each do |key, value|
62 value == :unset and next
63 skip.include?(key) and next
64 server.__send__("#{key}=", value)
72 # sets object to the +new+ Logger-like object. The new logger-like
73 # object must respond to the following methods:
74 # +debug+, +info+, +warn+, +error+, +fatal+, +close+
76 %w(debug info warn error fatal close).each do |m|
77 new.respond_to?(m) and next
78 raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
84 # sets after_fork hook to a given block. This block will be called by
85 # the worker after forking. The following is an example hook which adds
86 # a per-process listener to every worker:
88 # after_fork do |server,worker|
89 # # per-process listener ports for debugging/admin:
90 # # "rescue nil" statement is needed because USR2 will
91 # # cause the master process to reexecute itself and the
92 # # per-worker ports can be taken, necessitating another
93 # # HUP after QUIT-ing the original master:
94 # server.listen("127.0.0.1:#{9293 + worker.nr}") rescue nil
96 # # drop permissions to "www-data" in the worker
97 # # generally there's no reason to start Unicorn as a priviledged user
98 # # as it is not recommended to expose Unicorn to public clients.
99 # uid, gid = Process.euid, Process.egid
100 # user, group = 'www-data', 'www-data'
101 # target_uid = Etc.getpwnam(user).uid
102 # target_gid = Etc.getgrnam(group).gid
103 # worker.tempfile.chown(target_uid, target_gid)
104 # if uid != target_uid || gid != target_gid
105 # Process.initgroups(user, target_gid)
106 # Process::GID.change_privilege(target_gid)
107 # Process::UID.change_privilege(target_uid)
110 def after_fork(*args, &block)
111 set_hook(:after_fork, block_given? ? block : args[0])
114 # sets before_fork got be a given Proc object. This Proc
115 # object will be called by the master process before forking
117 def before_fork(*args, &block)
118 set_hook(:before_fork, block_given? ? block : args[0])
121 # sets the before_exec hook to a given Proc object. This
122 # Proc object will be called by the master process right
123 # before exec()-ing the new unicorn binary. This is useful
124 # for freeing certain OS resources that you do NOT wish to
125 # share with the reexeced child process.
126 # There is no corresponding after_exec hook (for obvious reasons).
127 def before_exec(*args, &block)
128 set_hook(:before_exec, block_given? ? block : args[0], 1)
131 # sets the timeout of worker processes to +seconds+. Workers
132 # handling the request/app.call/response cycle taking longer than
133 # this time period will be forcibly killed (via SIGKILL). This
134 # timeout is enforced by the master process itself and not subject
135 # to the scheduling limitations by the worker process. Due the
136 # low-complexity, low-overhead implementation, timeouts of less
137 # than 3.0 seconds can be considered inaccurate and unsafe.
139 Numeric === seconds or raise ArgumentError,
140 "not numeric: timeout=#{seconds.inspect}"
141 seconds >= 3 or raise ArgumentError,
142 "too low: timeout=#{seconds.inspect}"
143 set[:timeout] = seconds
146 # sets the current number of worker_processes to +nr+. Each worker
147 # process will serve exactly one client at a time.
148 def worker_processes(nr)
149 Integer === nr or raise ArgumentError,
150 "not an integer: worker_processes=#{nr.inspect}"
151 nr >= 0 or raise ArgumentError,
152 "not non-negative: worker_processes=#{nr.inspect}"
153 set[:worker_processes] = nr
156 # sets listeners to the given +addresses+, replacing or augmenting the
157 # current set. This is for the global listener pool shared by all
158 # worker processes. For per-worker listeners, see the after_fork example
159 # This is for internal API use only, do not use it in your Unicorn
160 # config file. Use listen instead.
161 def listeners(addresses) # :nodoc:
162 Array === addresses or addresses = Array(addresses)
163 addresses.map! { |addr| expand_addr(addr) }
164 set[:listeners] = addresses
167 # adds an +address+ to the existing listener set.
169 # The following options may be specified (but are generally not needed):
171 # +backlog+: this is the backlog of the listen() syscall.
173 # Some operating systems allow negative values here to specify the
174 # maximum allowable value. In most cases, this number is only
175 # recommendation and there are other OS-specific tunables and
176 # variables that can affect this number. See the listen(2)
177 # syscall documentation of your OS for the exact semantics of
180 # If you are running unicorn on multiple machines, lowering this number
181 # can help your load balancer detect when a machine is overloaded
182 # and give requests to a different machine.
186 # +rcvbuf+, +sndbuf+: maximum send and receive buffer sizes of sockets
188 # These correspond to the SO_RCVBUF and SO_SNDBUF settings which
189 # can be set via the setsockopt(2) syscall. Some kernels
190 # (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
191 # there is no need (and it is sometimes detrimental) to specify them.
193 # See the socket API documentation of your operating system
194 # to determine the exact semantics of these settings and
195 # other operating system-specific knobs where they can be
198 # Defaults: operating system defaults
200 # +tcp_nodelay+: disables Nagle's algorithm on TCP sockets
202 # This has no effect on UNIX sockets.
204 # Default: operating system defaults (usually Nagle's algorithm enabled)
206 # +tcp_nopush+: enables TCP_CORK in Linux or TCP_NOPUSH in FreeBSD
208 # This will prevent partial TCP frames from being sent out.
209 # Enabling +tcp_nopush+ is generally not needed or recommended as
210 # controlling +tcp_nodelay+ already provides sufficient latency
211 # reduction whereas Unicorn does not know when the best times are
212 # for flushing corked sockets.
214 # This has no effect on UNIX sockets.
216 def listen(address, opt = {})
217 address = expand_addr(address)
218 if String === address
219 Hash === set[:listener_opts] or
220 set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
221 [ :backlog, :sndbuf, :rcvbuf ].each do |key|
222 value = opt[key] or next
224 raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
226 [ :tcp_nodelay, :tcp_nopush ].each do |key|
227 (value = opt[key]).nil? and next
228 TrueClass === value || FalseClass === value or
229 raise ArgumentError, "not boolean: #{key}=#{value.inspect}"
231 set[:listener_opts][address].merge!(opt)
234 set[:listeners] = [] unless Array === set[:listeners]
235 set[:listeners] << address
238 # sets the +path+ for the PID file of the unicorn master process
239 def pid(path); set_path(:pid, path); end
241 # Enabling this preloads an application before forking worker
242 # processes. This allows memory savings when using a
243 # copy-on-write-friendly GC but can cause bad things to happen when
244 # resources like sockets are opened at load time by the master
245 # process and shared by multiple children. People enabling this are
246 # highly encouraged to look at the before_fork/after_fork hooks to
247 # properly close/reopen sockets. Files opened for logging do not
248 # have to be reopened as (unbuffered-in-userspace) files opened with
249 # the File::APPEND flag are written to atomically on UNIX.
251 # In addition to reloading the unicorn-specific config settings,
252 # SIGHUP will reload application code in the working
253 # directory/symlink when workers are gracefully restarted.
254 def preload_app(bool)
256 when TrueClass, FalseClass
257 set[:preload_app] = bool
259 raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
263 # Allow redirecting $stderr to a given path. Unlike doing this from
264 # the shell, this allows the unicorn process to know the path its
265 # writing to and rotate the file if it is used for logging. The
266 # file will be opened with the File::APPEND flag and writes
267 # synchronized to the kernel (but not necessarily to _disk_) so
268 # multiple processes can safely append to it.
269 def stderr_path(path)
270 set_path(:stderr_path, path)
273 # Same as stderr_path, except for $stdout
274 def stdout_path(path)
275 set_path(:stdout_path, path)
280 def set_path(var, path) #:nodoc:
284 path = File.expand_path(path)
285 File.writable?(File.dirname(path)) or \
286 raise ArgumentError, "directory for #{var}=#{path} not writable"
293 def set_hook(var, my_proc, req_arity = 2) #:nodoc:
296 arity = my_proc.arity
297 (arity == req_arity) or \
299 "#{var}=#{my_proc.inspect} has invalid arity: " \
300 "#{arity} (need #{req_arity})"
302 my_proc = DEFAULTS[var]
304 raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
309 # expands "unix:path/to/foo" to a socket relative to the current path
310 # expands pathnames of sockets if relative to "~" or "~username"
311 # expands "*:port and ":port" to "0.0.0.0:port"
312 def expand_addr(address) #:nodoc
313 return "0.0.0.0:#{address}" if Integer === address
314 return address unless String === address
317 when %r{\Aunix:(.*)\z}
320 File.expand_path(address)
321 when %r{\A(?:\*:)?(\d+)\z}
323 when %r{\A(.*):(\d+)\z}
324 # canonicalize the name
325 packed = Socket.pack_sockaddr_in($2.to_i, $1)
326 Socket.unpack_sockaddr_in(packed).reverse!.join(':')