require 'logger'  # Logger is used by DEFAULT_LOGGER below
require 'socket'  # Socket.pack_sockaddr_in is used for address canonicalization
require 'unicorn/socket'
require 'unicorn/const'
7 # Implements a simple DSL for configuring a unicorn server.
9 # Example (when used with the unicorn config file):
11 # listen '/tmp/my_app.sock', :backlog => 1
12 # listen '0.0.0.0:9292'
14 # pid "/tmp/my_app.pid"
#   after_fork do |server,worker_nr|
#     server.listen("127.0.0.1:#{9293 + worker_nr}") rescue nil
#   end
19 # The default logger writes its output to $stderr
# The default logger writes its output to $stderr
DEFAULT_LOGGER = Logger.new($stderr) unless defined?(DEFAULT_LOGGER)

# Default settings for Unicorn.  These are merged into @set by
# initialize when :use_defaults is given, and may be overridden
# by the config-file DSL methods below.
DEFAULTS = {
  :timeout => 60,
  :listeners => [],
  :logger => DEFAULT_LOGGER,
  :worker_processes => 1,
  :after_fork => lambda { |server, worker_nr|
      server.logger.info("worker=#{worker_nr} spawned pid=#{$$}")

      # per-process listener ports for debugging/admin:
      # "rescue nil" statement is needed because USR2 will
      # cause the master process to reexecute itself and the
      # per-worker ports can be taken, necessitating another
      # HUP after QUIT-ing the original master:
      # server.listen("127.0.0.1:#{8081 + worker_nr}") rescue nil
    },
  :before_fork => lambda { |server, worker_nr|
      server.logger.info("worker=#{worker_nr} spawning...")
    },
  :before_exec => lambda { |server|
      server.logger.info("forked child re-executing...")
    },
  :pid => nil,
  :preload_app => false,
  :stderr_path => nil,
  :stdout_path => nil,
}
# path of the config file given at initialization (nil when no
# config file was used); read-only from outside the instance
attr_reader :config_file #:nodoc:
# Sets up the configurator.  When defaults[:use_defaults] is truthy,
# DEFAULTS is merged in first; :config_file (if given) is then loaded
# and evaluated via reload.  Any remaining +defaults+ keys are applied
# by dispatching to the DSL method of the same name.
def initialize(defaults = {}) #:nodoc:
  @set = Hash.new(:unset) # :unset is a sentinel skipped by commit!
  use_defaults = defaults.delete(:use_defaults)
  @config_file = defaults.delete(:config_file)
  @set.merge!(DEFAULTS) if use_defaults
  defaults.each { |key, value| self.send(key, value) }
  reload
end

# re-reads and re-evaluates the config file (no-op without one)
def reload #:nodoc:
  instance_eval(File.read(@config_file)) if @config_file
end
# Transfers every configured (non-:unset) setting onto +server+,
# except keys listed in options[:skip].  A writer method such as
# "timeout=" is preferred when the server responds to it; otherwise
# the matching instance variable is set directly.
def commit!(server, options = {}) #:nodoc:
  skip = options[:skip] || []
  @set.each do |key, value|
    (Symbol === value && value == :unset) and next
    skip.include?(key) and next
    setter = "#{key}="
    if server.respond_to?(setter)
      server.send(setter, value)
    else
      server.instance_variable_set("@#{key}", value)
    end
  end
end
# sets the logger to the +new+ Logger-like object.  The new logger-like
# object must respond to the following methods:
#  +debug+, +info+, +warn+, +error+, +fatal+, +close+
def logger(new)
  %w(debug info warn error fatal close).each do |m|
    new.respond_to?(m) and next
    raise ArgumentError, "logger=#{new} does not respond to method=#{m}"
  end

  @set[:logger] = new
end
# sets the after_fork hook to a given block or Proc.  This hook is
# called by the worker after forking.  The following is an example
# hook which adds a per-process listener to every worker:
#
#  after_fork do |server,worker_nr|
#    # per-process listener ports for debugging/admin:
#    # "rescue nil" statement is needed because USR2 will
#    # cause the master process to reexecute itself and the
#    # per-worker ports can be taken, necessitating another
#    # HUP after QUIT-ing the original master:
#    server.listen("127.0.0.1:#{9293 + worker_nr}") rescue nil
#  end
def after_fork(*args, &block)
  set_hook(:after_fork, block_given? ? block : args[0])
end
# sets before_fork to be a given block or Proc object.  This
# hook will be called by the master process before forking
# each worker.
def before_fork(*args, &block)
  set_hook(:before_fork, block_given? ? block : args[0])
end
# sets the before_exec hook to a given block or Proc object.  This
# hook will be called by the master process right before exec()-ing
# the new unicorn binary.  This is useful for freeing certain OS
# resources that you do NOT wish to share with the reexeced child
# process.  It takes a single +server+ argument (arity 1).
# There is no corresponding after_exec hook (for obvious reasons).
def before_exec(*args, &block)
  set_hook(:before_exec, block_given? ? block : args[0], 1)
end
# sets the timeout of worker processes to +seconds+.  Workers
# handling the request/app.call/response cycle taking longer than
# this time period will be forcibly killed (via SIGKILL).  This
# timeout is enforced by the master process itself and not subject
# to the scheduling limitations of the worker process.
# Raises ArgumentError unless +seconds+ is a positive Numeric.
def timeout(seconds)
  Numeric === seconds or raise ArgumentError,
                               "not numeric: timeout=#{seconds.inspect}"
  seconds > 0 or raise ArgumentError,
                       "not positive: timeout=#{seconds.inspect}"
  @set[:timeout] = seconds
end
# sets the current number of worker_processes to +nr+.  Each worker
# process will serve exactly one client at a time.
# Raises ArgumentError unless +nr+ is a non-negative Integer.
def worker_processes(nr)
  Integer === nr or raise ArgumentError,
                          "not an integer: worker_processes=#{nr.inspect}"
  nr >= 0 or raise ArgumentError,
                   "not non-negative: worker_processes=#{nr.inspect}"
  @set[:worker_processes] = nr
end
# sets listeners to the given +addresses+, replacing or augmenting the
# current set.  This is for the global listener pool shared by all
# worker processes.  For per-worker listeners, see the after_fork example.
# This is for internal API use only, do not use it in your Unicorn
# config file.  Use listen instead.
def listeners(addresses) # :nodoc:
  Array === addresses or addresses = Array(addresses)
  addresses.map! { |addr| expand_addr(addr) }
  @set[:listeners] = addresses
end
# adds an +address+ to the existing listener set.
#
# The following options may be specified (but are generally not needed):
#
# +backlog+: this is the backlog of the listen() syscall.
#
#   Some operating systems allow negative values here to specify the
#   maximum allowable value.  In most cases, this number is only a
#   recommendation and there are other OS-specific tunables and
#   variables that can affect this number.  See the listen(2)
#   syscall documentation of your OS for the exact semantics of this.
#
#   If you are running unicorn on multiple machines, lowering this number
#   can help your load balancer detect when a machine is overloaded
#   and give requests to a different machine.
#
# +rcvbuf+, +sndbuf+: maximum send and receive buffer sizes of sockets.
#
#   These correspond to the SO_RCVBUF and SO_SNDBUF settings which
#   can be set via the setsockopt(2) syscall.  Some kernels
#   (e.g. Linux 2.4+) have intelligent auto-tuning mechanisms and
#   there is no need (and it is sometimes detrimental) to specify them.
#
#   See the socket API documentation of your operating system
#   to determine the exact semantics of these settings and
#   other operating system-specific knobs where they can be tuned.
#
#   Defaults: operating system defaults
def listen(address, opt = { :backlog => 1024 })
  address = expand_addr(address)

  # expand_addr only canonicalizes Strings; per-listener socket options
  # are recorded for String addresses only
  if String === address
    Hash === @set[:listener_opts] or
      @set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
    [ :backlog, :sndbuf, :rcvbuf ].each do |key|
      value = opt[key] or next
      Integer === value or
        raise ArgumentError, "not an integer: #{key}=#{value.inspect}"
    end
    @set[:listener_opts][address].merge!(opt)
  end

  @set[:listeners] = [] unless Array === @set[:listeners]
  @set[:listeners] << address
end
# sets the +path+ for the PID file of the unicorn master process
def pid(path)
  set_path(:pid, path)
end
# Enabling this preloads an application before forking worker
# processes.  This allows memory savings when using a
# copy-on-write-friendly GC but can cause bad things to happen when
# resources like sockets are opened at load time by the master
# process and shared by multiple children.  People enabling this are
# highly encouraged to look at the before_fork/after_fork hooks to
# properly close/reopen sockets.  Files opened for logging do not
# have to be reopened as (unbuffered-in-userspace) files opened with
# the File::APPEND flag are written to atomically on UNIX.
#
# In addition to reloading the unicorn-specific config settings,
# SIGHUP will reload application code in the working
# directory/symlink when workers are gracefully restarted.
#
# Raises ArgumentError unless +bool+ is exactly true or false.
def preload_app(bool)
  case bool
  when TrueClass, FalseClass
    @set[:preload_app] = bool
  else
    raise ArgumentError, "preload_app=#{bool.inspect} not a boolean"
  end
end
# Allow redirecting $stderr to a given path.  Unlike doing this from
# the shell, this allows the unicorn process to know the path it is
# writing to and rotate the file if it is used for logging.  The
# file will be opened with the File::APPEND flag and writes
# synchronized to the kernel (but not necessarily to _disk_) so
# multiple processes can safely append to it.
def stderr_path(path)
  set_path(:stderr_path, path)
end
# Same as stderr_path, except for $stdout
def stdout_path(path)
  set_path(:stdout_path, path)
end
# Validates and stores a filesystem path setting.  nil clears the
# setting; a String is expanded to an absolute path and its parent
# directory must be writable.  Anything else raises ArgumentError.
def set_path(var, path) #:nodoc:
  case path
  when NilClass
    # explicit nil is allowed and stored as-is (unsets the path)
  when String
    path = File.expand_path(path)
    File.writable?(File.dirname(path)) or \
      raise ArgumentError, "directory for #{var}=#{path} not writable"
  else
    raise ArgumentError
  end
  @set[var] = path
end
# Validates and stores a hook Proc for +var+.  A Proc must have
# exactly +req_arity+ parameters; nil restores the default hook from
# DEFAULTS; any other type raises ArgumentError.
def set_hook(var, my_proc, req_arity = 2) #:nodoc:
  case my_proc
  when Proc
    arity = my_proc.arity
    (arity == req_arity) or \
      raise ArgumentError,
            "#{var}=#{my_proc.inspect} has invalid arity: " \
            "#{arity} (need #{req_arity})"
  when NilClass
    my_proc = DEFAULTS[var]
  else
    raise ArgumentError, "invalid type: #{var}=#{my_proc.inspect}"
  end
  @set[var] = my_proc
end
283 # expands "unix:path/to/foo" to a socket relative to the current path
284 # expands pathnames of sockets if relative to "~" or "~username"
285 # expands "*:port and ":port" to "0.0.0.0:port"
286 def expand_addr(address) #:nodoc
287 return address unless String === address
290 when %r{\Aunix:(.*)\z}
293 File.expand_path(address)
294 when %r{\A\*:(\d+)\z}
296 when %r{\A(.*):(\d+)\z}
297 # canonicalize the name
298 packed = Socket.pack_sockaddr_in($2.to_i, $1)
299 Socket.unpack_sockaddr_in(packed).reverse!.join(':')