# -*- encoding: binary -*-
# This concurrency model implements a single-threaded app dispatch
# with a separate thread pool for writing responses.
#
# Unlike most \Rainbows! concurrency models, WriterThreadPool is
# designed to run behind nginx just like Unicorn is.  This concurrency
# model may be useful for existing Unicorn users looking for more
# output concurrency than socket buffers can provide while still
# maintaining a single-threaded application dispatch (though if the
# response body is dynamically generated, it must be thread safe).
#
# For serving large or streaming responses, using more threads (via
# the +worker_connections+ setting) and setting "proxy_buffering off"
# in nginx is recommended.  If your application does not handle
# uploads, then using any HTTP-aware proxy like haproxy is fine.
# Using a non-HTTP-aware proxy will leave you vulnerable to
# slow client denial-of-service attacks.
19 module Rainbows::WriterThreadPool
21 include Rainbows::Base
22 autoload :Client, 'rainbows/writer_thread_pool/client'
27 def process_client(client) # :nodoc:
29 Client.new(client, @@q[@@nr %= @@q.size]).process_loop
32 def worker_loop(worker) # :nodoc:
33 # we have multiple, single-thread queues since we don't want to
34 # interleave writes from the same client
35 qp = (1..worker_connections).map do |n|
36 Rainbows::QueuePool.new(1) do |response|
38 io, arg, *rest = response
43 io.close unless io.closed?
45 io.__send__(arg, *rest)
48 Rainbows::Error.write(io, err)
53 @@q = qp.map { |q| q.queue }
54 super(worker) # accept loop from Unicorn
55 qp.map { |q| q.quit! }