release GVL for filesystem operations
[clogger.git] / ext / clogger_ext / clogger.c
blob0adefa4f4cc5c1442c97e3996939007bc43ca8d8
1 #include <ruby.h>
2 #ifdef HAVE_RUBY_IO_H
3 # include <ruby/io.h>
4 #else
5 # include <rubyio.h>
6 #endif
7 #include <assert.h>
8 #include <unistd.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <sys/time.h>
12 #include <errno.h>
13 #ifdef HAVE_FCNTL_H
14 # include <fcntl.h>
15 #endif
16 #ifndef _POSIX_C_SOURCE
17 # define _POSIX_C_SOURCE 200112L
18 #endif
19 #include <time.h>
20 #include "ruby_1_9_compat.h"
21 #include "broken_system_compat.h"
22 #include "blocking_helpers.h"
/*
 * Availability of a monotonic clock needs to be detected at runtime
 * since we could've been built on a different system than we're run
 * under.
 */
/* set once by check_clock(); CLOCK_MONOTONIC or CLOCK_REALTIME fallback */
static clockid_t hopefully_CLOCK_MONOTONIC;
31 static void check_clock(void)
33 struct timespec now;
35 hopefully_CLOCK_MONOTONIC = CLOCK_MONOTONIC;
37 /* we can't check this reliably at compile time */
38 if (clock_gettime(CLOCK_MONOTONIC, &now) == 0)
39 return;
41 if (clock_gettime(CLOCK_REALTIME, &now) == 0) {
42 hopefully_CLOCK_MONOTONIC = CLOCK_REALTIME;
43 rb_warn("CLOCK_MONOTONIC not available, "
44 "falling back to CLOCK_REALTIME");
46 rb_warn("clock_gettime() totally broken, " \
47 "falling back to pure Ruby Clogger");
48 rb_raise(rb_eLoadError, "clock_gettime() broken");
/* in-place subtraction: *a -= *b, keeping tv_nsec normalized to [0, 1e9) */
static void clock_diff(struct timespec *a, const struct timespec *b)
{
	a->tv_sec -= b->tv_sec;
	a->tv_nsec -= b->tv_nsec;

	/* borrow one second when the nanosecond field underflows */
	if (a->tv_nsec < 0) {
		a->tv_sec--;
		a->tv_nsec += 1000000000;
	}
}
/* give GCC hints for better branch prediction
 * (we layout branches so that ASCII characters are handled faster) */
#if defined(__GNUC__) && (__GNUC__ >= 3)
#  define likely(x)		__builtin_expect (!!(x), 1)
#  define unlikely(x)		__builtin_expect (!!(x), 0)
#else
/* no-op fallbacks for compilers without __builtin_expect */
#  define unlikely(x)		(x)
#  define likely(x)		(x)
#endif
/*
 * opcodes of the compiled log format; each element of fmt_ops is an
 * array whose first element is one of these.
 */
enum clogger_opcode {
	CL_OP_LITERAL = 0,	/* copy a literal string */
	CL_OP_REQUEST,		/* escaped value from the Rack env */
	CL_OP_RESPONSE,		/* escaped value from the response headers */
	CL_OP_SPECIAL,		/* one of enum clogger_special below */
	CL_OP_EVAL,		/* eval'ed Ruby expression */
	CL_OP_TIME_LOCAL,	/* strftime in local time */
	CL_OP_TIME_UTC,		/* strftime in UTC */
	CL_OP_REQUEST_TIME,	/* elapsed request time */
	CL_OP_TIME,		/* current wall-clock time */
	CL_OP_COOKIE		/* request cookie value */
};
/* operands of CL_OP_SPECIAL, dispatched by special_var() */
enum clogger_special {
	CL_SP_body_bytes_sent = 0,
	CL_SP_status,
	CL_SP_request,
	CL_SP_request_length,
	CL_SP_response_length,
	CL_SP_ip,
	CL_SP_pid,
	CL_SP_request_uri
};
95 struct clogger {
96 VALUE app;
98 VALUE fmt_ops;
99 VALUE logger;
100 VALUE log_buf;
102 VALUE env;
103 VALUE cookies;
104 VALUE status;
105 VALUE headers;
106 VALUE body;
108 off_t body_bytes_sent;
109 struct timespec ts_start;
111 int fd;
112 int wrap_body;
113 int need_resp;
114 int reentrant; /* tri-state, -1:auto, 1/0 true/false */
/* method-name IDs interned once in Init_clogger_ext */
static ID ltlt_id;
static ID call_id;
static ID each_id;
static ID close_id;
static ID to_i_id;
static ID to_s_id;
static ID size_id;
static ID sq_brace_id;
static ID new_id;
static ID to_path_id;
static ID to_io_id;
static ID respond_to_id;
/* Clogger class, Clogger::Format module, Rack::Utils::HeaderHash class */
static VALUE cClogger;
static VALUE mFormat;
static VALUE cHeaderHash;

/* common hash lookup keys */
static VALUE g_HTTP_X_FORWARDED_FOR;
static VALUE g_REMOTE_ADDR;
static VALUE g_REQUEST_METHOD;
static VALUE g_PATH_INFO;
static VALUE g_REQUEST_URI;
static VALUE g_QUERY_STRING;
static VALUE g_HTTP_VERSION;
static VALUE g_rack_errors;
static VALUE g_rack_input;
static VALUE g_rack_multithread;
/* frozen literal fragments used when assembling log lines */
static VALUE g_dash;
static VALUE g_space;
static VALUE g_question_mark;
static VALUE g_rack_request_cookie_hash;
149 #define LOG_BUF_INIT_SIZE 128
151 static void init_buffers(struct clogger *c)
153 c->log_buf = rb_str_buf_new(LOG_BUF_INIT_SIZE);
/* returns nonzero iff byte +c+ must be hex-escaped before logging */
static inline int need_escape(unsigned c)
{
	assert(c <= 0xff);

	/* control bytes and quote characters would corrupt the log */
	if (c <= 0x1f)
		return 1;
	return c == '"' || c == '\'';
}
162 /* we are encoding-agnostic, clients can send us all sorts of junk */
163 static VALUE byte_xs_str(VALUE from)
165 static const char esc[] = "0123456789ABCDEF";
166 unsigned char *new_ptr;
167 unsigned char *ptr = (unsigned char *)RSTRING_PTR(from);
168 long len = RSTRING_LEN(from);
169 long new_len = len;
170 VALUE rv;
172 for (; --len >= 0; ptr++) {
173 unsigned c = *ptr;
175 if (unlikely(need_escape(c)))
176 new_len += 3; /* { '\', 'x', 'X', 'X' } */
179 len = RSTRING_LEN(from);
180 if (new_len == len)
181 return from;
183 rv = rb_str_new(NULL, new_len);
184 new_ptr = (unsigned char *)RSTRING_PTR(rv);
185 ptr = (unsigned char *)RSTRING_PTR(from);
186 for (; --len >= 0; ptr++) {
187 unsigned c = *ptr;
189 if (unlikely(need_escape(c))) {
190 *new_ptr++ = '\\';
191 *new_ptr++ = 'x';
192 *new_ptr++ = esc[c >> 4];
193 *new_ptr++ = esc[c & 0xf];
194 } else {
195 *new_ptr++ = c;
198 assert(RSTRING_PTR(rv)[RSTRING_LEN(rv)] == '\0');
200 return rv;
203 static VALUE byte_xs(VALUE from)
205 return byte_xs_str(rb_obj_as_string(from));
208 static void clogger_mark(void *ptr)
210 struct clogger *c = ptr;
212 rb_gc_mark(c->app);
213 rb_gc_mark(c->fmt_ops);
214 rb_gc_mark(c->logger);
215 rb_gc_mark(c->log_buf);
216 rb_gc_mark(c->env);
217 rb_gc_mark(c->cookies);
218 rb_gc_mark(c->status);
219 rb_gc_mark(c->headers);
220 rb_gc_mark(c->body);
223 static VALUE clogger_alloc(VALUE klass)
225 struct clogger *c;
227 return Data_Make_Struct(klass, struct clogger, clogger_mark, -1, c);
230 static struct clogger *clogger_get(VALUE self)
232 struct clogger *c;
234 Data_Get_Struct(self, struct clogger, c);
235 assert(c);
236 return c;
/* only for writing to regular files, not stupid crap like NFS */
static void write_full(int fd, const void *buf, size_t count)
{
	ssize_t r;
	unsigned long ubuf = (unsigned long)buf;

	while (count > 0) {
		r = write(fd, (void *)ubuf, count);

		if ((size_t)r == count) { /* overwhelmingly likely */
			return;
		} else if (r > 0) {
			/* short write: advance and retry */
			count -= r;
			ubuf += r;
		} else {
			if (errno == EINTR || errno == EAGAIN)
				continue; /* poor souls on NFS and like: */
			if (!errno)
				errno = ENOSPC;
			rb_sys_fail("write");
		}
	}
}
264 * allow us to use write_full() iff we detect a blocking file
265 * descriptor that wouldn't play nicely with Ruby threading/fibers
267 static int raw_fd(VALUE my_fd)
269 #if defined(HAVE_FCNTL) && defined(F_GETFL) && defined(O_NONBLOCK)
270 int fd;
271 int flags;
273 if (NIL_P(my_fd))
274 return -1;
275 fd = NUM2INT(my_fd);
277 flags = fcntl(fd, F_GETFL);
278 if (flags < 0)
279 rb_sys_fail("fcntl");
281 if (flags & O_NONBLOCK) {
282 struct stat sb;
284 if (fstat(fd, &sb) < 0)
285 return -1;
287 /* O_NONBLOCK is no-op for regular files: */
288 if (! S_ISREG(sb.st_mode))
289 return -1;
291 return fd;
292 #else /* platforms w/o fcntl/F_GETFL/O_NONBLOCK */
293 return -1;
294 #endif /* platforms w/o fcntl/F_GETFL/O_NONBLOCK */
297 /* :nodoc: */
298 static VALUE clogger_reentrant(VALUE self)
300 return clogger_get(self)->reentrant == 0 ? Qfalse : Qtrue;
303 /* :nodoc: */
304 static VALUE clogger_wrap_body(VALUE self)
306 return clogger_get(self)->wrap_body == 0 ? Qfalse : Qtrue;
309 static void append_status(struct clogger *c)
311 char buf[sizeof("999")];
312 int nr;
313 VALUE status = c->status;
315 if (TYPE(status) != T_FIXNUM) {
316 status = rb_funcall(status, to_i_id, 0);
317 /* no way it's a valid status code (at least not HTTP/1.1) */
318 if (TYPE(status) != T_FIXNUM) {
319 rb_str_buf_append(c->log_buf, g_dash);
320 return;
324 nr = FIX2INT(status);
325 if (nr >= 100 && nr <= 999) {
326 nr = snprintf(buf, sizeof(buf), "%03d", nr);
327 assert(nr == 3);
328 rb_str_buf_cat(c->log_buf, buf, nr);
329 } else {
330 /* raise?, swap for 500? */
331 rb_str_buf_append(c->log_buf, g_dash);
335 /* this is Rack 1.0.0-compatible, won't try to parse commas in XFF */
336 static void append_ip(struct clogger *c)
338 VALUE env = c->env;
339 VALUE tmp = rb_hash_aref(env, g_HTTP_X_FORWARDED_FOR);
341 if (NIL_P(tmp)) {
342 /* can't be faked on any real server, so no escape */
343 tmp = rb_hash_aref(env, g_REMOTE_ADDR);
344 if (NIL_P(tmp))
345 tmp = g_dash;
346 } else {
347 tmp = byte_xs(tmp);
349 rb_str_buf_append(c->log_buf, tmp);
352 static void append_body_bytes_sent(struct clogger *c)
354 char buf[(sizeof(off_t) * 8) / 3 + 1];
355 const char *fmt = sizeof(off_t) == sizeof(long) ? "%ld" : "%lld";
356 int nr = snprintf(buf, sizeof(buf), fmt, c->body_bytes_sent);
358 assert(nr > 0 && nr < (int)sizeof(buf));
359 rb_str_buf_cat(c->log_buf, buf, nr);
362 static void append_ts(struct clogger *c, const VALUE *op, struct timespec *ts)
364 char buf[sizeof(".000000") + ((sizeof(ts->tv_sec) * 8) / 3)];
365 int nr;
366 char *fmt = RSTRING_PTR(op[1]);
367 int ndiv = NUM2INT(op[2]);
368 int usec = ts->tv_nsec / 1000;
370 nr = snprintf(buf, sizeof(buf), fmt,
371 (int)ts->tv_sec, (int)(usec / ndiv));
372 assert(nr > 0 && nr < (int)sizeof(buf));
373 rb_str_buf_cat(c->log_buf, buf, nr);
376 static void append_request_time_fmt(struct clogger *c, const VALUE *op)
378 struct timespec now;
380 clock_gettime(hopefully_CLOCK_MONOTONIC, &now);
381 clock_diff(&now, &c->ts_start);
382 append_ts(c, op, &now);
385 static void append_time_fmt(struct clogger *c, const VALUE *op)
387 struct timespec now;
388 int r = clock_gettime(CLOCK_REALTIME, &now);
390 if (unlikely(r != 0))
391 rb_sys_fail("clock_gettime(CLOCK_REALTIME)");
392 append_ts(c, op, &now);
395 static void append_request_uri(struct clogger *c)
397 VALUE tmp;
399 tmp = rb_hash_aref(c->env, g_REQUEST_URI);
400 if (NIL_P(tmp)) {
401 tmp = rb_hash_aref(c->env, g_PATH_INFO);
402 if (!NIL_P(tmp))
403 rb_str_buf_append(c->log_buf, byte_xs(tmp));
404 tmp = rb_hash_aref(c->env, g_QUERY_STRING);
405 if (!NIL_P(tmp) && RSTRING_LEN(tmp) != 0) {
406 rb_str_buf_append(c->log_buf, g_question_mark);
407 rb_str_buf_append(c->log_buf, byte_xs(tmp));
409 } else {
410 rb_str_buf_append(c->log_buf, byte_xs(tmp));
414 static void append_request(struct clogger *c)
416 VALUE tmp;
418 /* REQUEST_METHOD doesn't need escaping, Rack::Lint governs it */
419 tmp = rb_hash_aref(c->env, g_REQUEST_METHOD);
420 if (!NIL_P(tmp))
421 rb_str_buf_append(c->log_buf, tmp);
423 rb_str_buf_append(c->log_buf, g_space);
425 append_request_uri(c);
427 /* HTTP_VERSION can be injected by malicious clients */
428 tmp = rb_hash_aref(c->env, g_HTTP_VERSION);
429 if (!NIL_P(tmp)) {
430 rb_str_buf_append(c->log_buf, g_space);
431 rb_str_buf_append(c->log_buf, byte_xs(tmp));
435 static void append_request_length(struct clogger *c)
437 VALUE tmp = rb_hash_aref(c->env, g_rack_input);
438 if (NIL_P(tmp)) {
439 rb_str_buf_append(c->log_buf, g_dash);
440 } else {
441 tmp = rb_funcall(tmp, size_id, 0);
442 rb_str_buf_append(c->log_buf, rb_funcall(tmp, to_s_id, 0));
446 static void
447 append_time(struct clogger *c, enum clogger_opcode op, VALUE fmt, VALUE buf)
449 char *buf_ptr = RSTRING_PTR(buf);
450 size_t buf_size = RSTRING_LEN(buf) + 1; /* "\0" */
451 size_t nr;
452 struct tm tmp;
453 time_t t = time(NULL);
455 if (op == CL_OP_TIME_LOCAL)
456 localtime_r(&t, &tmp);
457 else if (op == CL_OP_TIME_UTC)
458 gmtime_r(&t, &tmp);
459 else
460 assert(0 && "unknown op");
462 nr = strftime(buf_ptr, buf_size, RSTRING_PTR(fmt), &tmp);
463 assert(nr < buf_size && "time format too small!");
464 rb_str_buf_cat(c->log_buf, buf_ptr, nr);
467 static void append_pid(struct clogger *c)
469 char buf[(sizeof(pid_t) * 8) / 3 + 1];
470 int nr = snprintf(buf, sizeof(buf), "%d", (int)getpid());
472 assert(nr > 0 && nr < (int)sizeof(buf));
473 rb_str_buf_cat(c->log_buf, buf, nr);
476 static void append_eval(struct clogger *c, VALUE str)
478 int state = -1;
479 VALUE rv = rb_eval_string_protect(RSTRING_PTR(str), &state);
481 rv = state == 0 ? rb_obj_as_string(rv) : g_dash;
482 rb_str_buf_append(c->log_buf, rv);
485 static void append_cookie(struct clogger *c, VALUE key)
487 VALUE cookie;
489 if (c->cookies == Qfalse)
490 c->cookies = rb_hash_aref(c->env, g_rack_request_cookie_hash);
492 if (NIL_P(c->cookies)) {
493 cookie = g_dash;
494 } else {
495 cookie = rb_hash_aref(c->cookies, key);
496 if (NIL_P(cookie))
497 cookie = g_dash;
499 rb_str_buf_append(c->log_buf, cookie);
502 static void append_request_env(struct clogger *c, VALUE key)
504 VALUE tmp = rb_hash_aref(c->env, key);
506 tmp = NIL_P(tmp) ? g_dash : byte_xs(tmp);
507 rb_str_buf_append(c->log_buf, tmp);
510 static void append_response(struct clogger *c, VALUE key)
512 VALUE v;
514 assert(rb_obj_is_kind_of(c->headers, cHeaderHash) && "not HeaderHash");
516 v = rb_funcall(c->headers, sq_brace_id, 1, key);
517 v = NIL_P(v) ? g_dash : byte_xs(v);
518 rb_str_buf_append(c->log_buf, v);
521 static void special_var(struct clogger *c, enum clogger_special var)
523 switch (var) {
524 case CL_SP_body_bytes_sent:
525 append_body_bytes_sent(c);
526 break;
527 case CL_SP_status:
528 append_status(c);
529 break;
530 case CL_SP_request:
531 append_request(c);
532 break;
533 case CL_SP_request_length:
534 append_request_length(c);
535 break;
536 case CL_SP_response_length:
537 if (c->body_bytes_sent == 0)
538 rb_str_buf_append(c->log_buf, g_dash);
539 else
540 append_body_bytes_sent(c);
541 break;
542 case CL_SP_ip:
543 append_ip(c);
544 break;
545 case CL_SP_pid:
546 append_pid(c);
547 break;
548 case CL_SP_request_uri:
549 append_request_uri(c);
553 static VALUE cwrite(struct clogger *c)
555 const VALUE ops = c->fmt_ops;
556 const VALUE *ary = RARRAY_PTR(ops);
557 long i = RARRAY_LEN(ops);
558 VALUE dst = c->log_buf;
560 rb_str_set_len(dst, 0);
562 for (; --i >= 0; ary++) {
563 const VALUE *op = RARRAY_PTR(*ary);
564 enum clogger_opcode opcode = FIX2INT(op[0]);
566 switch (opcode) {
567 case CL_OP_LITERAL:
568 rb_str_buf_append(dst, op[1]);
569 break;
570 case CL_OP_REQUEST:
571 append_request_env(c, op[1]);
572 break;
573 case CL_OP_RESPONSE:
574 append_response(c, op[1]);
575 break;
576 case CL_OP_SPECIAL:
577 special_var(c, FIX2INT(op[1]));
578 break;
579 case CL_OP_EVAL:
580 append_eval(c, op[1]);
581 break;
582 case CL_OP_TIME_LOCAL:
583 case CL_OP_TIME_UTC:
584 append_time(c, opcode, op[1], op[2]);
585 break;
586 case CL_OP_REQUEST_TIME:
587 append_request_time_fmt(c, op);
588 break;
589 case CL_OP_TIME:
590 append_time_fmt(c, op);
591 break;
592 case CL_OP_COOKIE:
593 append_cookie(c, op[1]);
594 break;
598 if (c->fd >= 0) {
599 write_full(c->fd, RSTRING_PTR(dst), RSTRING_LEN(dst));
600 } else {
601 VALUE logger = c->logger;
603 if (NIL_P(logger))
604 logger = rb_hash_aref(c->env, g_rack_errors);
605 rb_funcall(logger, ltlt_id, 1, dst);
608 return Qnil;
611 static VALUE clogger_write(VALUE self)
613 return cwrite(clogger_get(self));
616 static void init_logger(struct clogger *c, VALUE path)
618 ID id;
620 if (!NIL_P(path) && !NIL_P(c->logger))
621 rb_raise(rb_eArgError, ":logger and :path are independent");
622 if (!NIL_P(path)) {
623 VALUE ab = rb_str_new2("ab");
624 id = rb_intern("open");
625 c->logger = rb_funcall(rb_cFile, id, 2, path, ab);
628 id = rb_intern("sync=");
629 if (rb_respond_to(c->logger, id))
630 rb_funcall(c->logger, id, 1, Qtrue);
632 id = rb_intern("fileno");
633 if (rb_respond_to(c->logger, id))
634 c->fd = raw_fd(rb_funcall(c->logger, id, 0));
638 * call-seq:
639 * Clogger.new(app, :logger => $stderr, :format => string) => obj
641 * Creates a new Clogger object that wraps +app+. +:logger+ may
642 * be any object that responds to the "<<" method with a string argument.
643 * If +:logger:+ is a string, it will be treated as a path to a
644 * File that will be opened in append mode.
646 static VALUE clogger_init(int argc, VALUE *argv, VALUE self)
648 struct clogger *c = clogger_get(self);
649 VALUE o = Qnil;
650 VALUE fmt = rb_const_get(mFormat, rb_intern("Common"));
652 rb_scan_args(argc, argv, "11", &c->app, &o);
653 c->fd = -1;
654 c->logger = Qnil;
655 c->reentrant = -1; /* auto-detect */
657 if (TYPE(o) == T_HASH) {
658 VALUE tmp;
660 tmp = rb_hash_aref(o, ID2SYM(rb_intern("path")));
661 c->logger = rb_hash_aref(o, ID2SYM(rb_intern("logger")));
662 init_logger(c, tmp);
664 tmp = rb_hash_aref(o, ID2SYM(rb_intern("format")));
665 if (!NIL_P(tmp))
666 fmt = tmp;
668 tmp = rb_hash_aref(o, ID2SYM(rb_intern("reentrant")));
669 switch (TYPE(tmp)) {
670 case T_TRUE:
671 c->reentrant = 1;
672 break;
673 case T_FALSE:
674 c->reentrant = 0;
675 case T_NIL:
676 break;
677 default:
678 rb_raise(rb_eArgError, ":reentrant must be boolean");
682 init_buffers(c);
683 c->fmt_ops = rb_funcall(self, rb_intern("compile_format"), 2, fmt, o);
685 if (Qtrue == rb_funcall(self, rb_intern("need_response_headers?"),
686 1, c->fmt_ops))
687 c->need_resp = 1;
688 if (Qtrue == rb_funcall(self, rb_intern("need_wrap_body?"),
689 1, c->fmt_ops))
690 c->wrap_body = 1;
692 return self;
695 static VALUE body_iter_i(VALUE str, VALUE self)
697 struct clogger *c = clogger_get(self);
699 str = rb_obj_as_string(str);
700 c->body_bytes_sent += RSTRING_LEN(str);
702 return rb_yield(str);
705 static VALUE body_close(VALUE self)
707 struct clogger *c = clogger_get(self);
709 if (rb_respond_to(c->body, close_id))
710 return rb_funcall(c->body, close_id, 0);
711 return Qnil;
715 * call-seq:
716 * clogger.each { |part| socket.write(part) }
718 * Delegates the body#each call to the underlying +body+ object
719 * while tracking the number of bytes yielded. This will log
720 * the request.
722 static VALUE clogger_each(VALUE self)
724 struct clogger *c = clogger_get(self);
726 rb_need_block();
727 c->body_bytes_sent = 0;
728 rb_iterate(rb_each, c->body, body_iter_i, self);
730 return self;
734 * call-seq:
735 * clogger.close
737 * Delegates the body#close call to the underlying +body+ object.
738 * This is only used when Clogger is wrapping the +body+ of a Rack
739 * response and should be automatically called by the web server.
741 static VALUE clogger_close(VALUE self)
744 return rb_ensure(body_close, self, clogger_write, self);
747 /* :nodoc: */
748 static VALUE clogger_fileno(VALUE self)
750 struct clogger *c = clogger_get(self);
752 return c->fd < 0 ? Qnil : INT2NUM(c->fd);
755 static VALUE ccall(struct clogger *c, VALUE env)
757 VALUE rv;
759 clock_gettime(hopefully_CLOCK_MONOTONIC, &c->ts_start);
760 c->env = env;
761 c->cookies = Qfalse;
762 rv = rb_funcall(c->app, call_id, 1, env);
763 if (TYPE(rv) == T_ARRAY && RARRAY_LEN(rv) == 3) {
764 VALUE *tmp = RARRAY_PTR(rv);
766 c->status = tmp[0];
767 c->headers = tmp[1];
768 c->body = tmp[2];
770 rv = rb_ary_new4(3, tmp);
771 if (c->need_resp &&
772 ! rb_obj_is_kind_of(tmp[1], cHeaderHash)) {
773 c->headers = rb_funcall(cHeaderHash, new_id, 1, tmp[1]);
774 rb_ary_store(rv, 1, c->headers);
776 } else {
777 volatile VALUE tmp = rb_inspect(rv);
779 c->status = INT2FIX(500);
780 c->headers = c->body = rb_ary_new();
781 cwrite(c);
782 rb_raise(rb_eTypeError,
783 "app response not a 3 element Array: %s",
784 RSTRING_PTR(tmp));
787 return rv;
791 * call-seq:
792 * clogger.call(env) => [ status, headers, body ]
794 * calls the wrapped Rack application with +env+, returns the
795 * [status, headers, body ] tuplet required by Rack.
797 static VALUE clogger_call(VALUE self, VALUE env)
799 struct clogger *c = clogger_get(self);
800 VALUE rv;
802 env = rb_check_convert_type(env, T_HASH, "Hash", "to_hash");
804 if (c->wrap_body) {
805 if (c->reentrant < 0) {
806 VALUE tmp = rb_hash_aref(env, g_rack_multithread);
807 c->reentrant = Qfalse == tmp ? 0 : 1;
809 if (c->reentrant) {
810 self = rb_obj_dup(self);
811 c = clogger_get(self);
814 rv = ccall(c, env);
815 assert(!OBJ_FROZEN(rv) && "frozen response array");
816 rb_ary_store(rv, 2, self);
818 return rv;
821 rv = ccall(c, env);
822 cwrite(c);
824 return rv;
827 static void duplicate_buffers(VALUE ops)
829 long i = RARRAY_LEN(ops);
830 VALUE *ary = RARRAY_PTR(ops);
832 for ( ; --i >= 0; ary++) {
833 VALUE *op = RARRAY_PTR(*ary);
834 enum clogger_opcode opcode = FIX2INT(op[0]);
836 if (opcode == CL_OP_TIME_LOCAL || opcode == CL_OP_TIME_UTC) {
837 Check_Type(op[2], T_STRING);
838 op[2] = rb_str_dup(op[2]);
839 rb_str_modify(op[2]); /* trigger copy-on-write */
844 /* :nodoc: */
845 static VALUE clogger_init_copy(VALUE clone, VALUE orig)
847 struct clogger *a = clogger_get(orig);
848 struct clogger *b = clogger_get(clone);
850 memcpy(b, a, sizeof(struct clogger));
851 init_buffers(b);
852 duplicate_buffers(b->fmt_ops);
854 return clone;
/*
 * defines the frozen global string g_<var> containing +val+ and
 * registers its slot with the GC so it is never collected or moved
 */
#define CONST_GLOBAL_STR2(var, val) do { \
	g_##var = rb_obj_freeze(rb_str_new(val, sizeof(val) - 1)); \
	rb_global_variable(&g_##var); \
} while (0)

/* common case: the global's name matches its string contents */
#define CONST_GLOBAL_STR(val) CONST_GLOBAL_STR2(val, #val)
865 * call-seq:
866 * clogger.respond_to?(:to_path) => true or false
867 * clogger.respond_to?(:close) => true
869 * used to delegate +:to_path+ checks for Rack webservers that optimize
870 * static file serving
872 static VALUE respond_to(VALUE self, VALUE method)
874 struct clogger *c = clogger_get(self);
875 ID id = rb_to_id(method);
877 if (close_id == id)
878 return Qtrue;
879 return rb_respond_to(c->body, id);
883 * call-seq:
884 * clogger.to_path
886 * used to proxy +:to_path+ method calls to the wrapped response body.
888 static VALUE to_path(VALUE self)
890 struct clogger *c = clogger_get(self);
891 VALUE path = rb_funcall(c->body, to_path_id, 0);
892 struct stat sb;
893 int rv;
894 unsigned devfd;
895 const char *cpath = StringValueCStr(path);
897 /* try to avoid an extra path lookup */
898 if (rb_respond_to(c->body, to_io_id))
899 rv = fstat(my_fileno(c->body), &sb);
901 * Rainbows! can use "/dev/fd/%u" in to_path output to avoid
902 * extra open() syscalls, too.
904 else if (sscanf(cpath, "/dev/fd/%u", &devfd) == 1)
905 rv = fstat((int)devfd, &sb);
906 else
907 rv = stat(cpath, &sb);
910 * calling this method implies the web server will bypass
911 * the each method where body_bytes_sent is calculated,
912 * so we stat and set that value here.
914 c->body_bytes_sent = rv == 0 ? sb.st_size : 0;
915 return path;
919 * call-seq:
920 * clogger.to_io
922 * used to proxy +:to_io+ method calls to the wrapped response body.
924 static VALUE to_io(VALUE self)
926 struct clogger *c = clogger_get(self);
927 struct stat sb;
928 VALUE io = rb_convert_type(c->body, T_FILE, "IO", "to_io");
930 if (fstat(my_fileno(io), &sb) == 0)
931 c->body_bytes_sent = sb.st_size;
933 return io;
936 /* :nodoc: */
937 static VALUE body(VALUE self)
939 return clogger_get(self)->body;
942 void Init_clogger_ext(void)
944 VALUE tmp;
946 check_clock();
948 ltlt_id = rb_intern("<<");
949 call_id = rb_intern("call");
950 each_id = rb_intern("each");
951 close_id = rb_intern("close");
952 to_i_id = rb_intern("to_i");
953 to_s_id = rb_intern("to_s");
954 size_id = rb_intern("size");
955 sq_brace_id = rb_intern("[]");
956 new_id = rb_intern("new");
957 to_path_id = rb_intern("to_path");
958 to_io_id = rb_intern("to_io");
959 respond_to_id = rb_intern("respond_to?");
960 cClogger = rb_define_class("Clogger", rb_cObject);
961 mFormat = rb_define_module_under(cClogger, "Format");
962 rb_define_alloc_func(cClogger, clogger_alloc);
963 rb_define_method(cClogger, "initialize", clogger_init, -1);
964 rb_define_method(cClogger, "initialize_copy", clogger_init_copy, 1);
965 rb_define_method(cClogger, "call", clogger_call, 1);
966 rb_define_method(cClogger, "each", clogger_each, 0);
967 rb_define_method(cClogger, "close", clogger_close, 0);
968 rb_define_method(cClogger, "fileno", clogger_fileno, 0);
969 rb_define_method(cClogger, "wrap_body?", clogger_wrap_body, 0);
970 rb_define_method(cClogger, "reentrant?", clogger_reentrant, 0);
971 rb_define_method(cClogger, "to_path", to_path, 0);
972 rb_define_method(cClogger, "to_io", to_io, 0);
973 rb_define_method(cClogger, "respond_to?", respond_to, 1);
974 rb_define_method(cClogger, "body", body, 0);
975 CONST_GLOBAL_STR(REMOTE_ADDR);
976 CONST_GLOBAL_STR(HTTP_X_FORWARDED_FOR);
977 CONST_GLOBAL_STR(REQUEST_METHOD);
978 CONST_GLOBAL_STR(PATH_INFO);
979 CONST_GLOBAL_STR(QUERY_STRING);
980 CONST_GLOBAL_STR(REQUEST_URI);
981 CONST_GLOBAL_STR(HTTP_VERSION);
982 CONST_GLOBAL_STR2(rack_errors, "rack.errors");
983 CONST_GLOBAL_STR2(rack_input, "rack.input");
984 CONST_GLOBAL_STR2(rack_multithread, "rack.multithread");
985 CONST_GLOBAL_STR2(dash, "-");
986 CONST_GLOBAL_STR2(space, " ");
987 CONST_GLOBAL_STR2(question_mark, "?");
988 CONST_GLOBAL_STR2(rack_request_cookie_hash, "rack.request.cookie_hash");
990 tmp = rb_const_get(rb_cObject, rb_intern("Rack"));
991 tmp = rb_const_get(tmp, rb_intern("Utils"));
992 cHeaderHash = rb_const_get(tmp, rb_intern("HeaderHash"));