pass along "to_io" calls to the body
[clogger.git] / ext / clogger_ext / clogger.c
blob94301fcfb6ee819255659501baf98db617891cf4
1 #include <ruby.h>
2 #ifdef HAVE_RUBY_IO_H
3 # include <ruby/io.h>
4 #else
5 # include <rubyio.h>
6 #endif
7 #include <assert.h>
8 #include <unistd.h>
9 #include <sys/types.h>
10 #include <sys/stat.h>
11 #include <sys/time.h>
12 #include <errno.h>
13 #ifdef HAVE_FCNTL_H
14 # include <fcntl.h>
15 #endif
16 #ifndef _POSIX_C_SOURCE
17 # define _POSIX_C_SOURCE 200112L
18 #endif
19 #include <time.h>
20 #include "ruby_1_9_compat.h"
21 #include "broken_system_compat.h"
24 * Availability of a monotonic clock needs to be detected at runtime
25 * since we could've been built on a different system than we're run
26 * under.
28 static clockid_t hopefully_CLOCK_MONOTONIC;
30 static void check_clock(void)
32 struct timespec now;
34 hopefully_CLOCK_MONOTONIC = CLOCK_MONOTONIC;
36 /* we can't check this reliably at compile time */
37 if (clock_gettime(CLOCK_MONOTONIC, &now) == 0)
38 return;
40 if (clock_gettime(CLOCK_REALTIME, &now) == 0) {
41 hopefully_CLOCK_MONOTONIC = CLOCK_REALTIME;
42 rb_warn("CLOCK_MONOTONIC not available, "
43 "falling back to CLOCK_REALTIME");
45 rb_warn("clock_gettime() totally broken, " \
46 "falling back to pure Ruby Clogger");
47 rb_raise(rb_eLoadError, "clock_gettime() broken");
/* a -= b, keeping tv_nsec normalized into [0, 1e9) */
static void clock_diff(struct timespec *a, const struct timespec *b)
{
	long nsec = a->tv_nsec - b->tv_nsec;

	a->tv_sec -= b->tv_sec;
	if (nsec < 0) {
		a->tv_sec--;
		nsec += 1000000000;
	}
	a->tv_nsec = nsec;
}
60 /* give GCC hints for better branch prediction
61 * (we layout branches so that ASCII characters are handled faster) */
62 #if defined(__GNUC__) && (__GNUC__ >= 3)
63 # define likely(x) __builtin_expect (!!(x), 1)
64 # define unlikely(x) __builtin_expect (!!(x), 0)
65 #else
66 # define unlikely(x) (x)
67 # define likely(x) (x)
68 #endif
/* opcodes for compiled log-format directives, dispatched in cwrite() */
enum clogger_opcode {
	CL_OP_LITERAL = 0,   /* append a literal string */
	CL_OP_REQUEST,       /* append an escaped request env[] value */
	CL_OP_RESPONSE,      /* append an escaped response header value */
	CL_OP_SPECIAL,       /* see enum clogger_special */
	CL_OP_EVAL,          /* eval a Ruby expression string */
	CL_OP_TIME_LOCAL,    /* strftime() in local time */
	CL_OP_TIME_UTC,      /* strftime() in UTC */
	CL_OP_REQUEST_TIME,  /* elapsed time since the request started */
	CL_OP_TIME,          /* current time with subsecond precision */
	CL_OP_COOKIE         /* request cookie value */
};
/* "special" log-format variables, dispatched in special_var() */
enum clogger_special {
	CL_SP_body_bytes_sent = 0,
	CL_SP_status,
	CL_SP_request,
	CL_SP_request_length,
	CL_SP_response_length, /* like body_bytes_sent, but "-" when zero */
	CL_SP_ip,
	CL_SP_pid,
	CL_SP_request_uri
};
94 struct clogger {
95 VALUE app;
97 VALUE fmt_ops;
98 VALUE logger;
99 VALUE log_buf;
101 VALUE env;
102 VALUE cookies;
103 VALUE status;
104 VALUE headers;
105 VALUE body;
107 off_t body_bytes_sent;
108 struct timespec ts_start;
110 int fd;
111 int wrap_body;
112 int need_resp;
113 int reentrant; /* tri-state, -1:auto, 1/0 true/false */
116 static ID ltlt_id;
117 static ID call_id;
118 static ID each_id;
119 static ID close_id;
120 static ID to_i_id;
121 static ID to_s_id;
122 static ID size_id;
123 static ID sq_brace_id;
124 static ID new_id;
125 static ID to_path_id;
126 static ID to_io_id;
127 static ID respond_to_id;
128 static VALUE cClogger;
129 static VALUE mFormat;
130 static VALUE cHeaderHash;
132 /* common hash lookup keys */
133 static VALUE g_HTTP_X_FORWARDED_FOR;
134 static VALUE g_REMOTE_ADDR;
135 static VALUE g_REQUEST_METHOD;
136 static VALUE g_PATH_INFO;
137 static VALUE g_REQUEST_URI;
138 static VALUE g_QUERY_STRING;
139 static VALUE g_HTTP_VERSION;
140 static VALUE g_rack_errors;
141 static VALUE g_rack_input;
142 static VALUE g_rack_multithread;
143 static VALUE g_dash;
144 static VALUE g_space;
145 static VALUE g_question_mark;
146 static VALUE g_rack_request_cookie_hash;
148 #define LOG_BUF_INIT_SIZE 128
150 static void init_buffers(struct clogger *c)
152 c->log_buf = rb_str_buf_new(LOG_BUF_INIT_SIZE);
/* true for bytes that must be escaped in log output: quotes and CTLs */
static inline int need_escape(unsigned c)
{
	assert(c <= 0xff);
	if (c <= 0x1f)
		return 1;
	return (c == '"' || c == '\'') ? 1 : 0;
}
161 /* we are encoding-agnostic, clients can send us all sorts of junk */
162 static VALUE byte_xs_str(VALUE from)
164 static const char esc[] = "0123456789ABCDEF";
165 unsigned char *new_ptr;
166 unsigned char *ptr = (unsigned char *)RSTRING_PTR(from);
167 long len = RSTRING_LEN(from);
168 long new_len = len;
169 VALUE rv;
171 for (; --len >= 0; ptr++) {
172 unsigned c = *ptr;
174 if (unlikely(need_escape(c)))
175 new_len += 3; /* { '\', 'x', 'X', 'X' } */
178 len = RSTRING_LEN(from);
179 if (new_len == len)
180 return from;
182 rv = rb_str_new(NULL, new_len);
183 new_ptr = (unsigned char *)RSTRING_PTR(rv);
184 ptr = (unsigned char *)RSTRING_PTR(from);
185 for (; --len >= 0; ptr++) {
186 unsigned c = *ptr;
188 if (unlikely(need_escape(c))) {
189 *new_ptr++ = '\\';
190 *new_ptr++ = 'x';
191 *new_ptr++ = esc[c >> 4];
192 *new_ptr++ = esc[c & 0xf];
193 } else {
194 *new_ptr++ = c;
197 assert(RSTRING_PTR(rv)[RSTRING_LEN(rv)] == '\0');
199 return rv;
202 static VALUE byte_xs(VALUE from)
204 return byte_xs_str(rb_obj_as_string(from));
207 static void clogger_mark(void *ptr)
209 struct clogger *c = ptr;
211 rb_gc_mark(c->app);
212 rb_gc_mark(c->fmt_ops);
213 rb_gc_mark(c->logger);
214 rb_gc_mark(c->log_buf);
215 rb_gc_mark(c->env);
216 rb_gc_mark(c->cookies);
217 rb_gc_mark(c->status);
218 rb_gc_mark(c->headers);
219 rb_gc_mark(c->body);
222 static VALUE clogger_alloc(VALUE klass)
224 struct clogger *c;
226 return Data_Make_Struct(klass, struct clogger, clogger_mark, -1, c);
229 static struct clogger *clogger_get(VALUE self)
231 struct clogger *c;
233 Data_Get_Struct(self, struct clogger, c);
234 assert(c);
235 return c;
/* only for writing to regular files, not stupid crap like NFS */
static void write_full(int fd, const void *buf, size_t count)
{
	const char *p = buf;

	while (count > 0) {
		ssize_t w = write(fd, p, count);

		if ((size_t)w == count)
			return; /* overwhelmingly likely */
		if (w > 0) {
			/* partial write: advance and retry */
			count -= w;
			p += w;
		} else {
			if (errno == EINTR || errno == EAGAIN)
				continue; /* poor souls on NFS and like: */
			if (!errno)
				errno = ENOSPC;
			rb_sys_fail("write");
		}
	}
}
263 * allow us to use write_full() iff we detect a blocking file
264 * descriptor that wouldn't play nicely with Ruby threading/fibers
266 static int raw_fd(VALUE my_fd)
268 #if defined(HAVE_FCNTL) && defined(F_GETFL) && defined(O_NONBLOCK)
269 int fd;
270 int flags;
272 if (NIL_P(my_fd))
273 return -1;
274 fd = NUM2INT(my_fd);
276 flags = fcntl(fd, F_GETFL);
277 if (flags < 0)
278 rb_sys_fail("fcntl");
280 if (flags & O_NONBLOCK) {
281 struct stat sb;
283 if (fstat(fd, &sb) < 0)
284 return -1;
286 /* O_NONBLOCK is no-op for regular files: */
287 if (! S_ISREG(sb.st_mode))
288 return -1;
290 return fd;
291 #else /* platforms w/o fcntl/F_GETFL/O_NONBLOCK */
292 return -1;
293 #endif /* platforms w/o fcntl/F_GETFL/O_NONBLOCK */
296 /* :nodoc: */
297 static VALUE clogger_reentrant(VALUE self)
299 return clogger_get(self)->reentrant == 0 ? Qfalse : Qtrue;
302 /* :nodoc: */
303 static VALUE clogger_wrap_body(VALUE self)
305 return clogger_get(self)->wrap_body == 0 ? Qfalse : Qtrue;
308 static void append_status(struct clogger *c)
310 char buf[sizeof("999")];
311 int nr;
312 VALUE status = c->status;
314 if (TYPE(status) != T_FIXNUM) {
315 status = rb_funcall(status, to_i_id, 0);
316 /* no way it's a valid status code (at least not HTTP/1.1) */
317 if (TYPE(status) != T_FIXNUM) {
318 rb_str_buf_append(c->log_buf, g_dash);
319 return;
323 nr = FIX2INT(status);
324 if (nr >= 100 && nr <= 999) {
325 nr = snprintf(buf, sizeof(buf), "%03d", nr);
326 assert(nr == 3);
327 rb_str_buf_cat(c->log_buf, buf, nr);
328 } else {
329 /* raise?, swap for 500? */
330 rb_str_buf_append(c->log_buf, g_dash);
334 /* this is Rack 1.0.0-compatible, won't try to parse commas in XFF */
335 static void append_ip(struct clogger *c)
337 VALUE env = c->env;
338 VALUE tmp = rb_hash_aref(env, g_HTTP_X_FORWARDED_FOR);
340 if (NIL_P(tmp)) {
341 /* can't be faked on any real server, so no escape */
342 tmp = rb_hash_aref(env, g_REMOTE_ADDR);
343 if (NIL_P(tmp))
344 tmp = g_dash;
345 } else {
346 tmp = byte_xs(tmp);
348 rb_str_buf_append(c->log_buf, tmp);
351 static void append_body_bytes_sent(struct clogger *c)
353 char buf[(sizeof(off_t) * 8) / 3 + 1];
354 const char *fmt = sizeof(off_t) == sizeof(long) ? "%ld" : "%lld";
355 int nr = snprintf(buf, sizeof(buf), fmt, c->body_bytes_sent);
357 assert(nr > 0 && nr < (int)sizeof(buf));
358 rb_str_buf_cat(c->log_buf, buf, nr);
361 static void append_ts(struct clogger *c, const VALUE *op, struct timespec *ts)
363 char buf[sizeof(".000000") + ((sizeof(ts->tv_sec) * 8) / 3)];
364 int nr;
365 char *fmt = RSTRING_PTR(op[1]);
366 int ndiv = NUM2INT(op[2]);
367 int usec = ts->tv_nsec / 1000;
369 nr = snprintf(buf, sizeof(buf), fmt,
370 (int)ts->tv_sec, (int)(usec / ndiv));
371 assert(nr > 0 && nr < (int)sizeof(buf));
372 rb_str_buf_cat(c->log_buf, buf, nr);
375 static void append_request_time_fmt(struct clogger *c, const VALUE *op)
377 struct timespec now;
379 clock_gettime(hopefully_CLOCK_MONOTONIC, &now);
380 clock_diff(&now, &c->ts_start);
381 append_ts(c, op, &now);
384 static void append_time_fmt(struct clogger *c, const VALUE *op)
386 struct timespec now;
387 int r = clock_gettime(CLOCK_REALTIME, &now);
389 if (unlikely(r != 0))
390 rb_sys_fail("clock_gettime(CLOCK_REALTIME)");
391 append_ts(c, op, &now);
394 static void append_request_uri(struct clogger *c)
396 VALUE tmp;
398 tmp = rb_hash_aref(c->env, g_REQUEST_URI);
399 if (NIL_P(tmp)) {
400 tmp = rb_hash_aref(c->env, g_PATH_INFO);
401 if (!NIL_P(tmp))
402 rb_str_buf_append(c->log_buf, byte_xs(tmp));
403 tmp = rb_hash_aref(c->env, g_QUERY_STRING);
404 if (!NIL_P(tmp) && RSTRING_LEN(tmp) != 0) {
405 rb_str_buf_append(c->log_buf, g_question_mark);
406 rb_str_buf_append(c->log_buf, byte_xs(tmp));
408 } else {
409 rb_str_buf_append(c->log_buf, byte_xs(tmp));
413 static void append_request(struct clogger *c)
415 VALUE tmp;
417 /* REQUEST_METHOD doesn't need escaping, Rack::Lint governs it */
418 tmp = rb_hash_aref(c->env, g_REQUEST_METHOD);
419 if (!NIL_P(tmp))
420 rb_str_buf_append(c->log_buf, tmp);
422 rb_str_buf_append(c->log_buf, g_space);
424 append_request_uri(c);
426 /* HTTP_VERSION can be injected by malicious clients */
427 tmp = rb_hash_aref(c->env, g_HTTP_VERSION);
428 if (!NIL_P(tmp)) {
429 rb_str_buf_append(c->log_buf, g_space);
430 rb_str_buf_append(c->log_buf, byte_xs(tmp));
434 static void append_request_length(struct clogger *c)
436 VALUE tmp = rb_hash_aref(c->env, g_rack_input);
437 if (NIL_P(tmp)) {
438 rb_str_buf_append(c->log_buf, g_dash);
439 } else {
440 tmp = rb_funcall(tmp, size_id, 0);
441 rb_str_buf_append(c->log_buf, rb_funcall(tmp, to_s_id, 0));
445 static void
446 append_time(struct clogger *c, enum clogger_opcode op, VALUE fmt, VALUE buf)
448 char *buf_ptr = RSTRING_PTR(buf);
449 size_t buf_size = RSTRING_LEN(buf) + 1; /* "\0" */
450 size_t nr;
451 struct tm tmp;
452 time_t t = time(NULL);
454 if (op == CL_OP_TIME_LOCAL)
455 localtime_r(&t, &tmp);
456 else if (op == CL_OP_TIME_UTC)
457 gmtime_r(&t, &tmp);
458 else
459 assert(0 && "unknown op");
461 nr = strftime(buf_ptr, buf_size, RSTRING_PTR(fmt), &tmp);
462 assert(nr < buf_size && "time format too small!");
463 rb_str_buf_cat(c->log_buf, buf_ptr, nr);
466 static void append_pid(struct clogger *c)
468 char buf[(sizeof(pid_t) * 8) / 3 + 1];
469 int nr = snprintf(buf, sizeof(buf), "%d", (int)getpid());
471 assert(nr > 0 && nr < (int)sizeof(buf));
472 rb_str_buf_cat(c->log_buf, buf, nr);
475 static void append_eval(struct clogger *c, VALUE str)
477 int state = -1;
478 VALUE rv = rb_eval_string_protect(RSTRING_PTR(str), &state);
480 rv = state == 0 ? rb_obj_as_string(rv) : g_dash;
481 rb_str_buf_append(c->log_buf, rv);
484 static void append_cookie(struct clogger *c, VALUE key)
486 VALUE cookie;
488 if (c->cookies == Qfalse)
489 c->cookies = rb_hash_aref(c->env, g_rack_request_cookie_hash);
491 if (NIL_P(c->cookies)) {
492 cookie = g_dash;
493 } else {
494 cookie = rb_hash_aref(c->cookies, key);
495 if (NIL_P(cookie))
496 cookie = g_dash;
498 rb_str_buf_append(c->log_buf, cookie);
501 static void append_request_env(struct clogger *c, VALUE key)
503 VALUE tmp = rb_hash_aref(c->env, key);
505 tmp = NIL_P(tmp) ? g_dash : byte_xs(tmp);
506 rb_str_buf_append(c->log_buf, tmp);
509 static void append_response(struct clogger *c, VALUE key)
511 VALUE v;
513 assert(rb_obj_is_kind_of(c->headers, cHeaderHash) && "not HeaderHash");
515 v = rb_funcall(c->headers, sq_brace_id, 1, key);
516 v = NIL_P(v) ? g_dash : byte_xs(v);
517 rb_str_buf_append(c->log_buf, v);
520 static void special_var(struct clogger *c, enum clogger_special var)
522 switch (var) {
523 case CL_SP_body_bytes_sent:
524 append_body_bytes_sent(c);
525 break;
526 case CL_SP_status:
527 append_status(c);
528 break;
529 case CL_SP_request:
530 append_request(c);
531 break;
532 case CL_SP_request_length:
533 append_request_length(c);
534 break;
535 case CL_SP_response_length:
536 if (c->body_bytes_sent == 0)
537 rb_str_buf_append(c->log_buf, g_dash);
538 else
539 append_body_bytes_sent(c);
540 break;
541 case CL_SP_ip:
542 append_ip(c);
543 break;
544 case CL_SP_pid:
545 append_pid(c);
546 break;
547 case CL_SP_request_uri:
548 append_request_uri(c);
552 static VALUE cwrite(struct clogger *c)
554 const VALUE ops = c->fmt_ops;
555 const VALUE *ary = RARRAY_PTR(ops);
556 long i = RARRAY_LEN(ops);
557 VALUE dst = c->log_buf;
559 rb_str_set_len(dst, 0);
561 for (; --i >= 0; ary++) {
562 const VALUE *op = RARRAY_PTR(*ary);
563 enum clogger_opcode opcode = FIX2INT(op[0]);
565 switch (opcode) {
566 case CL_OP_LITERAL:
567 rb_str_buf_append(dst, op[1]);
568 break;
569 case CL_OP_REQUEST:
570 append_request_env(c, op[1]);
571 break;
572 case CL_OP_RESPONSE:
573 append_response(c, op[1]);
574 break;
575 case CL_OP_SPECIAL:
576 special_var(c, FIX2INT(op[1]));
577 break;
578 case CL_OP_EVAL:
579 append_eval(c, op[1]);
580 break;
581 case CL_OP_TIME_LOCAL:
582 case CL_OP_TIME_UTC:
583 append_time(c, opcode, op[1], op[2]);
584 break;
585 case CL_OP_REQUEST_TIME:
586 append_request_time_fmt(c, op);
587 break;
588 case CL_OP_TIME:
589 append_time_fmt(c, op);
590 break;
591 case CL_OP_COOKIE:
592 append_cookie(c, op[1]);
593 break;
597 if (c->fd >= 0) {
598 write_full(c->fd, RSTRING_PTR(dst), RSTRING_LEN(dst));
599 } else {
600 VALUE logger = c->logger;
602 if (NIL_P(logger))
603 logger = rb_hash_aref(c->env, g_rack_errors);
604 rb_funcall(logger, ltlt_id, 1, dst);
607 return Qnil;
610 static void init_logger(struct clogger *c, VALUE path)
612 ID id;
614 if (!NIL_P(path) && !NIL_P(c->logger))
615 rb_raise(rb_eArgError, ":logger and :path are independent");
616 if (!NIL_P(path)) {
617 VALUE ab = rb_str_new2("ab");
618 id = rb_intern("open");
619 c->logger = rb_funcall(rb_cFile, id, 2, path, ab);
622 id = rb_intern("sync=");
623 if (rb_respond_to(c->logger, id))
624 rb_funcall(c->logger, id, 1, Qtrue);
626 id = rb_intern("fileno");
627 if (rb_respond_to(c->logger, id))
628 c->fd = raw_fd(rb_funcall(c->logger, id, 0));
632 * call-seq:
633 * Clogger.new(app, :logger => $stderr, :format => string) => obj
635 * Creates a new Clogger object that wraps +app+. +:logger+ may
636 * be any object that responds to the "<<" method with a string argument.
637 * If +:logger:+ is a string, it will be treated as a path to a
638 * File that will be opened in append mode.
640 static VALUE clogger_init(int argc, VALUE *argv, VALUE self)
642 struct clogger *c = clogger_get(self);
643 VALUE o = Qnil;
644 VALUE fmt = rb_const_get(mFormat, rb_intern("Common"));
646 rb_scan_args(argc, argv, "11", &c->app, &o);
647 c->fd = -1;
648 c->logger = Qnil;
649 c->reentrant = -1; /* auto-detect */
651 if (TYPE(o) == T_HASH) {
652 VALUE tmp;
654 tmp = rb_hash_aref(o, ID2SYM(rb_intern("path")));
655 c->logger = rb_hash_aref(o, ID2SYM(rb_intern("logger")));
656 init_logger(c, tmp);
658 tmp = rb_hash_aref(o, ID2SYM(rb_intern("format")));
659 if (!NIL_P(tmp))
660 fmt = tmp;
662 tmp = rb_hash_aref(o, ID2SYM(rb_intern("reentrant")));
663 switch (TYPE(tmp)) {
664 case T_TRUE:
665 c->reentrant = 1;
666 break;
667 case T_FALSE:
668 c->reentrant = 0;
669 case T_NIL:
670 break;
671 default:
672 rb_raise(rb_eArgError, ":reentrant must be boolean");
676 init_buffers(c);
677 c->fmt_ops = rb_funcall(self, rb_intern("compile_format"), 2, fmt, o);
679 if (Qtrue == rb_funcall(self, rb_intern("need_response_headers?"),
680 1, c->fmt_ops))
681 c->need_resp = 1;
682 if (Qtrue == rb_funcall(self, rb_intern("need_wrap_body?"),
683 1, c->fmt_ops))
684 c->wrap_body = 1;
686 return self;
689 static VALUE body_iter_i(VALUE str, VALUE memop)
691 off_t *len = (off_t *)memop;
693 str = rb_obj_as_string(str);
694 *len += RSTRING_LEN(str);
696 return rb_yield(str);
699 static VALUE body_close(struct clogger *c)
701 if (rb_respond_to(c->body, close_id))
702 return rb_funcall(c->body, close_id, 0);
703 return Qnil;
707 * call-seq:
708 * clogger.each { |part| socket.write(part) }
710 * Delegates the body#each call to the underlying +body+ object
711 * while tracking the number of bytes yielded. This will log
712 * the request.
714 static VALUE clogger_each(VALUE self)
716 struct clogger *c = clogger_get(self);
718 rb_need_block();
719 c->body_bytes_sent = 0;
720 rb_iterate(rb_each, c->body, body_iter_i, (VALUE)&c->body_bytes_sent);
722 return self;
726 * call-seq:
727 * clogger.close
729 * Delegates the body#close call to the underlying +body+ object.
730 * This is only used when Clogger is wrapping the +body+ of a Rack
731 * response and should be automatically called by the web server.
733 static VALUE clogger_close(VALUE self)
735 struct clogger *c = clogger_get(self);
737 return rb_ensure(body_close, (VALUE)c, cwrite, (VALUE)c);
740 /* :nodoc: */
741 static VALUE clogger_fileno(VALUE self)
743 struct clogger *c = clogger_get(self);
745 return c->fd < 0 ? Qnil : INT2NUM(c->fd);
748 static VALUE ccall(struct clogger *c, VALUE env)
750 VALUE rv;
752 clock_gettime(hopefully_CLOCK_MONOTONIC, &c->ts_start);
753 c->env = env;
754 c->cookies = Qfalse;
755 rv = rb_funcall(c->app, call_id, 1, env);
756 if (TYPE(rv) == T_ARRAY && RARRAY_LEN(rv) == 3) {
757 VALUE *tmp = RARRAY_PTR(rv);
759 c->status = tmp[0];
760 c->headers = tmp[1];
761 c->body = tmp[2];
763 rv = rb_ary_new4(3, tmp);
764 if (c->need_resp &&
765 ! rb_obj_is_kind_of(tmp[1], cHeaderHash)) {
766 c->headers = rb_funcall(cHeaderHash, new_id, 1, tmp[1]);
767 rb_ary_store(rv, 1, c->headers);
769 } else {
770 volatile VALUE tmp = rb_inspect(rv);
772 c->status = INT2FIX(500);
773 c->headers = c->body = rb_ary_new();
774 cwrite(c);
775 rb_raise(rb_eTypeError,
776 "app response not a 3 element Array: %s",
777 RSTRING_PTR(tmp));
780 return rv;
784 * call-seq:
785 * clogger.call(env) => [ status, headers, body ]
787 * calls the wrapped Rack application with +env+, returns the
788 * [status, headers, body ] tuplet required by Rack.
790 static VALUE clogger_call(VALUE self, VALUE env)
792 struct clogger *c = clogger_get(self);
793 VALUE rv;
795 env = rb_check_convert_type(env, T_HASH, "Hash", "to_hash");
797 if (c->wrap_body) {
798 if (c->reentrant < 0) {
799 VALUE tmp = rb_hash_aref(env, g_rack_multithread);
800 c->reentrant = Qfalse == tmp ? 0 : 1;
802 if (c->reentrant) {
803 self = rb_obj_dup(self);
804 c = clogger_get(self);
807 rv = ccall(c, env);
808 assert(!OBJ_FROZEN(rv) && "frozen response array");
809 rb_ary_store(rv, 2, self);
811 return rv;
814 rv = ccall(c, env);
815 cwrite(c);
817 return rv;
820 static void duplicate_buffers(VALUE ops)
822 long i = RARRAY_LEN(ops);
823 VALUE *ary = RARRAY_PTR(ops);
825 for ( ; --i >= 0; ary++) {
826 VALUE *op = RARRAY_PTR(*ary);
827 enum clogger_opcode opcode = FIX2INT(op[0]);
829 if (opcode == CL_OP_TIME_LOCAL || opcode == CL_OP_TIME_UTC) {
830 Check_Type(op[2], T_STRING);
831 op[2] = rb_str_dup(op[2]);
832 rb_str_modify(op[2]); /* trigger copy-on-write */
837 /* :nodoc: */
838 static VALUE clogger_init_copy(VALUE clone, VALUE orig)
840 struct clogger *a = clogger_get(orig);
841 struct clogger *b = clogger_get(clone);
843 memcpy(b, a, sizeof(struct clogger));
844 init_buffers(b);
845 duplicate_buffers(b->fmt_ops);
847 return clone;
850 #define CONST_GLOBAL_STR2(var, val) do { \
851 g_##var = rb_obj_freeze(rb_str_new(val, sizeof(val) - 1)); \
852 rb_global_variable(&g_##var); \
853 } while (0)
855 #define CONST_GLOBAL_STR(val) CONST_GLOBAL_STR2(val, #val)
858 * call-seq:
859 * clogger.respond_to?(:to_path) => true or false
860 * clogger.respond_to?(:close) => true
862 * used to delegate +:to_path+ checks for Rack webservers that optimize
863 * static file serving
865 static VALUE respond_to(VALUE self, VALUE method)
867 struct clogger *c = clogger_get(self);
868 ID id = rb_to_id(method);
870 if (close_id == id)
871 return Qtrue;
872 return rb_respond_to(c->body, id);
876 * call-seq:
877 * clogger.to_path
879 * used to proxy +:to_path+ method calls to the wrapped response body.
881 static VALUE to_path(VALUE self)
883 struct clogger *c = clogger_get(self);
884 VALUE path = rb_funcall(c->body, to_path_id, 0);
885 struct stat sb;
886 int rv;
887 unsigned devfd;
888 const char *cpath = StringValuePtr(path);
890 /* try to avoid an extra path lookup */
891 if (rb_respond_to(c->body, to_io_id))
892 rv = fstat(my_fileno(c->body), &sb);
894 * Rainbows! can use "/dev/fd/%u" in to_path output to avoid
895 * extra open() syscalls, too.
897 else if (sscanf(cpath, "/dev/fd/%u", &devfd) == 1)
898 rv = fstat((int)devfd, &sb);
899 else
900 rv = stat(cpath, &sb);
903 * calling this method implies the web server will bypass
904 * the each method where body_bytes_sent is calculated,
905 * so we stat and set that value here.
907 c->body_bytes_sent = rv == 0 ? sb.st_size : 0;
908 return path;
912 * call-seq:
913 * clogger.to_io
915 * used to proxy +:to_io+ method calls to the wrapped response body.
917 static VALUE to_io(VALUE self)
919 struct clogger *c = clogger_get(self);
920 struct stat sb;
921 VALUE io = rb_convert_type(c->body, T_FILE, "IO", "to_io");
923 if (fstat(my_fileno(io), &sb) == 0)
924 c->body_bytes_sent = sb.st_size;
926 return io;
929 void Init_clogger_ext(void)
931 VALUE tmp;
933 check_clock();
935 ltlt_id = rb_intern("<<");
936 call_id = rb_intern("call");
937 each_id = rb_intern("each");
938 close_id = rb_intern("close");
939 to_i_id = rb_intern("to_i");
940 to_s_id = rb_intern("to_s");
941 size_id = rb_intern("size");
942 sq_brace_id = rb_intern("[]");
943 new_id = rb_intern("new");
944 to_path_id = rb_intern("to_path");
945 to_io_id = rb_intern("to_io");
946 respond_to_id = rb_intern("respond_to?");
947 cClogger = rb_define_class("Clogger", rb_cObject);
948 mFormat = rb_define_module_under(cClogger, "Format");
949 rb_define_alloc_func(cClogger, clogger_alloc);
950 rb_define_method(cClogger, "initialize", clogger_init, -1);
951 rb_define_method(cClogger, "initialize_copy", clogger_init_copy, 1);
952 rb_define_method(cClogger, "call", clogger_call, 1);
953 rb_define_method(cClogger, "each", clogger_each, 0);
954 rb_define_method(cClogger, "close", clogger_close, 0);
955 rb_define_method(cClogger, "fileno", clogger_fileno, 0);
956 rb_define_method(cClogger, "wrap_body?", clogger_wrap_body, 0);
957 rb_define_method(cClogger, "reentrant?", clogger_reentrant, 0);
958 rb_define_method(cClogger, "to_path", to_path, 0);
959 rb_define_method(cClogger, "to_io", to_io, 0);
960 rb_define_method(cClogger, "respond_to?", respond_to, 1);
961 CONST_GLOBAL_STR(REMOTE_ADDR);
962 CONST_GLOBAL_STR(HTTP_X_FORWARDED_FOR);
963 CONST_GLOBAL_STR(REQUEST_METHOD);
964 CONST_GLOBAL_STR(PATH_INFO);
965 CONST_GLOBAL_STR(QUERY_STRING);
966 CONST_GLOBAL_STR(REQUEST_URI);
967 CONST_GLOBAL_STR(HTTP_VERSION);
968 CONST_GLOBAL_STR2(rack_errors, "rack.errors");
969 CONST_GLOBAL_STR2(rack_input, "rack.input");
970 CONST_GLOBAL_STR2(rack_multithread, "rack.multithread");
971 CONST_GLOBAL_STR2(dash, "-");
972 CONST_GLOBAL_STR2(space, " ");
973 CONST_GLOBAL_STR2(question_mark, "?");
974 CONST_GLOBAL_STR2(rack_request_cookie_hash, "rack.request.cookie_hash");
976 tmp = rb_const_get(rb_cObject, rb_intern("Rack"));
977 tmp = rb_const_get(tmp, rb_intern("Utils"));
978 cHeaderHash = rb_const_get(tmp, rb_intern("HeaderHash"));