another workaround for systems with broken CLOCK_MONOTONIC
ext/clogger_ext/clogger.c
#include <ruby.h>
#ifdef HAVE_RUBY_IO_H
# include <ruby/io.h>
#else
# include <rubyio.h>
#endif
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <errno.h>
#ifdef HAVE_FCNTL_H
# include <fcntl.h>
#endif
#ifndef _POSIX_C_SOURCE
# define _POSIX_C_SOURCE 200112L
#endif
#include <time.h>
#include "ruby_1_9_compat.h"
#include "broken_system_compat.h"

/*
 * Availability of a monotonic clock needs to be detected at runtime
 * since we could've been built on a different system than we're run
 * under.
 */
static clockid_t hopefully_CLOCK_MONOTONIC = CLOCK_MONOTONIC;
static void check_clock(void)
{
	struct timespec now;

	/* we can't check this reliably at compile time */
	if (clock_gettime(CLOCK_MONOTONIC, &now) == 0)
		return;

	if (clock_gettime(CLOCK_REALTIME, &now) == 0) {
		hopefully_CLOCK_MONOTONIC = CLOCK_REALTIME;
		rb_warn("CLOCK_MONOTONIC not available, "
		        "falling back to CLOCK_REALTIME");
	} else {
		rb_warn("clock_gettime() totally broken, "
		        "falling back to pure Ruby Clogger");
		rb_raise(rb_eLoadError, "clock_gettime() broken");
	}
}
static void clock_diff(struct timespec *a, const struct timespec *b)
{
	a->tv_sec -= b->tv_sec;
	a->tv_nsec -= b->tv_nsec;
	if (a->tv_nsec < 0) {
		--a->tv_sec;
		a->tv_nsec += 1000000000;
	}
}
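/*
 * Worked example of the borrow above (comment added for clarity, not in
 * the original source): subtracting b = { 3s, 700000000ns } from
 * a = { 5s, 200000000ns } first yields { 2s, -500000000ns }, which the
 * borrow normalizes to { 1s, 500000000ns }, i.e. 1.5 seconds.
 */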
/* give GCC hints for better branch prediction
 * (we layout branches so that ASCII characters are handled faster) */
#if defined(__GNUC__) && (__GNUC__ >= 3)
# define likely(x) __builtin_expect (!!(x), 1)
# define unlikely(x) __builtin_expect (!!(x), 0)
#else
# define unlikely(x) (x)
# define likely(x) (x)
#endif

enum clogger_opcode {
	CL_OP_LITERAL = 0,
	CL_OP_REQUEST,
	CL_OP_RESPONSE,
	CL_OP_SPECIAL,
	CL_OP_EVAL,
	CL_OP_TIME_LOCAL,
	CL_OP_TIME_UTC,
	CL_OP_REQUEST_TIME,
	CL_OP_TIME,
	CL_OP_COOKIE
};

enum clogger_special {
	CL_SP_body_bytes_sent = 0,
	CL_SP_status,
	CL_SP_request,
	CL_SP_request_length,
	CL_SP_response_length,
	CL_SP_ip,
	CL_SP_pid,
	CL_SP_request_uri
};

struct clogger {
	VALUE app;

	VALUE fmt_ops;
	VALUE logger;
	VALUE log_buf;

	VALUE env;
	VALUE cookies;
	VALUE status;
	VALUE headers;
	VALUE body;

	off_t body_bytes_sent;
	struct timespec ts_start;

	int fd;
	int wrap_body;
	int need_resp;
	int reentrant; /* tri-state, -1:auto, 1/0 true/false */
};

static ID ltlt_id;
static ID call_id;
static ID each_id;
static ID close_id;
static ID to_i_id;
static ID to_s_id;
static ID size_id;
static ID sq_brace_id;
static ID new_id;
static ID to_path_id;
static ID to_io_id;
static VALUE cClogger;
static VALUE cToPath;
static VALUE mFormat;
static VALUE cHeaderHash;

/* common hash lookup keys */
static VALUE g_HTTP_X_FORWARDED_FOR;
static VALUE g_REMOTE_ADDR;
static VALUE g_REQUEST_METHOD;
static VALUE g_PATH_INFO;
static VALUE g_REQUEST_URI;
static VALUE g_QUERY_STRING;
static VALUE g_HTTP_VERSION;
static VALUE g_rack_errors;
static VALUE g_rack_input;
static VALUE g_rack_multithread;
static VALUE g_dash;
static VALUE g_space;
static VALUE g_question_mark;
static VALUE g_rack_request_cookie_hash;
#define LOG_BUF_INIT_SIZE 128

static void init_buffers(struct clogger *c)
{
	c->log_buf = rb_str_buf_new(LOG_BUF_INIT_SIZE);
}

static inline int need_escape(unsigned c)
{
	assert(c <= 0xff);
	return !!(c == '\'' || c == '"' || c <= 0x1f);
}

/* we are encoding-agnostic, clients can send us all sorts of junk */
static VALUE byte_xs_str(VALUE from)
{
	static const char esc[] = "0123456789ABCDEF";
	unsigned char *new_ptr;
	unsigned char *ptr = (unsigned char *)RSTRING_PTR(from);
	long len = RSTRING_LEN(from);
	long new_len = len;
	VALUE rv;

	for (; --len >= 0; ptr++) {
		unsigned c = *ptr;

		if (unlikely(need_escape(c)))
			new_len += 3; /* { '\', 'x', 'X', 'X' } */
	}

	len = RSTRING_LEN(from);
	if (new_len == len)
		return from;

	rv = rb_str_new(NULL, new_len);
	new_ptr = (unsigned char *)RSTRING_PTR(rv);
	ptr = (unsigned char *)RSTRING_PTR(from);
	for (; --len >= 0; ptr++) {
		unsigned c = *ptr;

		if (unlikely(need_escape(c))) {
			*new_ptr++ = '\\';
			*new_ptr++ = 'x';
			*new_ptr++ = esc[c >> 4];
			*new_ptr++ = esc[c & 0xf];
		} else {
			*new_ptr++ = c;
		}
	}
	assert(RSTRING_PTR(rv)[RSTRING_LEN(rv)] == '\0');

	return rv;
}

static VALUE byte_xs(VALUE from)
{
	return byte_xs_str(rb_obj_as_string(from));
}
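/*
 * Illustration (comment added for clarity, not in the original source):
 * per the rules in need_escape() above, byte_xs("say \"hi\"\n") yields the
 * log-safe string "say \x22hi\x22\x0A" -- single quotes, double quotes and
 * control bytes become \xXX with uppercase hex, everything else passes
 * through unchanged, and the string is returned as-is if nothing needs
 * escaping.
 */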
static void clogger_mark(void *ptr)
{
	struct clogger *c = ptr;

	rb_gc_mark(c->app);
	rb_gc_mark(c->fmt_ops);
	rb_gc_mark(c->logger);
	rb_gc_mark(c->log_buf);
	rb_gc_mark(c->env);
	rb_gc_mark(c->cookies);
	rb_gc_mark(c->status);
	rb_gc_mark(c->headers);
	rb_gc_mark(c->body);
}

static VALUE clogger_alloc(VALUE klass)
{
	struct clogger *c;

	return Data_Make_Struct(klass, struct clogger, clogger_mark, -1, c);
}

static struct clogger *clogger_get(VALUE self)
{
	struct clogger *c;

	Data_Get_Struct(self, struct clogger, c);
	assert(c);
	return c;
}

/* only for writing to regular files, not stupid crap like NFS */
static void write_full(int fd, const void *buf, size_t count)
{
	ssize_t r;
	unsigned long ubuf = (unsigned long)buf;

	while (count > 0) {
		r = write(fd, (void *)ubuf, count);

		if ((size_t)r == count) { /* overwhelmingly likely */
			return;
		} else if (r > 0) {
			count -= r;
			ubuf += r;
		} else {
			if (errno == EINTR || errno == EAGAIN)
				continue; /* poor souls on NFS and like: */
			if (!errno)
				errno = ENOSPC;
			rb_sys_fail("write");
		}
	}
}
/*
 * allow us to use write_full() iff we detect a blocking file
 * descriptor that wouldn't play nicely with Ruby threading/fibers
 */
static int raw_fd(VALUE my_fd)
{
#if defined(HAVE_FCNTL) && defined(F_GETFL) && defined(O_NONBLOCK)
	int fd;
	int flags;

	if (NIL_P(my_fd))
		return -1;
	fd = NUM2INT(my_fd);

	flags = fcntl(fd, F_GETFL);
	if (flags < 0)
		rb_sys_fail("fcntl");

	if (flags & O_NONBLOCK) {
		struct stat sb;

		if (fstat(fd, &sb) < 0)
			return -1;

		/* O_NONBLOCK is no-op for regular files: */
		if (! S_ISREG(sb.st_mode))
			return -1;
	}
	return fd;
#else /* platforms w/o fcntl/F_GETFL/O_NONBLOCK */
	return -1;
#endif /* platforms w/o fcntl/F_GETFL/O_NONBLOCK */
}
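/*
 * Summary of raw_fd() (comment added for clarity, not in the original
 * source): it returns the integer descriptor only when bypassing Ruby I/O
 * with write_full() is safe -- the descriptor is blocking, or non-blocking
 * but backed by a regular file where O_NONBLOCK has no effect.  Anything
 * else (nil, non-regular files marked O_NONBLOCK, or platforms lacking
 * fcntl/F_GETFL/O_NONBLOCK) yields -1 so the Ruby logger object is used
 * instead.
 */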
/* :nodoc: */
static VALUE clogger_reentrant(VALUE self)
{
	return clogger_get(self)->reentrant == 0 ? Qfalse : Qtrue;
}

/* :nodoc: */
static VALUE clogger_wrap_body(VALUE self)
{
	return clogger_get(self)->wrap_body == 0 ? Qfalse : Qtrue;
}

static void append_status(struct clogger *c)
{
	char buf[sizeof("999")];
	int nr;
	VALUE status = c->status;

	if (TYPE(status) != T_FIXNUM) {
		status = rb_funcall(status, to_i_id, 0);
		/* no way it's a valid status code (at least not HTTP/1.1) */
		if (TYPE(status) != T_FIXNUM) {
			rb_str_buf_append(c->log_buf, g_dash);
			return;
		}
	}

	nr = FIX2INT(status);
	if (nr >= 100 && nr <= 999) {
		nr = snprintf(buf, sizeof(buf), "%03d", nr);
		assert(nr == 3);
		rb_str_buf_cat(c->log_buf, buf, nr);
	} else {
		/* raise?, swap for 500? */
		rb_str_buf_append(c->log_buf, g_dash);
	}
}

/* this is Rack 1.0.0-compatible, won't try to parse commas in XFF */
static void append_ip(struct clogger *c)
{
	VALUE env = c->env;
	VALUE tmp = rb_hash_aref(env, g_HTTP_X_FORWARDED_FOR);

	if (NIL_P(tmp)) {
		/* can't be faked on any real server, so no escape */
		tmp = rb_hash_aref(env, g_REMOTE_ADDR);
		if (NIL_P(tmp))
			tmp = g_dash;
	} else {
		tmp = byte_xs(tmp);
	}
	rb_str_buf_append(c->log_buf, tmp);
}

static void append_body_bytes_sent(struct clogger *c)
{
	char buf[(sizeof(off_t) * 8) / 3 + 1];
	const char *fmt = sizeof(off_t) == sizeof(long) ? "%ld" : "%lld";
	int nr = snprintf(buf, sizeof(buf), fmt, c->body_bytes_sent);

	assert(nr > 0 && nr < (int)sizeof(buf));
	rb_str_buf_cat(c->log_buf, buf, nr);
}
static void append_ts(struct clogger *c, const VALUE *op, struct timespec *ts)
{
	char buf[sizeof(".000000") + ((sizeof(ts->tv_sec) * 8) / 3)];
	int nr;
	char *fmt = RSTRING_PTR(op[1]);
	int ndiv = NUM2INT(op[2]);
	int usec = ts->tv_nsec / 1000;

	nr = snprintf(buf, sizeof(buf), fmt,
	              (int)ts->tv_sec, (int)(usec / ndiv));
	assert(nr > 0 && nr < (int)sizeof(buf));
	rb_str_buf_cat(c->log_buf, buf, nr);
}

static void append_request_time_fmt(struct clogger *c, const VALUE *op)
{
	struct timespec now;

	clock_gettime(hopefully_CLOCK_MONOTONIC, &now);
	clock_diff(&now, &c->ts_start);
	append_ts(c, op, &now);
}

static void append_time_fmt(struct clogger *c, const VALUE *op)
{
	struct timespec now;
	int r = clock_gettime(CLOCK_REALTIME, &now);

	if (unlikely(r != 0))
		rb_sys_fail("clock_gettime(CLOCK_REALTIME)");
	append_ts(c, op, &now);
}

static void append_request_uri(struct clogger *c)
{
	VALUE tmp;

	tmp = rb_hash_aref(c->env, g_REQUEST_URI);
	if (NIL_P(tmp)) {
		tmp = rb_hash_aref(c->env, g_PATH_INFO);
		if (!NIL_P(tmp))
			rb_str_buf_append(c->log_buf, byte_xs(tmp));
		tmp = rb_hash_aref(c->env, g_QUERY_STRING);
		if (!NIL_P(tmp) && RSTRING_LEN(tmp) != 0) {
			rb_str_buf_append(c->log_buf, g_question_mark);
			rb_str_buf_append(c->log_buf, byte_xs(tmp));
		}
	} else {
		rb_str_buf_append(c->log_buf, byte_xs(tmp));
	}
}

static void append_request(struct clogger *c)
{
	VALUE tmp;

	/* REQUEST_METHOD doesn't need escaping, Rack::Lint governs it */
	tmp = rb_hash_aref(c->env, g_REQUEST_METHOD);
	if (!NIL_P(tmp))
		rb_str_buf_append(c->log_buf, tmp);

	rb_str_buf_append(c->log_buf, g_space);

	append_request_uri(c);

	/* HTTP_VERSION can be injected by malicious clients */
	tmp = rb_hash_aref(c->env, g_HTTP_VERSION);
	if (!NIL_P(tmp)) {
		rb_str_buf_append(c->log_buf, g_space);
		rb_str_buf_append(c->log_buf, byte_xs(tmp));
	}
}
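/*
 * Example output of append_request() (comment added for clarity, not in
 * the original source): with no REQUEST_URI set, REQUEST_METHOD="GET",
 * PATH_INFO="/index.html", QUERY_STRING="q=1" and HTTP_VERSION="HTTP/1.1",
 * the log buffer gains "GET /index.html?q=1 HTTP/1.1"; a missing
 * HTTP_VERSION simply drops the trailing " HTTP/1.1" part.
 */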
static void append_request_length(struct clogger *c)
{
	VALUE tmp = rb_hash_aref(c->env, g_rack_input);
	if (NIL_P(tmp)) {
		rb_str_buf_append(c->log_buf, g_dash);
	} else {
		tmp = rb_funcall(tmp, size_id, 0);
		rb_str_buf_append(c->log_buf, rb_funcall(tmp, to_s_id, 0));
	}
}

static void append_time(struct clogger *c, enum clogger_opcode op, VALUE fmt)
{
	/* you'd have to be a moron to use formats this big... */
	char buf[sizeof("Saturday, November 01, 1970, 00:00:00 PM +0000")];
	size_t nr;
	struct tm tmp;
	time_t t = time(NULL);

	if (op == CL_OP_TIME_LOCAL)
		localtime_r(&t, &tmp);
	else if (op == CL_OP_TIME_UTC)
		gmtime_r(&t, &tmp);
	else
		assert(0 && "unknown op");

	nr = strftime(buf, sizeof(buf), RSTRING_PTR(fmt), &tmp);
	if (nr == 0 || nr == sizeof(buf))
		rb_str_buf_append(c->log_buf, g_dash);
	else
		rb_str_buf_cat(c->log_buf, buf, nr);
}

static void append_pid(struct clogger *c)
{
	char buf[(sizeof(pid_t) * 8) / 3 + 1];
	int nr = snprintf(buf, sizeof(buf), "%d", (int)getpid());

	assert(nr > 0 && nr < (int)sizeof(buf));
	rb_str_buf_cat(c->log_buf, buf, nr);
}

static void append_eval(struct clogger *c, VALUE str)
{
	int state = -1;
	VALUE rv = rb_eval_string_protect(RSTRING_PTR(str), &state);

	rv = state == 0 ? rb_obj_as_string(rv) : g_dash;
	rb_str_buf_append(c->log_buf, rv);
}

static void append_cookie(struct clogger *c, VALUE key)
{
	VALUE cookie;

	if (c->cookies == Qfalse)
		c->cookies = rb_hash_aref(c->env, g_rack_request_cookie_hash);

	if (NIL_P(c->cookies)) {
		cookie = g_dash;
	} else {
		cookie = rb_hash_aref(c->cookies, key);
		if (NIL_P(cookie))
			cookie = g_dash;
	}
	rb_str_buf_append(c->log_buf, cookie);
}

static void append_request_env(struct clogger *c, VALUE key)
{
	VALUE tmp = rb_hash_aref(c->env, key);

	tmp = NIL_P(tmp) ? g_dash : byte_xs(tmp);
	rb_str_buf_append(c->log_buf, tmp);
}

static void append_response(struct clogger *c, VALUE key)
{
	VALUE v;

	assert(rb_obj_is_kind_of(c->headers, cHeaderHash) && "not HeaderHash");

	v = rb_funcall(c->headers, sq_brace_id, 1, key);
	v = NIL_P(v) ? g_dash : byte_xs(v);
	rb_str_buf_append(c->log_buf, v);
}

static void special_var(struct clogger *c, enum clogger_special var)
{
	switch (var) {
	case CL_SP_body_bytes_sent:
		append_body_bytes_sent(c);
		break;
	case CL_SP_status:
		append_status(c);
		break;
	case CL_SP_request:
		append_request(c);
		break;
	case CL_SP_request_length:
		append_request_length(c);
		break;
	case CL_SP_response_length:
		if (c->body_bytes_sent == 0)
			rb_str_buf_append(c->log_buf, g_dash);
		else
			append_body_bytes_sent(c);
		break;
	case CL_SP_ip:
		append_ip(c);
		break;
	case CL_SP_pid:
		append_pid(c);
		break;
	case CL_SP_request_uri:
		append_request_uri(c);
	}
}
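/*
 * Descriptive note (comment added for clarity, not in the original source):
 * cwrite() below walks the compiled format ops in c->fmt_ops, appending each
 * literal or substituted value to c->log_buf, then emits the finished line
 * either with write_full() when a safe raw fd is available, or via the
 * logger object's "<<" method (falling back to env["rack.errors"]).
 */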
static VALUE cwrite(struct clogger *c)
{
	const VALUE ops = c->fmt_ops;
	const VALUE *ary = RARRAY_PTR(ops);
	long i = RARRAY_LEN(ops);
	VALUE dst = c->log_buf;

	rb_str_set_len(dst, 0);

	for (; --i >= 0; ary++) {
		const VALUE *op = RARRAY_PTR(*ary);
		enum clogger_opcode opcode = FIX2INT(op[0]);

		switch (opcode) {
		case CL_OP_LITERAL:
			rb_str_buf_append(dst, op[1]);
			break;
		case CL_OP_REQUEST:
			append_request_env(c, op[1]);
			break;
		case CL_OP_RESPONSE:
			append_response(c, op[1]);
			break;
		case CL_OP_SPECIAL:
			special_var(c, FIX2INT(op[1]));
			break;
		case CL_OP_EVAL:
			append_eval(c, op[1]);
			break;
		case CL_OP_TIME_LOCAL:
		case CL_OP_TIME_UTC:
			append_time(c, opcode, op[1]);
			break;
		case CL_OP_REQUEST_TIME:
			append_request_time_fmt(c, op);
			break;
		case CL_OP_TIME:
			append_time_fmt(c, op);
			break;
		case CL_OP_COOKIE:
			append_cookie(c, op[1]);
			break;
		}
	}

	if (c->fd >= 0) {
		write_full(c->fd, RSTRING_PTR(dst), RSTRING_LEN(dst));
	} else {
		VALUE logger = c->logger;

		if (NIL_P(logger))
			logger = rb_hash_aref(c->env, g_rack_errors);
		rb_funcall(logger, ltlt_id, 1, dst);
	}

	return Qnil;
}

static void init_logger(struct clogger *c, VALUE path)
{
	ID id;

	if (!NIL_P(path) && !NIL_P(c->logger))
		rb_raise(rb_eArgError, ":logger and :path are independent");
	if (!NIL_P(path)) {
		VALUE ab = rb_str_new2("ab");
		id = rb_intern("open");
		c->logger = rb_funcall(rb_cFile, id, 2, path, ab);
	}

	id = rb_intern("sync=");
	if (rb_respond_to(c->logger, id))
		rb_funcall(c->logger, id, 1, Qtrue);

	id = rb_intern("fileno");
	if (rb_respond_to(c->logger, id))
		c->fd = raw_fd(rb_funcall(c->logger, id, 0));
}
/*
 * call-seq:
 *   Clogger.new(app, :logger => $stderr, :format => string) => obj
 *
 * Creates a new Clogger object that wraps +app+.  +:logger+ may
 * be any object that responds to the "<<" method with a string argument.
 * If +:logger+ is a string, it will be treated as a path to a
 * File that will be opened in append mode.
 */
static VALUE clogger_init(int argc, VALUE *argv, VALUE self)
{
	struct clogger *c = clogger_get(self);
	VALUE o = Qnil;
	VALUE fmt = rb_const_get(mFormat, rb_intern("Common"));

	rb_scan_args(argc, argv, "11", &c->app, &o);
	c->fd = -1;
	c->logger = Qnil;
	c->reentrant = -1; /* auto-detect */

	if (TYPE(o) == T_HASH) {
		VALUE tmp;

		tmp = rb_hash_aref(o, ID2SYM(rb_intern("path")));
		c->logger = rb_hash_aref(o, ID2SYM(rb_intern("logger")));
		init_logger(c, tmp);

		tmp = rb_hash_aref(o, ID2SYM(rb_intern("format")));
		if (!NIL_P(tmp))
			fmt = tmp;

		tmp = rb_hash_aref(o, ID2SYM(rb_intern("reentrant")));
		switch (TYPE(tmp)) {
		case T_TRUE:
			c->reentrant = 1;
			break;
		case T_FALSE:
			c->reentrant = 0;
		case T_NIL:
			break;
		default:
			rb_raise(rb_eArgError, ":reentrant must be boolean");
		}
	}

	init_buffers(c);
	c->fmt_ops = rb_funcall(self, rb_intern("compile_format"), 2, fmt, o);

	if (Qtrue == rb_funcall(self, rb_intern("need_response_headers?"),
	                        1, c->fmt_ops))
		c->need_resp = 1;
	if (Qtrue == rb_funcall(self, rb_intern("need_wrap_body?"),
	                        1, c->fmt_ops))
		c->wrap_body = 1;

	return self;
}
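/*
 * Hypothetical Rack usage sketch (not part of this file), assuming the
 * Ruby-side Clogger::Format::Common constant referenced above and a
 * conventional config.ru:
 *
 *   require "clogger"
 *   use Clogger, :format => Clogger::Format::Common, :path => "/tmp/access.log"
 *   run lambda { |env| [ 200, { "Content-Type" => "text/plain" }, [ "hi\n" ] ] }
 */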
static VALUE body_iter_i(VALUE str, VALUE memop)
{
	off_t *len = (off_t *)memop;

	str = rb_obj_as_string(str);
	*len += RSTRING_LEN(str);

	return rb_yield(str);
}

static VALUE body_close(struct clogger *c)
{
	if (rb_respond_to(c->body, close_id))
		return rb_funcall(c->body, close_id, 0);
	return Qnil;
}

/*
 * call-seq:
 *   clogger.each { |part| socket.write(part) }
 *
 * Delegates the body#each call to the underlying +body+ object
 * while tracking the number of bytes yielded. This will log
 * the request.
 */
static VALUE clogger_each(VALUE self)
{
	struct clogger *c = clogger_get(self);

	rb_need_block();
	c->body_bytes_sent = 0;
	rb_iterate(rb_each, c->body, body_iter_i, (VALUE)&c->body_bytes_sent);

	return self;
}

/*
 * call-seq:
 *   clogger.close
 *
 * Delegates the body#close call to the underlying +body+ object.
 * This is only used when Clogger is wrapping the +body+ of a Rack
 * response and should be automatically called by the web server.
 */
static VALUE clogger_close(VALUE self)
{
	struct clogger *c = clogger_get(self);

	return rb_ensure(body_close, (VALUE)c, cwrite, (VALUE)c);
}

/* :nodoc: */
static VALUE clogger_fileno(VALUE self)
{
	struct clogger *c = clogger_get(self);

	return c->fd < 0 ? Qnil : INT2NUM(c->fd);
}
static VALUE ccall(struct clogger *c, VALUE env)
{
	VALUE rv;

	clock_gettime(hopefully_CLOCK_MONOTONIC, &c->ts_start);
	c->env = env;
	c->cookies = Qfalse;
	rv = rb_funcall(c->app, call_id, 1, env);
	if (TYPE(rv) == T_ARRAY && RARRAY_LEN(rv) == 3) {
		VALUE *tmp = RARRAY_PTR(rv);

		c->status = tmp[0];
		c->headers = tmp[1];
		c->body = tmp[2];

		rv = rb_ary_new4(3, tmp);
		if (c->need_resp &&
		    ! rb_obj_is_kind_of(tmp[1], cHeaderHash)) {
			c->headers = rb_funcall(cHeaderHash, new_id, 1, tmp[1]);
			rb_ary_store(rv, 1, c->headers);
		}
	} else {
		volatile VALUE tmp = rb_inspect(rv);

		c->status = INT2FIX(500);
		c->headers = c->body = rb_ary_new();
		cwrite(c);
		rb_raise(rb_eTypeError,
		         "app response not a 3 element Array: %s",
		         RSTRING_PTR(tmp));
	}

	return rv;
}
/*
 * call-seq:
 *   clogger.call(env) => [ status, headers, body ]
 *
 * calls the wrapped Rack application with +env+, returns the
 * [ status, headers, body ] tuplet required by Rack.
 */
static VALUE clogger_call(VALUE self, VALUE env)
{
	struct clogger *c = clogger_get(self);
	VALUE rv;

	env = rb_check_convert_type(env, T_HASH, "Hash", "to_hash");

	if (c->wrap_body) {
		if (c->reentrant < 0) {
			VALUE tmp = rb_hash_aref(env, g_rack_multithread);
			c->reentrant = Qfalse == tmp ? 0 : 1;
		}
		if (c->reentrant) {
			self = rb_obj_dup(self);
			c = clogger_get(self);
		}

		rv = ccall(c, env);
		assert(!OBJ_FROZEN(rv) && "frozen response array");

		if (rb_respond_to(c->body, to_path_id))
			self = rb_funcall(cToPath, new_id, 1, self);
		rb_ary_store(rv, 2, self);

		return rv;
	}

	rv = ccall(c, env);
	cwrite(c);

	return rv;
}
/* :nodoc: */
static VALUE clogger_init_copy(VALUE clone, VALUE orig)
{
	struct clogger *a = clogger_get(orig);
	struct clogger *b = clogger_get(clone);

	memcpy(b, a, sizeof(struct clogger));
	init_buffers(b);

	return clone;
}
#define CONST_GLOBAL_STR2(var, val) do { \
	g_##var = rb_obj_freeze(rb_str_new(val, sizeof(val) - 1)); \
	rb_global_variable(&g_##var); \
} while (0)

#define CONST_GLOBAL_STR(val) CONST_GLOBAL_STR2(val, #val)

#ifdef RSTRUCT_PTR
# define ToPath_clogger(tp) RSTRUCT_PTR(tp)[0]
#else
static ID clogger_id;
# define ToPath_clogger(tp) rb_funcall(tp,clogger_id,0)
#endif

static VALUE to_path(VALUE self)
{
	VALUE my_clogger = ToPath_clogger(self);
	struct clogger *c = clogger_get(my_clogger);
	VALUE path = rb_funcall(c->body, to_path_id, 0);
	struct stat sb;
	int rv;
	unsigned devfd;
	const char *cpath;

	Check_Type(path, T_STRING);
	cpath = RSTRING_PTR(path);

	/* try to avoid an extra path lookup */
	if (rb_respond_to(c->body, to_io_id))
		rv = fstat(my_fileno(c->body), &sb);
	/*
	 * Rainbows! can use "/dev/fd/%u" in to_path output to avoid
	 * extra open() syscalls, too.
	 */
	else if (sscanf(cpath, "/dev/fd/%u", &devfd) == 1)
		rv = fstat((int)devfd, &sb);
	else
		rv = stat(cpath, &sb);

	/*
	 * calling this method implies the web server will bypass
	 * the each method where body_bytes_sent is calculated,
	 * so we stat and set that value here.
	 */
	c->body_bytes_sent = rv == 0 ? sb.st_size : 0;

	return path;
}
void Init_clogger_ext(void)
{
	VALUE tmp;

	check_clock();

	ltlt_id = rb_intern("<<");
	call_id = rb_intern("call");
	each_id = rb_intern("each");
	close_id = rb_intern("close");
	to_i_id = rb_intern("to_i");
	to_s_id = rb_intern("to_s");
	size_id = rb_intern("size");
	sq_brace_id = rb_intern("[]");
	new_id = rb_intern("new");
	to_path_id = rb_intern("to_path");
	to_io_id = rb_intern("to_io");
	cClogger = rb_define_class("Clogger", rb_cObject);
	mFormat = rb_define_module_under(cClogger, "Format");
	rb_define_alloc_func(cClogger, clogger_alloc);
	rb_define_method(cClogger, "initialize", clogger_init, -1);
	rb_define_method(cClogger, "initialize_copy", clogger_init_copy, 1);
	rb_define_method(cClogger, "call", clogger_call, 1);
	rb_define_method(cClogger, "each", clogger_each, 0);
	rb_define_method(cClogger, "close", clogger_close, 0);
	rb_define_method(cClogger, "fileno", clogger_fileno, 0);
	rb_define_method(cClogger, "wrap_body?", clogger_wrap_body, 0);
	rb_define_method(cClogger, "reentrant?", clogger_reentrant, 0);
	CONST_GLOBAL_STR(REMOTE_ADDR);
	CONST_GLOBAL_STR(HTTP_X_FORWARDED_FOR);
	CONST_GLOBAL_STR(REQUEST_METHOD);
	CONST_GLOBAL_STR(PATH_INFO);
	CONST_GLOBAL_STR(QUERY_STRING);
	CONST_GLOBAL_STR(REQUEST_URI);
	CONST_GLOBAL_STR(HTTP_VERSION);
	CONST_GLOBAL_STR2(rack_errors, "rack.errors");
	CONST_GLOBAL_STR2(rack_input, "rack.input");
	CONST_GLOBAL_STR2(rack_multithread, "rack.multithread");
	CONST_GLOBAL_STR2(dash, "-");
	CONST_GLOBAL_STR2(space, " ");
	CONST_GLOBAL_STR2(question_mark, "?");
	CONST_GLOBAL_STR2(rack_request_cookie_hash, "rack.request.cookie_hash");

	tmp = rb_const_get(rb_cObject, rb_intern("Rack"));
	tmp = rb_const_get(tmp, rb_intern("Utils"));
	cHeaderHash = rb_const_get(tmp, rb_intern("HeaderHash"));
	cToPath = rb_const_get(cClogger, rb_intern("ToPath"));
	rb_define_method(cToPath, "to_path", to_path, 0);
#ifndef RSTRUCT_PTR
	clogger_id = rb_intern("clogger");
#endif
}