16 #ifndef _POSIX_C_SOURCE
17 # define _POSIX_C_SOURCE 200112L
20 #include "ruby_1_9_compat.h"
21 #include "broken_system_compat.h"
24 * Availability of a monotonic clock needs to be detected at runtime
25 * since we could've been built on a different system than we're run
28 static clockid_t hopefully_CLOCK_MONOTONIC
;
30 static void check_clock(void)
34 hopefully_CLOCK_MONOTONIC
= CLOCK_MONOTONIC
;
36 /* we can't check this reliably at compile time */
37 if (clock_gettime(CLOCK_MONOTONIC
, &now
) == 0)
40 if (clock_gettime(CLOCK_REALTIME
, &now
) == 0) {
41 hopefully_CLOCK_MONOTONIC
= CLOCK_REALTIME
;
42 rb_warn("CLOCK_MONOTONIC not available, "
43 "falling back to CLOCK_REALTIME");
45 rb_warn("clock_gettime() totally broken, " \
46 "falling back to pure Ruby Clogger");
47 rb_raise(rb_eLoadError
, "clock_gettime() broken");
/*
 * In-place subtraction: a -= b, normalizing tv_nsec into [0, 1e9).
 * Assumes a >= b (elapsed-time use only).
 */
static void clock_diff(struct timespec *a, const struct timespec *b)
{
	a->tv_sec -= b->tv_sec;
	a->tv_nsec -= b->tv_nsec;
	if (a->tv_nsec < 0) {
		--a->tv_sec;
		a->tv_nsec += 1000000000;
	}
}
/* give GCC hints for better branch prediction
 * (we layout branches so that ASCII characters are handled faster) */
#if defined(__GNUC__) && (__GNUC__ >= 3)
#  define likely(x) __builtin_expect (!!(x), 1)
#  define unlikely(x) __builtin_expect (!!(x), 0)
#else
#  define unlikely(x) (x)
#  define likely(x) (x)
#endif
83 enum clogger_special
{
84 CL_SP_body_bytes_sent
= 0,
88 CL_SP_response_length
,
107 off_t body_bytes_sent
;
108 struct timespec ts_start
;
113 int reentrant
; /* tri-state, -1:auto, 1/0 true/false */
123 static ID sq_brace_id
;
125 static ID to_path_id
;
127 static ID respond_to_id
;
128 static VALUE cClogger
;
129 static VALUE mFormat
;
130 static VALUE cHeaderHash
;
132 /* common hash lookup keys */
133 static VALUE g_HTTP_X_FORWARDED_FOR
;
134 static VALUE g_REMOTE_ADDR
;
135 static VALUE g_REQUEST_METHOD
;
136 static VALUE g_PATH_INFO
;
137 static VALUE g_REQUEST_URI
;
138 static VALUE g_QUERY_STRING
;
139 static VALUE g_HTTP_VERSION
;
140 static VALUE g_rack_errors
;
141 static VALUE g_rack_input
;
142 static VALUE g_rack_multithread
;
144 static VALUE g_space
;
145 static VALUE g_question_mark
;
146 static VALUE g_rack_request_cookie_hash
;
148 #define LOG_BUF_INIT_SIZE 128
150 static void init_buffers(struct clogger
*c
)
152 c
->log_buf
= rb_str_buf_new(LOG_BUF_INIT_SIZE
);
/* true for bytes that must be hex-escaped in log output:
 * quotes (would corrupt quoted fields) and C0 control bytes */
static inline int need_escape(unsigned c)
{
	return !!(c == '\'' || c == '"' || c <= 0x1f);
}
161 /* we are encoding-agnostic, clients can send us all sorts of junk */
162 static VALUE
byte_xs_str(VALUE from
)
164 static const char esc
[] = "0123456789ABCDEF";
165 unsigned char *new_ptr
;
166 unsigned char *ptr
= (unsigned char *)RSTRING_PTR(from
);
167 long len
= RSTRING_LEN(from
);
171 for (; --len
>= 0; ptr
++) {
174 if (unlikely(need_escape(c
)))
175 new_len
+= 3; /* { '\', 'x', 'X', 'X' } */
178 len
= RSTRING_LEN(from
);
182 rv
= rb_str_new(NULL
, new_len
);
183 new_ptr
= (unsigned char *)RSTRING_PTR(rv
);
184 ptr
= (unsigned char *)RSTRING_PTR(from
);
185 for (; --len
>= 0; ptr
++) {
188 if (unlikely(need_escape(c
))) {
191 *new_ptr
++ = esc
[c
>> 4];
192 *new_ptr
++ = esc
[c
& 0xf];
197 assert(RSTRING_PTR(rv
)[RSTRING_LEN(rv
)] == '\0');
202 static VALUE
byte_xs(VALUE from
)
204 return byte_xs_str(rb_obj_as_string(from
));
207 static void clogger_mark(void *ptr
)
209 struct clogger
*c
= ptr
;
212 rb_gc_mark(c
->fmt_ops
);
213 rb_gc_mark(c
->logger
);
214 rb_gc_mark(c
->log_buf
);
216 rb_gc_mark(c
->cookies
);
217 rb_gc_mark(c
->status
);
218 rb_gc_mark(c
->headers
);
222 static VALUE
clogger_alloc(VALUE klass
)
226 return Data_Make_Struct(klass
, struct clogger
, clogger_mark
, -1, c
);
229 static struct clogger
*clogger_get(VALUE self
)
233 Data_Get_Struct(self
, struct clogger
, c
);
/* only for writing to regular files, not stupid crap like NFS */
/*
 * Write all of +count+ bytes from +buf+ to +fd+, retrying short writes
 * and EINTR/EAGAIN; raises SystemCallError on any other failure.
 */
static void write_full(int fd, const void *buf, size_t count)
{
	ssize_t r;
	unsigned long ubuf = (unsigned long)buf;

	while (count > 0) {
		r = write(fd, (void *)ubuf, count);

		if ((size_t)r == count) { /* overwhelmingly likely */
			return;
		} else if (r > 0) {
			/* short write: advance and retry the remainder */
			count -= r;
			ubuf += r;
		} else {
			if (errno == EINTR || errno == EAGAIN)
				continue; /* poor souls on NFS and like: */
			rb_sys_fail("write");
		}
	}
}
263 * allow us to use write_full() iff we detect a blocking file
264 * descriptor that wouldn't play nicely with Ruby threading/fibers
266 static int raw_fd(VALUE my_fd
)
268 #if defined(HAVE_FCNTL) && defined(F_GETFL) && defined(O_NONBLOCK)
276 flags
= fcntl(fd
, F_GETFL
);
278 rb_sys_fail("fcntl");
280 if (flags
& O_NONBLOCK
) {
283 if (fstat(fd
, &sb
) < 0)
286 /* O_NONBLOCK is no-op for regular files: */
287 if (! S_ISREG(sb
.st_mode
))
291 #else /* platforms w/o fcntl/F_GETFL/O_NONBLOCK */
293 #endif /* platforms w/o fcntl/F_GETFL/O_NONBLOCK */
297 static VALUE
clogger_reentrant(VALUE self
)
299 return clogger_get(self
)->reentrant
== 0 ? Qfalse
: Qtrue
;
303 static VALUE
clogger_wrap_body(VALUE self
)
305 return clogger_get(self
)->wrap_body
== 0 ? Qfalse
: Qtrue
;
308 static void append_status(struct clogger
*c
)
310 char buf
[sizeof("999")];
312 VALUE status
= c
->status
;
314 if (TYPE(status
) != T_FIXNUM
) {
315 status
= rb_funcall(status
, to_i_id
, 0);
316 /* no way it's a valid status code (at least not HTTP/1.1) */
317 if (TYPE(status
) != T_FIXNUM
) {
318 rb_str_buf_append(c
->log_buf
, g_dash
);
323 nr
= FIX2INT(status
);
324 if (nr
>= 100 && nr
<= 999) {
325 nr
= snprintf(buf
, sizeof(buf
), "%03d", nr
);
327 rb_str_buf_cat(c
->log_buf
, buf
, nr
);
329 /* raise?, swap for 500? */
330 rb_str_buf_append(c
->log_buf
, g_dash
);
334 /* this is Rack 1.0.0-compatible, won't try to parse commas in XFF */
335 static void append_ip(struct clogger
*c
)
338 VALUE tmp
= rb_hash_aref(env
, g_HTTP_X_FORWARDED_FOR
);
341 /* can't be faked on any real server, so no escape */
342 tmp
= rb_hash_aref(env
, g_REMOTE_ADDR
);
348 rb_str_buf_append(c
->log_buf
, tmp
);
351 static void append_body_bytes_sent(struct clogger
*c
)
353 char buf
[(sizeof(off_t
) * 8) / 3 + 1];
354 const char *fmt
= sizeof(off_t
) == sizeof(long) ? "%ld" : "%lld";
355 int nr
= snprintf(buf
, sizeof(buf
), fmt
, c
->body_bytes_sent
);
357 assert(nr
> 0 && nr
< (int)sizeof(buf
));
358 rb_str_buf_cat(c
->log_buf
, buf
, nr
);
361 static void append_ts(struct clogger
*c
, const VALUE
*op
, struct timespec
*ts
)
363 char buf
[sizeof(".000000") + ((sizeof(ts
->tv_sec
) * 8) / 3)];
365 char *fmt
= RSTRING_PTR(op
[1]);
366 int ndiv
= NUM2INT(op
[2]);
367 int usec
= ts
->tv_nsec
/ 1000;
369 nr
= snprintf(buf
, sizeof(buf
), fmt
,
370 (int)ts
->tv_sec
, (int)(usec
/ ndiv
));
371 assert(nr
> 0 && nr
< (int)sizeof(buf
));
372 rb_str_buf_cat(c
->log_buf
, buf
, nr
);
375 static void append_request_time_fmt(struct clogger
*c
, const VALUE
*op
)
379 clock_gettime(hopefully_CLOCK_MONOTONIC
, &now
);
380 clock_diff(&now
, &c
->ts_start
);
381 append_ts(c
, op
, &now
);
384 static void append_time_fmt(struct clogger
*c
, const VALUE
*op
)
387 int r
= clock_gettime(CLOCK_REALTIME
, &now
);
389 if (unlikely(r
!= 0))
390 rb_sys_fail("clock_gettime(CLOCK_REALTIME)");
391 append_ts(c
, op
, &now
);
394 static void append_request_uri(struct clogger
*c
)
398 tmp
= rb_hash_aref(c
->env
, g_REQUEST_URI
);
400 tmp
= rb_hash_aref(c
->env
, g_PATH_INFO
);
402 rb_str_buf_append(c
->log_buf
, byte_xs(tmp
));
403 tmp
= rb_hash_aref(c
->env
, g_QUERY_STRING
);
404 if (!NIL_P(tmp
) && RSTRING_LEN(tmp
) != 0) {
405 rb_str_buf_append(c
->log_buf
, g_question_mark
);
406 rb_str_buf_append(c
->log_buf
, byte_xs(tmp
));
409 rb_str_buf_append(c
->log_buf
, byte_xs(tmp
));
413 static void append_request(struct clogger
*c
)
417 /* REQUEST_METHOD doesn't need escaping, Rack::Lint governs it */
418 tmp
= rb_hash_aref(c
->env
, g_REQUEST_METHOD
);
420 rb_str_buf_append(c
->log_buf
, tmp
);
422 rb_str_buf_append(c
->log_buf
, g_space
);
424 append_request_uri(c
);
426 /* HTTP_VERSION can be injected by malicious clients */
427 tmp
= rb_hash_aref(c
->env
, g_HTTP_VERSION
);
429 rb_str_buf_append(c
->log_buf
, g_space
);
430 rb_str_buf_append(c
->log_buf
, byte_xs(tmp
));
434 static void append_request_length(struct clogger
*c
)
436 VALUE tmp
= rb_hash_aref(c
->env
, g_rack_input
);
438 rb_str_buf_append(c
->log_buf
, g_dash
);
440 tmp
= rb_funcall(tmp
, size_id
, 0);
441 rb_str_buf_append(c
->log_buf
, rb_funcall(tmp
, to_s_id
, 0));
446 append_time(struct clogger
*c
, enum clogger_opcode op
, VALUE fmt
, VALUE buf
)
448 char *buf_ptr
= RSTRING_PTR(buf
);
449 size_t buf_size
= RSTRING_LEN(buf
) + 1; /* "\0" */
452 time_t t
= time(NULL
);
454 if (op
== CL_OP_TIME_LOCAL
)
455 localtime_r(&t
, &tmp
);
456 else if (op
== CL_OP_TIME_UTC
)
459 assert(0 && "unknown op");
461 nr
= strftime(buf_ptr
, buf_size
, RSTRING_PTR(fmt
), &tmp
);
462 assert(nr
< buf_size
&& "time format too small!");
463 rb_str_buf_cat(c
->log_buf
, buf_ptr
, nr
);
466 static void append_pid(struct clogger
*c
)
468 char buf
[(sizeof(pid_t
) * 8) / 3 + 1];
469 int nr
= snprintf(buf
, sizeof(buf
), "%d", (int)getpid());
471 assert(nr
> 0 && nr
< (int)sizeof(buf
));
472 rb_str_buf_cat(c
->log_buf
, buf
, nr
);
475 static void append_eval(struct clogger
*c
, VALUE str
)
478 VALUE rv
= rb_eval_string_protect(RSTRING_PTR(str
), &state
);
480 rv
= state
== 0 ? rb_obj_as_string(rv
) : g_dash
;
481 rb_str_buf_append(c
->log_buf
, rv
);
484 static void append_cookie(struct clogger
*c
, VALUE key
)
488 if (c
->cookies
== Qfalse
)
489 c
->cookies
= rb_hash_aref(c
->env
, g_rack_request_cookie_hash
);
491 if (NIL_P(c
->cookies
)) {
494 cookie
= rb_hash_aref(c
->cookies
, key
);
498 rb_str_buf_append(c
->log_buf
, cookie
);
501 static void append_request_env(struct clogger
*c
, VALUE key
)
503 VALUE tmp
= rb_hash_aref(c
->env
, key
);
505 tmp
= NIL_P(tmp
) ? g_dash
: byte_xs(tmp
);
506 rb_str_buf_append(c
->log_buf
, tmp
);
509 static void append_response(struct clogger
*c
, VALUE key
)
513 assert(rb_obj_is_kind_of(c
->headers
, cHeaderHash
) && "not HeaderHash");
515 v
= rb_funcall(c
->headers
, sq_brace_id
, 1, key
);
516 v
= NIL_P(v
) ? g_dash
: byte_xs(v
);
517 rb_str_buf_append(c
->log_buf
, v
);
520 static void special_var(struct clogger
*c
, enum clogger_special var
)
523 case CL_SP_body_bytes_sent
:
524 append_body_bytes_sent(c
);
532 case CL_SP_request_length
:
533 append_request_length(c
);
535 case CL_SP_response_length
:
536 if (c
->body_bytes_sent
== 0)
537 rb_str_buf_append(c
->log_buf
, g_dash
);
539 append_body_bytes_sent(c
);
547 case CL_SP_request_uri
:
548 append_request_uri(c
);
552 static VALUE
cwrite(struct clogger
*c
)
554 const VALUE ops
= c
->fmt_ops
;
555 const VALUE
*ary
= RARRAY_PTR(ops
);
556 long i
= RARRAY_LEN(ops
);
557 VALUE dst
= c
->log_buf
;
559 rb_str_set_len(dst
, 0);
561 for (; --i
>= 0; ary
++) {
562 const VALUE
*op
= RARRAY_PTR(*ary
);
563 enum clogger_opcode opcode
= FIX2INT(op
[0]);
567 rb_str_buf_append(dst
, op
[1]);
570 append_request_env(c
, op
[1]);
573 append_response(c
, op
[1]);
576 special_var(c
, FIX2INT(op
[1]));
579 append_eval(c
, op
[1]);
581 case CL_OP_TIME_LOCAL
:
583 append_time(c
, opcode
, op
[1], op
[2]);
585 case CL_OP_REQUEST_TIME
:
586 append_request_time_fmt(c
, op
);
589 append_time_fmt(c
, op
);
592 append_cookie(c
, op
[1]);
598 write_full(c
->fd
, RSTRING_PTR(dst
), RSTRING_LEN(dst
));
600 VALUE logger
= c
->logger
;
603 logger
= rb_hash_aref(c
->env
, g_rack_errors
);
604 rb_funcall(logger
, ltlt_id
, 1, dst
);
610 static void init_logger(struct clogger
*c
, VALUE path
)
614 if (!NIL_P(path
) && !NIL_P(c
->logger
))
615 rb_raise(rb_eArgError
, ":logger and :path are independent");
617 VALUE ab
= rb_str_new2("ab");
618 id
= rb_intern("open");
619 c
->logger
= rb_funcall(rb_cFile
, id
, 2, path
, ab
);
622 id
= rb_intern("sync=");
623 if (rb_respond_to(c
->logger
, id
))
624 rb_funcall(c
->logger
, id
, 1, Qtrue
);
626 id
= rb_intern("fileno");
627 if (rb_respond_to(c
->logger
, id
))
628 c
->fd
= raw_fd(rb_funcall(c
->logger
, id
, 0));
633 * Clogger.new(app, :logger => $stderr, :format => string) => obj
635 * Creates a new Clogger object that wraps +app+. +:logger+ may
636 * be any object that responds to the "<<" method with a string argument.
637 * If +:logger:+ is a string, it will be treated as a path to a
638 * File that will be opened in append mode.
640 static VALUE
clogger_init(int argc
, VALUE
*argv
, VALUE self
)
642 struct clogger
*c
= clogger_get(self
);
644 VALUE fmt
= rb_const_get(mFormat
, rb_intern("Common"));
646 rb_scan_args(argc
, argv
, "11", &c
->app
, &o
);
649 c
->reentrant
= -1; /* auto-detect */
651 if (TYPE(o
) == T_HASH
) {
654 tmp
= rb_hash_aref(o
, ID2SYM(rb_intern("path")));
655 c
->logger
= rb_hash_aref(o
, ID2SYM(rb_intern("logger")));
658 tmp
= rb_hash_aref(o
, ID2SYM(rb_intern("format")));
662 tmp
= rb_hash_aref(o
, ID2SYM(rb_intern("reentrant")));
672 rb_raise(rb_eArgError
, ":reentrant must be boolean");
677 c
->fmt_ops
= rb_funcall(self
, rb_intern("compile_format"), 2, fmt
, o
);
679 if (Qtrue
== rb_funcall(self
, rb_intern("need_response_headers?"),
682 if (Qtrue
== rb_funcall(self
, rb_intern("need_wrap_body?"),
689 static VALUE
body_iter_i(VALUE str
, VALUE memop
)
691 off_t
*len
= (off_t
*)memop
;
693 str
= rb_obj_as_string(str
);
694 *len
+= RSTRING_LEN(str
);
696 return rb_yield(str
);
699 static VALUE
body_close(struct clogger
*c
)
701 if (rb_respond_to(c
->body
, close_id
))
702 return rb_funcall(c
->body
, close_id
, 0);
708 * clogger.each { |part| socket.write(part) }
710 * Delegates the body#each call to the underlying +body+ object
711 * while tracking the number of bytes yielded. This will log
714 static VALUE
clogger_each(VALUE self
)
716 struct clogger
*c
= clogger_get(self
);
719 c
->body_bytes_sent
= 0;
720 rb_iterate(rb_each
, c
->body
, body_iter_i
, (VALUE
)&c
->body_bytes_sent
);
729 * Delegates the body#close call to the underlying +body+ object.
730 * This is only used when Clogger is wrapping the +body+ of a Rack
731 * response and should be automatically called by the web server.
733 static VALUE
clogger_close(VALUE self
)
735 struct clogger
*c
= clogger_get(self
);
737 return rb_ensure(body_close
, (VALUE
)c
, cwrite
, (VALUE
)c
);
741 static VALUE
clogger_fileno(VALUE self
)
743 struct clogger
*c
= clogger_get(self
);
745 return c
->fd
< 0 ? Qnil
: INT2NUM(c
->fd
);
748 static VALUE
ccall(struct clogger
*c
, VALUE env
)
752 clock_gettime(hopefully_CLOCK_MONOTONIC
, &c
->ts_start
);
755 rv
= rb_funcall(c
->app
, call_id
, 1, env
);
756 if (TYPE(rv
) == T_ARRAY
&& RARRAY_LEN(rv
) == 3) {
757 VALUE
*tmp
= RARRAY_PTR(rv
);
763 rv
= rb_ary_new4(3, tmp
);
765 ! rb_obj_is_kind_of(tmp
[1], cHeaderHash
)) {
766 c
->headers
= rb_funcall(cHeaderHash
, new_id
, 1, tmp
[1]);
767 rb_ary_store(rv
, 1, c
->headers
);
770 volatile VALUE tmp
= rb_inspect(rv
);
772 c
->status
= INT2FIX(500);
773 c
->headers
= c
->body
= rb_ary_new();
775 rb_raise(rb_eTypeError
,
776 "app response not a 3 element Array: %s",
785 * clogger.call(env) => [ status, headers, body ]
787 * calls the wrapped Rack application with +env+, returns the
788 * [status, headers, body ] tuplet required by Rack.
790 static VALUE
clogger_call(VALUE self
, VALUE env
)
792 struct clogger
*c
= clogger_get(self
);
795 env
= rb_check_convert_type(env
, T_HASH
, "Hash", "to_hash");
798 if (c
->reentrant
< 0) {
799 VALUE tmp
= rb_hash_aref(env
, g_rack_multithread
);
800 c
->reentrant
= Qfalse
== tmp
? 0 : 1;
803 self
= rb_obj_dup(self
);
804 c
= clogger_get(self
);
808 assert(!OBJ_FROZEN(rv
) && "frozen response array");
809 rb_ary_store(rv
, 2, self
);
820 static void duplicate_buffers(VALUE ops
)
822 long i
= RARRAY_LEN(ops
);
823 VALUE
*ary
= RARRAY_PTR(ops
);
825 for ( ; --i
>= 0; ary
++) {
826 VALUE
*op
= RARRAY_PTR(*ary
);
827 enum clogger_opcode opcode
= FIX2INT(op
[0]);
829 if (opcode
== CL_OP_TIME_LOCAL
|| opcode
== CL_OP_TIME_UTC
) {
830 Check_Type(op
[2], T_STRING
);
831 op
[2] = rb_str_dup(op
[2]);
832 rb_str_modify(op
[2]); /* trigger copy-on-write */
838 static VALUE
clogger_init_copy(VALUE clone
, VALUE orig
)
840 struct clogger
*a
= clogger_get(orig
);
841 struct clogger
*b
= clogger_get(clone
);
843 memcpy(b
, a
, sizeof(struct clogger
));
845 duplicate_buffers(b
->fmt_ops
);
/* define a frozen, GC-registered global String g_<var> holding <val> */
#define CONST_GLOBAL_STR2(var, val) do { \
	g_##var = rb_obj_freeze(rb_str_new(val, sizeof(val) - 1)); \
	rb_global_variable(&g_##var); \
} while (0)

/* shorthand for keys whose value equals their name (e.g. REMOTE_ADDR) */
#define CONST_GLOBAL_STR(val) CONST_GLOBAL_STR2(val, #val)
859 * clogger.respond_to?(:to_path) => true or false
860 * clogger.respond_to?(:close) => true
862 * used to delegate +:to_path+ checks for Rack webservers that optimize
863 * static file serving
865 static VALUE
respond_to(VALUE self
, VALUE method
)
867 struct clogger
*c
= clogger_get(self
);
868 ID id
= rb_to_id(method
);
872 return rb_respond_to(c
->body
, id
);
879 * used to proxy +:to_path+ method calls to the wrapped response body.
881 static VALUE
to_path(VALUE self
)
883 struct clogger
*c
= clogger_get(self
);
884 VALUE path
= rb_funcall(c
->body
, to_path_id
, 0);
888 const char *cpath
= StringValuePtr(path
);
890 /* try to avoid an extra path lookup */
891 if (rb_respond_to(c
->body
, to_io_id
))
892 rv
= fstat(my_fileno(c
->body
), &sb
);
894 * Rainbows! can use "/dev/fd/%u" in to_path output to avoid
895 * extra open() syscalls, too.
897 else if (sscanf(cpath
, "/dev/fd/%u", &devfd
) == 1)
898 rv
= fstat((int)devfd
, &sb
);
900 rv
= stat(cpath
, &sb
);
903 * calling this method implies the web server will bypass
904 * the each method where body_bytes_sent is calculated,
905 * so we stat and set that value here.
907 c
->body_bytes_sent
= rv
== 0 ? sb
.st_size
: 0;
915 * used to proxy +:to_io+ method calls to the wrapped response body.
917 static VALUE
to_io(VALUE self
)
919 struct clogger
*c
= clogger_get(self
);
921 VALUE io
= rb_convert_type(c
->body
, T_FILE
, "IO", "to_io");
923 if (fstat(my_fileno(io
), &sb
) == 0)
924 c
->body_bytes_sent
= sb
.st_size
;
929 void Init_clogger_ext(void)
935 ltlt_id
= rb_intern("<<");
936 call_id
= rb_intern("call");
937 each_id
= rb_intern("each");
938 close_id
= rb_intern("close");
939 to_i_id
= rb_intern("to_i");
940 to_s_id
= rb_intern("to_s");
941 size_id
= rb_intern("size");
942 sq_brace_id
= rb_intern("[]");
943 new_id
= rb_intern("new");
944 to_path_id
= rb_intern("to_path");
945 to_io_id
= rb_intern("to_io");
946 respond_to_id
= rb_intern("respond_to?");
947 cClogger
= rb_define_class("Clogger", rb_cObject
);
948 mFormat
= rb_define_module_under(cClogger
, "Format");
949 rb_define_alloc_func(cClogger
, clogger_alloc
);
950 rb_define_method(cClogger
, "initialize", clogger_init
, -1);
951 rb_define_method(cClogger
, "initialize_copy", clogger_init_copy
, 1);
952 rb_define_method(cClogger
, "call", clogger_call
, 1);
953 rb_define_method(cClogger
, "each", clogger_each
, 0);
954 rb_define_method(cClogger
, "close", clogger_close
, 0);
955 rb_define_method(cClogger
, "fileno", clogger_fileno
, 0);
956 rb_define_method(cClogger
, "wrap_body?", clogger_wrap_body
, 0);
957 rb_define_method(cClogger
, "reentrant?", clogger_reentrant
, 0);
958 rb_define_method(cClogger
, "to_path", to_path
, 0);
959 rb_define_method(cClogger
, "to_io", to_io
, 0);
960 rb_define_method(cClogger
, "respond_to?", respond_to
, 1);
961 CONST_GLOBAL_STR(REMOTE_ADDR
);
962 CONST_GLOBAL_STR(HTTP_X_FORWARDED_FOR
);
963 CONST_GLOBAL_STR(REQUEST_METHOD
);
964 CONST_GLOBAL_STR(PATH_INFO
);
965 CONST_GLOBAL_STR(QUERY_STRING
);
966 CONST_GLOBAL_STR(REQUEST_URI
);
967 CONST_GLOBAL_STR(HTTP_VERSION
);
968 CONST_GLOBAL_STR2(rack_errors
, "rack.errors");
969 CONST_GLOBAL_STR2(rack_input
, "rack.input");
970 CONST_GLOBAL_STR2(rack_multithread
, "rack.multithread");
971 CONST_GLOBAL_STR2(dash
, "-");
972 CONST_GLOBAL_STR2(space
, " ");
973 CONST_GLOBAL_STR2(question_mark
, "?");
974 CONST_GLOBAL_STR2(rack_request_cookie_hash
, "rack.request.cookie_hash");
976 tmp
= rb_const_get(rb_cObject
, rb_intern("Rack"));
977 tmp
= rb_const_get(tmp
, rb_intern("Utils"));
978 cHeaderHash
= rb_const_get(tmp
, rb_intern("HeaderHash"));