2 * Copyright (c) 2009 Eric Wong (all bugs are Eric's fault)
3 * Copyright (c) 2005 Zed A. Shaw
4 * You can redistribute it and/or modify it under the same terms as Ruby 1.8 or
5 * the GPLv2+ (GPLv3+ preferred)
11 #include <sys/types.h>
12 #include "common_field_optimization.h"
13 #include "global_variables.h"
15 #include "epollexclusive.h"
17 void init_unicorn_httpdate(void);
/*
 * Per-parser state flags stored in hp->flags, one bit each; tested,
 * set and cleared via the HP_FL_TEST/SET/UNSET/ALL macros below.
 */
19 #define UH_FL_CHUNKED 0x1
20 #define UH_FL_HASBODY 0x2
21 #define UH_FL_INBODY 0x4
22 #define UH_FL_HASTRAILER 0x8
23 #define UH_FL_INTRAILER 0x10
24 #define UH_FL_INCHUNK 0x20
25 #define UH_FL_REQEOF 0x40
26 #define UH_FL_KAVERSION 0x80
27 #define UH_FL_HASHEADER 0x100
28 #define UH_FL_TO_CLEAR 0x200
29 #define UH_FL_RESSTART 0x400 /* for check_client_connection */
30 #define UH_FL_HIJACK 0x800
31 #define UH_FL_RES_CHUNK_VER (1U << 12)
32 #define UH_FL_RES_CHUNK_METHOD (1U << 13)
34 /* all of these flags need to be set for keepalive to be supported */
35 #define UH_FL_KEEPALIVE (UH_FL_KAVERSION | UH_FL_REQEOF | UH_FL_HASHEADER)
37 /* we can only chunk responses for non-HEAD HTTP/1.1 requests */
38 #define UH_FL_RES_CHUNKABLE (UH_FL_RES_CHUNK_VER | UH_FL_RES_CHUNK_METHOD)
/*
 * Process-wide cap on buffered HTTP header bytes; exceeding it makes
 * HttpParser_parse raise e413 (RequestEntityTooLargeError).
 * NOTE(review): some source lines are elided in this view.
 */
40 static unsigned int MAX_HEADER_LEN = 1024 * (80 + 32); /* same as Mongrel */
42 /* this is only intended for use with Rainbows! */
/* Ruby API: HttpParser.max_header_len = len -- returns the new limit */
43 static VALUE set_maxhdrlen(VALUE self, VALUE len)
45 return UINT2NUM(MAX_HEADER_LEN = NUM2UINT(len));
48 /* keep this small for other servers (e.g. yahns) since every client has one */
/*
 * NOTE(review): interior of "struct http_parser"; the struct header and
 * several fields (flags, mark, offset, env, buf, len, ...) are elided here.
 */
50 int cs; /* Ragel internal state */
54 union { /* these 2 fields don't nest */
59 unsigned int field_len; /* only used during header processing */
60 unsigned int dest_offset; /* only used during body processing */
64 VALUE cont; /* Qfalse: unset, Qnil: ignored header, T_STRING: append */
/* interned Ruby method IDs, resolved in Init_unicorn_http below */
71 static ID id_set_backtrace, id_is_chunked_p;
72 static VALUE cHttpParser;
74 static void finalize_header(struct http_parser *hp);
/*
 * Raise a Ruby exception of class +klass+ with message +msg+, giving it
 * an empty backtrace array (avoids the cost of backtrace generation for
 * routine client errors).  Presumably rb_exc_raise(exc) follows on an
 * elided line -- TODO confirm against full source.
 */
76 static void parser_raise(VALUE klass, const char *msg)
78 VALUE exc = rb_exc_new2(klass, msg);
79 VALUE bt = rb_ary_new();
81 rb_funcall(exc, id_set_backtrace, 1, bt);
/*
 * Checked narrowing of unsigned long -> unsigned int: raises RangeError
 * if the value does not round-trip (only possible on LP64 where the two
 * types differ in width; the sizeof test lets the compiler elide the
 * check on ILP32).  Returns i on an elided line.
 */
85 static inline unsigned int ulong2uint(unsigned long n)
87 unsigned int i = (unsigned int)n;
89 if (sizeof(unsigned int) != sizeof(unsigned long)) {
90 if ((unsigned long)i != n) {
91 rb_raise(rb_eRangeError, "too large to be 32-bit uint: %lu", n);
/*
 * Buffer-offset helpers used inside the Ragel actions below; they rely
 * on the local variables p/pe/buffer and the hp pointer being in scope.
 * Offsets (not pointers) are stored in hp so parsing can resume after
 * the underlying Ruby string buffer is reallocated/moved.
 */
97 #define REMAINING (unsigned long)(pe - p)
98 #define LEN(AT, FPC) (ulong2uint(FPC - buffer) - hp->AT)
99 #define MARK(M,FPC) (hp->M = ulong2uint((FPC) - buffer))
100 #define PTR_TO(F) (buffer + hp->F)
101 #define STR_NEW(M,FPC) rb_str_new(PTR_TO(M), LEN(M, FPC))
102 #define STRIPPED_STR_NEW(M,FPC) stripped_str_new(PTR_TO(M), LEN(M, FPC))
/* flag helpers: token-paste the UH_FL_ prefix onto the short flag name */
104 #define HP_FL_TEST(hp,fl) ((hp)->flags & (UH_FL_##fl))
105 #define HP_FL_SET(hp,fl) ((hp)->flags |= (UH_FL_##fl))
106 #define HP_FL_UNSET(hp,fl) ((hp)->flags &= ~(UH_FL_##fl))
107 #define HP_FL_ALL(hp,fl) (HP_FL_TEST(hp, fl) == (UH_FL_##fl))
/* true if c is HTTP linear whitespace (SP or HTAB), RFC 7230 "OWS" */
109 static int is_lws(char c)
111 return (c == ' ' || c == '\t');
/*
 * Returns a new Ruby string of str[0..len) with trailing linear
 * whitespace stripped.  "end" is declared on an elided line; the loop
 * leaves it at the last non-LWS index (-1 if all LWS, yielding "").
 */
114 static VALUE stripped_str_new(const char *str, long len)
118 for (end = len - 1; end >= 0 && is_lws(str[end]); end--);
120 return rb_str_new(str, end + 1);
/*
124 * handles values of the "Connection:" header, keepalive is implied
125 * for HTTP/1.1 but needs to be explicitly enabled with HTTP/1.0
126 * Additionally, we require GET/HEAD requests to support keepalive.
 */
128 static void hp_keepalive_connection(struct http_parser *hp, VALUE val)
130 if (STR_CSTR_CASE_EQ(val, "keep-alive")) {
131 /* basically have HTTP/1.0 masquerade as HTTP/1.1+ */
132 HP_FL_SET(hp, KAVERSION);
133 } else if (STR_CSTR_CASE_EQ(val, "close")) {
/*
135 * it doesn't matter what HTTP version or request method we have,
136 * if a client says "Connection: close", we disable keepalive
 */
138 HP_FL_UNSET(hp, KAVERSION);
/* final else branch (elided): unknown Connection: values are ignored */
/*
141 * client could've sent anything, ignore it for now. Maybe
142 * "HP_FL_UNSET(hp, KAVERSION);" just in case?
143 * Raising an exception might be too mean...
 */
/*
 * Store REQUEST_METHOD in the Rack env.  Any method other than HEAD
 * permits chunked responses (RES_CHUNK_METHOD); HEAD responses must
 * not have a body, hence the exclusion.
 */
149 request_method(struct http_parser *hp, const char *ptr, size_t len)
151 VALUE v = rb_str_new(ptr, len);
153 if (len != 4 || memcmp(ptr, "HEAD", 4))
154 HP_FL_SET(hp, RES_CHUNK_METHOD);
156 rb_hash_aset(hp->env, g_request_method, v);
/*
 * Record the request's HTTP version: sets HASHEADER (not HTTP/0.9),
 * enables keepalive + chunked responses for HTTP/1.1, and stores the
 * version string under both SERVER_PROTOCOL and HTTP_VERSION.
 * NOTE(review): declaration of v and some branch lines are elided.
 */
160 http_version(struct http_parser *hp, const char *ptr, size_t len)
164 HP_FL_SET(hp, HASHEADER);
166 if (CONST_MEM_EQ("HTTP/1.1", ptr, len)) {
167 /* HTTP/1.1 implies keepalive unless "Connection: close" is set */
168 HP_FL_SET(hp, KAVERSION);
169 HP_FL_SET(hp, RES_CHUNK_VER);
171 } else if (CONST_MEM_EQ("HTTP/1.0", ptr, len)) {
174 v = rb_str_new(ptr, len);
176 rb_hash_aset(hp->env, g_server_protocol, v);
177 rb_hash_aset(hp->env, g_http_version, v);
/* headers like Content-Length/Transfer-Encoding are invalid in trailers */
180 static inline void hp_invalid_if_trailer(struct http_parser *hp)
182 if (HP_FL_TEST(hp, INTRAILER))
183 parser_raise(eHttpParserError, "invalid Trailer");
/*
 * Append an RFC 2616 header continuation line (a line starting with
 * SP/HTAB) to the previously-parsed header value held in hp->cont.
 * Raises if there was no preceding header; silently returns when the
 * preceding header is deliberately ignored (hp->cont == Qnil).
 * NOTE(review): declarations of cont_len/vptr/end and the tab
 * normalization itself are on elided lines.
 */
186 static void write_cont_value(struct http_parser *hp,
187 char *buffer, const char *p)
191 long len = LEN(mark, p);
194 if (hp->cont == Qfalse)
195 parser_raise(eHttpParserError, "invalid continuation line");
197 return; /* we're ignoring this header (probably Host:) */
199 assert(TYPE(hp->cont) == T_STRING && "continuation line is not a string");
200 assert(hp->mark > 0 && "impossible continuation line offset");
205 cont_len = RSTRING_LEN(hp->cont);
212 /* normalize tab to space */
214 assert((' ' == *vptr || '\t' == *vptr) && "invalid leading white space");
/* strip trailing LWS, then append the remainder to the stored value */
218 for (end = len - 1; end >= 0 && is_lws(vptr[end]); end--);
219 rb_str_buf_cat(hp->cont, vptr, end + 1);
/*
 * Does the Transfer-Encoding value +v+ end with "chunked"?  Fast path
 * for the exact (case-insensitive) match; otherwise defers to the Ruby
 * HttpParser.is_chunked? helper for comma-delimited encoding lists.
 */
222 static int is_chunked(VALUE v)
224 /* common case first */
225 if (STR_CSTR_CASE_EQ(v, "chunked"))
/*
229 * call Ruby function in unicorn/http_request.rb to deal with unlikely
230 * comma-delimited case
 */
232 return rb_funcall(cHttpParser, id_is_chunked_p, 1, v) != Qfalse;
/*
 * Finalize one "Field: value" header pair: memoize or intern the field
 * name, special-case Connection/Content-Length/Transfer-Encoding/
 * Trailer/Host, and store (or comma-append to) the value in hp->env.
 * hp->cont is left pointing at the stored value (or Qnil when ignored)
 * so write_cont_value can extend it on continuation lines.
 * NOTE(review): several lines (declarations of v/e, branch bodies,
 * braces) are elided in this view.
 */
235 static void write_value(struct http_parser *hp,
236 const char *buffer, const char *p)
238 VALUE f = find_common_field(PTR_TO(start.field), hp->s.field_len);
242 VALIDATE_MAX_LENGTH(LEN(mark, p), FIELD_VALUE);
243 v = LEN(mark, p) == 0 ? rb_str_buf_new(128) : STRIPPED_STR_NEW(mark, p);
245 const char *field = PTR_TO(start.field);
246 size_t flen = hp->s.field_len;
248 VALIDATE_MAX_LENGTH(flen, FIELD_NAME);
/*
251 * ignore "Version" headers since they conflict with the HTTP_VERSION
 * key the parser itself sets in hp->env
 */
254 if (CONST_MEM_EQ("VERSION", field, flen)) {
258 f = uncommon_field(field, flen);
259 } else if (f == g_http_connection) {
260 hp_keepalive_connection(hp, v);
261 } else if (f == g_content_length && !HP_FL_TEST(hp, CHUNKED)) {
263 parser_raise(eHttpParserError, "Content-Length already set");
264 hp->len.content = parse_length(RSTRING_PTR(v), RSTRING_LEN(v));
265 if (hp->len.content < 0)
266 parser_raise(eHttpParserError, "invalid Content-Length");
267 if (hp->len.content != 0)
268 HP_FL_SET(hp, HASBODY);
269 hp_invalid_if_trailer(hp);
270 } else if (f == g_http_transfer_encoding) {
272 if (HP_FL_TEST(hp, CHUNKED))
/*
275 * A sender MUST NOT apply chunked more than once to a message body
276 * (i.e., chunking an already chunked message is not allowed).
 */
278 parser_raise(eHttpParserError, "Transfer-Encoding double chunked");
280 HP_FL_SET(hp, CHUNKED);
281 HP_FL_SET(hp, HASBODY);
283 /* RFC 7230 3.3.3, 3: favor chunked if Content-Length exists */
285 } else if (HP_FL_TEST(hp, CHUNKED)) {
/*
287 * RFC 7230 3.3.3, point 3 states:
288 * If a Transfer-Encoding header field is present in a request and
289 * the chunked transfer coding is not the final encoding, the
290 * message body length cannot be determined reliably; the server
291 * MUST respond with the 400 (Bad Request) status code and then
292 * close the connection.
 */
294 parser_raise(eHttpParserError, "invalid Transfer-Encoding");
296 hp_invalid_if_trailer(hp);
297 } else if (f == g_http_trailer) {
298 HP_FL_SET(hp, HASTRAILER);
299 hp_invalid_if_trailer(hp);
301 assert(TYPE(f) == T_STRING && "memoized object is not a string");
/* first occurrence: store value; repeat: comma-join per RFC 7230 */
305 e = rb_hash_aref(hp->env, f);
307 hp->cont = rb_hash_aset(hp->env, f, v);
308 } else if (f == g_http_host) {
/*
310 * ignored, absolute URLs in REQUEST_URI take precedence over
311 * the Host: header (ref: rfc 2616, section 5.2.1)
 */
315 rb_str_buf_cat(e, ",", 1);
316 hp->cont = rb_str_buf_append(e, v);
# Ragel machine actions: each fires as the state machine consumes request
# bytes; fpc/fc are Ragel's current-pointer/current-char, and the MARK/LEN
# macros convert pointers to offsets stored in hp.
# NOTE(review): many lines of this Ragel section are elided in this view.
325 action mark {MARK(mark, fpc); }
327 action start_field { MARK(start.field, fpc); }
328 action snake_upcase_field { snake_upcase_char(deconst(fpc)); }
329 action downcase_char { downcase_char(deconst(fpc)); }
330 action write_field { hp->s.field_len = LEN(start.field, fpc); }
331 action start_value { MARK(mark, fpc); }
332 action write_value { write_value(hp, buffer, fpc); }
333 action write_cont_value { write_cont_value(hp, buffer, fpc); }
334 action request_method { request_method(hp, PTR_TO(mark), LEN(mark, fpc)); }
336 rb_hash_aset(hp->env, g_rack_url_scheme, STR_NEW(mark, fpc));
338 action host { rb_hash_aset(hp->env, g_http_host, STR_NEW(mark, fpc)); }
342 VALIDATE_MAX_URI_LENGTH(LEN(mark, fpc), REQUEST_URI);
343 str = rb_hash_aset(hp->env, g_request_uri, STR_NEW(mark, fpc));
345 * "OPTIONS * HTTP/1.1\r\n" is a valid request, but we can't have '*'
346 * in REQUEST_PATH or PATH_INFO or else Rack::Lint will complain
348 if (STR_CSTR_EQ(str, "*")) {
349 str = rb_str_new(NULL, 0);
350 rb_hash_aset(hp->env, g_path_info, str);
351 rb_hash_aset(hp->env, g_request_path, str);
355 VALIDATE_MAX_URI_LENGTH(LEN(mark, fpc), FRAGMENT);
356 rb_hash_aset(hp->env, g_fragment, STR_NEW(mark, fpc));
358 action start_query {MARK(start.query, fpc); }
359 action query_string {
360 VALIDATE_MAX_URI_LENGTH(LEN(start.query, fpc), QUERY_STRING);
361 rb_hash_aset(hp->env, g_query_string, STR_NEW(start.query, fpc));
363 action http_version { http_version(hp, PTR_TO(mark), LEN(mark, fpc)); }
364 action request_path {
367 VALIDATE_MAX_URI_LENGTH(LEN(mark, fpc), REQUEST_PATH);
368 val = rb_hash_aset(hp->env, g_request_path, STR_NEW(mark, fpc));
370 /* rack says PATH_INFO must start with "/" or be empty */
371 if (!STR_CSTR_EQ(val, "*"))
372 rb_hash_aset(hp->env, g_path_info, val);
374 action add_to_chunk_size {
375 hp->len.chunk = step_incr(hp->len.chunk, fc, 16);
376 if (hp->len.chunk < 0)
377 parser_raise(eHttpParserError, "invalid chunk size");
# end-of-headers action (name elided): jump to the ChunkedBody machine
# for chunked requests, otherwise mark the request EOF
382 cs = http_parser_first_final;
383 if (HP_FL_TEST(hp, HASBODY)) {
384 HP_FL_SET(hp, INBODY);
385 if (HP_FL_TEST(hp, CHUNKED))
386 cs = http_parser_en_ChunkedBody;
388 HP_FL_SET(hp, REQEOF);
389 assert(!HP_FL_TEST(hp, CHUNKED) && "chunked encoding without body!");
392 * go back to Ruby so we can call the Rack application, we'll reenter
393 * the parser iff the body needs to be processed.
398 action end_trailers {
399 cs = http_parser_first_final;
403 action end_chunked_body {
404 HP_FL_SET(hp, INTRAILER);
405 cs = http_parser_en_Trailers;
407 assert(p <= pe && "buffer overflow after chunked body");
# copy chunk payload into the destination string; if the chunk extends
# past this buffer, set INCHUNK so the next execute resumes via the
# skip_chunk_data_hack label
411 action skip_chunk_data {
412 skip_chunk_data_hack: {
413 size_t nr = MIN((size_t)hp->len.chunk, REMAINING);
414 memcpy(RSTRING_PTR(hp->cont) + hp->s.dest_offset, fpc, nr);
415 hp->s.dest_offset += nr;
418 assert(hp->len.chunk >= 0 && "negative chunk length");
419 if ((size_t)hp->len.chunk > REMAINING) {
420 HP_FL_SET(hp, INCHUNK);
428 include unicorn_http_common "unicorn_http_common.rl";
/*
 * Reset parser state for a fresh request; most fields are zeroed on
 * elided lines before this explicit Qfalse assignment.
 */
434 static void http_parser_init(struct http_parser *hp)
443 hp->cont = Qfalse; /* zero on MRI, should be optimized away by above */
/*
 * Run the Ragel machine over buffer[hp->offset..len), resuming mid-chunk
 * if the previous call stopped inside chunk data (INCHUNK + goto hack).
 * Updates hp->offset so a later call with a grown buffer continues where
 * this one stopped.  NOTE(review): setup of p/pe/cs and the %%write exec
 * directive are on elided lines.
 */
450 http_parser_execute(struct http_parser *hp, char *buffer, size_t len)
454 size_t off = hp->offset;
456 if (cs == http_parser_first_final)
459 assert(off <= len && "offset past end of buffer");
464 assert((void *)(pe - p) == (void *)(len - off) &&
465 "pointers aren't same distance");
467 if (HP_FL_TEST(hp, INCHUNK)) {
468 HP_FL_UNSET(hp, INCHUNK);
469 goto skip_chunk_data_hack;
472 post_exec: /* "_out:" also goes here */
473 if (hp->cs != http_parser_error)
475 hp->offset = ulong2uint(p - buffer);
477 assert(p <= pe && "buffer overflow after parsing execute");
478 assert(hp->offset <= len && "offset longer than length");
/*
 * GC mark callback for the TypedData wrapper: keeps VALUEs reachable
 * from the C struct alive (hp->cont here; buf/env presumably marked on
 * elided lines -- TODO confirm).
 */
481 static void hp_mark(void *ptr)
483 struct http_parser *hp = ptr;
487 rb_gc_mark(hp->cont);
/* memsize callback for ObjectSpace.memsize_of: struct only, no heap data */
490 static size_t hp_memsize(const void *ptr)
492 return sizeof(struct http_parser);
/* TypedData descriptor wiring GC mark/free/memsize for http_parser */
495 static const rb_data_type_t hp_type = {
497 { hp_mark, RUBY_TYPED_DEFAULT_FREE, hp_memsize, /* reserved */ },
498 /* parent, data, [ flags ] */
/* extract the http_parser struct from a Ruby HttpParser instance */
501 static struct http_parser *data_get(VALUE self)
503 struct http_parser *hp;
505 TypedData_Get_Struct(self, struct http_parser, &hp_type, hp);
506 assert(hp && "failed to extract http_parser struct");
/*
511 * set rack.url_scheme to "https" or "http", no others are allowed by Rack
512 * this resembles the Rack::Request#scheme method as of rack commit
513 * 35bb5ba6746b5d346de9202c004cc926039650c7
 *
 * Also selects the default *server_port (g_port_443 for https) used by
 * set_server_vars when the Host: header carries no explicit port.
 * NOTE(review): several branch/brace lines are elided in this view.
 */
515 static void set_url_scheme(VALUE env, VALUE *server_port)
517 VALUE scheme = rb_hash_aref(env, g_rack_url_scheme);
/*
521 * would anybody be horribly opposed to removing the X-Forwarded-SSL
522 * and X-Forwarded-Proto handling from this parser? We've had it
523 * forever and nobody has said anything against it, either.
524 * Anyways, please send comments to our public mailing list:
525 * unicorn-public@yhbt.net (no HTML mail, no subscription necessary)
 */
527 scheme = rb_hash_aref(env, g_http_x_forwarded_ssl);
528 if (!NIL_P(scheme) && STR_CSTR_EQ(scheme, "on")) {
529 *server_port = g_port_443;
532 scheme = rb_hash_aref(env, g_http_x_forwarded_proto);
/* X-Forwarded-Proto may be comma-delimited; only the "https" prefix counts */
536 long len = RSTRING_LEN(scheme);
537 if (len >= 5 && !memcmp(RSTRING_PTR(scheme), "https", 5)) {
540 *server_port = g_port_443;
546 rb_hash_aset(env, g_rack_url_scheme, scheme);
547 } else if (STR_CSTR_EQ(scheme, "https")) {
548 *server_port = g_port_443;
550 assert(*server_port == g_port_80 && "server_port not set");
/*
555 * Parse and set the SERVER_NAME and SERVER_PORT variables
556 * Not supporting X-Forwarded-Host/X-Forwarded-Port in here since
557 * anybody who needs them is using an unsupported configuration and/or
558 * incompetent. Rack::Request will handle X-Forwarded-{Port,Host} just
 * fine (continuation elided).  Splits "host[:port]" and bracketed
 * IPv6 "[addr]:port" forms; falls back to g_localhost and the scheme
 * default port chosen by set_url_scheme.
 * NOTE(review): declarations of colon/rbracket and some braces elided.
 */
561 static void set_server_vars(VALUE env, VALUE *server_port)
563 VALUE server_name = g_localhost;
564 VALUE host = rb_hash_aref(env, g_http_host);
567 char *host_ptr = RSTRING_PTR(host);
568 long host_len = RSTRING_LEN(host);
571 if (*host_ptr == '[') { /* ipv6 address format */
572 char *rbracket = memchr(host_ptr + 1, ']', host_len - 1);
575 colon = (rbracket[1] == ':') ? rbracket + 1 : NULL;
577 colon = memchr(host_ptr + 1, ':', host_len - 1);
579 colon = memchr(host_ptr, ':', host_len);
583 long port_start = colon - host_ptr + 1;
585 server_name = rb_str_substr(host, 0, colon - host_ptr);
586 if ((host_len - port_start) > 0)
587 *server_port = rb_str_substr(host, port_start, host_len);
592 rb_hash_aset(env, g_server_name, server_name);
593 rb_hash_aset(env, g_server_port, *server_port);
/*
 * Fill in the derived Rack env keys once all headers are parsed:
 * rack.url_scheme, SERVER_NAME/SERVER_PORT, SERVER_PROTOCOL for
 * header-less HTTP/0.9 requests, and a mandatory (possibly empty)
 * QUERY_STRING.
 */
596 static void finalize_header(struct http_parser *hp)
598 VALUE server_port = g_port_80;
600 set_url_scheme(hp->env, &server_port);
601 set_server_vars(hp->env, &server_port);
603 if (!HP_FL_TEST(hp, HASHEADER))
604 rb_hash_aset(hp->env, g_server_protocol, g_http_09);
606 /* rack requires QUERY_STRING */
607 if (NIL_P(rb_hash_aref(hp->env, g_query_string)))
608 rb_hash_aset(hp->env, g_query_string, rb_str_new(NULL, 0));
/* allocator: wrap a zeroed http_parser struct in a TypedData object */
611 static VALUE HttpParser_alloc(VALUE klass)
613 struct http_parser *hp;
615 return TypedData_Make_Struct(klass, struct http_parser, &hp_type, hp);
/*
 * call-seq:
620 * parser.new => parser
 *
622 * Creates a new parser.
 * Allocates fresh buf/env Ruby objects; returns self on an elided line.
 */
624 static VALUE HttpParser_init(VALUE self)
626 struct http_parser *hp = data_get(self);
628 http_parser_init(hp);
629 hp->buf = rb_str_new(NULL, 0);
630 hp->env = rb_hash_new();
637 * parser.clear => parser
639 * Resets the parser to its initial state so that you can reuse it
640 * rather than making new ones.
/*
 * Reset for the next request on the same connection: reuses buf/env
 * unless the connection was hijacked (the app may still hold them),
 * in which case fall back to full reinitialization.
 */
642 static VALUE HttpParser_clear(VALUE self)
644 struct http_parser *hp = data_get(self);
646 /* we can't safely reuse .buf and .env if hijacked */
647 if (HP_FL_TEST(hp, HIJACK))
648 return HttpParser_init(self);
650 http_parser_init(hp);
651 rb_hash_clear(hp->env);
/*
 * Drop the first +nr+ bytes of the Ruby string +str+ in place
 * (memmove the tail down, then shrink the length).  Early-return for
 * nr == 0 and the len -= nr adjustment are on elided lines.
 */
656 static void advance_str(VALUE str, off_t nr)
658 long len = RSTRING_LEN(str);
665 assert(nr <= len && "trying to advance past end of buffer");
667 if (len > 0) /* unlikely, len is usually 0 */
668 memmove(RSTRING_PTR(str), RSTRING_PTR(str) + nr, len);
669 rb_str_set_len(str, len);
/*
 * call-seq:
674 * parser.content_length => nil or Integer
 *
676 * Returns the number of bytes left to run through HttpParser#filter_body.
677 * This will initially be the value of the "Content-Length" HTTP header
678 * after header parsing is complete and will decrease in value as
679 * HttpParser#filter_body is called for each chunk. This should return
680 * zero for requests with no body.
 *
682 * This will return nil on "Transfer-Encoding: chunked" requests.
 */
684 static VALUE HttpParser_content_length(VALUE self)
686 struct http_parser *hp = data_get(self);
688 return HP_FL_TEST(hp, CHUNKED) ? Qnil : OFFT2NUM(hp->len.content);
/*
692 * Document-method: parse
 * call-seq:
694 * parser.parse => env or nil
 *
696 * Takes a Hash and a String of data, parses the String of data filling
697 * in the Hash returning the Hash if parsing is finished, nil otherwise
698 * When returning the env Hash, it may modify data to point to where
699 * body processing should begin.
 *
701 * Raises HttpParserError if there are parsing errors.
 * NOTE(review): the finalize_header call and return statements are on
 * elided lines in this view.
 */
703 static VALUE HttpParser_parse(VALUE self)
705 struct http_parser *hp = data_get(self);
706 VALUE data = hp->buf;
708 if (HP_FL_TEST(hp, TO_CLEAR))
709 HttpParser_clear(self);
711 http_parser_execute(hp, RSTRING_PTR(data), RSTRING_LEN(data));
712 if (hp->offset > MAX_HEADER_LEN)
713 parser_raise(e413, "HTTP header is too large");
/* headers done: consume them from buf so body bytes are at the front */
715 if (hp->cs == http_parser_first_final ||
716 hp->cs == http_parser_en_ChunkedBody) {
717 advance_str(data, hp->offset + 1);
719 if (HP_FL_TEST(hp, INTRAILER))
720 HP_FL_SET(hp, REQEOF);
725 if (hp->cs == http_parser_error)
726 parser_raise(eHttpParserError, "Invalid HTTP format, parsing fails.");
/*
732 * Document-method: parse
 * call-seq:
734 * parser.add_parse(buffer) => env or nil
 *
736 * adds the contents of +buffer+ to the internal buffer and attempts to
737 * continue parsing. Returns the +env+ Hash on success or nil if more
 * data is needed (continuation elided).
 *
740 * Raises HttpParserError if there are parsing errors.
 */
742 static VALUE HttpParser_add_parse(VALUE self, VALUE buffer)
744 struct http_parser *hp = data_get(self);
746 Check_Type(buffer, T_STRING);
747 rb_str_buf_append(hp->buf, buffer);
749 return HttpParser_parse(self);
/*
753 * Document-method: trailers
 * call-seq:
755 * parser.trailers(req, data) => req or nil
 *
757 * This is an alias for HttpParser#headers
 */
/*
761 * Document-method: headers
 * NOTE(review): assignments of env/buf into hp are on elided lines.
 */
763 static VALUE HttpParser_headers(VALUE self, VALUE env, VALUE buf)
765 struct http_parser *hp = data_get(self);
770 return HttpParser_parse(self);
/* chunked body fully consumed: final state reached or inside trailers */
773 static int chunked_eof(struct http_parser *hp)
775 return ((hp->cs == http_parser_first_final) || HP_FL_TEST(hp, INTRAILER));
/*
 * call-seq:
780 * parser.body_eof? => true or false
 *
782 * Detects if we're done filtering the body or not. This can be used
783 * to detect when to stop calling HttpParser#filter_body.
 */
785 static VALUE HttpParser_body_eof(VALUE self)
787 struct http_parser *hp = data_get(self);
789 if (HP_FL_TEST(hp, CHUNKED))
790 return chunked_eof(hp) ? Qtrue : Qfalse;
792 return hp->len.content == 0 ? Qtrue : Qfalse;
/*
 * call-seq:
797 * parser.keepalive? => true or false
 *
799 * This should be used to detect if a request can really handle
800 * keepalives and pipelining. Currently, the rules are:
 *
802 * 1. MUST be a GET or HEAD request
803 * 2. MUST be HTTP/1.1 +or+ HTTP/1.0 with "Connection: keep-alive"
804 * 3. MUST NOT have "Connection: close" set
 */
806 static VALUE HttpParser_keepalive(VALUE self)
808 struct http_parser *hp = data_get(self);
810 return HP_FL_ALL(hp, KEEPALIVE) ? Qtrue : Qfalse;
/* true if we may send a chunked response: non-HEAD HTTP/1.1 request */
814 static VALUE chunkable_response_p(VALUE self)
816 const struct http_parser *hp = data_get(self);
818 return HP_FL_ALL(hp, RES_CHUNKABLE) ? Qtrue : Qfalse;
/*
 * call-seq:
823 * parser.next? => true or false
 *
825 * Exactly like HttpParser#keepalive?, except it will reset the internal
826 * parser state on next parse if it returns true.
 */
828 static VALUE HttpParser_next(VALUE self)
830 struct http_parser *hp = data_get(self);
832 if (HP_FL_ALL(hp, KEEPALIVE)) {
/* defer the actual reset to the next #parse call */
833 HP_FL_SET(hp, TO_CLEAR);
/*
 * call-seq:
841 * parser.headers? => true or false
 *
843 * This should be used to detect if a request has headers (and if
844 * the response will have headers as well). HTTP/0.9 requests
845 * should return false, all subsequent HTTP versions will return true
 */
847 static VALUE HttpParser_has_headers(VALUE self)
849 struct http_parser *hp = data_get(self);
851 return HP_FL_TEST(hp, HASHEADER) ? Qtrue : Qfalse;
/* accessor: the internal read buffer (a Ruby String) */
854 static VALUE HttpParser_buf(VALUE self)
856 return data_get(self)->buf;
/* accessor: the Rack env Hash being filled in by the parser */
859 static VALUE HttpParser_env(VALUE self)
861 return data_get(self)->env;
/* mark the connection as rack.hijack-ed so #clear won't reuse buf/env */
864 static VALUE HttpParser_hijacked_bang(VALUE self)
866 struct http_parser *hp = data_get(self);
868 HP_FL_SET(hp, HIJACK);
/*
 * call-seq:
875 * parser.filter_body(dst, src) => nil/src
 *
877 * Takes a String of +src+, will modify data if dechunking is done.
878 * Returns +nil+ if there is more data left to process. Returns
879 * +src+ if body processing is complete. When returning +src+,
880 * it may modify +src+ so the start of the string points to where
881 * the body ended so that trailer processing can begin.
 *
883 * Raises HttpParserError if there are dechunking errors.
884 * Basically this is a glorified memcpy(3) that copies +src+
885 * into +buf+ while filtering it through the dechunker.
 *
 * NOTE(review): declarations of srcptr/srclen, some braces and the
 * return statements are on elided lines in this view.
 */
887 static VALUE HttpParser_filter_body(VALUE self, VALUE dst, VALUE src)
889 struct http_parser *hp = data_get(self);
893 srcptr = RSTRING_PTR(src);
894 srclen = RSTRING_LEN(src);
/* chunked path: run the Ragel dechunker, copying payload into dst */
898 if (HP_FL_TEST(hp, CHUNKED)) {
899 if (!chunked_eof(hp)) {
901 rb_str_resize(dst, srclen); /* we can never copy more than srclen bytes */
903 hp->s.dest_offset = 0;
/* hp->cont aliases dst here so skip_chunk_data can memcpy into it */
906 http_parser_execute(hp, srcptr, srclen);
907 if (hp->cs == http_parser_error)
908 parser_raise(eHttpParserError, "Invalid HTTP format, parsing fails.");
910 assert(hp->s.dest_offset <= hp->offset &&
911 "destination buffer overflow");
912 advance_str(src, hp->offset);
913 rb_str_set_len(dst, hp->s.dest_offset);
915 if (RSTRING_LEN(dst) == 0 && chunked_eof(hp)) {
916 assert(hp->len.chunk == 0 && "chunk at EOF but more to parse");
922 /* no need to enter the Ragel machine for unchunked transfers */
923 assert(hp->len.content >= 0 && "negative Content-Length");
924 if (hp->len.content > 0) {
925 long nr = MIN(srclen, hp->len.content);
928 rb_str_resize(dst, nr);
/*
930 * using rb_str_replace() to avoid memcpy() doesn't help in
931 * most cases because a GC-aware programmer will pass an explicit
932 * buffer to env["rack.input"].read and reuse the buffer in a loop.
933 * This causes copy-on-write behavior to be triggered anyways
934 * when the +src+ buffer is modified (when reading off the socket).
 */
937 memcpy(RSTRING_PTR(dst), srcptr, nr);
938 hp->len.content -= nr;
939 if (hp->len.content == 0) {
940 HP_FL_SET(hp, REQEOF);
941 hp->cs = http_parser_first_final;
943 advance_str(src, nr);
947 hp->offset = 0; /* for trailer parsing */
/*
 * setter for response_start_sent= (used by check_client_connection):
 * records whether the response status line was already written.
 * NOTE(review): the truthiness test on +boolean+ is on an elided line.
 */
951 static VALUE HttpParser_rssset(VALUE self, VALUE boolean)
953 struct http_parser *hp = data_get(self);
956 HP_FL_SET(hp, RESSTART);
958 HP_FL_UNSET(hp, RESSTART);
960 return boolean; /* ignored by Ruby anyways */
/* getter for response_start_sent */
963 static VALUE HttpParser_rssget(VALUE self)
965 struct http_parser *hp = data_get(self);
967 return HP_FL_TEST(hp, RESSTART) ? Qtrue : Qfalse;
/*
 * bind a g_* global to its memoized common-field string; sizeof(str)-1
 * is the literal's length without the trailing NUL.
 */
970 #define SET_GLOBAL(var,str) do { \
971 var = find_common_field(str, sizeof(str) - 1); \
972 assert(!NIL_P(var) && "missed global field"); \
/*
 * Extension entry point: defines Unicorn::HttpParser, its exception
 * classes and constants, binds all methods, and resolves interned IDs
 * and memoized header-field globals.
 * NOTE(review): some lines (superclass args for e413/e414, assignments
 * of eHttpParserError, etc.) are elided in this view.
 */
975 void Init_unicorn_http(void)
979 mUnicorn = rb_define_module("Unicorn");
980 cHttpParser = rb_define_class_under(mUnicorn, "HttpParser", rb_cObject);
982 rb_define_class_under(mUnicorn, "HttpParserError", rb_eIOError);
983 e413 = rb_define_class_under(mUnicorn, "RequestEntityTooLargeError",
985 e414 = rb_define_class_under(mUnicorn, "RequestURITooLongError",
988 id_uminus = rb_intern("-@");
990 rb_define_alloc_func(cHttpParser, HttpParser_alloc);
991 rb_define_method(cHttpParser, "initialize", HttpParser_init, 0);
992 rb_define_method(cHttpParser, "clear", HttpParser_clear, 0);
993 rb_define_method(cHttpParser, "parse", HttpParser_parse, 0);
994 rb_define_method(cHttpParser, "add_parse", HttpParser_add_parse, 1);
995 rb_define_method(cHttpParser, "headers", HttpParser_headers, 2);
996 rb_define_method(cHttpParser, "trailers", HttpParser_headers, 2);
997 rb_define_method(cHttpParser, "filter_body", HttpParser_filter_body, 2);
998 rb_define_method(cHttpParser, "content_length", HttpParser_content_length, 0);
999 rb_define_method(cHttpParser, "body_eof?", HttpParser_body_eof, 0);
1000 rb_define_method(cHttpParser, "keepalive?", HttpParser_keepalive, 0);
1001 rb_define_method(cHttpParser, "chunkable_response?", chunkable_response_p, 0);
1002 rb_define_method(cHttpParser, "headers?", HttpParser_has_headers, 0);
1003 rb_define_method(cHttpParser, "next?", HttpParser_next, 0);
1004 rb_define_method(cHttpParser, "buf", HttpParser_buf, 0);
1005 rb_define_method(cHttpParser, "env", HttpParser_env, 0);
1006 rb_define_method(cHttpParser, "hijacked!", HttpParser_hijacked_bang, 0);
1007 rb_define_method(cHttpParser, "response_start_sent=", HttpParser_rssset, 1);
1008 rb_define_method(cHttpParser, "response_start_sent", HttpParser_rssget, 0);
/*
1011 * The maximum size of a single chunk when using chunked transfer encoding.
1012 * This is only a theoretical maximum used to detect errors in clients,
1013 * it is highly unlikely to encounter clients that send more than
1014 * several kilobytes at once.
 */
1016 rb_define_const(cHttpParser, "CHUNK_MAX", OFFT2NUM(UH_OFF_T_MAX));
/*
1019 * The maximum size of the body as specified by Content-Length.
1020 * This is only a theoretical maximum, the actual limit is subject
1021 * to the limits of the file system used for +Dir.tmpdir+.
 */
1023 rb_define_const(cHttpParser, "LENGTH_MAX", OFFT2NUM(UH_OFF_T_MAX));
1025 rb_define_singleton_method(cHttpParser, "max_header_len=", set_maxhdrlen, 1);
1027 init_common_fields();
1028 SET_GLOBAL(g_http_host, "HOST");
1029 SET_GLOBAL(g_http_trailer, "TRAILER");
1030 SET_GLOBAL(g_http_transfer_encoding, "TRANSFER_ENCODING");
1031 SET_GLOBAL(g_content_length, "CONTENT_LENGTH");
1032 SET_GLOBAL(g_http_connection, "CONNECTION");
1033 id_set_backtrace = rb_intern("set_backtrace");
1034 init_unicorn_httpdate();
1036 id_is_chunked_p = rb_intern("is_chunked?");
1038 init_epollexclusive(mUnicorn);