1 /* Copyright (c) 2001 Matej Pfajfar.
2 * Copyright (c) 2001-2004, Roger Dingledine.
3 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
4 * Copyright (c) 2007-2013, The Tor Project, Inc. */
5 /* See LICENSE for licensing information */
9 * \brief Implements a generic interface buffer. Buffers are
10 * fairly opaque string holders that can read to or flush from:
11 * memory, file descriptors, or TLS connections.
13 #define BUFFERS_PRIVATE
15 #include "addressmap.h"
18 #include "connection_edge.h"
19 #include "connection_or.h"
22 #include "ext_orport.h"
23 #include "../common/util.h"
24 #include "../common/torlog.h"
32 /** Helper: If PARANOIA is defined, assert that the buffer in local variable
33 * <b>buf</b> is well-formed. */
34 #define check() STMT_BEGIN assert_buf_ok(buf); STMT_END
36 #define check() STMT_NIL
39 /* Implementation notes:
41 * After flirting with memmove, and dallying with ring-buffers, we're finally
42 * getting up to speed with the 1970s and implementing buffers as a linked
43 * list of small chunks. Each buffer has such a list; data is removed from
44 * the head of the list, and added at the tail. The list is singly linked,
45 * and the buffer keeps a pointer to the head and the tail.
47 * Every chunk, except the tail, contains at least one byte of data. Data in
48 * each chunk is contiguous.
50 * When you need to treat the first N characters on a buffer as a contiguous
51 * string, use the buf_pullup function to make them so. Don't do this more
54 * The major free Unix kernels have handled buffers like this since, like,
58 static int parse_socks(const char *data
, size_t datalen
, socks_request_t
*req
,
59 int log_sockstype
, int safe_socks
, ssize_t
*drain_out
,
60 size_t *want_length_out
);
61 static int parse_socks_client(const uint8_t *data
, size_t datalen
,
62 int state
, char **reason
,
65 /* Chunk manipulation functions */
67 #define CHUNK_HEADER_LEN STRUCT_OFFSET(chunk_t, mem[0])
69 /** Return the number of bytes needed to allocate a chunk to hold
70 * <b>memlen</b> bytes. */
71 #define CHUNK_ALLOC_SIZE(memlen) (CHUNK_HEADER_LEN + (memlen))
72 /** Return the number of usable bytes in a chunk allocated with
73 * malloc(<b>memlen</b>). */
74 #define CHUNK_SIZE_WITH_ALLOC(memlen) ((memlen) - CHUNK_HEADER_LEN)
76 /** Return the next character in <b>chunk</b> onto which data can be appended.
77 * If the chunk is full, this might be off the end of chunk->mem. */
79 CHUNK_WRITE_PTR(chunk_t
*chunk
)
81 return chunk
->data
+ chunk
->datalen
;
84 /** Return the number of bytes that can be written onto <b>chunk</b> without
85 * running out of space. */
87 CHUNK_REMAINING_CAPACITY(const chunk_t
*chunk
)
89 return (chunk
->mem
+ chunk
->memlen
) - (chunk
->data
+ chunk
->datalen
);
92 /** Move all bytes stored in <b>chunk</b> to the front of <b>chunk</b>->mem,
93 * to free up space at the end. */
95 chunk_repack(chunk_t
*chunk
)
97 if (chunk
->datalen
&& chunk
->data
!= &chunk
->mem
[0]) {
98 memmove(chunk
->mem
, chunk
->data
, chunk
->datalen
);
100 chunk
->data
= &chunk
->mem
[0];
103 /** Keep track of total size of allocated chunks for consistency asserts */
104 static size_t total_bytes_allocated_in_chunks
= 0;
106 #if defined(ENABLE_BUF_FREELISTS) || defined(RUNNING_DOXYGEN)
107 /** A freelist of chunks. */
108 typedef struct chunk_freelist_t
{
109 size_t alloc_size
; /**< What size chunks does this freelist hold? */
110 int max_length
; /**< Never allow more than this number of chunks in the
112 int slack
; /**< When trimming the freelist, leave this number of extra
113 * chunks beyond lowest_length.*/
114 int cur_length
; /**< How many chunks on the freelist now? */
115 int lowest_length
; /**< What's the smallest value of cur_length since the
116 * last time we cleaned this freelist? */
120 chunk_t
*head
; /**< First chunk on the freelist. */
123 /** Macro to help define freelists. */
124 #define FL(a,m,s) { a, m, s, 0, 0, 0, 0, 0, NULL }
126 /** Static array of freelists, sorted by alloc_len, terminated by an entry
127 * with alloc_size of 0. */
128 static chunk_freelist_t freelists
[] = {
129 FL(4096, 256, 8), FL(8192, 128, 4), FL(16384, 64, 4), FL(32768, 32, 2),
133 /** How many times have we looked for a chunk of a size that no freelist
134 * could help with? */
135 static uint64_t n_freelist_miss
= 0;
137 static void assert_freelist_ok(chunk_freelist_t
*fl
);
139 /** Return the freelist to hold chunks of size <b>alloc</b>, or NULL if
140 * no freelist exists for that size. */
141 static INLINE chunk_freelist_t
*
142 get_freelist(size_t alloc
)
145 for (i
=0; (freelists
[i
].alloc_size
<= alloc
&&
146 freelists
[i
].alloc_size
); ++i
) {
147 if (freelists
[i
].alloc_size
== alloc
) {
148 return &freelists
[i
];
154 /** Deallocate a chunk or put it on a freelist */
156 chunk_free_unchecked(chunk_t
*chunk
)
159 chunk_freelist_t
*freelist
;
161 alloc
= CHUNK_ALLOC_SIZE(chunk
->memlen
);
162 freelist
= get_freelist(alloc
);
163 if (freelist
&& freelist
->cur_length
< freelist
->max_length
) {
164 chunk
->next
= freelist
->head
;
165 freelist
->head
= chunk
;
166 ++freelist
->cur_length
;
170 #ifdef DEBUG_CHUNK_ALLOC
171 tor_assert(alloc
== chunk
->DBG_alloc
);
173 tor_assert(total_bytes_allocated_in_chunks
>= alloc
);
174 total_bytes_allocated_in_chunks
-= alloc
;
179 /** Allocate a new chunk with a given allocation size, or get one from the
180 * freelist. Note that a chunk with allocation size A can actually hold only
181 * CHUNK_SIZE_WITH_ALLOC(A) bytes in its mem field. */
182 static INLINE chunk_t
*
183 chunk_new_with_alloc_size(size_t alloc
)
186 chunk_freelist_t
*freelist
;
187 tor_assert(alloc
>= sizeof(chunk_t
));
188 freelist
= get_freelist(alloc
);
189 if (freelist
&& freelist
->head
) {
191 freelist
->head
= ch
->next
;
192 if (--freelist
->cur_length
< freelist
->lowest_length
)
193 freelist
->lowest_length
= freelist
->cur_length
;
200 ch
= tor_malloc(alloc
);
201 #ifdef DEBUG_CHUNK_ALLOC
202 ch
->DBG_alloc
= alloc
;
204 total_bytes_allocated_in_chunks
+= alloc
;
208 ch
->memlen
= CHUNK_SIZE_WITH_ALLOC(alloc
);
209 ch
->data
= &ch
->mem
[0];
214 chunk_free_unchecked(chunk_t
*chunk
)
218 #ifdef DEBUG_CHUNK_ALLOC
219 tor_assert(CHUNK_ALLOC_SIZE(chunk
->memlen
) == chunk
->DBG_alloc
);
221 tor_assert(total_bytes_allocated_in_chunks
>=
222 CHUNK_ALLOC_SIZE(chunk
->memlen
));
223 total_bytes_allocated_in_chunks
-= CHUNK_ALLOC_SIZE(chunk
->memlen
);
226 static INLINE chunk_t
*
227 chunk_new_with_alloc_size(size_t alloc
)
230 ch
= tor_malloc(alloc
);
233 #ifdef DEBUG_CHUNK_ALLOC
234 ch
->DBG_alloc
= alloc
;
236 ch
->memlen
= CHUNK_SIZE_WITH_ALLOC(alloc
);
237 total_bytes_allocated_in_chunks
+= alloc
;
238 ch
->data
= &ch
->mem
[0];
243 /** Expand <b>chunk</b> until it can hold <b>sz</b> bytes, and return a
244 * new pointer to <b>chunk</b>. Old pointers are no longer valid. */
245 static INLINE chunk_t
*
246 chunk_grow(chunk_t
*chunk
, size_t sz
)
249 size_t memlen_orig
= chunk
->memlen
;
250 tor_assert(sz
> chunk
->memlen
);
251 offset
= chunk
->data
- chunk
->mem
;
252 chunk
= tor_realloc(chunk
, CHUNK_ALLOC_SIZE(sz
));
254 chunk
->data
= chunk
->mem
+ offset
;
255 #ifdef DEBUG_CHUNK_ALLOC
256 tor_assert(chunk
->DBG_alloc
== CHUNK_ALLOC_SIZE(memlen_orig
));
257 chunk
->DBG_alloc
= CHUNK_ALLOC_SIZE(sz
);
259 total_bytes_allocated_in_chunks
+=
260 CHUNK_ALLOC_SIZE(sz
) - CHUNK_ALLOC_SIZE(memlen_orig
);
264 /** If a read onto the end of a chunk would be smaller than this number, then
265 * just start a new chunk. */
266 #define MIN_READ_LEN 8
267 /** Every chunk should take up at least this many bytes. */
268 #define MIN_CHUNK_ALLOC 256
269 /** No chunk should take up more than this many bytes. */
270 #define MAX_CHUNK_ALLOC 65536
272 /** Return the allocation size we'd like to use to hold <b>target</b>
275 preferred_chunk_size(size_t target
)
277 size_t sz
= MIN_CHUNK_ALLOC
;
278 while (CHUNK_SIZE_WITH_ALLOC(sz
) < target
) {
284 /** Remove from the freelists most chunks that have not been used since the
285 * last call to buf_shrink_freelists(). Return the amount of memory
288 buf_shrink_freelists(int free_all
)
290 #ifdef ENABLE_BUF_FREELISTS
292 size_t total_freed
= 0;
293 disable_control_logging();
294 for (i
= 0; freelists
[i
].alloc_size
; ++i
) {
295 int slack
= freelists
[i
].slack
;
296 assert_freelist_ok(&freelists
[i
]);
297 if (free_all
|| freelists
[i
].lowest_length
> slack
) {
298 int n_to_free
= free_all
? freelists
[i
].cur_length
:
299 (freelists
[i
].lowest_length
- slack
);
300 int n_to_skip
= freelists
[i
].cur_length
- n_to_free
;
301 int orig_length
= freelists
[i
].cur_length
;
302 int orig_n_to_free
= n_to_free
, n_freed
=0;
303 int orig_n_to_skip
= n_to_skip
;
304 int new_length
= n_to_skip
;
305 chunk_t
**chp
= &freelists
[i
].head
;
308 if (!(*chp
) || ! (*chp
)->next
) {
309 log_warn(LD_BUG
, "I wanted to skip %d chunks in the freelist for "
310 "%d-byte chunks, but only found %d. (Length %d)",
311 orig_n_to_skip
, (int)freelists
[i
].alloc_size
,
312 orig_n_to_skip
-n_to_skip
, freelists
[i
].cur_length
);
313 assert_freelist_ok(&freelists
[i
]);
316 // tor_assert((*chp)->next);
323 chunk_t
*next
= chunk
->next
;
324 #ifdef DEBUG_CHUNK_ALLOC
325 tor_assert(chunk
->DBG_alloc
== CHUNK_ALLOC_SIZE(chunk
->memlen
));
327 tor_assert(total_bytes_allocated_in_chunks
>=
328 CHUNK_ALLOC_SIZE(chunk
->memlen
));
329 total_bytes_allocated_in_chunks
-= CHUNK_ALLOC_SIZE(chunk
->memlen
);
330 total_freed
+= CHUNK_ALLOC_SIZE(chunk
->memlen
);
335 ++freelists
[i
].n_free
;
338 log_warn(LD_BUG
, "Freelist length for %d-byte chunks may have been "
339 "messed up somehow.", (int)freelists
[i
].alloc_size
);
340 log_warn(LD_BUG
, "There were %d chunks at the start. I decided to "
341 "keep %d. I wanted to free %d. I freed %d. I somehow think "
342 "I have %d left to free.",
343 freelists
[i
].cur_length
, n_to_skip
, orig_n_to_free
,
346 // tor_assert(!n_to_free);
347 freelists
[i
].cur_length
= new_length
;
348 tor_assert(orig_n_to_skip
== new_length
);
349 log_info(LD_MM
, "Cleaned freelist for %d-byte chunks: original "
350 "length %d, kept %d, dropped %d. New length is %d",
351 (int)freelists
[i
].alloc_size
, orig_length
,
352 orig_n_to_skip
, orig_n_to_free
, new_length
);
354 freelists
[i
].lowest_length
= freelists
[i
].cur_length
;
355 assert_freelist_ok(&freelists
[i
]);
358 enable_control_logging();
366 /** Describe the current status of the freelists at log level <b>severity</b>.
369 buf_dump_freelist_sizes(int severity
)
371 #ifdef ENABLE_BUF_FREELISTS
373 tor_log(severity
, LD_MM
, "====== Buffer freelists:");
374 for (i
= 0; freelists
[i
].alloc_size
; ++i
) {
375 uint64_t total
= ((uint64_t)freelists
[i
].cur_length
) *
376 freelists
[i
].alloc_size
;
377 tor_log(severity
, LD_MM
,
378 U64_FORMAT
" bytes in %d %d-byte chunks ["U64_FORMAT
379 " misses; "U64_FORMAT
" frees; "U64_FORMAT
" hits]",
380 U64_PRINTF_ARG(total
),
381 freelists
[i
].cur_length
, (int)freelists
[i
].alloc_size
,
382 U64_PRINTF_ARG(freelists
[i
].n_alloc
),
383 U64_PRINTF_ARG(freelists
[i
].n_free
),
384 U64_PRINTF_ARG(freelists
[i
].n_hit
));
386 tor_log(severity
, LD_MM
, U64_FORMAT
" allocations in non-freelist sizes",
387 U64_PRINTF_ARG(n_freelist_miss
));
393 /** Collapse data from the first N chunks from <b>buf</b> into buf->head,
394 * growing it as necessary, until buf->head has the first <b>bytes</b> bytes
395 * of data from the buffer, or until buf->head has all the data in <b>buf</b>.
397 * If <b>nulterminate</b> is true, ensure that there is a 0 byte in
398 * buf->head->mem right after all the data. */
400 buf_pullup(buf_t
*buf
, size_t bytes
, int nulterminate
)
402 /* XXXX nothing uses nulterminate; remove it. */
409 if (buf
->datalen
< bytes
)
410 bytes
= buf
->datalen
;
413 capacity
= bytes
+ 1;
414 if (buf
->head
->datalen
>= bytes
&& CHUNK_REMAINING_CAPACITY(buf
->head
)) {
415 *CHUNK_WRITE_PTR(buf
->head
) = '\0';
420 if (buf
->head
->datalen
>= bytes
)
424 if (buf
->head
->memlen
>= capacity
) {
425 /* We don't need to grow the first chunk, but we might need to repack it.*/
426 size_t needed
= capacity
- buf
->head
->datalen
;
427 if (CHUNK_REMAINING_CAPACITY(buf
->head
) < needed
)
428 chunk_repack(buf
->head
);
429 tor_assert(CHUNK_REMAINING_CAPACITY(buf
->head
) >= needed
);
433 /* We need to grow the chunk. */
434 chunk_repack(buf
->head
);
435 newsize
= CHUNK_SIZE_WITH_ALLOC(preferred_chunk_size(capacity
));
436 newhead
= chunk_grow(buf
->head
, newsize
);
437 tor_assert(newhead
->memlen
>= capacity
);
438 if (newhead
!= buf
->head
) {
439 if (buf
->tail
== buf
->head
)
446 while (dest
->datalen
< bytes
) {
447 size_t n
= bytes
- dest
->datalen
;
450 if (n
> src
->datalen
) {
451 memcpy(CHUNK_WRITE_PTR(dest
), src
->data
, src
->datalen
);
452 dest
->datalen
+= src
->datalen
;
453 dest
->next
= src
->next
;
454 if (buf
->tail
== src
)
456 chunk_free_unchecked(src
);
458 memcpy(CHUNK_WRITE_PTR(dest
), src
->data
, n
);
462 tor_assert(dest
->datalen
== bytes
);
467 tor_assert(CHUNK_REMAINING_CAPACITY(buf
->head
));
468 *CHUNK_WRITE_PTR(buf
->head
) = '\0';
474 #ifdef TOR_UNIT_TESTS
476 buf_get_first_chunk_data(const buf_t
*buf
, const char **cp
, size_t *sz
)
478 if (!buf
|| !buf
->head
) {
482 *cp
= buf
->head
->data
;
483 *sz
= buf
->head
->datalen
;
488 /** Resize buf so it won't hold extra memory that we haven't been
492 buf_shrink(buf_t
*buf
)
497 /** Remove the first <b>n</b> bytes from buf. */
499 buf_remove_from_front(buf_t
*buf
, size_t n
)
501 tor_assert(buf
->datalen
>= n
);
503 tor_assert(buf
->head
);
504 if (buf
->head
->datalen
> n
) {
505 buf
->head
->datalen
-= n
;
506 buf
->head
->data
+= n
;
510 chunk_t
*victim
= buf
->head
;
511 n
-= victim
->datalen
;
512 buf
->datalen
-= victim
->datalen
;
513 buf
->head
= victim
->next
;
514 if (buf
->tail
== victim
)
516 chunk_free_unchecked(victim
);
522 /** Create and return a new buf with default chunk capacity <b>size</b>.
525 buf_new_with_capacity(size_t size
)
527 buf_t
*b
= buf_new();
528 b
->default_chunk_size
= preferred_chunk_size(size
);
532 /** Allocate and return a new buffer with default capacity. */
536 buf_t
*buf
= tor_malloc_zero(sizeof(buf_t
));
537 buf
->magic
= BUFFER_MAGIC
;
538 buf
->default_chunk_size
= 4096;
543 buf_get_default_chunk_size(const buf_t
*buf
)
545 return buf
->default_chunk_size
;
548 /** Remove all data from <b>buf</b>. */
550 buf_clear(buf_t
*buf
)
552 chunk_t
*chunk
, *next
;
554 for (chunk
= buf
->head
; chunk
; chunk
= next
) {
556 chunk_free_unchecked(chunk
);
558 buf
->head
= buf
->tail
= NULL
;
561 /** Return the number of bytes stored in <b>buf</b> */
563 buf_datalen(const buf_t
*buf
)
568 /** Return the total length of all chunks used in <b>buf</b>. */
570 buf_allocation(const buf_t
*buf
)
573 const chunk_t
*chunk
;
574 for (chunk
= buf
->head
; chunk
; chunk
= chunk
->next
) {
575 total
+= CHUNK_ALLOC_SIZE(chunk
->memlen
);
580 /** Return the number of bytes that can be added to <b>buf</b> without
581 * performing any additional allocation. */
583 buf_slack(const buf_t
*buf
)
588 return CHUNK_REMAINING_CAPACITY(buf
->tail
);
591 /** Release storage held by <b>buf</b>. */
599 buf
->magic
= 0xdeadbeef;
603 /** Return a new copy of <b>in_chunk</b> */
605 chunk_copy(const chunk_t
*in_chunk
)
607 chunk_t
*newch
= tor_memdup(in_chunk
, CHUNK_ALLOC_SIZE(in_chunk
->memlen
));
608 total_bytes_allocated_in_chunks
+= CHUNK_ALLOC_SIZE(in_chunk
->memlen
);
609 #ifdef DEBUG_CHUNK_ALLOC
610 newch
->DBG_alloc
= CHUNK_ALLOC_SIZE(in_chunk
->memlen
);
613 if (in_chunk
->data
) {
614 off_t offset
= in_chunk
->data
- in_chunk
->mem
;
615 newch
->data
= newch
->mem
+ offset
;
620 /** Return a new copy of <b>buf</b> */
622 buf_copy(const buf_t
*buf
)
625 buf_t
*out
= buf_new();
626 out
->default_chunk_size
= buf
->default_chunk_size
;
627 for (ch
= buf
->head
; ch
; ch
= ch
->next
) {
628 chunk_t
*newch
= chunk_copy(ch
);
630 out
->tail
->next
= newch
;
633 out
->head
= out
->tail
= newch
;
636 out
->datalen
= buf
->datalen
;
640 /** Append a new chunk with enough capacity to hold <b>capacity</b> bytes to
641 * the tail of <b>buf</b>. If <b>capped</b>, don't allocate a chunk bigger
642 * than MAX_CHUNK_ALLOC. */
644 buf_add_chunk_with_capacity(buf_t
*buf
, size_t capacity
, int capped
)
648 if (CHUNK_ALLOC_SIZE(capacity
) < buf
->default_chunk_size
) {
649 chunk
= chunk_new_with_alloc_size(buf
->default_chunk_size
);
650 } else if (capped
&& CHUNK_ALLOC_SIZE(capacity
) > MAX_CHUNK_ALLOC
) {
651 chunk
= chunk_new_with_alloc_size(MAX_CHUNK_ALLOC
);
653 chunk
= chunk_new_with_alloc_size(preferred_chunk_size(capacity
));
656 tor_gettimeofday_cached_monotonic(&now
);
657 chunk
->inserted_time
= (uint32_t)tv_to_msec(&now
);
660 tor_assert(buf
->head
);
661 buf
->tail
->next
= chunk
;
664 tor_assert(!buf
->head
);
665 buf
->head
= buf
->tail
= chunk
;
671 /** Return the age of the oldest chunk in the buffer <b>buf</b>, in
672 * milliseconds. Requires the current time, in truncated milliseconds since
673 * the epoch, as its input <b>now</b>.
676 buf_get_oldest_chunk_timestamp(const buf_t
*buf
, uint32_t now
)
679 return now
- buf
->head
->inserted_time
;
686 buf_get_total_allocation(void)
688 return total_bytes_allocated_in_chunks
;
691 /** Read up to <b>at_most</b> bytes from the socket <b>fd</b> into
692 * <b>chunk</b> (which must be on <b>buf</b>). If we get an EOF, set
693 * *<b>reached_eof</b> to 1. Return -1 on error, 0 on eof or blocking,
694 * and the number of bytes read otherwise. */
696 read_to_chunk(buf_t
*buf
, chunk_t
*chunk
, tor_socket_t fd
, size_t at_most
,
697 int *reached_eof
, int *socket_error
)
700 if (at_most
> CHUNK_REMAINING_CAPACITY(chunk
))
701 at_most
= CHUNK_REMAINING_CAPACITY(chunk
);
702 read_result
= tor_socket_recv(fd
, CHUNK_WRITE_PTR(chunk
), at_most
, 0);
704 if (read_result
< 0) {
705 int e
= tor_socket_errno(fd
);
706 if (!ERRNO_IS_EAGAIN(e
)) { /* it's a real error */
709 log_warn(LD_NET
,"recv() failed: WSAENOBUFS. Not enough ram?");
714 return 0; /* would block. */
715 } else if (read_result
== 0) {
716 log_debug(LD_NET
,"Encountered eof on fd %d", (int)fd
);
719 } else { /* actually got bytes. */
720 buf
->datalen
+= read_result
;
721 chunk
->datalen
+= read_result
;
722 log_debug(LD_NET
,"Read %ld bytes. %d on inbuf.", (long)read_result
,
724 tor_assert(read_result
< INT_MAX
);
725 return (int)read_result
;
729 /** As read_to_chunk(), but return (negative) error code on error, blocking,
730 * or TLS, and the number of bytes read otherwise. */
732 read_to_chunk_tls(buf_t
*buf
, chunk_t
*chunk
, tor_tls_t
*tls
,
737 tor_assert(CHUNK_REMAINING_CAPACITY(chunk
) >= at_most
);
738 read_result
= tor_tls_read(tls
, CHUNK_WRITE_PTR(chunk
), at_most
);
741 buf
->datalen
+= read_result
;
742 chunk
->datalen
+= read_result
;
746 /** Read from socket <b>s</b>, writing onto end of <b>buf</b>. Read at most
747 * <b>at_most</b> bytes, growing the buffer as necessary. If recv() returns 0
748 * (because of EOF), set *<b>reached_eof</b> to 1 and return 0. Return -1 on
749 * error; else return the number of bytes read.
751 /* XXXX024 indicate "read blocked" somehow? */
753 read_to_buf(tor_socket_t s
, size_t at_most
, buf_t
*buf
, int *reached_eof
,
756 /* XXXX024 It's stupid to overload the return values for these functions:
757 * "error status" and "number of bytes read" are not mutually exclusive.
760 size_t total_read
= 0;
763 tor_assert(reached_eof
);
764 tor_assert(SOCKET_OK(s
));
766 while (at_most
> total_read
) {
767 size_t readlen
= at_most
- total_read
;
769 if (!buf
->tail
|| CHUNK_REMAINING_CAPACITY(buf
->tail
) < MIN_READ_LEN
) {
770 chunk
= buf_add_chunk_with_capacity(buf
, at_most
, 1);
771 if (readlen
> chunk
->memlen
)
772 readlen
= chunk
->memlen
;
774 size_t cap
= CHUNK_REMAINING_CAPACITY(buf
->tail
);
780 r
= read_to_chunk(buf
, chunk
, s
, readlen
, reached_eof
, socket_error
);
783 return r
; /* Error */
784 tor_assert(total_read
+r
< INT_MAX
);
786 if ((size_t)r
< readlen
) { /* eof, block, or no more to read. */
790 return (int)total_read
;
793 /** As read_to_buf, but reads from a TLS connection, and returns a TLS
794 * status value rather than the number of bytes read.
796 * Using TLS on OR connections complicates matters in two ways.
798 * First, a TLS stream has its own read buffer independent of the
799 * connection's read buffer. (TLS needs to read an entire frame from
800 * the network before it can decrypt any data. Thus, trying to read 1
801 * byte from TLS can require that several KB be read from the network
802 * and decrypted. The extra data is stored in TLS's decrypt buffer.)
803 * Because the data hasn't been read by Tor (it's still inside the TLS),
804 * this means that sometimes a connection "has stuff to read" even when
805 * poll() didn't return POLLIN. The tor_tls_get_pending_bytes function is
806 * used in connection.c to detect TLS objects with non-empty internal
807 * buffers and read from them again.
809 * Second, the TLS stream's events do not correspond directly to network
810 * events: sometimes, before a TLS stream can read, the network must be
811 * ready to write -- or vice versa.
814 read_to_buf_tls(tor_tls_t
*tls
, size_t at_most
, buf_t
*buf
)
817 size_t total_read
= 0;
819 check_no_tls_errors();
823 while (at_most
> total_read
) {
824 size_t readlen
= at_most
- total_read
;
826 if (!buf
->tail
|| CHUNK_REMAINING_CAPACITY(buf
->tail
) < MIN_READ_LEN
) {
827 chunk
= buf_add_chunk_with_capacity(buf
, at_most
, 1);
828 if (readlen
> chunk
->memlen
)
829 readlen
= chunk
->memlen
;
831 size_t cap
= CHUNK_REMAINING_CAPACITY(buf
->tail
);
837 r
= read_to_chunk_tls(buf
, chunk
, tls
, readlen
);
840 return r
; /* Error */
841 tor_assert(total_read
+r
< INT_MAX
);
843 if ((size_t)r
< readlen
) /* eof, block, or no more to read. */
846 return (int)total_read
;
849 /** Helper for flush_buf(): try to write <b>sz</b> bytes from chunk
850 * <b>chunk</b> of buffer <b>buf</b> onto socket <b>s</b>. On success, deduct
851 * the bytes written from *<b>buf_flushlen</b>. Return the number of bytes
852 * written on success, 0 on blocking, -1 on failure.
855 flush_chunk(tor_socket_t s
, buf_t
*buf
, chunk_t
*chunk
, size_t sz
,
856 size_t *buf_flushlen
)
858 ssize_t write_result
;
860 if (sz
> chunk
->datalen
)
862 write_result
= tor_socket_send(s
, chunk
->data
, sz
, 0);
864 if (write_result
< 0) {
865 int e
= tor_socket_errno(s
);
866 if (!ERRNO_IS_EAGAIN(e
)) { /* it's a real error */
869 log_warn(LD_NET
,"write() failed: WSAENOBUFS. Not enough ram?");
873 log_debug(LD_NET
,"write() would block, returning.");
876 *buf_flushlen
-= write_result
;
877 buf_remove_from_front(buf
, write_result
);
878 tor_assert(write_result
< INT_MAX
);
879 return (int)write_result
;
883 /** Helper for flush_buf_tls(): try to write <b>sz</b> bytes from chunk
884 * <b>chunk</b> of buffer <b>buf</b> onto socket <b>s</b>. (Tries to write
885 * more if there is a forced pending write size.) On success, deduct the
886 * bytes written from *<b>buf_flushlen</b>. Return the number of bytes
887 * written on success, and a TOR_TLS error code on failure or blocking.
890 flush_chunk_tls(tor_tls_t
*tls
, buf_t
*buf
, chunk_t
*chunk
,
891 size_t sz
, size_t *buf_flushlen
)
897 forced
= tor_tls_get_forced_write_size(tls
);
902 tor_assert(sz
<= chunk
->datalen
);
907 r
= tor_tls_write(tls
, data
, sz
);
910 if (*buf_flushlen
> (size_t)r
)
914 buf_remove_from_front(buf
, r
);
915 log_debug(LD_NET
,"flushed %d bytes, %d ready to flush, %d remain.",
916 r
,(int)*buf_flushlen
,(int)buf
->datalen
);
920 /** Write data from <b>buf</b> to the socket <b>s</b>. Write at most
921 * <b>sz</b> bytes, decrement *<b>buf_flushlen</b> by
922 * the number of bytes actually written, and remove the written bytes
923 * from the buffer. Return the number of bytes written on success,
924 * -1 on failure. Return 0 if write() would block.
927 flush_buf(tor_socket_t s
, buf_t
*buf
, size_t sz
, size_t *buf_flushlen
)
929 /* XXXX024 It's stupid to overload the return values for these functions:
930 * "error status" and "number of bytes flushed" are not mutually exclusive.
934 tor_assert(buf_flushlen
);
935 tor_assert(SOCKET_OK(s
));
936 tor_assert(*buf_flushlen
<= buf
->datalen
);
937 tor_assert(sz
<= *buf_flushlen
);
942 tor_assert(buf
->head
);
943 if (buf
->head
->datalen
>= sz
)
946 flushlen0
= buf
->head
->datalen
;
948 r
= flush_chunk(s
, buf
, buf
->head
, flushlen0
, buf_flushlen
);
954 if (r
== 0 || (size_t)r
< flushlen0
) /* can't flush any more now. */
957 tor_assert(flushed
< INT_MAX
);
961 /** As flush_buf(), but writes data to a TLS connection. Can write more than
962 * <b>flushlen</b> bytes.
965 flush_buf_tls(tor_tls_t
*tls
, buf_t
*buf
, size_t flushlen
,
966 size_t *buf_flushlen
)
971 tor_assert(buf_flushlen
);
972 tor_assert(*buf_flushlen
<= buf
->datalen
);
973 tor_assert(flushlen
<= *buf_flushlen
);
974 sz
= (ssize_t
) flushlen
;
976 /* we want to let tls write even if flushlen is zero, because it might
977 * have a partial record pending */
978 check_no_tls_errors();
984 if ((ssize_t
)buf
->head
->datalen
>= sz
)
987 flushlen0
= buf
->head
->datalen
;
992 r
= flush_chunk_tls(tls
, buf
, buf
->head
, flushlen0
, buf_flushlen
);
998 if (r
== 0) /* Can't flush any more now. */
1001 tor_assert(flushed
< INT_MAX
);
1002 return (int)flushed
;
1005 /** Append <b>string_len</b> bytes from <b>string</b> to the end of
1008 * Return the new length of the buffer on success, -1 on failure.
1011 write_to_buf(const char *string
, size_t string_len
, buf_t
*buf
)
1014 return (int)buf
->datalen
;
1017 while (string_len
) {
1019 if (!buf
->tail
|| !CHUNK_REMAINING_CAPACITY(buf
->tail
))
1020 buf_add_chunk_with_capacity(buf
, string_len
, 1);
1022 copy
= CHUNK_REMAINING_CAPACITY(buf
->tail
);
1023 if (copy
> string_len
)
1025 memcpy(CHUNK_WRITE_PTR(buf
->tail
), string
, copy
);
1028 buf
->datalen
+= copy
;
1029 buf
->tail
->datalen
+= copy
;
1033 tor_assert(buf
->datalen
< INT_MAX
);
1034 return (int)buf
->datalen
;
1037 /** Helper: copy the first <b>string_len</b> bytes from <b>buf</b>
1038 * onto <b>string</b>.
1041 peek_from_buf(char *string
, size_t string_len
, const buf_t
*buf
)
1046 /* make sure we don't ask for too much */
1047 tor_assert(string_len
<= buf
->datalen
);
1048 /* assert_buf_ok(buf); */
1051 while (string_len
) {
1052 size_t copy
= string_len
;
1054 if (chunk
->datalen
< copy
)
1055 copy
= chunk
->datalen
;
1056 memcpy(string
, chunk
->data
, copy
);
1059 chunk
= chunk
->next
;
1063 /** Remove <b>string_len</b> bytes from the front of <b>buf</b>, and store
1064 * them into <b>string</b>. Return the new buffer size. <b>string_len</b>
1065 * must be \<= the number of bytes on the buffer.
1068 fetch_from_buf(char *string
, size_t string_len
, buf_t
*buf
)
1070 /* There must be string_len bytes in buf; write them onto string,
1071 * then memmove buf back (that is, remove them from buf).
1073 * Return the number of bytes still on the buffer. */
1076 peek_from_buf(string
, string_len
, buf
);
1077 buf_remove_from_front(buf
, string_len
);
1079 tor_assert(buf
->datalen
< INT_MAX
);
1080 return (int)buf
->datalen
;
1083 /** True iff the cell command <b>command</b> is one that implies a
1084 * variable-length cell in Tor link protocol <b>linkproto</b>. */
1086 cell_command_is_var_length(uint8_t command
, int linkproto
)
1088 /* If linkproto is v2 (2), CELL_VERSIONS is the only variable-length cells
1089 * work as implemented here. If it's 1, there are no variable-length cells.
1090 * Tor does not support other versions right now, and so can't negotiate
1093 switch (linkproto
) {
1095 /* Link protocol version 1 has no variable-length cells. */
1098 /* In link protocol version 2, VERSIONS is the only variable-length cell */
1099 return command
== CELL_VERSIONS
;
1103 /* In link protocol version 3 and later, and in version "unknown",
1104 * commands 128 and higher indicate variable-length. VERSIONS is
1105 * grandfathered in. */
1106 return command
== CELL_VERSIONS
|| command
>= 128;
1110 /** Check <b>buf</b> for a variable-length cell according to the rules of link
1111 * protocol version <b>linkproto</b>. If one is found, pull it off the buffer
1112 * and assign a newly allocated var_cell_t to *<b>out</b>, and return 1.
1113 * Return 0 if whatever is on the start of buf_t is not a variable-length
1114 * cell. Return 1 and set *<b>out</b> to NULL if there seems to be the start
1115 * of a variable-length cell on <b>buf</b>, but the whole thing isn't there
1118 fetch_var_cell_from_buf(buf_t
*buf
, var_cell_t
**out
, int linkproto
)
1120 char hdr
[VAR_CELL_MAX_HEADER_SIZE
];
1124 const int wide_circ_ids
= linkproto
>= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS
;
1125 const int circ_id_len
= get_circ_id_size(wide_circ_ids
);
1126 const unsigned header_len
= get_var_cell_header_size(wide_circ_ids
);
1129 if (buf
->datalen
< header_len
)
1131 peek_from_buf(hdr
, header_len
, buf
);
1133 command
= get_uint8(hdr
+ circ_id_len
);
1134 if (!(cell_command_is_var_length(command
, linkproto
)))
1137 length
= ntohs(get_uint16(hdr
+ circ_id_len
+ 1));
1138 if (buf
->datalen
< (size_t)(header_len
+length
))
1140 result
= var_cell_new(length
);
1141 result
->command
= command
;
1143 result
->circ_id
= ntohl(get_uint32(hdr
));
1145 result
->circ_id
= ntohs(get_uint16(hdr
));
1147 buf_remove_from_front(buf
, header_len
);
1148 peek_from_buf((char*) result
->payload
, length
, buf
);
1149 buf_remove_from_front(buf
, length
);
1156 #ifdef USE_BUFFEREVENTS
1157 /** Try to read <b>n</b> bytes from <b>buf</b> at <b>pos</b> (which may be
1158 * NULL for the start of the buffer), copying the data only if necessary. Set
1159 * *<b>data_out</b> to a pointer to the desired bytes. Set <b>free_out</b>
1160 * to 1 if we needed to malloc *<b>data</b> because the original bytes were
1161 * noncontiguous; 0 otherwise. Return the number of bytes actually available
1162 * at *<b>data_out</b>.
1165 inspect_evbuffer(struct evbuffer
*buf
, char **data_out
, size_t n
,
1166 int *free_out
, struct evbuffer_ptr
*pos
)
1170 if (evbuffer_get_length(buf
) < n
)
1171 n
= evbuffer_get_length(buf
);
1174 n_vecs
= evbuffer_peek(buf
, n
, pos
, NULL
, 0);
1175 tor_assert(n_vecs
> 0);
1177 struct evbuffer_iovec v
;
1178 i
= evbuffer_peek(buf
, n
, pos
, &v
, 1);
1180 *data_out
= v
.iov_base
;
1185 *data_out
= tor_malloc(n
);
1187 copied
= evbuffer_copyout(buf
, *data_out
, n
);
1188 tor_assert(copied
>= 0 && (size_t)copied
== n
);
1193 /** As fetch_var_cell_from_buf, buf works on an evbuffer. */
1195 fetch_var_cell_from_evbuffer(struct evbuffer
*buf
, var_cell_t
**out
,
1203 uint16_t cell_length
;
1206 const int wide_circ_ids
= linkproto
>= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS
;
1207 const int circ_id_len
= get_circ_id_size(wide_circ_ids
);
1208 const unsigned header_len
= get_var_cell_header_size(wide_circ_ids
);
1211 buf_len
= evbuffer_get_length(buf
);
1212 if (buf_len
< header_len
)
1215 n
= inspect_evbuffer(buf
, &hdr
, header_len
, &free_hdr
, NULL
);
1216 tor_assert(n
>= header_len
);
1218 command
= get_uint8(hdr
+ circ_id_len
);
1219 if (!(cell_command_is_var_length(command
, linkproto
))) {
1223 cell_length
= ntohs(get_uint16(hdr
+ circ_id_len
+ 1));
1224 if (buf_len
< (size_t)(header_len
+cell_length
)) {
1225 result
= 1; /* Not all here yet. */
1229 cell
= var_cell_new(cell_length
);
1230 cell
->command
= command
;
1232 cell
->circ_id
= ntohl(get_uint32(hdr
));
1234 cell
->circ_id
= ntohs(get_uint16(hdr
));
1235 evbuffer_drain(buf
, header_len
);
1236 evbuffer_remove(buf
, cell
->payload
, cell_length
);
1241 if (free_hdr
&& hdr
)
1247 /** Move up to *<b>buf_flushlen</b> bytes from <b>buf_in</b> to
1248 * <b>buf_out</b>, and modify *<b>buf_flushlen</b> appropriately.
1249 * Return the number of bytes actually copied.
1252 move_buf_to_buf(buf_t
*buf_out
, buf_t
*buf_in
, size_t *buf_flushlen
)
1254 /* We can do way better here, but this doesn't turn up in any profiles. */
1257 len
= *buf_flushlen
;
1258 if (len
> buf_in
->datalen
)
1259 len
= buf_in
->datalen
;
1261 cp
= len
; /* Remember the number of bytes we intend to copy. */
1262 tor_assert(cp
< INT_MAX
);
1264 /* This isn't the most efficient implementation one could imagine, since
1265 * it does two copies instead of 1, but I kinda doubt that this will be
1267 size_t n
= len
> sizeof(b
) ? sizeof(b
) : len
;
1268 fetch_from_buf(b
, n
, buf_in
);
1269 write_to_buf(b
, n
, buf_out
);
1272 *buf_flushlen
-= cp
;
1276 /** Internal structure: represents a position in a buffer. */
1277 typedef struct buf_pos_t
{
1278 const chunk_t
*chunk
; /**< Which chunk are we pointing to? */
1279 int pos
;/**< Which character inside the chunk's data are we pointing to? */
1280 size_t chunk_pos
; /**< Total length of all previous chunks. */
1283 /** Initialize <b>out</b> to point to the first character of <b>buf</b>.*/
1285 buf_pos_init(const buf_t
*buf
, buf_pos_t
*out
)
1287 out
->chunk
= buf
->head
;
1292 /** Advance <b>out</b> to the first appearance of <b>ch</b> at the current
1293 * position of <b>out</b>, or later. Return -1 if no instances are found;
1294 * otherwise returns the absolute position of the character. */
1296 buf_find_pos_of_char(char ch
, buf_pos_t
*out
)
1298 const chunk_t
*chunk
;
1302 if (out
->chunk
->datalen
) {
1303 tor_assert(out
->pos
< (off_t
)out
->chunk
->datalen
);
1305 tor_assert(out
->pos
== 0);
1309 for (chunk
= out
->chunk
; chunk
; chunk
= chunk
->next
) {
1310 char *cp
= memchr(chunk
->data
+pos
, ch
, chunk
->datalen
- pos
);
1313 tor_assert(cp
- chunk
->data
< INT_MAX
);
1314 out
->pos
= (int)(cp
- chunk
->data
);
1315 return out
->chunk_pos
+ out
->pos
;
1317 out
->chunk_pos
+= chunk
->datalen
;
1324 /** Advance <b>pos</b> by a single character, if there are any more characters
1325 * in the buffer. Returns 0 on success, -1 on failure. */
1327 buf_pos_inc(buf_pos_t
*pos
)
1330 if (pos
->pos
== (off_t
)pos
->chunk
->datalen
) {
1331 if (!pos
->chunk
->next
)
1333 pos
->chunk_pos
+= pos
->chunk
->datalen
;
1334 pos
->chunk
= pos
->chunk
->next
;
1340 /** Return true iff the <b>n</b>-character string in <b>s</b> appears
1341 * (verbatim) at <b>pos</b>. */
1343 buf_matches_at_pos(const buf_pos_t
*pos
, const char *s
, size_t n
)
1349 memcpy(&p
, pos
, sizeof(p
));
1352 char ch
= p
.chunk
->data
[p
.pos
];
1356 /* If we're out of characters that don't match, we match. Check this
1357 * _before_ we test incrementing pos, in case we're at the end of the
1361 if (buf_pos_inc(&p
)<0)
1366 /** Return the first position in <b>buf</b> at which the <b>n</b>-character
1367 * string <b>s</b> occurs, or -1 if it does not occur. */
1369 buf_find_string_offset(const buf_t
*buf
, const char *s
, size_t n
)
1372 buf_pos_init(buf
, &pos
);
1373 while (buf_find_pos_of_char(*s
, &pos
) >= 0) {
1374 if (buf_matches_at_pos(&pos
, s
, n
)) {
1375 tor_assert(pos
.chunk_pos
+ pos
.pos
< INT_MAX
);
1376 return (int)(pos
.chunk_pos
+ pos
.pos
);
1378 if (buf_pos_inc(&pos
)<0)
1385 /** There is a (possibly incomplete) http statement on <b>buf</b>, of the
1386 * form "\%s\\r\\n\\r\\n\%s", headers, body. (body may contain NULs.)
1387 * If a) the headers include a Content-Length field and all bytes in
1388 * the body are present, or b) there's no Content-Length field and
1389 * all headers are present, then:
1391 * - strdup headers into <b>*headers_out</b>, and NUL-terminate it.
1392 * - memdup body into <b>*body_out</b>, and NUL-terminate it.
1393 * - Then remove them from <b>buf</b>, and return 1.
1395 * - If headers or body is NULL, discard that part of the buf.
1396 * - If a headers or body doesn't fit in the arg, return -1.
1397 * (We ensure that the headers or body don't exceed max len,
1398 * _even if_ we're planning to discard them.)
1399 * - If force_complete is true, then succeed even if not all of the
1400 * content has arrived.
1402 * Else, change nothing and return 0.
1405 fetch_from_buf_http(buf_t
*buf
,
1406 char **headers_out
, size_t max_headerlen
,
1407 char **body_out
, size_t *body_used
, size_t max_bodylen
,
1411 size_t headerlen
, bodylen
, contentlen
;
1418 crlf_offset
= buf_find_string_offset(buf
, "\r\n\r\n", 4);
1419 if (crlf_offset
> (int)max_headerlen
||
1420 (crlf_offset
< 0 && buf
->datalen
> max_headerlen
)) {
1421 log_debug(LD_HTTP
,"headers too long.");
1423 } else if (crlf_offset
< 0) {
1424 log_debug(LD_HTTP
,"headers not all here yet.");
1427 /* Okay, we have a full header. Make sure it all appears in the first
1429 if ((int)buf
->head
->datalen
< crlf_offset
+ 4)
1430 buf_pullup(buf
, crlf_offset
+4, 0);
1431 headerlen
= crlf_offset
+ 4;
1433 headers
= buf
->head
->data
;
1434 bodylen
= buf
->datalen
- headerlen
;
1435 log_debug(LD_HTTP
,"headerlen %d, bodylen %d.", (int)headerlen
, (int)bodylen
);
1437 if (max_headerlen
<= headerlen
) {
1438 log_warn(LD_HTTP
,"headerlen %d larger than %d. Failing.",
1439 (int)headerlen
, (int)max_headerlen
-1);
1442 if (max_bodylen
<= bodylen
) {
1443 log_warn(LD_HTTP
,"bodylen %d larger than %d. Failing.",
1444 (int)bodylen
, (int)max_bodylen
-1);
1448 #define CONTENT_LENGTH "\r\nContent-Length: "
1449 p
= (char*) tor_memstr(headers
, headerlen
, CONTENT_LENGTH
);
1452 i
= atoi(p
+strlen(CONTENT_LENGTH
));
1454 log_warn(LD_PROTOCOL
, "Content-Length is less than zero; it looks like "
1455 "someone is trying to crash us.");
1459 /* if content-length is malformed, then our body length is 0. fine. */
1460 log_debug(LD_HTTP
,"Got a contentlen of %d.",(int)contentlen
);
1461 if (bodylen
< contentlen
) {
1462 if (!force_complete
) {
1463 log_debug(LD_HTTP
,"body not all here yet.");
1464 return 0; /* not all there yet */
1467 if (bodylen
> contentlen
) {
1468 bodylen
= contentlen
;
1469 log_debug(LD_HTTP
,"bodylen reduced to %d.",(int)bodylen
);
1472 /* all happy. copy into the appropriate places, and return 1 */
1474 *headers_out
= tor_malloc(headerlen
+1);
1475 fetch_from_buf(*headers_out
, headerlen
, buf
);
1476 (*headers_out
)[headerlen
] = 0; /* NUL terminate it */
1479 tor_assert(body_used
);
1480 *body_used
= bodylen
;
1481 *body_out
= tor_malloc(bodylen
+1);
1482 fetch_from_buf(*body_out
, bodylen
, buf
);
1483 (*body_out
)[bodylen
] = 0; /* NUL terminate it */
#ifdef USE_BUFFEREVENTS
/** As fetch_from_buf_http, but works on an evbuffer. */
int
fetch_from_evbuffer_http(struct evbuffer *buf,
                    char **headers_out, size_t max_headerlen,
                    char **body_out, size_t *body_used, size_t max_bodylen,
                    int force_complete)
{
  struct evbuffer_ptr crlf, content_length;
  size_t headerlen, bodylen, contentlen;

  /* Find the first \r\n\r\n in the buffer */
  crlf = evbuffer_search(buf, "\r\n\r\n", 4, NULL);
  if (crlf.pos < 0) {
    /* We didn't find one. */
    if (evbuffer_get_length(buf) > max_headerlen)
      return -1; /* Headers too long. */
    return 0; /* Headers not here yet. */
  } else if (crlf.pos > (int)max_headerlen) {
    return -1; /* Headers too long. */
  }

  headerlen = crlf.pos + 4; /* Skip over the \r\n\r\n */
  bodylen = evbuffer_get_length(buf) - headerlen;
  if (bodylen > max_bodylen)
    return -1; /* body too long */

  /* Look for the first occurrence of CONTENT_LENGTH insize buf before the
   * crlf. */
  content_length = evbuffer_search_range(buf, CONTENT_LENGTH,
                                         strlen(CONTENT_LENGTH),
                                         NULL, &crlf);

  if (content_length.pos >= 0) {
    /* We found a content_length: parse it and figure out if the body is here
     * yet. */
    struct evbuffer_ptr eol;
    char *data = NULL;
    int free_data = 0;
    int n, i;
    n = evbuffer_ptr_set(buf, &content_length, strlen(CONTENT_LENGTH),
                         EVBUFFER_PTR_ADD);
    tor_assert(n == 0);
    eol = evbuffer_search_eol(buf, &content_length, NULL, EVBUFFER_EOL_CRLF);
    tor_assert(eol.pos > content_length.pos);
    tor_assert(eol.pos <= crlf.pos);
    inspect_evbuffer(buf, &data, eol.pos - content_length.pos, &free_data,
                     &content_length);

    i = atoi(data);
    if (free_data)
      tor_free(data);
    if (i < 0) {
      log_warn(LD_PROTOCOL, "Content-Length is less than zero; it looks like "
               "someone is trying to crash us.");
      return -1;
    }
    contentlen = i;
    /* if content-length is malformed, then our body length is 0. fine. */
    log_debug(LD_HTTP,"Got a contentlen of %d.",(int)contentlen);
    if (bodylen < contentlen) {
      if (!force_complete) {
        log_debug(LD_HTTP,"body not all here yet.");
        return 0; /* not all there yet */
      }
    }
    if (bodylen > contentlen) {
      bodylen = contentlen;
      log_debug(LD_HTTP,"bodylen reduced to %d.",(int)bodylen);
    }
  }

  if (headers_out) {
    *headers_out = tor_malloc(headerlen+1);
    evbuffer_remove(buf, *headers_out, headerlen);
    (*headers_out)[headerlen] = '\0';
  }
  if (body_out) {
    tor_assert(headers_out);
    tor_assert(body_used);
    *body_used = bodylen;
    *body_out = tor_malloc(bodylen+1);
    evbuffer_remove(buf, *body_out, bodylen);
    (*body_out)[bodylen] = '\0';
  }
  return 1;
}
#endif
1578 * Wait this many seconds before warning the user about using SOCKS unsafely
1579 * again (requires that WarnUnsafeSocks is turned on). */
1580 #define SOCKS_WARN_INTERVAL 5
1582 /** Warn that the user application has made an unsafe socks request using
1583 * protocol <b>socks_protocol</b> on port <b>port</b>. Don't warn more than
1584 * once per SOCKS_WARN_INTERVAL, unless <b>safe_socks</b> is set. */
1586 log_unsafe_socks_warning(int socks_protocol
, const char *address
,
1587 uint16_t port
, int safe_socks
)
1589 static ratelim_t socks_ratelim
= RATELIM_INIT(SOCKS_WARN_INTERVAL
);
1591 const or_options_t
*options
= get_options();
1592 if (! options
->WarnUnsafeSocks
)
1595 log_fn_ratelim(&socks_ratelim
, LOG_WARN
, LD_APP
,
1596 "Your application (using socks%d to port %d) is giving "
1597 "Tor only an IP address. Applications that do DNS resolves "
1598 "themselves may leak information. Consider using Socks4A "
1599 "(e.g. via privoxy or socat) instead. For more information, "
1600 "please see https://wiki.torproject.org/TheOnionRouter/"
1601 "TorFAQ#SOCKSAndDNS.%s",
1604 safe_socks
? " Rejecting." : "");
1606 control_event_client_status(LOG_WARN
,
1607 "DANGEROUS_SOCKS PROTOCOL=SOCKS%d ADDRESS=%s:%d",
1608 socks_protocol
, address
, (int)port
);
1611 /** Do not attempt to parse socks messages longer than this. This value is
1612 * actually significantly higher than the longest possible socks message. */
1613 #define MAX_SOCKS_MESSAGE_LEN 512
1615 /** Return a new socks_request_t. */
1617 socks_request_new(void)
1619 return tor_malloc_zero(sizeof(socks_request_t
));
1622 /** Free all storage held in the socks_request_t <b>req</b>. */
1624 socks_request_free(socks_request_t
*req
)
1628 if (req
->username
) {
1629 memwipe(req
->username
, 0x10, req
->usernamelen
);
1630 tor_free(req
->username
);
1632 if (req
->password
) {
1633 memwipe(req
->password
, 0x04, req
->passwordlen
);
1634 tor_free(req
->password
);
1636 memwipe(req
, 0xCC, sizeof(socks_request_t
));
1640 /** There is a (possibly incomplete) socks handshake on <b>buf</b>, of one
1642 * - socks4: "socksheader username\\0"
1643 * - socks4a: "socksheader username\\0 destaddr\\0"
1644 * - socks5 phase one: "version #methods methods"
1645 * - socks5 phase two: "version command 0 addresstype..."
1646 * If it's a complete and valid handshake, and destaddr fits in
1647 * MAX_SOCKS_ADDR_LEN bytes, then pull the handshake off the buf,
1648 * assign to <b>req</b>, and return 1.
1650 * If it's invalid or too big, return -1.
1652 * Else it's not all there yet, leave buf alone and return 0.
1654 * If you want to specify the socks reply, write it into <b>req->reply</b>
1655 * and set <b>req->replylen</b>, else leave <b>req->replylen</b> alone.
1657 * If <b>log_sockstype</b> is non-zero, then do a notice-level log of whether
1658 * the connection is possibly leaking DNS requests locally or not.
1660 * If <b>safe_socks</b> is true, then reject unsafe socks protocols.
1662 * If returning 0 or -1, <b>req->address</b> and <b>req->port</b> are
1666 fetch_from_buf_socks(buf_t
*buf
, socks_request_t
*req
,
1667 int log_sockstype
, int safe_socks
)
1671 size_t want_length
= 128;
1673 if (buf
->datalen
< 2) /* version and another byte */
1678 buf_pullup(buf
, want_length
, 0);
1679 tor_assert(buf
->head
&& buf
->head
->datalen
>= 2);
1682 res
= parse_socks(buf
->head
->data
, buf
->head
->datalen
, req
, log_sockstype
,
1683 safe_socks
, &n_drain
, &want_length
);
1687 else if (n_drain
> 0)
1688 buf_remove_from_front(buf
, n_drain
);
1690 } while (res
== 0 && buf
->head
&& want_length
< buf
->datalen
&&
#ifdef USE_BUFFEREVENTS
/* As fetch_from_buf_socks(), but targets an evbuffer instead. */
int
fetch_from_evbuffer_socks(struct evbuffer *buf, socks_request_t *req,
                          int log_sockstype, int safe_socks)
{
  char *data;
  ssize_t n_drain;
  size_t datalen, buflen, want_length;
  int res;

  buflen = evbuffer_get_length(buf);
  if (buflen < 2)
    return 0;

  {
    /* See if we can find the socks request in the first chunk of the buffer.
     */
    struct evbuffer_iovec v;
    int i;
    n_drain = 0;
    i = evbuffer_peek(buf, -1, NULL, &v, 1);
    tor_assert(i == 1);
    data = v.iov_base;
    datalen = v.iov_len;
    want_length = 0;

    res = parse_socks(data, datalen, req, log_sockstype,
                      safe_socks, &n_drain, &want_length);

    if (n_drain < 0)
      evbuffer_drain(buf, evbuffer_get_length(buf));
    else if (n_drain > 0)
      evbuffer_drain(buf, n_drain);

    if (res)
      return res;
  }

  /* Okay, the first chunk of the buffer didn't have a complete socks request.
   * That means that either we don't have a whole socks request at all, or
   * it's gotten split up.  We're going to try passing parse_socks() bigger
   * and bigger chunks until either it says "Okay, I got it", or it says it
   * will need more data than we currently have. */

  /* Loop while we have more data that we haven't given parse_socks() yet. */
  do {
    int free_data = 0;
    const size_t last_wanted = want_length;
    n_drain = 0;
    data = NULL;
    datalen = inspect_evbuffer(buf, &data, want_length, &free_data, NULL);

    want_length = 0;
    res = parse_socks(data, datalen, req, log_sockstype,
                      safe_socks, &n_drain, &want_length);

    if (free_data)
      tor_free(data);

    if (n_drain < 0)
      evbuffer_drain(buf, evbuffer_get_length(buf));
    else if (n_drain > 0)
      evbuffer_drain(buf, n_drain);

    if (res == 0 && n_drain == 0 && want_length <= last_wanted) {
      /* If we drained nothing, and we didn't ask for more than last time,
       * then we probably wanted more data than the buffer actually had,
       * and we're finding out that we're not satisified with it. It's
       * time to break until we have more data. */
      break;
    }

    buflen = evbuffer_get_length(buf);
  } while (res == 0 && want_length <= buflen && buflen >= 2);

  return res;
}
#endif
1776 /** The size of the header of an Extended ORPort message: 2 bytes for
1777 * COMMAND, 2 bytes for BODYLEN */
1778 #define EXT_OR_CMD_HEADER_SIZE 4
1780 /** Read <b>buf</b>, which should contain an Extended ORPort message
1781 * from a transport proxy. If well-formed, create and populate
1782 * <b>out</b> with the Extended ORport message. Return 0 if the
1783 * buffer was incomplete, 1 if it was well-formed and -1 if we
1784 * encountered an error while parsing it. */
1786 fetch_ext_or_command_from_buf(buf_t
*buf
, ext_or_cmd_t
**out
)
1788 char hdr
[EXT_OR_CMD_HEADER_SIZE
];
1792 if (buf
->datalen
< EXT_OR_CMD_HEADER_SIZE
)
1794 peek_from_buf(hdr
, sizeof(hdr
), buf
);
1795 len
= ntohs(get_uint16(hdr
+2));
1796 if (buf
->datalen
< (unsigned)len
+ EXT_OR_CMD_HEADER_SIZE
)
1798 *out
= ext_or_cmd_new(len
);
1799 (*out
)->cmd
= ntohs(get_uint16(hdr
));
1801 buf_remove_from_front(buf
, EXT_OR_CMD_HEADER_SIZE
);
1802 fetch_from_buf((*out
)->body
, len
, buf
);
#ifdef USE_BUFFEREVENTS
/** Read <b>buf</b>, which should contain an Extended ORPort message
 *  from a transport proxy. If well-formed, create and populate
 *  <b>out</b> with the Extended ORport message. Return 0 if the
 *  buffer was incomplete, 1 if it was well-formed and -1 if we
 *  encountered an error while parsing it.  */
int
fetch_ext_or_command_from_evbuffer(struct evbuffer *buf, ext_or_cmd_t **out)
{
  char hdr[EXT_OR_CMD_HEADER_SIZE];
  uint16_t len;
  size_t buf_len = evbuffer_get_length(buf);

  if (buf_len < EXT_OR_CMD_HEADER_SIZE)
    return 0;
  evbuffer_copyout(buf, hdr, EXT_OR_CMD_HEADER_SIZE);
  len = ntohs(get_uint16(hdr+2));
  /* Wait until the whole body has arrived before consuming anything. */
  if (buf_len < (unsigned)len + EXT_OR_CMD_HEADER_SIZE)
    return 0;
  *out = ext_or_cmd_new(len);
  (*out)->cmd = ntohs(get_uint16(hdr));
  (*out)->len = len;
  evbuffer_drain(buf, EXT_OR_CMD_HEADER_SIZE);
  evbuffer_remove(buf, (*out)->body, len);
  return 1;
}
#endif
1834 /** Implementation helper to implement fetch_from_*_socks. Instead of looking
1835 * at a buffer's contents, we look at the <b>datalen</b> bytes of data in
1836 * <b>data</b>. Instead of removing data from the buffer, we set
1837 * <b>drain_out</b> to the amount of data that should be removed (or -1 if the
1838 * buffer should be cleared). Instead of pulling more data into the first
1839 * chunk of the buffer, we set *<b>want_length_out</b> to the number of bytes
1840 * we'd like to see in the input buffer, if they're available. */
1842 parse_socks(const char *data
, size_t datalen
, socks_request_t
*req
,
1843 int log_sockstype
, int safe_socks
, ssize_t
*drain_out
,
1844 size_t *want_length_out
)
1847 char tmpbuf
[TOR_ADDR_BUF_LEN
+1];
1848 tor_addr_t destaddr
;
1851 char *next
, *startaddr
;
1852 unsigned char usernamelen
, passlen
;
1856 /* We always need at least 2 bytes. */
1857 *want_length_out
= 2;
1861 if (req
->socks_version
== 5 && !req
->got_auth
) {
1862 /* See if we have received authentication. Strictly speaking, we should
1863 also check whether we actually negotiated username/password
1864 authentication. But some broken clients will send us authentication
1865 even if we negotiated SOCKS_NO_AUTH. */
1866 if (*data
== 1) { /* username/pass version 1 */
1867 /* Format is: authversion [1 byte] == 1
1868 usernamelen [1 byte]
1869 username [usernamelen bytes]
1871 password [passlen bytes] */
1872 usernamelen
= (unsigned char)*(data
+ 1);
1873 if (datalen
< 2u + usernamelen
+ 1u) {
1874 *want_length_out
= 2u + usernamelen
+ 1u;
1877 passlen
= (unsigned char)*(data
+ 2u + usernamelen
);
1878 if (datalen
< 2u + usernamelen
+ 1u + passlen
) {
1879 *want_length_out
= 2u + usernamelen
+ 1u + passlen
;
1882 req
->replylen
= 2; /* 2 bytes of response */
1883 req
->reply
[0] = 1; /* authversion == 1 */
1884 req
->reply
[1] = 0; /* authentication successful */
1886 "socks5: Accepted username/password without checking.");
1888 req
->username
= tor_memdup(data
+2u, usernamelen
);
1889 req
->usernamelen
= usernamelen
;
1892 req
->password
= tor_memdup(data
+3u+usernamelen
, passlen
);
1893 req
->passwordlen
= passlen
;
1895 *drain_out
= 2u + usernamelen
+ 1u + passlen
;
1897 *want_length_out
= 7; /* Minimal socks5 sommand. */
1899 } else if (req
->auth_type
== SOCKS_USER_PASS
) {
1900 /* unknown version byte */
1901 log_warn(LD_APP
, "Socks5 username/password version %d not recognized; "
1902 "rejecting.", (int)*data
);
1909 switch (socksver
) { /* which version of socks? */
1910 case 5: /* socks5 */
1912 if (req
->socks_version
!= 5) { /* we need to negotiate a method */
1913 unsigned char nummethods
= (unsigned char)*(data
+1);
1914 int have_user_pass
, have_no_auth
;
1916 tor_assert(!req
->socks_version
);
1917 if (datalen
< 2u+nummethods
) {
1918 *want_length_out
= 2u+nummethods
;
1923 req
->replylen
= 2; /* 2 bytes of response */
1924 req
->reply
[0] = 5; /* socks5 reply */
1925 have_user_pass
= (memchr(data
+2, SOCKS_USER_PASS
, nummethods
) !=NULL
);
1926 have_no_auth
= (memchr(data
+2, SOCKS_NO_AUTH
, nummethods
) !=NULL
);
1927 if (have_user_pass
&& !(have_no_auth
&& req
->socks_prefer_no_auth
)) {
1928 req
->auth_type
= SOCKS_USER_PASS
;
1929 req
->reply
[1] = SOCKS_USER_PASS
; /* tell client to use "user/pass"
1931 req
->socks_version
= 5; /* remember we've already negotiated auth */
1932 log_debug(LD_APP
,"socks5: accepted method 2 (username/password)");
1934 } else if (have_no_auth
) {
1935 req
->reply
[1] = SOCKS_NO_AUTH
; /* tell client to use "none" auth
1937 req
->socks_version
= 5; /* remember we've already negotiated auth */
1938 log_debug(LD_APP
,"socks5: accepted method 0 (no authentication)");
1942 "socks5: offered methods don't include 'no auth' or "
1943 "username/password. Rejecting.");
1944 req
->reply
[1] = '\xFF'; /* reject all methods */
1947 /* Remove packet from buf. Some SOCKS clients will have sent extra
1948 * junk at this point; let's hope it's an authentication message. */
1949 *drain_out
= 2u + nummethods
;
1953 if (req
->auth_type
!= SOCKS_NO_AUTH
&& !req
->got_auth
) {
1955 "socks5: negotiated authentication, but none provided");
1958 /* we know the method; read in the request */
1959 log_debug(LD_APP
,"socks5: checking request");
1960 if (datalen
< 7) {/* basic info plus >=1 for addr plus 2 for port */
1961 *want_length_out
= 7;
1962 return 0; /* not yet */
1964 req
->command
= (unsigned char) *(data
+1);
1965 if (req
->command
!= SOCKS_COMMAND_CONNECT
&&
1966 req
->command
!= SOCKS_COMMAND_RESOLVE
&&
1967 req
->command
!= SOCKS_COMMAND_RESOLVE_PTR
) {
1968 /* not a connect or resolve or a resolve_ptr? we don't support it. */
1969 log_warn(LD_APP
,"socks5: command %d not recognized. Rejecting.",
1973 switch (*(data
+3)) { /* address type */
1974 case 1: /* IPv4 address */
1975 case 4: /* IPv6 address */ {
1976 const int is_v6
= *(data
+3) == 4;
1977 const unsigned addrlen
= is_v6
? 16 : 4;
1978 log_debug(LD_APP
,"socks5: ipv4 address type");
1979 if (datalen
< 6+addrlen
) {/* ip/port there? */
1980 *want_length_out
= 6+addrlen
;
1981 return 0; /* not yet */
1985 tor_addr_from_ipv6_bytes(&destaddr
, data
+4);
1987 tor_addr_from_ipv4n(&destaddr
, get_uint32(data
+4));
1989 tor_addr_to_str(tmpbuf
, &destaddr
, sizeof(tmpbuf
), 1);
1991 if (strlen(tmpbuf
)+1 > MAX_SOCKS_ADDR_LEN
) {
1993 "socks5 IP takes %d bytes, which doesn't fit in %d. "
1995 (int)strlen(tmpbuf
)+1,(int)MAX_SOCKS_ADDR_LEN
);
1998 strlcpy(req
->address
,tmpbuf
,sizeof(req
->address
));
1999 req
->port
= ntohs(get_uint16(data
+4+addrlen
));
2000 *drain_out
= 6+addrlen
;
2001 if (req
->command
!= SOCKS_COMMAND_RESOLVE_PTR
&&
2002 !addressmap_have_mapping(req
->address
,0)) {
2003 log_unsafe_socks_warning(5, req
->address
, req
->port
, safe_socks
);
2010 log_debug(LD_APP
,"socks5: fqdn address type");
2011 if (req
->command
== SOCKS_COMMAND_RESOLVE_PTR
) {
2012 log_warn(LD_APP
, "socks5 received RESOLVE_PTR command with "
2013 "hostname type. Rejecting.");
2016 len
= (unsigned char)*(data
+4);
2017 if (datalen
< 7+len
) { /* addr/port there? */
2018 *want_length_out
= 7+len
;
2019 return 0; /* not yet */
2021 if (len
+1 > MAX_SOCKS_ADDR_LEN
) {
2023 "socks5 hostname is %d bytes, which doesn't fit in "
2024 "%d. Rejecting.", len
+1,MAX_SOCKS_ADDR_LEN
);
2027 memcpy(req
->address
,data
+5,len
);
2028 req
->address
[len
] = 0;
2029 req
->port
= ntohs(get_uint16(data
+5+len
));
2030 *drain_out
= 5+len
+2;
2031 if (!tor_strisprint(req
->address
) || strchr(req
->address
,'\"')) {
2032 log_warn(LD_PROTOCOL
,
2033 "Your application (using socks5 to port %d) gave Tor "
2034 "a malformed hostname: %s. Rejecting the connection.",
2035 req
->port
, escaped(req
->address
));
2040 "Your application (using socks5 to port %d) instructed "
2041 "Tor to take care of the DNS resolution itself if "
2042 "necessary. This is good.", req
->port
);
2044 default: /* unsupported */
2045 log_warn(LD_APP
,"socks5: unsupported address type %d. Rejecting.",
2050 case 4: { /* socks4 */
2051 enum {socks4
, socks4a
} socks4_prot
= socks4a
;
2052 const char *authstart
, *authend
;
2053 /* http://ss5.sourceforge.net/socks4.protocol.txt */
2054 /* http://ss5.sourceforge.net/socks4A.protocol.txt */
2056 req
->socks_version
= 4;
2057 if (datalen
< SOCKS4_NETWORK_LEN
) {/* basic info available? */
2058 *want_length_out
= SOCKS4_NETWORK_LEN
;
2059 return 0; /* not yet */
2061 // buf_pullup(buf, 1280, 0);
2062 req
->command
= (unsigned char) *(data
+1);
2063 if (req
->command
!= SOCKS_COMMAND_CONNECT
&&
2064 req
->command
!= SOCKS_COMMAND_RESOLVE
) {
2065 /* not a connect or resolve? we don't support it. (No resolve_ptr with
2067 log_warn(LD_APP
,"socks4: command %d not recognized. Rejecting.",
2072 req
->port
= ntohs(get_uint16(data
+2));
2073 destip
= ntohl(get_uint32(data
+4));
2074 if ((!req
->port
&& req
->command
!=SOCKS_COMMAND_RESOLVE
) || !destip
) {
2075 log_warn(LD_APP
,"socks4: Port or DestIP is zero. Rejecting.");
2079 log_debug(LD_APP
,"socks4: destip not in form 0.0.0.x.");
2080 in
.s_addr
= htonl(destip
);
2081 tor_inet_ntoa(&in
,tmpbuf
,sizeof(tmpbuf
));
2082 if (strlen(tmpbuf
)+1 > MAX_SOCKS_ADDR_LEN
) {
2083 log_debug(LD_APP
,"socks4 addr (%d bytes) too long. Rejecting.",
2084 (int)strlen(tmpbuf
));
2088 "socks4: successfully read destip (%s)",
2089 safe_str_client(tmpbuf
));
2090 socks4_prot
= socks4
;
2093 authstart
= data
+ SOCKS4_NETWORK_LEN
;
2094 next
= memchr(authstart
, 0,
2095 datalen
-SOCKS4_NETWORK_LEN
);
2097 if (datalen
>= 1024) {
2098 log_debug(LD_APP
, "Socks4 user name too long; rejecting.");
2101 log_debug(LD_APP
,"socks4: Username not here yet.");
2102 *want_length_out
= datalen
+1024; /* More than we need, but safe */
2106 tor_assert(next
< data
+datalen
);
2109 if (socks4_prot
!= socks4a
&&
2110 !addressmap_have_mapping(tmpbuf
,0)) {
2111 log_unsafe_socks_warning(4, tmpbuf
, req
->port
, safe_socks
);
2116 if (socks4_prot
== socks4a
) {
2117 if (next
+1 == data
+datalen
) {
2118 log_debug(LD_APP
,"socks4: No part of destaddr here yet.");
2119 *want_length_out
= datalen
+ 1024; /* More than we need, but safe */
2123 next
= memchr(startaddr
, 0, data
+ datalen
- startaddr
);
2125 if (datalen
>= 1024) {
2126 log_debug(LD_APP
,"socks4: Destaddr too long.");
2129 log_debug(LD_APP
,"socks4: Destaddr not all here yet.");
2130 *want_length_out
= datalen
+ 1024; /* More than we need, but safe */
2133 if (MAX_SOCKS_ADDR_LEN
<= next
-startaddr
) {
2134 log_warn(LD_APP
,"socks4: Destaddr too long. Rejecting.");
2137 // tor_assert(next < buf->cur+buf->datalen);
2141 "Your application (using socks4a to port %d) instructed "
2142 "Tor to take care of the DNS resolution itself if "
2143 "necessary. This is good.", req
->port
);
2145 log_debug(LD_APP
,"socks4: Everything is here. Success.");
2146 strlcpy(req
->address
, startaddr
? startaddr
: tmpbuf
,
2147 sizeof(req
->address
));
2148 if (!tor_strisprint(req
->address
) || strchr(req
->address
,'\"')) {
2149 log_warn(LD_PROTOCOL
,
2150 "Your application (using socks4 to port %d) gave Tor "
2151 "a malformed hostname: %s. Rejecting the connection.",
2152 req
->port
, escaped(req
->address
));
2155 if (authend
!= authstart
) {
2157 req
->usernamelen
= authend
- authstart
;
2158 req
->username
= tor_memdup(authstart
, authend
- authstart
);
2160 /* next points to the final \0 on inbuf */
2161 *drain_out
= next
- data
+ 1;
2165 case 'H': /* head */
2166 case 'P': /* put/post */
2167 case 'C': /* connect */
2168 strlcpy((char*)req
->reply
,
2169 "HTTP/1.0 501 Tor is not an HTTP Proxy\r\n"
2170 "Content-Type: text/html; charset=iso-8859-1\r\n\r\n"
2173 "<title>Tor is not an HTTP Proxy</title>\n"
2176 "<h1>Tor is not an HTTP Proxy</h1>\n"
2178 "It appears you have configured your web browser to use Tor as an HTTP proxy."
2180 "This is not correct: Tor is a SOCKS proxy, not an HTTP proxy.\n"
2181 "Please configure your client accordingly.\n"
2184 "See <a href=\"https://www.torproject.org/documentation.html\">"
2185 "https://www.torproject.org/documentation.html</a> for more "
2187 "<!-- Plus this comment, to make the body response more than 512 bytes, so "
2188 " IE will be willing to display it. Comment comment comment comment "
2189 " comment comment comment comment comment comment comment comment.-->\n"
2193 , MAX_SOCKS_REPLY_LEN
);
2194 req
->replylen
= strlen((char*)req
->reply
)+1;
2196 default: /* version is not socks4 or socks5 */
2198 "Socks version %d not recognized. (Tor is not an http proxy.)",
2201 /* Tell the controller the first 8 bytes. */
2202 char *tmp
= tor_strndup(data
, datalen
< 8 ? datalen
: 8);
2203 control_event_client_status(LOG_WARN
,
2204 "SOCKS_UNKNOWN_PROTOCOL DATA=\"%s\"",
2212 /** Inspect a reply from SOCKS server stored in <b>buf</b> according
2213 * to <b>state</b>, removing the protocol data upon success. Return 0 on
2214 * incomplete response, 1 on success and -1 on error, in which case
2215 * <b>reason</b> is set to a descriptive message (free() when finished
2218 * As a special case, 2 is returned when user/pass is required
2219 * during SOCKS5 handshake and user/pass is configured.
2222 fetch_from_buf_socks_client(buf_t
*buf
, int state
, char **reason
)
2226 if (buf
->datalen
< 2)
2229 buf_pullup(buf
, MAX_SOCKS_MESSAGE_LEN
, 0);
2230 tor_assert(buf
->head
&& buf
->head
->datalen
>= 2);
2232 r
= parse_socks_client((uint8_t*)buf
->head
->data
, buf
->head
->datalen
,
2233 state
, reason
, &drain
);
2235 buf_remove_from_front(buf
, drain
);
#ifdef USE_BUFFEREVENTS
/** As fetch_from_buf_socks_client, but works on an evbuffer */
int
fetch_from_evbuffer_socks_client(struct evbuffer *buf, int state,
                                 char **reason)
{
  ssize_t drain = 0;
  uint8_t *data;
  size_t datalen;
  int r;

  /* Linearize the SOCKS response in the buffer, up to 128 bytes.
   * (parse_socks_client shouldn't need to see anything beyond that.) */
  datalen = evbuffer_get_length(buf);
  if (datalen > MAX_SOCKS_MESSAGE_LEN)
    datalen = MAX_SOCKS_MESSAGE_LEN;
  data = evbuffer_pullup(buf, datalen);

  r = parse_socks_client(data, datalen, state, reason, &drain);
  if (drain > 0)
    evbuffer_drain(buf, drain);
  else if (drain < 0)
    evbuffer_drain(buf, evbuffer_get_length(buf));

  return r;
}
#endif
2270 /** Implementation logic for fetch_from_*_socks_client. */
2272 parse_socks_client(const uint8_t *data
, size_t datalen
,
2273 int state
, char **reason
,
2276 unsigned int addrlen
;
2282 case PROXY_SOCKS4_WANT_CONNECT_OK
:
2283 /* Wait for the complete response */
2287 if (data
[1] != 0x5a) {
2288 *reason
= tor_strdup(socks4_response_code_to_string(data
[1]));
2296 case PROXY_SOCKS5_WANT_AUTH_METHOD_NONE
:
2297 /* we don't have any credentials */
2298 if (data
[1] != 0x00) {
2299 *reason
= tor_strdup("server doesn't support any of our "
2300 "available authentication methods");
2304 log_info(LD_NET
, "SOCKS 5 client: continuing without authentication");
2308 case PROXY_SOCKS5_WANT_AUTH_METHOD_RFC1929
:
2309 /* we have a username and password. return 1 if we can proceed without
2310 * providing authentication, or 2 otherwise. */
2313 log_info(LD_NET
, "SOCKS 5 client: we have auth details but server "
2314 "doesn't require authentication.");
2318 log_info(LD_NET
, "SOCKS 5 client: need authentication.");
2324 *reason
= tor_strdup("server doesn't support any of our available "
2325 "authentication methods");
2328 case PROXY_SOCKS5_WANT_AUTH_RFC1929_OK
:
2329 /* handle server reply to rfc1929 authentication */
2330 if (data
[1] != 0x00) {
2331 *reason
= tor_strdup("authentication failed");
2335 log_info(LD_NET
, "SOCKS 5 client: authentication successful.");
2339 case PROXY_SOCKS5_WANT_CONNECT_OK
:
2340 /* response is variable length. BND.ADDR, etc, isn't needed
2341 * (don't bother with buf_pullup()), but make sure to eat all
2344 /* wait for address type field to arrive */
2349 case 0x01: /* ip4 */
2352 case 0x04: /* ip6 */
2355 case 0x03: /* fqdn (can this happen here?) */
2358 addrlen
= 1 + data
[4];
2361 *reason
= tor_strdup("invalid response to connect request");
2365 /* wait for address and port */
2366 if (datalen
< 6 + addrlen
)
2369 if (data
[1] != 0x00) {
2370 *reason
= tor_strdup(socks5_response_code_to_string(data
[1]));
2374 *drain_out
= 6 + addrlen
;
2378 /* shouldn't get here... */
2384 /** Return 1 iff buf looks more like it has an (obsolete) v0 controller
2385 * command on it than any valid v1 controller command. */
2387 peek_buf_has_control0_command(buf_t
*buf
)
2389 if (buf
->datalen
>= 4) {
2392 peek_from_buf(header
, sizeof(header
), buf
);
2393 cmd
= ntohs(get_uint16(header
+2));
2395 return 1; /* This is definitely not a v1 control command. */
#ifdef USE_BUFFEREVENTS
/** As peek_buf_has_control0_command, but works on an evbuffer. */
int
peek_evbuffer_has_control0_command(struct evbuffer *buf)
{
  int result = 0;
  if (evbuffer_get_length(buf) >= 4) {
    int free_out = 0;
    char *data = NULL;
    size_t n = inspect_evbuffer(buf, &data, 4, &free_out, NULL);
    uint16_t cmd;
    tor_assert(n >= 4);
    cmd = ntohs(get_uint16(data+2));
    if (cmd <= 0x14)
      result = 1;
    if (free_out)
      tor_free(data);
  }
  return result;
}
#endif
2421 /** Return the index within <b>buf</b> at which <b>ch</b> first appears,
2422 * or -1 if <b>ch</b> does not appear on buf. */
2424 buf_find_offset_of_char(buf_t
*buf
, char ch
)
2428 for (chunk
= buf
->head
; chunk
; chunk
= chunk
->next
) {
2429 char *cp
= memchr(chunk
->data
, ch
, chunk
->datalen
);
2431 return offset
+ (cp
- chunk
->data
);
2433 offset
+= chunk
->datalen
;
2438 /** Try to read a single LF-terminated line from <b>buf</b>, and write it
2439 * (including the LF), NUL-terminated, into the *<b>data_len</b> byte buffer
2440 * at <b>data_out</b>. Set *<b>data_len</b> to the number of bytes in the
2441 * line, not counting the terminating NUL. Return 1 if we read a whole line,
2442 * return 0 if we don't have a whole line yet, and return -1 if the line
2443 * length exceeds *<b>data_len</b>.
2446 fetch_from_buf_line(buf_t
*buf
, char *data_out
, size_t *data_len
)
2454 offset
= buf_find_offset_of_char(buf
, '\n');
2457 sz
= (size_t) offset
;
2458 if (sz
+2 > *data_len
) {
2462 fetch_from_buf(data_out
, sz
+1, buf
);
2463 data_out
[sz
+1] = '\0';
2468 /** Compress on uncompress the <b>data_len</b> bytes in <b>data</b> using the
2469 * zlib state <b>state</b>, appending the result to <b>buf</b>. If
2470 * <b>done</b> is true, flush the data in the state and finish the
2471 * compression/uncompression. Return -1 on failure, 0 on success. */
2473 write_to_buf_zlib(buf_t
*buf
, tor_zlib_state_t
*state
,
2474 const char *data
, size_t data_len
,
2478 size_t old_avail
, avail
;
2482 int need_new_chunk
= 0;
2483 if (!buf
->tail
|| ! CHUNK_REMAINING_CAPACITY(buf
->tail
)) {
2484 size_t cap
= data_len
/ 4;
2485 buf_add_chunk_with_capacity(buf
, cap
, 1);
2487 next
= CHUNK_WRITE_PTR(buf
->tail
);
2488 avail
= old_avail
= CHUNK_REMAINING_CAPACITY(buf
->tail
);
2489 switch (tor_zlib_process(state
, &next
, &avail
, &data
, &data_len
, done
)) {
2499 case TOR_ZLIB_BUF_FULL
:
2501 /* Zlib says we need more room (ZLIB_BUF_FULL). Start a new chunk
2502 * automatically, whether were going to or not. */
2507 buf
->datalen
+= old_avail
- avail
;
2508 buf
->tail
->datalen
+= old_avail
- avail
;
2509 if (need_new_chunk
) {
2510 buf_add_chunk_with_capacity(buf
, data_len
/4, 1);
#ifdef USE_BUFFEREVENTS
/** As write_to_buf_zlib, but for an evbuffer. */
int
write_to_evbuffer_zlib(struct evbuffer *buf, tor_zlib_state_t *state,
                       const char *data, size_t data_len,
                       int done)
{
  char *next;
  size_t old_avail, avail;
  int over = 0, n;
  struct evbuffer_iovec vec[1];

  do {
    {
      size_t cap = data_len / 4;
      if (cap < 128)
        cap = 128;
      /* XXXX NM this strategy is fragmentation-prone.  We should really have
       * two iovecs, and write first into the one, and then into the
       * second if the first gets full. */
      n = evbuffer_reserve_space(buf, cap, vec, 1);
      tor_assert(n == 1);
    }

    next = vec[0].iov_base;
    avail = old_avail = vec[0].iov_len;

    switch (tor_zlib_process(state, &next, &avail, &data, &data_len, done)) {
      case TOR_ZLIB_DONE:
        over = 1;
        break;
      case TOR_ZLIB_ERR:
        return -1;
      case TOR_ZLIB_OK:
        if (data_len == 0)
          over = 1; /* All input consumed. */
        break;
      case TOR_ZLIB_BUF_FULL:
        /* Zlib says we need more room (ZLIB_BUF_FULL): commit what we have
         * and reserve a fresh iovec on the next iteration. */
        break;
    }

    /* XXXX possible infinite loop on BUF_FULL. */
    vec[0].iov_len = old_avail - avail;
    evbuffer_commit_space(buf, vec, 1);

  } while (!over);
  return 0;
}
#endif
2571 /** Set *<b>output</b> to contain a copy of the data in *<b>input</b> */
2573 generic_buffer_set_to_copy(generic_buffer_t
**output
,
2574 const generic_buffer_t
*input
)
2576 #ifdef USE_BUFFEREVENTS
2577 struct evbuffer_ptr ptr
;
2578 size_t remaining
= evbuffer_get_length(input
);
2580 evbuffer_drain(*output
, evbuffer_get_length(*output
));
2582 if (!(*output
= evbuffer_new()))
2585 evbuffer_ptr_set((struct evbuffer
*)input
, &ptr
, 0, EVBUFFER_PTR_SET
);
2587 struct evbuffer_iovec v
[4];
2589 n_used
= evbuffer_peek((struct evbuffer
*)input
, -1, &ptr
, v
, 4);
2592 for (i
=0;i
<n_used
;++i
) {
2593 evbuffer_add(*output
, v
[i
].iov_base
, v
[i
].iov_len
);
2594 tor_assert(v
[i
].iov_len
<= remaining
);
2595 remaining
-= v
[i
].iov_len
;
2596 evbuffer_ptr_set((struct evbuffer
*)input
,
2597 &ptr
, v
[i
].iov_len
, EVBUFFER_PTR_ADD
);
2603 *output
= buf_copy(input
);
2608 /** Log an error and exit if <b>buf</b> is corrupted.
2611 assert_buf_ok(buf_t
*buf
)
2614 tor_assert(buf
->magic
== BUFFER_MAGIC
);
2617 tor_assert(!buf
->tail
);
2618 tor_assert(buf
->datalen
== 0);
2622 tor_assert(buf
->tail
);
2623 for (ch
= buf
->head
; ch
; ch
= ch
->next
) {
2624 total
+= ch
->datalen
;
2625 tor_assert(ch
->datalen
<= ch
->memlen
);
2626 tor_assert(ch
->data
>= &ch
->mem
[0]);
2627 tor_assert(ch
->data
< &ch
->mem
[0]+ch
->memlen
);
2628 tor_assert(ch
->data
+ch
->datalen
<= &ch
->mem
[0] + ch
->memlen
);
2630 tor_assert(ch
== buf
->tail
);
2632 tor_assert(buf
->datalen
== total
);
2636 #ifdef ENABLE_BUF_FREELISTS
2637 /** Log an error and exit if <b>fl</b> is corrupted.
2640 assert_freelist_ok(chunk_freelist_t
*fl
)
2644 tor_assert(fl
->alloc_size
> 0);
2646 for (ch
= fl
->head
; ch
; ch
= ch
->next
) {
2647 tor_assert(CHUNK_ALLOC_SIZE(ch
->memlen
) == fl
->alloc_size
);
2650 tor_assert(n
== fl
->cur_length
);
2651 tor_assert(n
>= fl
->lowest_length
);
2652 tor_assert(n
<= fl
->max_length
);