1 /* Copyright (c) 2001 Matej Pfajfar.
2 * Copyright (c) 2001-2004, Roger Dingledine.
3 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
4 * Copyright (c) 2007-2015, The Tor Project, Inc. */
5 /* See LICENSE for licensing information */
9 * \brief Implements a generic interface buffer. Buffers are
10 * fairly opaque string holders that can read to or flush from:
11 * memory, file descriptors, or TLS connections.
13 #define BUFFERS_PRIVATE
15 #include "addressmap.h"
18 #include "connection_edge.h"
19 #include "connection_or.h"
22 #include "ext_orport.h"
32 /** Helper: If PARANOIA is defined, assert that the buffer in local variable
33 * <b>buf</b> is well-formed. */
34 #define check() STMT_BEGIN assert_buf_ok(buf); STMT_END
36 #define check() STMT_NIL
39 /* Implementation notes:
41 * After flirting with memmove, and dallying with ring-buffers, we're finally
42 * getting up to speed with the 1970s and implementing buffers as a linked
43 * list of small chunks. Each buffer has such a list; data is removed from
44 * the head of the list, and added at the tail. The list is singly linked,
45 * and the buffer keeps a pointer to the head and the tail.
47 * Every chunk, except the tail, contains at least one byte of data. Data in
48 * each chunk is contiguous.
50 * When you need to treat the first N characters on a buffer as a contiguous
51 * string, use the buf_pullup function to make them so. Don't do this more
54 * The major free Unix kernels have handled buffers like this since, like,
58 static void socks_request_set_socks5_error(socks_request_t
*req
,
59 socks5_reply_status_t reason
);
61 static int parse_socks(const char *data
, size_t datalen
, socks_request_t
*req
,
62 int log_sockstype
, int safe_socks
, ssize_t
*drain_out
,
63 size_t *want_length_out
);
64 static int parse_socks_client(const uint8_t *data
, size_t datalen
,
65 int state
, char **reason
,
68 /* Chunk manipulation functions */
70 #define CHUNK_HEADER_LEN STRUCT_OFFSET(chunk_t, mem[0])
72 /* We leave this many NUL bytes at the end of the buffer. */
73 #define SENTINEL_LEN 4
75 /* Header size plus NUL bytes at the end */
76 #define CHUNK_OVERHEAD (CHUNK_HEADER_LEN + SENTINEL_LEN)
78 /** Return the number of bytes needed to allocate a chunk to hold
79 * <b>memlen</b> bytes. */
80 #define CHUNK_ALLOC_SIZE(memlen) (CHUNK_OVERHEAD + (memlen))
81 /** Return the number of usable bytes in a chunk allocated with
82 * malloc(<b>memlen</b>). */
83 #define CHUNK_SIZE_WITH_ALLOC(memlen) ((memlen) - CHUNK_OVERHEAD)
85 #define DEBUG_SENTINEL
90 #define DBG_S(s) (void)0
93 #define CHUNK_SET_SENTINEL(chunk, alloclen) do { \
94 uint8_t *a = (uint8_t*) &(chunk)->mem[(chunk)->memlen]; \
95 DBG_S(uint8_t *b = &((uint8_t*)(chunk))[(alloclen)-SENTINEL_LEN]); \
96 DBG_S(tor_assert(a == b)); \
97 memset(a,0,SENTINEL_LEN); \
100 /** Return the next character in <b>chunk</b> onto which data can be appended.
101 * If the chunk is full, this might be off the end of chunk->mem. */
103 CHUNK_WRITE_PTR(chunk_t
*chunk
)
105 return chunk
->data
+ chunk
->datalen
;
108 /** Return the number of bytes that can be written onto <b>chunk</b> without
109 * running out of space. */
111 CHUNK_REMAINING_CAPACITY(const chunk_t
*chunk
)
113 return (chunk
->mem
+ chunk
->memlen
) - (chunk
->data
+ chunk
->datalen
);
116 /** Move all bytes stored in <b>chunk</b> to the front of <b>chunk</b>->mem,
117 * to free up space at the end. */
119 chunk_repack(chunk_t
*chunk
)
121 if (chunk
->datalen
&& chunk
->data
!= &chunk
->mem
[0]) {
122 memmove(chunk
->mem
, chunk
->data
, chunk
->datalen
);
124 chunk
->data
= &chunk
->mem
[0];
127 /** Keep track of total size of allocated chunks for consistency asserts */
128 static size_t total_bytes_allocated_in_chunks
= 0;
130 chunk_free_unchecked(chunk_t
*chunk
)
134 #ifdef DEBUG_CHUNK_ALLOC
135 tor_assert(CHUNK_ALLOC_SIZE(chunk
->memlen
) == chunk
->DBG_alloc
);
137 tor_assert(total_bytes_allocated_in_chunks
>=
138 CHUNK_ALLOC_SIZE(chunk
->memlen
));
139 total_bytes_allocated_in_chunks
-= CHUNK_ALLOC_SIZE(chunk
->memlen
);
142 static INLINE chunk_t
*
143 chunk_new_with_alloc_size(size_t alloc
)
146 ch
= tor_malloc(alloc
);
149 #ifdef DEBUG_CHUNK_ALLOC
150 ch
->DBG_alloc
= alloc
;
152 ch
->memlen
= CHUNK_SIZE_WITH_ALLOC(alloc
);
153 total_bytes_allocated_in_chunks
+= alloc
;
154 ch
->data
= &ch
->mem
[0];
155 CHUNK_SET_SENTINEL(ch
, alloc
);
159 /** Expand <b>chunk</b> until it can hold <b>sz</b> bytes, and return a
160 * new pointer to <b>chunk</b>. Old pointers are no longer valid. */
161 static INLINE chunk_t
*
162 chunk_grow(chunk_t
*chunk
, size_t sz
)
165 const size_t memlen_orig
= chunk
->memlen
;
166 const size_t orig_alloc
= CHUNK_ALLOC_SIZE(memlen_orig
);
167 const size_t new_alloc
= CHUNK_ALLOC_SIZE(sz
);
168 tor_assert(sz
> chunk
->memlen
);
169 offset
= chunk
->data
- chunk
->mem
;
170 chunk
= tor_realloc(chunk
, new_alloc
);
172 chunk
->data
= chunk
->mem
+ offset
;
173 #ifdef DEBUG_CHUNK_ALLOC
174 tor_assert(chunk
->DBG_alloc
== orig_alloc
);
175 chunk
->DBG_alloc
= new_alloc
;
177 total_bytes_allocated_in_chunks
+= new_alloc
- orig_alloc
;
178 CHUNK_SET_SENTINEL(chunk
, new_alloc
);
182 /** If a read onto the end of a chunk would be smaller than this number, then
183 * just start a new chunk. */
184 #define MIN_READ_LEN 8
185 /** Every chunk should take up at least this many bytes. */
186 #define MIN_CHUNK_ALLOC 256
187 /** No chunk should take up more than this many bytes. */
188 #define MAX_CHUNK_ALLOC 65536
190 /** Return the allocation size we'd like to use to hold <b>target</b>
193 preferred_chunk_size(size_t target
)
195 size_t sz
= MIN_CHUNK_ALLOC
;
196 while (CHUNK_SIZE_WITH_ALLOC(sz
) < target
) {
202 /** Collapse data from the first N chunks from <b>buf</b> into buf->head,
203 * growing it as necessary, until buf->head has the first <b>bytes</b> bytes
204 * of data from the buffer, or until buf->head has all the data in <b>buf</b>.
207 buf_pullup(buf_t
*buf
, size_t bytes
)
215 if (buf
->datalen
< bytes
)
216 bytes
= buf
->datalen
;
219 if (buf
->head
->datalen
>= bytes
)
222 if (buf
->head
->memlen
>= capacity
) {
223 /* We don't need to grow the first chunk, but we might need to repack it.*/
224 size_t needed
= capacity
- buf
->head
->datalen
;
225 if (CHUNK_REMAINING_CAPACITY(buf
->head
) < needed
)
226 chunk_repack(buf
->head
);
227 tor_assert(CHUNK_REMAINING_CAPACITY(buf
->head
) >= needed
);
231 /* We need to grow the chunk. */
232 chunk_repack(buf
->head
);
233 newsize
= CHUNK_SIZE_WITH_ALLOC(preferred_chunk_size(capacity
));
234 newhead
= chunk_grow(buf
->head
, newsize
);
235 tor_assert(newhead
->memlen
>= capacity
);
236 if (newhead
!= buf
->head
) {
237 if (buf
->tail
== buf
->head
)
244 while (dest
->datalen
< bytes
) {
245 size_t n
= bytes
- dest
->datalen
;
248 if (n
>= src
->datalen
) {
249 memcpy(CHUNK_WRITE_PTR(dest
), src
->data
, src
->datalen
);
250 dest
->datalen
+= src
->datalen
;
251 dest
->next
= src
->next
;
252 if (buf
->tail
== src
)
254 chunk_free_unchecked(src
);
256 memcpy(CHUNK_WRITE_PTR(dest
), src
->data
, n
);
260 tor_assert(dest
->datalen
== bytes
);
267 #ifdef TOR_UNIT_TESTS
269 buf_get_first_chunk_data(const buf_t
*buf
, const char **cp
, size_t *sz
)
271 if (!buf
|| !buf
->head
) {
275 *cp
= buf
->head
->data
;
276 *sz
= buf
->head
->datalen
;
281 /** Remove the first <b>n</b> bytes from buf. */
283 buf_remove_from_front(buf_t
*buf
, size_t n
)
285 tor_assert(buf
->datalen
>= n
);
287 tor_assert(buf
->head
);
288 if (buf
->head
->datalen
> n
) {
289 buf
->head
->datalen
-= n
;
290 buf
->head
->data
+= n
;
294 chunk_t
*victim
= buf
->head
;
295 n
-= victim
->datalen
;
296 buf
->datalen
-= victim
->datalen
;
297 buf
->head
= victim
->next
;
298 if (buf
->tail
== victim
)
300 chunk_free_unchecked(victim
);
306 /** Create and return a new buf with default chunk capacity <b>size</b>.
309 buf_new_with_capacity(size_t size
)
311 buf_t
*b
= buf_new();
312 b
->default_chunk_size
= preferred_chunk_size(size
);
316 /** Allocate and return a new buffer with default capacity. */
320 buf_t
*buf
= tor_malloc_zero(sizeof(buf_t
));
321 buf
->magic
= BUFFER_MAGIC
;
322 buf
->default_chunk_size
= 4096;
327 buf_get_default_chunk_size(const buf_t
*buf
)
329 return buf
->default_chunk_size
;
332 /** Remove all data from <b>buf</b>. */
334 buf_clear(buf_t
*buf
)
336 chunk_t
*chunk
, *next
;
338 for (chunk
= buf
->head
; chunk
; chunk
= next
) {
340 chunk_free_unchecked(chunk
);
342 buf
->head
= buf
->tail
= NULL
;
345 /** Return the number of bytes stored in <b>buf</b> */
347 buf_datalen
, (const buf_t
*buf
))
352 /** Return the total length of all chunks used in <b>buf</b>. */
354 buf_allocation(const buf_t
*buf
)
357 const chunk_t
*chunk
;
358 for (chunk
= buf
->head
; chunk
; chunk
= chunk
->next
) {
359 total
+= CHUNK_ALLOC_SIZE(chunk
->memlen
);
364 /** Return the number of bytes that can be added to <b>buf</b> without
365 * performing any additional allocation. */
367 buf_slack(const buf_t
*buf
)
372 return CHUNK_REMAINING_CAPACITY(buf
->tail
);
375 /** Release storage held by <b>buf</b>. */
383 buf
->magic
= 0xdeadbeef;
387 /** Return a new copy of <b>in_chunk</b> */
389 chunk_copy(const chunk_t
*in_chunk
)
391 chunk_t
*newch
= tor_memdup(in_chunk
, CHUNK_ALLOC_SIZE(in_chunk
->memlen
));
392 total_bytes_allocated_in_chunks
+= CHUNK_ALLOC_SIZE(in_chunk
->memlen
);
393 #ifdef DEBUG_CHUNK_ALLOC
394 newch
->DBG_alloc
= CHUNK_ALLOC_SIZE(in_chunk
->memlen
);
397 if (in_chunk
->data
) {
398 off_t offset
= in_chunk
->data
- in_chunk
->mem
;
399 newch
->data
= newch
->mem
+ offset
;
404 /** Return a new copy of <b>buf</b> */
406 buf_copy(const buf_t
*buf
)
409 buf_t
*out
= buf_new();
410 out
->default_chunk_size
= buf
->default_chunk_size
;
411 for (ch
= buf
->head
; ch
; ch
= ch
->next
) {
412 chunk_t
*newch
= chunk_copy(ch
);
414 out
->tail
->next
= newch
;
417 out
->head
= out
->tail
= newch
;
420 out
->datalen
= buf
->datalen
;
424 /** Append a new chunk with enough capacity to hold <b>capacity</b> bytes to
425 * the tail of <b>buf</b>. If <b>capped</b>, don't allocate a chunk bigger
426 * than MAX_CHUNK_ALLOC. */
428 buf_add_chunk_with_capacity(buf_t
*buf
, size_t capacity
, int capped
)
432 if (CHUNK_ALLOC_SIZE(capacity
) < buf
->default_chunk_size
) {
433 chunk
= chunk_new_with_alloc_size(buf
->default_chunk_size
);
434 } else if (capped
&& CHUNK_ALLOC_SIZE(capacity
) > MAX_CHUNK_ALLOC
) {
435 chunk
= chunk_new_with_alloc_size(MAX_CHUNK_ALLOC
);
437 chunk
= chunk_new_with_alloc_size(preferred_chunk_size(capacity
));
440 tor_gettimeofday_cached_monotonic(&now
);
441 chunk
->inserted_time
= (uint32_t)tv_to_msec(&now
);
444 tor_assert(buf
->head
);
445 buf
->tail
->next
= chunk
;
448 tor_assert(!buf
->head
);
449 buf
->head
= buf
->tail
= chunk
;
455 /** Return the age of the oldest chunk in the buffer <b>buf</b>, in
456 * milliseconds. Requires the current time, in truncated milliseconds since
457 * the epoch, as its input <b>now</b>.
460 buf_get_oldest_chunk_timestamp(const buf_t
*buf
, uint32_t now
)
463 return now
- buf
->head
->inserted_time
;
470 buf_get_total_allocation(void)
472 return total_bytes_allocated_in_chunks
;
475 /** Read up to <b>at_most</b> bytes from the socket <b>fd</b> into
476 * <b>chunk</b> (which must be on <b>buf</b>). If we get an EOF, set
477 * *<b>reached_eof</b> to 1. Return -1 on error, 0 on eof or blocking,
478 * and the number of bytes read otherwise. */
480 read_to_chunk(buf_t
*buf
, chunk_t
*chunk
, tor_socket_t fd
, size_t at_most
,
481 int *reached_eof
, int *socket_error
)
484 if (at_most
> CHUNK_REMAINING_CAPACITY(chunk
))
485 at_most
= CHUNK_REMAINING_CAPACITY(chunk
);
486 read_result
= tor_socket_recv(fd
, CHUNK_WRITE_PTR(chunk
), at_most
, 0);
488 if (read_result
< 0) {
489 int e
= tor_socket_errno(fd
);
490 if (!ERRNO_IS_EAGAIN(e
)) { /* it's a real error */
493 log_warn(LD_NET
,"recv() failed: WSAENOBUFS. Not enough ram?");
498 return 0; /* would block. */
499 } else if (read_result
== 0) {
500 log_debug(LD_NET
,"Encountered eof on fd %d", (int)fd
);
503 } else { /* actually got bytes. */
504 buf
->datalen
+= read_result
;
505 chunk
->datalen
+= read_result
;
506 log_debug(LD_NET
,"Read %ld bytes. %d on inbuf.", (long)read_result
,
508 tor_assert(read_result
< INT_MAX
);
509 return (int)read_result
;
513 /** As read_to_chunk(), but return (negative) error code on error, blocking,
514 * or TLS, and the number of bytes read otherwise. */
516 read_to_chunk_tls(buf_t
*buf
, chunk_t
*chunk
, tor_tls_t
*tls
,
521 tor_assert(CHUNK_REMAINING_CAPACITY(chunk
) >= at_most
);
522 read_result
= tor_tls_read(tls
, CHUNK_WRITE_PTR(chunk
), at_most
);
525 buf
->datalen
+= read_result
;
526 chunk
->datalen
+= read_result
;
530 /** Read from socket <b>s</b>, writing onto end of <b>buf</b>. Read at most
531 * <b>at_most</b> bytes, growing the buffer as necessary. If recv() returns 0
532 * (because of EOF), set *<b>reached_eof</b> to 1 and return 0. Return -1 on
533 * error; else return the number of bytes read.
535 /* XXXX024 indicate "read blocked" somehow? */
537 read_to_buf(tor_socket_t s
, size_t at_most
, buf_t
*buf
, int *reached_eof
,
540 /* XXXX024 It's stupid to overload the return values for these functions:
541 * "error status" and "number of bytes read" are not mutually exclusive.
544 size_t total_read
= 0;
547 tor_assert(reached_eof
);
548 tor_assert(SOCKET_OK(s
));
550 while (at_most
> total_read
) {
551 size_t readlen
= at_most
- total_read
;
553 if (!buf
->tail
|| CHUNK_REMAINING_CAPACITY(buf
->tail
) < MIN_READ_LEN
) {
554 chunk
= buf_add_chunk_with_capacity(buf
, at_most
, 1);
555 if (readlen
> chunk
->memlen
)
556 readlen
= chunk
->memlen
;
558 size_t cap
= CHUNK_REMAINING_CAPACITY(buf
->tail
);
564 r
= read_to_chunk(buf
, chunk
, s
, readlen
, reached_eof
, socket_error
);
567 return r
; /* Error */
568 tor_assert(total_read
+r
< INT_MAX
);
570 if ((size_t)r
< readlen
) { /* eof, block, or no more to read. */
574 return (int)total_read
;
577 /** As read_to_buf, but reads from a TLS connection, and returns a TLS
578 * status value rather than the number of bytes read.
580 * Using TLS on OR connections complicates matters in two ways.
582 * First, a TLS stream has its own read buffer independent of the
583 * connection's read buffer. (TLS needs to read an entire frame from
584 * the network before it can decrypt any data. Thus, trying to read 1
585 * byte from TLS can require that several KB be read from the network
586 * and decrypted. The extra data is stored in TLS's decrypt buffer.)
587 * Because the data hasn't been read by Tor (it's still inside the TLS),
588 * this means that sometimes a connection "has stuff to read" even when
589 * poll() didn't return POLLIN. The tor_tls_get_pending_bytes function is
590 * used in connection.c to detect TLS objects with non-empty internal
591 * buffers and read from them again.
593 * Second, the TLS stream's events do not correspond directly to network
594 * events: sometimes, before a TLS stream can read, the network must be
595 * ready to write -- or vice versa.
598 read_to_buf_tls(tor_tls_t
*tls
, size_t at_most
, buf_t
*buf
)
601 size_t total_read
= 0;
603 check_no_tls_errors();
607 while (at_most
> total_read
) {
608 size_t readlen
= at_most
- total_read
;
610 if (!buf
->tail
|| CHUNK_REMAINING_CAPACITY(buf
->tail
) < MIN_READ_LEN
) {
611 chunk
= buf_add_chunk_with_capacity(buf
, at_most
, 1);
612 if (readlen
> chunk
->memlen
)
613 readlen
= chunk
->memlen
;
615 size_t cap
= CHUNK_REMAINING_CAPACITY(buf
->tail
);
621 r
= read_to_chunk_tls(buf
, chunk
, tls
, readlen
);
624 return r
; /* Error */
625 tor_assert(total_read
+r
< INT_MAX
);
627 if ((size_t)r
< readlen
) /* eof, block, or no more to read. */
630 return (int)total_read
;
633 /** Helper for flush_buf(): try to write <b>sz</b> bytes from chunk
634 * <b>chunk</b> of buffer <b>buf</b> onto socket <b>s</b>. On success, deduct
635 * the bytes written from *<b>buf_flushlen</b>. Return the number of bytes
636 * written on success, 0 on blocking, -1 on failure.
639 flush_chunk(tor_socket_t s
, buf_t
*buf
, chunk_t
*chunk
, size_t sz
,
640 size_t *buf_flushlen
)
642 ssize_t write_result
;
644 if (sz
> chunk
->datalen
)
646 write_result
= tor_socket_send(s
, chunk
->data
, sz
, 0);
648 if (write_result
< 0) {
649 int e
= tor_socket_errno(s
);
650 if (!ERRNO_IS_EAGAIN(e
)) { /* it's a real error */
653 log_warn(LD_NET
,"write() failed: WSAENOBUFS. Not enough ram?");
657 log_debug(LD_NET
,"write() would block, returning.");
660 *buf_flushlen
-= write_result
;
661 buf_remove_from_front(buf
, write_result
);
662 tor_assert(write_result
< INT_MAX
);
663 return (int)write_result
;
667 /** Helper for flush_buf_tls(): try to write <b>sz</b> bytes from chunk
668 * <b>chunk</b> of buffer <b>buf</b> onto socket <b>s</b>. (Tries to write
669 * more if there is a forced pending write size.) On success, deduct the
670 * bytes written from *<b>buf_flushlen</b>. Return the number of bytes
671 * written on success, and a TOR_TLS error code on failure or blocking.
674 flush_chunk_tls(tor_tls_t
*tls
, buf_t
*buf
, chunk_t
*chunk
,
675 size_t sz
, size_t *buf_flushlen
)
681 forced
= tor_tls_get_forced_write_size(tls
);
686 tor_assert(sz
<= chunk
->datalen
);
691 r
= tor_tls_write(tls
, data
, sz
);
694 if (*buf_flushlen
> (size_t)r
)
698 buf_remove_from_front(buf
, r
);
699 log_debug(LD_NET
,"flushed %d bytes, %d ready to flush, %d remain.",
700 r
,(int)*buf_flushlen
,(int)buf
->datalen
);
704 /** Write data from <b>buf</b> to the socket <b>s</b>. Write at most
705 * <b>sz</b> bytes, decrement *<b>buf_flushlen</b> by
706 * the number of bytes actually written, and remove the written bytes
707 * from the buffer. Return the number of bytes written on success,
708 * -1 on failure. Return 0 if write() would block.
711 flush_buf(tor_socket_t s
, buf_t
*buf
, size_t sz
, size_t *buf_flushlen
)
713 /* XXXX024 It's stupid to overload the return values for these functions:
714 * "error status" and "number of bytes flushed" are not mutually exclusive.
718 tor_assert(buf_flushlen
);
719 tor_assert(SOCKET_OK(s
));
720 tor_assert(*buf_flushlen
<= buf
->datalen
);
721 tor_assert(sz
<= *buf_flushlen
);
726 tor_assert(buf
->head
);
727 if (buf
->head
->datalen
>= sz
)
730 flushlen0
= buf
->head
->datalen
;
732 r
= flush_chunk(s
, buf
, buf
->head
, flushlen0
, buf_flushlen
);
738 if (r
== 0 || (size_t)r
< flushlen0
) /* can't flush any more now. */
741 tor_assert(flushed
< INT_MAX
);
745 /** As flush_buf(), but writes data to a TLS connection. Can write more than
746 * <b>flushlen</b> bytes.
749 flush_buf_tls(tor_tls_t
*tls
, buf_t
*buf
, size_t flushlen
,
750 size_t *buf_flushlen
)
755 tor_assert(buf_flushlen
);
756 tor_assert(*buf_flushlen
<= buf
->datalen
);
757 tor_assert(flushlen
<= *buf_flushlen
);
758 sz
= (ssize_t
) flushlen
;
760 /* we want to let tls write even if flushlen is zero, because it might
761 * have a partial record pending */
762 check_no_tls_errors();
768 if ((ssize_t
)buf
->head
->datalen
>= sz
)
771 flushlen0
= buf
->head
->datalen
;
776 r
= flush_chunk_tls(tls
, buf
, buf
->head
, flushlen0
, buf_flushlen
);
782 if (r
== 0) /* Can't flush any more now. */
785 tor_assert(flushed
< INT_MAX
);
789 /** Append <b>string_len</b> bytes from <b>string</b> to the end of
792 * Return the new length of the buffer on success, -1 on failure.
795 write_to_buf(const char *string
, size_t string_len
, buf_t
*buf
)
798 return (int)buf
->datalen
;
803 if (!buf
->tail
|| !CHUNK_REMAINING_CAPACITY(buf
->tail
))
804 buf_add_chunk_with_capacity(buf
, string_len
, 1);
806 copy
= CHUNK_REMAINING_CAPACITY(buf
->tail
);
807 if (copy
> string_len
)
809 memcpy(CHUNK_WRITE_PTR(buf
->tail
), string
, copy
);
812 buf
->datalen
+= copy
;
813 buf
->tail
->datalen
+= copy
;
817 tor_assert(buf
->datalen
< INT_MAX
);
818 return (int)buf
->datalen
;
821 /** Helper: copy the first <b>string_len</b> bytes from <b>buf</b>
822 * onto <b>string</b>.
825 peek_from_buf(char *string
, size_t string_len
, const buf_t
*buf
)
830 /* make sure we don't ask for too much */
831 tor_assert(string_len
<= buf
->datalen
);
832 /* assert_buf_ok(buf); */
836 size_t copy
= string_len
;
838 if (chunk
->datalen
< copy
)
839 copy
= chunk
->datalen
;
840 memcpy(string
, chunk
->data
, copy
);
847 /** Remove <b>string_len</b> bytes from the front of <b>buf</b>, and store
848 * them into <b>string</b>. Return the new buffer size. <b>string_len</b>
849 * must be \<= the number of bytes on the buffer.
852 fetch_from_buf(char *string
, size_t string_len
, buf_t
*buf
)
854 /* There must be string_len bytes in buf; write them onto string,
855 * then memmove buf back (that is, remove them from buf).
857 * Return the number of bytes still on the buffer. */
860 peek_from_buf(string
, string_len
, buf
);
861 buf_remove_from_front(buf
, string_len
);
863 tor_assert(buf
->datalen
< INT_MAX
);
864 return (int)buf
->datalen
;
867 /** True iff the cell command <b>command</b> is one that implies a
868 * variable-length cell in Tor link protocol <b>linkproto</b>. */
870 cell_command_is_var_length(uint8_t command
, int linkproto
)
872 /* If linkproto is v2 (2), CELL_VERSIONS is the only variable-length cells
873 * work as implemented here. If it's 1, there are no variable-length cells.
874 * Tor does not support other versions right now, and so can't negotiate
879 /* Link protocol version 1 has no variable-length cells. */
882 /* In link protocol version 2, VERSIONS is the only variable-length cell */
883 return command
== CELL_VERSIONS
;
887 /* In link protocol version 3 and later, and in version "unknown",
888 * commands 128 and higher indicate variable-length. VERSIONS is
889 * grandfathered in. */
890 return command
== CELL_VERSIONS
|| command
>= 128;
894 /** Check <b>buf</b> for a variable-length cell according to the rules of link
895 * protocol version <b>linkproto</b>. If one is found, pull it off the buffer
896 * and assign a newly allocated var_cell_t to *<b>out</b>, and return 1.
897 * Return 0 if whatever is on the start of buf_t is not a variable-length
898 * cell. Return 1 and set *<b>out</b> to NULL if there seems to be the start
899 * of a variable-length cell on <b>buf</b>, but the whole thing isn't there
902 fetch_var_cell_from_buf(buf_t
*buf
, var_cell_t
**out
, int linkproto
)
904 char hdr
[VAR_CELL_MAX_HEADER_SIZE
];
908 const int wide_circ_ids
= linkproto
>= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS
;
909 const int circ_id_len
= get_circ_id_size(wide_circ_ids
);
910 const unsigned header_len
= get_var_cell_header_size(wide_circ_ids
);
913 if (buf
->datalen
< header_len
)
915 peek_from_buf(hdr
, header_len
, buf
);
917 command
= get_uint8(hdr
+ circ_id_len
);
918 if (!(cell_command_is_var_length(command
, linkproto
)))
921 length
= ntohs(get_uint16(hdr
+ circ_id_len
+ 1));
922 if (buf
->datalen
< (size_t)(header_len
+length
))
924 result
= var_cell_new(length
);
925 result
->command
= command
;
927 result
->circ_id
= ntohl(get_uint32(hdr
));
929 result
->circ_id
= ntohs(get_uint16(hdr
));
931 buf_remove_from_front(buf
, header_len
);
932 peek_from_buf((char*) result
->payload
, length
, buf
);
933 buf_remove_from_front(buf
, length
);
940 #ifdef USE_BUFFEREVENTS
941 /** Try to read <b>n</b> bytes from <b>buf</b> at <b>pos</b> (which may be
942 * NULL for the start of the buffer), copying the data only if necessary. Set
943 * *<b>data_out</b> to a pointer to the desired bytes. Set <b>free_out</b>
944 * to 1 if we needed to malloc *<b>data</b> because the original bytes were
945 * noncontiguous; 0 otherwise. Return the number of bytes actually available
946 * at *<b>data_out</b>.
949 inspect_evbuffer(struct evbuffer
*buf
, char **data_out
, size_t n
,
950 int *free_out
, struct evbuffer_ptr
*pos
)
954 if (evbuffer_get_length(buf
) < n
)
955 n
= evbuffer_get_length(buf
);
958 n_vecs
= evbuffer_peek(buf
, n
, pos
, NULL
, 0);
959 tor_assert(n_vecs
> 0);
961 struct evbuffer_iovec v
;
962 i
= evbuffer_peek(buf
, n
, pos
, &v
, 1);
964 *data_out
= v
.iov_base
;
969 *data_out
= tor_malloc(n
);
971 copied
= evbuffer_copyout(buf
, *data_out
, n
);
972 tor_assert(copied
>= 0 && (size_t)copied
== n
);
977 /** As fetch_var_cell_from_buf, buf works on an evbuffer. */
979 fetch_var_cell_from_evbuffer(struct evbuffer
*buf
, var_cell_t
**out
,
987 uint16_t cell_length
;
990 const int wide_circ_ids
= linkproto
>= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS
;
991 const int circ_id_len
= get_circ_id_size(wide_circ_ids
);
992 const unsigned header_len
= get_var_cell_header_size(wide_circ_ids
);
995 buf_len
= evbuffer_get_length(buf
);
996 if (buf_len
< header_len
)
999 n
= inspect_evbuffer(buf
, &hdr
, header_len
, &free_hdr
, NULL
);
1000 tor_assert(n
>= header_len
);
1002 command
= get_uint8(hdr
+ circ_id_len
);
1003 if (!(cell_command_is_var_length(command
, linkproto
))) {
1007 cell_length
= ntohs(get_uint16(hdr
+ circ_id_len
+ 1));
1008 if (buf_len
< (size_t)(header_len
+cell_length
)) {
1009 result
= 1; /* Not all here yet. */
1013 cell
= var_cell_new(cell_length
);
1014 cell
->command
= command
;
1016 cell
->circ_id
= ntohl(get_uint32(hdr
));
1018 cell
->circ_id
= ntohs(get_uint16(hdr
));
1019 evbuffer_drain(buf
, header_len
);
1020 evbuffer_remove(buf
, cell
->payload
, cell_length
);
1025 if (free_hdr
&& hdr
)
1031 /** Move up to *<b>buf_flushlen</b> bytes from <b>buf_in</b> to
1032 * <b>buf_out</b>, and modify *<b>buf_flushlen</b> appropriately.
1033 * Return the number of bytes actually copied.
1036 move_buf_to_buf(buf_t
*buf_out
, buf_t
*buf_in
, size_t *buf_flushlen
)
1038 /* We can do way better here, but this doesn't turn up in any profiles. */
1041 len
= *buf_flushlen
;
1042 if (len
> buf_in
->datalen
)
1043 len
= buf_in
->datalen
;
1045 cp
= len
; /* Remember the number of bytes we intend to copy. */
1046 tor_assert(cp
< INT_MAX
);
1048 /* This isn't the most efficient implementation one could imagine, since
1049 * it does two copies instead of 1, but I kinda doubt that this will be
1051 size_t n
= len
> sizeof(b
) ? sizeof(b
) : len
;
1052 fetch_from_buf(b
, n
, buf_in
);
1053 write_to_buf(b
, n
, buf_out
);
1056 *buf_flushlen
-= cp
;
1060 /** Internal structure: represents a position in a buffer. */
1061 typedef struct buf_pos_t
{
1062 const chunk_t
*chunk
; /**< Which chunk are we pointing to? */
1063 int pos
;/**< Which character inside the chunk's data are we pointing to? */
1064 size_t chunk_pos
; /**< Total length of all previous chunks. */
1067 /** Initialize <b>out</b> to point to the first character of <b>buf</b>.*/
1069 buf_pos_init(const buf_t
*buf
, buf_pos_t
*out
)
1071 out
->chunk
= buf
->head
;
1076 /** Advance <b>out</b> to the first appearance of <b>ch</b> at the current
1077 * position of <b>out</b>, or later. Return -1 if no instances are found;
1078 * otherwise returns the absolute position of the character. */
1080 buf_find_pos_of_char(char ch
, buf_pos_t
*out
)
1082 const chunk_t
*chunk
;
1086 if (out
->chunk
->datalen
) {
1087 tor_assert(out
->pos
< (off_t
)out
->chunk
->datalen
);
1089 tor_assert(out
->pos
== 0);
1093 for (chunk
= out
->chunk
; chunk
; chunk
= chunk
->next
) {
1094 char *cp
= memchr(chunk
->data
+pos
, ch
, chunk
->datalen
- pos
);
1097 tor_assert(cp
- chunk
->data
< INT_MAX
);
1098 out
->pos
= (int)(cp
- chunk
->data
);
1099 return out
->chunk_pos
+ out
->pos
;
1101 out
->chunk_pos
+= chunk
->datalen
;
1108 /** Advance <b>pos</b> by a single character, if there are any more characters
1109 * in the buffer. Returns 0 on success, -1 on failure. */
1111 buf_pos_inc(buf_pos_t
*pos
)
1114 if (pos
->pos
== (off_t
)pos
->chunk
->datalen
) {
1115 if (!pos
->chunk
->next
)
1117 pos
->chunk_pos
+= pos
->chunk
->datalen
;
1118 pos
->chunk
= pos
->chunk
->next
;
1124 /** Return true iff the <b>n</b>-character string in <b>s</b> appears
1125 * (verbatim) at <b>pos</b>. */
1127 buf_matches_at_pos(const buf_pos_t
*pos
, const char *s
, size_t n
)
1133 memcpy(&p
, pos
, sizeof(p
));
1136 char ch
= p
.chunk
->data
[p
.pos
];
1140 /* If we're out of characters that don't match, we match. Check this
1141 * _before_ we test incrementing pos, in case we're at the end of the
1145 if (buf_pos_inc(&p
)<0)
1150 /** Return the first position in <b>buf</b> at which the <b>n</b>-character
1151 * string <b>s</b> occurs, or -1 if it does not occur. */
1153 buf_find_string_offset(const buf_t
*buf
, const char *s
, size_t n
)
1156 buf_pos_init(buf
, &pos
);
1157 while (buf_find_pos_of_char(*s
, &pos
) >= 0) {
1158 if (buf_matches_at_pos(&pos
, s
, n
)) {
1159 tor_assert(pos
.chunk_pos
+ pos
.pos
< INT_MAX
);
1160 return (int)(pos
.chunk_pos
+ pos
.pos
);
1162 if (buf_pos_inc(&pos
)<0)
1169 /** There is a (possibly incomplete) http statement on <b>buf</b>, of the
1170 * form "\%s\\r\\n\\r\\n\%s", headers, body. (body may contain NULs.)
1171 * If a) the headers include a Content-Length field and all bytes in
1172 * the body are present, or b) there's no Content-Length field and
1173 * all headers are present, then:
1175 * - strdup headers into <b>*headers_out</b>, and NUL-terminate it.
1176 * - memdup body into <b>*body_out</b>, and NUL-terminate it.
1177 * - Then remove them from <b>buf</b>, and return 1.
1179 * - If headers or body is NULL, discard that part of the buf.
1180 * - If a headers or body doesn't fit in the arg, return -1.
1181 * (We ensure that the headers or body don't exceed max len,
1182 * _even if_ we're planning to discard them.)
1183 * - If force_complete is true, then succeed even if not all of the
1184 * content has arrived.
1186 * Else, change nothing and return 0.
1189 fetch_from_buf_http(buf_t
*buf
,
1190 char **headers_out
, size_t max_headerlen
,
1191 char **body_out
, size_t *body_used
, size_t max_bodylen
,
1195 size_t headerlen
, bodylen
, contentlen
;
1202 crlf_offset
= buf_find_string_offset(buf
, "\r\n\r\n", 4);
1203 if (crlf_offset
> (int)max_headerlen
||
1204 (crlf_offset
< 0 && buf
->datalen
> max_headerlen
)) {
1205 log_debug(LD_HTTP
,"headers too long.");
1207 } else if (crlf_offset
< 0) {
1208 log_debug(LD_HTTP
,"headers not all here yet.");
1211 /* Okay, we have a full header. Make sure it all appears in the first
1213 if ((int)buf
->head
->datalen
< crlf_offset
+ 4)
1214 buf_pullup(buf
, crlf_offset
+4);
1215 headerlen
= crlf_offset
+ 4;
1217 headers
= buf
->head
->data
;
1218 bodylen
= buf
->datalen
- headerlen
;
1219 log_debug(LD_HTTP
,"headerlen %d, bodylen %d.", (int)headerlen
, (int)bodylen
);
1221 if (max_headerlen
<= headerlen
) {
1222 log_warn(LD_HTTP
,"headerlen %d larger than %d. Failing.",
1223 (int)headerlen
, (int)max_headerlen
-1);
1226 if (max_bodylen
<= bodylen
) {
1227 log_warn(LD_HTTP
,"bodylen %d larger than %d. Failing.",
1228 (int)bodylen
, (int)max_bodylen
-1);
1232 #define CONTENT_LENGTH "\r\nContent-Length: "
1233 p
= (char*) tor_memstr(headers
, headerlen
, CONTENT_LENGTH
);
1236 i
= atoi(p
+strlen(CONTENT_LENGTH
));
1238 log_warn(LD_PROTOCOL
, "Content-Length is less than zero; it looks like "
1239 "someone is trying to crash us.");
1243 /* if content-length is malformed, then our body length is 0. fine. */
1244 log_debug(LD_HTTP
,"Got a contentlen of %d.",(int)contentlen
);
1245 if (bodylen
< contentlen
) {
1246 if (!force_complete
) {
1247 log_debug(LD_HTTP
,"body not all here yet.");
1248 return 0; /* not all there yet */
1251 if (bodylen
> contentlen
) {
1252 bodylen
= contentlen
;
1253 log_debug(LD_HTTP
,"bodylen reduced to %d.",(int)bodylen
);
1256 /* all happy. copy into the appropriate places, and return 1 */
1258 *headers_out
= tor_malloc(headerlen
+1);
1259 fetch_from_buf(*headers_out
, headerlen
, buf
);
1260 (*headers_out
)[headerlen
] = 0; /* NUL terminate it */
1263 tor_assert(body_used
);
1264 *body_used
= bodylen
;
1265 *body_out
= tor_malloc(bodylen
+1);
1266 fetch_from_buf(*body_out
, bodylen
, buf
);
1267 (*body_out
)[bodylen
] = 0; /* NUL terminate it */
#ifdef USE_BUFFEREVENTS
/** As fetch_from_buf_http, buf works on an evbuffer. */
int
fetch_from_evbuffer_http(struct evbuffer *buf,
                         char **headers_out, size_t max_headerlen,
                         char **body_out, size_t *body_used,
                         size_t max_bodylen, int force_complete)
{
  struct evbuffer_ptr crlf, content_length;
  size_t headerlen, bodylen, contentlen;

  /* Find the first \r\n\r\n in the buffer */
  crlf = evbuffer_search(buf, "\r\n\r\n", 4, NULL);
  if (crlf.pos < 0) {
    /* We didn't find one. */
    if (evbuffer_get_length(buf) > max_headerlen)
      return -1; /* Headers too long. */
    return 0; /* Headers not here yet. */
  } else if (crlf.pos > (int)max_headerlen) {
    return -1; /* Headers too long. */
  }

  headerlen = crlf.pos + 4; /* Skip over the \r\n\r\n */
  bodylen = evbuffer_get_length(buf) - headerlen;
  if (bodylen > max_bodylen)
    return -1; /* body too long */

  /* Look for the first occurrence of CONTENT_LENGTH insize buf before the
   * crlf. */
  content_length = evbuffer_search_range(buf, CONTENT_LENGTH,
                                         strlen(CONTENT_LENGTH),
                                         NULL, &crlf);

  if (content_length.pos >= 0) {
    /* We found a content_length: parse it and figure out if the body is here
     * yet. */
    struct evbuffer_ptr eol;
    char *data = NULL;
    int free_data = 0;
    int n, i;
    n = evbuffer_ptr_set(buf, &content_length, strlen(CONTENT_LENGTH),
                         EVBUFFER_PTR_ADD);
    tor_assert(n == 0);
    eol = evbuffer_search_eol(buf, &content_length, NULL, EVBUFFER_EOL_CRLF);
    tor_assert(eol.pos > content_length.pos);
    tor_assert(eol.pos <= crlf.pos);
    inspect_evbuffer(buf, &data, eol.pos - content_length.pos, &free_data,
                     &content_length);

    i = atoi(data);
    if (free_data)
      tor_free(data);
    if (i < 0) {
      log_warn(LD_PROTOCOL, "Content-Length is less than zero; it looks like "
               "someone is trying to crash us.");
      return -1;
    }
    contentlen = i;
    /* if content-length is malformed, then our body length is 0. fine. */
    log_debug(LD_HTTP,"Got a contentlen of %d.",(int)contentlen);
    if (bodylen < contentlen) {
      if (!force_complete) {
        log_debug(LD_HTTP,"body not all here yet.");
        return 0; /* not all there yet */
      }
    }
    if (bodylen > contentlen) {
      bodylen = contentlen;
      log_debug(LD_HTTP,"bodylen reduced to %d.",(int)bodylen);
    }
  }

  if (headers_out) {
    *headers_out = tor_malloc(headerlen+1);
    evbuffer_remove(buf, *headers_out, headerlen);
    (*headers_out)[headerlen] = '\0';
  }
  if (body_out) {
    tor_assert(headers_out);
    tor_assert(body_used);
    *body_used = bodylen;
    *body_out = tor_malloc(bodylen+1);
    evbuffer_remove(buf, *body_out, bodylen);
    (*body_out)[bodylen] = '\0';
  }
  return 1;
}
#endif
1362 * Wait this many seconds before warning the user about using SOCKS unsafely
1363 * again (requires that WarnUnsafeSocks is turned on). */
1364 #define SOCKS_WARN_INTERVAL 5
1366 /** Warn that the user application has made an unsafe socks request using
1367 * protocol <b>socks_protocol</b> on port <b>port</b>. Don't warn more than
1368 * once per SOCKS_WARN_INTERVAL, unless <b>safe_socks</b> is set. */
1370 log_unsafe_socks_warning(int socks_protocol
, const char *address
,
1371 uint16_t port
, int safe_socks
)
1373 static ratelim_t socks_ratelim
= RATELIM_INIT(SOCKS_WARN_INTERVAL
);
1375 const or_options_t
*options
= get_options();
1376 if (! options
->WarnUnsafeSocks
)
1379 log_fn_ratelim(&socks_ratelim
, LOG_WARN
, LD_APP
,
1380 "Your application (using socks%d to port %d) is giving "
1381 "Tor only an IP address. Applications that do DNS resolves "
1382 "themselves may leak information. Consider using Socks4A "
1383 "(e.g. via privoxy or socat) instead. For more information, "
1384 "please see https://wiki.torproject.org/TheOnionRouter/"
1385 "TorFAQ#SOCKSAndDNS.%s",
1388 safe_socks
? " Rejecting." : "");
1390 control_event_client_status(LOG_WARN
,
1391 "DANGEROUS_SOCKS PROTOCOL=SOCKS%d ADDRESS=%s:%d",
1392 socks_protocol
, address
, (int)port
);
1395 /** Do not attempt to parse socks messages longer than this. This value is
1396 * actually significantly higher than the longest possible socks message. */
1397 #define MAX_SOCKS_MESSAGE_LEN 512
1399 /** Return a new socks_request_t. */
1401 socks_request_new(void)
1403 return tor_malloc_zero(sizeof(socks_request_t
));
1406 /** Free all storage held in the socks_request_t <b>req</b>. */
1408 socks_request_free(socks_request_t
*req
)
1412 if (req
->username
) {
1413 memwipe(req
->username
, 0x10, req
->usernamelen
);
1414 tor_free(req
->username
);
1416 if (req
->password
) {
1417 memwipe(req
->password
, 0x04, req
->passwordlen
);
1418 tor_free(req
->password
);
1420 memwipe(req
, 0xCC, sizeof(socks_request_t
));
1424 /** There is a (possibly incomplete) socks handshake on <b>buf</b>, of one
1426 * - socks4: "socksheader username\\0"
1427 * - socks4a: "socksheader username\\0 destaddr\\0"
1428 * - socks5 phase one: "version #methods methods"
1429 * - socks5 phase two: "version command 0 addresstype..."
1430 * If it's a complete and valid handshake, and destaddr fits in
1431 * MAX_SOCKS_ADDR_LEN bytes, then pull the handshake off the buf,
1432 * assign to <b>req</b>, and return 1.
1434 * If it's invalid or too big, return -1.
1436 * Else it's not all there yet, leave buf alone and return 0.
1438 * If you want to specify the socks reply, write it into <b>req->reply</b>
1439 * and set <b>req->replylen</b>, else leave <b>req->replylen</b> alone.
1441 * If <b>log_sockstype</b> is non-zero, then do a notice-level log of whether
1442 * the connection is possibly leaking DNS requests locally or not.
1444 * If <b>safe_socks</b> is true, then reject unsafe socks protocols.
1446 * If returning 0 or -1, <b>req->address</b> and <b>req->port</b> are
1450 fetch_from_buf_socks(buf_t
*buf
, socks_request_t
*req
,
1451 int log_sockstype
, int safe_socks
)
1455 size_t want_length
= 128;
1457 if (buf
->datalen
< 2) /* version and another byte */
1462 buf_pullup(buf
, want_length
);
1463 tor_assert(buf
->head
&& buf
->head
->datalen
>= 2);
1466 res
= parse_socks(buf
->head
->data
, buf
->head
->datalen
, req
, log_sockstype
,
1467 safe_socks
, &n_drain
, &want_length
);
1471 else if (n_drain
> 0)
1472 buf_remove_from_front(buf
, n_drain
);
1474 } while (res
== 0 && buf
->head
&& want_length
< buf
->datalen
&&
#ifdef USE_BUFFEREVENTS
/* As fetch_from_buf_socks(), but targets an evbuffer instead. */
int
fetch_from_evbuffer_socks(struct evbuffer *buf, socks_request_t *req,
                          int log_sockstype, int safe_socks)
{
  char *data;
  ssize_t n_drain;
  size_t datalen, buflen, want_length;
  int res;

  buflen = evbuffer_get_length(buf);
  if (buflen < 2)
    return 0;

  {
    /* See if we can find the socks request in the first chunk of the buffer.
     */
    struct evbuffer_iovec v;
    int i;
    n_drain = 0;
    i = evbuffer_peek(buf, -1, NULL, &v, 1);
    tor_assert(i == 1);
    data = v.iov_base;
    datalen = v.iov_len;
    want_length = 0;

    res = parse_socks(data, datalen, req, log_sockstype,
                      safe_socks, &n_drain, &want_length);

    if (n_drain < 0)
      evbuffer_drain(buf, evbuffer_get_length(buf));
    else if (n_drain > 0)
      evbuffer_drain(buf, n_drain);

    if (res)
      return res;
  }

  /* Okay, the first chunk of the buffer didn't have a complete socks request.
   * That means that either we don't have a whole socks request at all, or
   * it's gotten split up.  We're going to try passing parse_socks() bigger
   * and bigger chunks until either it says "Okay, I got it", or it says it
   * will need more data than we currently have. */

  /* Loop while we have more data that we haven't given parse_socks() yet. */
  do {
    int free_data = 0;
    const size_t last_wanted = want_length;
    n_drain = 0;
    data = NULL;
    datalen = inspect_evbuffer(buf, &data, want_length, &free_data, NULL);

    want_length = 0;
    res = parse_socks(data, datalen, req, log_sockstype,
                      safe_socks, &n_drain, &want_length);

    if (free_data)
      tor_free(data);

    if (n_drain < 0)
      evbuffer_drain(buf, evbuffer_get_length(buf));
    else if (n_drain > 0)
      evbuffer_drain(buf, n_drain);

    if (res == 0 && n_drain == 0 && want_length <= last_wanted) {
      /* If we drained nothing, and we didn't ask for more than last time,
       * then we probably wanted more data than the buffer actually had,
       * and we're finding out that we're not satisified with it. It's
       * time to break until we have more data. */
      break;
    }

    buflen = evbuffer_get_length(buf);
  } while (res == 0 && want_length <= buflen && buflen >= 2);

  return res;
}
#endif
1560 /** The size of the header of an Extended ORPort message: 2 bytes for
1561 * COMMAND, 2 bytes for BODYLEN */
1562 #define EXT_OR_CMD_HEADER_SIZE 4
1564 /** Read <b>buf</b>, which should contain an Extended ORPort message
1565 * from a transport proxy. If well-formed, create and populate
1566 * <b>out</b> with the Extended ORport message. Return 0 if the
1567 * buffer was incomplete, 1 if it was well-formed and -1 if we
1568 * encountered an error while parsing it. */
1570 fetch_ext_or_command_from_buf(buf_t
*buf
, ext_or_cmd_t
**out
)
1572 char hdr
[EXT_OR_CMD_HEADER_SIZE
];
1576 if (buf
->datalen
< EXT_OR_CMD_HEADER_SIZE
)
1578 peek_from_buf(hdr
, sizeof(hdr
), buf
);
1579 len
= ntohs(get_uint16(hdr
+2));
1580 if (buf
->datalen
< (unsigned)len
+ EXT_OR_CMD_HEADER_SIZE
)
1582 *out
= ext_or_cmd_new(len
);
1583 (*out
)->cmd
= ntohs(get_uint16(hdr
));
1585 buf_remove_from_front(buf
, EXT_OR_CMD_HEADER_SIZE
);
1586 fetch_from_buf((*out
)->body
, len
, buf
);
#ifdef USE_BUFFEREVENTS
/** Read <b>buf</b>, which should contain an Extended ORPort message
 *  from a transport proxy. If well-formed, create and populate
 *  <b>out</b> with the Extended ORport message. Return 0 if the
 *  buffer was incomplete, 1 if it was well-formed and -1 if we
 *  encountered an error while parsing it.  */
int
fetch_ext_or_command_from_evbuffer(struct evbuffer *buf, ext_or_cmd_t **out)
{
  char hdr[EXT_OR_CMD_HEADER_SIZE];
  uint16_t len;
  size_t buf_len = evbuffer_get_length(buf);

  if (buf_len < EXT_OR_CMD_HEADER_SIZE)
    return 0;
  /* Copy the header out without draining it; we only consume once the whole
   * body is present. */
  evbuffer_copyout(buf, hdr, EXT_OR_CMD_HEADER_SIZE);
  len = ntohs(get_uint16(hdr+2));
  if (buf_len < (unsigned)len + EXT_OR_CMD_HEADER_SIZE)
    return 0;
  *out = ext_or_cmd_new(len);
  (*out)->cmd = ntohs(get_uint16(hdr));
  (*out)->len = len;
  evbuffer_drain(buf, EXT_OR_CMD_HEADER_SIZE);
  evbuffer_remove(buf, (*out)->body, len);
  return 1;
}
#endif
1618 /** Create a SOCKS5 reply message with <b>reason</b> in its REP field and
1619 * have Tor send it as error response to <b>req</b>.
1622 socks_request_set_socks5_error(socks_request_t
*req
,
1623 socks5_reply_status_t reason
)
1626 memset(req
->reply
,0,10);
1628 req
->reply
[0] = 0x05; // VER field.
1629 req
->reply
[1] = reason
; // REP field.
1630 req
->reply
[3] = 0x01; // ATYP field.
1633 /** Implementation helper to implement fetch_from_*_socks. Instead of looking
1634 * at a buffer's contents, we look at the <b>datalen</b> bytes of data in
1635 * <b>data</b>. Instead of removing data from the buffer, we set
1636 * <b>drain_out</b> to the amount of data that should be removed (or -1 if the
1637 * buffer should be cleared). Instead of pulling more data into the first
1638 * chunk of the buffer, we set *<b>want_length_out</b> to the number of bytes
1639 * we'd like to see in the input buffer, if they're available. */
1641 parse_socks(const char *data
, size_t datalen
, socks_request_t
*req
,
1642 int log_sockstype
, int safe_socks
, ssize_t
*drain_out
,
1643 size_t *want_length_out
)
1646 char tmpbuf
[TOR_ADDR_BUF_LEN
+1];
1647 tor_addr_t destaddr
;
1650 char *next
, *startaddr
;
1651 unsigned char usernamelen
, passlen
;
1655 /* We always need at least 2 bytes. */
1656 *want_length_out
= 2;
1660 if (req
->socks_version
== 5 && !req
->got_auth
) {
1661 /* See if we have received authentication. Strictly speaking, we should
1662 also check whether we actually negotiated username/password
1663 authentication. But some broken clients will send us authentication
1664 even if we negotiated SOCKS_NO_AUTH. */
1665 if (*data
== 1) { /* username/pass version 1 */
1666 /* Format is: authversion [1 byte] == 1
1667 usernamelen [1 byte]
1668 username [usernamelen bytes]
1670 password [passlen bytes] */
1671 usernamelen
= (unsigned char)*(data
+ 1);
1672 if (datalen
< 2u + usernamelen
+ 1u) {
1673 *want_length_out
= 2u + usernamelen
+ 1u;
1676 passlen
= (unsigned char)*(data
+ 2u + usernamelen
);
1677 if (datalen
< 2u + usernamelen
+ 1u + passlen
) {
1678 *want_length_out
= 2u + usernamelen
+ 1u + passlen
;
1681 req
->replylen
= 2; /* 2 bytes of response */
1682 req
->reply
[0] = 1; /* authversion == 1 */
1683 req
->reply
[1] = 0; /* authentication successful */
1685 "socks5: Accepted username/password without checking.");
1687 req
->username
= tor_memdup(data
+2u, usernamelen
);
1688 req
->usernamelen
= usernamelen
;
1691 req
->password
= tor_memdup(data
+3u+usernamelen
, passlen
);
1692 req
->passwordlen
= passlen
;
1694 *drain_out
= 2u + usernamelen
+ 1u + passlen
;
1696 *want_length_out
= 7; /* Minimal socks5 command. */
1698 } else if (req
->auth_type
== SOCKS_USER_PASS
) {
1699 /* unknown version byte */
1700 log_warn(LD_APP
, "Socks5 username/password version %d not recognized; "
1701 "rejecting.", (int)*data
);
1708 switch (socksver
) { /* which version of socks? */
1709 case 5: /* socks5 */
1711 if (req
->socks_version
!= 5) { /* we need to negotiate a method */
1712 unsigned char nummethods
= (unsigned char)*(data
+1);
1713 int have_user_pass
, have_no_auth
;
1715 tor_assert(!req
->socks_version
);
1716 if (datalen
< 2u+nummethods
) {
1717 *want_length_out
= 2u+nummethods
;
1722 req
->replylen
= 2; /* 2 bytes of response */
1723 req
->reply
[0] = 5; /* socks5 reply */
1724 have_user_pass
= (memchr(data
+2, SOCKS_USER_PASS
, nummethods
) !=NULL
);
1725 have_no_auth
= (memchr(data
+2, SOCKS_NO_AUTH
, nummethods
) !=NULL
);
1726 if (have_user_pass
&& !(have_no_auth
&& req
->socks_prefer_no_auth
)) {
1727 req
->auth_type
= SOCKS_USER_PASS
;
1728 req
->reply
[1] = SOCKS_USER_PASS
; /* tell client to use "user/pass"
1730 req
->socks_version
= 5; /* remember we've already negotiated auth */
1731 log_debug(LD_APP
,"socks5: accepted method 2 (username/password)");
1733 } else if (have_no_auth
) {
1734 req
->reply
[1] = SOCKS_NO_AUTH
; /* tell client to use "none" auth
1736 req
->socks_version
= 5; /* remember we've already negotiated auth */
1737 log_debug(LD_APP
,"socks5: accepted method 0 (no authentication)");
1741 "socks5: offered methods don't include 'no auth' or "
1742 "username/password. Rejecting.");
1743 req
->reply
[1] = '\xFF'; /* reject all methods */
1746 /* Remove packet from buf. Some SOCKS clients will have sent extra
1747 * junk at this point; let's hope it's an authentication message. */
1748 *drain_out
= 2u + nummethods
;
1752 if (req
->auth_type
!= SOCKS_NO_AUTH
&& !req
->got_auth
) {
1754 "socks5: negotiated authentication, but none provided");
1757 /* we know the method; read in the request */
1758 log_debug(LD_APP
,"socks5: checking request");
1759 if (datalen
< 7) {/* basic info plus >=1 for addr plus 2 for port */
1760 *want_length_out
= 7;
1761 return 0; /* not yet */
1763 req
->command
= (unsigned char) *(data
+1);
1764 if (req
->command
!= SOCKS_COMMAND_CONNECT
&&
1765 req
->command
!= SOCKS_COMMAND_RESOLVE
&&
1766 req
->command
!= SOCKS_COMMAND_RESOLVE_PTR
) {
1767 /* not a connect or resolve or a resolve_ptr? we don't support it. */
1768 socks_request_set_socks5_error(req
,SOCKS5_COMMAND_NOT_SUPPORTED
);
1770 log_warn(LD_APP
,"socks5: command %d not recognized. Rejecting.",
1774 switch (*(data
+3)) { /* address type */
1775 case 1: /* IPv4 address */
1776 case 4: /* IPv6 address */ {
1777 const int is_v6
= *(data
+3) == 4;
1778 const unsigned addrlen
= is_v6
? 16 : 4;
1779 log_debug(LD_APP
,"socks5: ipv4 address type");
1780 if (datalen
< 6+addrlen
) {/* ip/port there? */
1781 *want_length_out
= 6+addrlen
;
1782 return 0; /* not yet */
1786 tor_addr_from_ipv6_bytes(&destaddr
, data
+4);
1788 tor_addr_from_ipv4n(&destaddr
, get_uint32(data
+4));
1790 tor_addr_to_str(tmpbuf
, &destaddr
, sizeof(tmpbuf
), 1);
1792 if (strlen(tmpbuf
)+1 > MAX_SOCKS_ADDR_LEN
) {
1793 socks_request_set_socks5_error(req
, SOCKS5_GENERAL_ERROR
);
1795 "socks5 IP takes %d bytes, which doesn't fit in %d. "
1797 (int)strlen(tmpbuf
)+1,(int)MAX_SOCKS_ADDR_LEN
);
1800 strlcpy(req
->address
,tmpbuf
,sizeof(req
->address
));
1801 req
->port
= ntohs(get_uint16(data
+4+addrlen
));
1802 *drain_out
= 6+addrlen
;
1803 if (req
->command
!= SOCKS_COMMAND_RESOLVE_PTR
&&
1804 !addressmap_have_mapping(req
->address
,0)) {
1805 log_unsafe_socks_warning(5, req
->address
, req
->port
, safe_socks
);
1807 socks_request_set_socks5_error(req
, SOCKS5_NOT_ALLOWED
);
1814 log_debug(LD_APP
,"socks5: fqdn address type");
1815 if (req
->command
== SOCKS_COMMAND_RESOLVE_PTR
) {
1816 socks_request_set_socks5_error(req
,
1817 SOCKS5_ADDRESS_TYPE_NOT_SUPPORTED
);
1818 log_warn(LD_APP
, "socks5 received RESOLVE_PTR command with "
1819 "hostname type. Rejecting.");
1822 len
= (unsigned char)*(data
+4);
1823 if (datalen
< 7+len
) { /* addr/port there? */
1824 *want_length_out
= 7+len
;
1825 return 0; /* not yet */
1827 if (len
+1 > MAX_SOCKS_ADDR_LEN
) {
1828 socks_request_set_socks5_error(req
, SOCKS5_GENERAL_ERROR
);
1830 "socks5 hostname is %d bytes, which doesn't fit in "
1831 "%d. Rejecting.", len
+1,MAX_SOCKS_ADDR_LEN
);
1834 memcpy(req
->address
,data
+5,len
);
1835 req
->address
[len
] = 0;
1836 req
->port
= ntohs(get_uint16(data
+5+len
));
1837 *drain_out
= 5+len
+2;
1839 if (string_is_valid_ipv4_address(req
->address
) ||
1840 string_is_valid_ipv6_address(req
->address
)) {
1841 log_unsafe_socks_warning(5,req
->address
,req
->port
,safe_socks
);
1844 socks_request_set_socks5_error(req
, SOCKS5_NOT_ALLOWED
);
1847 } else if (!string_is_valid_hostname(req
->address
)) {
1848 socks_request_set_socks5_error(req
, SOCKS5_GENERAL_ERROR
);
1850 log_warn(LD_PROTOCOL
,
1851 "Your application (using socks5 to port %d) gave Tor "
1852 "a malformed hostname: %s. Rejecting the connection.",
1853 req
->port
, escaped_safe_str_client(req
->address
));
1858 "Your application (using socks5 to port %d) instructed "
1859 "Tor to take care of the DNS resolution itself if "
1860 "necessary. This is good.", req
->port
);
1862 default: /* unsupported */
1863 socks_request_set_socks5_error(req
,
1864 SOCKS5_ADDRESS_TYPE_NOT_SUPPORTED
);
1865 log_warn(LD_APP
,"socks5: unsupported address type %d. Rejecting.",
1870 case 4: { /* socks4 */
1871 enum {socks4
, socks4a
} socks4_prot
= socks4a
;
1872 const char *authstart
, *authend
;
1873 /* http://ss5.sourceforge.net/socks4.protocol.txt */
1874 /* http://ss5.sourceforge.net/socks4A.protocol.txt */
1876 req
->socks_version
= 4;
1877 if (datalen
< SOCKS4_NETWORK_LEN
) {/* basic info available? */
1878 *want_length_out
= SOCKS4_NETWORK_LEN
;
1879 return 0; /* not yet */
1881 // buf_pullup(buf, 1280);
1882 req
->command
= (unsigned char) *(data
+1);
1883 if (req
->command
!= SOCKS_COMMAND_CONNECT
&&
1884 req
->command
!= SOCKS_COMMAND_RESOLVE
) {
1885 /* not a connect or resolve? we don't support it. (No resolve_ptr with
1887 log_warn(LD_APP
,"socks4: command %d not recognized. Rejecting.",
1892 req
->port
= ntohs(get_uint16(data
+2));
1893 destip
= ntohl(get_uint32(data
+4));
1894 if ((!req
->port
&& req
->command
!=SOCKS_COMMAND_RESOLVE
) || !destip
) {
1895 log_warn(LD_APP
,"socks4: Port or DestIP is zero. Rejecting.");
1899 log_debug(LD_APP
,"socks4: destip not in form 0.0.0.x.");
1900 in
.s_addr
= htonl(destip
);
1901 tor_inet_ntoa(&in
,tmpbuf
,sizeof(tmpbuf
));
1902 if (strlen(tmpbuf
)+1 > MAX_SOCKS_ADDR_LEN
) {
1903 log_debug(LD_APP
,"socks4 addr (%d bytes) too long. Rejecting.",
1904 (int)strlen(tmpbuf
));
1908 "socks4: successfully read destip (%s)",
1909 safe_str_client(tmpbuf
));
1910 socks4_prot
= socks4
;
1913 authstart
= data
+ SOCKS4_NETWORK_LEN
;
1914 next
= memchr(authstart
, 0,
1915 datalen
-SOCKS4_NETWORK_LEN
);
1917 if (datalen
>= 1024) {
1918 log_debug(LD_APP
, "Socks4 user name too long; rejecting.");
1921 log_debug(LD_APP
,"socks4: Username not here yet.");
1922 *want_length_out
= datalen
+1024; /* More than we need, but safe */
1926 tor_assert(next
< data
+datalen
);
1929 if (socks4_prot
!= socks4a
&&
1930 !addressmap_have_mapping(tmpbuf
,0)) {
1931 log_unsafe_socks_warning(4, tmpbuf
, req
->port
, safe_socks
);
1936 if (socks4_prot
== socks4a
) {
1937 if (next
+1 == data
+datalen
) {
1938 log_debug(LD_APP
,"socks4: No part of destaddr here yet.");
1939 *want_length_out
= datalen
+ 1024; /* More than we need, but safe */
1943 next
= memchr(startaddr
, 0, data
+ datalen
- startaddr
);
1945 if (datalen
>= 1024) {
1946 log_debug(LD_APP
,"socks4: Destaddr too long.");
1949 log_debug(LD_APP
,"socks4: Destaddr not all here yet.");
1950 *want_length_out
= datalen
+ 1024; /* More than we need, but safe */
1953 if (MAX_SOCKS_ADDR_LEN
<= next
-startaddr
) {
1954 log_warn(LD_APP
,"socks4: Destaddr too long. Rejecting.");
1957 // tor_assert(next < buf->cur+buf->datalen);
1961 "Your application (using socks4a to port %d) instructed "
1962 "Tor to take care of the DNS resolution itself if "
1963 "necessary. This is good.", req
->port
);
1965 log_debug(LD_APP
,"socks4: Everything is here. Success.");
1966 strlcpy(req
->address
, startaddr
? startaddr
: tmpbuf
,
1967 sizeof(req
->address
));
1968 if (!tor_strisprint(req
->address
) || strchr(req
->address
,'\"')) {
1969 log_warn(LD_PROTOCOL
,
1970 "Your application (using socks4 to port %d) gave Tor "
1971 "a malformed hostname: %s. Rejecting the connection.",
1972 req
->port
, escaped(req
->address
));
1975 if (authend
!= authstart
) {
1977 req
->usernamelen
= authend
- authstart
;
1978 req
->username
= tor_memdup(authstart
, authend
- authstart
);
1980 /* next points to the final \0 on inbuf */
1981 *drain_out
= next
- data
+ 1;
1985 case 'H': /* head */
1986 case 'P': /* put/post */
1987 case 'C': /* connect */
1988 strlcpy((char*)req
->reply
,
1989 "HTTP/1.0 501 Tor is not an HTTP Proxy\r\n"
1990 "Content-Type: text/html; charset=iso-8859-1\r\n\r\n"
1993 "<title>Tor is not an HTTP Proxy</title>\n"
1996 "<h1>Tor is not an HTTP Proxy</h1>\n"
1998 "It appears you have configured your web browser to use Tor as an HTTP proxy."
2000 "This is not correct: Tor is a SOCKS proxy, not an HTTP proxy.\n"
2001 "Please configure your client accordingly.\n"
2004 "See <a href=\"https://www.torproject.org/documentation.html\">"
2005 "https://www.torproject.org/documentation.html</a> for more "
2007 "<!-- Plus this comment, to make the body response more than 512 bytes, so "
2008 " IE will be willing to display it. Comment comment comment comment "
2009 " comment comment comment comment comment comment comment comment.-->\n"
2013 , MAX_SOCKS_REPLY_LEN
);
2014 req
->replylen
= strlen((char*)req
->reply
)+1;
2016 default: /* version is not socks4 or socks5 */
2018 "Socks version %d not recognized. (Tor is not an http proxy.)",
2021 /* Tell the controller the first 8 bytes. */
2022 char *tmp
= tor_strndup(data
, datalen
< 8 ? datalen
: 8);
2023 control_event_client_status(LOG_WARN
,
2024 "SOCKS_UNKNOWN_PROTOCOL DATA=\"%s\"",
2032 /** Inspect a reply from SOCKS server stored in <b>buf</b> according
2033 * to <b>state</b>, removing the protocol data upon success. Return 0 on
2034 * incomplete response, 1 on success and -1 on error, in which case
2035 * <b>reason</b> is set to a descriptive message (free() when finished
2038 * As a special case, 2 is returned when user/pass is required
2039 * during SOCKS5 handshake and user/pass is configured.
2042 fetch_from_buf_socks_client(buf_t
*buf
, int state
, char **reason
)
2046 if (buf
->datalen
< 2)
2049 buf_pullup(buf
, MAX_SOCKS_MESSAGE_LEN
);
2050 tor_assert(buf
->head
&& buf
->head
->datalen
>= 2);
2052 r
= parse_socks_client((uint8_t*)buf
->head
->data
, buf
->head
->datalen
,
2053 state
, reason
, &drain
);
2055 buf_remove_from_front(buf
, drain
);
#ifdef USE_BUFFEREVENTS
/** As fetch_from_buf_socks_client, buf works on an evbuffer */
int
fetch_from_evbuffer_socks_client(struct evbuffer *buf, int state,
                                 char **reason)
{
  ssize_t drain = 0;
  uint8_t *data;
  size_t datalen;
  int r;

  /* Linearize the SOCKS response in the buffer, up to 128 bytes.
   * (parse_socks_client shouldn't need to see anything beyond that.) */
  datalen = evbuffer_get_length(buf);
  if (datalen > MAX_SOCKS_MESSAGE_LEN)
    datalen = MAX_SOCKS_MESSAGE_LEN;
  data = evbuffer_pullup(buf, datalen);

  r = parse_socks_client(data, datalen, state, reason, &drain);
  if (drain > 0)
    evbuffer_drain(buf, drain);
  else if (drain < 0)
    evbuffer_drain(buf, evbuffer_get_length(buf));

  return r;
}
#endif
2090 /** Implementation logic for fetch_from_*_socks_client. */
2092 parse_socks_client(const uint8_t *data
, size_t datalen
,
2093 int state
, char **reason
,
2096 unsigned int addrlen
;
2102 case PROXY_SOCKS4_WANT_CONNECT_OK
:
2103 /* Wait for the complete response */
2107 if (data
[1] != 0x5a) {
2108 *reason
= tor_strdup(socks4_response_code_to_string(data
[1]));
2116 case PROXY_SOCKS5_WANT_AUTH_METHOD_NONE
:
2117 /* we don't have any credentials */
2118 if (data
[1] != 0x00) {
2119 *reason
= tor_strdup("server doesn't support any of our "
2120 "available authentication methods");
2124 log_info(LD_NET
, "SOCKS 5 client: continuing without authentication");
2128 case PROXY_SOCKS5_WANT_AUTH_METHOD_RFC1929
:
2129 /* we have a username and password. return 1 if we can proceed without
2130 * providing authentication, or 2 otherwise. */
2133 log_info(LD_NET
, "SOCKS 5 client: we have auth details but server "
2134 "doesn't require authentication.");
2138 log_info(LD_NET
, "SOCKS 5 client: need authentication.");
2144 *reason
= tor_strdup("server doesn't support any of our available "
2145 "authentication methods");
2148 case PROXY_SOCKS5_WANT_AUTH_RFC1929_OK
:
2149 /* handle server reply to rfc1929 authentication */
2150 if (data
[1] != 0x00) {
2151 *reason
= tor_strdup("authentication failed");
2155 log_info(LD_NET
, "SOCKS 5 client: authentication successful.");
2159 case PROXY_SOCKS5_WANT_CONNECT_OK
:
2160 /* response is variable length. BND.ADDR, etc, isn't needed
2161 * (don't bother with buf_pullup()), but make sure to eat all
2164 /* wait for address type field to arrive */
2169 case 0x01: /* ip4 */
2172 case 0x04: /* ip6 */
2175 case 0x03: /* fqdn (can this happen here?) */
2178 addrlen
= 1 + data
[4];
2181 *reason
= tor_strdup("invalid response to connect request");
2185 /* wait for address and port */
2186 if (datalen
< 6 + addrlen
)
2189 if (data
[1] != 0x00) {
2190 *reason
= tor_strdup(socks5_response_code_to_string(data
[1]));
2194 *drain_out
= 6 + addrlen
;
2198 /* shouldn't get here... */
2204 /** Return 1 iff buf looks more like it has an (obsolete) v0 controller
2205 * command on it than any valid v1 controller command. */
2207 peek_buf_has_control0_command(buf_t
*buf
)
2209 if (buf
->datalen
>= 4) {
2212 peek_from_buf(header
, sizeof(header
), buf
);
2213 cmd
= ntohs(get_uint16(header
+2));
2215 return 1; /* This is definitely not a v1 control command. */
#ifdef USE_BUFFEREVENTS
/** As peek_buf_has_control0_command, but works on an evbuffer. */
int
peek_evbuffer_has_control0_command(struct evbuffer *buf)
{
  int result = 0;
  if (evbuffer_get_length(buf) >= 4) {
    int free_out = 0;
    char *data = NULL;
    size_t n = inspect_evbuffer(buf, &data, 4, &free_out, NULL);
    uint16_t cmd;
    tor_assert(n >= 4);
    cmd = ntohs(get_uint16(data+2));
    if (cmd <= 0x14)
      result = 1;
    if (free_out)
      tor_free(data);
  }
  return result;
}
#endif
2241 /** Return the index within <b>buf</b> at which <b>ch</b> first appears,
2242 * or -1 if <b>ch</b> does not appear on buf. */
2244 buf_find_offset_of_char(buf_t
*buf
, char ch
)
2248 for (chunk
= buf
->head
; chunk
; chunk
= chunk
->next
) {
2249 char *cp
= memchr(chunk
->data
, ch
, chunk
->datalen
);
2251 return offset
+ (cp
- chunk
->data
);
2253 offset
+= chunk
->datalen
;
2258 /** Try to read a single LF-terminated line from <b>buf</b>, and write it
2259 * (including the LF), NUL-terminated, into the *<b>data_len</b> byte buffer
2260 * at <b>data_out</b>. Set *<b>data_len</b> to the number of bytes in the
2261 * line, not counting the terminating NUL. Return 1 if we read a whole line,
2262 * return 0 if we don't have a whole line yet, and return -1 if the line
2263 * length exceeds *<b>data_len</b>.
2266 fetch_from_buf_line(buf_t
*buf
, char *data_out
, size_t *data_len
)
2274 offset
= buf_find_offset_of_char(buf
, '\n');
2277 sz
= (size_t) offset
;
2278 if (sz
+2 > *data_len
) {
2282 fetch_from_buf(data_out
, sz
+1, buf
);
2283 data_out
[sz
+1] = '\0';
2288 /** Compress on uncompress the <b>data_len</b> bytes in <b>data</b> using the
2289 * zlib state <b>state</b>, appending the result to <b>buf</b>. If
2290 * <b>done</b> is true, flush the data in the state and finish the
2291 * compression/uncompression. Return -1 on failure, 0 on success. */
2293 write_to_buf_zlib(buf_t
*buf
, tor_zlib_state_t
*state
,
2294 const char *data
, size_t data_len
,
2298 size_t old_avail
, avail
;
2302 int need_new_chunk
= 0;
2303 if (!buf
->tail
|| ! CHUNK_REMAINING_CAPACITY(buf
->tail
)) {
2304 size_t cap
= data_len
/ 4;
2305 buf_add_chunk_with_capacity(buf
, cap
, 1);
2307 next
= CHUNK_WRITE_PTR(buf
->tail
);
2308 avail
= old_avail
= CHUNK_REMAINING_CAPACITY(buf
->tail
);
2309 switch (tor_zlib_process(state
, &next
, &avail
, &data
, &data_len
, done
)) {
2319 case TOR_ZLIB_BUF_FULL
:
2321 /* Zlib says we need more room (ZLIB_BUF_FULL). Start a new chunk
2322 * automatically, whether were going to or not. */
2327 buf
->datalen
+= old_avail
- avail
;
2328 buf
->tail
->datalen
+= old_avail
- avail
;
2329 if (need_new_chunk
) {
2330 buf_add_chunk_with_capacity(buf
, data_len
/4, 1);
#ifdef USE_BUFFEREVENTS
/** As write_to_buf_zlib, but works on an evbuffer. */
int
write_to_evbuffer_zlib(struct evbuffer *buf, tor_zlib_state_t *state,
                       const char *data, size_t data_len,
                       int done)
{
  char *next;
  size_t old_avail, avail;
  int over = 0, n;
  struct evbuffer_iovec vec[1];
  do {
    {
      size_t cap = data_len / 4;
      if (cap < 128)
        cap = 128;
      /* XXXX NM this strategy is fragmentation-prone. We should really have
       * two iovecs, and write first into the one, and then into the
       * second if the first gets full. */
      n = evbuffer_reserve_space(buf, cap, vec, 1);
      tor_assert(n == 1);
    }

    next = vec[0].iov_base;
    avail = old_avail = vec[0].iov_len;

    switch (tor_zlib_process(state, &next, &avail, &data, &data_len, done)) {
      case TOR_ZLIB_DONE:
        over = 1;
        break;
      case TOR_ZLIB_ERR:
        return -1;
      case TOR_ZLIB_OK:
        if (data_len == 0)
          over = 1;
        break;
      case TOR_ZLIB_BUF_FULL:
        if (avail) {
          /* Zlib says we need more room (ZLIB_BUF_FULL).  Start a new chunk
           * automatically, whether were going to or not. */
        }
        break;
    }

    /* XXXX possible infinite loop on BUF_FULL. */
    vec[0].iov_len = old_avail - avail;
    evbuffer_commit_space(buf, vec, 1);

  } while (!over);

  return 0;
}
#endif
2391 /** Set *<b>output</b> to contain a copy of the data in *<b>input</b> */
2393 generic_buffer_set_to_copy(generic_buffer_t
**output
,
2394 const generic_buffer_t
*input
)
2396 #ifdef USE_BUFFEREVENTS
2397 struct evbuffer_ptr ptr
;
2398 size_t remaining
= evbuffer_get_length(input
);
2400 evbuffer_drain(*output
, evbuffer_get_length(*output
));
2402 if (!(*output
= evbuffer_new()))
2405 evbuffer_ptr_set((struct evbuffer
*)input
, &ptr
, 0, EVBUFFER_PTR_SET
);
2407 struct evbuffer_iovec v
[4];
2409 n_used
= evbuffer_peek((struct evbuffer
*)input
, -1, &ptr
, v
, 4);
2412 for (i
=0;i
<n_used
;++i
) {
2413 evbuffer_add(*output
, v
[i
].iov_base
, v
[i
].iov_len
);
2414 tor_assert(v
[i
].iov_len
<= remaining
);
2415 remaining
-= v
[i
].iov_len
;
2416 evbuffer_ptr_set((struct evbuffer
*)input
,
2417 &ptr
, v
[i
].iov_len
, EVBUFFER_PTR_ADD
);
2423 *output
= buf_copy(input
);
2428 /** Log an error and exit if <b>buf</b> is corrupted.
2431 assert_buf_ok(buf_t
*buf
)
2434 tor_assert(buf
->magic
== BUFFER_MAGIC
);
2437 tor_assert(!buf
->tail
);
2438 tor_assert(buf
->datalen
== 0);
2442 tor_assert(buf
->tail
);
2443 for (ch
= buf
->head
; ch
; ch
= ch
->next
) {
2444 total
+= ch
->datalen
;
2445 tor_assert(ch
->datalen
<= ch
->memlen
);
2446 tor_assert(ch
->data
>= &ch
->mem
[0]);
2447 tor_assert(ch
->data
<= &ch
->mem
[0]+ch
->memlen
);
2448 if (ch
->data
== &ch
->mem
[0]+ch
->memlen
) {
2449 static int warned
= 0;
2451 log_warn(LD_BUG
, "Invariant violation in buf.c related to #15083");
2455 tor_assert(ch
->data
+ch
->datalen
<= &ch
->mem
[0] + ch
->memlen
);
2457 tor_assert(ch
== buf
->tail
);
2459 tor_assert(buf
->datalen
== total
);