/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "apr.h"
#include "apr_lib.h"
#include "apr_strings.h"
#include "apr_pools.h"
#include "apr_tables.h"
#include "apr_buckets.h"
#include "apr_errno.h"
#define APR_WANT_MEMFUNC
#define APR_WANT_STRFUNC
#include "apr_want.h"

#if APR_HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif
32 static apr_status_t
brigade_cleanup(void *data
)
34 return apr_brigade_cleanup(data
);
37 APU_DECLARE(apr_status_t
) apr_brigade_cleanup(void *data
)
39 apr_bucket_brigade
*b
= data
;
42 while (!APR_BRIGADE_EMPTY(b
)) {
43 e
= APR_BRIGADE_FIRST(b
);
46 /* We don't need to free(bb) because it's allocated from a pool. */
50 APU_DECLARE(apr_status_t
) apr_brigade_destroy(apr_bucket_brigade
*b
)
52 apr_pool_cleanup_kill(b
->p
, b
, brigade_cleanup
);
53 return apr_brigade_cleanup(b
);
56 APU_DECLARE(apr_bucket_brigade
*) apr_brigade_create(apr_pool_t
*p
,
57 apr_bucket_alloc_t
*list
)
59 apr_bucket_brigade
*b
;
61 b
= apr_palloc(p
, sizeof(*b
));
63 b
->bucket_alloc
= list
;
65 APR_RING_INIT(&b
->list
, apr_bucket
, link
);
67 apr_pool_cleanup_register(b
->p
, b
, brigade_cleanup
, apr_pool_cleanup_null
);
71 APU_DECLARE(apr_bucket_brigade
*) apr_brigade_split_ex(apr_bucket_brigade
*b
,
73 apr_bucket_brigade
*a
)
78 a
= apr_brigade_create(b
->p
, b
->bucket_alloc
);
80 else if (!APR_BRIGADE_EMPTY(a
)) {
81 apr_brigade_cleanup(a
);
83 /* Return an empty brigade if there is nothing left in
84 * the first brigade to split off
86 if (e
!= APR_BRIGADE_SENTINEL(b
)) {
87 f
= APR_RING_LAST(&b
->list
);
88 APR_RING_UNSPLICE(e
, f
, link
);
89 APR_RING_SPLICE_HEAD(&a
->list
, e
, f
, apr_bucket
, link
);
92 APR_BRIGADE_CHECK_CONSISTENCY(a
);
93 APR_BRIGADE_CHECK_CONSISTENCY(b
);
98 APU_DECLARE(apr_bucket_brigade
*) apr_brigade_split(apr_bucket_brigade
*b
,
101 return apr_brigade_split_ex(b
, e
, NULL
);
104 APU_DECLARE(apr_status_t
) apr_brigade_partition(apr_bucket_brigade
*b
,
106 apr_bucket
**after_point
)
111 apr_uint64_t point64
;
115 /* this could cause weird (not necessarily SEGV) things to happen */
119 *after_point
= APR_BRIGADE_FIRST(b
);
124 * Try to reduce the following casting mess: We know that point will be
125 * larger equal 0 now and forever and thus that point (apr_off_t) and
126 * apr_size_t will fit into apr_uint64_t in any case.
128 point64
= (apr_uint64_t
)point
;
130 APR_BRIGADE_CHECK_CONSISTENCY(b
);
132 for (e
= APR_BRIGADE_FIRST(b
);
133 e
!= APR_BRIGADE_SENTINEL(b
);
134 e
= APR_BUCKET_NEXT(e
))
136 /* For an unknown length bucket, while 'point64' is beyond the possible
137 * size contained in apr_size_t, read and continue...
139 if ((e
->length
== (apr_size_t
)(-1))
140 && (point64
> (apr_uint64_t
)APR_SIZE_MAX
)) {
141 /* point64 is too far out to simply split this bucket,
142 * we must fix this bucket's size and keep going... */
143 rv
= apr_bucket_read(e
, &s
, &len
, APR_BLOCK_READ
);
144 if (rv
!= APR_SUCCESS
) {
149 else if ((point64
< (apr_uint64_t
)e
->length
)
150 || (e
->length
== (apr_size_t
)(-1))) {
151 /* We already consumed buckets where point64 is beyond
152 * our interest ( point64 > APR_SIZE_MAX ), above.
153 * Here point falls between 0 and APR_SIZE_MAX
154 * and is within this bucket, or this bucket's len
155 * is undefined, so now we are ready to split it.
156 * First try to split the bucket natively... */
157 if ((rv
= apr_bucket_split(e
, (apr_size_t
)point64
))
159 *after_point
= APR_BUCKET_NEXT(e
);
163 /* if the bucket cannot be split, we must read from it,
164 * changing its type to one that can be split */
165 rv
= apr_bucket_read(e
, &s
, &len
, APR_BLOCK_READ
);
166 if (rv
!= APR_SUCCESS
) {
171 /* this assumes that len == e->length, which is okay because e
172 * might have been morphed by the apr_bucket_read() above, but
173 * if it was, the length would have been adjusted appropriately */
174 if (point64
< (apr_uint64_t
)e
->length
) {
175 rv
= apr_bucket_split(e
, (apr_size_t
)point64
);
176 *after_point
= APR_BUCKET_NEXT(e
);
180 if (point64
== (apr_uint64_t
)e
->length
) {
181 *after_point
= APR_BUCKET_NEXT(e
);
184 point64
-= (apr_uint64_t
)e
->length
;
186 *after_point
= APR_BRIGADE_SENTINEL(b
);
187 return APR_INCOMPLETE
;
190 APU_DECLARE(apr_status_t
) apr_brigade_length(apr_bucket_brigade
*bb
,
191 int read_all
, apr_off_t
*length
)
195 apr_status_t status
= APR_SUCCESS
;
197 for (bkt
= APR_BRIGADE_FIRST(bb
);
198 bkt
!= APR_BRIGADE_SENTINEL(bb
);
199 bkt
= APR_BUCKET_NEXT(bkt
))
201 if (bkt
->length
== (apr_size_t
)(-1)) {
210 if ((status
= apr_bucket_read(bkt
, &ignore
, &len
,
211 APR_BLOCK_READ
)) != APR_SUCCESS
) {
216 total
+= bkt
->length
;
223 APU_DECLARE(apr_status_t
) apr_brigade_flatten(apr_bucket_brigade
*bb
,
224 char *c
, apr_size_t
*len
)
226 apr_size_t actual
= 0;
229 for (b
= APR_BRIGADE_FIRST(bb
);
230 b
!= APR_BRIGADE_SENTINEL(bb
);
231 b
= APR_BUCKET_NEXT(b
))
237 status
= apr_bucket_read(b
, &str
, &str_len
, APR_BLOCK_READ
);
238 if (status
!= APR_SUCCESS
) {
242 /* If we would overflow. */
243 if (str_len
+ actual
> *len
) {
244 str_len
= *len
- actual
;
247 /* XXX: It appears that overflow of the final bucket
248 * is DISCARDED without any warning to the caller.
250 * No, we only copy the data up to their requested size. -- jre
252 memcpy(c
, str
, str_len
);
257 /* This could probably be actual == *len, but be safe from stray
259 if (actual
>= *len
) {
268 APU_DECLARE(apr_status_t
) apr_brigade_pflatten(apr_bucket_brigade
*bb
,
277 apr_brigade_length(bb
, 1, &actual
);
279 /* XXX: This is dangerous beyond belief. At least in the
280 * apr_brigade_flatten case, the user explicitly stated their
281 * buffer length - so we don't up and palloc 4GB for a single
282 * file bucket. This API must grow a useful max boundry,
283 * either compiled-in or preset via the *len value.
285 * Shouldn't both fn's grow an additional return value for
286 * the case that the brigade couldn't be flattened into the
287 * provided or allocated buffer (such as APR_EMOREDATA?)
288 * Not a failure, simply an advisory result.
290 total
= (apr_size_t
)actual
;
292 *c
= apr_palloc(pool
, total
);
294 rv
= apr_brigade_flatten(bb
, *c
, &total
);
296 if (rv
!= APR_SUCCESS
) {
304 APU_DECLARE(apr_status_t
) apr_brigade_split_line(apr_bucket_brigade
*bbOut
,
305 apr_bucket_brigade
*bbIn
,
306 apr_read_type_e block
,
309 apr_off_t readbytes
= 0;
311 while (!APR_BRIGADE_EMPTY(bbIn
)) {
318 e
= APR_BRIGADE_FIRST(bbIn
);
319 rv
= apr_bucket_read(e
, &str
, &len
, block
);
321 if (rv
!= APR_SUCCESS
) {
325 pos
= memchr(str
, APR_ASCII_LF
, len
);
326 /* We found a match. */
328 apr_bucket_split(e
, pos
- str
+ 1);
329 APR_BUCKET_REMOVE(e
);
330 APR_BRIGADE_INSERT_TAIL(bbOut
, e
);
333 APR_BUCKET_REMOVE(e
);
334 APR_BRIGADE_INSERT_TAIL(bbOut
, e
);
336 /* We didn't find an APR_ASCII_LF within the maximum line length. */
337 if (readbytes
>= maxbytes
) {
346 APU_DECLARE(apr_status_t
) apr_brigade_to_iovec(apr_bucket_brigade
*b
,
347 struct iovec
*vec
, int *nvec
)
353 const char *iov_base
;
358 for (e
= APR_BRIGADE_FIRST(b
);
359 e
!= APR_BRIGADE_SENTINEL(b
);
360 e
= APR_BUCKET_NEXT(e
))
365 rv
= apr_bucket_read(e
, &iov_base
, &iov_len
, APR_NONBLOCK_READ
);
366 if (rv
!= APR_SUCCESS
)
368 /* Set indirectly since types differ: */
369 vec
->iov_len
= iov_len
;
370 vec
->iov_base
= (void *)iov_base
;
374 *nvec
= (int)(vec
- orig
);
378 APU_DECLARE(apr_status_t
) apr_brigade_vputstrs(apr_bucket_brigade
*b
,
379 apr_brigade_flush flush
,
384 const char *str
= va_arg(va
, const char *);
390 rv
= apr_brigade_write(b
, flush
, ctx
, str
, strlen(str
));
391 if (rv
!= APR_SUCCESS
)
398 APU_DECLARE(apr_status_t
) apr_brigade_putc(apr_bucket_brigade
*b
,
399 apr_brigade_flush flush
, void *ctx
,
402 return apr_brigade_write(b
, flush
, ctx
, &c
, 1);
405 APU_DECLARE(apr_status_t
) apr_brigade_write(apr_bucket_brigade
*b
,
406 apr_brigade_flush flush
,
408 const char *str
, apr_size_t nbyte
)
410 apr_bucket
*e
= APR_BRIGADE_LAST(b
);
411 apr_size_t remaining
= APR_BUCKET_BUFF_SIZE
;
414 if (!APR_BRIGADE_EMPTY(b
) && APR_BUCKET_IS_HEAP(e
)) {
415 apr_bucket_heap
*h
= e
->data
;
417 /* HEAP bucket start offsets are always in-memory, safe to cast */
418 remaining
= h
->alloc_len
- (e
->length
+ (apr_size_t
)e
->start
);
419 buf
= h
->base
+ e
->start
+ e
->length
;
422 if (nbyte
> remaining
) {
423 /* either a buffer bucket exists but is full,
424 * or no buffer bucket exists and the data is too big
425 * to buffer. In either case, we should flush. */
427 e
= apr_bucket_transient_create(str
, nbyte
, b
->bucket_alloc
);
428 APR_BRIGADE_INSERT_TAIL(b
, e
);
429 return flush(b
, ctx
);
432 e
= apr_bucket_heap_create(str
, nbyte
, NULL
, b
->bucket_alloc
);
433 APR_BRIGADE_INSERT_TAIL(b
, e
);
438 /* we don't have a buffer, but the data is small enough
439 * that we don't mind making a new buffer */
440 buf
= apr_bucket_alloc(APR_BUCKET_BUFF_SIZE
, b
->bucket_alloc
);
441 e
= apr_bucket_heap_create(buf
, APR_BUCKET_BUFF_SIZE
,
442 apr_bucket_free
, b
->bucket_alloc
);
443 APR_BRIGADE_INSERT_TAIL(b
, e
);
444 e
->length
= 0; /* We are writing into the brigade, and
445 * allocating more memory than we need. This
446 * ensures that the bucket thinks it is empty just
447 * after we create it. We'll fix the length
448 * once we put data in it below.
452 /* there is a sufficiently big buffer bucket available now */
453 memcpy(buf
, str
, nbyte
);
459 APU_DECLARE(apr_status_t
) apr_brigade_writev(apr_bucket_brigade
*b
,
460 apr_brigade_flush flush
,
462 const struct iovec
*vec
,
466 apr_size_t total_len
;
470 /* Compute the total length of the data to be written.
473 for (i
= 0; i
< nvec
; i
++) {
474 total_len
+= vec
[i
].iov_len
;
477 /* If the data to be written is very large, try to convert
478 * the iovec to transient buckets rather than copying.
480 if (total_len
> APR_BUCKET_BUFF_SIZE
) {
482 for (i
= 0; i
< nvec
; i
++) {
483 e
= apr_bucket_transient_create(vec
[i
].iov_base
,
486 APR_BRIGADE_INSERT_TAIL(b
, e
);
488 return flush(b
, ctx
);
491 for (i
= 0; i
< nvec
; i
++) {
492 e
= apr_bucket_heap_create((const char *) vec
[i
].iov_base
,
493 vec
[i
].iov_len
, NULL
,
495 APR_BRIGADE_INSERT_TAIL(b
, e
);
503 /* If there is a heap bucket at the end of the brigade
504 * already, copy into the existing bucket.
506 e
= APR_BRIGADE_LAST(b
);
507 if (!APR_BRIGADE_EMPTY(b
) && APR_BUCKET_IS_HEAP(e
)) {
508 apr_bucket_heap
*h
= e
->data
;
509 apr_size_t remaining
= h
->alloc_len
-
510 (e
->length
+ (apr_size_t
)e
->start
);
511 buf
= h
->base
+ e
->start
+ e
->length
;
513 if (remaining
>= total_len
) {
514 /* Simple case: all the data will fit in the
515 * existing heap bucket
517 for (; i
< nvec
; i
++) {
518 apr_size_t len
= vec
[i
].iov_len
;
519 memcpy(buf
, (const void *) vec
[i
].iov_base
, len
);
522 e
->length
+= total_len
;
526 /* More complicated case: not all of the data
527 * will fit in the existing heap bucket. The
528 * total data size is <= APR_BUCKET_BUFF_SIZE,
529 * so we'll need only one additional bucket.
531 const char *start_buf
= buf
;
532 for (; i
< nvec
; i
++) {
533 apr_size_t len
= vec
[i
].iov_len
;
534 if (len
> remaining
) {
537 memcpy(buf
, (const void *) vec
[i
].iov_base
, len
);
541 e
->length
+= (buf
- start_buf
);
542 total_len
-= (buf
- start_buf
);
545 apr_status_t rv
= flush(b
, ctx
);
546 if (rv
!= APR_SUCCESS
) {
551 /* Now fall through into the case below to
552 * allocate another heap bucket and copy the
553 * rest of the array. (Note that i is not
554 * reset to zero here; it holds the index
555 * of the first vector element to be
556 * written to the new bucket.)
561 /* Allocate a new heap bucket, and copy the data into it.
562 * The checks above ensure that the amount of data to be
563 * written here is no larger than APR_BUCKET_BUFF_SIZE.
565 buf
= apr_bucket_alloc(APR_BUCKET_BUFF_SIZE
, b
->bucket_alloc
);
566 e
= apr_bucket_heap_create(buf
, APR_BUCKET_BUFF_SIZE
,
567 apr_bucket_free
, b
->bucket_alloc
);
568 for (; i
< nvec
; i
++) {
569 apr_size_t len
= vec
[i
].iov_len
;
570 memcpy(buf
, (const void *) vec
[i
].iov_base
, len
);
573 e
->length
= total_len
;
574 APR_BRIGADE_INSERT_TAIL(b
, e
);
579 APU_DECLARE(apr_status_t
) apr_brigade_puts(apr_bucket_brigade
*bb
,
580 apr_brigade_flush flush
, void *ctx
,
583 apr_size_t len
= strlen(str
);
584 apr_bucket
*bkt
= APR_BRIGADE_LAST(bb
);
585 if (!APR_BRIGADE_EMPTY(bb
) && APR_BUCKET_IS_HEAP(bkt
)) {
586 /* If there is enough space available in a heap bucket
587 * at the end of the brigade, copy the string directly
588 * into the heap bucket
590 apr_bucket_heap
*h
= bkt
->data
;
591 apr_size_t bytes_avail
= h
->alloc_len
- bkt
->length
;
593 if (bytes_avail
>= len
) {
594 char *buf
= h
->base
+ bkt
->start
+ bkt
->length
;
595 memcpy(buf
, str
, len
);
601 /* If the string could not be copied into an existing heap
602 * bucket, delegate the work to apr_brigade_write(), which
603 * knows how to grow the brigade
605 return apr_brigade_write(bb
, flush
, ctx
, str
, len
);
608 APU_DECLARE_NONSTD(apr_status_t
) apr_brigade_putstrs(apr_bucket_brigade
*b
,
609 apr_brigade_flush flush
,
616 rv
= apr_brigade_vputstrs(b
, flush
, ctx
, va
);
621 APU_DECLARE_NONSTD(apr_status_t
) apr_brigade_printf(apr_bucket_brigade
*b
,
622 apr_brigade_flush flush
,
624 const char *fmt
, ...)
630 rv
= apr_brigade_vprintf(b
, flush
, ctx
, fmt
, ap
);
635 struct brigade_vprintf_data_t
{
636 apr_vformatter_buff_t vbuff
;
638 apr_bucket_brigade
*b
; /* associated brigade */
639 apr_brigade_flush
*flusher
; /* flushing function */
642 char *cbuff
; /* buffer to flush from */
645 static apr_status_t
brigade_flush(apr_vformatter_buff_t
*buff
)
647 /* callback function passed to ap_vformatter to be
648 * called when vformatter needs to buff and
649 * buff.curpos > buff.endpos
652 /* "downcast," have really passed a brigade_vprintf_data_t* */
653 struct brigade_vprintf_data_t
*vd
= (struct brigade_vprintf_data_t
*)buff
;
654 apr_status_t res
= APR_SUCCESS
;
656 res
= apr_brigade_write(vd
->b
, *vd
->flusher
, vd
->ctx
, vd
->cbuff
,
657 APR_BUCKET_BUFF_SIZE
);
659 if(res
!= APR_SUCCESS
) {
663 vd
->vbuff
.curpos
= vd
->cbuff
;
664 vd
->vbuff
.endpos
= vd
->cbuff
+ APR_BUCKET_BUFF_SIZE
;
669 APU_DECLARE(apr_status_t
) apr_brigade_vprintf(apr_bucket_brigade
*b
,
670 apr_brigade_flush flush
,
672 const char *fmt
, va_list va
)
674 /* the cast, in order of appearance */
675 struct brigade_vprintf_data_t vd
;
676 char buf
[APR_BUCKET_BUFF_SIZE
];
679 vd
.vbuff
.curpos
= buf
;
680 vd
.vbuff
.endpos
= buf
+ APR_BUCKET_BUFF_SIZE
;
686 written
= apr_vformatter(brigade_flush
, &vd
.vbuff
, fmt
, va
);
692 /* tack on null terminator to remaining string */
693 *(vd
.vbuff
.curpos
) = '\0';
695 /* write out what remains in the buffer */
696 return apr_brigade_write(b
, flush
, ctx
, buf
, vd
.vbuff
.curpos
- buf
);
699 /* A "safe" maximum bucket size, 1Gb */
700 #define MAX_BUCKET_SIZE (0x40000000)
702 APU_DECLARE(apr_bucket
*) apr_brigade_insert_file(apr_bucket_brigade
*bb
,
710 if (sizeof(apr_off_t
) == sizeof(apr_size_t
) || length
< MAX_BUCKET_SIZE
) {
711 e
= apr_bucket_file_create(f
, start
, (apr_size_t
)length
, p
,
715 /* Several buckets are needed. */
716 e
= apr_bucket_file_create(f
, start
, MAX_BUCKET_SIZE
, p
,
719 while (length
> MAX_BUCKET_SIZE
) {
721 apr_bucket_copy(e
, &ce
);
722 APR_BRIGADE_INSERT_TAIL(bb
, ce
);
723 e
->start
+= MAX_BUCKET_SIZE
;
724 length
-= MAX_BUCKET_SIZE
;
726 e
->length
= (apr_size_t
)length
; /* Resize just the last bucket */
729 APR_BRIGADE_INSERT_TAIL(bb
, e
);