/* apr-util: buckets/apr_brigade.c */
/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr.h"
#include "apr_lib.h"
#include "apr_strings.h"
#include "apr_pools.h"
#include "apr_tables.h"
#include "apr_buckets.h"
#include "apr_errno.h"
#define APR_WANT_MEMFUNC
#define APR_WANT_STRFUNC
#include "apr_want.h"

#if APR_HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif
static apr_status_t brigade_cleanup(void *data)
{
    return apr_brigade_cleanup(data);
}

APU_DECLARE(apr_status_t) apr_brigade_cleanup(void *data)
{
    apr_bucket_brigade *b = data;
    apr_bucket *e;

    while (!APR_BRIGADE_EMPTY(b)) {
        e = APR_BRIGADE_FIRST(b);
        apr_bucket_delete(e);
    }
    /* We don't need to free(bb) because it's allocated from a pool. */
    return APR_SUCCESS;
}

APU_DECLARE(apr_status_t) apr_brigade_destroy(apr_bucket_brigade *b)
{
    apr_pool_cleanup_kill(b->p, b, brigade_cleanup);
    return apr_brigade_cleanup(b);
}
APU_DECLARE(apr_bucket_brigade *) apr_brigade_create(apr_pool_t *p,
                                                     apr_bucket_alloc_t *list)
{
    apr_bucket_brigade *b;

    b = apr_palloc(p, sizeof(*b));
    b->p = p;
    b->bucket_alloc = list;

    APR_RING_INIT(&b->list, apr_bucket, link);

    apr_pool_cleanup_register(b->p, b, brigade_cleanup, apr_pool_cleanup_null);

    return b;
}
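
/* Illustrative sketch (not part of the upstream file): a typical brigade
 * lifecycle using apr_brigade_create()/apr_brigade_destroy().  The pool,
 * allocator and function names below are hypothetical caller state, shown
 * only to clarify how the pool cleanup registered above is meant to be used.
 */
#if 0
static apr_status_t example_brigade_lifecycle(apr_pool_t *pool)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(pool);
    apr_bucket_brigade *bb = apr_brigade_create(pool, ba);
    apr_bucket *e;

    /* Append an immortal bucket wrapping a constant string. */
    e = apr_bucket_immortal_create("hello", 5, ba);
    APR_BRIGADE_INSERT_TAIL(bb, e);

    /* Either destroy explicitly, or rely on the pool cleanup that
     * apr_brigade_create() registered. */
    return apr_brigade_destroy(bb);
}
#endif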
APU_DECLARE(apr_bucket_brigade *) apr_brigade_split_ex(apr_bucket_brigade *b,
                                                       apr_bucket *e,
                                                       apr_bucket_brigade *a)
{
    apr_bucket *f;

    if (!a) {
        a = apr_brigade_create(b->p, b->bucket_alloc);
    }
    else if (!APR_BRIGADE_EMPTY(a)) {
        apr_brigade_cleanup(a);
    }
    /* Return an empty brigade if there is nothing left in
     * the first brigade to split off
     */
    if (e != APR_BRIGADE_SENTINEL(b)) {
        f = APR_RING_LAST(&b->list);
        APR_RING_UNSPLICE(e, f, link);
        APR_RING_SPLICE_HEAD(&a->list, e, f, apr_bucket, link);
    }

    APR_BRIGADE_CHECK_CONSISTENCY(a);
    APR_BRIGADE_CHECK_CONSISTENCY(b);

    return a;
}

APU_DECLARE(apr_bucket_brigade *) apr_brigade_split(apr_bucket_brigade *b,
                                                    apr_bucket *e)
{
    return apr_brigade_split_ex(b, e, NULL);
}
APU_DECLARE(apr_status_t) apr_brigade_partition(apr_bucket_brigade *b,
                                                apr_off_t point,
                                                apr_bucket **after_point)
{
    apr_bucket *e;
    const char *s;
    apr_size_t len;
    apr_uint64_t point64;
    apr_status_t rv;

    if (point < 0) {
        /* this could cause weird (not necessarily SEGV) things to happen */
        return APR_EINVAL;
    }
    if (point == 0) {
        *after_point = APR_BRIGADE_FIRST(b);
        return APR_SUCCESS;
    }

    /*
     * Try to reduce the following casting mess: We know that point is
     * greater than or equal to 0 from here on, and thus that point
     * (apr_off_t) and apr_size_t will fit into apr_uint64_t in any case.
     */
    point64 = (apr_uint64_t)point;

    APR_BRIGADE_CHECK_CONSISTENCY(b);

    for (e = APR_BRIGADE_FIRST(b);
         e != APR_BRIGADE_SENTINEL(b);
         e = APR_BUCKET_NEXT(e))
    {
        /* For an unknown length bucket, while 'point64' is beyond the possible
         * size contained in apr_size_t, read and continue...
         */
        if ((e->length == (apr_size_t)(-1))
            && (point64 > (apr_uint64_t)APR_SIZE_MAX)) {
            /* point64 is too far out to simply split this bucket,
             * we must fix this bucket's size and keep going... */
            rv = apr_bucket_read(e, &s, &len, APR_BLOCK_READ);
            if (rv != APR_SUCCESS) {
                *after_point = e;
                return rv;
            }
        }
        else if ((point64 < (apr_uint64_t)e->length)
                 || (e->length == (apr_size_t)(-1))) {
            /* We already consumed buckets where point64 is beyond
             * our interest ( point64 > APR_SIZE_MAX ), above.
             * Here point falls between 0 and APR_SIZE_MAX
             * and is within this bucket, or this bucket's len
             * is undefined, so now we are ready to split it.
             * First try to split the bucket natively... */
            if ((rv = apr_bucket_split(e, (apr_size_t)point64))
                    != APR_ENOTIMPL) {
                *after_point = APR_BUCKET_NEXT(e);
                return rv;
            }

            /* if the bucket cannot be split, we must read from it,
             * changing its type to one that can be split */
            rv = apr_bucket_read(e, &s, &len, APR_BLOCK_READ);
            if (rv != APR_SUCCESS) {
                *after_point = e;
                return rv;
            }

            /* this assumes that len == e->length, which is okay because e
             * might have been morphed by the apr_bucket_read() above, but
             * if it was, the length would have been adjusted appropriately */
            if (point64 < (apr_uint64_t)e->length) {
                rv = apr_bucket_split(e, (apr_size_t)point64);
                *after_point = APR_BUCKET_NEXT(e);
                return rv;
            }
        }
        if (point64 == (apr_uint64_t)e->length) {
            *after_point = APR_BUCKET_NEXT(e);
            return APR_SUCCESS;
        }
        point64 -= (apr_uint64_t)e->length;
    }
    *after_point = APR_BRIGADE_SENTINEL(b);
    return APR_INCOMPLETE;
}
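
/* Illustrative sketch (not part of the upstream file): using
 * apr_brigade_partition() to find the bucket boundary at a byte offset.
 * 'bb' and 'offset' are hypothetical caller state.
 */
#if 0
static apr_status_t example_partition(apr_bucket_brigade *bb,
                                      apr_off_t offset)
{
    apr_bucket *after;
    apr_status_t rv = apr_brigade_partition(bb, offset, &after);

    if (rv == APR_SUCCESS) {
        /* 'after' is the first bucket past 'offset' bytes; splitting there
         * leaves the first 'offset' bytes in 'bb' and returns the rest. */
        apr_bucket_brigade *tail = apr_brigade_split(bb, after);
        (void)tail;
    }
    return rv;
}
#endif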
APU_DECLARE(apr_status_t) apr_brigade_length(apr_bucket_brigade *bb,
                                             int read_all, apr_off_t *length)
{
    apr_off_t total = 0;
    apr_bucket *bkt;
    apr_status_t status = APR_SUCCESS;

    for (bkt = APR_BRIGADE_FIRST(bb);
         bkt != APR_BRIGADE_SENTINEL(bb);
         bkt = APR_BUCKET_NEXT(bkt))
    {
        if (bkt->length == (apr_size_t)(-1)) {
            const char *ignore;
            apr_size_t len;

            if (!read_all) {
                total = -1;
                break;
            }

            if ((status = apr_bucket_read(bkt, &ignore, &len,
                                          APR_BLOCK_READ)) != APR_SUCCESS) {
                break;
            }
        }

        total += bkt->length;
    }

    *length = total;
    return status;
}

APU_DECLARE(apr_status_t) apr_brigade_flatten(apr_bucket_brigade *bb,
                                              char *c, apr_size_t *len)
{
    apr_size_t actual = 0;
    apr_bucket *b;

    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b))
    {
        const char *str;
        apr_size_t str_len;
        apr_status_t status;

        status = apr_bucket_read(b, &str, &str_len, APR_BLOCK_READ);
        if (status != APR_SUCCESS) {
            return status;
        }

        /* If we would overflow. */
        if (str_len + actual > *len) {
            str_len = *len - actual;
        }

        /* XXX: It appears that overflow of the final bucket
         * is DISCARDED without any warning to the caller.
         *
         * No, we only copy the data up to their requested size. -- jre
         */
        memcpy(c, str, str_len);

        c += str_len;
        actual += str_len;

        /* This could probably be actual == *len, but be safe from stray
         * photons. */
        if (actual >= *len) {
            break;
        }
    }

    *len = actual;
    return APR_SUCCESS;
}

APU_DECLARE(apr_status_t) apr_brigade_pflatten(apr_bucket_brigade *bb,
                                               char **c,
                                               apr_size_t *len,
                                               apr_pool_t *pool)
{
    apr_off_t actual;
    apr_size_t total;
    apr_status_t rv;

    apr_brigade_length(bb, 1, &actual);

    /* XXX: This is dangerous beyond belief.  At least in the
     * apr_brigade_flatten case, the user explicitly stated their
     * buffer length - so we don't up and palloc 4GB for a single
     * file bucket.  This API must grow a useful max boundary,
     * either compiled-in or preset via the *len value.
     *
     * Shouldn't both fn's grow an additional return value for
     * the case that the brigade couldn't be flattened into the
     * provided or allocated buffer (such as APR_EMOREDATA?)
     * Not a failure, simply an advisory result.
     */
    total = (apr_size_t)actual;

    *c = apr_palloc(pool, total);

    rv = apr_brigade_flatten(bb, *c, &total);

    if (rv != APR_SUCCESS) {
        return rv;
    }

    *len = total;
    return APR_SUCCESS;
}
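
/* Illustrative sketch (not part of the upstream file): flattening a brigade
 * into a single pool-allocated buffer with apr_brigade_pflatten().  'bb' and
 * 'pool' are hypothetical caller state; note the XXX above about unbounded
 * allocation for large brigades.
 */
#if 0
static apr_status_t example_pflatten(apr_bucket_brigade *bb, apr_pool_t *pool)
{
    char *buf;
    apr_size_t len;
    apr_status_t rv = apr_brigade_pflatten(bb, &buf, &len, pool);

    if (rv == APR_SUCCESS) {
        /* 'buf' now holds 'len' bytes of brigade data (not NUL-terminated). */
    }
    return rv;
}
#endif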
APU_DECLARE(apr_status_t) apr_brigade_split_line(apr_bucket_brigade *bbOut,
                                                 apr_bucket_brigade *bbIn,
                                                 apr_read_type_e block,
                                                 apr_off_t maxbytes)
{
    apr_off_t readbytes = 0;

    while (!APR_BRIGADE_EMPTY(bbIn)) {
        const char *pos;
        const char *str;
        apr_size_t len;
        apr_status_t rv;
        apr_bucket *e;

        e = APR_BRIGADE_FIRST(bbIn);
        rv = apr_bucket_read(e, &str, &len, block);

        if (rv != APR_SUCCESS) {
            return rv;
        }

        pos = memchr(str, APR_ASCII_LF, len);
        /* We found a match. */
        if (pos != NULL) {
            apr_bucket_split(e, pos - str + 1);
            APR_BUCKET_REMOVE(e);
            APR_BRIGADE_INSERT_TAIL(bbOut, e);
            return APR_SUCCESS;
        }
        APR_BUCKET_REMOVE(e);
        APR_BRIGADE_INSERT_TAIL(bbOut, e);
        readbytes += len;
        /* We didn't find an APR_ASCII_LF within the maximum line length. */
        if (readbytes >= maxbytes) {
            break;
        }
    }

    return APR_SUCCESS;
}
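
/* Illustrative sketch (not part of the upstream file): reading one
 * LF-terminated line from an input brigade with apr_brigade_split_line().
 * 'bbin' is hypothetical caller state and the 8192-byte cap is arbitrary.
 */
#if 0
static apr_status_t example_read_line(apr_bucket_brigade *bbin,
                                      apr_pool_t *pool,
                                      apr_bucket_alloc_t *ba)
{
    apr_bucket_brigade *line = apr_brigade_create(pool, ba);
    apr_status_t rv;
    char buf[8192];
    apr_size_t len = sizeof(buf);

    /* Move buckets up to and including the first LF (or 8192 bytes). */
    rv = apr_brigade_split_line(line, bbin, APR_BLOCK_READ, sizeof(buf));
    if (rv != APR_SUCCESS) {
        return rv;
    }

    /* Flatten the line into a flat buffer for further parsing. */
    return apr_brigade_flatten(line, buf, &len);
}
#endif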
APU_DECLARE(apr_status_t) apr_brigade_to_iovec(apr_bucket_brigade *b,
                                               struct iovec *vec, int *nvec)
{
    int left = *nvec;
    apr_bucket *e;
    struct iovec *orig;
    apr_size_t iov_len;
    const char *iov_base;
    apr_status_t rv;

    orig = vec;

    for (e = APR_BRIGADE_FIRST(b);
         e != APR_BRIGADE_SENTINEL(b);
         e = APR_BUCKET_NEXT(e))
    {
        if (left-- == 0)
            break;

        rv = apr_bucket_read(e, &iov_base, &iov_len, APR_NONBLOCK_READ);
        if (rv != APR_SUCCESS)
            return rv;
        /* Set indirectly since types differ: */
        vec->iov_len = iov_len;
        vec->iov_base = (void *)iov_base;
        ++vec;
    }

    *nvec = (int)(vec - orig);
    return APR_SUCCESS;
}

APU_DECLARE(apr_status_t) apr_brigade_vputstrs(apr_bucket_brigade *b,
                                               apr_brigade_flush flush,
                                               void *ctx,
                                               va_list va)
{
    for (;;) {
        const char *str = va_arg(va, const char *);
        apr_status_t rv;

        if (str == NULL)
            break;

        rv = apr_brigade_write(b, flush, ctx, str, strlen(str));
        if (rv != APR_SUCCESS)
            return rv;
    }

    return APR_SUCCESS;
}

APU_DECLARE(apr_status_t) apr_brigade_putc(apr_bucket_brigade *b,
                                           apr_brigade_flush flush, void *ctx,
                                           const char c)
{
    return apr_brigade_write(b, flush, ctx, &c, 1);
}

APU_DECLARE(apr_status_t) apr_brigade_write(apr_bucket_brigade *b,
                                            apr_brigade_flush flush,
                                            void *ctx,
                                            const char *str, apr_size_t nbyte)
{
    apr_bucket *e = APR_BRIGADE_LAST(b);
    apr_size_t remaining = APR_BUCKET_BUFF_SIZE;
    char *buf = NULL;

    if (!APR_BRIGADE_EMPTY(b) && APR_BUCKET_IS_HEAP(e)) {
        apr_bucket_heap *h = e->data;

        /* HEAP bucket start offsets are always in-memory, safe to cast */
        remaining = h->alloc_len - (e->length + (apr_size_t)e->start);
        buf = h->base + e->start + e->length;
    }

    if (nbyte > remaining) {
        /* either a buffer bucket exists but is full,
         * or no buffer bucket exists and the data is too big
         * to buffer.  In either case, we should flush.  */
        if (flush) {
            e = apr_bucket_transient_create(str, nbyte, b->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(b, e);
            return flush(b, ctx);
        }
        else {
            e = apr_bucket_heap_create(str, nbyte, NULL, b->bucket_alloc);
            APR_BRIGADE_INSERT_TAIL(b, e);
            return APR_SUCCESS;
        }
    }
    else if (!buf) {
        /* we don't have a buffer, but the data is small enough
         * that we don't mind making a new buffer */
        buf = apr_bucket_alloc(APR_BUCKET_BUFF_SIZE, b->bucket_alloc);
        e = apr_bucket_heap_create(buf, APR_BUCKET_BUFF_SIZE,
                                   apr_bucket_free, b->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(b, e);
        e->length = 0;   /* We are writing into the brigade, and
                          * allocating more memory than we need.  This
                          * ensures that the bucket thinks it is empty just
                          * after we create it.  We'll fix the length
                          * once we put data in it below.
                          */
    }

    /* there is a sufficiently big buffer bucket available now */
    memcpy(buf, str, nbyte);
    e->length += nbyte;

    return APR_SUCCESS;
}
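
/* Illustrative sketch (not part of the upstream file): buffered writes with
 * an optional flush callback.  'example_flush' and 'example_buffered_write'
 * are hypothetical; apr_brigade_write() only requires that the callback
 * match apr_brigade_flush and empty the brigade somehow.
 */
#if 0
static apr_status_t example_flush(apr_bucket_brigade *bb, void *ctx)
{
    /* A real callback would send the buckets somewhere (socket, file, next
     * filter) before emptying the brigade. */
    return apr_brigade_cleanup(bb);
}

static apr_status_t example_buffered_write(apr_bucket_brigade *bb)
{
    apr_status_t rv;

    /* Small writes are coalesced into one heap bucket ... */
    rv = apr_brigade_write(bb, example_flush, NULL, "abc", 3);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    /* ... strings and single characters go through the same path. */
    rv = apr_brigade_puts(bb, example_flush, NULL, "def");
    if (rv != APR_SUCCESS) {
        return rv;
    }
    return apr_brigade_putc(bb, example_flush, NULL, '\n');
}
#endif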
APU_DECLARE(apr_status_t) apr_brigade_writev(apr_bucket_brigade *b,
                                             apr_brigade_flush flush,
                                             void *ctx,
                                             const struct iovec *vec,
                                             apr_size_t nvec)
{
    apr_bucket *e;
    apr_size_t total_len;
    apr_size_t i;
    char *buf;

    /* Compute the total length of the data to be written.
     */
    total_len = 0;
    for (i = 0; i < nvec; i++) {
        total_len += vec[i].iov_len;
    }

    /* If the data to be written is very large, try to convert
     * the iovec to transient buckets rather than copying.
     */
    if (total_len > APR_BUCKET_BUFF_SIZE) {
        if (flush) {
            for (i = 0; i < nvec; i++) {
                e = apr_bucket_transient_create(vec[i].iov_base,
                                                vec[i].iov_len,
                                                b->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(b, e);
            }
            return flush(b, ctx);
        }
        else {
            for (i = 0; i < nvec; i++) {
                e = apr_bucket_heap_create((const char *) vec[i].iov_base,
                                           vec[i].iov_len, NULL,
                                           b->bucket_alloc);
                APR_BRIGADE_INSERT_TAIL(b, e);
            }
            return APR_SUCCESS;
        }
    }

    i = 0;

    /* If there is a heap bucket at the end of the brigade
     * already, copy into the existing bucket.
     */
    e = APR_BRIGADE_LAST(b);
    if (!APR_BRIGADE_EMPTY(b) && APR_BUCKET_IS_HEAP(e)) {
        apr_bucket_heap *h = e->data;
        apr_size_t remaining = h->alloc_len -
            (e->length + (apr_size_t)e->start);
        buf = h->base + e->start + e->length;

        if (remaining >= total_len) {
            /* Simple case: all the data will fit in the
             * existing heap bucket
             */
            for (; i < nvec; i++) {
                apr_size_t len = vec[i].iov_len;
                memcpy(buf, (const void *) vec[i].iov_base, len);
                buf += len;
            }
            e->length += total_len;
            return APR_SUCCESS;
        }
        else {
            /* More complicated case: not all of the data
             * will fit in the existing heap bucket.  The
             * total data size is <= APR_BUCKET_BUFF_SIZE,
             * so we'll need only one additional bucket.
             */
            const char *start_buf = buf;
            for (; i < nvec; i++) {
                apr_size_t len = vec[i].iov_len;
                if (len > remaining) {
                    break;
                }
                memcpy(buf, (const void *) vec[i].iov_base, len);
                buf += len;
                remaining -= len;
            }
            e->length += (buf - start_buf);
            total_len -= (buf - start_buf);

            if (flush) {
                apr_status_t rv = flush(b, ctx);
                if (rv != APR_SUCCESS) {
                    return rv;
                }
            }

            /* Now fall through into the case below to
             * allocate another heap bucket and copy the
             * rest of the array.  (Note that i is not
             * reset to zero here; it holds the index
             * of the first vector element to be
             * written to the new bucket.)
             */
        }
    }

    /* Allocate a new heap bucket, and copy the data into it.
     * The checks above ensure that the amount of data to be
     * written here is no larger than APR_BUCKET_BUFF_SIZE.
     */
    buf = apr_bucket_alloc(APR_BUCKET_BUFF_SIZE, b->bucket_alloc);
    e = apr_bucket_heap_create(buf, APR_BUCKET_BUFF_SIZE,
                               apr_bucket_free, b->bucket_alloc);
    for (; i < nvec; i++) {
        apr_size_t len = vec[i].iov_len;
        memcpy(buf, (const void *) vec[i].iov_base, len);
        buf += len;
    }
    e->length = total_len;
    APR_BRIGADE_INSERT_TAIL(b, e);

    return APR_SUCCESS;
}
APU_DECLARE(apr_status_t) apr_brigade_puts(apr_bucket_brigade *bb,
                                           apr_brigade_flush flush, void *ctx,
                                           const char *str)
{
    apr_size_t len = strlen(str);
    apr_bucket *bkt = APR_BRIGADE_LAST(bb);
    if (!APR_BRIGADE_EMPTY(bb) && APR_BUCKET_IS_HEAP(bkt)) {
        /* If there is enough space available in a heap bucket
         * at the end of the brigade, copy the string directly
         * into the heap bucket
         */
        apr_bucket_heap *h = bkt->data;
        apr_size_t bytes_avail = h->alloc_len - bkt->length;

        if (bytes_avail >= len) {
            char *buf = h->base + bkt->start + bkt->length;
            memcpy(buf, str, len);
            bkt->length += len;
            return APR_SUCCESS;
        }
    }

    /* If the string could not be copied into an existing heap
     * bucket, delegate the work to apr_brigade_write(), which
     * knows how to grow the brigade
     */
    return apr_brigade_write(bb, flush, ctx, str, len);
}
APU_DECLARE_NONSTD(apr_status_t) apr_brigade_putstrs(apr_bucket_brigade *b,
                                                     apr_brigade_flush flush,
                                                     void *ctx, ...)
{
    va_list va;
    apr_status_t rv;

    va_start(va, ctx);
    rv = apr_brigade_vputstrs(b, flush, ctx, va);
    va_end(va);
    return rv;
}

APU_DECLARE_NONSTD(apr_status_t) apr_brigade_printf(apr_bucket_brigade *b,
                                                    apr_brigade_flush flush,
                                                    void *ctx,
                                                    const char *fmt, ...)
{
    va_list ap;
    apr_status_t rv;

    va_start(ap, fmt);
    rv = apr_brigade_vprintf(b, flush, ctx, fmt, ap);
    va_end(ap);
    return rv;
}

struct brigade_vprintf_data_t {
    apr_vformatter_buff_t vbuff;

    apr_bucket_brigade *b;      /* associated brigade */
    apr_brigade_flush *flusher; /* flushing function */
    void *ctx;

    char *cbuff;                /* buffer to flush from */
};

static apr_status_t brigade_flush(apr_vformatter_buff_t *buff)
{
    /* Callback passed to apr_vformatter(); it is invoked when the formatter
     * runs out of buffer space (buff.curpos > buff.endpos) and needs the
     * buffer written out.
     */

    /* "downcast": we were really passed a brigade_vprintf_data_t* */
    struct brigade_vprintf_data_t *vd = (struct brigade_vprintf_data_t*)buff;
    apr_status_t res = APR_SUCCESS;

    res = apr_brigade_write(vd->b, *vd->flusher, vd->ctx, vd->cbuff,
                            APR_BUCKET_BUFF_SIZE);

    if (res != APR_SUCCESS) {
        return -1;
    }

    vd->vbuff.curpos = vd->cbuff;
    vd->vbuff.endpos = vd->cbuff + APR_BUCKET_BUFF_SIZE;

    return res;
}
APU_DECLARE(apr_status_t) apr_brigade_vprintf(apr_bucket_brigade *b,
                                              apr_brigade_flush flush,
                                              void *ctx,
                                              const char *fmt, va_list va)
{
    /* the cast, in order of appearance */
    struct brigade_vprintf_data_t vd;
    char buf[APR_BUCKET_BUFF_SIZE];
    int written;

    vd.vbuff.curpos = buf;
    vd.vbuff.endpos = buf + APR_BUCKET_BUFF_SIZE;
    vd.b = b;
    vd.flusher = &flush;
    vd.ctx = ctx;
    vd.cbuff = buf;

    written = apr_vformatter(brigade_flush, &vd.vbuff, fmt, va);

    if (written == -1) {
        return -1;
    }

    /* tack on null terminator to remaining string */
    *(vd.vbuff.curpos) = '\0';

    /* write out what remains in the buffer */
    return apr_brigade_write(b, flush, ctx, buf, vd.vbuff.curpos - buf);
}
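
/* Illustrative sketch (not part of the upstream file): formatted output into
 * a brigade via apr_brigade_printf().  The 'status' value is hypothetical;
 * output longer than APR_BUCKET_BUFF_SIZE is drained through brigade_flush()
 * above.
 */
#if 0
static apr_status_t example_printf(apr_bucket_brigade *bb, int status)
{
    /* No flush callback: everything is buffered in heap buckets. */
    return apr_brigade_printf(bb, NULL, NULL,
                              "HTTP/1.1 %d Status\r\n", status);
}
#endif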
/* A "safe" maximum bucket size, 1GiB */
#define MAX_BUCKET_SIZE (0x40000000)

APU_DECLARE(apr_bucket *) apr_brigade_insert_file(apr_bucket_brigade *bb,
                                                  apr_file_t *f,
                                                  apr_off_t start,
                                                  apr_off_t length,
                                                  apr_pool_t *p)
{
    apr_bucket *e;

    if (sizeof(apr_off_t) == sizeof(apr_size_t) || length < MAX_BUCKET_SIZE) {
        e = apr_bucket_file_create(f, start, (apr_size_t)length, p,
                                   bb->bucket_alloc);
    }
    else {
        /* Several buckets are needed. */
        e = apr_bucket_file_create(f, start, MAX_BUCKET_SIZE, p,
                                   bb->bucket_alloc);

        while (length > MAX_BUCKET_SIZE) {
            apr_bucket *ce;
            apr_bucket_copy(e, &ce);
            APR_BRIGADE_INSERT_TAIL(bb, ce);
            e->start += MAX_BUCKET_SIZE;
            length -= MAX_BUCKET_SIZE;
        }
        e->length = (apr_size_t)length; /* Resize just the last bucket */
    }

    APR_BRIGADE_INSERT_TAIL(bb, e);
    return e;
}
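
/* Illustrative sketch (not part of the upstream file): appending a whole
 * file to a brigade.  'fname', the open flags and the extra headers
 * (apr_file_io.h, apr_file_info.h) are assumptions of this sketch; for files
 * larger than MAX_BUCKET_SIZE on large-file builds, apr_brigade_insert_file()
 * transparently adds several file buckets.
 */
#if 0
static apr_status_t example_insert_file(apr_bucket_brigade *bb,
                                        const char *fname, apr_pool_t *pool)
{
    apr_file_t *f;
    apr_finfo_t finfo;
    apr_status_t rv;

    rv = apr_file_open(&f, fname, APR_FOPEN_READ, APR_FPROT_OS_DEFAULT, pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    rv = apr_file_info_get(&finfo, APR_FINFO_SIZE, f);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    /* The file bucket(s) keep the apr_file_t alive via 'pool'. */
    apr_brigade_insert_file(bb, f, 0, finfo.size, pool);
    return APR_SUCCESS;
}
#endif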