#include "bfu/dialog.h"
#include "cache/cache.h"
#include "cache/dialogs.h"
#include "config/options.h"
#include "main/main.h"
#include "main/object.h"
#include "network/connection.h"
#include "protocol/protocol.h"
#include "protocol/proxy.h"
#include "protocol/uri.h"
#ifdef CONFIG_SCRIPTING_SPIDERMONKEY
# include "scripting/smjs/smjs.h"
#endif
#include "util/error.h"
#include "util/memory.h"
#include "util/string.h"
#include "util/time.h"

/* The list of cache entries */
static INIT_LIST_OF(struct cache_entry, cache_entries);

static unsigned longlong cache_size;
static int id_counter = 1;

static void truncate_entry(struct cache_entry *cached, off_t offset, int final);

/* Change 0 to 1 to enable cache debugging features (redirect stderr to a file). */
#if 0
#define DEBUG_CACHE
#endif

#ifdef DEBUG_CACHE
#define dump_frag(frag, count) \
do { \
        DBG(" [%d] f=%p offset=%" OFF_PRINT_FORMAT \
            " length=%" OFF_PRINT_FORMAT \
            " real_length=%" OFF_PRINT_FORMAT, \
            count, frag, (off_print_T) frag->offset, \
            (off_print_T) frag->length, (off_print_T) frag->real_length); \
} while (0)

#define dump_frags(entry, comment) \
do { \
        struct fragment *frag; \
        int count = 0; \
        DBG("%s: url=%s, cache_size=%li", comment, struri(entry->uri), cache_size); \
        foreach (frag, entry->frag) \
                dump_frag(frag, ++count); \
} while (0)
#else
#define dump_frag(frag, count)
#define dump_frags(entry, comment)
#endif /* DEBUG_CACHE */
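
/* Simple statistics over the cache list: the total number of entries, the
 * number of locked (used) entries, and the number of entries that are still
 * being loaded. */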
int
get_cache_entry_count(void)
{
        return list_size(&cache_entries);
}

int
get_cache_entry_used_count(void)
{
        struct cache_entry *cached;
        int i = 0;

        foreach (cached, cache_entries)
                i += is_object_used(cached);

        return i;
}

int
get_cache_entry_loading_count(void)
{
        struct cache_entry *cached;
        int i = 0;

        foreach (cached, cache_entries)
                i += is_entry_used(cached);

        return i;
}
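
/* Look up @uri in the cache. For proxy URIs the entry's proxy_uri is compared
 * instead of the plain URI. A hit is moved to the top of the list. */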
struct cache_entry *
find_in_cache(struct uri *uri)
{
        struct cache_entry *cached;
        int proxy = (uri->protocol == PROTOCOL_PROXY);

        foreach (cached, cache_entries) {
                struct uri *c_uri;

                if (!cached->valid) continue;

                c_uri = proxy ? cached->proxy_uri : cached->uri;
                if (!compare_uri(c_uri, uri, URI_BASE))
                        continue;

                move_to_top_of_list(cache_entries, cached);

                return cached;
        }

        return NULL;
}
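
/* Return the cache entry for @uri, creating and registering an empty one if
 * none exists yet. Returns NULL on allocation failure. */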
struct cache_entry *
get_cache_entry(struct uri *uri)
{
        struct cache_entry *cached = find_in_cache(uri);

        assertm(!uri->fragment, "Fragment in URI (%s)", struri(uri));

        if (cached) return cached;

        cached = mem_calloc(1, sizeof(*cached));
        if (!cached) return NULL;

        cached->uri = get_proxied_uri(uri);
        if (!cached->uri) {
                mem_free(cached);
                return NULL;
        }

        cached->proxy_uri = get_proxy_uri(uri, NULL);
        if (!cached->proxy_uri) {
                done_uri(cached->uri);
                mem_free(cached);
                return NULL;
        }
        cached->incomplete = 1;
        cached->valid = 1;

        init_list(cached->frag);
        cached->cache_id = id_counter++;
        object_nolock(cached, "cache_entry"); /* Debugging purpose. */

        cached->box_item = add_listbox_leaf(&cache_browser, NULL, cached);

        add_to_list(cache_entries, cached);

        return cached;
}
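
/* Return non-zero if the max_age deadline of @cached has already passed. */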
static int
cache_entry_has_expired(struct cache_entry *cached)
{
        timeval_T now;

        timeval_now(&now);

        return timeval_cmp(&cached->max_age, &now) <= 0;
}
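
/* Like find_in_cache(), but only return an entry that is complete and still
 * acceptable under @cache_mode and the cache-related options. */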
struct cache_entry *
get_validated_cache_entry(struct uri *uri, enum cache_mode cache_mode)
{
        struct cache_entry *cached;

        /* We have to check if something should be reloaded */
        if (cache_mode > CACHE_MODE_NORMAL)
                return NULL;

        /* We only consider complete entries */
        cached = find_in_cache(uri);
        if (!cached || cached->incomplete)
                return NULL;

        /* A bit of a gray zone. Delete the entry if it has the strictest
         * cache mode and we don't want the most aggressive mode, or we have to
         * remove the redirect, or the entry expired. Please enlighten me. */
        if ((cached->cache_mode == CACHE_MODE_NEVER && cache_mode != CACHE_MODE_ALWAYS)
            || (cached->redirect && !get_opt_bool("document.cache.cache_redirects", NULL))
            || (cached->expire && cache_entry_has_expired(cached))) {
                if (!is_object_used(cached)) delete_cache_entry(cached);
                return NULL;
        }

        if (cached->cache_mode <= CACHE_MODE_CHECK_IF_MODIFIED
            && cache_mode <= CACHE_MODE_CHECK_IF_MODIFIED
            && (cached->last_modified || cached->etag)
            && get_opt_int("document.cache.revalidation_interval", NULL) >= 0) {
                if (cached->seconds + get_opt_int("document.cache.revalidation_interval", NULL) < time(NULL))
                        return NULL;
        }

        return cached;
}

int
cache_entry_is_valid(struct cache_entry *cached)
{
        struct cache_entry *valid_cached;

        foreach (valid_cached, cache_entries) {
                if (valid_cached == cached)
                        return 1;
        }

        return 0;
}
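
/* Follow the redirect chain recorded in the cache, stopping after
 * MAX_REDIRECTS hops to avoid loops. Returns NULL when the chain is broken. */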
struct cache_entry *
follow_cached_redirects(struct cache_entry *cached)
{
        int redirects = 0;

        while (cached) {
                if (!cached->redirect) {
                        /* XXX: This is not quite true, but does that difference
                         * matter here? */
                        return cached;
                }

                if (++redirects > MAX_REDIRECTS) break;

                cached = find_in_cache(cached->redirect);
        }

        return NULL;
}

struct cache_entry *
get_redirected_cache_entry(struct uri *uri)
{
        struct cache_entry *cached = find_in_cache(uri);

        return cached ? follow_cached_redirects(cached) : NULL;
}
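
/* Bookkeeping helper: adjust both @cached->data_size and the global
 * cache_size by @size, which may be negative. */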
static void
enlarge_entry(struct cache_entry *cached, off_t size)
{
        cached->data_size += size;
        assertm(cached->data_size >= 0,
                "cache entry data_size underflow: %ld", cached->data_size);
        if_assert_failed { cached->data_size = 0; }

        cache_size += size;
        assertm(cache_size >= 0, "cache_size underflow: %ld", cache_size);
        if_assert_failed { cache_size = 0; }
}
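
/* Fragment data is padded to 16 KiB granularity: CACHE_PAD(x) yields the
 * smallest multiple of 0x4000 strictly greater than x, e.g.
 * CACHE_PAD(1000) == 16384 and CACHE_PAD(0x4000) == 0x8000. */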
#define CACHE_PAD(x) (((x) | 0x3fff) + 1)

/* One byte is reserved for data in struct fragment. */
#define FRAGSIZE(x) (sizeof(struct fragment) + (x) - 1)

/* We store the fragments themselves in a private vault, safely separated from
 * the rest of memory structures. If we lived in the main libc memory pool, we
 * would trigger annoying pathological behaviour like artificially enlarging
 * the memory pool to 50M, then securing it with some stupid cookie record at
 * the top and then no matter how you flush the cache the data segment is still
 * 50M big.
 *
 * Cool, but we don't want that, so fragments (where the big data is stored)
 * live in their little mmap()ed worlds. There is some overhead, but if we
 * assume a single fragment per cache entry and a page size (mmap() allocation
 * granularity) of 4096, for a squad of ten 1kb documents this amounts to 30kb.
 * That's not *that* horrible when you realize that the freshmeat front page
 * takes 300kb in memory and we usually do not deal with documents so small
 * that the max. 4kb overhead would be visible there.
 *
 * The alternative would be of course to manage an entire custom memory pool,
 * but that is feasible only when we are able to resize it efficiently. We
 * aren't, except on Linux.
 *
 * Of course for all this to really completely prevent the pathological cases,
 * we need to stuff the rendered documents in too, because they seem to account
 * for the major memory bursts. */

static struct fragment *
frag_alloc(size_t size)
{
        struct fragment *f = mem_mmap_alloc(FRAGSIZE(size));

        if (!f) return NULL;
        memset(f, 0, FRAGSIZE(size));
        return f;
}

static struct fragment *
frag_realloc(struct fragment *f, size_t size)
{
        return mem_mmap_realloc(f, FRAGSIZE(f->real_length), FRAGSIZE(size));
}

static void
frag_free(struct fragment *f)
{
        mem_mmap_free(f, FRAGSIZE(f->real_length));
}

/* Concatenate overlapping fragments. */
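/* Sets *trunc when bytes already stored in the cache disagree with the newly
 * written data, so the caller knows to truncate the entry past the new
 * fragment. */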
static void
remove_overlaps(struct cache_entry *cached, struct fragment *f, int *trunc)
{
        off_t f_end_offset = f->offset + f->length;

        /* Iterate through all fragments we still overlap with. */
        while (list_has_next(cached->frag, f)
                && f_end_offset > f->next->offset) {
                struct fragment *nf;
                off_t end_offset = f->next->offset + f->next->length;

                if (f_end_offset < end_offset) {
                        /* We end before the end of the following fragment, though.
                         * So try to append the overlapping part of that fragment
                         * to us. */
                        nf = frag_realloc(f, end_offset - f->offset);
                        if (nf) {
                                nf->prev->next = nf;
                                nf->next->prev = nf;
                                f = nf;

                                if (memcmp(f->data + f->next->offset - f->offset,
                                           f->next->data,
                                           f->offset + f->length - f->next->offset))
                                        *trunc = 1;

                                memcpy(f->data + f->length,
                                       f->next->data + f_end_offset - f->next->offset,
                                       end_offset - f_end_offset);

                                enlarge_entry(cached, end_offset - f_end_offset);
                                f->length = f->real_length = end_offset - f->offset;
                        }

                } else {
                        /* We will just discard this, it's a complete subset of
                         * our new fragment. */
                        if (memcmp(f->data + f->next->offset - f->offset,
                                   f->next->data,
                                   f->next->length))
                                *trunc = 1;
                }

                /* Remove the fragment, it influences our new one! */
                nf = f->next;
                enlarge_entry(cached, -nf->length);
                del_from_list(nf);
                frag_free(nf);
        }
}
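
/* Store @length bytes of @data at @offset in @cached, reusing, growing or
 * merging fragments as needed. */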
/* Note that this function is maybe overcommented, but I'm certainly not
 * unhappy about that. */
int
add_fragment(struct cache_entry *cached, off_t offset,
             const unsigned char *data, ssize_t length)
{
        struct fragment *f, *nf;
        off_t end_offset;
        int trunc = 0;

        if (!length) return 0;

        end_offset = offset + length;
        if (cached->length < end_offset)
                cached->length = end_offset;

        /* The cache_id marks each entry and changes each time the entry is
         * modified; it is used by the HTML renderer. */
        cached->cache_id = id_counter++;

        /* Possibly insert the new data in the middle of an existing fragment. */
        foreach (f, cached->frag) {
                int ret = 0;
                off_t f_end_offset = f->offset + f->length;

                /* No intersection? */
                if (f->offset > offset) break;
                if (f_end_offset < offset) continue;

                if (end_offset > f_end_offset) {
                        /* Overlap - we end further than the original fragment. */

                        if (end_offset - f->offset <= f->real_length) {
                                /* We fit here, so let's enlarge it by the delta of
                                 * the old and new end.. */
                                enlarge_entry(cached, end_offset - f_end_offset);
                                /* ..and length is now the total length. */
                                f->length = end_offset - f->offset;

                                ret = 1; /* It was enlarged. */
                        } else {
                                /* We will reduce the fragment length only to the
                                 * starting non-intersecting size and add a new
                                 * fragment directly after this one. */
                                f->length = offset - f->offset;
                                enlarge_entry(cached, -(f_end_offset - offset));
                                f = f->next;
                                break;
                        }

                } /* else We are a subset of the original fragment. */

                /* Copy the stuff over there. */
                memcpy(f->data + offset - f->offset, data, length);

                remove_overlaps(cached, f, &trunc);

                /* We truncate the entry even if the data contents is the
                 * same as what we have in the fragment, because that does
                 * not mean that what is going to follow won't differ. This
                 * is a serious problem when rendering an HTML frame with onload
                 * snippets - we "guess" the rest of the document here,
                 * interpret the snippet, then it turns out in the real
                 * document the snippet is different and we are in trouble.
                 *
                 * Debugging this took me about 1.5 days (really), the diff with
                 * all the debugging print commands amounted to about 20kb (gdb
                 * wasn't much use since it stalled the download, de facto
                 * eliminating the bad behaviour). */
                truncate_entry(cached, end_offset, 0);

                dump_frags(cached, "add_fragment");

                return ret;
        }

        /* Make up a new fragment. */
        nf = frag_alloc(CACHE_PAD(length));
        if (!nf) return -1;

        nf->offset = offset;
        nf->length = length;
        nf->real_length = CACHE_PAD(length);
        memcpy(nf->data, data, length);
        add_at_pos(f->prev, nf);

        enlarge_entry(cached, length);

        remove_overlaps(cached, nf, &trunc);
        if (trunc) truncate_entry(cached, end_offset, 0);

        dump_frags(cached, "add_fragment");

        return 1;
}

/* Try to defragment the cache entry. Defragmentation will not be possible
 * if there is a gap in the fragments; if we have bytes 1-100 in one fragment
 * and bytes 201-300 in the second, we must leave those two fragments separate
 * so that the fragment for bytes 101-200 can later be inserted. However,
 * if we have the fragments for bytes 1-100, 101-200, and 201-300, we will
 * catenate them into one new fragment and replace the original fragments
 * with that new fragment.
 *
 * If there are no fragments, return NULL. If there is no fragment with byte 1,
 * return NULL. Otherwise, return the first fragment, whether or not it was
 * possible to fully defragment the entry. */
struct fragment *
get_cache_fragment(struct cache_entry *cached)
{
        struct fragment *first_frag, *adj_frag, *frag, *new_frag;
        int new_frag_len;

        if (list_empty(cached->frag))
                return NULL;

        first_frag = cached->frag.next;
        if (first_frag->offset)
                return NULL;

        /* Only one fragment so no defragmentation is needed */
        if (list_is_singleton(cached->frag))
                return first_frag;

        /* Find the first pair of fragments with a gap in between. Only
         * fragments up to the first gap can be defragmented. */
        for (adj_frag = first_frag->next; adj_frag != (void *) &cached->frag;
             adj_frag = adj_frag->next) {
                long gap = adj_frag->offset
                            - (adj_frag->prev->offset + adj_frag->prev->length);

                if (gap > 0) break;
                if (gap == 0) continue;

                INTERNAL("fragments overlap");
                return NULL;
        }

        /* There is a gap between the first two fragments, so we can't
         * defragment anything. */
        if (adj_frag == first_frag->next)
                return first_frag;

        /* Calculate the length of the defragmented fragment. */
        for (new_frag_len = 0, frag = first_frag;
             frag != adj_frag;
             frag = frag->next)
                new_frag_len += frag->length;

        /* XXX: If the defragmentation fails because of allocation failure,
         * fall back to return the first fragment and pretend all is well. */
        /* FIXME: Is this terribly brain-dead? It corresponds to the semantic of
         * the code this extended version of the old defrag_entry() is supposed
         * to replace. --jonas */
        new_frag = frag_alloc(new_frag_len);
        if (!new_frag)
                return first_frag->length ? first_frag : NULL;

        new_frag->length = new_frag_len;
        new_frag->real_length = new_frag_len;

        for (new_frag_len = 0, frag = first_frag;
             frag != adj_frag;
             frag = frag->next) {
                struct fragment *tmp = frag;

                memcpy(new_frag->data + new_frag_len, frag->data, frag->length);
                new_frag_len += frag->length;

                frag = frag->prev;
                del_from_list(tmp);
                frag_free(tmp);
        }

        add_to_list(cached->frag, new_frag);

        dump_frags(cached, "get_cache_fragment");

        return new_frag;
}
static void
delete_fragment(struct cache_entry *cached, struct fragment *f)
{
        while ((void *) f != &cached->frag) {
                struct fragment *tmp = f->next;

                enlarge_entry(cached, -f->length);
                del_from_list(f);
                frag_free(f);
                f = tmp;
        }
}
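
/* Drop all cached data at or past @offset. When @final is set, the last kept
 * fragment is shrunk to its exact length so no spare space stays allocated. */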
static void
truncate_entry(struct cache_entry *cached, off_t offset, int final)
{
        struct fragment *f;

        if (cached->length > offset) {
                cached->length = offset;
                cached->incomplete = 1;
        }

        foreach (f, cached->frag) {
                off_t size = offset - f->offset;

                /* XXX: is zero length fragment really legal here ? --Zas */
                assert(f->length >= 0);

                if (size >= f->length) continue;

                if (size > 0) {
                        enlarge_entry(cached, -(f->length - size));
                        f->length = size;

                        if (final) {
                                struct fragment *nf;

                                nf = frag_realloc(f, f->length);
                                if (nf) {
                                        nf->next->prev = nf;
                                        nf->prev->next = nf;
                                        f = nf;
                                        f->real_length = f->length;
                                }
                        }

                        f = f->next;
                }

                delete_fragment(cached, f);

                dump_frags(cached, "truncate_entry");
                return;
        }
}
void
free_entry_to(struct cache_entry *cached, off_t offset)
{
        struct fragment *f;

        foreach (f, cached->frag) {
                if (f->offset + f->length <= offset) {
                        struct fragment *tmp = f;

                        enlarge_entry(cached, -f->length);
                        f = f->prev;
                        del_from_list(tmp);
                        frag_free(tmp);
                } else if (f->offset < offset) {
                        off_t size = offset - f->offset;

                        enlarge_entry(cached, -size);
                        f->length -= size;
                        memmove(f->data, f->data + size, f->length);
                        f->offset = offset;
                } else break;
        }
}
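
/* Free all fragments of @cached and reset it to an empty, incomplete state,
 * while keeping the entry itself in the cache list. */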
void
delete_entry_content(struct cache_entry *cached)
{
        enlarge_entry(cached, -cached->data_size);

        while (cached->frag.next != (void *) &cached->frag) {
                struct fragment *f = cached->frag.next;

                del_from_list(f);
                frag_free(f);
        }

        cached->cache_id = id_counter++;
        cached->length = 0;
        cached->incomplete = 1;

        mem_free_set(&cached->last_modified, NULL);
        mem_free_set(&cached->etag, NULL);
}
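
/* Release everything owned by @cached and the entry itself. The entry must be
 * neither locked nor still loading. */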
static void
done_cache_entry(struct cache_entry *cached)
{
        assertm(!is_object_used(cached), "deleting locked cache entry");
        assertm(!is_entry_used(cached), "deleting loading cache entry");

        delete_entry_content(cached);

        if (cached->box_item) done_listbox_item(&cache_browser, cached->box_item);

#ifdef CONFIG_SCRIPTING_SPIDERMONKEY
        if (cached->jsobject) smjs_detach_cache_entry_object(cached);
#endif

        if (cached->uri) done_uri(cached->uri);
        if (cached->proxy_uri) done_uri(cached->proxy_uri);
        if (cached->redirect) done_uri(cached->redirect);

        mem_free_if(cached->head);
        mem_free_if(cached->content_type);
        mem_free_if(cached->last_modified);
        mem_free_if(cached->ssl_info);
        mem_free_if(cached->encoding_info);
        mem_free_if(cached->etag);

        mem_free(cached);
}

void
delete_cache_entry(struct cache_entry *cached)
{
        del_from_list(cached);

        done_cache_entry(cached);
}
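
/* Finalize @cached after its data has been received: truncate it to
 * @truncate_length (when non-negative) and mark it complete. */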
void
normalize_cache_entry(struct cache_entry *cached, off_t truncate_length)
{
        if (truncate_length < 0)
                return;

        truncate_entry(cached, truncate_length, 1);
        cached->incomplete = 0;
        cached->preformatted = 0;
        cached->seconds = time(NULL);
}
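
/* Record a redirect of @cached to @location, resolved against the entry's own
 * URI. Returns the new redirect URI, or NULL on failure. */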
struct uri *
redirect_cache(struct cache_entry *cached, unsigned char *location,
               int get, int incomplete)
{
        unsigned char *uristring;

        /* XXX: I am a little puzzled whether we should only use the cache
         * entry's URI if it is valid. Hopefully always using it won't hurt.
         * Currently we handle directory redirects where "/" should be appended
         * specially; dunno if join_urls() could be made to handle that. */

        /* XXX: We are assuming here that incomplete will only be zero when
         * doing these fake redirects whose only purpose is to add an ending
         * slash *cough* dirseparator to the end of the URI. */
        if (incomplete == 0 && dir_sep(location[0]) && location[1] == 0) {
                /* To be sure use get_uri_string() to get rid of post data */
                uristring = get_uri_string(cached->uri, URI_ORIGINAL);
                if (uristring) add_to_strn(&uristring, location);
        } else {
                uristring = join_urls(cached->uri, location);
        }

        if (!uristring) return NULL;

        /* Only add the post data if the redirect should not use GET method.
         * This is tied to the HTTP handling of the 303 and (if the
         * protocol.http.bugs.broken_302_redirect is enabled) the 302 status
         * code. */
        if (cached->uri->post
            && !cached->redirect_get
            && !get) {
                /* XXX: Add POST_CHAR and post data assuming URI components
                 * belong to one string. */

                /* To be certain we don't append post data twice in some
                 * conditions... --Zas */
                assert(!strchr(uristring, POST_CHAR));

                add_to_strn(&uristring, cached->uri->post - 1);
        }

        if (cached->redirect) done_uri(cached->redirect);
        cached->redirect = get_uri(uristring, 0);
        cached->redirect_get = get;
        if (incomplete >= 0) cached->incomplete = incomplete;

        mem_free(uristring);

        return cached->redirect;
}
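
/* Shrink the memory cache: if @whole is set, drop every unused entry;
 * otherwise free the oldest unused entries until the cache fits within the
 * configured limit. */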
void
garbage_collection(int whole)
{
        struct cache_entry *cached;
        /* We recompute cache_size when scanning cache entries, to ensure
         * it is in sync. */
        unsigned longlong old_cache_size = 0;
        /* The maximal cache size tolerated by the user. Note that this is only
         * the size of the "just stored" unused cache entries; used cache entries
         * are not counted into that. */
        unsigned longlong opt_cache_size = get_opt_long("document.cache.memory.size", NULL);
        /* The low-threshold cache size. Basically, when the cache size is
         * higher than opt_cache_size, we free the cache so that there is no
         * more than this value in the cache anymore. This is to make sure we
         * aren't cleaning the cache too frequently when working with a lot of
         * small cache entries but rather free more and then let it grow a
         * little more as well. */
        unsigned longlong gc_cache_size = opt_cache_size * MEMORY_CACHE_GC_PERCENT / 100;
        /* The cache size we aim to reach. */
        unsigned longlong new_cache_size = cache_size;
#ifdef DEBUG_CACHE
        /* Whether we've hit a used (unfreeable) entry when collecting
         * garbage. */
        int obstacle_entry = 0;
#endif

#ifdef DEBUG_CACHE
        DBG("gc whole=%d opt_cache_size=%ld gc_cache_size=%ld",
            whole, opt_cache_size, gc_cache_size);
#endif

        if (!whole && cache_size <= opt_cache_size) return;


        /* Scanning cache, pass #1:
         * Weed out the used cache entries from @new_cache_size, so that we
         * will work only with the unused entries from then on. Also ensure
         * that @cache_size is in sync. */

        foreach (cached, cache_entries) {
                old_cache_size += cached->data_size;

                if (!is_object_used(cached) && !is_entry_used(cached))
                        continue;

                assertm(new_cache_size >= cached->data_size,
                        "cache_size (%ld) underflow: subtracting %ld from %ld",
                        cache_size, cached->data_size, new_cache_size);

                new_cache_size -= cached->data_size;

                if_assert_failed { new_cache_size = 0; }
        }

        assertm(old_cache_size == cache_size,
                "cache_size out of sync: %ld != (actual) %ld",
                cache_size, old_cache_size);
        if_assert_failed { cache_size = old_cache_size; }

        if (!whole && new_cache_size <= opt_cache_size) return;


        /* Scanning cache, pass #2:
         * Mark potential targets for destruction, from the oldest to the
         * newest. */

        foreachback (cached, cache_entries) {
                /* Would we have shrunk enough already? */
                if (!whole && new_cache_size <= gc_cache_size)
                        goto shrinked_enough;

                /* Skip used cache entries. */
                if (is_object_used(cached) || is_entry_used(cached)) {
#ifdef DEBUG_CACHE
                        obstacle_entry = 1;
#endif
                        cached->gc_target = 0;
                        continue;
                }

                /* FIXME: Optionally take cached->max_age into consideration,
                 * but that will probably complicate things too much. We'd have
                 * to sort entries to prioritize removing the oldest entries. */

                assertm(new_cache_size >= cached->data_size,
                        "cache_size (%ld) underflow: subtracting %ld from %ld",
                        cache_size, cached->data_size, new_cache_size);

                /* Mark me for destruction, sir. */
                cached->gc_target = 1;
                new_cache_size -= cached->data_size;

                if_assert_failed { new_cache_size = 0; }
        }

        /* If we'd free the whole cache... */
        assertm(new_cache_size == 0,
                "cache_size (%ld) overflow: %ld",
                cache_size, new_cache_size);
        if_assert_failed { new_cache_size = 0; }

shrinked_enough:

        /* Now turn around and start walking in the opposite direction. */
        cached = cached->next;

        /* Something is strange when we decided all is ok before dropping any
         * cache entry. */
        if ((void *) cached == &cache_entries) return;


        if (!whole) {
                struct cache_entry *entry;

                /* Scanning cache, pass #3:
                 * Walk back in the cache and unmark the cache entries which
                 * could still fit into the cache. */

                /* This makes sense when the newest entry is HUGE and after it,
                 * there's just plenty of tiny entries. By this point, all the
                 * tiny entries would be marked for deletion even though it'd
                 * be enough to free the huge entry. This actually fixes that
                 * situation. */

                for (entry = cached; (void *) entry != &cache_entries; entry = entry->next) {
                        unsigned longlong newer_cache_size = new_cache_size + entry->data_size;

                        if (newer_cache_size > gc_cache_size)
                                continue;

                        new_cache_size = newer_cache_size;
                        entry->gc_target = 0;
                }
        }


        /* Scanning cache, pass #4:
         * Destroy the marked entries. So sad, but that's life, bro'. */

        for (; (void *) cached != &cache_entries; ) {
                cached = cached->next;
                if (cached->prev->gc_target)
                        delete_cache_entry(cached->prev);
        }


#ifdef DEBUG_CACHE
        if ((whole || !obstacle_entry) && cache_size > gc_cache_size) {
                DBG("garbage collection doesn't work, cache size %ld > %ld, "
                    "document.cache.memory.size set to: %ld bytes",
                    cache_size, gc_cache_size,
                    get_opt_long("document.cache.memory.size", NULL));
        }
#endif
}