/*
 * This file Copyright (C) 2010 Mnemosyne LLC
 *
 * This file is licensed by the GPL version 2. Works owned by the
 * Transmission project are granted a special exemption to clause 2(b)
 * so that the bulk of its code can remain under the MIT license.
 * This exemption does not extend to derived works not owned by
 * the Transmission project.
 *
 * $Id: cache.c 11398 2010-11-11 15:31:11Z charles $
 */
#include <stdlib.h> /* qsort() */
#include <string.h> /* memcpy() */
#include <assert.h>

#include "transmission.h"
#include "cache.h"
#include "inout.h"
#include "peer-common.h" /* MAX_BLOCK_SIZE */
#include "ptrarray.h"
#include "torrent.h"
#include "utils.h"
#define MY_NAME "Cache"

#define dbgmsg( ... ) \
    do { \
        if( tr_deepLoggingIsActive( ) ) \
            tr_deepLog( __FILE__, __LINE__, MY_NAME, __VA_ARGS__ ); \
    } while( 0 )
struct cache_block
{
    tr_torrent * tor;

    tr_piece_index_t piece;
    uint32_t offset;
    uint32_t length;

    time_t time;
    tr_block_index_t block;

    uint8_t * buf;
};

struct tr_cache
{
    tr_ptrArray blocks;
    int max_blocks;
    int64_t max_bytes;

    size_t disk_writes;
    size_t disk_write_bytes;
    size_t cache_writes;
    size_t cache_write_bytes;
};

struct run_info
{
    int pos;
    int rank;
    time_t last_block_time;
    tr_bool is_multi_piece;
    tr_bool is_piece_done;
    unsigned len;
};
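
/* The cache keeps one cache_block per buffered block, in tr_cache.blocks:
 * a tr_ptrArray sorted by (torrent id, block index), so blocks that are
 * adjacent on disk are also adjacent in the array.  A "run" is a maximal
 * stretch of such adjacent blocks; run_info holds the bookkeeping used to
 * decide which runs get flushed first. */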
/* return a count of how many contiguous blocks there are starting at this pos */
static int
getBlockRun( const tr_cache * cache, int pos, struct run_info * info )
{
    int i;
    const int n = tr_ptrArraySize( &cache->blocks );
    const struct cache_block ** blocks = (const struct cache_block**) tr_ptrArrayBase( &cache->blocks );
    const struct cache_block * ref = blocks[pos];
    tr_block_index_t block = ref->block;

    for( i=pos; i<n; ++i, ++block ) {
        const struct cache_block * b = blocks[i];
        if( b->block != block ) break;
        if( b->tor != ref->tor ) break;
        //fprintf( stderr, "pos %d tor %d block %zu time %zu\n", i, b->tor->uniqueId, (size_t)b->block, (size_t)b->time );
    }

    //fprintf( stderr, "run is %d long from [%d to %d)\n", (int)(i-pos), i, (int)pos );

    if( info != NULL )
    {
        const struct cache_block * b = blocks[i-1];
        info->last_block_time = b->time;
        info->is_piece_done = tr_cpPieceIsComplete( &b->tor->completion, b->piece );
        info->is_multi_piece = b->piece != blocks[pos]->piece ? TRUE : FALSE;
        info->len = i - pos;
        info->pos = pos;
    }

    return i - pos;
}
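
/* qsort() comparator: a higher rank sorts ahead of a lower rank */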
static int
compareRuns( const void * va, const void * vb )
{
    const struct run_info a = *(const struct run_info*)va;
    const struct run_info b = *(const struct run_info*)vb;
    return b.rank - a.rank;
}
/* flag bits ORed into a run's rank; they dominate the length/age
 * component so that flagged runs sort first */
enum
{
    MULTIFLAG   = 0x1000,
    DONEFLAG    = 0x2000,
    SESSIONFLAG = 0x4000
};

/* Calculate runs
 *   - Stale runs, runs sitting in cache for a long time or runs not growing, get priority.
 *     Returns number of runs.
 */
static int
calcRuns( tr_cache * cache, struct run_info * runs )
{
    const int n = tr_ptrArraySize( &cache->blocks );
    int i = 0, pos;
    const time_t now = tr_time();

    for( pos = 0; pos < n; pos += runs[i++].len )
    {
        int rank = getBlockRun( cache, pos, &runs[i] );

        /* This adds ~2 to the relative length of a run for every minute it has
         * languished in the cache. */
        rank += ( now - runs[i].last_block_time ) / 32;

        /* Flushing stale blocks should be a top priority as the probability of them
         * growing is very small, for blocks on piece boundaries, and nonexistent for
         * blocks inside pieces. */
        rank |= runs[i].is_piece_done ? DONEFLAG : 0;

        /* Move the multi piece runs higher */
        rank |= runs[i].is_multi_piece ? MULTIFLAG : 0;

        runs[i].rank = rank;
        //fprintf(stderr,"block run at pos %d of length %d and age %ld adjusted +%d\n",runs[i].pos,runs[i].len,now-runs[i].last_block_time,rank-runs[i].len);
    }

    //fprintf( stderr, "%d block runs\n", i );
    qsort( runs, i, sizeof( struct run_info ), compareRuns );
    return i;
}
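
/* Flush a run of 'n' adjacent blocks starting at 'pos': copy them into one
 * contiguous buffer, drop them from the block array, and write the whole
 * run to disk with a single tr_ioWrite() call. */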
static int
flushContiguous( tr_cache * cache, int pos, int n )
{
    int i;
    int err = 0;
    uint8_t * buf = tr_new( uint8_t, n * MAX_BLOCK_SIZE );
    uint8_t * walk = buf;
    struct cache_block ** blocks = (struct cache_block**) tr_ptrArrayBase( &cache->blocks );

    struct cache_block * b = blocks[pos];
    tr_torrent * tor             = b->tor;
    const tr_piece_index_t piece = b->piece;
    const uint32_t offset        = b->offset;

    //fprintf( stderr, "flushing %d contiguous blocks [%d-%d) from cache to disk\n", n, pos, n+pos );

    for( i=pos; i<pos+n; ++i ) {
        b = blocks[i];
        memcpy( walk, b->buf, b->length );
        walk += b->length;
        tr_free( b->buf );
        tr_free( b );
    }
    tr_ptrArrayErase( &cache->blocks, pos, pos+n );

#if 0 /* verbose debug output, disabled; note that 'rank' is not in scope here */
    tr_tordbg( tor, "Writing to disk piece %d, offset %d, len %d", (int)piece, (int)offset, (int)(walk-buf) );
    tr_ndbg( MY_NAME, "Removing %d blocks from cache, rank: %d - %d left", n, rank, tr_ptrArraySize(&cache->blocks) );
    fprintf( stderr, "%s - Writing to disk piece %d, offset %d, len %d\n", tr_torrentName(tor), (int)piece, (int)offset, (int)(walk-buf) );
    fprintf( stderr, "%s - Removing %d blocks from cache; %d left\n", MY_NAME, n, tr_ptrArraySize(&cache->blocks) );
#endif

    err = tr_ioWrite( tor, piece, offset, walk-buf, buf );
    tr_free( buf );

    ++cache->disk_writes;
    cache->disk_write_bytes += walk-buf;
    return err;
}
static int
flushRuns( tr_cache * cache, struct run_info * runs, int n )
{
    int i, j, err = 0;

    for( i = 0; !err && i < n; ++i )
    {
        err = flushContiguous( cache, runs[i].pos, runs[i].len );

        /* flushContiguous() erased the run from the block array,
         * so shift the positions of the runs that came after it */
        for( j = i + 1; j < n; ++j )
            if( runs[j].pos > runs[i].pos )
                runs[j].pos -= runs[i].len;
    }

    return err;
}
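
/* When the cache grows past max_blocks, rank the runs and flush the
 * best-ranked ones until at least (max_blocks / 4) + 1 blocks have been
 * written out. */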
static int
cacheTrim( tr_cache * cache )
{
    int err = 0;

    if( tr_ptrArraySize( &cache->blocks ) > cache->max_blocks )
    {
        /* Amount of cache that should be removed by the flush. This influences how large
         * runs can grow as well as how often flushes will happen. */
        const int cacheCutoff = 1 + cache->max_blocks / 4;
        struct run_info * runs = tr_new( struct run_info, tr_ptrArraySize( &cache->blocks ) );
        int i = 0, j = 0;

        calcRuns( cache, runs );

        /* take enough of the highest-ranked runs to cover the cutoff */
        while( j < cacheCutoff )
            j += runs[i++].len;

        err = flushRuns( cache, runs, i );
        tr_free( runs );
    }

    return err;
}
/***
****
***/

static int
getMaxBlocks( int64_t max_bytes )
{
    return max_bytes / (double)MAX_BLOCK_SIZE;
}
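
/* The limit is tracked both in bytes and in MAX_BLOCK_SIZE-sized blocks;
 * with 16 KiB blocks, for example, a 4 MiB cache holds up to 256 blocks. */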
int
tr_cacheSetLimit( tr_cache * cache, int64_t max_bytes )
{
    char buf[128];

    cache->max_bytes = max_bytes;
    cache->max_blocks = getMaxBlocks( max_bytes );

    tr_formatter_mem_B( buf, cache->max_bytes, sizeof( buf ) );
    tr_ndbg( MY_NAME, "Maximum cache size set to %s (%d blocks)", buf, cache->max_blocks );

    return cacheTrim( cache );
}
int64_t
tr_cacheGetLimit( const tr_cache * cache )
{
    return cache->max_bytes;
}
tr_cache *
tr_cacheNew( int64_t max_bytes )
{
    tr_cache * cache = tr_new0( tr_cache, 1 );
    cache->blocks = TR_PTR_ARRAY_INIT;
    cache->max_bytes = max_bytes;
    cache->max_blocks = getMaxBlocks( max_bytes );
    return cache;
}
void
tr_cacheFree( tr_cache * cache )
{
    assert( tr_ptrArrayEmpty( &cache->blocks ) );
    tr_ptrArrayDestruct( &cache->blocks, NULL );
    tr_free( cache );
}
/***
****
***/

static int
cache_block_compare( const void * va, const void * vb )
{
    const struct cache_block * a = va;
    const struct cache_block * b = vb;

    /* primary key: torrent id */
    if( a->tor->uniqueId != b->tor->uniqueId )
        return a->tor->uniqueId < b->tor->uniqueId ? -1 : 1;

    /* secondary key: block # */
    if( a->block != b->block )
        return a->block < b->block ? -1 : 1;

    /* they're equal */
    return 0;
}
static struct cache_block *
findBlock( tr_cache           * cache,
           tr_torrent         * torrent,
           tr_piece_index_t     piece,
           uint32_t             offset )
{
    struct cache_block key;
    key.tor = torrent;
    key.block = _tr_block( torrent, piece, offset );
    return tr_ptrArrayFindSorted( &cache->blocks, &key, cache_block_compare );
}
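
/* Add or refresh a block in the cache.  Each block index has at most one
 * cache_block entry; rewriting a cached block replaces its buffer and
 * refreshes its timestamp. */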
int
tr_cacheWriteBlock( tr_cache         * cache,
                    tr_torrent       * torrent,
                    tr_piece_index_t   piece,
                    uint32_t           offset,
                    uint32_t           length,
                    const uint8_t    * writeme )
{
    struct cache_block * cb = findBlock( cache, torrent, piece, offset );

    if( cb == NULL )
    {
        cb = tr_new( struct cache_block, 1 );
        cb->tor = torrent;
        cb->piece = piece;
        cb->offset = offset;
        cb->length = length;
        cb->block = _tr_block( torrent, piece, offset );
        cb->buf = NULL;
        tr_ptrArrayInsertSorted( &cache->blocks, cb, cache_block_compare );
    }

    cb->time = tr_time();

    tr_free( cb->buf );
    cb->buf = tr_memdup( writeme, cb->length );

    ++cache->cache_writes;
    cache->cache_write_bytes += cb->length;

    return cacheTrim( cache );
}
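
/* Reads are served from the cache while the block is still resident;
 * otherwise they fall through to tr_ioRead(). */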
int
tr_cacheReadBlock( tr_cache         * cache,
                   tr_torrent       * torrent,
                   tr_piece_index_t   piece,
                   uint32_t           offset,
                   uint32_t           len,
                   uint8_t          * setme )
{
    int err = 0;
    struct cache_block * cb = findBlock( cache, torrent, piece, offset );

    if( cb != NULL )
        memcpy( setme, cb->buf, len );
    else
        err = tr_ioRead( torrent, piece, offset, len, setme );

    return err;
}
int
tr_cachePrefetchBlock( tr_cache         * cache,
                       tr_torrent       * torrent,
                       tr_piece_index_t   piece,
                       uint32_t           offset,
                       uint32_t           len )
{
    int err = 0;
    struct cache_block * cb = findBlock( cache, torrent, piece, offset );

    /* only prefetch from disk if the block isn't already in memory */
    if( cb == NULL )
        err = tr_ioPrefetch( torrent, piece, offset, len );

    return err;
}
/***
****
***/

static int
findPiece( tr_cache * cache, tr_torrent * torrent, tr_piece_index_t piece )
{
    struct cache_block key;
    key.tor = torrent;
    key.block = tr_torPieceFirstBlock( torrent, piece );
    return tr_ptrArrayLowerBound( &cache->blocks, &key, cache_block_compare, NULL );
}
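
/* Flush the highest-ranked runs that are either in a completed piece or
 * span a piece boundary; those runs will not grow any further, so there is
 * no benefit in keeping them cached. */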
int tr_cacheFlushDone( tr_cache * cache )
{
    int err = 0;

    if( tr_ptrArraySize( &cache->blocks ) > 0 )
    {
        struct run_info * runs = tr_new( struct run_info, tr_ptrArraySize( &cache->blocks ) );
        int i = 0, n;

        n = calcRuns( cache, runs );
        while( i < n && ( runs[i].is_piece_done || runs[i].is_multi_piece ) )
            runs[i++].rank |= SESSIONFLAG;
        err = flushRuns( cache, runs, i );
        tr_free( runs );
    }

    return err;
}
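
/* Flush only the cached blocks that fall inside one file's [begin, end)
 * block range. */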
int
tr_cacheFlushFile( tr_cache * cache, tr_torrent * torrent, tr_file_index_t i )
{
    int err = 0;
    const tr_file * file = &torrent->info.files[i];
    const tr_block_index_t begin = tr_torPieceFirstBlock( torrent, file->firstPiece );
    const tr_block_index_t end = tr_torPieceFirstBlock( torrent, file->lastPiece ) + tr_torPieceCountBlocks( torrent, file->lastPiece );
    const int pos = findPiece( cache, torrent, file->firstPiece );

    dbgmsg( "flushing file %d from cache to disk: blocks [%zu...%zu)", (int)i, (size_t)begin, (size_t)end );

    /* flush out all the blocks in that file.
     * 'pos' stays fixed: flushContiguous() erases the flushed blocks,
     * so the next run (if any) slides down to the same index. */
    while( !err && ( pos < tr_ptrArraySize( &cache->blocks ) ) )
    {
        const struct cache_block * b = tr_ptrArrayNth( &cache->blocks, pos );
        if( b->tor != torrent ) break;
        if( ( b->block < begin ) || ( b->block >= end ) ) break;
        err = flushContiguous( cache, pos, getBlockRun( cache, pos, NULL ) );
    }

    return err;
}
int
tr_cacheFlushTorrent( tr_cache * cache, tr_torrent * torrent )
{
    int err = 0;
    const int pos = findPiece( cache, torrent, 0 );

    /* flush out all the blocks in that torrent */
    while( !err && ( pos < tr_ptrArraySize( &cache->blocks ) ) )
    {
        const struct cache_block * b = tr_ptrArrayNth( &cache->blocks, pos );
        if( b->tor != torrent ) break;
        err = flushContiguous( cache, pos, getBlockRun( cache, pos, NULL ) );
    }

    return err;
}