Revert "transmission: update from 2.13 to 2.22"
tomato.git: release/src/router/transmission/libtransmission/bandwidth.c
blob 88e79da33df0e71241a5f16f11bd557ca850ee6c

/*
 * This file Copyright (C) 2008-2010 Mnemosyne LLC
 *
 * This file is licensed by the GPL version 2. Works owned by the
 * Transmission project are granted a special exemption to clause 2(b)
 * so that the bulk of its code can remain under the MIT license.
 * This exemption does not extend to derived works not owned by
 * the Transmission project.
 *
 * $Id: bandwidth.c 11349 2010-10-24 01:08:08Z charles $
 */

#include <assert.h>
#include <limits.h>

#include "transmission.h"
#include "bandwidth.h"
#include "crypto.h"
#include "peer-io.h"
#include "ptrarray.h"
#include "utils.h"

#define dbgmsg( ... ) \
    do { \
        if( tr_deepLoggingIsActive( ) ) \
            tr_deepLog( __FILE__, __LINE__, NULL, __VA_ARGS__ ); \
    } while( 0 )

/***
****
***/

static unsigned int
getSpeed_Bps( const struct bratecontrol * r, unsigned int interval_msec, uint64_t now )
{
    uint64_t       bytes = 0;
    const uint64_t cutoff = (now?now:tr_time_msec()) - interval_msec;
    int            i = r->newest;

    for( ;; )
    {
        if( r->transfers[i].date <= cutoff )
            break;

        bytes += r->transfers[i].size;

        if( --i == -1 ) i = HISTORY_SIZE - 1; /* circular history */
        if( i == r->newest ) break; /* we've come all the way around */
    }

    return (unsigned int)(( bytes * 1000u ) / interval_msec);
}

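/* Worked example of the formula above (illustrative numbers, not taken from
 * this file): if the history buckets newer than the cutoff hold
 * 2000 + 1000 = 3000 bytes and interval_msec is 2000, getSpeed_Bps()
 * reports ( 3000 * 1000 ) / 2000 = 1500 bytes per second. */
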
static void
bytesUsed( const uint64_t now, struct bratecontrol * r, size_t size )
{
    if( r->transfers[r->newest].date + GRANULARITY_MSEC >= now )
        r->transfers[r->newest].size += size;
    else
    {
        if( ++r->newest == HISTORY_SIZE ) r->newest = 0;
        r->transfers[r->newest].date = now;
        r->transfers[r->newest].size = size;
    }
}

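/* Sketch of the bucketing (hypothetical values; the real constants are
 * defined in bandwidth.h): if GRANULARITY_MSEC were 200, a transfer stamped
 * 50 ms after the newest bucket's date is summed into that same
 * r->transfers[] slot, while one arriving more than 200 ms after it
 * advances r->newest (wrapping at HISTORY_SIZE) and starts a fresh bucket. */
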
/******
*******
*******
******/

static inline int
comparePointers( const void * a, const void * b )
{
    if( a != b )
        return a < b ? -1 : 1;

    return 0;
}

/***
****
***/

tr_bandwidth*
tr_bandwidthConstruct( tr_bandwidth * b, tr_session * session, tr_bandwidth * parent )
{
    b->session = session;
    b->children = TR_PTR_ARRAY_INIT;
    b->magicNumber = MAGIC_NUMBER;
    b->band[TR_UP].honorParentLimits = TRUE;
    b->band[TR_DOWN].honorParentLimits = TRUE;
    tr_bandwidthSetParent( b, parent );
    return b;
}

tr_bandwidth*
tr_bandwidthDestruct( tr_bandwidth * b )
{
    assert( tr_isBandwidth( b ) );

    tr_bandwidthSetParent( b, NULL );
    tr_ptrArrayDestruct( &b->children, NULL );

    memset( b, ~0, sizeof( tr_bandwidth ) );
    return b;
}

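/* Usage sketch (hypothetical caller, for illustration only; see the actual
 * call sites elsewhere in libtransmission for how this pair is really used):
 *
 *     tr_bandwidth child;
 *     tr_bandwidthConstruct( &child, session, parentBandwidth );
 *     ...
 *     tr_bandwidthDestruct( &child );  (also detaches it from its parent)
 */
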
/***
****
***/

void
tr_bandwidthSetParent( tr_bandwidth * b,
                       tr_bandwidth * parent )
{
    assert( tr_isBandwidth( b ) );
    assert( b != parent );

    if( b->parent )
    {
        void * removed;

        assert( tr_isBandwidth( b->parent ) );

        removed = tr_ptrArrayRemoveSorted( &b->parent->children, b, comparePointers );
        assert( removed == b );
        assert( tr_ptrArrayFindSorted( &b->parent->children, b, comparePointers ) == NULL );

        b->parent = NULL;
    }

    if( parent )
    {
        assert( tr_isBandwidth( parent ) );
        assert( parent->parent != b );

        tr_ptrArrayInsertSorted( &parent->children, b, comparePointers );
        assert( tr_ptrArrayFindSorted( &parent->children, b, comparePointers ) == b );
        b->parent = parent;
    }
}

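/* The parent/child links form the bandwidth hierarchy that the allocator
 * below walks: broadly, a session-wide tr_bandwidth sits at the root, with
 * per-torrent and per-peer objects hanging off it (a simplified picture of
 * how the rest of libtransmission uses this API, not something enforced
 * here). Reparenting is simply "remove from the old parent's sorted child
 * array, insert into the new one". */
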
/***
****
***/

#if 0
#warning do not check the code in with this enabled
#define DEBUG_DIRECTION TR_UP
#endif

static void
allocateBandwidth( tr_bandwidth  * b,
                   tr_priority_t   parent_priority,
                   tr_direction    dir,
                   unsigned int    period_msec,
                   tr_ptrArray   * peer_pool )
{
    tr_priority_t priority;

    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    /* set the available bandwidth */
    if( b->band[dir].isLimited )
    {
        const unsigned int nextPulseSpeed = b->band[dir].desiredSpeed_Bps;
        b->band[dir].bytesLeft = ( nextPulseSpeed * period_msec ) / 1000u;

#ifdef DEBUG_DIRECTION
        if( dir == DEBUG_DIRECTION )
            fprintf( stderr, "bandwidth %p currentPieceSpeed(%5.2f of %5.2f) desiredSpeed(%5.2f), allocating %d\n",
                     b, currentSpeed, tr_bandwidthGetRawSpeed( b, dir ), desiredSpeed,
                     b->band[dir].bytesLeft );
#endif
    }

    priority = MAX( parent_priority, b->priority );

    /* add this bandwidth's peer, if any, to the peer pool */
    if( b->peer != NULL ) {
        b->peer->priority = priority;
        tr_ptrArrayAppend( peer_pool, b->peer );
    }

#ifdef DEBUG_DIRECTION
    if( ( dir == DEBUG_DIRECTION ) && ( n > 1 ) )
        fprintf( stderr, "bandwidth %p has %d peers\n", b, n );
#endif

    /* traverse & repeat for the subtree */
    if( 1 ) {
        int i;
        struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children );
        const int n = tr_ptrArraySize( &b->children );
        for( i=0; i<n; ++i )
            allocateBandwidth( children[i], priority, dir, period_msec, peer_pool );
    }
}

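/* Worked example of the per-pulse budget set above (illustrative numbers
 * only): with desiredSpeed_Bps = 100 000 B/s and period_msec = 500,
 * bytesLeft is ( 100000 * 500 ) / 1000 = 50 000 bytes for this pulse. */
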
static void
phaseOne( tr_ptrArray * peerArray, tr_direction dir )
{
    int i, n;
    int peerCount = tr_ptrArraySize( peerArray );
    struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray );

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    n = peerCount;
    dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") );
    i = n ? tr_cryptoWeakRandInt( n ) : 0; /* pick a random starting point */
    while( n > 1 )
    {
        const size_t increment = 1024;
        const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment );

        dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed );

        if( bytesUsed == (int)increment )
            ++i;
        else {
            /* peer is done writing for now; move it to the end of the list */
            tr_peerIo * pio = peers[i];
            peers[i] = peers[n-1];
            peers[n-1] = pio;
            --n;
        }

        if( i == n )
            i = 0;
    }
}

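/* How the round-robin shrinks (illustrative walk-through): with peers
 * [A, B, C] and a random start at B, each pass hands one peer up to 1024
 * bytes via tr_peerIoFlush(). If B uses less than the full increment, it is
 * swapped to the back of the array and n drops to 2, so later passes only
 * visit A and C until they, too, fall below the increment. */
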
void
tr_bandwidthAllocate( tr_bandwidth  * b,
                      tr_direction    dir,
                      unsigned int    period_msec )
{
    int i, peerCount;
    tr_ptrArray tmp = TR_PTR_ARRAY_INIT;
    tr_ptrArray low = TR_PTR_ARRAY_INIT;
    tr_ptrArray high = TR_PTR_ARRAY_INIT;
    tr_ptrArray normal = TR_PTR_ARRAY_INIT;
    struct tr_peerIo ** peers;

    /* allocateBandwidth() is a helper function with two purposes:
     * 1. allocate bandwidth to b and its subtree
     * 2. accumulate an array of all the peerIos from b and its subtree. */
    allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp );
    peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp );
    peerCount = tr_ptrArraySize( &tmp );

    for( i=0; i<peerCount; ++i )
    {
        tr_peerIo * io = peers[i];
        tr_peerIoRef( io );

        tr_peerIoFlushOutgoingProtocolMsgs( io );

        switch( io->priority ) {
            case TR_PRI_HIGH:   tr_ptrArrayAppend( &high,   io ); /* fall through */
            case TR_PRI_NORMAL: tr_ptrArrayAppend( &normal, io ); /* fall through */
            default:            tr_ptrArrayAppend( &low,    io );
        }
    }

    /* First phase of IO. Tries to distribute bandwidth fairly to keep faster
     * peers from starving the others. Loop through the peers, giving each a
     * small chunk of bandwidth. Keep looping until we run out of bandwidth
     * and/or peers that can use it */
    phaseOne( &high, dir );
    phaseOne( &normal, dir );
    phaseOne( &low, dir );

    /* Second phase of IO. To help us scale in high bandwidth situations,
     * enable on-demand IO for peers with bandwidth left to burn.
     * This on-demand IO is enabled until (1) the peer runs out of bandwidth,
     * or (2) the next tr_bandwidthAllocate() call, when we start over again. */
    for( i=0; i<peerCount; ++i )
        tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) );

    for( i=0; i<peerCount; ++i )
        tr_peerIoUnref( peers[i] );

    /* cleanup */
    tr_ptrArrayDestruct( &normal, NULL );
    tr_ptrArrayDestruct( &high, NULL );
    tr_ptrArrayDestruct( &low, NULL );
    tr_ptrArrayDestruct( &tmp, NULL );
}

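/* Caller sketch (hypothetical; the real driver is a periodic timer set up
 * elsewhere in libtransmission, and the period constant lives there, not in
 * this file):
 *
 *     tr_bandwidthAllocate( sessionBandwidth, TR_UP,   period_msec );
 *     tr_bandwidthAllocate( sessionBandwidth, TR_DOWN, period_msec );
 *
 * Note that the switch above falls through intentionally: a TR_PRI_HIGH
 * peer is appended to the high, normal, and low arrays, so it gets a turn
 * in every phaseOne() round. */
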
void
tr_bandwidthSetPeer( tr_bandwidth * b, tr_peerIo * peer )
{
    assert( tr_isBandwidth( b ) );
    assert( ( peer == NULL ) || tr_isPeerIo( peer ) );

    b->peer = peer;
}

/***
****
***/

unsigned int
tr_bandwidthClamp( const tr_bandwidth  * b,
                   tr_direction          dir,
                   unsigned int          byteCount )
{
    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    if( b )
    {
        if( b->band[dir].isLimited )
            byteCount = MIN( byteCount, b->band[dir].bytesLeft );

        if( b->parent && b->band[dir].honorParentLimits )
            byteCount = tr_bandwidthClamp( b->parent, dir, byteCount );
    }

    return byteCount;
}

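/* Hierarchy example (illustrative numbers): a child bandwidth with 3000
 * bytesLeft whose parent has only 1000 left will see
 * tr_bandwidthClamp( b, dir, 5000 ) return 1000 -- each level takes the MIN
 * of the request and its own budget, then defers to its parent while
 * honorParentLimits is set. */
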
unsigned int
tr_bandwidthGetRawSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
{
    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    return getSpeed_Bps( &b->band[dir].raw, HISTORY_MSEC, now );
}

unsigned int
tr_bandwidthGetPieceSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir )
{
    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    return getSpeed_Bps( &b->band[dir].piece, HISTORY_MSEC, now );
}

void
tr_bandwidthUsed( tr_bandwidth  * b,
                  tr_direction    dir,
                  size_t          byteCount,
                  tr_bool         isPieceData,
                  uint64_t        now )
{
    struct tr_band * band;

    assert( tr_isBandwidth( b ) );
    assert( tr_isDirection( dir ) );

    band = &b->band[dir];

    if( band->isLimited && isPieceData )
        band->bytesLeft -= MIN( band->bytesLeft, byteCount );

#ifdef DEBUG_DIRECTION
    if( ( dir == DEBUG_DIRECTION ) && ( band->isLimited ) )
        fprintf( stderr, "%p consumed %5zu bytes of %5s data... was %6zu, now %6zu left\n",
                 b, byteCount, (isPieceData?"piece":"raw"), oldBytesLeft, band->bytesLeft );
#endif

    bytesUsed( now, &band->raw, byteCount );

    if( isPieceData )
        bytesUsed( now, &band->piece, byteCount );

    if( b->parent != NULL )
        tr_bandwidthUsed( b->parent, dir, byteCount, isPieceData, now );
}

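/* Propagation note with an illustrative trace: when a peer-level bandwidth
 * records 16 384 bytes of piece data, the same 16 384 bytes are charged to
 * its parent and on up to the root, so every limited level's bytesLeft and
 * every level's raw/piece speed history stay consistent for the next
 * allocation pass. */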