/*
 * Mausezahn - A fast versatile traffic generator
 * Copyright (C) 2008-2010 Herbert Haas
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see http://www.gnu.org/licenses/gpl-2.0.html
 */
26 void mops_set_active (struct mops
*mp
)
28 pthread_mutex_lock (& (mp
->mops_mutex
) );
29 mp
->state
= MOPS_STATE_ACTIVE
;
30 pthread_mutex_unlock (& (mp
->mops_mutex
) );
33 void mops_set_seqact (struct mops
*mp
)
35 pthread_mutex_lock (& (mp
->mops_mutex
) );
36 mp
->state
= MOPS_STATE_SEQACT
;
37 pthread_mutex_unlock (& (mp
->mops_mutex
) );
41 void mops_set_conf (struct mops
*mp
)
43 pthread_mutex_lock (& (mp
->mops_mutex
) );
44 mp
->state
= MOPS_STATE_CONFIG
;
45 pthread_mutex_unlock (& (mp
->mops_mutex
) );
49 int mops_is_active (struct mops
*mp
)
52 pthread_mutex_lock (& (mp
->mops_mutex
) );
53 if (mp
->state
== MOPS_STATE_ACTIVE
) i
=1;
54 pthread_mutex_unlock (& (mp
->mops_mutex
) );
58 // Returns 1 if the packet is in any running state
59 // such as MOPS_STATE_ACTIVE or MOPS_STATE_SEQACT
60 int mops_is_any_active (struct mops
*mp
)
63 pthread_mutex_lock (& (mp
->mops_mutex
) );
64 if (mp
->state
> MOPS_STATE_CONFIG
) i
=1;
65 pthread_mutex_unlock (& (mp
->mops_mutex
) );
70 int mops_is_seqact (struct mops
*mp
)
73 pthread_mutex_lock (& (mp
->mops_mutex
) );
74 if (mp
->state
== MOPS_STATE_SEQACT
) i
=1;
75 pthread_mutex_unlock (& (mp
->mops_mutex
) );
81 // return mops state (0=MOPS_STATE_NULL, 1=MOPS_STATE_INIT, 2=MOPS_STATE_CONFIG, 3=MOPS_STATE_ACTIVE, 4=MOPS_STATE_SEQACT)
82 int mops_state (struct mops
*mp
)
85 pthread_mutex_lock (& (mp
->mops_mutex
) );
87 pthread_mutex_unlock (& (mp
->mops_mutex
) );
// Start transmission of a single packet 'mp'.
// If mp->interval_used is set, spawn the interval management thread
// (mops_interval_thread); otherwise spawn the native TX thread
// (mops_tx_thread_native) directly — a "normal packet train".
// Visible return paths yield 1 on error (already active, or pthread_create
// failed). NOTE(review): the success-path return, several braces and the
// early-return body are on original lines not shown in this excerpt —
// do not reformat without the full source.
92 int mops_tx_simple (struct mops
*mp
)
// Refuse to start if the packet is already being transmitted.
95 if (mops_is_active(mp
)) {
99 if (mp
->interval_used
) {
100 if ( pthread_create( &(mp
->interval_thread
), NULL
, mops_interval_thread
, mp
) ) {
// Thread creation failed: downgrade interval_used back to "configured only".
101 mp
->interval_used
=1; // 1 means interval only configured
102 return 1; // Error creating thread
104 } else // normal packet train
105 if ( pthread_create( &(mp
->mops_thread
), NULL
, mops_tx_thread_native
, mp
) ) {
106 return 1; // Error creating thread
113 // Starts a packet sequence.
115 // RETURN VALUES: 0 upon success
116 // 1 failure: packet not in CONFIG state
117 // 2 failure: packet has infinite count
// (3 is returned below when the sequence thread cannot be created.)
// NOTE(review): the declarations of 'cur' (struct pseq *) and loop index
// 'i', plus the success return, are on original lines not visible here.
118 int mops_tx_sequence (struct mz_ll
*seq
)
123 // verify 1) that all packets are in config state
124 // 2) and have finite count:
125 cur
= (struct pseq
*) seq
->data
;
126 for (i
=0; i
<cur
->count
; i
++) {
127 if (cur
->packet
[i
]->state
!=MOPS_STATE_CONFIG
) return 1;
// A packet with count==0 would transmit forever and stall the sequence.
128 if (cur
->packet
[i
]->count
==0) return 2;
131 // Set all packets in this sequence into state SEQACT:
132 for (i
=0; i
<cur
->count
; i
++)
133 mops_set_seqact (cur
->packet
[i
]);
// Spawn the sequence thread that sends the packets one after another.
135 if ( pthread_create( &(seq
->sequence_thread
), NULL
, mops_sequence_thread
, seq
) ) {
136 return 3; // Error creating thread
143 // This is the sequence sending thread
// Argument is the sequence list element (struct mz_ll *); its 'data'
// member points to the struct pseq describing the packets and inter-packet
// gaps. Each packet is transmitted synchronously via
// mops_tx_thread_native() (called as a plain function here, not a thread).
// NOTE(review): declarations of 'cur'/'i', the closing braces, and the
// thread epilogue (state reset of the sequence itself, pthread_exit) are on
// original lines not visible in this excerpt.
144 void *mops_sequence_thread (void *arg
)
146 struct mz_ll
*seq
= (struct mz_ll
*) arg
;
150 cur
= (struct pseq
*) seq
->data
;
152 // Send one packet after each other, possibly with gaps inbetween:
153 for (i
=0; i
<cur
->count
; i
++) {
154 mops_tx_thread_native (cur
->packet
[i
]);
// If a non-zero gap is configured for this slot...
156 if ((cur
->gap
[i
].tv_sec
) || (cur
->gap
[i
].tv_nsec
)) {
157 nanosleep(&cur
->gap
[i
], NULL
); //...apply it.
162 // 1) reset all packets into config state
163 for (i
=0; i
<cur
->count
; i
++)
164 cur
->packet
[i
]->state
=MOPS_STATE_CONFIG
;
167 // 3) set sequence state to inactive (=0)
173 // This is the interval management thread which starts
174 // packet transmission threads by itself.
176 // Note how this works: After the while statement below we have actually
177 // two threads, mops_tx_thread_native (sending the packet) and mops_interval_thread which
178 // starts mops_tx_thread_native every mp->interval. If mp->interval is smaller than
179 // mp->delay (and mp->count > 1) then multiple transmission threads will be active at the
180 // same time which is usually not what the user wants. We do not catch this case here
181 // but the user interface should do that (it is done in 'cmd_packet_interval').
// NOTE(review): the surrounding while-loop construct, error handling for
// pthread_create, and closing braces are on original lines not visible in
// this excerpt.
183 void *mops_interval_thread (void *arg
)
185 struct mops
*mp
= (struct mops
*) arg
;
// Mark the interval as actually running (2), not merely configured (1).
187 mp
->interval_used
=2; // 2 means active interval
189 if ( pthread_create( &(mp
->mops_thread
), NULL
, mops_tx_thread_native
, mp
) ) {
// Wait one interval before the next TX thread is spawned.
193 nanosleep(&mp
->interval
, NULL
);
196 pthread_exit(NULL
); // hmm...does this make sense?
201 // General MOPS sending thread using packet sockets.
// Also called directly (non-threaded) from mops_sequence_thread for each
// packet of a sequence. Transmits mp->frame via the per-device packet
// socket, optionally performing manual IP fragmentation, RTP payload
// updates, and per-iteration range/random modifiers on addresses, ports,
// and TCP seq/ack numbers.
// NOTE(review): this excerpt is incomplete — declarations of i, n, ps, tv,
// DA, fragsize/offset/fragptr and friends, the START: label, the do/while
// skeleton, and many braces sit on original lines not shown here. Do not
// reformat or restructure without the complete source.
203 void *mops_tx_thread_native (void *arg
)
205 struct mops
*mp
= (struct mops
*) arg
;
206 struct mops_ext_rtp
* pd
;
209 // Local vars are faster --------------------------
211 register int infinity
, devind
;
// Each *_isrange / *_isrand flag is masked with the protocol-in-use flag
// so a range on an unused protocol has no effect.
212 int ip_src_isrange
= mp
->use_IP
& mp
->ip_src_isrange
;
213 int ip_dst_isrange
= mp
->use_IP
& mp
->ip_dst_isrange
;
214 int sp_isrange
= (mp
->use_UDP
| mp
->use_TCP
) & mp
->sp_isrange
;
215 int dp_isrange
= (mp
->use_UDP
| mp
->use_TCP
) & mp
->dp_isrange
;
216 int ip_src_israndom
= mp
->use_IP
& mp
->ip_src_israndom
;
217 int sp_isrand
= (mp
->use_UDP
| mp
->use_TCP
) & mp
->sp_isrand
;
218 int dp_isrand
= (mp
->use_UDP
| mp
->use_TCP
) & mp
->dp_isrand
;
// Snapshot range limits and deltas into locals for the hot loop.
222 ip_src_start
= mp
->ip_src_start
,
223 ip_src_stop
= mp
->ip_src_stop
,
224 ip_dst_start
= mp
->ip_dst_start
,
225 ip_dst_stop
= mp
->ip_dst_stop
,
226 tcp_seq_delta
= mp
->tcp_seq_delta
,
228 tcp_ack_delta
= mp
->tcp_ack_delta
,
234 sp_start
= mp
->sp_start
,
235 dp_start
= mp
->dp_start
,
236 sp_stop
= mp
->sp_stop
,
237 dp_stop
= mp
->dp_stop
;
240 rtp_mode
= 0; // RTP not used
248 begin_ip_payload
= 0,
251 whats_used
= 0; // store use_UDP or use_TCP here to clean up packet parameters finally
253 original_msg
[MAX_MOPS_MSG_SIZE
+1], // temporary buffer when fragmentation is needed
254 ip_payload
[MAX_MOPS_MSG_SIZE
+1]; // temporary buffer when fragmentation is needed
257 // -------------------------------------------------
260 /////////////////////////////
261 // NOTE: If packet is part of a sequence, then this function is already part of a sequence thread
262 // and all packets are already in state SEQACT. Otherwise we set the packet in state ACTIVE.
263 if (!mops_is_seqact(mp
))
264 mops_set_active (mp
);
265 /////////////////////////////
268 // infinite or not? Count up or down?
269 if (mp
->count
== 0) {
275 mp
->cntx
= mp
->count
; // count down
// Copy the configured inter-packet delay into a local timespec.
279 tv
.tv_sec
= mp
->ndelay
.tv_sec
;
280 tv
.tv_nsec
= mp
->ndelay
.tv_nsec
;
// Look up the TX device by name in the global device list.
283 for (i
=0; i
<device_list_entries
; i
++) {
284 if (strncmp(device_list
[i
].dev
, mp
->device
, 15)==0) break;
288 // Packet socket already existing and valid?
289 ps
= device_list
[devind
].ps
; // the packet socket
292 // Automatic direct or indirect delivery for IP packets?
293 if ((mp
->use_IP
) && (mp
->auto_delivery_off
== 0)) {
294 if (mp
->ip_dst_isrange
)
295 mops_hton4(&mp
->ip_dst_start
, DA
);
297 mops_hton4(&mp
->ip_dst
, DA
);
// Resolve the destination MAC (direct neighbor or gateway).
299 mops_ip_get_dst_mac(&device_list
[devind
], DA
, mp
->eth_dst
);
303 // Impossible settings
304 if (((ip_src_isrange
) && (ip_src_israndom
)) ||
305 ((sp_isrand
) && (sp_isrange
)) ||
306 ((dp_isrand
) && (dp_isrange
))) {
307 fprintf(stderr
, "[ERROR] (mops_tx_thread_native) -- conflicting requirements: both range and random!\n");
311 // Initialize start values when ranges have been defined
312 if (ip_src_isrange
) mp
->ip_src
= mp
->ip_src_start
;
313 if (ip_dst_isrange
) mp
->ip_dst
= mp
->ip_dst_start
;
314 if (sp_isrange
) mp
->sp
= mp
->sp_start
;
315 if (dp_isrange
) mp
->dp
= mp
->dp_start
;
// TCP sequence/ack number cycling setup.
317 tcp_seq_range
= mops_tcp_complexity_sqnr(mp
);
318 mp
->tcp_seq
= mp
->tcp_seq_start
;
319 tcp_seq_count
= tcp_seq_range
;
322 tcp_ack_range
= mops_tcp_complexity_acknr(mp
);
323 mp
->tcp_ack
= mp
->tcp_ack_start
;
324 tcp_ack_count
= tcp_ack_range
;
327 // RTP special message treatment
328 if (mp
->p_desc_type
== MOPS_RTP
) {
330 if (pd
==NULL
) return NULL
;
331 if (pd
->source
== DSP_SOURCE
)
332 rtp_mode
= 2; // dsp payload
334 rtp_mode
= 1; // zero payload
336 mops_update_rtp (mp
); // initialize RTP packet here
339 // TODO: VLAN, MPLS - ranges
342 // ---------------------- The holy transmission loop ---------------- //
345 // Update whole packet (once before loop!)
346 mops_ext_update (mp
);
350 // Check if IP fragmentation is desired.
351 // If yes, set local 'fragsize' and 'begin_ip_payload' pointer.
352 if (mp
->ip_fragsize
) {
354 fragsize
= mp
->ip_fragsize
;
355 frag_overlap
= mp
->ip_frag_overlap
;
356 offset
= mp
->ip_frag_offset
;
357 offset_delta
= (fragsize
-frag_overlap
)/8;
359 begin_ip_payload
= mp
->begin_UDP
;
361 } else if (mp
->use_TCP
) {
362 begin_ip_payload
= mp
->begin_TCP
;
365 begin_ip_payload
= mp
->begin_MSG
;
// Stash the original message and full IP payload so they can be restored
// after fragment transmission.
368 ip_payload_s
= mp
->frame_s
- begin_ip_payload
;
369 memcpy((void*) original_msg
, (void*) mp
->msg
, mp
->msg_s
);
370 original_msg_s
= mp
->msg_s
;
371 memcpy((void*) ip_payload
, (void*) &mp
->frame
[begin_ip_payload
], ip_payload_s
);
376 goto START
; // looks like a dirty hack but reduces a few cpu cycles each loop
380 nanosleep(&tv
, NULL
); // don't apply this before first and after last packet.
383 // +++++++++++++++++++++++++++++++++++
386 // ------ IP fragmentation required? ------------------------------------------------------
388 // Basic idea: At this point we assume that all updates have been already applied
389 // so mp->frame contains a valid packet. But now we do the following:
391 // 1. Determine first byte after end of IP header (IP options may be used) [done above]
392 // 2. Store the 'IP payload' in the temporary buffer 'ip_payload' [done above]
393 // 3. Create a new IP payload but take only the first fragsize bytes out of 'ip_payload'
394 // 4. This new IP payload is copied into mp->msg
395 // 5. Set the IP parameters: MF=1, offset=0
396 // 6. Call mops_update() and send the packet
397 // 7. offset = offset + fragsize/8
398 // 8. Increment the IP identification number
399 // 9. Repeat this until the last fragment is reached. For the last fragment
400 // set the flag MF=0.
401 // 10. Restore the original IP parameters (use_UDP or use_TCP)
405 fragptr
=0; // NOTE: by intention we do not set mp->ip_frag_offset to 0 here !!! The user knows what she does!
407 mp
->ip_id
++; // automatically wraps around correctly (u_int16_t)
408 // send all fragments except the last one:
409 while(fragptr
+fragsize
< ip_payload_s
) {
410 memcpy((void*) mp
->msg
, (void*) ip_payload
+fragptr
, fragsize
);
411 mp
->msg_s
= fragsize
;
413 n
= write(ps
, mp
->frame
, mp
->frame_s
);
414 if (n
!=mp
->frame_s
) {
415 fprintf(stderr
, "ERROR: Could not send IP fragment through interface %s\n", mp
->device
);
420 mp
->ip_frag_offset
+= offset_delta
;
422 // send last fragment:
424 memcpy((void*) mp
->msg
, (void*) ip_payload
+fragptr
, ip_payload_s
-fragptr
);
425 mp
->msg_s
= ip_payload_s
-fragptr
;
427 n
= write(ps
, mp
->frame
, mp
->frame_s
);
428 if (n
!=mp
->frame_s
) {
429 fprintf(stderr
, "ERROR: Could not send IP fragment through interface %s\n", mp
->device
);
434 // -- restore original mops parameters --
435 switch (whats_used
) {
436 case 1: mp
->use_UDP
= 1; break;
437 case 2: mp
->use_TCP
= 1; break;
439 memcpy((void*) mp
->msg
, (void*) original_msg
, original_msg_s
);
440 mp
->msg_s
= original_msg_s
;
441 mp
->ip_frag_offset
=offset
;
445 // -- send unfragmented packets here: --
446 n
= write(ps
, mp
->frame
, mp
->frame_s
);
447 if (n
!=mp
->frame_s
) {
448 fprintf(stderr
, "ERROR: Could not send packet through interface %s\n", mp
->device
);
455 /* [ RTP TODO: ] Use another thread reading from /dev/dsp and signalling us to continue!
456 * It should work like this: (pseudocode below)
458 * if (rtp_mode == DSP_SOURCE) {
459 * pthread_cond_wait ( &mycond, &mymutex ); // wait until pthread condition is signaled
460 * // now, frame should contain 160 bytes from /dev/dsp
464 * The reading thread will do something like this: (again fuzzy code only)
467 * read(fd, pd->rtp_payload, 160); // this takes 20 msec anyway
468 * mops_update_rtp_dynamics (mp); // also updates dynamic header fields
469 * pthread_cond_broadcast (&mycond); // wake up TX thread
473 * http://www.oreilly.de/catalog/multilinux/excerpt/ch14-05.htm
475 * NOTE that we must not reach nanosleep below because the 20 msec delay is
476 * done implicitely by reading 160 bytes from /dev/dsp
480 case 1: // dummy payload => segmentation delay is controlled by nanosleep below!
481 mops_update_rtp_dynamics (mp
);
483 case 2: // await data from /dev/dsp => segmentation delay is controlled by a reading thread!
484 /* pthread_cond_wait ( &mycond, &mymutex ); // wait until pthread condition is signaled
485 * // now, frame should contain 160 bytes from /dev/dsp
490 // no RTP, continue as usual
495 // +++++++++++++++++++++++++++++++++++
497 // *** begin of modifiers -- order is important! *** *************** //
// TCP sequence number: step by delta until the range is exhausted,
// then wrap back to the configured start value.
500 if (--tcp_seq_count
) {
501 mp
->tcp_seq
+= tcp_seq_delta
;
505 tcp_seq_count
= tcp_seq_range
;
506 mp
->tcp_seq
= mp
->tcp_seq_start
;
512 if (--tcp_ack_count
) {
513 mp
->tcp_ack
+= tcp_ack_delta
;
517 tcp_ack_count
= tcp_ack_range
;
518 mp
->tcp_ack
= mp
->tcp_ack_start
;
// Source IP: range walk or random pick (mutually exclusive, checked above).
523 if (ip_src_isrange
) {
524 if (++mp
->ip_src
> ip_src_stop
) {
525 mp
->ip_src
= ip_src_start
;
534 if (ip_src_israndom
) {
535 mp
->ip_src
= 0x01000001 + (u_int32_t
) ( ((float) rand()/RAND_MAX
)*0xE0000000); //latter is 224.0.0.0
538 if (ip_dst_isrange
) {
539 if (++mp
->ip_dst
> ip_dst_stop
) {
540 mp
->ip_dst
= ip_dst_start
;
// Rebuild the multicast destination MAC (01:00:5e + low 23 bits of DA)
// whenever auto delivery is enabled and the destination IP changed.
541 if (mp
->auto_delivery_off
== 0) {
542 mops_hton4(&mp
->ip_dst
, DA
);
543 mp
->eth_dst
[0] = 0x01;
544 mp
->eth_dst
[1] = 0x00;
545 mp
->eth_dst
[2] = 0x5e;
546 mp
->eth_dst
[3] = DA
[1] & 127;
547 mp
->eth_dst
[4] = DA
[2];
548 mp
->eth_dst
[5] = DA
[3];
553 if (mp
->auto_delivery_off
== 0) {
554 mops_hton4(&mp
->ip_dst
, DA
);
555 mp
->eth_dst
[0] = 0x01;
556 mp
->eth_dst
[1] = 0x00;
557 mp
->eth_dst
[2] = 0x5e;
558 mp
->eth_dst
[3] = DA
[1] & 127;
559 mp
->eth_dst
[4] = DA
[2];
560 mp
->eth_dst
[5] = DA
[3];
// Destination / source port: range walk or uniform random 0..0xffff.
568 if (++mp
->dp
> dp_stop
) {
579 mp
->dp
= (u_int16_t
) ( ((float) rand()/RAND_MAX
)*0xffff);
584 if (++mp
->sp
> sp_stop
) {
595 mp
->sp
= (u_int16_t
) ( ((float) rand()/RAND_MAX
)*0xffff);
599 // *** end of modifiers ******************************************** //
601 mp
->cntx
++; // count up
604 } while (--mp
->cntx
);
607 if (!mops_is_seqact(mp
)) {
608 // only [change state and close thread] if packet is NOT part of a sequence.
609 // If the packet is part of a sequence then THIS function is already part of
610 // a sequence thread and it will be closed in 'mops_sequence_thread'.
621 int mops_destroy_thread (struct mops
*mp
)
625 if (mp
->interval_used
==2) {
626 pthread_cancel(mp
->interval_thread
);
631 if (mops_is_active(mp
)) {
632 pthread_cancel(mp
->mops_thread
);
633 pthread_mutex_destroy(& mp
->mops_mutex
);