#define MODULE_LOG_PREFIX "cwccheck"

#include "globals.h"

#ifdef CW_CYCLE_CHECK

#include "module-cw-cycle-check.h"
#include "oscam-chk.h"
#include "oscam-client.h"
#include "oscam-ecm.h"
#include "oscam-lock.h"
#include "oscam-string.h"
#include "oscam-cache.h"
struct s_cwc_md5
{
	uchar md5[CS_ECMSTORESIZE];
	uint32_t csp_hash;
	uchar cw[16];
};
struct s_cw_cycle_check
{
	uchar cw[16];
	time_t time;
	time_t locktime; // lock during learning
	uint16_t caid;
	uint16_t sid;
	uint16_t chid;
	uint32_t provid;
	int16_t ecmlen;
	int8_t stage;
	int32_t cycletime;
	int32_t dyncycletime;
	int8_t nextcyclecw;
	struct s_cwc_md5 ecm_md5[15]; // max 15 old ecm md5 / csp hashes
	int8_t cwc_hist_entry;
	uint8_t old;
	int8_t stage4_repeat;
	struct s_cw_cycle_check *prev;
	struct s_cw_cycle_check *next;
};
extern CS_MUTEX_LOCK cwcycle_lock;

static struct s_cw_cycle_check *cw_cc_list;
static int32_t cw_cc_list_size;
static time_t last_cwcyclecleaning;
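
/*
 * cw_cc_list is a doubly linked list of per-channel cycle-check entries,
 * newest first. cw_cc_list_size only counts entries that are not yet marked
 * ->old; cleanupcwcycle() cuts the list at the first entry older than
 * keepcycletime and frees the tail.
 */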
/*
 * Check for CW CYCLE
 */
static uint8_t chk_is_pos_fallback(ECM_REQUEST *er, char *reader)
{
	struct s_ecm_answer *ea;
	struct s_reader *fbrdr;
	char fb_reader[64];

	for(ea = er->matching_rdr; ea; ea = ea->next)
	{
		if(ea->reader)
		{
			fbrdr = ea->reader;
			snprintf(fb_reader, sizeof(fb_reader), "%s", ea->reader->label);
			if(!strcmp(reader, fb_reader) && chk_is_fixed_fallback(fbrdr, er))
			{
				cs_log("cyclecheck [check Fixed FB] %s is set as fixed fallback", reader);
				return 1;
			}
		}
	}
	return 0;
}
static inline uint8_t checkECMD5CW(uchar *ecmd5_cw)
{
	int8_t i;
	for(i = 0; i < CS_ECMSTORESIZE; i++)
		if(ecmd5_cw[i]) { return 1; }
	return 0;
}
/*
 * countCWpart exists to prevent cases like this:
 * D41A1A08B01DAD7A 0F1D0A36AF9777BD found -> ok
 * E9151917B01DAD7A 0F1D0A36AF9777BD found last -> wrong (freeze), but ok for cwc
 * 7730F59C6653A55E D3822A7F133D3C8C cwc bad -> but the cw is right, cwc out of step
 */
static uint8_t countCWpart(ECM_REQUEST *er, struct s_cw_cycle_check *cwc)
{
	uint8_t eo = cwc->nextcyclecw ? 0 : 8;
	int8_t i, ret = 0;
	char cwc_cw[9 * 3];
	char er_cw[9 * 3];

	for(i = 0; i < 8; i++)
	{
		if(cwc->cw[i + eo] == er->cw[i + eo])
		{
			ret++;
		}
	}

	cs_hexdump(0, cwc->cw + eo, 8, cwc_cw, sizeof(cwc_cw));
	cs_hexdump(0, er->cw + eo, 8, er_cw, sizeof(er_cw));
	cs_log_dbg(D_CWC, "cyclecheck [countCWpart] er-cw %s", er_cw);
	cs_log_dbg(D_CWC, "cyclecheck [countCWpart] cw-cw %s", cwc_cw);
	if(ret > cfg.cwcycle_sensitive)
	{
		cs_log("cyclecheck [countCWpart] new cw is too similar to the old one (unused part), sensitive %d, same bytes %d", cfg.cwcycle_sensitive, ret);
	}
	return ret;
}
static uint8_t checkvalidCW(ECM_REQUEST *er)
{
	uint8_t ret = 1;
	if(chk_is_null_CW(er->cw) && er->caid != 0x2600) // 0x2600 is used by BISS, and a constant cw can indeed be all zero
		{ er->rc = E_NOTFOUND; }

	if(er->rc == E_NOTFOUND)
		{ return 0; } // wrong -> leave the check

	if(checkCWpart(er->cw, 0) && checkCWpart(er->cw, 1))
		{ return 1; } // cw1 and cw2 are filled -> we can check for cwc

	if((!checkCWpart(er->cw, 0) || !checkCWpart(er->cw, 1)) && caid_is_videoguard(er->caid))
	{
		cs_log("CAID: %04X obviously uses half-cycle cw's: no need to check it with CWC! Remove CAID: %04X from the CWC config!", er->caid, er->caid);
		ret = 0; // cw1 or cw2 is null
	}
	return ret;
}
void cleanupcwcycle(void)
{
	time_t now = time(NULL);
	if(last_cwcyclecleaning + 120 > now) // only clean once every 2 minutes
		{ return; }

	last_cwcyclecleaning = now;
	int32_t count = 0, kct = cfg.keepcycletime * 60 + 30; // if keepcycletime is set, wait longer before deleting
	struct s_cw_cycle_check *prv = NULL, *currentnode = NULL, *temp = NULL;

	bool bcleanup = false;

	// write lock
	cs_writelock(__func__, &cwcycle_lock);
	for(currentnode = cw_cc_list, prv = NULL; currentnode; prv = currentnode, currentnode = currentnode->next, count++) // first remove old entries
	{
		if((now - currentnode->time) <= kct) // entry is still young enough -> keep it, the list is cut at the first one that is too old
		{
			continue;
		}
		cs_log_dbg(D_CWC, "cyclecheck [Cleanup] diff: %ld kct: %i", now - currentnode->time, kct);
		if(prv != NULL)
		{
			prv->next = NULL;
		}
		else
		{
			cw_cc_list = NULL;
		}
		bcleanup = true;
		break; // one cut is enough, everything after this entry is even older
	}
	cs_writeunlock(__func__, &cwcycle_lock);

	while(currentnode != NULL)
	{
		temp = currentnode->next;
		if(!currentnode->old)
			{ cw_cc_list_size--; }
		NULLFREE(currentnode);
		currentnode = temp;
	}

	if(bcleanup)
		{ cs_log_dbg(D_CWC, "cyclecheck [Cleanup] list new size: %d (realsize: %d)", cw_cc_list_size, count); }
}
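
/*
 * Result codes produced by checkcwcycle_int() and evaluated by the switch in
 * checkcwcycle() at the bottom of this file:
 *   0 = CW cycle OK                 5 = bad cycle, but from a fixed fallback reader
 *   1 = bad CW cycle                6 = not checked (learning, stage < 3)
 *   2 = ECM answer too old          7 = not checked (learning, stage 4)
 *   3 = check ignored               8 = cycle info taken from the cacheex source
 *   4 = same CW as the stored one   9 = bad cycle that was already counted
 */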
static int32_t checkcwcycle_int(ECM_REQUEST *er, char *er_ecmf, char *user, uchar *cw, char *reader, uint8_t cycletime_fr, uint8_t next_cw_cycle_fr)
{
	int8_t i, ret = 6; // ret = 6: not checked
	int8_t cycleok = -1;
	time_t now = er->tps.time; //time(NULL);
	uint8_t need_new_entry = 1, upd_entry = 1;
	char cwstr[17 * 3]; // cw to check

	char cwc_ecmf[ECM_FMT_LEN];
	char cwc_md5[17 * 3];
	char cwc_cw[17 * 3];
	char cwc_csp[5 * 3];
	int8_t n = 1, m = 1, k;
	int32_t mcl = cfg.maxcyclelist;
	struct s_cw_cycle_check *currentnode = NULL, *cwc = NULL;

	/*for(list = cw_cc_list; list; list = list->next) { // list all entries in the log for DEBUG
		cs_log_dbg(D_CWC, "cyclecheck: [LIST] %04X@%06X:%04X OLD: %i Time: %ld DifftoNow: %ld Stage: %i cw: %s", list->caid, list->provid, list->sid, list->old, list->time, now - list->time, list->stage, cs_hexdump(0, list->cw, 16, cwstr, sizeof(cwstr)));
	} */
	if(!checkvalidCW(er))
		{ return 3; } // cwc ign

	// read lock
	cs_readlock(__func__, &cwcycle_lock);
	for(currentnode = cw_cc_list; currentnode; currentnode = currentnode->next)
	{
		if(currentnode->caid != er->caid || currentnode->provid != er->prid || currentnode->sid != er->srvid || currentnode->chid != er->chid)
		{
			continue;
		}
		if(er->ecmlen != 0 && currentnode->ecmlen != 0)
		{
			if(currentnode->ecmlen != er->ecmlen)
			{
				cs_log_dbg(D_CWC, "cyclecheck [other ECM LEN] -> don't check");
				continue;
			}
		}
		need_new_entry = 0; // we got an entry for caid/prov/sid, so we don't need a new one
		cs_hexdump(0, cw, 16, cwstr, sizeof(cwstr)); // checked cw for the log

		if(cs_malloc(&cwc, sizeof(struct s_cw_cycle_check)))
		{
			memcpy(cwc, currentnode, sizeof(struct s_cw_cycle_check)); // copy current to new

			if(!currentnode->old)
			{
				currentnode->old = 1; // needed later for counting
				cw_cc_list_size--;
			}
			// now we have all data and can leave the read lock
			cs_readunlock(__func__, &cwcycle_lock);

			cs_hexdump(0, cwc->ecm_md5[cwc->cwc_hist_entry].md5, 16, cwc_md5, sizeof(cwc_md5));
			cs_hexdump(0, (void *)&cwc->ecm_md5[cwc->cwc_hist_entry].csp_hash, 4, cwc_csp, sizeof(cwc_csp));
			cs_hexdump(0, cwc->cw, 16, cwc_cw, sizeof(cwc_cw));
			ecmfmt(cwc_ecmf, ECM_FMT_LEN, cwc->caid, 0, cwc->provid, cwc->chid, 0, cwc->sid, cwc->ecmlen, cwc_md5, cwc_csp, cwc_cw, 0, 0, NULL, NULL);
			// cycletime over cacheex
			if(cfg.cwcycle_usecwcfromce)
			{
				if(cycletime_fr > 0 && next_cw_cycle_fr < 2)
				{
					cs_log_dbg(D_CWC, "cyclecheck [Use Info in Request] Client: %s cycletime: %isek - nextcwcycle: CW%i for %04X@%06X:%04X", user, cycletime_fr, next_cw_cycle_fr, er->caid, er->prid, er->srvid);
					cwc->stage = 3;
					cwc->cycletime = cycletime_fr;
					cwc->nextcyclecw = next_cw_cycle_fr;
					ret = 8;
					if(memcmp(cwc->cw, cw, 16) == 0) // check if the stored cw is the same as the current one
					{
						cs_log_dbg(D_CWC, "cyclecheck [Dump Stored CW] Client: %s EA: %s CW: %s Time: %ld", user, cwc_ecmf, cwc_cw, cwc->time);
						cs_log_dbg(D_CWC, "cyclecheck [Dump CheckedCW] Client: %s EA: %s CW: %s Time: %ld Timediff: %ld", user, er_ecmf, cwstr, now, now - cwc->time);
						if(now - cwc->time >= cwc->cycletime - cwc->dyncycletime)
						{
							cs_log_dbg(D_CWC, "cyclecheck [Same CW but much too late] Client: %s EA: %s CW: %s Time: %ld Timediff: %ld", user, er_ecmf, cwstr, now, now - cwc->time);
							ret = cfg.cwcycle_dropold ? 2 : 4;
						}
						else
						{
							ret = 4; // return 4: same CW
						}
						upd_entry = 0;
					}
					break;
				}
			}
			if(cwc->stage == 3 && cwc->nextcyclecw < 2 && now - cwc->time < cwc->cycletime * 2 - cwc->dyncycletime - 1) // check for a cycle; no need to check entries that are not in stage 3
			{
				/*for(k = 0; k < 15; k++) { // debug md5
					cs_log_dbg(D_CWC, "cyclecheck [checksumlist[%i]]: ecm_md5: %s csp-hash: %d Entry: %i", k, cs_hexdump(0, cwc->ecm_md5[k].md5, 16, ecm_md5, sizeof(ecm_md5)), cwc->ecm_md5[k].csp_hash, cwc->cwc_hist_entry);
				} */

				// first check if the stored cw is the same as the current one
				if(memcmp(cwc->cw, cw, 16) == 0)
				{
					cs_log_dbg(D_CWC, "cyclecheck [Dump Stored CW] Client: %s EA: %s CW: %s Time: %ld", user, cwc_ecmf, cwc_cw, cwc->time);
					cs_log_dbg(D_CWC, "cyclecheck [Dump CheckedCW] Client: %s EA: %s CW: %s Time: %ld Timediff: %ld", user, er_ecmf, cwstr, now, now - cwc->time);
					if(now - cwc->time >= cwc->cycletime - cwc->dyncycletime)
					{
						cs_log_dbg(D_CWC, "cyclecheck [Same CW but much too late] Client: %s EA: %s CW: %s Time: %ld Timediff: %ld", user, er_ecmf, cwstr, now, now - cwc->time);
						ret = cfg.cwcycle_dropold ? 2 : 4;
					}
					else
					{
						ret = 4; // return 4: same CW
					}
					upd_entry = 0;
					break;
				}
				if(cwc->nextcyclecw == 0) // CW0 must cycle
				{
					for(i = 0; i < 8; i++)
					{
						if(cwc->cw[i] == cw[i])
						{
							cycleok = 0; // means CW0 cycle OK
						}
						else
						{
							cycleok = -1;
							break;
						}
					}
				}
				else if(cwc->nextcyclecw == 1) // CW1 must cycle
				{
					for(i = 0; i < 8; i++)
					{
						if(cwc->cw[i + 8] == cw[i + 8])
						{
							cycleok = 1; // means CW1 cycle OK
						}
						else
						{
							cycleok = -1;
							break;
						}
					}
				}

				if(cycleok >= 0 && cfg.cwcycle_sensitive && countCWpart(er, cwc) >= cfg.cwcycle_sensitive) // 2, 3, 4; 0 = off
				{
					cycleok = -2;
				}
				if(cycleok >= 0)
				{
					ret = 0; // return code 0: cycle OK
					if(cycleok == 0)
					{
						cwc->nextcyclecw = 1;
						er->cwc_next_cw_cycle = 1;
						if(cwc->cycletime < 128 && (!(cwc->caid == 0x0100 && cwc->provid == 0x00006A))) // make sure cycletime stays below 128 (decimal), because in the cacheex share buf[18] bit 8 is used for cwc_next_cw_cycle
							{ er->cwc_cycletime = cwc->cycletime; }
						cs_log_dbg(D_CWC, "cyclecheck [Valid CW 0 Cycle] Client: %s EA: %s Timediff: %ld Stage: %i Cycletime: %i dyncycletime: %i nextCycleCW = CW%i from Reader: %s", user, er_ecmf, now - cwc->time, cwc->stage, cwc->cycletime, cwc->dyncycletime, cwc->nextcyclecw, reader);
					}
					else if(cycleok == 1)
					{
						cwc->nextcyclecw = 0;
						er->cwc_next_cw_cycle = 0;
						if(cwc->cycletime < 128 && (!(cwc->caid == 0x0100 && cwc->provid == 0x00006A))) // make sure cycletime stays below 128 (decimal), because in the cacheex share buf[18] bit 8 is used for cwc_next_cw_cycle
							{ er->cwc_cycletime = cwc->cycletime; }
						cs_log_dbg(D_CWC, "cyclecheck [Valid CW 1 Cycle] Client: %s EA: %s Timediff: %ld Stage: %i Cycletime: %i dyncycletime: %i nextCycleCW = CW%i from Reader: %s", user, er_ecmf, now - cwc->time, cwc->stage, cwc->cycletime, cwc->dyncycletime, cwc->nextcyclecw, reader);
					}
					cs_log_dbg(D_CWC, "cyclecheck [Dump Stored CW] Client: %s EA: %s CW: %s Time: %ld", user, cwc_ecmf, cwc_cw, cwc->time);
					cs_log_dbg(D_CWC, "cyclecheck [Dump CheckedCW] Client: %s EA: %s CW: %s Time: %ld Timediff: %ld", user, er_ecmf, cwstr, now, now - cwc->time);
				}
				else
				{
					for(k = 0; k < 15; k++) // check for old ECMs
					{
#ifdef CS_CACHEEX
						if((checkECMD5CW(er->ecmd5) && checkECMD5CW(cwc->ecm_md5[k].md5) && !(memcmp(er->ecmd5, cwc->ecm_md5[k].md5, sizeof(er->ecmd5)))) || (er->csp_hash && cwc->ecm_md5[k].csp_hash && er->csp_hash == cwc->ecm_md5[k].csp_hash))
#else
						if((memcmp(er->ecmd5, cwc->ecm_md5[k].md5, sizeof(er->ecmd5))) == 0)
#endif
						{
							cs_log_dbg(D_CWC, "cyclecheck [OLD] [CheckedECM] Client: %s EA: %s", user, er_ecmf);
							cs_hexdump(0, cwc->ecm_md5[k].md5, 16, cwc_md5, sizeof(cwc_md5));
							cs_hexdump(0, (void *)&cwc->ecm_md5[k].csp_hash, 4, cwc_csp, sizeof(cwc_csp));
							cs_log_dbg(D_CWC, "cyclecheck [OLD] [Stored ECM] Client: %s EA: %s.%s", user, cwc_md5, cwc_csp);
							if(!cfg.cwcycle_dropold && !memcmp(cwc->ecm_md5[k].cw, cw, 16))
								{ ret = 4; }
							else
								{ ret = 2; } // old ER
							upd_entry = 0;
							break;
						}
					}
					if(!upd_entry) { break; }
					if(cycleok == -2)
						{ cs_log_dbg(D_CWC, "cyclecheck [ATTENTION!! NON Valid CW] Client: %s EA: %s Timediff: %ld Stage: %i Cycletime: %i dyncycletime: %i nextCycleCW = CW%i from Reader: %s", user, er_ecmf, now - cwc->time, cwc->stage, cwc->cycletime, cwc->dyncycletime, cwc->nextcyclecw, reader); }
					else
						{ cs_log_dbg(D_CWC, "cyclecheck [ATTENTION!! NON Valid CW Cycle] NO CW Cycle detected! Client: %s EA: %s Timediff: %ld Stage: %i Cycletime: %i dyncycletime: %i nextCycleCW = CW%i from Reader: %s", user, er_ecmf, now - cwc->time, cwc->stage, cwc->cycletime, cwc->dyncycletime, cwc->nextcyclecw, reader); }
					cs_log_dbg(D_CWC, "cyclecheck [Dump Stored CW] Client: %s EA: %s CW: %s Time: %ld", user, cwc_ecmf, cwc_cw, cwc->time);
					cs_log_dbg(D_CWC, "cyclecheck [Dump CheckedCW] Client: %s EA: %s CW: %s Time: %ld Timediff: %ld", user, er_ecmf, cwstr, now, now - cwc->time);
					ret = 1; // bad cycle
					upd_entry = 0;
					if(cfg.cwcycle_allowbadfromffb)
					{
						if(chk_is_pos_fallback(er, reader))
						{
							ret = 5;
							cwc->stage = 4;
							upd_entry = 1;
							cwc->nextcyclecw = 2;
							break;
						}
					}
					break;
				}
			}
			else
			{
				if(cwc->stage == 3)
				{
					if(cfg.keepcycletime > 0 && now - cwc->time < cfg.keepcycletime * 60) // we are inside the keepcycletime window
					{
						cwc->stage++; // go to stage 4
						cs_log_dbg(D_CWC, "cyclecheck [Set Stage 4] for Entry: %s Cycletime: %i -> Entry too old but inside the keepcycletime window - no cycletime learning - only check which CW must cycle", cwc_ecmf, cwc->cycletime);
					}
					else
					{
						cwc->stage--; // go one stage back, we are outside the keepcycletime window
						cs_log_dbg(D_CWC, "cyclecheck [Back to Stage 2] for Entry: %s Cycletime: %i -> new cycletime learning", cwc_ecmf, cwc->cycletime);
					}
					memset(cwc->cw, 0, sizeof(cwc->cw)); // fake cw for stage 2/4
					ret = 3;
					cwc->nextcyclecw = 2;
				}
			}
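
			/*
			 * Learning stage machine used below:
			 *   stage 0 -> 1 -> 2: each step needs the measured interval (now - cwc->time)
			 *                      to match the stored cycletime within +/-1s
			 *   stage 2 -> 3:      additionally determine which cw half (CW0/CW1) cycles
			 *   stage 3:           checking is active (handled above)
			 *   stage 4:           cycletime kept via keepcycletime; only re-learn which
			 *                      cw half cycles, then fall back to stage 3
			 */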
			if(upd_entry) // learning stages
			{
				if(now > cwc->locktime)
				{
					int16_t diff = now - cwc->time - cwc->cycletime;
					if(cwc->stage <= 0) // stage 0: update the cw's and the time and store the cycletime
					{
						// if(cwc->cycletime == now - cwc->time) // if we got a stable cycletime we go to stage 1
						if(diff > -2 && diff < 2) // if we got a stable cycletime we go to stage 1
						{
							cwc->cycletime = now - cwc->time;
							cs_log_dbg(D_CWC, "cyclecheck [Set Stage 1] %s Cycletime: %i Lockdiff: %ld", cwc_ecmf, cwc->cycletime, now - cwc->locktime);
							cwc->stage++; // increase stage
						}
						else
						{
							cs_log_dbg(D_CWC, "cyclecheck [Stay on Stage 0] %s Cycletime: %i -> no constant CW-Change-Time", cwc_ecmf, cwc->cycletime);
						}
					}
					else if(cwc->stage == 1) // stage 1 passed: update the cw's and the time and store the cycletime
					{
						// if(cwc->cycletime == now - cwc->time) // if we got a stable cycletime we go to stage 2
						if(diff > -2 && diff < 2) // if we got a stable cycletime we go to stage 2
						{
							cwc->cycletime = now - cwc->time;
							cs_log_dbg(D_CWC, "cyclecheck [Set Stage 2] %s Cycletime: %i Lockdiff: %ld", cwc_ecmf, cwc->cycletime, now - cwc->locktime);
							cwc->stage++; // increase stage
						}
						else
						{
							cs_log_dbg(D_CWC, "cyclecheck [Back to Stage 0] for Entry %s Cycletime: %i -> no constant CW-Change-Time", cwc_ecmf, cwc->cycletime);
							cwc->stage--;
						}
					}
					else if(cwc->stage == 2) // stage 2 passed: update the cw's and compare the cycletime
					{
						// if(cwc->cycletime == now - cwc->time && cwc->cycletime > 0) // if we got a stable cycletime we go to stage 3
						if(diff > -2 && diff < 2 && cwc->cycletime > 0) // if we got a stable cycletime we go to stage 3
						{
							cwc->cycletime = now - cwc->time;
							n = memcmp(cwc->cw, cw, 8);
							m = memcmp(cwc->cw + 8, cw + 8, 8);
							if(n == 0)
							{
								cwc->nextcyclecw = 1;
							}
							if(m == 0)
							{
								cwc->nextcyclecw = 0;
							}
							if(n == m || !checkECMD5CW(cw)) { cwc->nextcyclecw = 2; } // make sure only one cw part cycles and the cw is valid
							if(cwc->nextcyclecw < 2)
							{
								cs_log_dbg(D_CWC, "cyclecheck [Set Stage 3] %s Cycletime: %i Lockdiff: %ld nextCycleCW = CW%i", cwc_ecmf, cwc->cycletime, now - cwc->locktime, cwc->nextcyclecw);
								cs_log_dbg(D_CWC, "cyclecheck [Set Cycletime %i] for Entry: %s -> now we can check CW's", cwc->cycletime, cwc_ecmf);
								cwc->stage = 3; // increase stage
							}
							else
							{
								cs_log_dbg(D_CWC, "cyclecheck [Back to Stage 1] for Entry %s Cycletime: %i -> no CW-Cycle in Learning Stage", cwc_ecmf, cwc->cycletime); // if a server answers only every second ECM, we get a stable cycletime*2 -> but that is wrong
								cwc->stage = 1;
							}
						}
						else
						{
							cs_log_dbg(D_CWC, "cyclecheck [Back to Stage 1] for Entry %s Cycletime: %i -> no constant CW-Change-Time", cwc_ecmf, cwc->cycletime);
							cwc->stage = 1;
						}
					}
					else if(cwc->stage == 4) // we got an early-learned cycletime: use it and only check which cw cycles
					{
						n = memcmp(cwc->cw, cw, 8);
						m = memcmp(cwc->cw + 8, cw + 8, 8);
						if(n == 0)
						{
							cwc->nextcyclecw = 1;
						}
						if(m == 0)
						{
							cwc->nextcyclecw = 0;
						}
						if(n == m || !checkECMD5CW(cw)) { cwc->nextcyclecw = 2; } // make sure only one cw part cycles and the cw is valid
						if(cwc->nextcyclecw < 2)
						{
							cs_log_dbg(D_CWC, "cyclecheck [Back to Stage 3] %s Cycletime: %i Lockdiff: %ld nextCycleCW = CW%i", cwc_ecmf, cwc->cycletime, now - cwc->locktime, cwc->nextcyclecw);
							cs_log_dbg(D_CWC, "cyclecheck [Set old Cycletime %i] for Entry: %s -> now we can check CW's", cwc->cycletime, cwc_ecmf);
							cwc->stage = 3; // go back to stage 3
						}
						else
						{
							cs_log_dbg(D_CWC, "cyclecheck [Stay on Stage %d] for Entry %s Cycletime: %i no cycle detected!", cwc->stage, cwc_ecmf, cwc->cycletime);
							if(cwc->stage4_repeat > 12)
							{
								cwc->stage = 1;
								cs_log_dbg(D_CWC, "cyclecheck [Back to Stage 1] too many cycle failures, maybe the cycletime is not correct %s Cycletime: %i Lockdiff: %ld nextCycleCW = CW%i", cwc_ecmf, cwc->cycletime, now - cwc->locktime, cwc->nextcyclecw);
							}
						}
						cwc->stage4_repeat++;
						ret = ret == 3 ? 3 : 7; // IGN on the first stage 4 pass, otherwise LEARN
					}
					if(cwc->stage == 3)
					{
						cwc->locktime = 0;
						cwc->stage4_repeat = 0;
					}
					else
					{
						if(cwc->stage < 3) { cwc->cycletime = now - cwc->time; }
						cwc->locktime = now + (get_fallbacktimeout(cwc->caid) / 1000);
					}
				}
				else if(cwc->stage != 3)
				{
					cs_log_dbg(D_CWC, "cyclecheck [Ignore this EA] for learning stages because of the locktime EA: %s Lockdiff: %ld", cwc_ecmf, now - cwc->locktime);
					upd_entry = 0;
				}

				if(cwc->stage == 3) // we stay in stage 3, so we only update the time and the cw
				{
					if(now - cwc->time > cwc->cycletime)
					{
						cwc->dyncycletime = now - cwc->time - cwc->cycletime;
					}
					else
					{
						cwc->dyncycletime = 0;
					}
				}
			}
		}
		else
		{
			upd_entry = 0;
			cwc = NULL;
		}
		break;
	}
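
	/*
	 * List maintenance: a channel without an entry gets a fresh node pushed on
	 * top of cw_cc_list; a matched entry was cloned above (the original stays in
	 * the list, only marked ->old for cleanupcwcycle()), and the updated clone is
	 * re-inserted on top.
	 */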
	if(need_new_entry)
	{
		cs_readunlock(__func__, &cwcycle_lock);
		if(cw_cc_list_size <= mcl) // only add when we have space
		{
			struct s_cw_cycle_check *new = NULL;
			if(cs_malloc(&new, sizeof(struct s_cw_cycle_check))) // store the cw on top of the cycle list
			{
				memcpy(new->cw, cw, sizeof(new->cw));
				// the csp cache has no ecm and therefore no md5 hash
				memcpy(new->ecm_md5[0].md5, er->ecmd5, sizeof(er->ecmd5));
#ifdef CS_CACHEEX
				new->ecm_md5[0].csp_hash = er->csp_hash; // we may have no ecm_md5, so the CSP hash could be necessary
#else
				new->ecm_md5[0].csp_hash = 0; // fake CSP hash; we got an ecm_md5, so the CSP hash is not necessary
#endif
				memcpy(new->ecm_md5[0].cw, cw, sizeof(new->cw));
				new->ecmlen = er->ecmlen;
				new->cwc_hist_entry = 0;
				new->caid = er->caid;
				new->provid = er->prid;
				new->sid = er->srvid;
				new->chid = er->chid;
				new->time = now;
				new->locktime = now + (get_fallbacktimeout(er->caid) / 1000);
				new->dyncycletime = 0; // to react to share timings
				// cycletime over cacheex
				new->stage = (cfg.cwcycle_usecwcfromce && cycletime_fr > 0 && next_cw_cycle_fr < 2) ? 3 : 0;
				new->cycletime = (cfg.cwcycle_usecwcfromce && cycletime_fr > 0 && next_cw_cycle_fr < 2) ? cycletime_fr : 99;
				new->nextcyclecw = (cfg.cwcycle_usecwcfromce && cycletime_fr > 0 && next_cw_cycle_fr < 2) ? next_cw_cycle_fr : 2; // 2 = we don't know which cw cycles next; 0 = CW0 cycles next; 1 = CW1 cycles next
				ret = (cycletime_fr > 0 && next_cw_cycle_fr < 2) ? 8 : 6;

				new->prev = new->next = NULL;
				new->old = 0;
				new->stage4_repeat = 0;
				// write lock
				cs_writelock(__func__, &cwcycle_lock);
				if(cw_cc_list) // put the new entry on top
				{
					cw_cc_list->prev = new;
					new->next = cw_cc_list;
				}
				cw_cc_list = new;
				cw_cc_list_size++;
				// write unlock
				cs_writeunlock(__func__, &cwcycle_lock);

				cs_log_dbg(D_CWC, "cyclecheck [Store New Entry] %s Time: %ld Stage: %i Cycletime: %i Locktime: %ld", er_ecmf, new->time, new->stage, new->cycletime, new->locktime);
			}
		}
		else
		{
			cs_log("cyclecheck [Store New Entry] Max list size reached -> don't store new entry; list_size: %i, mcl: %i", cw_cc_list_size, mcl);
		}
	}
	else if(upd_entry && cwc)
	{
		cwc->prev = cwc->next = NULL;
		cwc->old = 0;
		memcpy(cwc->cw, cw, sizeof(cwc->cw));
		cwc->time = now;
		cwc->cwc_hist_entry++;
		if(cwc->cwc_hist_entry > 14) // ring buffer for the md5 history
		{
			cwc->cwc_hist_entry = 0;
		}
		// the csp cache has no ecm and therefore no md5 hash
		memcpy(cwc->ecm_md5[cwc->cwc_hist_entry].md5, er->ecmd5, sizeof(cwc->ecm_md5[0].md5));
#ifdef CS_CACHEEX
		cwc->ecm_md5[cwc->cwc_hist_entry].csp_hash = er->csp_hash;
#else
		cwc->ecm_md5[cwc->cwc_hist_entry].csp_hash = 0; // fake CSP hash for logging
#endif
		memcpy(cwc->ecm_md5[cwc->cwc_hist_entry].cw, cw, sizeof(cwc->cw));
		cwc->ecmlen = er->ecmlen;
		// write lock
		cs_writelock(__func__, &cwcycle_lock);
		if(cw_cc_list) // put the cloned entry on top
		{
			cw_cc_list->prev = cwc;
			cwc->next = cw_cc_list;
		}
		cw_cc_list = cwc;
		cw_cc_list_size++;
		// write unlock
		cs_writeunlock(__func__, &cwcycle_lock);
		cs_log_dbg(D_CWC, "cyclecheck [Update Entry and add on top] %s Time: %ld Stage: %i Cycletime: %i", er_ecmf, cwc->time, cwc->stage, cwc->cycletime);
	}
	else if(cwc)
	{
		NULLFREE(cwc);
	}

	return ret;
}
static void count_ok(struct s_client *client)
{
	if(client)
	{
		client->cwcycledchecked++;
		client->cwcycledok++;
	}
	if(client && client->account)
	{
		client->account->cwcycledchecked++;
		client->account->cwcycledok++;
	}
}

static void count_nok(struct s_client *client)
{
	if(client)
	{
		client->cwcycledchecked++;
		client->cwcyclednok++;
	}
	if(client && client->account)
	{
		client->account->cwcycledchecked++;
		client->account->cwcyclednok++;
	}
}

static void count_ign(struct s_client *client)
{
	if(client)
	{
		client->cwcycledchecked++;
		client->cwcycledign++;
	}
	if(client && client->account)
	{
		client->account->cwcycledchecked++;
		client->account->cwcycledign++;
	}
}
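
/*
 * Entry point of the CW cycle check (besides cleanupcwcycle() the only
 * non-static function in this file). It returns 1 when the ECM answer may be
 * used and 0 when the answer should be dropped (bad cycle with onbadcycle set,
 * or an answer that is too old); the verdict is also written to
 * er->cwc_msg_log and counted per client/account via the helpers above.
 */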
uint8_t checkcwcycle(struct s_client *client, ECM_REQUEST *er, struct s_reader *reader, uchar *cw, int8_t rc, uint8_t cycletime_fr, uint8_t next_cw_cycle_fr)
{
	if(!cfg.cwcycle_check_enable)
		{ return 3; }
	if(client && client->account && client->account->cwc_disable)
		{ return 3; }
	// if (!(rc == E_FOUND) && !(rc == E_CACHEEX))
	if(rc >= E_NOTFOUND)
		{ return 2; }
	if(!cw || !er)
		{ return 2; }
	if(!(chk_ctab_ex(er->caid, &cfg.cwcycle_check_caidtab))) // don't check caids that are not in the list
		{ return 1; } // no match -> leave the check
	if(is_halfCW_er(er))
		{ return 1; } // half cw cycle, checks are done in the ecm handler

	memcpy(er->cw, cw, 16);
	char er_ecmf[ECM_FMT_LEN];
	format_ecm(er, er_ecmf, ECM_FMT_LEN);

	char c_reader[64];
	char user[64];

	if(!streq(username(client), "NULL"))
		{ snprintf(user, sizeof(user), "%s", username(client)); }
	else
		{ snprintf(user, sizeof(user), "---"); }

	if(reader)
		{ snprintf(c_reader, sizeof(c_reader), "%s", reader->label); }
	else
		{ snprintf(c_reader, sizeof(c_reader), "cache"); }

	cs_log_dbg(D_CWC | D_TRACE, "cyclecheck EA: %s rc: %i reader: %s", er_ecmf, rc, c_reader);

	switch(checkcwcycle_int(er, er_ecmf, user, cw, c_reader, cycletime_fr, next_cw_cycle_fr))
	{
		case 0: // CWCYCLE OK
			count_ok(client);
			snprintf(er->cwc_msg_log, sizeof(er->cwc_msg_log), "cwc OK");
			break;

		case 1: // CWCYCLE NOK
			count_nok(client);
			snprintf(er->cwc_msg_log, sizeof(er->cwc_msg_log), "cwc NOK");
			if(cfg.onbadcycle > 0) // ignore the ECM request
			{
				cs_log("cyclecheck [Bad CW Cycle] for: %s %s from: %s -> drop cw (ECM Answer)", user, er_ecmf, c_reader); //D_CWC| D_TRACE
				return 0;
			}
			else // only logging
			{
				cs_log("cyclecheck [Bad CW Cycle] for: %s %s from: %s -> do nothing", user, er_ecmf, c_reader); //D_CWC| D_TRACE
				break;
			}

		case 2: // ECM answer too old
			count_nok(client);
			snprintf(er->cwc_msg_log, sizeof(er->cwc_msg_log), "cwc NOK(old)");
			cs_log("cyclecheck [Bad CW Cycle] for: %s %s from: %s -> ECM Answer is too OLD -> drop cw (ECM Answer)", user, er_ecmf, c_reader); //D_CWC| D_TRACE
			return 0;

		case 3: // cycle check ignored (stage 3 to stage 4)
			count_ign(client);
			snprintf(er->cwc_msg_log, sizeof(er->cwc_msg_log), "cwc IGN");
			break;

		case 4: // same CW
			cs_log_dbg(D_CWC, "cyclecheck [Same CW] for: %s %s -> same CW detected from: %s -> do nothing ", user, er_ecmf, c_reader);
			break;

		case 5: // answer from a fixed fallback reader with a bad cycle
			count_nok(client);
			snprintf(er->cwc_msg_log, sizeof(er->cwc_msg_log), "cwc NOK but IGN (fixed FB)");
			cs_log("cyclecheck [Bad CW Cycle] for: %s %s from: %s -> But Ignored because of answer from Fixed Fallback Reader", user, er_ecmf, c_reader);
			break;

		case 6: // not checked (learning stages for cycletime and CW cycle, stage < 3)
		case 7: // not checked (learning stage for the CW cycle only, stage 4)
			snprintf(er->cwc_msg_log, sizeof(er->cwc_msg_log), "cwc LEARN");
			break;

		case 8: // use the cycle check info from the cacheex source
			count_ok(client);
			snprintf(er->cwc_msg_log, sizeof(er->cwc_msg_log), "cwc OK(CE)");
			break;

		case 9: // CWCYCLE NOK without counting
			snprintf(er->cwc_msg_log, sizeof(er->cwc_msg_log), "cwc NOK");
			if(cfg.onbadcycle > 0) // ignore the ECM request
			{
				cs_log("cyclecheck [Bad CW Cycle already Counted] for: %s %s from: %s -> drop cw (ECM Answer)", user, er_ecmf, c_reader);
				return 0;
			}
			else // only logging
			{
				cs_log("cyclecheck [Bad CW Cycle already Counted] for: %s %s from: %s -> do nothing", user, er_ecmf, c_reader);
				break;
			}
	}

	return 1;
}

#endif