// Import version 1.8.3
// [s390-tools.git] / ziomon / ziorep_collapser.cpp
// blob 5cadd7c9888a301dbf02865862584c915c6c159a
1 /*
2 * FCP report generators
4 * Utility classes to get indices for collapsing data by
5 * variable criteria
7 * Copyright IBM Corp. 2008
8 * Author(s): Stefan Raspl <raspl@linux.vnet.ibm.com>
9 */
#include <string.h>
#include <assert.h>

#include "ziorep_collapser.hpp"

extern "C" {
	#include "ziomon_tools.h"
	#include "zt_common.h"
}

extern const char *toolname;
extern int verbose;
25 Collapser::Collapser(Aggregator criterion)
26 : m_criterion(criterion) {
30 Collapser::~Collapser()
35 Aggregator Collapser::get_criterion() const
37 return m_criterion;
41 void Collapser::add_to_index(struct ident_mapping *new_mapping) const
43 list<struct ident_mapping>::iterator i;
45 for (i = m_idents.begin(); i != m_idents.end()
46 && compare_hctl_idents(&new_mapping->ident, &(*i).ident) >= 0;
47 ++i) ;
48 if (i == m_idents.end() || compare_hctl_idents(&new_mapping->ident, &(*i).ident) != 0)
49 m_idents.insert(i, *new_mapping);
53 void Collapser::add_to_index(struct device_mapping *new_mapping) const
55 list<struct device_mapping>::iterator i;
57 for (i = m_devices.begin(); i != m_devices.end()
58 && new_mapping->device >= (*i).device;
59 ++i) ;
60 if (i == m_devices.end() || (*i).device != new_mapping->device)
61 m_devices.insert(i, *new_mapping);
65 void Collapser::add_to_index(struct host_id_mapping *new_mapping) const
67 list<struct host_id_mapping>::iterator i;
69 for (i = m_host_ids.begin(); i != m_host_ids.end()
70 && new_mapping->h >= (*i).h;
71 ++i) ;
72 if (i == m_host_ids.end() || (*i).h != new_mapping->h)
73 m_host_ids.insert(i, *new_mapping);
77 int Collapser::lookup_index(struct hctl_ident *identifier) const
79 int rc;
81 for (list<struct ident_mapping>::const_iterator i = m_idents.begin();
82 i != m_idents.end() && (rc = compare_hctl_idents(identifier, &(*i).ident)) >= 0; ++i ) {
83 if (rc == 0)
84 return (*i).idx;
87 return -1;
91 int Collapser::lookup_index(__u32 device) const
93 for (list<struct device_mapping>::const_iterator i = m_devices.begin();
94 i != m_devices.end() && device >= (*i).device; ++i ) {
95 if ((*i).device == device)
96 return (*i).idx;
99 return -1;
103 int Collapser::lookup_index_by_host_id(__u32 h) const
105 for (list<struct host_id_mapping>::const_iterator i = m_host_ids.begin();
106 i != m_host_ids.end() && h >= (*i).h; ++i ) {
107 if ((*i).h == h)
108 return (*i).idx;
111 return -1;
115 NoopCollapser::NoopCollapser()
116 : Collapser(none) {
117 m_criterion = none;
121 unsigned int NoopCollapser::get_index(struct hctl_ident *identifier) const
123 int rc;
125 rc = lookup_index(identifier);
126 if (rc < 0) {
127 struct ident_mapping new_mapping;
128 new_mapping.ident = *identifier;
129 new_mapping.idx = m_idents.size();
130 add_to_index(&new_mapping);
131 rc = new_mapping.idx;
134 return rc;
138 unsigned int NoopCollapser::get_index(__u32 device) const
140 int rc;
142 rc = lookup_index(device);
143 if (rc < 0) {
144 struct device_mapping new_mapping;
145 new_mapping.device = device;
146 new_mapping.idx = m_devices.size();
147 add_to_index(&new_mapping);
148 rc = new_mapping.idx;
151 return rc;
156 unsigned int NoopCollapser::get_index_by_host_id(__u32 h) const
158 int rc;
160 rc = lookup_index_by_host_id(h);
161 if (rc < 0) {
162 struct host_id_mapping new_mapping;
163 new_mapping.h = h;
164 new_mapping.idx = m_host_ids.size();
165 add_to_index(&new_mapping);
166 rc = new_mapping.idx;
169 return rc;
173 TotalCollapser::TotalCollapser()
174 : Collapser(all)
179 unsigned int TotalCollapser::get_index(struct hctl_ident* UNUSED(identifier)) const
181 return 0;
185 unsigned int TotalCollapser::get_index(__u32 UNUSED(device)) const
187 return 0;
191 unsigned int TotalCollapser::get_index_by_host_id(__u32 UNUSED(h)) const
193 return 0;
197 AggregationCollapser::AggregationCollapser(ConfigReader &cfg,
198 Aggregator &criterion, DeviceFilter &dev_filt, int *rc)
199 : Collapser(criterion)
201 *rc = 0;
203 * setup everything so we won't ever have a miss when looking up
205 verbose_msg("AggregationCollapser initializing\n");
207 switch(m_criterion) {
208 case chpid:
209 setup_by_chpid(cfg, dev_filt);
210 break;
211 case devno:
212 setup_by_devno(cfg, dev_filt);
213 break;
214 case wwpn:
215 setup_by_wwpn(cfg, dev_filt);
216 break;
217 case multipath_device:
218 *rc = setup_by_multipath(cfg, dev_filt);
219 break;
220 default:
221 assert(false);
224 verbose_msg("AggregationCollapser type %d constructed, mapping by:\n", m_criterion);
225 verbose_msg(" %zu host ids\n", m_host_ids.size());
226 verbose_msg(" %zu hctl devices\n", m_idents.size());
227 verbose_msg(" %zu devices\n", m_devices.size());
231 int AggregationCollapser::get_index_u32(list<__u32> &lst, __u32 chpid)
233 int idx = 0;
235 for (list<__u32>::const_iterator i = lst.begin();
236 i != lst.end(); ++i, ++idx) {
237 if (chpid == *i)
238 return idx;
241 return -1;
245 int AggregationCollapser::get_index_u64(list<__u64> &lst, __u64 chpid)
247 int idx = 0;
249 for (list<__u64>::const_iterator i = lst.begin();
250 i != lst.end(); ++i, ++idx) {
251 if (chpid == *i)
252 return idx;
255 return -1;
259 void AggregationCollapser::setup_by_chpid(ConfigReader &cfg,
260 DeviceFilter &dev_filt)
262 list<__u32> mms;
263 list<struct hctl_ident> idents;
264 list<__u32> host_ids;
265 struct host_id_mapping host_id_mapping;
266 struct device_mapping dev_mapping;
267 struct ident_mapping ide_mapping;
268 __u32 chpid;
269 int rc = 0;
271 // this is our master list for collapsing
272 dev_filt.get_eligible_chpids(cfg, m_reference_values_u32);
274 cfg.get_unique_mms(mms);
275 for (list<__u32>::const_iterator i = mms.begin();
276 i != mms.end(); ++i) {
277 if (!dev_filt.is_eligible_mm(*i))
278 continue;
279 dev_mapping.device = *i;
280 dev_mapping.idx = -1;
281 chpid = cfg.get_chpid_by_mm_internal(*i, &rc);
282 assert(rc == 0);
283 dev_mapping.idx = get_index_u32(m_reference_values_u32, chpid);
284 assert(dev_mapping.idx >= 0);
285 add_to_index(&dev_mapping);
286 vverbose_msg(" map mm %d to chpid %x (index %d)\n", *i,
287 chpid, dev_mapping.idx);
290 cfg.get_unique_host_ids(host_ids);
291 for (list<__u32>::const_iterator i = host_ids.begin();
292 i != host_ids.end(); ++i) {
293 if (!dev_filt.is_eligible_host_id(*i))
294 continue;
295 host_id_mapping.h = *i;
296 host_id_mapping.idx = -1;
297 chpid = cfg.get_chpid_by_host_id(*i, &rc);
298 assert(rc == 0);
299 host_id_mapping.idx = get_index_u32(m_reference_values_u32,
300 chpid);
301 assert(host_id_mapping.idx >= 0);
302 add_to_index(&host_id_mapping);
303 vverbose_msg(" map host id %d to chpid %x (index %d)\n", *i,
304 chpid, host_id_mapping.idx);
307 cfg.get_unique_devices(idents);
308 for (list<struct hctl_ident>::iterator i = idents.begin();
309 i != idents.end(); ++i) {
310 if (!dev_filt.is_eligible_ident(&(*i)))
311 continue;
312 ide_mapping.ident = *i;
313 ide_mapping.idx = -1;
314 chpid = cfg.get_chpid_by_ident(&(*i), &rc);
315 assert(rc == 0);
316 ide_mapping.idx = get_index_u32(m_reference_values_u32, chpid);
317 assert(ide_mapping.idx >= 0);
318 add_to_index(&ide_mapping);
319 vverbose_msg(" map device [%d:%d:%d:%d] to chpid %x (index %d)\n",
320 (*i).host, (*i).channel, (*i).target, (*i).lun,
321 chpid, ide_mapping.idx);
326 void AggregationCollapser::setup_by_devno(ConfigReader &cfg,
327 DeviceFilter &dev_filt)
329 list<__u32> mms;
330 list<struct hctl_ident> idents;
331 list<__u32> host_ids;
332 struct host_id_mapping host_id_mapping;
333 struct device_mapping dev_mapping;
334 struct ident_mapping ide_mapping;
335 __u32 devno;
336 int rc = 0;
338 /* this is our master list for collapsing
340 dev_filt.get_eligible_devnos(cfg, m_reference_values_u32);
342 cfg.get_unique_mms(mms);
343 for (list<__u32>::const_iterator i = mms.begin();
344 i != mms.end(); ++i) {
345 if (!dev_filt.is_eligible_mm(*i))
346 continue;
347 dev_mapping.device = *i;
348 dev_mapping.idx = -1;
349 devno = cfg.get_devno_by_mm_internal(*i, &rc);
350 assert(rc == 0);
351 dev_mapping.idx = get_index_u32(m_reference_values_u32, devno);
352 assert(dev_mapping.idx >= 0);
353 add_to_index(&dev_mapping);
354 vverbose_msg(" map mm %d to bus id 0.0.%x (index %d)\n", *i,
355 devno, dev_mapping.idx);
358 cfg.get_unique_host_ids(host_ids);
359 for (list<__u32>::const_iterator i = host_ids.begin();
360 i != host_ids.end(); ++i) {
361 if (!dev_filt.is_eligible_host_id(*i))
362 continue;
363 host_id_mapping.h = *i;
364 host_id_mapping.idx = -1;
365 devno = cfg.get_devno_by_host_id(*i, &rc);
366 assert(rc == 0);
367 host_id_mapping.idx = get_index_u32(m_reference_values_u32,
368 devno);
369 assert(host_id_mapping.idx >= 0);
370 add_to_index(&host_id_mapping);
371 vverbose_msg(" map host id %d to bus id 0.0.%x"
372 " (index %d)\n", *i, devno, host_id_mapping.idx);
375 cfg.get_unique_devices(idents);
376 for (list<struct hctl_ident>::iterator i = idents.begin();
377 i != idents.end(); ++i) {
378 if (!dev_filt.is_eligible_ident(&(*i)))
379 continue;
380 ide_mapping.ident = *i;
381 ide_mapping.idx = -1;
382 devno = cfg.get_devno_by_ident(&(*i), &rc);
383 assert(rc == 0);
384 ide_mapping.idx = get_index_u32(m_reference_values_u32, devno);
385 assert(ide_mapping.idx >= 0);
386 add_to_index(&ide_mapping);
387 vverbose_msg(" map device [%d:%d:%d:%d] to bus id 0.0.%x"
388 " (index %d)\n",
389 (*i).host, (*i).channel, (*i).target, (*i).lun,
390 devno, ide_mapping.idx);
395 void AggregationCollapser::setup_by_wwpn(ConfigReader &cfg,
396 DeviceFilter &dev_filt)
398 list<__u32> mms;
399 list<struct hctl_ident> idents;
400 struct device_mapping dev_mapping;
401 struct ident_mapping ide_mapping;
402 __u64 wwpn;
403 int rc = 0;
405 // this is our master list for collapsing
406 dev_filt.get_eligible_wwpns(cfg, m_reference_values_u64);
408 cfg.get_unique_mms(mms);
409 for (list<__u32>::const_iterator i = mms.begin();
410 i != mms.end(); ++i) {
411 if (!dev_filt.is_eligible_mm(*i))
412 continue;
413 dev_mapping.device = *i;
414 dev_mapping.idx = -1;
415 wwpn = cfg.get_wwpn_by_mm_internal(*i, &rc);
416 assert(rc == 0);
417 dev_mapping.idx = get_index_u64(m_reference_values_u64, wwpn);
418 assert(dev_mapping.idx >= 0);
419 add_to_index(&dev_mapping);
420 vverbose_msg(" map mm %d to wwpn %016Lx (index %d)\n", *i,
421 (long long unsigned int)wwpn, dev_mapping.idx);
424 cfg.get_unique_devices(idents);
425 for (list<struct hctl_ident>::iterator i = idents.begin();
426 i != idents.end(); ++i) {
427 if (!dev_filt.is_eligible_ident(&(*i)))
428 continue;
429 ide_mapping.ident = *i;
430 ide_mapping.idx = -1;
431 wwpn = cfg.get_wwpn_by_ident(&(*i), &rc);
432 assert(rc == 0);
433 ide_mapping.idx = get_index_u64(m_reference_values_u64, wwpn);
434 assert(ide_mapping.idx >= 0);
435 add_to_index(&ide_mapping);
436 vverbose_msg(" map device [%d:%d:%d:%d] to wwpn %016Lx"
437 " (index %d)\n",
438 (*i).host, (*i).channel, (*i).target, (*i).lun,
439 (long long unsigned int)wwpn, ide_mapping.idx);
444 int AggregationCollapser::setup_by_multipath(ConfigReader &cfg,
445 DeviceFilter &dev_filt)
447 list<__u32> mms;
448 list<struct hctl_ident> idents;
449 struct device_mapping dev_mapping;
450 struct ident_mapping ide_mapping;
451 __u32 mp_mm;
452 int rc = 0, grc = 0;
454 // this is our master list for collapsing
455 dev_filt.get_eligible_mp_mms(cfg, m_reference_values_u32);
457 if (m_reference_values_u32.size() == 0) {
458 fprintf(stderr, "%s: No multipath devices in configuration"
459 " found. Aggregation by multipath devices not"
460 " possible with this data.\n", toolname);
461 return -1;
464 cfg.get_unique_mms(mms);
465 for (list<__u32>::const_iterator i = mms.begin();
466 i != mms.end(); ++i) {
467 if (!dev_filt.is_eligible_mm(*i))
468 continue;
469 dev_mapping.device = *i;
470 dev_mapping.idx = -1;
471 mp_mm = cfg.get_mp_mm_by_mm_internal(*i, &rc);
472 if (mp_mm == 0) {
473 fprintf(stderr, "%s: Device %s is not in a multipath "
474 "group. Please remove via command line options "
475 "and try again.\n", toolname, cfg.get_dev_by_mm_internal(*i, &rc));
476 grc = -1;
477 continue;
479 dev_mapping.idx = get_index_u32(m_reference_values_u32, mp_mm);
480 assert(dev_mapping.idx >= 0);
481 add_to_index(&dev_mapping);
482 vverbose_msg(" map mm %d to mp_mm %x (index %d)\n", *i,
483 mp_mm, dev_mapping.idx);
485 if (grc)
486 return grc;
488 cfg.get_unique_devices(idents);
489 for (list<struct hctl_ident>::iterator i = idents.begin();
490 i != idents.end(); ++i) {
491 if (!dev_filt.is_eligible_ident(&(*i)))
492 continue;
493 ide_mapping.ident = *i;
494 ide_mapping.idx = -1;
495 mp_mm = cfg.get_mp_mm_by_ident(&(*i), &rc);
496 assert(rc == 0);
497 ide_mapping.idx = get_index_u32(m_reference_values_u32, mp_mm);
498 assert(ide_mapping.idx >= 0);
499 add_to_index(&ide_mapping);
500 vverbose_msg(" map device [%d:%d:%d:%d] to mp_mm %x"
501 " (index %d)\n",
502 (*i).host, (*i).channel, (*i).target, (*i).lun,
503 mp_mm, ide_mapping.idx);
506 return grc;
510 unsigned int AggregationCollapser::get_index(struct hctl_ident *identifier) const
512 int rc = lookup_index(identifier);
514 assert(rc >= 0);
516 return rc;
520 unsigned int AggregationCollapser::get_index(__u32 device) const
522 int rc = lookup_index(device);
524 assert(rc >= 0);
526 return rc;
530 unsigned int AggregationCollapser::get_index_by_host_id(__u32 h) const
532 int rc;
534 // since a host_id can host multiple wwpns and multipath devices,
535 // no mapping is possible in these cases.
536 assert(m_criterion != wwpn);
537 assert(m_criterion != multipath_device);
539 rc = lookup_index_by_host_id(h);
541 assert(rc >= 0);
543 return rc;
547 const list<__u32>& AggregationCollapser::get_reference_chpids() const
549 return m_reference_values_u32;
553 const list<__u64>& AggregationCollapser::get_reference_wwpns() const
555 return m_reference_values_u64;
559 const list<__u32>& AggregationCollapser::get_reference_devnos() const
561 return m_reference_values_u32;
565 const list<__u32>& AggregationCollapser::get_reference_mp_mms() const
567 return m_reference_values_u32;