/*
 * FCP report generators
 *
 * Utility classes to get indices for collapsing data by
 * variable criteria
 *
 * Copyright IBM Corp. 2008
 * Author(s): Stefan Raspl <raspl@linux.vnet.ibm.com>
 */
#include <cassert>

#include "ziorep_collapser.hpp"

#include "ziomon_tools.h"
#include "zt_common.h"
21 extern const char *toolname
;
25 Collapser::Collapser(Aggregator criterion
)
26 : m_criterion(criterion
) {
30 Collapser::~Collapser()
35 Aggregator
Collapser::get_criterion() const
41 void Collapser::add_to_index(struct ident_mapping
*new_mapping
) const
43 list
<struct ident_mapping
>::iterator i
;
45 for (i
= m_idents
.begin(); i
!= m_idents
.end()
46 && compare_hctl_idents(&new_mapping
->ident
, &(*i
).ident
) >= 0;
48 if (i
== m_idents
.end() || compare_hctl_idents(&new_mapping
->ident
, &(*i
).ident
) != 0)
49 m_idents
.insert(i
, *new_mapping
);
53 void Collapser::add_to_index(struct device_mapping
*new_mapping
) const
55 list
<struct device_mapping
>::iterator i
;
57 for (i
= m_devices
.begin(); i
!= m_devices
.end()
58 && new_mapping
->device
>= (*i
).device
;
60 if (i
== m_devices
.end() || (*i
).device
!= new_mapping
->device
)
61 m_devices
.insert(i
, *new_mapping
);
65 void Collapser::add_to_index(struct host_id_mapping
*new_mapping
) const
67 list
<struct host_id_mapping
>::iterator i
;
69 for (i
= m_host_ids
.begin(); i
!= m_host_ids
.end()
70 && new_mapping
->h
>= (*i
).h
;
72 if (i
== m_host_ids
.end() || (*i
).h
!= new_mapping
->h
)
73 m_host_ids
.insert(i
, *new_mapping
);
77 int Collapser::lookup_index(struct hctl_ident
*identifier
) const
81 for (list
<struct ident_mapping
>::const_iterator i
= m_idents
.begin();
82 i
!= m_idents
.end() && (rc
= compare_hctl_idents(identifier
, &(*i
).ident
)) >= 0; ++i
) {
91 int Collapser::lookup_index(__u32 device
) const
93 for (list
<struct device_mapping
>::const_iterator i
= m_devices
.begin();
94 i
!= m_devices
.end() && device
>= (*i
).device
; ++i
) {
95 if ((*i
).device
== device
)
103 int Collapser::lookup_index_by_host_id(__u32 h
) const
105 for (list
<struct host_id_mapping
>::const_iterator i
= m_host_ids
.begin();
106 i
!= m_host_ids
.end() && h
>= (*i
).h
; ++i
) {
115 NoopCollapser::NoopCollapser()
121 unsigned int NoopCollapser::get_index(struct hctl_ident
*identifier
) const
125 rc
= lookup_index(identifier
);
127 struct ident_mapping new_mapping
;
128 new_mapping
.ident
= *identifier
;
129 new_mapping
.idx
= m_idents
.size();
130 add_to_index(&new_mapping
);
131 rc
= new_mapping
.idx
;
138 unsigned int NoopCollapser::get_index(__u32 device
) const
142 rc
= lookup_index(device
);
144 struct device_mapping new_mapping
;
145 new_mapping
.device
= device
;
146 new_mapping
.idx
= m_devices
.size();
147 add_to_index(&new_mapping
);
148 rc
= new_mapping
.idx
;
156 unsigned int NoopCollapser::get_index_by_host_id(__u32 h
) const
160 rc
= lookup_index_by_host_id(h
);
162 struct host_id_mapping new_mapping
;
164 new_mapping
.idx
= m_host_ids
.size();
165 add_to_index(&new_mapping
);
166 rc
= new_mapping
.idx
;
173 TotalCollapser::TotalCollapser()
179 unsigned int TotalCollapser::get_index(struct hctl_ident
* UNUSED(identifier
)) const
185 unsigned int TotalCollapser::get_index(__u32
UNUSED(device
)) const
191 unsigned int TotalCollapser::get_index_by_host_id(__u32
UNUSED(h
)) const
197 AggregationCollapser::AggregationCollapser(ConfigReader
&cfg
,
198 Aggregator
&criterion
, DeviceFilter
&dev_filt
, int *rc
)
199 : Collapser(criterion
)
203 * setup everything so we won't ever have a miss when looking up
205 verbose_msg("AggregationCollapser initializing\n");
207 switch(m_criterion
) {
209 setup_by_chpid(cfg
, dev_filt
);
212 setup_by_devno(cfg
, dev_filt
);
215 setup_by_wwpn(cfg
, dev_filt
);
217 case multipath_device
:
218 *rc
= setup_by_multipath(cfg
, dev_filt
);
224 verbose_msg("AggregationCollapser type %d constructed, mapping by:\n", m_criterion
);
225 verbose_msg(" %zu host ids\n", m_host_ids
.size());
226 verbose_msg(" %zu hctl devices\n", m_idents
.size());
227 verbose_msg(" %zu devices\n", m_devices
.size());
231 int AggregationCollapser::get_index_u32(list
<__u32
> &lst
, __u32 chpid
)
235 for (list
<__u32
>::const_iterator i
= lst
.begin();
236 i
!= lst
.end(); ++i
, ++idx
) {
245 int AggregationCollapser::get_index_u64(list
<__u64
> &lst
, __u64 chpid
)
249 for (list
<__u64
>::const_iterator i
= lst
.begin();
250 i
!= lst
.end(); ++i
, ++idx
) {
259 void AggregationCollapser::setup_by_chpid(ConfigReader
&cfg
,
260 DeviceFilter
&dev_filt
)
263 list
<struct hctl_ident
> idents
;
264 list
<__u32
> host_ids
;
265 struct host_id_mapping host_id_mapping
;
266 struct device_mapping dev_mapping
;
267 struct ident_mapping ide_mapping
;
271 // this is our master list for collapsing
272 dev_filt
.get_eligible_chpids(cfg
, m_reference_values_u32
);
274 cfg
.get_unique_mms(mms
);
275 for (list
<__u32
>::const_iterator i
= mms
.begin();
276 i
!= mms
.end(); ++i
) {
277 if (!dev_filt
.is_eligible_mm(*i
))
279 dev_mapping
.device
= *i
;
280 dev_mapping
.idx
= -1;
281 chpid
= cfg
.get_chpid_by_mm_internal(*i
, &rc
);
283 dev_mapping
.idx
= get_index_u32(m_reference_values_u32
, chpid
);
284 assert(dev_mapping
.idx
>= 0);
285 add_to_index(&dev_mapping
);
286 vverbose_msg(" map mm %d to chpid %x (index %d)\n", *i
,
287 chpid
, dev_mapping
.idx
);
290 cfg
.get_unique_host_ids(host_ids
);
291 for (list
<__u32
>::const_iterator i
= host_ids
.begin();
292 i
!= host_ids
.end(); ++i
) {
293 if (!dev_filt
.is_eligible_host_id(*i
))
295 host_id_mapping
.h
= *i
;
296 host_id_mapping
.idx
= -1;
297 chpid
= cfg
.get_chpid_by_host_id(*i
, &rc
);
299 host_id_mapping
.idx
= get_index_u32(m_reference_values_u32
,
301 assert(host_id_mapping
.idx
>= 0);
302 add_to_index(&host_id_mapping
);
303 vverbose_msg(" map host id %d to chpid %x (index %d)\n", *i
,
304 chpid
, host_id_mapping
.idx
);
307 cfg
.get_unique_devices(idents
);
308 for (list
<struct hctl_ident
>::iterator i
= idents
.begin();
309 i
!= idents
.end(); ++i
) {
310 if (!dev_filt
.is_eligible_ident(&(*i
)))
312 ide_mapping
.ident
= *i
;
313 ide_mapping
.idx
= -1;
314 chpid
= cfg
.get_chpid_by_ident(&(*i
), &rc
);
316 ide_mapping
.idx
= get_index_u32(m_reference_values_u32
, chpid
);
317 assert(ide_mapping
.idx
>= 0);
318 add_to_index(&ide_mapping
);
319 vverbose_msg(" map device [%d:%d:%d:%d] to chpid %x (index %d)\n",
320 (*i
).host
, (*i
).channel
, (*i
).target
, (*i
).lun
,
321 chpid
, ide_mapping
.idx
);
326 void AggregationCollapser::setup_by_devno(ConfigReader
&cfg
,
327 DeviceFilter
&dev_filt
)
330 list
<struct hctl_ident
> idents
;
331 list
<__u32
> host_ids
;
332 struct host_id_mapping host_id_mapping
;
333 struct device_mapping dev_mapping
;
334 struct ident_mapping ide_mapping
;
338 /* this is our master list for collapsing
340 dev_filt
.get_eligible_devnos(cfg
, m_reference_values_u32
);
342 cfg
.get_unique_mms(mms
);
343 for (list
<__u32
>::const_iterator i
= mms
.begin();
344 i
!= mms
.end(); ++i
) {
345 if (!dev_filt
.is_eligible_mm(*i
))
347 dev_mapping
.device
= *i
;
348 dev_mapping
.idx
= -1;
349 devno
= cfg
.get_devno_by_mm_internal(*i
, &rc
);
351 dev_mapping
.idx
= get_index_u32(m_reference_values_u32
, devno
);
352 assert(dev_mapping
.idx
>= 0);
353 add_to_index(&dev_mapping
);
354 vverbose_msg(" map mm %d to bus id 0.0.%x (index %d)\n", *i
,
355 devno
, dev_mapping
.idx
);
358 cfg
.get_unique_host_ids(host_ids
);
359 for (list
<__u32
>::const_iterator i
= host_ids
.begin();
360 i
!= host_ids
.end(); ++i
) {
361 if (!dev_filt
.is_eligible_host_id(*i
))
363 host_id_mapping
.h
= *i
;
364 host_id_mapping
.idx
= -1;
365 devno
= cfg
.get_devno_by_host_id(*i
, &rc
);
367 host_id_mapping
.idx
= get_index_u32(m_reference_values_u32
,
369 assert(host_id_mapping
.idx
>= 0);
370 add_to_index(&host_id_mapping
);
371 vverbose_msg(" map host id %d to bus id 0.0.%x"
372 " (index %d)\n", *i
, devno
, host_id_mapping
.idx
);
375 cfg
.get_unique_devices(idents
);
376 for (list
<struct hctl_ident
>::iterator i
= idents
.begin();
377 i
!= idents
.end(); ++i
) {
378 if (!dev_filt
.is_eligible_ident(&(*i
)))
380 ide_mapping
.ident
= *i
;
381 ide_mapping
.idx
= -1;
382 devno
= cfg
.get_devno_by_ident(&(*i
), &rc
);
384 ide_mapping
.idx
= get_index_u32(m_reference_values_u32
, devno
);
385 assert(ide_mapping
.idx
>= 0);
386 add_to_index(&ide_mapping
);
387 vverbose_msg(" map device [%d:%d:%d:%d] to bus id 0.0.%x"
389 (*i
).host
, (*i
).channel
, (*i
).target
, (*i
).lun
,
390 devno
, ide_mapping
.idx
);
395 void AggregationCollapser::setup_by_wwpn(ConfigReader
&cfg
,
396 DeviceFilter
&dev_filt
)
399 list
<struct hctl_ident
> idents
;
400 struct device_mapping dev_mapping
;
401 struct ident_mapping ide_mapping
;
405 // this is our master list for collapsing
406 dev_filt
.get_eligible_wwpns(cfg
, m_reference_values_u64
);
408 cfg
.get_unique_mms(mms
);
409 for (list
<__u32
>::const_iterator i
= mms
.begin();
410 i
!= mms
.end(); ++i
) {
411 if (!dev_filt
.is_eligible_mm(*i
))
413 dev_mapping
.device
= *i
;
414 dev_mapping
.idx
= -1;
415 wwpn
= cfg
.get_wwpn_by_mm_internal(*i
, &rc
);
417 dev_mapping
.idx
= get_index_u64(m_reference_values_u64
, wwpn
);
418 assert(dev_mapping
.idx
>= 0);
419 add_to_index(&dev_mapping
);
420 vverbose_msg(" map mm %d to wwpn %016Lx (index %d)\n", *i
,
421 (long long unsigned int)wwpn
, dev_mapping
.idx
);
424 cfg
.get_unique_devices(idents
);
425 for (list
<struct hctl_ident
>::iterator i
= idents
.begin();
426 i
!= idents
.end(); ++i
) {
427 if (!dev_filt
.is_eligible_ident(&(*i
)))
429 ide_mapping
.ident
= *i
;
430 ide_mapping
.idx
= -1;
431 wwpn
= cfg
.get_wwpn_by_ident(&(*i
), &rc
);
433 ide_mapping
.idx
= get_index_u64(m_reference_values_u64
, wwpn
);
434 assert(ide_mapping
.idx
>= 0);
435 add_to_index(&ide_mapping
);
436 vverbose_msg(" map device [%d:%d:%d:%d] to wwpn %016Lx"
438 (*i
).host
, (*i
).channel
, (*i
).target
, (*i
).lun
,
439 (long long unsigned int)wwpn
, ide_mapping
.idx
);
444 int AggregationCollapser::setup_by_multipath(ConfigReader
&cfg
,
445 DeviceFilter
&dev_filt
)
448 list
<struct hctl_ident
> idents
;
449 struct device_mapping dev_mapping
;
450 struct ident_mapping ide_mapping
;
454 // this is our master list for collapsing
455 dev_filt
.get_eligible_mp_mms(cfg
, m_reference_values_u32
);
457 if (m_reference_values_u32
.size() == 0) {
458 fprintf(stderr
, "%s: No multipath devices in configuration"
459 " found. Aggregation by multipath devices not"
460 " possible with this data.\n", toolname
);
464 cfg
.get_unique_mms(mms
);
465 for (list
<__u32
>::const_iterator i
= mms
.begin();
466 i
!= mms
.end(); ++i
) {
467 if (!dev_filt
.is_eligible_mm(*i
))
469 dev_mapping
.device
= *i
;
470 dev_mapping
.idx
= -1;
471 mp_mm
= cfg
.get_mp_mm_by_mm_internal(*i
, &rc
);
473 fprintf(stderr
, "%s: Device %s is not in a multipath "
474 "group. Please remove via command line options "
475 "and try again.\n", toolname
, cfg
.get_dev_by_mm_internal(*i
, &rc
));
479 dev_mapping
.idx
= get_index_u32(m_reference_values_u32
, mp_mm
);
480 assert(dev_mapping
.idx
>= 0);
481 add_to_index(&dev_mapping
);
482 vverbose_msg(" map mm %d to mp_mm %x (index %d)\n", *i
,
483 mp_mm
, dev_mapping
.idx
);
488 cfg
.get_unique_devices(idents
);
489 for (list
<struct hctl_ident
>::iterator i
= idents
.begin();
490 i
!= idents
.end(); ++i
) {
491 if (!dev_filt
.is_eligible_ident(&(*i
)))
493 ide_mapping
.ident
= *i
;
494 ide_mapping
.idx
= -1;
495 mp_mm
= cfg
.get_mp_mm_by_ident(&(*i
), &rc
);
497 ide_mapping
.idx
= get_index_u32(m_reference_values_u32
, mp_mm
);
498 assert(ide_mapping
.idx
>= 0);
499 add_to_index(&ide_mapping
);
500 vverbose_msg(" map device [%d:%d:%d:%d] to mp_mm %x"
502 (*i
).host
, (*i
).channel
, (*i
).target
, (*i
).lun
,
503 mp_mm
, ide_mapping
.idx
);
510 unsigned int AggregationCollapser::get_index(struct hctl_ident
*identifier
) const
512 int rc
= lookup_index(identifier
);
520 unsigned int AggregationCollapser::get_index(__u32 device
) const
522 int rc
= lookup_index(device
);
530 unsigned int AggregationCollapser::get_index_by_host_id(__u32 h
) const
534 // since a host_id can host multiple wwpns and multipath devices,
535 // no mapping is possible in these cases.
536 assert(m_criterion
!= wwpn
);
537 assert(m_criterion
!= multipath_device
);
539 rc
= lookup_index_by_host_id(h
);
547 const list
<__u32
>& AggregationCollapser::get_reference_chpids() const
549 return m_reference_values_u32
;
553 const list
<__u64
>& AggregationCollapser::get_reference_wwpns() const
555 return m_reference_values_u64
;
559 const list
<__u32
>& AggregationCollapser::get_reference_devnos() const
561 return m_reference_values_u32
;
565 const list
<__u32
>& AggregationCollapser::get_reference_mp_mms() const
567 return m_reference_values_u32
;