/*
 * Device probing and sysfs code.
 *
 * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <linux/ctype.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"

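/*
 * Config ROM directory iteration helpers.  Each directory entry is a
 * single quadlet: the top 8 bits hold the key, the low 24 bits hold the
 * immediate value or offset (see IEEE 1212).
 */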
void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p)
{
        ci->p = p + 1;
        ci->end = ci->p + (p[0] >> 16);
}
EXPORT_SYMBOL(fw_csr_iterator_init);

int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
{
        *key = *ci->p >> 24;
        *value = *ci->p & 0xffffff;

        return ci->p++ < ci->end;
}
EXPORT_SYMBOL(fw_csr_iterator_next);

static int is_fw_unit(struct device *dev);

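/*
 * Compare a unit directory against one entry of a driver's id_table:
 * every field selected in id->match_flags must appear in the directory
 * with a matching value.
 */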
static int match_unit_directory(u32 *directory, const struct fw_device_id *id)
{
        struct fw_csr_iterator ci;
        int key, value, match;

        match = 0;
        fw_csr_iterator_init(&ci, directory);
        while (fw_csr_iterator_next(&ci, &key, &value)) {
                if (key == CSR_VENDOR && value == id->vendor)
                        match |= FW_MATCH_VENDOR;
                if (key == CSR_MODEL && value == id->model)
                        match |= FW_MATCH_MODEL;
                if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
                        match |= FW_MATCH_SPECIFIER_ID;
                if (key == CSR_VERSION && value == id->version)
                        match |= FW_MATCH_VERSION;
        }

        return (match & id->match_flags) == id->match_flags;
}

static int fw_unit_match(struct device *dev, struct device_driver *drv)
{
        struct fw_unit *unit = fw_unit(dev);
        struct fw_driver *driver = fw_driver(drv);
        int i;

        /* We only allow binding to fw_units. */
        if (!is_fw_unit(dev))
                return 0;

        for (i = 0; driver->id_table[i].match_flags != 0; i++) {
                if (match_unit_directory(unit->directory, &driver->id_table[i]))
                        return 1;
        }

        return 0;
}

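/*
 * Compose the modalias string for a unit from the vendor and model
 * entries of the parent device's root directory plus the specifier_id
 * and version entries of the unit directory.
 */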
static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
{
        struct fw_device *device = fw_device(unit->device.parent);
        struct fw_csr_iterator ci;
        int key, value;
        int vendor = 0;
        int model = 0;
        int specifier_id = 0;
        int version = 0;

        fw_csr_iterator_init(&ci, &device->config_rom[5]);
        while (fw_csr_iterator_next(&ci, &key, &value)) {
                switch (key) {
                case CSR_VENDOR:
                        vendor = value;
                        break;
                case CSR_MODEL:
                        model = value;
                        break;
                }
        }

        fw_csr_iterator_init(&ci, unit->directory);
        while (fw_csr_iterator_next(&ci, &key, &value)) {
                switch (key) {
                case CSR_SPECIFIER_ID:
                        specifier_id = value;
                        break;
                case CSR_VERSION:
                        version = value;
                        break;
                }
        }

        return snprintf(buffer, buffer_size,
                        "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
                        vendor, model, specifier_id, version);
}

static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct fw_unit *unit = fw_unit(dev);
        char modalias[64];

        get_modalias(unit, modalias, sizeof(modalias));

        if (add_uevent_var(env, "MODALIAS=%s", modalias))
                return -ENOMEM;

        return 0;
}

struct bus_type fw_bus_type = {
        .name = "firewire",
        .match = fw_unit_match,
};
EXPORT_SYMBOL(fw_bus_type);

static void fw_device_release(struct device *dev)
{
        struct fw_device *device = fw_device(dev);
        struct fw_card *card = device->card;
        unsigned long flags;

        /*
         * Take the card lock so we don't set this to NULL while a
         * FW_NODE_UPDATED callback is being handled or while the
         * bus manager work looks at this node.
         */
        spin_lock_irqsave(&card->lock, flags);
        device->node->data = NULL;
        spin_unlock_irqrestore(&card->lock, flags);

        fw_node_put(device->node);
        kfree(device->config_rom);
        kfree(device);
        fw_card_put(card);
}

int fw_device_enable_phys_dma(struct fw_device *device)
{
        int generation = device->generation;

        /* device->node_id, accessed below, must not be older than generation */
        smp_rmb();

        return device->card->driver->enable_phys_dma(device->card,
                                                     device->node_id,
                                                     generation);
}
EXPORT_SYMBOL(fw_device_enable_phys_dma);

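/*
 * Config ROM based sysfs attributes.  Each attribute is bound to a CSR
 * key and read either from the unit directory (for unit devices) or
 * from the root directory (for node devices).
 */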
struct config_rom_attribute {
        struct device_attribute attr;
        u32 key;
};

static ssize_t show_immediate(struct device *dev,
                              struct device_attribute *dattr, char *buf)
{
        struct config_rom_attribute *attr =
                container_of(dattr, struct config_rom_attribute, attr);
        struct fw_csr_iterator ci;
        u32 *dir;
        int key, value, ret = -ENOENT;

        down_read(&fw_device_rwsem);

        if (is_fw_unit(dev))
                dir = fw_unit(dev)->directory;
        else
                dir = fw_device(dev)->config_rom + 5;

        fw_csr_iterator_init(&ci, dir);
        while (fw_csr_iterator_next(&ci, &key, &value))
                if (attr->key == key) {
                        ret = snprintf(buf, buf ? PAGE_SIZE : 0,
                                       "0x%06x\n", value);
                        break;
                }

        up_read(&fw_device_rwsem);

        return ret;
}

#define IMMEDIATE_ATTR(name, key)                               \
        { __ATTR(name, S_IRUGO, show_immediate, NULL), key }

static ssize_t show_text_leaf(struct device *dev,
                              struct device_attribute *dattr, char *buf)
{
        struct config_rom_attribute *attr =
                container_of(dattr, struct config_rom_attribute, attr);
        struct fw_csr_iterator ci;
        u32 *dir, *block = NULL, *p, *end;
        int length, key, value, last_key = 0, ret = -ENOENT;
        char *b;

        down_read(&fw_device_rwsem);

        if (is_fw_unit(dev))
                dir = fw_unit(dev)->directory;
        else
                dir = fw_device(dev)->config_rom + 5;

        fw_csr_iterator_init(&ci, dir);
        while (fw_csr_iterator_next(&ci, &key, &value)) {
                if (attr->key == last_key &&
                    key == (CSR_DESCRIPTOR | CSR_LEAF))
                        block = ci.p - 1 + value;
                last_key = key;
        }

        if (block == NULL)
                goto out;

        length = min(block[0] >> 16, 256U);
        if (length < 3)
                goto out;

        if (block[1] != 0 || block[2] != 0)
                /* Unknown encoding. */
                goto out;

        if (buf == NULL) {
                ret = length * 4;
                goto out;
        }

        b = buf;
        end = &block[length + 1];
        for (p = &block[3]; p < end; p++, b += 4)
                * (u32 *) b = (__force u32) __cpu_to_be32(*p);

        /* Strip trailing whitespace and add newline. */
        while (b--, (isspace(*b) || *b == '\0') && b > buf);
        strcpy(b + 1, "\n");
        ret = b + 2 - buf;
 out:
        up_read(&fw_device_rwsem);

        return ret;
}

#define TEXT_LEAF_ATTR(name, key)                               \
        { __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }

static struct config_rom_attribute config_rom_attributes[] = {
        IMMEDIATE_ATTR(vendor, CSR_VENDOR),
        IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
        IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
        IMMEDIATE_ATTR(version, CSR_VERSION),
        IMMEDIATE_ATTR(model, CSR_MODEL),
        TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
        TEXT_LEAF_ATTR(model_name, CSR_MODEL),
        TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
};

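/*
 * Build the attribute group for a device or unit: always include the
 * generic attributes passed in, and add only those config ROM
 * attributes whose keys are actually present (probed by calling
 * ->show() with a NULL buffer).
 */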
static void init_fw_attribute_group(struct device *dev,
                                    struct device_attribute *attrs,
                                    struct fw_attribute_group *group)
{
        struct device_attribute *attr;
        int i, j;

        for (j = 0; attrs[j].attr.name != NULL; j++)
                group->attrs[j] = &attrs[j].attr;

        for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
                attr = &config_rom_attributes[i].attr;
                if (attr->show(dev, attr, NULL) < 0)
                        continue;
                group->attrs[j++] = &attr->attr;
        }

        BUG_ON(j >= ARRAY_SIZE(group->attrs));
        group->attrs[j++] = NULL;
        group->groups[0] = &group->group;
        group->groups[1] = NULL;
        group->group.attrs = group->attrs;
        dev->groups = group->groups;
}

static ssize_t modalias_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct fw_unit *unit = fw_unit(dev);
        int length;

        length = get_modalias(unit, buf, PAGE_SIZE);
        strcpy(buf + length, "\n");

        return length + 1;
}

static ssize_t rom_index_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct fw_device *device = fw_device(dev->parent);
        struct fw_unit *unit = fw_unit(dev);

        return snprintf(buf, PAGE_SIZE, "%d\n",
                        (int)(unit->directory - device->config_rom));
}

static struct device_attribute fw_unit_attributes[] = {
        __ATTR_RO(modalias),
        __ATTR_RO(rom_index),
        __ATTR_NULL,
};

static ssize_t config_rom_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct fw_device *device = fw_device(dev);
        size_t length;

        down_read(&fw_device_rwsem);
        length = device->config_rom_length * 4;
        memcpy(buf, device->config_rom, length);
        up_read(&fw_device_rwsem);

        return length;
}

static ssize_t guid_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct fw_device *device = fw_device(dev);
        int ret;

        down_read(&fw_device_rwsem);
        ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
                       device->config_rom[3], device->config_rom[4]);
        up_read(&fw_device_rwsem);

        return ret;
}

static struct device_attribute fw_device_attributes[] = {
        __ATTR_RO(config_rom),
        __ATTR_RO(guid),
        __ATTR_NULL,
};

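/*
 * Read a single config ROM quadlet from the node with a blocking
 * quadlet read request in the given bus generation.
 */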
static int read_rom(struct fw_device *device,
                    int generation, int index, u32 *data)
{
        int rcode;

        /* device->node_id, accessed below, must not be older than generation */
        smp_rmb();

        rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
                        device->node_id, generation, device->max_speed,
                        (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
                        data, 4);

        return rcode;
}

#define READ_BIB_ROM_SIZE	256
#define READ_BIB_STACK_SIZE	16

/*
 * Read the bus info block, perform a speed probe, and read all of the rest of
 * the config ROM.  We do all this with a cached bus generation.  If the bus
 * generation changes under us, read_bus_info_block will fail and get retried.
 * It's better to start all over in this case because the node from which we
 * are reading the ROM may have changed the ROM during the reset.
 */
static int read_bus_info_block(struct fw_device *device, int generation)
{
        u32 *rom, *stack, *old_rom, *new_rom;
        u32 sp, key;
        int i, end, length, ret = -1;

        rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
                      sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
        if (rom == NULL)
                return -ENOMEM;

        stack = &rom[READ_BIB_ROM_SIZE];

        device->max_speed = SCODE_100;

        /* First read the bus info block. */
        for (i = 0; i < 5; i++) {
                if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
                        goto out;
                /*
                 * As per IEEE1212 7.2, during power-up, devices can
                 * reply with a 0 for the first quadlet of the config
                 * rom to indicate that they are booting (for example,
                 * if the firmware is on the disk of an external
                 * harddisk).  In that case we just fail, and the
                 * retry mechanism will try again later.
                 */
                if (i == 0 && rom[i] == 0)
                        goto out;
        }

        device->max_speed = device->node->max_speed;

        /*
         * Determine the speed of
         *   - devices with link speed less than PHY speed,
         *   - devices with 1394b PHY (unless only connected to 1394a PHYs),
         *   - all devices if there are 1394b repeaters.
         * Note, we cannot use the bus info block's link_spd as starting point
         * because some buggy firmwares set it lower than necessary and because
         * 1394-1995 nodes do not have the field.
         */
        if ((rom[2] & 0x7) < device->max_speed ||
            device->max_speed == SCODE_BETA ||
            device->card->beta_repeaters_present) {
                u32 dummy;

                /* for S1600 and S3200 */
                if (device->max_speed == SCODE_BETA)
                        device->max_speed = device->card->link_speed;

                while (device->max_speed > SCODE_100) {
                        if (read_rom(device, generation, 0, &dummy) ==
                            RCODE_COMPLETE)
                                break;
                        device->max_speed--;
                }
        }

        /*
         * Now parse the config rom.  The config rom is a recursive
         * directory structure so we parse it using a stack of
         * references to the blocks that make up the structure.  We
         * push a reference to the root directory on the stack to
         * start things off.
         */
        length = i;
        sp = 0;
        stack[sp++] = 0xc0000005;
        while (sp > 0) {
                /*
                 * Pop the next block reference of the stack.  The
                 * lower 24 bits is the offset into the config rom,
                 * the upper 8 bits are the type of the reference the
                 * block.
                 */
                key = stack[--sp];
                i = key & 0xffffff;
                if (i >= READ_BIB_ROM_SIZE)
                        /*
                         * The reference points outside the standard
                         * config rom area, something's fishy.
                         */
                        goto out;

                /* Read header quadlet for the block to get the length. */
                if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
                        goto out;
                end = i + (rom[i] >> 16) + 1;
                i++;
                if (end > READ_BIB_ROM_SIZE)
                        /*
                         * This block extends outside standard config
                         * area (and the array we're reading it
                         * into).  That's broken, so ignore this
                         * directory.
                         */
                        continue;

                /*
                 * Now read in the block.  If this is a directory
                 * block, check the entries as we read them to see if
                 * it references another block, and push it in that case.
                 */
                while (i < end) {
                        if (read_rom(device, generation, i, &rom[i]) !=
                            RCODE_COMPLETE)
                                goto out;
                        if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
                            sp < READ_BIB_STACK_SIZE)
                                stack[sp++] = i + rom[i];
                        i++;
                }
                if (length < i)
                        length = i;
        }

        old_rom = device->config_rom;
        new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
        if (new_rom == NULL)
                goto out;

        down_write(&fw_device_rwsem);
        device->config_rom = new_rom;
        device->config_rom_length = length;
        up_write(&fw_device_rwsem);

        kfree(old_rom);
        ret = 0;
        device->cmc = rom[2] & 1 << 30;
 out:
        kfree(rom);

        return ret;
}

static void fw_unit_release(struct device *dev)
{
        struct fw_unit *unit = fw_unit(dev);

        kfree(unit);
}

static struct device_type fw_unit_type = {
        .uevent		= fw_unit_uevent,
        .release	= fw_unit_release,
};

static int is_fw_unit(struct device *dev)
{
        return dev->type == &fw_unit_type;
}

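/*
 * Walk the root directory and register one child fw_unit device for
 * every unit directory found, so that protocol drivers can bind to it.
 */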
static void create_units(struct fw_device *device)
{
        struct fw_csr_iterator ci;
        struct fw_unit *unit;
        int key, value, i;

        i = 0;
        fw_csr_iterator_init(&ci, &device->config_rom[5]);
        while (fw_csr_iterator_next(&ci, &key, &value)) {
                if (key != (CSR_UNIT | CSR_DIRECTORY))
                        continue;

                /*
                 * Get the address of the unit directory and try to
                 * match the drivers id_tables against it.
                 */
                unit = kzalloc(sizeof(*unit), GFP_KERNEL);
                if (unit == NULL) {
                        fw_error("failed to allocate memory for unit\n");
                        continue;
                }

                unit->directory = ci.p + value - 1;
                unit->device.bus = &fw_bus_type;
                unit->device.type = &fw_unit_type;
                unit->device.parent = &device->device;
                dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);

                init_fw_attribute_group(&unit->device,
                                        fw_unit_attributes,
                                        &unit->attribute_group);
                if (device_register(&unit->device) < 0)
                        goto skip_unit;

                continue;

        skip_unit:
                kfree(unit);
        }
}

static int shutdown_unit(struct device *device, void *data)
{
        device_unregister(device);

        return 0;
}

/*
 * fw_device_rwsem acts as dual purpose mutex:
 *  - serializes accesses to fw_device_idr,
 *  - serializes accesses to fw_device.config_rom/.config_rom_length and
 *    fw_unit.directory, unless those accesses happen at safe occasions
 */
DECLARE_RWSEM(fw_device_rwsem);

DEFINE_IDR(fw_device_idr);

struct fw_device *fw_device_get_by_devt(dev_t devt)
{
        struct fw_device *device;

        down_read(&fw_device_rwsem);
        device = idr_find(&fw_device_idr, MINOR(devt));
        if (device)
                fw_device_get(device);
        up_read(&fw_device_rwsem);

        return device;
}

/*
 * These defines control the retry behavior for reading the config
 * rom.  It shouldn't be necessary to tweak these; if the device
 * doesn't respond to a config rom read within 10 seconds, it's not
 * going to respond at all.  As for the initial delay, a lot of
 * devices will be able to respond within half a second after bus
 * reset.  On the other hand, it's not really worth being more
 * aggressive than that, since it scales pretty well; if 10 devices
 * are plugged in, they're all getting read within one second.
 */

#define MAX_RETRIES	10
#define RETRY_DELAY	(3 * HZ)
#define INITIAL_DELAY	(HZ / 2)
#define SHUTDOWN_DELAY	(2 * HZ)

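/*
 * Tear down a device that went away: remove the character device,
 * unregister the units and the device itself, and drop the minor from
 * fw_device_idr.  The teardown is delayed by SHUTDOWN_DELAY so that a
 * quickly rediscovered device can be revived instead.
 */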
static void fw_device_shutdown(struct work_struct *work)
{
        struct fw_device *device =
                container_of(work, struct fw_device, work.work);
        int minor = MINOR(device->device.devt);

        if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
            && !list_empty(&device->card->link)) {
                schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
                return;
        }

        if (atomic_cmpxchg(&device->state,
                           FW_DEVICE_GONE,
                           FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
                return;

        fw_device_cdev_remove(device);
        device_for_each_child(&device->device, NULL, shutdown_unit);
        device_unregister(&device->device);

        down_write(&fw_device_rwsem);
        idr_remove(&fw_device_idr, minor);
        up_write(&fw_device_rwsem);

        fw_device_put(device);
}

static struct device_type fw_device_type = {
        .release	= fw_device_release,
};

static void fw_device_update(struct work_struct *work);

/*
 * If a device was pending for deletion because its node went away but its
 * bus info block and root directory header matches that of a newly discovered
 * device, revive the existing fw_device.
 * The newly allocated fw_device becomes obsolete instead.
 */
static int lookup_existing_device(struct device *dev, void *data)
{
        struct fw_device *old = fw_device(dev);
        struct fw_device *new = data;
        struct fw_card *card = new->card;
        int match = 0;

        down_read(&fw_device_rwsem);	/* serialize config_rom access */
        spin_lock_irq(&card->lock);	/* serialize node access */

        if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
            atomic_cmpxchg(&old->state,
                           FW_DEVICE_GONE,
                           FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
                struct fw_node *current_node = new->node;
                struct fw_node *obsolete_node = old->node;

                new->node = obsolete_node;
                new->node->data = new;
                old->node = current_node;
                old->node->data = old;

                old->max_speed = new->max_speed;
                old->node_id = current_node->node_id;
                smp_wmb();  /* update node_id before generation */
                old->generation = card->generation;
                old->config_rom_retries = 0;
                fw_notify("rediscovered device %s\n", dev_name(dev));

                PREPARE_DELAYED_WORK(&old->work, fw_device_update);
                schedule_delayed_work(&old->work, 0);

                if (current_node == card->root_node)
                        fw_schedule_bm_work(card, 0);

                match = 1;
        }

        spin_unlock_irq(&card->lock);
        up_read(&fw_device_rwsem);

        return match;
}

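/*
 * Read the new node's config ROM, allocate a character device minor,
 * register the device on the firewire bus and create its units.  On a
 * failed ROM read the work is rescheduled with RETRY_DELAY until
 * MAX_RETRIES is exhausted.
 */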
static void fw_device_init(struct work_struct *work)
{
        struct fw_device *device =
                container_of(work, struct fw_device, work.work);
        struct device *revived_dev;
        int minor, err;

        /*
         * All failure paths here set node->data to NULL, so that we
         * don't try to do device_for_each_child() on a kfree()'d
         * device.
         */

        if (read_bus_info_block(device, device->generation) < 0) {
                if (device->config_rom_retries < MAX_RETRIES &&
                    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
                        device->config_rom_retries++;
                        schedule_delayed_work(&device->work, RETRY_DELAY);
                } else {
                        fw_notify("giving up on config rom for node id %x\n",
                                  device->node_id);
                        if (device->node == device->card->root_node)
                                fw_schedule_bm_work(device->card, 0);
                        fw_device_release(&device->device);
                }
                return;
        }

        revived_dev = device_find_child(device->card->device,
                                        device, lookup_existing_device);
        if (revived_dev) {
                put_device(revived_dev);
                fw_device_release(&device->device);

                return;
        }

        device_initialize(&device->device);

        fw_device_get(device);
        down_write(&fw_device_rwsem);
        err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
              idr_get_new(&fw_device_idr, device, &minor) :
              -ENOMEM;
        up_write(&fw_device_rwsem);

        if (err < 0)
                goto error;

        device->device.bus = &fw_bus_type;
        device->device.type = &fw_device_type;
        device->device.parent = device->card->device;
        device->device.devt = MKDEV(fw_cdev_major, minor);
        dev_set_name(&device->device, "fw%d", minor);

        init_fw_attribute_group(&device->device,
                                fw_device_attributes,
                                &device->attribute_group);
        if (device_add(&device->device)) {
                fw_error("Failed to add device.\n");
                goto error_with_cdev;
        }

        create_units(device);

        /*
         * Transition the device to running state.  If it got pulled
         * out from under us while we did the initialization work, we
         * have to shut down the device again here.  Normally, though,
         * fw_node_event will be responsible for shutting it down when
         * necessary.  We have to use the atomic cmpxchg here to avoid
         * racing with the FW_NODE_DESTROYED case in
         * fw_node_event().
         */
        if (atomic_cmpxchg(&device->state,
                           FW_DEVICE_INITIALIZING,
                           FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
                PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
                schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
        } else {
                if (device->config_rom_retries)
                        fw_notify("created device %s: GUID %08x%08x, S%d00, "
                                  "%d config ROM retries\n",
                                  dev_name(&device->device),
                                  device->config_rom[3], device->config_rom[4],
                                  1 << device->max_speed,
                                  device->config_rom_retries);
                else
                        fw_notify("created device %s: GUID %08x%08x, S%d00\n",
                                  dev_name(&device->device),
                                  device->config_rom[3], device->config_rom[4],
                                  1 << device->max_speed);
                device->config_rom_retries = 0;
        }

        /*
         * Reschedule the IRM work if we just finished reading the
         * root node config rom.  If this races with a bus reset we
         * just end up running the IRM work a couple of extra times -
         * pretty harmless.
         */
        if (device->node == device->card->root_node)
                fw_schedule_bm_work(device->card, 0);

        return;

 error_with_cdev:
        down_write(&fw_device_rwsem);
        idr_remove(&fw_device_idr, minor);
        up_write(&fw_device_rwsem);
 error:
        fw_device_put(device);		/* fw_device_idr's reference */

        put_device(&device->device);	/* our reference */
}

static int update_unit(struct device *dev, void *data)
{
        struct fw_unit *unit = fw_unit(dev);
        struct fw_driver *driver = (struct fw_driver *)dev->driver;

        if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
                down(&dev->sem);
                driver->update(unit);
                up(&dev->sem);
        }

        return 0;
}

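/*
 * Propagate a topology change to the character device layer and to all
 * child units whose drivers implement an update hook.
 */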
static void fw_device_update(struct work_struct *work)
{
        struct fw_device *device =
                container_of(work, struct fw_device, work.work);

        fw_device_cdev_update(device);
        device_for_each_child(&device->device, NULL, update_unit);
}

enum {
        REREAD_BIB_ERROR,
        REREAD_BIB_GONE,
        REREAD_BIB_UNCHANGED,
        REREAD_BIB_CHANGED,
};

/* Reread and compare bus info block and header of root directory */
static int reread_bus_info_block(struct fw_device *device, int generation)
{
        u32 q;
        int i;

        for (i = 0; i < 6; i++) {
                if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
                        return REREAD_BIB_ERROR;

                if (i == 0 && q == 0)
                        return REREAD_BIB_GONE;

                if (i > device->config_rom_length || q != device->config_rom[i])
                        return REREAD_BIB_CHANGED;
        }

        return REREAD_BIB_UNCHANGED;
}

static void fw_device_refresh(struct work_struct *work)
{
        struct fw_device *device =
                container_of(work, struct fw_device, work.work);
        struct fw_card *card = device->card;
        int node_id = device->node_id;

        switch (reread_bus_info_block(device, device->generation)) {
        case REREAD_BIB_ERROR:
                if (device->config_rom_retries < MAX_RETRIES / 2 &&
                    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
                        device->config_rom_retries++;
                        schedule_delayed_work(&device->work, RETRY_DELAY / 2);

                        return;
                }
                goto give_up;

        case REREAD_BIB_GONE:
                goto gone;

        case REREAD_BIB_UNCHANGED:
                if (atomic_cmpxchg(&device->state,
                                   FW_DEVICE_INITIALIZING,
                                   FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
                        goto gone;

                fw_device_update(work);
                device->config_rom_retries = 0;
                goto out;

        case REREAD_BIB_CHANGED:
                break;
        }

        /*
         * Something changed.  We keep things simple and don't investigate
         * further.  We just destroy all previous units and create new ones.
         */
        device_for_each_child(&device->device, NULL, shutdown_unit);

        if (read_bus_info_block(device, device->generation) < 0) {
                if (device->config_rom_retries < MAX_RETRIES &&
                    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
                        device->config_rom_retries++;
                        schedule_delayed_work(&device->work, RETRY_DELAY);

                        return;
                }
                goto give_up;
        }

        create_units(device);

        if (atomic_cmpxchg(&device->state,
                           FW_DEVICE_INITIALIZING,
                           FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
                goto gone;

        fw_notify("refreshed device %s\n", dev_name(&device->device));
        device->config_rom_retries = 0;
        goto out;

 give_up:
        fw_notify("giving up on refresh of device %s\n",
                  dev_name(&device->device));
 gone:
        atomic_set(&device->state, FW_DEVICE_GONE);
        PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
        schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
 out:
        if (node_id == card->root_node->node_id)
                fw_schedule_bm_work(card, 0);
}

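/*
 * Entry point called by the topology code for every node affected by a
 * bus reset: create, refresh, update or shut down the fw_device bound
 * to the node, deferring the heavy lifting to delayed work.
 */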
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
        struct fw_device *device;

        switch (event) {
        case FW_NODE_CREATED:
        case FW_NODE_LINK_ON:
                if (!node->link_on)
                        break;
 create:
                device = kzalloc(sizeof(*device), GFP_ATOMIC);
                if (device == NULL)
                        break;

                /*
                 * Do minimal initialization of the device here, the
                 * rest will happen in fw_device_init().
                 *
                 * Attention:  A lot of things, even fw_device_get(),
                 * cannot be done before fw_device_init() finished!
                 * You can basically just check device->state and
                 * schedule work until then, but only while holding
                 * card->lock.
                 */
                atomic_set(&device->state, FW_DEVICE_INITIALIZING);
                device->card = fw_card_get(card);
                device->node = fw_node_get(node);
                device->node_id = node->node_id;
                device->generation = card->generation;
                mutex_init(&device->client_list_mutex);
                INIT_LIST_HEAD(&device->client_list);

                /*
                 * Set the node data to point back to this device so
                 * FW_NODE_UPDATED callbacks can update the node_id
                 * and generation for the device.
                 */
                node->data = device;

                /*
                 * Many devices are slow to respond after bus resets,
                 * especially if they are bus powered and go through
                 * power-up after getting plugged in.  We schedule the
                 * first config rom scan half a second after bus reset.
                 */
                INIT_DELAYED_WORK(&device->work, fw_device_init);
                schedule_delayed_work(&device->work, INITIAL_DELAY);
                break;

        case FW_NODE_INITIATED_RESET:
                device = node->data;
                if (device == NULL)
                        goto create;

                device->node_id = node->node_id;
                smp_wmb();  /* update node_id before generation */
                device->generation = card->generation;
                if (atomic_cmpxchg(&device->state,
                            FW_DEVICE_RUNNING,
                            FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
                        PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
                        schedule_delayed_work(&device->work,
                                node == card->local_node ? 0 : INITIAL_DELAY);
                }
                break;

        case FW_NODE_UPDATED:
                if (!node->link_on || node->data == NULL)
                        break;

                device = node->data;
                device->node_id = node->node_id;
                smp_wmb();  /* update node_id before generation */
                device->generation = card->generation;
                if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
                        PREPARE_DELAYED_WORK(&device->work, fw_device_update);
                        schedule_delayed_work(&device->work, 0);
                }
                break;

        case FW_NODE_DESTROYED:
        case FW_NODE_LINK_OFF:
                if (!node->data)
                        break;

                /*
                 * Destroy the device associated with the node.  There
                 * are two cases here: either the device is fully
                 * initialized (FW_DEVICE_RUNNING) or we're in the
                 * process of reading its config rom
                 * (FW_DEVICE_INITIALIZING).  If it is fully
                 * initialized we can reuse device->work to schedule a
                 * full fw_device_shutdown().  If not, there's work
                 * scheduled to read its config rom, and we just put
                 * the device in shutdown state to have that code fail
                 * to create the device.
                 */
                device = node->data;
                if (atomic_xchg(&device->state,
                                FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
                        PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
                        schedule_delayed_work(&device->work,
                                list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
                }
                break;
        }
}