/*
 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
 *
 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
 *                    Steve Kinneberg <kinnebergsteve@acmsystems.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *    1. Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *    2. Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *    3. The name of the author may not be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* TODO List:
 * - Verify interface consistency: i.e., public functions that take a size
 *   parameter expect size to be in bytes.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/bug.h>
#include <asm/byteorder.h>

#include "csr1212.h"
/* Permitted key type for each key id */
#define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
#define __L (1 << CSR1212_KV_TYPE_LEAF)
static const u8 csr1212_key_id_type_map[0x30] = {
	__C,			/* used by Apple iSight */
	__D | __L,		/* Descriptor */
	__I | __D | __L,	/* Bus_Dependent_Info */
	__I | __D | __L,	/* Vendor */
	__I,			/* Hardware_Version */
	0, 0,			/* Reserved */
	__D | __L | __I,	/* Module */
	__I, 0, 0, 0,		/* used by Apple iSight, Reserved */
	__I,			/* Node_Capabilities */
	__L,			/* EUI_64 */
	0, 0, 0,		/* Reserved */
	__D,			/* Unit */
	__I,			/* Specifier_ID */
	__I,			/* Version */
	__I | __C | __D | __L,	/* Dependent_Info */
	__L,			/* Unit_Location */
	0,			/* Reserved */
	__I,			/* Model */
	__D,			/* Instance */
	__L,			/* Keyword */
	__D,			/* Feature */
	__L,			/* Extended_ROM */
	__I,			/* Extended_Key_Specifier_ID */
	__I,			/* Extended_Key */
	__I | __C | __D | __L,	/* Extended_Data */
	__L,			/* Modifiable_Descriptor */
	__I,			/* Directory_ID */
	__I,			/* Revision */
};
#undef __I
#undef __C
#undef __D
#undef __L
#define quads_to_bytes(_q) ((_q) * sizeof(u32))
#define bytes_to_quads(_b) (((_b) + sizeof(u32) - 1) / sizeof(u32))

static void free_keyval(struct csr1212_keyval *kv)
{
	if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
	    (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
		CSR1212_FREE(kv->value.leaf.data);

	CSR1212_FREE(kv);
}
static u16 csr1212_crc16(const u32 *buffer, size_t length)
{
	int shift;
	u32 data;
	u16 sum, crc = 0;

	for (; length; length--) {
		data = be32_to_cpu(*buffer);
		buffer++;
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return cpu_to_be16(crc);
}

/* Microsoft computes the CRC with the bytes in reverse order. */
static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
{
	int shift;
	u32 data;
	u16 sum, crc = 0;

	for (; length; length--) {
		data = le32_to_cpu(*buffer);
		buffer++;
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return cpu_to_be16(crc);
}
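
/*
 * Usage sketch (illustrative only, not part of the original driver): how a
 * ROM block's CRC-16 would be computed with the helper above.
 * csr1212_crc16() takes big-endian quadlets, a length in quadlets, and
 * returns the CRC already converted with cpu_to_be16() so it can be stored
 * directly in a keyval image.  The quadlet values below are arbitrary
 * placeholders.
 */
#if 0
static u16 example_block_crc16(void)
{
	u32 block[2];

	block[0] = cpu_to_be32(0x0c0083c0);	/* example directory entry */
	block[1] = cpu_to_be32(0x03123456);	/* example directory entry */

	return csr1212_crc16(block, ARRAY_SIZE(block));
}
#endif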
static struct csr1212_dentry *
csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
{
	struct csr1212_dentry *pos;

	for (pos = dir->value.directory.dentries_head;
	     pos != NULL; pos = pos->next)
		if (pos->kv == kv)
			return pos;
	return NULL;
}

static struct csr1212_keyval *
csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
{
	struct csr1212_keyval *kv;

	for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next)
		if (kv->offset == offset)
			return kv;
	return NULL;
}


/* Creation Routines */
struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
				       size_t bus_info_size, void *private)
{
	struct csr1212_csr *csr;

	csr = CSR1212_MALLOC(sizeof(*csr));
	if (!csr)
		return NULL;

	csr->cache_head =
		csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
					 CSR1212_CONFIG_ROM_SPACE_SIZE);
	if (!csr->cache_head) {
		CSR1212_FREE(csr);
		return NULL;
	}

	/* The keyval key id is not used for the root node, but a valid key id
	 * that can be used for a directory needs to be passed to
	 * csr1212_new_directory(). */
	csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
	if (!csr->root_kv) {
		CSR1212_FREE(csr->cache_head);
		CSR1212_FREE(csr);
		return NULL;
	}

	csr->bus_info_data = csr->cache_head->data;
	csr->bus_info_len = bus_info_size;
	csr->crc_len = bus_info_size;
	csr->ops = ops;
	csr->private = private;
	csr->cache_tail = csr->cache_head;

	return csr;
}
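
/*
 * Usage sketch (illustrative only, not part of the original driver): a host
 * driver creates its CSR once, passing its bus ops and the size of its bus
 * info block.  "my_bus_ops" is a placeholder; the 5-quadlet bus info block is
 * the size typically used for IEEE 1394 and is only an example here.
 */
#if 0
static struct csr1212_csr *example_create_csr(struct csr1212_bus_ops *my_bus_ops,
					      void *host_data)
{
	/* header quadlet + "1394" + bus options + EUI-64 (2 quadlets) */
	return csr1212_create_csr(my_bus_ops, quads_to_bytes(5), host_data);
}
#endif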
void csr1212_init_local_csr(struct csr1212_csr *csr,
			    const u32 *bus_info_data, int max_rom)
{
	static const int mr_map[] = { 4, 64, 1024, 0 };

	BUG_ON(max_rom & ~0x3);
	csr->max_rom = mr_map[max_rom];
	memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
}

static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
{
	struct csr1212_keyval *kv;

	if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
		return NULL;

	kv = CSR1212_MALLOC(sizeof(*kv));
	if (!kv)
		return NULL;

	kv->key.type = type;
	kv->key.id = key;

	kv->associate = NULL;
	kv->refcnt = 1;

	kv->next = NULL;
	kv->prev = NULL;
	kv->offset = 0;
	kv->valid = 0;
	return kv;
}
struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
	if (!kv)
		return NULL;

	kv->value.immediate = value;
	kv->valid = 1;
	return kv;
}

static struct csr1212_keyval *
csr1212_new_leaf(u8 key, const void *data, size_t data_len)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
	if (!kv)
		return NULL;

	if (data_len > 0) {
		kv->value.leaf.data = CSR1212_MALLOC(data_len);
		if (!kv->value.leaf.data) {
			CSR1212_FREE(kv);
			return NULL;
		}

		if (data)
			memcpy(kv->value.leaf.data, data, data_len);
	} else {
		kv->value.leaf.data = NULL;
	}

	kv->value.leaf.len = bytes_to_quads(data_len);
	kv->offset = 0;
	kv->valid = 1;

	return kv;
}

static struct csr1212_keyval *
csr1212_new_csr_offset(u8 key, u32 csr_offset)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
	if (!kv)
		return NULL;

	kv->value.csr_offset = csr_offset;

	kv->offset = 0;
	kv->valid = 1;
	return kv;
}

struct csr1212_keyval *csr1212_new_directory(u8 key)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
	if (!kv)
		return NULL;

	kv->value.directory.len = 0;
	kv->offset = 0;
	kv->value.directory.dentries_head = NULL;
	kv->value.directory.dentries_tail = NULL;
	kv->valid = 1;
	return kv;
}
void csr1212_associate_keyval(struct csr1212_keyval *kv,
			      struct csr1212_keyval *associate)
{
	BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
	       (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
		associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
		associate->key.id < 0x30) ||
	       (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
	       (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
	       (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
		kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
	       (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
		kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));

	if (kv->associate)
		csr1212_release_keyval(kv->associate);

	associate->refcnt++;
	kv->associate = associate;
}
int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
				       struct csr1212_keyval *kv)
{
	struct csr1212_dentry *dentry;

	BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);

	dentry = CSR1212_MALLOC(sizeof(*dentry));
	if (!dentry)
		return -ENOMEM;

	dentry->kv = kv;

	kv->refcnt++;

	dentry->next = NULL;
	dentry->prev = dir->value.directory.dentries_tail;

	if (!dir->value.directory.dentries_head)
		dir->value.directory.dentries_head = dentry;

	if (dir->value.directory.dentries_tail)
		dir->value.directory.dentries_tail->next = dentry;
	dir->value.directory.dentries_tail = dentry;

	return CSR1212_SUCCESS;
}
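
/*
 * Usage sketch (illustrative only, not part of the original driver): building
 * a minimal unit directory and attaching it under the root directory.  The
 * CSR1212_KV_ID_UNIT/SPECIFIER_ID/VERSION constants are assumed to come from
 * csr1212.h, and the 0x1234/0x1 values are placeholders.  Attaching takes its
 * own reference, so the local references are dropped afterwards.
 */
#if 0
static int example_add_unit_directory(struct csr1212_csr *csr)
{
	struct csr1212_keyval *unit, *spec, *ver;
	int ret = -ENOMEM;

	unit = csr1212_new_directory(CSR1212_KV_ID_UNIT);
	spec = csr1212_new_immediate(CSR1212_KV_ID_SPECIFIER_ID, 0x1234);
	ver = csr1212_new_immediate(CSR1212_KV_ID_VERSION, 0x1);
	if (!unit || !spec || !ver)
		goto out;

	ret = csr1212_attach_keyval_to_directory(unit, spec);
	if (ret != CSR1212_SUCCESS)
		goto out;
	ret = csr1212_attach_keyval_to_directory(unit, ver);
	if (ret != CSR1212_SUCCESS)
		goto out;
	ret = csr1212_attach_keyval_to_directory(csr->root_kv, unit);
out:
	if (unit)
		csr1212_release_keyval(unit);
	if (spec)
		csr1212_release_keyval(spec);
	if (ver)
		csr1212_release_keyval(ver);
	return ret;
}
#endif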
#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
	(&((kv)->value.leaf.data[1]))

#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
	((kv)->value.leaf.data[0] = \
	 cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
		     ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
	((kv)->value.leaf.data[0] = \
	 cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
		      CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
		     ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))

static struct csr1212_keyval *
csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
			    const void *data, size_t data_len)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
			      data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
	if (!kv)
		return NULL;

	CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
	CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);

	if (data)
		memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);

	return kv;
}
/* Check if string conforms to minimal ASCII as per IEEE 1212 clause 7.4 */
static int csr1212_check_minimal_ascii(const char *s)
{
	static const char minimal_ascii_table[] = {
					/*  1   2   4   8  16  32  64  128 */
		128,			/* --, --, --, --, --, --, --, 07, */
		4 + 16 + 32,		/* --, --, 0a, --, 0C, 0D, --, --, */
		0,			/* --, --, --, --, --, --, --, --, */
		0,			/* --, --, --, --, --, --, --, --, */
		255 - 8 - 16,		/* 20, 21, 22, --, --, 25, 26, 27, */
		255,			/* 28, 29, 2a, 2b, 2c, 2d, 2e, 2f, */
		255,			/* 30, 31, 32, 33, 34, 35, 36, 37, */
		255,			/* 38, 39, 3a, 3b, 3c, 3d, 3e, 3f, */
		255,			/* 40, 41, 42, 43, 44, 45, 46, 47, */
		255,			/* 48, 49, 4a, 4b, 4c, 4d, 4e, 4f, */
		255,			/* 50, 51, 52, 53, 54, 55, 56, 57, */
		1 + 2 + 4 + 128,	/* 58, 59, 5a, --, --, --, --, 5f, */
		255 - 1,		/* --, 61, 62, 63, 64, 65, 66, 67, */
		255,			/* 68, 69, 6a, 6b, 6c, 6d, 6e, 6f, */
		255,			/* 70, 71, 72, 73, 74, 75, 76, 77, */
		1 + 2 + 4,		/* 78, 79, 7a, --, --, --, --, --, */
	};
	int i, j;

	for (; *s; s++) {
		i = *s >> 3;		/*  i = *s / 8;		*/
		j = 1 << (*s & 7);	/*  j = 1 << (*s % 8);	*/

		if (i >= ARRAY_SIZE(minimal_ascii_table) ||
		    !(minimal_ascii_table[i] & j))
			return -EINVAL;
	}
	return 0;
}
/* IEEE 1212 clause 7.5.4.1 textual descriptors (English, minimal ASCII) */
struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
{
	struct csr1212_keyval *kv;
	u32 *text;
	size_t str_len, quads;

	if (!s || !*s || csr1212_check_minimal_ascii(s))
		return NULL;

	str_len = strlen(s);
	quads = bytes_to_quads(str_len);
	kv = csr1212_new_descriptor_leaf(0, 0, NULL, quads_to_bytes(quads) +
				      CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
	if (!kv)
		return NULL;

	kv->value.leaf.data[1] = 0;	/* width, character_set, language */
	text = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
	text[quads - 1] = 0;		/* padding */
	memcpy(text, s, str_len);

	return kv;
}
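
/*
 * Usage sketch (illustrative only, not part of the original driver): creating
 * a textual descriptor leaf for a name and attaching it to a directory.  The
 * "Linux 1394" string is just an example value.
 */
#if 0
static int example_add_text_descriptor(struct csr1212_keyval *dir)
{
	struct csr1212_keyval *text;
	int ret;

	text = csr1212_new_string_descriptor_leaf("Linux 1394");
	if (!text)
		return -ENOMEM;

	ret = csr1212_attach_keyval_to_directory(dir, text);
	csr1212_release_keyval(text);	/* the directory now holds a reference */
	return ret;
}
#endif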

/* Destruction Routines */

void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
					  struct csr1212_keyval *kv)
{
	struct csr1212_dentry *dentry;

	if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
		return;

	dentry = csr1212_find_keyval(dir, kv);

	if (!dentry)
		return;

	if (dentry->prev)
		dentry->prev->next = dentry->next;
	if (dentry->next)
		dentry->next->prev = dentry->prev;
	if (dir->value.directory.dentries_head == dentry)
		dir->value.directory.dentries_head = dentry->next;
	if (dir->value.directory.dentries_tail == dentry)
		dir->value.directory.dentries_tail = dentry->prev;

	CSR1212_FREE(dentry);

	csr1212_release_keyval(kv);
}
/* This function is used to free the memory taken by a keyval.  If the given
 * keyval is a directory type, then any keyvals contained in that directory
 * will be destroyed as well if their respective refcnts are 0.  By means of
 * list manipulation, this routine will descend a directory structure in a
 * non-recursive manner. */
static void csr1212_destroy_keyval(struct csr1212_keyval *kv)
{
	struct csr1212_keyval *k, *a;
	struct csr1212_dentry dentry;
	struct csr1212_dentry *head, *tail;

	dentry.kv = kv;
	dentry.next = NULL;
	dentry.prev = NULL;

	head = &dentry;
	tail = head;

	while (head) {
		k = head->kv;

		while (k) {
			k->refcnt--;

			if (k->refcnt > 0)
				break;

			a = k->associate;

			if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
				/* If the current entry is a directory, move all
				 * the entries to the destruction list. */
				if (k->value.directory.dentries_head) {
					tail->next =
					    k->value.directory.dentries_head;
					k->value.directory.dentries_head->prev =
					    tail;
					tail = k->value.directory.dentries_tail;
				}
			}
			free_keyval(k);
			k = a;
		}

		head = head->next;
		if (head) {
			if (head->prev && head->prev != &dentry)
				CSR1212_FREE(head->prev);
			head->prev = NULL;
		} else if (tail != &dentry) {
			CSR1212_FREE(tail);
		}
	}
}

void csr1212_release_keyval(struct csr1212_keyval *kv)
{
	if (kv->refcnt > 1)
		kv->refcnt--;
	else
		csr1212_destroy_keyval(kv);
}
void csr1212_destroy_csr(struct csr1212_csr *csr)
{
	struct csr1212_csr_rom_cache *c, *oc;
	struct csr1212_cache_region *cr, *ocr;

	csr1212_release_keyval(csr->root_kv);

	c = csr->cache_head;
	while (c) {
		oc = c;
		cr = c->filled_head;
		while (cr) {
			ocr = cr;
			cr = cr->next;
			CSR1212_FREE(ocr);
		}
		c = c->next;
		CSR1212_FREE(oc);
	}

	CSR1212_FREE(csr);
}

/* CSR Image Creation */

static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
{
	struct csr1212_csr_rom_cache *cache;
	u64 csr_addr;

	BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
	       !csr->ops->release_addr || csr->max_rom < 1);

	/* ROM size must be a multiple of csr->max_rom */
	romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);

	csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom,
						 csr->private);
	if (csr_addr == CSR1212_INVALID_ADDR_SPACE)
		return -ENOMEM;

	if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
		/* Invalid address returned from allocate_addr_range(). */
		csr->ops->release_addr(csr_addr, csr->private);
		return -ENOMEM;
	}

	cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE,
					 romsize);
	if (!cache) {
		csr->ops->release_addr(csr_addr, csr->private);
		return -ENOMEM;
	}

	cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF,
					    CSR1212_KV_ID_EXTENDED_ROM);
	if (!cache->ext_rom) {
		csr->ops->release_addr(csr_addr, csr->private);
		CSR1212_FREE(cache);
		return -ENOMEM;
	}

	if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) !=
	    CSR1212_SUCCESS) {
		csr1212_release_keyval(cache->ext_rom);
		csr->ops->release_addr(csr_addr, csr->private);
		CSR1212_FREE(cache);
		return -ENOMEM;
	}
	cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
	cache->ext_rom->value.leaf.len = -1;
	cache->ext_rom->value.leaf.data = cache->data;

	/* Add cache to tail of cache list */
	cache->prev = csr->cache_tail;
	csr->cache_tail->next = cache;
	csr->cache_tail = cache;
	return CSR1212_SUCCESS;
}
static void csr1212_remove_cache(struct csr1212_csr *csr,
				 struct csr1212_csr_rom_cache *cache)
{
	if (csr->cache_head == cache)
		csr->cache_head = cache->next;
	if (csr->cache_tail == cache)
		csr->cache_tail = cache->prev;

	if (cache->prev)
		cache->prev->next = cache->next;
	if (cache->next)
		cache->next->prev = cache->prev;

	if (cache->ext_rom) {
		csr1212_detach_keyval_from_directory(csr->root_kv,
						     cache->ext_rom);
		csr1212_release_keyval(cache->ext_rom);
	}

	CSR1212_FREE(cache);
}
static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
					  struct csr1212_keyval **layout_tail)
{
	struct csr1212_dentry *dentry;
	struct csr1212_keyval *dkv;
	struct csr1212_keyval *last_extkey_spec = NULL;
	struct csr1212_keyval *last_extkey = NULL;
	int num_entries = 0;

	for (dentry = dir->value.directory.dentries_head; dentry;
	     dentry = dentry->next) {
		for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
			/* Special Case: Extended Key Specifier_ID */
			if (dkv->key.id ==
			    CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
				if (last_extkey_spec == NULL)
					last_extkey_spec = dkv;
				else if (dkv->value.immediate !=
					 last_extkey_spec->value.immediate)
					last_extkey_spec = dkv;
				else
					continue;
			/* Special Case: Extended Key */
			} else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
				if (last_extkey == NULL)
					last_extkey = dkv;
				else if (dkv->value.immediate !=
					 last_extkey->value.immediate)
					last_extkey = dkv;
				else
					continue;
			}

			num_entries += 1;

			switch (dkv->key.type) {
			default:
			case CSR1212_KV_TYPE_IMMEDIATE:
			case CSR1212_KV_TYPE_CSR_OFFSET:
				break;
			case CSR1212_KV_TYPE_LEAF:
			case CSR1212_KV_TYPE_DIRECTORY:
				/* Remove from list */
				if (dkv->prev && (dkv->prev->next == dkv))
					dkv->prev->next = dkv->next;
				if (dkv->next && (dkv->next->prev == dkv))
					dkv->next->prev = dkv->prev;
				//if (dkv == *layout_tail)
				//	*layout_tail = dkv->prev;

				/* Special case: Extended ROM leafs */
				if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
					dkv->value.leaf.len = -1;
					/* Don't add Extended ROM leafs in the
					 * layout list, they are handled
					 * differently. */
					break;
				}

				/* Add to tail of list */
				dkv->next = NULL;
				dkv->prev = *layout_tail;
				(*layout_tail)->next = dkv;
				*layout_tail = dkv;
				break;
			}
		}
	}
	return num_entries;
}
static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
{
	struct csr1212_keyval *ltail = kv;
	size_t agg_size = 0;

	while (kv) {
		switch (kv->key.type) {
		case CSR1212_KV_TYPE_LEAF:
			/* Add 1 quadlet for crc/len field */
			agg_size += kv->value.leaf.len + 1;
			break;

		case CSR1212_KV_TYPE_DIRECTORY:
			kv->value.directory.len =
				csr1212_generate_layout_subdir(kv, &ltail);
			/* Add 1 quadlet for crc/len field */
			agg_size += kv->value.directory.len + 1;
			break;
		}
		kv = kv->next;
	}
	return quads_to_bytes(agg_size);
}
static struct csr1212_keyval *
csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
			   struct csr1212_keyval *start_kv, int start_pos)
{
	struct csr1212_keyval *kv = start_kv;
	struct csr1212_keyval *okv = start_kv;
	int pos = start_pos;
	int kv_len = 0, okv_len = 0;

	cache->layout_head = kv;

	while (kv && pos < cache->size) {
		/* Special case: Extended ROM leafs */
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
			kv->offset = cache->offset + pos;

		switch (kv->key.type) {
		case CSR1212_KV_TYPE_LEAF:
			kv_len = kv->value.leaf.len;
			break;

		case CSR1212_KV_TYPE_DIRECTORY:
			kv_len = kv->value.directory.len;
			break;

		default:
			/* Should never get here */
			WARN_ON(1);
			break;
		}

		pos += quads_to_bytes(kv_len + 1);

		if (pos <= cache->size) {
			okv = kv;
			okv_len = kv_len;
			kv = kv->next;
		}
	}

	cache->layout_tail = okv;
	cache->len = okv->offset - cache->offset + quads_to_bytes(okv_len + 1);

	return kv;
}
#define CSR1212_KV_KEY_SHIFT		24
#define CSR1212_KV_KEY_TYPE_SHIFT	6
#define CSR1212_KV_KEY_ID_MASK		0x3f
#define CSR1212_KV_KEY_TYPE_MASK	0x3	/* after shift */

static void
csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
{
	struct csr1212_dentry *dentry;
	struct csr1212_keyval *last_extkey_spec = NULL;
	struct csr1212_keyval *last_extkey = NULL;
	int index = 0;

	for (dentry = dir->value.directory.dentries_head;
	     dentry;
	     dentry = dentry->next) {
		struct csr1212_keyval *a;

		for (a = dentry->kv; a; a = a->associate) {
			u32 value = 0;

			/* Special Case: Extended Key Specifier_ID */
			if (a->key.id ==
			    CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
				if (last_extkey_spec == NULL)
					last_extkey_spec = a;
				else if (a->value.immediate !=
					 last_extkey_spec->value.immediate)
					last_extkey_spec = a;
				else
					continue;

			/* Special Case: Extended Key */
			} else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
				if (last_extkey == NULL)
					last_extkey = a;
				else if (a->value.immediate !=
					 last_extkey->value.immediate)
					last_extkey = a;
				else
					continue;
			}

			switch (a->key.type) {
			case CSR1212_KV_TYPE_IMMEDIATE:
				value = a->value.immediate;
				break;
			case CSR1212_KV_TYPE_CSR_OFFSET:
				value = a->value.csr_offset;
				break;
			case CSR1212_KV_TYPE_LEAF:
				value = a->offset;
				value -= dir->offset + quads_to_bytes(1+index);
				value = bytes_to_quads(value);
				break;
			case CSR1212_KV_TYPE_DIRECTORY:
				value = a->offset;
				value -= dir->offset + quads_to_bytes(1+index);
				value = bytes_to_quads(value);
				break;
			default:
				/* Should never get here */
				WARN_ON(1);
				break;
			}

			value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) <<
				 CSR1212_KV_KEY_SHIFT;
			value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
				 (CSR1212_KV_KEY_SHIFT +
				  CSR1212_KV_KEY_TYPE_SHIFT);
			data_buffer[index] = cpu_to_be32(value);
			index++;
		}
	}
}
struct csr1212_keyval_img {
	u16 length;
	u16 crc;

	/* Must be last */
	u32 data[0];	/* older gcc can't handle [] which is standard */
};
static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
{
	struct csr1212_keyval *kv, *nkv;
	struct csr1212_keyval_img *kvi;

	for (kv = cache->layout_head;
	     kv != cache->layout_tail->next;
	     kv = nkv) {
		kvi = (struct csr1212_keyval_img *)(cache->data +
				bytes_to_quads(kv->offset - cache->offset));
		switch (kv->key.type) {
		default:
		case CSR1212_KV_TYPE_IMMEDIATE:
		case CSR1212_KV_TYPE_CSR_OFFSET:
			/* Should never get here */
			WARN_ON(1);
			break;

		case CSR1212_KV_TYPE_LEAF:
			/* Don't copy over Extended ROM areas, they are
			 * already filled out! */
			if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
				memcpy(kvi->data, kv->value.leaf.data,
				       quads_to_bytes(kv->value.leaf.len));

			kvi->length = cpu_to_be16(kv->value.leaf.len);
			kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
			break;

		case CSR1212_KV_TYPE_DIRECTORY:
			csr1212_generate_tree_subdir(kv, kvi->data);

			kvi->length = cpu_to_be16(kv->value.directory.len);
			kvi->crc = csr1212_crc16(kvi->data,
						 kv->value.directory.len);
			break;
		}

		nkv = kv->next;
		if (kv->prev)
			kv->prev->next = NULL;
		if (kv->next)
			kv->next->prev = NULL;
		kv->prev = NULL;
		kv->next = NULL;
	}
}
/* This size is arbitrarily chosen.
 * The struct overhead is subtracted for more economic allocations. */
#define CSR1212_EXTENDED_ROM_SIZE (2048 - sizeof(struct csr1212_csr_rom_cache))
int csr1212_generate_csr_image(struct csr1212_csr *csr)
{
	struct csr1212_bus_info_block_img *bi;
	struct csr1212_csr_rom_cache *cache;
	struct csr1212_keyval *kv;
	size_t agg_size;
	int ret;
	int init_offset;

	BUG_ON(!csr);

	cache = csr->cache_head;

	bi = (struct csr1212_bus_info_block_img*)cache->data;

	bi->length = bytes_to_quads(csr->bus_info_len) - 1;
	bi->crc_length = bi->length;
	bi->crc = csr1212_crc16(bi->data, bi->crc_length);

	csr->root_kv->next = NULL;
	csr->root_kv->prev = NULL;

	agg_size = csr1212_generate_layout_order(csr->root_kv);

	init_offset = csr->bus_info_len;

	for (kv = csr->root_kv, cache = csr->cache_head;
	     kv;
	     cache = cache->next) {
		if (!cache) {
			/* Estimate approximate number of additional cache
			 * regions needed (it assumes that the cache holding
			 * the first 1K Config ROM space always exists). */
			int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
						(2 * sizeof(u32))) + 1;

			/* Add additional cache regions, extras will be
			 * removed later */
			for (; est_c; est_c--) {
				ret = csr1212_append_new_cache(csr,
						CSR1212_EXTENDED_ROM_SIZE);
				if (ret != CSR1212_SUCCESS)
					return ret;
			}
			/* Need to re-layout for additional cache regions */
			agg_size = csr1212_generate_layout_order(csr->root_kv);
			kv = csr->root_kv;
			cache = csr->cache_head;
			init_offset = csr->bus_info_len;
		}
		kv = csr1212_generate_positions(cache, kv, init_offset);
		agg_size -= cache->len;
		init_offset = sizeof(u32);
	}

	/* Remove unused, excess cache regions */
	while (cache) {
		struct csr1212_csr_rom_cache *oc = cache;

		cache = cache->next;
		csr1212_remove_cache(csr, oc);
	}

	/* Go through the list backward so that when done, the correct CRC
	 * will be calculated for the Extended ROM areas. */
	for (cache = csr->cache_tail; cache; cache = cache->prev) {
		/* Only Extended ROM caches should have this set. */
		if (cache->ext_rom) {
			int leaf_size;

			/* Make sure the Extended ROM leaf is a multiple of
			 * max_rom in size. */
			BUG_ON(csr->max_rom < 1);
			leaf_size = (cache->len + (csr->max_rom - 1)) &
				~(csr->max_rom - 1);

			/* Zero out the unused ROM region */
			memset(cache->data + bytes_to_quads(cache->len), 0x00,
			       leaf_size - cache->len);

			/* Subtract leaf header */
			leaf_size -= sizeof(u32);

			/* Update the Extended ROM leaf length */
			cache->ext_rom->value.leaf.len =
				bytes_to_quads(leaf_size);
		} else {
			/* Zero out the unused ROM region */
			memset(cache->data + bytes_to_quads(cache->len), 0x00,
			       cache->size - cache->len);
		}

		/* Copy the data into the cache buffer */
		csr1212_fill_cache(cache);

		if (cache != csr->cache_head) {
			/* Set the length and CRC of the extended ROM. */
			struct csr1212_keyval_img *kvi =
				(struct csr1212_keyval_img*)cache->data;
			u16 len = bytes_to_quads(cache->len) - 1;

			kvi->length = cpu_to_be16(len);
			kvi->crc = csr1212_crc16(kvi->data, len);
		}
	}

	return CSR1212_SUCCESS;
}
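
/*
 * Usage sketch (illustrative only, not part of the original driver): the
 * typical flow for publishing a locally built Config ROM.  Error handling is
 * omitted; "my_bus_info" stands in for the host's bus info block quadlets and
 * max_rom_code is the 2-bit max_rom value expected by
 * csr1212_init_local_csr().
 */
#if 0
static void example_publish_local_rom(struct csr1212_csr *csr,
				      const u32 *my_bus_info, int max_rom_code)
{
	csr1212_init_local_csr(csr, my_bus_info, max_rom_code);

	/* ...attach immediates, leaves and directories under csr->root_kv... */

	csr1212_generate_csr_image(csr);
}
#endif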
int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
{
	struct csr1212_csr_rom_cache *cache;

	for (cache = csr->cache_head; cache; cache = cache->next)
		if (offset >= cache->offset &&
		    (offset + len) <= (cache->offset + cache->size)) {
			memcpy(buffer, &cache->data[
					bytes_to_quads(offset - cache->offset)],
			       len);
			return CSR1212_SUCCESS;
		}

	return -ENOENT;
}

/* Parse a chunk of data as a Config ROM */

static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
{
	struct csr1212_bus_info_block_img *bi;
	struct csr1212_cache_region *cr;
	int i;
	int ret;

	/* IEEE 1212 says that the entire bus info block should be readable in
	 * a single transaction regardless of the max_rom value.
	 * Unfortunately, many IEEE 1394 devices do not abide by that, so the
	 * bus info block will be read 1 quadlet at a time.  The rest of the
	 * ConfigROM will be read according to the max_rom field. */
	for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
			sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
			csr->private);
		if (ret != CSR1212_SUCCESS)
			return ret;

		/* check ROM header's info_length */
		if (i == 0 &&
		    be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
		    bytes_to_quads(csr->bus_info_len) - 1)
			return -EINVAL;
	}

	bi = (struct csr1212_bus_info_block_img*)csr->cache_head->data;
	csr->crc_len = quads_to_bytes(bi->crc_length);

	/* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that
	 * is not always the case, so read the rest of the crc area 1 quadlet at
	 * a time. */
	for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
			sizeof(u32), &csr->cache_head->data[bytes_to_quads(i)],
			csr->private);
		if (ret != CSR1212_SUCCESS)
			return ret;
	}

	/* Apparently there are many different wrong implementations of the CRC
	 * algorithm.  We don't fail, we just warn. */
	if ((csr1212_crc16(bi->data, bi->crc_length) != bi->crc) &&
	    (csr1212_msft_crc16(bi->data, bi->crc_length) != bi->crc))
		printk(KERN_DEBUG "IEEE 1394 device has ROM CRC error\n");

	cr = CSR1212_MALLOC(sizeof(*cr));
	if (!cr)
		return -ENOMEM;

	cr->next = NULL;
	cr->prev = NULL;
	cr->offset_start = 0;
	cr->offset_end = csr->crc_len + 4;

	csr->cache_head->filled_head = cr;
	csr->cache_head->filled_tail = cr;

	return CSR1212_SUCCESS;
}
#define CSR1212_KV_KEY(q)	(be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
#define CSR1212_KV_KEY_TYPE(q)	(CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
#define CSR1212_KV_KEY_ID(q)	(CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
#define CSR1212_KV_VAL_MASK	0xffffff
#define CSR1212_KV_VAL(q)	(be32_to_cpu(q) & CSR1212_KV_VAL_MASK)
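
/*
 * Directory entry layout decoded by the macros above (one big-endian quadlet
 * per entry, per IEEE 1212):
 *
 *	bits 31..30	key type (immediate, CSR offset, leaf, directory)
 *	bits 29..24	key id
 *	bits 23..0	value (immediate value, CSR offset in quadlets, or the
 *			quadlet distance to the referenced leaf/directory)
 */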
static int
csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
{
	int ret = CSR1212_SUCCESS;
	struct csr1212_keyval *k = NULL;
	u32 offset;

	switch (CSR1212_KV_KEY_TYPE(ki)) {
	case CSR1212_KV_TYPE_IMMEDIATE:
		k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
					  CSR1212_KV_VAL(ki));
		if (!k) {
			ret = -ENOMEM;
			goto out;
		}

		k->refcnt = 0;	/* Don't keep local reference when parsing. */
		break;

	case CSR1212_KV_TYPE_CSR_OFFSET:
		k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
					   CSR1212_KV_VAL(ki));
		if (!k) {
			ret = -ENOMEM;
			goto out;
		}
		k->refcnt = 0;	/* Don't keep local reference when parsing. */
		break;

	default:
		/* Compute the offset from 0xffff f000 0000. */
		offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
		if (offset == kv_pos) {
			/* Uh-oh.  Can't have a relative offset of 0 for Leaves
			 * or Directories.  The Config ROM image is most likely
			 * messed up, so we'll just abort here. */
			ret = -EIO;
			goto out;
		}

		k = csr1212_find_keyval_offset(dir, offset);

		if (k)
			break;		/* Found it. */

		if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY)
			k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
		else
			k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);

		if (!k) {
			ret = -ENOMEM;
			goto out;
		}
		k->refcnt = 0;	/* Don't keep local reference when parsing. */
		k->valid = 0;	/* Contents not read yet so it's not valid. */
		k->offset = offset;

		k->prev = dir;
		k->next = dir->next;
		dir->next->prev = k;
		dir->next = k;
	}
	ret = csr1212_attach_keyval_to_directory(dir, k);
out:
	if (ret != CSR1212_SUCCESS && k != NULL)
		free_keyval(k);
	return ret;
}
int csr1212_parse_keyval(struct csr1212_keyval *kv,
			 struct csr1212_csr_rom_cache *cache)
{
	struct csr1212_keyval_img *kvi;
	int i;
	int ret = CSR1212_SUCCESS;
	int kvi_len;

	kvi = (struct csr1212_keyval_img*)
		&cache->data[bytes_to_quads(kv->offset - cache->offset)];
	kvi_len = be16_to_cpu(kvi->length);

	/* Apparently there are many different wrong implementations of the CRC
	 * algorithm.  We don't fail, we just warn. */
	if ((csr1212_crc16(kvi->data, kvi_len) != kvi->crc) &&
	    (csr1212_msft_crc16(kvi->data, kvi_len) != kvi->crc))
		printk(KERN_DEBUG "IEEE 1394 device has ROM CRC error\n");

	switch (kv->key.type) {
	case CSR1212_KV_TYPE_DIRECTORY:
		for (i = 0; i < kvi_len; i++) {
			u32 ki = kvi->data[i];

			/* Some devices put null entries in their unit
			 * directories.  If we come across such an entry,
			 * then skip it. */
			if (ki == 0x0)
				continue;
			ret = csr1212_parse_dir_entry(kv, ki,
					kv->offset + quads_to_bytes(i + 1));
		}
		kv->value.directory.len = kvi_len;
		break;

	case CSR1212_KV_TYPE_LEAF:
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
			size_t size = quads_to_bytes(kvi_len);

			kv->value.leaf.data = CSR1212_MALLOC(size);
			if (!kv->value.leaf.data) {
				ret = -ENOMEM;
				goto out;
			}

			kv->value.leaf.len = kvi_len;
			memcpy(kv->value.leaf.data, kvi->data, size);
		}
		break;
	}

	kv->valid = 1;
out:
	return ret;
}
static int
csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
{
	struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
	struct csr1212_keyval_img *kvi = NULL;
	struct csr1212_csr_rom_cache *cache;
	int cache_index;
	u64 addr;
	u32 *cache_ptr;
	u16 kv_len = 0;

	BUG_ON(!csr || !kv || csr->max_rom < 1);

	/* First find which cache the data should be in (or go in if not read
	 * yet). */
	for (cache = csr->cache_head; cache; cache = cache->next)
		if (kv->offset >= cache->offset &&
		    kv->offset < (cache->offset + cache->size))
			break;

	if (!cache) {
		u32 q, cache_size;

		/* Only create a new cache for Extended ROM leaves. */
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
			return -EINVAL;

		if (csr->ops->bus_read(csr,
				       CSR1212_REGISTER_SPACE_BASE + kv->offset,
				       sizeof(u32), &q, csr->private))
			return -EIO;

		kv->value.leaf.len = be32_to_cpu(q) >> 16;

		cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
			      (csr->max_rom - 1)) & ~(csr->max_rom - 1);

		cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
		if (!cache)
			return -ENOMEM;

		kv->value.leaf.data = &cache->data[1];
		csr->cache_tail->next = cache;
		cache->prev = csr->cache_tail;
		cache->next = NULL;
		csr->cache_tail = cache;
		cache->filled_head =
			CSR1212_MALLOC(sizeof(*cache->filled_head));
		if (!cache->filled_head)
			return -ENOMEM;

		cache->filled_head->offset_start = 0;
		cache->filled_head->offset_end = sizeof(u32);
		cache->filled_tail = cache->filled_head;
		cache->filled_head->next = NULL;
		cache->filled_head->prev = NULL;
		cache->data[0] = q;

		/* Don't read the entire extended ROM now.  Pieces of it will
		 * be read when entries inside it are read. */
		return csr1212_parse_keyval(kv, cache);
	}

	cache_index = kv->offset - cache->offset;

	/* Now search read portions of the cache to see if it is there. */
	for (cr = cache->filled_head; cr; cr = cr->next) {
		if (cache_index < cr->offset_start) {
			newcr = CSR1212_MALLOC(sizeof(*newcr));
			if (!newcr)
				return -ENOMEM;

			newcr->offset_start = cache_index & ~(csr->max_rom - 1);
			newcr->offset_end = newcr->offset_start;
			newcr->next = cr;
			newcr->prev = cr->prev;
			cr->prev = newcr;
			cr = newcr;
			break;
		} else if ((cache_index >= cr->offset_start) &&
			   (cache_index < cr->offset_end)) {
			kvi = (struct csr1212_keyval_img*)
				(&cache->data[bytes_to_quads(cache_index)]);
			kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
			break;
		} else if (cache_index == cr->offset_end) {
			break;
		}
	}

	if (!cr) {
		cr = cache->filled_tail;
		newcr = CSR1212_MALLOC(sizeof(*newcr));
		if (!newcr)
			return -ENOMEM;

		newcr->offset_start = cache_index & ~(csr->max_rom - 1);
		newcr->offset_end = newcr->offset_start;
		newcr->prev = cr;
		newcr->next = cr->next;
		cr->next = newcr;
		cr = newcr;
		cache->filled_tail = newcr;
	}

	while (!kvi || cr->offset_end < cache_index + kv_len) {
		cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
							~(csr->max_rom - 1))];

		addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
			cr->offset_end) & ~(csr->max_rom - 1);

		if (csr->ops->bus_read(csr, addr, csr->max_rom, cache_ptr,
				       csr->private)) {
			if (csr->max_rom == 4)
				/* We've got problems! */
				return -EIO;

			/* Apparently the max_rom value was a lie, set it to
			 * do quadlet reads and try again. */
			csr->max_rom = 4;
			continue;
		}

		cr->offset_end += csr->max_rom - (cr->offset_end &
						  (csr->max_rom - 1));

		if (!kvi && (cr->offset_end > cache_index)) {
			kvi = (struct csr1212_keyval_img*)
				(&cache->data[bytes_to_quads(cache_index)]);
			kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
		}

		if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
			/* The Leaf or Directory claims its length extends
			 * beyond the ConfigROM image region and thus beyond the
			 * end of our cache region.  Therefore, we abort now
			 * rather than seg faulting later. */
			return -EIO;
		}

		ncr = cr->next;

		if (ncr && (cr->offset_end >= ncr->offset_start)) {
			/* consolidate region entries */
			ncr->offset_start = cr->offset_start;

			if (cr->prev)
				cr->prev->next = cr->next;
			ncr->prev = cr->prev;
			if (cache->filled_head == cr)
				cache->filled_head = ncr;
			CSR1212_FREE(cr);
			cr = ncr;
		}
	}

	return csr1212_parse_keyval(kv, cache);
}
struct csr1212_keyval *
csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
{
	if (!kv)
		return NULL;
	if (!kv->valid)
		if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
			return NULL;
	return kv;
}
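
/*
 * Usage sketch (illustrative only, not part of the original driver): leaves
 * and subdirectories discovered while parsing are read lazily, and
 * csr1212_get_keyval() triggers that read.  Interpreting the leaf contents as
 * a textual descriptor here is only an example of what a caller might do.
 */
#if 0
static const u32 *example_leaf_text(struct csr1212_csr *csr,
				    struct csr1212_keyval *kv)
{
	kv = csr1212_get_keyval(csr, kv);
	if (!kv || kv->key.type != CSR1212_KV_TYPE_LEAF)
		return NULL;

	/* only meaningful if the leaf really is a textual descriptor */
	return CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
}
#endif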
int csr1212_parse_csr(struct csr1212_csr *csr)
{
	static const int mr_map[] = { 4, 64, 1024, 0 };
	struct csr1212_dentry *dentry;
	int ret;

	BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);

	ret = csr1212_parse_bus_info_block(csr);
	if (ret != CSR1212_SUCCESS)
		return ret;

	if (!csr->ops->get_max_rom) {
		csr->max_rom = mr_map[0];	/* default value */
	} else {
		int i = csr->ops->get_max_rom(csr->bus_info_data,
					      csr->private);
		if (i & ~0x3)
			return -EINVAL;
		csr->max_rom = mr_map[i];
	}

	csr->cache_head->layout_head = csr->root_kv;
	csr->cache_head->layout_tail = csr->root_kv;

	csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
		csr->bus_info_len;

	csr->root_kv->valid = 0;
	csr->root_kv->next = csr->root_kv;
	csr->root_kv->prev = csr->root_kv;
	ret = csr1212_read_keyval(csr, csr->root_kv);
	if (ret != CSR1212_SUCCESS)
		return ret;

	/* Scan through the Root directory finding all extended ROM regions
	 * and make cache regions for them */
	for (dentry = csr->root_kv->value.directory.dentries_head;
	     dentry; dentry = dentry->next) {
		if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
		    !dentry->kv->valid) {
			ret = csr1212_read_keyval(csr, dentry->kv);
			if (ret != CSR1212_SUCCESS)
				return ret;
		}
	}

	return CSR1212_SUCCESS;
}
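
/*
 * Usage sketch (illustrative only, not part of the original driver): parsing
 * a remote node's ROM and walking its root directory.  "ops" must provide at
 * least bus_read() (and optionally get_max_rom()); the 5-quadlet bus info
 * size and the CSR1212_KV_ID_UNIT constant from csr1212.h are assumptions of
 * this example.
 */
#if 0
static void example_parse_remote_rom(struct csr1212_bus_ops *ops, void *host)
{
	struct csr1212_csr *csr;
	struct csr1212_dentry *dentry;

	csr = csr1212_create_csr(ops, quads_to_bytes(5), host);
	if (!csr)
		return;

	if (csr1212_parse_csr(csr) == CSR1212_SUCCESS)
		for (dentry = csr->root_kv->value.directory.dentries_head;
		     dentry; dentry = dentry->next)
			if (dentry->kv->key.id == CSR1212_KV_ID_UNIT)
				/* read the unit directory contents on demand */
				csr1212_get_keyval(csr, dentry->kv);

	csr1212_destroy_csr(csr);
}
#endif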