/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
11 #include <linux/bitops.h>
12 #include <linux/module.h>
13 #include <linux/mtd/mtd.h>
14 #include <linux/delay.h>
15 #include <linux/rtnetlink.h>
17 #define EFX_DRIVER_NAME "sfc_mtd"
18 #include "net_driver.h"
23 #include "mcdi_pcol.h"
25 #define EFX_SPI_VERIFY_BUF_LEN 16
27 struct efx_mtd_partition
{
37 const char *type_name
;
38 char name
[IFNAMSIZ
+ 20];
42 int (*read
)(struct mtd_info
*mtd
, loff_t start
, size_t len
,
43 size_t *retlen
, u8
*buffer
);
44 int (*erase
)(struct mtd_info
*mtd
, loff_t start
, size_t len
);
45 int (*write
)(struct mtd_info
*mtd
, loff_t start
, size_t len
,
46 size_t *retlen
, const u8
*buffer
);
47 int (*sync
)(struct mtd_info
*mtd
);
51 struct list_head node
;
53 const struct efx_spi_device
*spi
;
55 const struct efx_mtd_ops
*ops
;
57 struct efx_mtd_partition part
[0];
/* Iterate over every partition of an efx_mtd.  The missing loop
 * increment is reconstructed as (part)++ — the only step consistent
 * with the bounds check against &part[n_parts].
 */
#define efx_for_each_partition(part, efx_mtd)			\
	for ((part) = &(efx_mtd)->part[0];			\
	     (part) != &(efx_mtd)->part[(efx_mtd)->n_parts];	\
	     (part)++)

/* Recover the enclosing partition from the mtd_info embedded in it */
#define to_efx_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mtd_partition, mtd)
/* Forward declarations for the per-NIC-revision probe functions,
 * needed by efx_mtd_probe() below. */
static int falcon_mtd_probe(struct efx_nic *efx);
static int siena_mtd_probe(struct efx_nic *efx);
73 static int efx_spi_slow_wait(struct efx_mtd
*efx_mtd
, bool uninterruptible
)
75 const struct efx_spi_device
*spi
= efx_mtd
->spi
;
76 struct efx_nic
*efx
= efx_mtd
->efx
;
80 /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
81 for (i
= 0; i
< 40; i
++) {
82 __set_current_state(uninterruptible
?
83 TASK_UNINTERRUPTIBLE
: TASK_INTERRUPTIBLE
);
84 schedule_timeout(HZ
/ 10);
85 rc
= falcon_spi_cmd(efx
, spi
, SPI_RDSR
, -1, NULL
,
86 &status
, sizeof(status
));
89 if (!(status
& SPI_STATUS_NRDY
))
91 if (signal_pending(current
))
94 EFX_ERR(efx
, "timed out waiting for %s\n", efx_mtd
->name
);
99 efx_spi_unlock(struct efx_nic
*efx
, const struct efx_spi_device
*spi
)
101 const u8 unlock_mask
= (SPI_STATUS_BP2
| SPI_STATUS_BP1
|
106 rc
= falcon_spi_cmd(efx
, spi
, SPI_RDSR
, -1, NULL
,
107 &status
, sizeof(status
));
111 if (!(status
& unlock_mask
))
112 return 0; /* already unlocked */
114 rc
= falcon_spi_cmd(efx
, spi
, SPI_WREN
, -1, NULL
, NULL
, 0);
117 rc
= falcon_spi_cmd(efx
, spi
, SPI_SST_EWSR
, -1, NULL
, NULL
, 0);
121 status
&= ~unlock_mask
;
122 rc
= falcon_spi_cmd(efx
, spi
, SPI_WRSR
, -1, &status
,
123 NULL
, sizeof(status
));
126 rc
= falcon_spi_wait_write(efx
, spi
);
133 static int efx_spi_erase(struct efx_mtd
*efx_mtd
, loff_t start
, size_t len
)
135 const struct efx_spi_device
*spi
= efx_mtd
->spi
;
136 struct efx_nic
*efx
= efx_mtd
->efx
;
137 unsigned pos
, block_len
;
138 u8 empty
[EFX_SPI_VERIFY_BUF_LEN
];
139 u8 buffer
[EFX_SPI_VERIFY_BUF_LEN
];
142 if (len
!= spi
->erase_size
)
145 if (spi
->erase_command
== 0)
148 rc
= efx_spi_unlock(efx
, spi
);
151 rc
= falcon_spi_cmd(efx
, spi
, SPI_WREN
, -1, NULL
, NULL
, 0);
154 rc
= falcon_spi_cmd(efx
, spi
, spi
->erase_command
, start
, NULL
,
158 rc
= efx_spi_slow_wait(efx_mtd
, false);
160 /* Verify the entire region has been wiped */
161 memset(empty
, 0xff, sizeof(empty
));
162 for (pos
= 0; pos
< len
; pos
+= block_len
) {
163 block_len
= min(len
- pos
, sizeof(buffer
));
164 rc
= falcon_spi_read(efx
, spi
, start
+ pos
, block_len
,
168 if (memcmp(empty
, buffer
, block_len
))
171 /* Avoid locking up the system */
173 if (signal_pending(current
))
182 static int efx_mtd_erase(struct mtd_info
*mtd
, struct erase_info
*erase
)
184 struct efx_mtd
*efx_mtd
= mtd
->priv
;
187 rc
= efx_mtd
->ops
->erase(mtd
, erase
->addr
, erase
->len
);
189 erase
->state
= MTD_ERASE_DONE
;
191 erase
->state
= MTD_ERASE_FAILED
;
192 erase
->fail_addr
= 0xffffffff;
194 mtd_erase_callback(erase
);
198 static void efx_mtd_sync(struct mtd_info
*mtd
)
200 struct efx_mtd
*efx_mtd
= mtd
->priv
;
201 struct efx_nic
*efx
= efx_mtd
->efx
;
204 rc
= efx_mtd
->ops
->sync(mtd
);
206 EFX_ERR(efx
, "%s sync failed (%d)\n", efx_mtd
->name
, rc
);
209 static void efx_mtd_remove_partition(struct efx_mtd_partition
*part
)
214 rc
= del_mtd_device(&part
->mtd
);
222 static void efx_mtd_remove_device(struct efx_mtd
*efx_mtd
)
224 struct efx_mtd_partition
*part
;
226 efx_for_each_partition(part
, efx_mtd
)
227 efx_mtd_remove_partition(part
);
228 list_del(&efx_mtd
->node
);
232 static void efx_mtd_rename_device(struct efx_mtd
*efx_mtd
)
234 struct efx_mtd_partition
*part
;
236 efx_for_each_partition(part
, efx_mtd
)
237 if (efx_nic_rev(efx_mtd
->efx
) >= EFX_REV_SIENA_A0
)
238 snprintf(part
->name
, sizeof(part
->name
),
239 "%s %s:%02x", efx_mtd
->efx
->name
,
240 part
->type_name
, part
->mcdi
.fw_subtype
);
242 snprintf(part
->name
, sizeof(part
->name
),
243 "%s %s", efx_mtd
->efx
->name
,
247 static int efx_mtd_probe_device(struct efx_nic
*efx
, struct efx_mtd
*efx_mtd
)
249 struct efx_mtd_partition
*part
;
253 efx_mtd_rename_device(efx_mtd
);
255 efx_for_each_partition(part
, efx_mtd
) {
256 part
->mtd
.writesize
= 1;
258 part
->mtd
.owner
= THIS_MODULE
;
259 part
->mtd
.priv
= efx_mtd
;
260 part
->mtd
.name
= part
->name
;
261 part
->mtd
.erase
= efx_mtd_erase
;
262 part
->mtd
.read
= efx_mtd
->ops
->read
;
263 part
->mtd
.write
= efx_mtd
->ops
->write
;
264 part
->mtd
.sync
= efx_mtd_sync
;
266 if (add_mtd_device(&part
->mtd
))
270 list_add(&efx_mtd
->node
, &efx
->mtd_list
);
274 while (part
!= &efx_mtd
->part
[0]) {
276 efx_mtd_remove_partition(part
);
278 /* add_mtd_device() returns 1 if the MTD table is full */
282 void efx_mtd_remove(struct efx_nic
*efx
)
284 struct efx_mtd
*efx_mtd
, *next
;
286 WARN_ON(efx_dev_registered(efx
));
288 list_for_each_entry_safe(efx_mtd
, next
, &efx
->mtd_list
, node
)
289 efx_mtd_remove_device(efx_mtd
);
292 void efx_mtd_rename(struct efx_nic
*efx
)
294 struct efx_mtd
*efx_mtd
;
298 list_for_each_entry(efx_mtd
, &efx
->mtd_list
, node
)
299 efx_mtd_rename_device(efx_mtd
);
302 int efx_mtd_probe(struct efx_nic
*efx
)
304 if (efx_nic_rev(efx
) >= EFX_REV_SIENA_A0
)
305 return siena_mtd_probe(efx
);
307 return falcon_mtd_probe(efx
);
310 /* Implementation of MTD operations for Falcon */
312 static int falcon_mtd_read(struct mtd_info
*mtd
, loff_t start
,
313 size_t len
, size_t *retlen
, u8
*buffer
)
315 struct efx_mtd_partition
*part
= to_efx_mtd_partition(mtd
);
316 struct efx_mtd
*efx_mtd
= mtd
->priv
;
317 const struct efx_spi_device
*spi
= efx_mtd
->spi
;
318 struct efx_nic
*efx
= efx_mtd
->efx
;
321 rc
= mutex_lock_interruptible(&efx
->spi_lock
);
324 rc
= falcon_spi_read(efx
, spi
, part
->offset
+ start
, len
,
326 mutex_unlock(&efx
->spi_lock
);
330 static int falcon_mtd_erase(struct mtd_info
*mtd
, loff_t start
, size_t len
)
332 struct efx_mtd_partition
*part
= to_efx_mtd_partition(mtd
);
333 struct efx_mtd
*efx_mtd
= mtd
->priv
;
334 struct efx_nic
*efx
= efx_mtd
->efx
;
337 rc
= mutex_lock_interruptible(&efx
->spi_lock
);
340 rc
= efx_spi_erase(efx_mtd
, part
->offset
+ start
, len
);
341 mutex_unlock(&efx
->spi_lock
);
345 static int falcon_mtd_write(struct mtd_info
*mtd
, loff_t start
,
346 size_t len
, size_t *retlen
, const u8
*buffer
)
348 struct efx_mtd_partition
*part
= to_efx_mtd_partition(mtd
);
349 struct efx_mtd
*efx_mtd
= mtd
->priv
;
350 const struct efx_spi_device
*spi
= efx_mtd
->spi
;
351 struct efx_nic
*efx
= efx_mtd
->efx
;
354 rc
= mutex_lock_interruptible(&efx
->spi_lock
);
357 rc
= falcon_spi_write(efx
, spi
, part
->offset
+ start
, len
,
359 mutex_unlock(&efx
->spi_lock
);
363 static int falcon_mtd_sync(struct mtd_info
*mtd
)
365 struct efx_mtd
*efx_mtd
= mtd
->priv
;
366 struct efx_nic
*efx
= efx_mtd
->efx
;
369 mutex_lock(&efx
->spi_lock
);
370 rc
= efx_spi_slow_wait(efx_mtd
, true);
371 mutex_unlock(&efx
->spi_lock
);
375 static struct efx_mtd_ops falcon_mtd_ops
= {
376 .read
= falcon_mtd_read
,
377 .erase
= falcon_mtd_erase
,
378 .write
= falcon_mtd_write
,
379 .sync
= falcon_mtd_sync
,
382 static int falcon_mtd_probe(struct efx_nic
*efx
)
384 struct efx_spi_device
*spi
= efx
->spi_flash
;
385 struct efx_mtd
*efx_mtd
;
390 if (!spi
|| spi
->size
<= FALCON_FLASH_BOOTCODE_START
)
393 efx_mtd
= kzalloc(sizeof(*efx_mtd
) + sizeof(efx_mtd
->part
[0]),
399 efx_mtd
->name
= "flash";
400 efx_mtd
->ops
= &falcon_mtd_ops
;
402 efx_mtd
->n_parts
= 1;
403 efx_mtd
->part
[0].mtd
.type
= MTD_NORFLASH
;
404 efx_mtd
->part
[0].mtd
.flags
= MTD_CAP_NORFLASH
;
405 efx_mtd
->part
[0].mtd
.size
= spi
->size
- FALCON_FLASH_BOOTCODE_START
;
406 efx_mtd
->part
[0].mtd
.erasesize
= spi
->erase_size
;
407 efx_mtd
->part
[0].offset
= FALCON_FLASH_BOOTCODE_START
;
408 efx_mtd
->part
[0].type_name
= "sfc_flash_bootrom";
410 rc
= efx_mtd_probe_device(efx
, efx_mtd
);
416 /* Implementation of MTD operations for Siena */
418 static int siena_mtd_read(struct mtd_info
*mtd
, loff_t start
,
419 size_t len
, size_t *retlen
, u8
*buffer
)
421 struct efx_mtd_partition
*part
= to_efx_mtd_partition(mtd
);
422 struct efx_mtd
*efx_mtd
= mtd
->priv
;
423 struct efx_nic
*efx
= efx_mtd
->efx
;
424 loff_t offset
= start
;
425 loff_t end
= min_t(loff_t
, start
+ len
, mtd
->size
);
429 while (offset
< end
) {
430 chunk
= min_t(size_t, end
- offset
, EFX_MCDI_NVRAM_LEN_MAX
);
431 rc
= efx_mcdi_nvram_read(efx
, part
->mcdi
.nvram_type
, offset
,
439 *retlen
= offset
- start
;
443 static int siena_mtd_erase(struct mtd_info
*mtd
, loff_t start
, size_t len
)
445 struct efx_mtd_partition
*part
= to_efx_mtd_partition(mtd
);
446 struct efx_mtd
*efx_mtd
= mtd
->priv
;
447 struct efx_nic
*efx
= efx_mtd
->efx
;
448 loff_t offset
= start
& ~((loff_t
)(mtd
->erasesize
- 1));
449 loff_t end
= min_t(loff_t
, start
+ len
, mtd
->size
);
450 size_t chunk
= part
->mtd
.erasesize
;
453 if (!part
->mcdi
.updating
) {
454 rc
= efx_mcdi_nvram_update_start(efx
, part
->mcdi
.nvram_type
);
457 part
->mcdi
.updating
= 1;
460 /* The MCDI interface can in fact do multiple erase blocks at once;
461 * but erasing may be slow, so we make multiple calls here to avoid
462 * tripping the MCDI RPC timeout. */
463 while (offset
< end
) {
464 rc
= efx_mcdi_nvram_erase(efx
, part
->mcdi
.nvram_type
, offset
,
474 static int siena_mtd_write(struct mtd_info
*mtd
, loff_t start
,
475 size_t len
, size_t *retlen
, const u8
*buffer
)
477 struct efx_mtd_partition
*part
= to_efx_mtd_partition(mtd
);
478 struct efx_mtd
*efx_mtd
= mtd
->priv
;
479 struct efx_nic
*efx
= efx_mtd
->efx
;
480 loff_t offset
= start
;
481 loff_t end
= min_t(loff_t
, start
+ len
, mtd
->size
);
485 if (!part
->mcdi
.updating
) {
486 rc
= efx_mcdi_nvram_update_start(efx
, part
->mcdi
.nvram_type
);
489 part
->mcdi
.updating
= 1;
492 while (offset
< end
) {
493 chunk
= min_t(size_t, end
- offset
, EFX_MCDI_NVRAM_LEN_MAX
);
494 rc
= efx_mcdi_nvram_write(efx
, part
->mcdi
.nvram_type
, offset
,
502 *retlen
= offset
- start
;
506 static int siena_mtd_sync(struct mtd_info
*mtd
)
508 struct efx_mtd_partition
*part
= to_efx_mtd_partition(mtd
);
509 struct efx_mtd
*efx_mtd
= mtd
->priv
;
510 struct efx_nic
*efx
= efx_mtd
->efx
;
513 if (part
->mcdi
.updating
) {
514 part
->mcdi
.updating
= 0;
515 rc
= efx_mcdi_nvram_update_finish(efx
, part
->mcdi
.nvram_type
);
521 static struct efx_mtd_ops siena_mtd_ops
= {
522 .read
= siena_mtd_read
,
523 .erase
= siena_mtd_erase
,
524 .write
= siena_mtd_write
,
525 .sync
= siena_mtd_sync
,
/* Static description of one Siena NVRAM type: which port it belongs
 * to and the partition type name to expose.  (Members reconstructed
 * from the { port, name } initializers and the info->port/info->name
 * uses below — confirm against upstream sfc.)
 */
struct siena_nvram_type_info {
	int port;
	const char *name;
};
533 static struct siena_nvram_type_info siena_nvram_types
[] = {
534 [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO
] = { 0, "sfc_dummy_phy" },
535 [MC_CMD_NVRAM_TYPE_MC_FW
] = { 0, "sfc_mcfw" },
536 [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP
] = { 0, "sfc_mcfw_backup" },
537 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0
] = { 0, "sfc_static_cfg" },
538 [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1
] = { 1, "sfc_static_cfg" },
539 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
] = { 0, "sfc_dynamic_cfg" },
540 [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1
] = { 1, "sfc_dynamic_cfg" },
541 [MC_CMD_NVRAM_TYPE_EXP_ROM
] = { 0, "sfc_exp_rom" },
542 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0
] = { 0, "sfc_exp_rom_cfg" },
543 [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1
] = { 1, "sfc_exp_rom_cfg" },
544 [MC_CMD_NVRAM_TYPE_PHY_PORT0
] = { 0, "sfc_phy_fw" },
545 [MC_CMD_NVRAM_TYPE_PHY_PORT1
] = { 1, "sfc_phy_fw" },
548 static int siena_mtd_probe_partition(struct efx_nic
*efx
,
549 struct efx_mtd
*efx_mtd
,
550 unsigned int part_id
,
553 struct efx_mtd_partition
*part
= &efx_mtd
->part
[part_id
];
554 struct siena_nvram_type_info
*info
;
555 size_t size
, erase_size
;
559 if (type
>= ARRAY_SIZE(siena_nvram_types
))
562 info
= &siena_nvram_types
[type
];
564 if (info
->port
!= efx_port_num(efx
))
567 rc
= efx_mcdi_nvram_info(efx
, type
, &size
, &erase_size
, &protected);
571 return -ENODEV
; /* hide it */
573 part
->mcdi
.nvram_type
= type
;
574 part
->type_name
= info
->name
;
576 part
->mtd
.type
= MTD_NORFLASH
;
577 part
->mtd
.flags
= MTD_CAP_NORFLASH
;
578 part
->mtd
.size
= size
;
579 part
->mtd
.erasesize
= erase_size
;
584 static int siena_mtd_get_fw_subtypes(struct efx_nic
*efx
,
585 struct efx_mtd
*efx_mtd
)
587 struct efx_mtd_partition
*part
;
588 uint16_t fw_subtype_list
[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN
/
592 rc
= efx_mcdi_get_board_cfg(efx
, NULL
, fw_subtype_list
);
596 efx_for_each_partition(part
, efx_mtd
)
597 part
->mcdi
.fw_subtype
= fw_subtype_list
[part
->mcdi
.nvram_type
];
602 static int siena_mtd_probe(struct efx_nic
*efx
)
604 struct efx_mtd
*efx_mtd
;
611 rc
= efx_mcdi_nvram_types(efx
, &nvram_types
);
615 efx_mtd
= kzalloc(sizeof(*efx_mtd
) +
616 hweight32(nvram_types
) * sizeof(efx_mtd
->part
[0]),
621 efx_mtd
->name
= "Siena NVRAM manager";
623 efx_mtd
->ops
= &siena_mtd_ops
;
626 efx_mtd
->n_parts
= 0;
628 while (nvram_types
!= 0) {
629 if (nvram_types
& 1) {
630 rc
= siena_mtd_probe_partition(efx
, efx_mtd
,
631 efx_mtd
->n_parts
, type
);
634 else if (rc
!= -ENODEV
)
641 rc
= siena_mtd_get_fw_subtypes(efx
, efx_mtd
);
645 rc
= efx_mtd_probe_device(efx
, efx_mtd
);