/*
 * EFI capsule support.
 *
 * Copyright 2013 Intel Corporation; author Matt Fleming
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 */
#define pr_fmt(fmt) "efi: " fmt

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/efi.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
typedef struct {
	u64 length;
	u64 data;
} efi_capsule_block_desc_t;
static bool capsule_pending;
static bool stop_capsules;
static int efi_reset_type = -1;
/*
 * capsule_mutex serialises access to capsule_pending, efi_reset_type
 * and stop_capsules.
 */
static DEFINE_MUTEX(capsule_mutex);
/**
 * efi_capsule_pending - has a capsule been passed to the firmware?
 * @reset_type: store the type of EFI reset if capsule is pending
 *
 * To ensure that the registered capsule is processed correctly by the
 * firmware we need to perform a specific type of reset. If a capsule is
 * pending return the reset type in @reset_type.
 *
 * This function will race with callers of efi_capsule_update(), for
 * example, calling this function while somebody else is in
 * efi_capsule_update() but hasn't reached efi_capsule_update_locked()
 * will miss the updates to capsule_pending and efi_reset_type after
 * efi_capsule_update_locked() completes.
 *
 * A non-racy use is from platform reboot code because we use
 * system_state to ensure no capsules can be sent to the firmware once
 * we're at SYSTEM_RESTART. See efi_capsule_update_locked().
 */
bool efi_capsule_pending(int *reset_type)
{
	if (!capsule_pending)
		return false;

	if (reset_type)
		*reset_type = efi_reset_type;

	return true;
}
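/*
 * Illustrative sketch (not part of this file; the function name below is
 * hypothetical): platform reboot code can consult efi_capsule_pending()
 * late in the restart path to decide whether the firmware's ResetSystem()
 * service of the reported type must be used so that the pending capsule is
 * actually processed.
 *
 *	static void example_machine_restart(void)
 *	{
 *		int reset_type;
 *
 *		if (efi_capsule_pending(&reset_type) &&
 *		    reset_type == EFI_RESET_WARM)
 *			efi.reset_system(EFI_RESET_WARM, EFI_SUCCESS, 0, NULL);
 *	}
 */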
/*
 * Whitelist of EFI capsule flags that we support.
 *
 * We do not handle EFI_CAPSULE_INITIATE_RESET because that would
 * require us to prepare the kernel for reboot. Refuse to load any
 * capsules with that flag and any other flags that we do not know how
 * to handle.
 */
#define EFI_CAPSULE_SUPPORTED_FLAG_MASK			\
	(EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)
/**
 * efi_capsule_supported - does the firmware support the capsule?
 * @guid: vendor guid of capsule
 * @flags: capsule flags
 * @size: size of capsule data
 * @reset: the reset type required for this capsule
 *
 * Check whether a capsule with @flags is supported by the firmware
 * and that @size doesn't exceed the maximum size for a capsule.
 *
 * No attempt is made to check @reset against the reset type required
 * by any pending capsules because of the races involved.
 */
int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
{
	efi_capsule_header_t capsule;
	efi_capsule_header_t *cap_list[] = { &capsule };
	efi_status_t status;
	u64 max_size;

	if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
		return -EINVAL;

	capsule.headersize = capsule.imagesize = sizeof(capsule);
	memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
	capsule.flags = flags;

	status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
	if (status != EFI_SUCCESS)
		return efi_status_to_err(status);

	if (size > max_size)
		return -ENOSPC;

	return 0;
}
EXPORT_SYMBOL_GPL(efi_capsule_supported);
/*
 * Every scatter gather list (block descriptor) page must end with a
 * continuation pointer. The last continuation pointer of the last
 * page must be zero to mark the end of the chain.
 */
#define SGLIST_PER_PAGE	((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
/*
 * How many scatter gather list (block descriptor) pages do we need
 * to map @count pages?
 */
static inline unsigned int sg_pages_num(unsigned int count)
{
	return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
}
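/*
 * Worked example of the arithmetic above (illustrative only, assuming the
 * common 4 KiB PAGE_SIZE): each efi_capsule_block_desc_t is 16 bytes, so a
 * page holds 256 descriptors, the last of which is reserved for the
 * continuation pointer, giving SGLIST_PER_PAGE == 255. A capsule occupying
 * 1000 data pages therefore needs
 * sg_pages_num(1000) == DIV_ROUND_UP(1000, 255) == 4 descriptor pages.
 */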
/**
 * efi_capsule_update_locked - pass a single capsule to the firmware
 * @capsule: capsule to send to the firmware
 * @sg_pages: array of scatter gather (block descriptor) pages
 * @reset: the reset type required for @capsule
 *
 * Since this function must be called under capsule_mutex, check
 * whether efi_reset_type will conflict with @reset, and atomically
 * set it and capsule_pending if a capsule was successfully sent to
 * the firmware.
 *
 * We also check to see if the system is about to restart, and if so,
 * abort. This avoids races between efi_capsule_update() and
 * efi_capsule_pending().
 */
static int
efi_capsule_update_locked(efi_capsule_header_t *capsule,
			  struct page **sg_pages, int reset)
{
	efi_physical_addr_t sglist_phys;
	efi_status_t status;

	lockdep_assert_held(&capsule_mutex);
	/*
	 * If someone has already registered a capsule that requires a
	 * different reset type, we're out of luck and must abort.
	 */
	if (efi_reset_type >= 0 && efi_reset_type != reset) {
		pr_err("Conflicting capsule reset type %d (%d).\n",
		       reset, efi_reset_type);
		return -EINVAL;
	}
	/*
	 * If the system is getting ready to restart it may have
	 * called efi_capsule_pending() to make decisions (such as
	 * whether to force an EFI reboot), and we're racing against
	 * that call. Abort in that case.
	 */
	if (unlikely(stop_capsules)) {
		pr_warn("Capsule update raced with reboot, aborting.\n");
		return -EINVAL;
	}
	sglist_phys = page_to_phys(sg_pages[0]);

	status = efi.update_capsule(&capsule, 1, sglist_phys);
	if (status == EFI_SUCCESS) {
		capsule_pending = true;
		efi_reset_type = reset;
	}

	return efi_status_to_err(status);
}
/**
 * efi_capsule_update - send a capsule to the firmware
 * @capsule: capsule to send to firmware
 * @pages: an array of capsule data pages
 *
 * Build a scatter gather list with EFI capsule block descriptors to
 * map the capsule described by @capsule with its data in @pages and
 * send it to the firmware via the UpdateCapsule() runtime service.
 *
 * @capsule must be a virtual mapping of the complete capsule update in the
 * kernel address space, as the capsule can be consumed immediately.
 * An efi_capsule_header_t that describes the entire contents of the capsule
 * must be at the start of the first data page.
 *
 * Even though this function will validate that the firmware supports
 * the capsule guid, users will likely want to check that
 * efi_capsule_supported() succeeds before calling this function
 * because it makes it easier to print helpful error messages.
 *
 * If the capsule is successfully submitted to the firmware, any
 * subsequent calls to efi_capsule_pending() will return true. @pages
 * must not be released or modified if this function returns
 * successfully.
 *
 * Callers must be prepared for this function to fail, which can
 * happen if we raced with system reboot or if there is already a
 * pending capsule that has a reset type that conflicts with the one
 * required by @capsule. Do NOT use efi_capsule_pending() to detect
 * this conflict since that would be racy. Instead, submit the capsule
 * to efi_capsule_update() and check the return value.
 *
 * Return 0 on success, a converted EFI status code on failure.
 */
int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
{
	u32 imagesize = capsule->imagesize;
	efi_guid_t guid = capsule->guid;
	unsigned int count, sg_count;
	u32 flags = capsule->flags;
	struct page **sg_pages;
	int rv, reset_type;
	int i, j;

	rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
	if (rv)
		return rv;

	count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
	sg_count = sg_pages_num(count);

	sg_pages = kcalloc(sg_count, sizeof(*sg_pages), GFP_KERNEL);
	if (!sg_pages)
		return -ENOMEM;
	for (i = 0; i < sg_count; i++) {
		sg_pages[i] = alloc_page(GFP_KERNEL);
		if (!sg_pages[i]) {
			rv = -ENOMEM;
			goto out;
		}
	}
	for (i = 0; i < sg_count; i++) {
		efi_capsule_block_desc_t *sglist;

		sglist = kmap(sg_pages[i]);

		for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
			u64 sz = min_t(u64, imagesize,
				       PAGE_SIZE - (u64)*pages % PAGE_SIZE);

			sglist[j].length = sz;
			sglist[j].data = *pages++;

			imagesize -= sz;
			count--;
		}

		/* Continuation pointer */
		sglist[j].length = 0;

		if (i + 1 == sg_count)
			sglist[j].data = 0;
		else
			sglist[j].data = page_to_phys(sg_pages[i + 1]);

		kunmap(sg_pages[i]);
	}
	mutex_lock(&capsule_mutex);
	rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
	mutex_unlock(&capsule_mutex);

out:
	for (i = 0; rv && i < sg_count; i++) {
		if (sg_pages[i])
			__free_page(sg_pages[i]);
	}

	kfree(sg_pages);
	return rv;
}
EXPORT_SYMBOL_GPL(efi_capsule_update);
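/*
 * Illustrative sketch of how a caller might drive the API above (not part
 * of this file; the function name is hypothetical). Per the kernel-doc for
 * efi_capsule_update(), the caller checks efi_capsule_supported() first so
 * it can print a helpful error, and it must keep @pages alive after a
 * successful submission:
 *
 *	int example_submit(efi_capsule_header_t *capsule, phys_addr_t *pages)
 *	{
 *		int reset_type, ret;
 *
 *		ret = efi_capsule_supported(capsule->guid, capsule->flags,
 *					    capsule->imagesize, &reset_type);
 *		if (ret) {
 *			pr_err("capsule not supported by firmware\n");
 *			return ret;
 *		}
 *
 *		ret = efi_capsule_update(capsule, pages);
 *		if (ret)
 *			pr_err("failed to submit capsule\n");
 *
 *		return ret;
 *	}
 */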
static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
{
	mutex_lock(&capsule_mutex);
	stop_capsules = true;
	mutex_unlock(&capsule_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block capsule_reboot_nb = {
	.notifier_call = capsule_reboot_notify,
};

static int __init capsule_reboot_register(void)
{
	return register_reboot_notifier(&capsule_reboot_nb);
}
core_initcall(capsule_reboot_register);