// SPDX-License-Identifier: GPL-2.0
/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/insn.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>

static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BD_SIZE_BYTES_64;
	else
		return MPX_BD_SIZE_BYTES_32;
}

static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_SIZE_BYTES_64;
	else
		return MPX_BT_SIZE_BYTES_32;
}
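
/*
 * For scale: a bounds directory is 2GB on 64-bit and 4MB on 32-bit,
 * and each bounds table is 4MB on 64-bit and 16KB on 32-bit. See the
 * comments above allocate_bt() and do_mpx_bt_fault() below.
 */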

/*
 * This is really a simplified "vm_mmap". It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 */
static unsigned long mpx_mmap(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, populate;

	/* Only bounds tables can be allocated here */
	if (len != mpx_bt_size_bytes(mm))
		return -EINVAL;

	down_write(&mm->mmap_sem);
	addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate, NULL);
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

	return addr;
}

enum reg_type {
	REG_TYPE_RM = 0,
	REG_TYPE_INDEX,
	REG_TYPE_BASE,
};

static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
			  enum reg_type type)
{
	int regno = 0;

	static const int regoff[] = {
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, bx),
		offsetof(struct pt_regs, sp),
		offsetof(struct pt_regs, bp),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
#endif
	};
	int nr_registers = ARRAY_SIZE(regoff);
	/*
	 * Don't possibly decode a 32-bit instruction as
	 * reading a 64-bit-only register.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
		nr_registers -= 8;

	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
		if (X86_REX_B(insn->rex_prefix.value))
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
		if (X86_REX_X(insn->rex_prefix.value))
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
		if (X86_REX_B(insn->rex_prefix.value))
			regno += 8;
		break;

	default:
		pr_err("invalid register type");
		BUG();
		break;
	}

	if (regno >= nr_registers) {
		WARN_ONCE(1, "decoded an instruction with an invalid register");
		return -EINVAL;
	}
	return regoff[regno];
}
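
/*
 * Example: ModRM.rm == 1 normally names %rcx/%ecx; with REX.B set it
 * becomes register number 9, so the offset of pt_regs.r9 is returned
 * instead.
 */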

/*
 * Return the address being referenced by the instruction.
 * For rm==3, return the content of the rm register.
 * For rm!=3, calculate the address using the SIB byte and displacement.
 */
static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
{
	unsigned long addr, base, indx;
	int addr_offset, base_offset, indx_offset;
	insn_byte_t sib;

	insn_get_modrm(insn);
	insn_get_sib(insn);
	sib = insn->sib.value;

	if (X86_MODRM_MOD(insn->modrm.value) == 3) {
		addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
		if (addr_offset < 0)
			goto out_err;
		addr = regs_get_register(regs, addr_offset);
	} else {
		if (insn->sib.nbytes) {
			base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
			if (base_offset < 0)
				goto out_err;

			indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
			if (indx_offset < 0)
				goto out_err;

			base = regs_get_register(regs, base_offset);
			indx = regs_get_register(regs, indx_offset);
			addr = base + indx * (1 << X86_SIB_SCALE(sib));
		} else {
			addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
			if (addr_offset < 0)
				goto out_err;
			addr = regs_get_register(regs, addr_offset);
		}
		addr += insn->displacement.value;
	}
	return (void __user *)addr;
out_err:
	return (void __user *)-1;
}
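
/*
 * Example: a memory operand with SIB base = %rax, index = %rcx, a
 * scale field of 2 and a displacement of 0x10 decodes above to
 * rax + rcx * (1 << 2) + 0x10.
 */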

static int mpx_insn_decode(struct insn *insn,
			   struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int x86_64 = !test_thread_flag(TIF_IA32);
	int not_copied;
	int nr_copied;

	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
	nr_copied = sizeof(buf) - not_copied;
	/*
	 * The decoder _should_ fail nicely if we pass it a short buffer.
	 * But, let's not depend on that implementation detail. If we
	 * did not get anything, just error out now.
	 */
	if (!nr_copied)
		return -EFAULT;
	insn_init(insn, buf, nr_copied, x86_64);
	insn_get_length(insn);
	/*
	 * copy_from_user() tries to get as many bytes as we could see in
	 * the largest possible instruction. If the instruction we are
	 * after is shorter than that _and_ we attempt to copy from
	 * something unreadable, we might get a short read. This is OK
	 * as long as the read did not stop in the middle of the
	 * instruction. Check to see if we got a partial instruction.
	 */
	if (nr_copied < insn->length)
		return -EFAULT;

	insn_get_opcode(insn);
	/*
	 * We only _really_ need to decode bndcl/bndcn/bndcu.
	 * Error out on anything else.
	 */
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;

	return 0;
bad_opcode:
	return -EINVAL;
}
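
/*
 * Note that all MPX instructions share the 0x0f 0x1a/0x1b opcode
 * space; the mandatory prefix (0xf2/0xf3/none) is what separates
 * bndcl/bndcu/bndcn from bndmov/bndldx/bndstx. The check above is
 * deliberately no stricter than that: every instruction in this
 * space uses the same ModRM/SIB encoding, which is all the siginfo
 * generation below needs.
 */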

/*
 * If a bounds overflow occurs then a #BR is generated. This
 * function decodes MPX instructions to get the violation address
 * and sets this address into the extended struct siginfo.
 *
 * Note that this is not a super precise way of doing this.
 * Userspace could have, by the time we get here, written
 * anything it wants into the instructions. We can not
 * trust anything about them. They might not be valid
 * instructions or might encode invalid registers, etc...
 *
 * The caller is expected to kfree() the returned siginfo_t.
 */
siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
{
	const struct mpx_bndreg_state *bndregs;
	const struct mpx_bndreg *bndreg;
	siginfo_t *info = NULL;
	struct insn insn;
	uint8_t bndregno;
	int err;

	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;

	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* get bndregs field from current task's xsave area */
	bndregs = get_xsave_field_ptr(XFEATURE_MASK_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* now go select the individual register in the set of 4 */
	bndreg = &bndregs->bndreg[bndregno];

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_out;
	}
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode. Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
	info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
	info->si_addr_lsb = 0;
	info->si_signo = SIGSEGV;
	info->si_errno = 0;
	info->si_code = SEGV_BNDERR;
	info->si_addr = mpx_get_addr_ref(&insn, regs);
	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->si_addr == (void __user *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	trace_mpx_bounds_register_exception(info->si_addr, bndreg);
	return info;
err_out:
	/* info might be NULL, but kfree() handles that */
	kfree(info);
	return ERR_PTR(err);
}
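
/*
 * One's-complement example: a bounds register holding [0, 0xffff] has
 * lower_bound == 0 and upper_bound == ~0xffff in hardware, so the '~'
 * above recovers 0xffff for si_upper.
 */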

static __user void *mpx_get_bounds_dir(void)
{
	const struct mpx_bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}

int mpx_enable_management(void)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = current->mm;
	int ret = 0;

	/*
	 * The userspace runtime is responsible for allocating the
	 * bounds directory. It then saves the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enables MPX
	 * through the XRSTOR instruction.
	 *
	 * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
	 * expected to be relatively expensive. Storing the bounds
	 * directory here means that we do not have to do xsave in the
	 * unmap path; we can just use mm->context.bd_addr instead.
	 */
	bd_base = mpx_get_bounds_dir();
	down_write(&mm->mmap_sem);

	/* MPX doesn't support addresses above 47 bits yet. */
	if (find_vma(mm, DEFAULT_MAP_WINDOW)) {
		pr_warn_once("%s (%d): MPX cannot handle addresses "
				"above 47-bits. Disabling.",
				current->comm, current->pid);
		ret = -ENXIO;
		goto out;
	}

	mm->context.bd_addr = bd_base;
	if (mm->context.bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;
out:
	up_write(&mm->mmap_sem);
	return ret;
}

int mpx_disable_management(void)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}

static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
		unsigned long *curval,
		unsigned long __user *addr,
		unsigned long old_val, unsigned long new_val)
{
	int ret;
	/*
	 * user_atomic_cmpxchg_inatomic() actually uses the sizeof()
	 * of the pointer that we pass to it to figure out how much
	 * data to cmpxchg. We have to be careful here not to
	 * pass a pointer to a 64-bit data type when we only want
	 * a 32-bit copy.
	 */
	if (is_64bit_mm(mm)) {
		ret = user_atomic_cmpxchg_inatomic(curval,
				addr, old_val, new_val);
	} else {
		u32 uninitialized_var(curval_32);
		u32 old_val_32 = old_val;
		u32 new_val_32 = new_val;
		u32 __user *addr_32 = (u32 __user *)addr;

		ret = user_atomic_cmpxchg_inatomic(&curval_32,
				addr_32, old_val_32, new_val_32);
		*curval = curval_32;
	}
	return ret;
}
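
/*
 * Like user_atomic_cmpxchg_inatomic() itself, this returns nonzero
 * only if the user access faulted. A zero return means the cmpxchg
 * executed; whether it *succeeded* must be judged by comparing
 * *curval (the value found at 'addr') against the expected old value.
 */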

/*
 * With 32-bit mode, a bounds directory is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, a bounds directory is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int allocate_bt(struct mm_struct *mm, long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	unsigned long bd_new_entry;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table in to the
	 * bounds directory entry out in userspace memory. Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val, bd_entry,
				   expected_old_val, bd_new_entry);
	if (ret)
		goto out_unmap;

	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails. Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry. Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set. Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to a bounds table.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	trace_mpx_new_bounds_table(bt_addr);
	return 0;
out_unmap:
	vm_munmap(bt_addr, mpx_bt_size_bytes(mm));
	return ret;
}

/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory. If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * With 32-bit mode, the size of the BD is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, the size of the BD is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int do_mpx_bt_fault(void)
{
	unsigned long bd_entry, bd_base;
	const struct mpx_bndcsr *bndcsr;
	struct mm_struct *mm = current->mm;

	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + mpx_bd_size_bytes(mm)))
		return -EINVAL;

	return allocate_bt(mm, (long __user *)bd_entry);
}

int mpx_handle_bd_fault(void)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;

	return do_mpx_bt_fault();
}

/*
 * A thin wrapper around get_user_pages(). Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;

	gup_ret = get_user_pages((unsigned long)addr, nr_pages,
			write ? FOLL_WRITE : 0, NULL, NULL);
	/*
	 * get_user_pages() returns number of pages gotten.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* must have gup'd a page and gup_ret > 0, success */
	return 0;
}

static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
		unsigned long bd_entry)
{
	unsigned long bt_addr = bd_entry;
	int align_to_bytes;
	/*
	 * Bit 0 in a bt_entry is always the valid bit.
	 */
	bt_addr &= ~MPX_BD_ENTRY_VALID_FLAG;
	/*
	 * Tables are naturally aligned at 8-byte boundaries
	 * on 64-bit and 4-byte boundaries on 32-bit. The
	 * documentation makes it appear that the low bits
	 * are ignored by the hardware, so we do the same.
	 */
	if (is_64bit_mm(mm))
		align_to_bytes = 8;
	else
		align_to_bytes = 4;
	bt_addr &= ~(align_to_bytes-1);
	return bt_addr;
}
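
/*
 * Example (64-bit): a directory entry of 0x7f0012300005 has the valid
 * bit set; clearing bit 0 and masking down to 8-byte alignment yields
 * the table base 0x7f0012300000.
 */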

/*
 * We only want to do a 4-byte get_user() on 32-bit. Otherwise,
 * we might run off the end of the bounds table if we are on
 * a 64-bit kernel and try to get 8 bytes.
 */
static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
			     long __user *bd_entry_ptr)
{
	u32 bd_entry_32;
	int ret;

	if (is_64bit_mm(mm))
		return get_user(*bd_entry_ret, bd_entry_ptr);

	/*
	 * Note that get_user() uses the type of the *pointer* to
	 * establish the size of the get, not the destination.
	 */
	ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
	*bd_entry_ret = bd_entry_32;
	return ret;
}

/*
 * Get the base of the bounds table pointed to by a specific bounds
 * directory entry.
 */
static int get_bt_addr(struct mm_struct *mm,
		       long __user *bd_entry_ptr,
		       unsigned long *bt_addr_result)
{
	int ret;
	int valid_bit;
	unsigned long bd_entry;
	unsigned long bt_addr;

	if (!access_ok(VERIFY_READ, (bd_entry_ptr), sizeof(*bd_entry_ptr)))
		return -EFAULT;

	while (1) {
		int need_write = 0;

		pagefault_disable();
		ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry_ptr, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = bd_entry & MPX_BD_ENTRY_VALID_FLAG;
	bt_addr = mpx_bd_entry_to_bt_addr(mm, bd_entry);

	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty. If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong. This
	 * -EINVAL return will cause a SIGSEGV.
	 */
	if (!valid_bit && bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bd entry? That is OK. It
	 * just means there was no bounds table for this memory. Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!valid_bit)
		return -ENOENT;

	*bt_addr_result = bt_addr;
	return 0;
}
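
/*
 * In summary, get_bt_addr() returns 0 with *bt_addr_result set when a
 * valid table exists, -ENOENT for a completely empty directory entry,
 * -EINVAL for a corrupt one, and -EFAULT (or another error) when the
 * fault on the directory entry could not be resolved.
 */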

static inline int bt_entry_size_bytes(struct mm_struct *mm)
{
	if (is_64bit_mm(mm))
		return MPX_BT_ENTRY_BYTES_64;
	else
		return MPX_BT_ENTRY_BYTES_32;
}

/*
 * Takes a virtual address and turns it into the offset in bytes
 * inside of the bounds table where the bounds table entry
 * controlling 'addr' can be found.
 */
static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
		unsigned long addr)
{
	unsigned long bt_table_nr_entries;
	unsigned long offset = addr;

	if (is_64bit_mm(mm)) {
		/* Bottom 3 bits are ignored on 64-bit */
		offset >>= 3;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_64;
	} else {
		/* Bottom 2 bits are ignored on 32-bit */
		offset >>= 2;
		bt_table_nr_entries = MPX_BT_NR_ENTRIES_32;
	}
	/*
	 * We know the size of the table in to which we are
	 * indexing, and we have eliminated all the low bits
	 * which are ignored for indexing.
	 *
	 * Mask out all the high bits which we do not need
	 * to index in to the table. Note that the tables
	 * are always powers of two so this gives us a proper
	 * mask.
	 */
	offset &= (bt_table_nr_entries-1);
	/*
	 * We now have an entry offset in terms of *entries* in
	 * the table. We need to scale it back up to bytes.
	 */
	offset *= bt_entry_size_bytes(mm);
	return offset;
}
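
/*
 * Worked example, 64-bit: each entry guards 8 bytes of virtual
 * address space, so this computes
 *	offset = ((addr >> 3) & (MPX_BT_NR_ENTRIES_64 - 1)) * 32
 * assuming the usual MPX layout of four 8-byte fields per 64-bit
 * entry (lower bound, upper bound, pointer value, reserved).
 */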

/*
 * How much virtual address space does a single bounds
 * directory entry cover?
 *
 * Note, we need a long long because 4GB doesn't fit in
 * to a long on 32-bit.
 */
static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
{
	unsigned long long virt_space;
	unsigned long long GB = (1ULL << 30);

	/*
	 * This covers 32-bit emulation as well as 32-bit kernels
	 * running on 64-bit hardware.
	 */
	if (!is_64bit_mm(mm))
		return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;

	/*
	 * 'x86_virt_bits' returns what the hardware is capable
	 * of, and returns the full >32-bit address space when
	 * running 32-bit kernels on 64-bit hardware.
	 */
	virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
	return virt_space / MPX_BD_NR_ENTRIES_64;
}
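
/*
 * Worked example: on 32-bit, a 4MB directory of 4-byte entries has
 * 2^20 entries, so each one covers 4GB / 2^20 = 4KB. On 64-bit with
 * 48 virtual address bits, a 2GB directory of 8-byte entries has
 * 2^28 entries, so each one covers 2^48 / 2^28 = 1MB.
 */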

/*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
		unsigned long bt_addr,
		unsigned long start_mapping, unsigned long end_mapping)
{
	struct vm_area_struct *vma;
	unsigned long addr, len;
	unsigned long start;
	unsigned long end;

	/*
	 * If we 'end' on a boundary, the offset will be 0 which
	 * is not what we want. Back it up a byte to get the
	 * last bt entry. Then once we have the entry itself,
	 * move 'end' back up by the table entry size.
	 */
	start = bt_addr + mpx_get_bt_entry_offset_bytes(mm, start_mapping);
	end = bt_addr + mpx_get_bt_entry_offset_bytes(mm, end_mapping - 1);
	/*
	 * Move end back up by one entry. Among other things
	 * this ensures that it remains page-aligned and does
	 * not screw up zap_page_range().
	 */
	end += bt_entry_size_bytes(mm);

	/*
	 * Find the first overlapping vma. If vma->vm_start > start, there
	 * will be a hole in the bounds table. This -EINVAL return will
	 * cause a SIGSEGV.
	 */
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EINVAL;

	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split. So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here. If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error. This
		 * probably results in a SIGSEGV.
		 */
		if (!(vma->vm_flags & VM_MPX))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len);
		trace_mpx_unmap_zap(addr, addr+len);

		/* Do not dereference a NULL vma after the last VMA. */
		vma = vma->vm_next;
		if (!vma)
			break;
		addr = vma->vm_start;
	}
	return 0;
}

static unsigned long mpx_get_bd_entry_offset(struct mm_struct *mm,
		unsigned long addr)
{
	/*
	 * There are several ways to derive the bd offsets. We
	 * use the following approach here:
	 * 1. We know the size of the virtual address space
	 * 2. We know the number of entries in a bounds table
	 * 3. We know that each entry covers a fixed amount of
	 *    virtual address space.
	 * So, we can just divide the virtual address by the
	 * virtual space used by one entry to determine which
	 * entry "controls" the given virtual address.
	 */
	if (is_64bit_mm(mm)) {
		int bd_entry_size = 8; /* 64-bit pointer */
		/*
		 * Take the 64-bit addressing hole into account.
		 */
		addr &= ((1UL << boot_cpu_data.x86_virt_bits) - 1);
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	} else {
		int bd_entry_size = 4; /* 32-bit pointer */
		/*
		 * 32-bit has no hole so this case needs no mask.
		 */
		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
	}
	/*
	 * The two return calls above are exact copies. If we
	 * pull out a single copy and put it in here, gcc won't
	 * realize that we're doing a power-of-2 divide and use
	 * shifts. It uses a real divide. If we put them up
	 * there, it manages to figure it out (gcc 4.8.3).
	 */
}
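
/*
 * Continuing the worked example above: on a 64-bit mm with 48 virtual
 * address bits, this comes out to (addr / 1MB) * 8.
 */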

static int unmap_entire_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long uninitialized_var(actual_old_val);
	int ret;

	while (1) {
		int need_write = 1;
		unsigned long cleared_bd_entry = 0;

		pagefault_disable();
		ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
				bd_entry, expected_old_val, cleared_bd_entry);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * That is OK, since we were both trying to do
		 * the same thing. Declare success.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry. We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated. Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}
	/*
	 * Note, we are likely being called under do_munmap() already. To
	 * avoid recursion, do_munmap() will check whether it comes
	 * from one bounds table through the VM_MPX flag.
	 */
	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm), NULL);
}

static int try_unmap_single_bt(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	/*
	 * "bta" == Bounds Table Area: the area controlled by the
	 * bounds table that we are unmapping.
	 */
	unsigned long bta_start_vaddr = start & ~(bd_entry_virt_space(mm)-1);
	unsigned long bta_end_vaddr = bta_start_vaddr + bd_entry_virt_space(mm);
	unsigned long uninitialized_var(bt_addr);
	void __user *bde_vaddr;
	int ret;
	/*
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole. This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	/*
	 * Do not count other MPX bounds table VMAs as neighbors.
	 * Although theoretically possible, we do not allow bounds
	 * tables for bounds tables so our heads do not explode.
	 * If we count them as neighbors here, we may end up with
	 * lots of tables even though we have no actual table
	 * entries in use.
	 */
	while (next && (next->vm_flags & VM_MPX))
		next = next->vm_next;
	while (prev && (prev->vm_flags & VM_MPX))
		prev = prev->vm_prev;
	/*
	 * We know 'start' and 'end' lie within an area controlled
	 * by a single bounds table. See if there are any other
	 * VMAs controlled by that bounds table. If there are not
	 * then we can "expand" the area we are unmapping to possibly
	 * cover the entire table. (Note: a second find_vma_prev()
	 * here would clobber the MPX-skipping done just above, so
	 * 'prev' and 'next' are used as-is.)
	 */
	if ((!prev || prev->vm_end <= bta_start_vaddr) &&
	    (!next || next->vm_start >= bta_end_vaddr)) {
		/*
		 * No neighbor VMAs controlled by the same bounds
		 * table. Try to unmap the whole thing.
		 */
		start = bta_start_vaddr;
		end = bta_end_vaddr;
	}

	bde_vaddr = mm->context.bd_addr + mpx_get_bd_entry_offset(mm, start);
	ret = get_bt_addr(mm, bde_vaddr, &bt_addr);
	/*
	 * No bounds table there, so nothing to unmap.
	 */
	if (ret == -ENOENT)
		return 0;
	if (ret)
		return ret;
	/*
	 * We are unmapping an entire table. Either because the
	 * unmap that started this whole process was large enough
	 * to cover an entire table, or that the unmap was small
	 * but was the area covered by a bounds table.
	 */
	if ((start == bta_start_vaddr) &&
	    (end == bta_end_vaddr))
		return unmap_entire_bt(mm, bde_vaddr, bt_addr);
	return zap_bt_entries_mapping(mm, bt_addr, start, end);
}

static int mpx_unmap_tables(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned long one_unmap_start;
	trace_mpx_unmap_search(start, end);

	one_unmap_start = start;
	while (one_unmap_start < end) {
		int ret;
		unsigned long next_unmap_start = ALIGN(one_unmap_start+1,
						       bd_entry_virt_space(mm));
		unsigned long one_unmap_end = end;
		/*
		 * If the end is beyond the current bounds table,
		 * move it back so we only deal with a single one
		 * at a time.
		 */
		if (one_unmap_end > next_unmap_start)
			one_unmap_end = next_unmap_start;
		ret = try_unmap_single_bt(mm, one_unmap_start, one_unmap_end);
		if (ret)
			return ret;

		one_unmap_start = next_unmap_start;
	}
	return 0;
}
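
/*
 * Example: on a 64-bit mm where each directory entry covers 1MB, an
 * unmap of [0x1fff000, 0x2001000) is handled as two calls here:
 * [0x1fff000, 0x2000000) and then [0x2000000, 0x2001000).
 */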

/*
 * Free unused bounds tables covered in a virtual address region being
 * munmap()ed. Assume end > start.
 *
 * This function will be called by do_munmap(), and the VMAs covering
 * the virtual address region start...end have already been split if
 * necessary, and the 'vma' is the first vma in this range (start -> end).
 */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int ret;

	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * This will look across the entire 'start -> end' range,
	 * and find all of the non-VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX vma is found in the range
	 * (start->end), we will not continue follow-up work. This
	 * recursion represents having bounds tables for bounds tables,
	 * which should not occur normally. Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);

	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}

/* MPX cannot handle addresses above 47 bits yet. */
unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
		unsigned long flags)
{
	if (!kernel_managing_mpx_tables(current->mm))
		return addr;
	if (addr + len <= DEFAULT_MAP_WINDOW)
		return addr;
	if (flags & MAP_FIXED)
		return -ENOMEM;

	/*
	 * Requested len is larger than the whole area we're allowed to map in.
	 * Resetting hinting address wouldn't do much good -- fail early.
	 */
	if (len > DEFAULT_MAP_WINDOW)
		return -ENOMEM;

	/* Look for unmap area within DEFAULT_MAP_WINDOW */
	return 0;
}