gru: add hugepage support
drivers/misc/sgi-gru/grufault.c
/*
 * SN Platform GRU Driver
 *
 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/* Return codes for vtop functions */
#define VTOP_SUCCESS	0
#define VTOP_INVALID	-1
#define VTOP_RETRY	-2

/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_sem locked for read and the GTS locked.
 *	- NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_read(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		up_read(&mm->mmap_sem);
	return gts;
}

static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = ERR_PTR(-EINVAL);

	down_write(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (!vma)
		goto err;

	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (IS_ERR(gts))
		goto err;
	mutex_lock(&gts->ts_ctxlock);
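	/*
	 * Downgrade to mmap_sem held for read so the caller can release it
	 * with gru_unlock_gts(), the same contract as gru_find_lock_gts().
	 */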
	downgrade_write(&mm->mmap_sem);
	return gts;

err:
	up_write(&mm->mmap_sem);
	return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	up_read(&current->mm->mmap_sem);
}

/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
	if (cbk) {
		cbk->istatus = CBS_ACTIVE;
	}
}

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		imap->fault_bits[i] = k;
		k = tfm->done_bits[i];
		if (k)
			k = xchg(&tfm->done_bits[i], 0UL);
		dmap->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *	returns:
 *		  0 - successful
 *		< 0 - error code
 *		  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	if (get_user_pages
	    (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pud_t *pudp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	pudp = pud_offset(pgdp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
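
	/*
	 * On x86_64, a 2MB large page is mapped directly by the pmd; in that
	 * case the pmd entry itself serves as the pte.
	 */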
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	return 1;
}

static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return VTOP_SUCCESS;

inval:
	return VTOP_INVALID;
upm:
	return VTOP_RETRY;
}

/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to the home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
	if (unlikely(cbe)) {
		cbe->cbrexecstatus = 0;		/* make CL dirty */
		gru_flush_cache(cbe);
	}
}

/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */
static void gru_preload_tlb(struct gru_state *gru,
			struct gru_thread_state *gts, int atomic,
			unsigned long fault_vaddr, int asid, int write,
			unsigned char tlb_preload_count,
			struct gru_tlb_fault_handle *tfh,
			struct gru_control_block_extended *cbe)
{
	unsigned long vaddr = 0, gpa;
	int ret, pageshift;

	if (cbe->opccpy != OP_BCOPY)
		return;

	if (fault_vaddr == cbe->cbe_baddr0)
		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
	else if (fault_vaddr == cbe->cbe_baddr1)
		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

	fault_vaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;
	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
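
	/*
	 * Walk backward from the last page of the transfer (capped by the
	 * preload count) toward the faulting page, dropping in one TLB entry
	 * per page.
	 */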
	while (vaddr > fault_vaddr) {
		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
					  GRU_PAGESIZE(pageshift)))
			return;
		gru_dbg(grudev,
			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
			vaddr, asid, write, pageshift, gpa);
		vaddr -= PAGE_SIZE;
		STAT(tlb_preload_page);
	}
}

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 *	Input:
 *		cb    Address of user CBR. Null if not running in user context
 *	Return:
 *		  0 = dropin, exception, or switch to UPM successful
 *		  1 = range invalidate active
 *		< 0 = error code
 */
static int gru_try_dropin(struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{
	struct gru_control_block_extended *cbe = NULL;
	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
	unsigned long gpa = 0, vaddr = 0;

	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Prefetch the CBE if doing TLB preloading
	 */
	if (unlikely(tlb_preload_count)) {
		cbe = gru_tfh_to_cbe(tfh);
		prefetchw(cbe);
	}

	/*
	 * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM
	 * call. Might be a hardware race OR a user error. Ignore FMM because
	 * FMM is a transient state.
	 */
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		gru_flush_cache(tfh);
		sync_core();
		if (tfh->status != TFHSTATUS_EXCEPTION)
			goto failnoexception;
		STAT(tfh_stale_on_fault);
	}
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cbk)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	indexway = tfh->indexway;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == VTOP_INVALID)
		goto failinval;
	if (ret == VTOP_RETRY)
		goto failupm;
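
	/*
	 * Track the page sizes used by this context. A size not yet recorded
	 * in ts_sizeavail requires a CCH update; if that cannot be done here
	 * (atomic context, or the update fails), force a CCH reload and fall
	 * back to UPM.
	 */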
	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}
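
	/* TLB preloading is attempted only for base-size pages */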
	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
		gru_preload_tlb(gts->ts_gru, gts, atomic, vaddr, asid, write,
				tlb_preload_count, tfh, cbe);
		gru_flush_cache_cbe(cbe);
	}

	gru_cb_set_istatus_active(cbk);
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	gru_dbg(grudev,
		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
		" rw %d, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", gts->ts_gru->gs_gid, gts, tfh,
		vaddr, asid, indexway, write, pageshift, gpa);
	STAT(tlb_dropin);
	return 0;

failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	return -EAGAIN;

failupm:
	/* Atomic failure; switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failnoexception:
	/* TFH status did not show exception pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_no_exception);
	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);
	return 0;

failidle:
	/* TFH state was idle - no miss pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with the linux
 * interrupt subsystem.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map imap, dmap;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = &gru_base[blade]->bs_grus[chiplet];
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
			raw_smp_processor_id(), chiplet);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &imap, &dmap);
	gru_dbg(grudev,
		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
		smp_processor_id(), chiplet, gru->gs_gid,
		imap.fault_bits[0], imap.fault_bits[1],
		dmap.fault_bits[0], dmap.fault_bits[1]);

	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
		STAT(intr_cbr);
		complete(gru->gs_blade->bs_async_wq);
		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
			gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
	}

	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
		STAT(intr_tfh);
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is issued.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/*
		 * This is running in interrupt context. Trylock the mmap_sem.
		 * If it fails, retry the fault in user context.
		 */
		if (!gts->ts_force_cch_reload &&
		    down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gts->ustats.fmm_tlbdropin++;
			gru_try_dropin(gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}

irqreturn_t gru0_intr(int irq, void *dev_id)
{
	return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
	return gru_intr(1, uv_numa_blade_id());
}

irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
	int blade;
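
	/*
	 * Blades with cpus take their GRU interrupts locally via gru0_intr()/
	 * gru1_intr(); this handler services only blades without cpus.
	 */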
	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		gru_intr(0, blade);
		gru_intr(1, blade);
	}
	return IRQ_HANDLED;
}

static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	gts->ustats.upm_tlbdropin++;
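
	/*
	 * Retry until the dropin either succeeds or fails hard. A positive
	 * return from gru_try_dropin() means a range invalidate was active,
	 * so wait for it to complete and try again.
	 */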
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */
		ret = gru_try_dropin(gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally this means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	void *cbk;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;
	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	gru_check_context_placement(gts);

	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gts->ts_force_cch_reload = 0;
		gru_update_cch(gts);
	}
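
	/*
	 * The context may not currently be loaded on a GRU (ts_gru == NULL),
	 * e.g. after being unloaded or stolen; in that case return -EAGAIN
	 * so the caller retries.
	 */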
	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
				gts->ts_ctxnum, ucbnum);
		ret = gru_user_dropin(gts, tfh, cbk);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		gru_flush_cache(cbe);	/* CBE not coherent */
		sync_core();		/* make sure we have current data */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		excdet.cbrstate = cbe->cbrstate;
		excdet.cbrexecstatus = cbe->cbrexecstatus;
		gru_flush_cache_cbe(cbe);
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev,
		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
		"exdet0 0x%lx, exdet1 0x%x\n",
		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate,
		excdet.cbrexecstatus, excdet.ecause, excdet.exceptdet0,
		excdet.exceptdet1);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}

int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;
	struct gru_mm_struct *gms;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gms = gts->ts_gms;
	gru_unlock_gts(gts);
	gru_flush_tlb_range(gms, req.vaddr, req.len);

	return 0;
}

/*
 * Fetch GSEG statistics
 */
long gru_get_gseg_statistics(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_get_gseg_statistics_req req;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/*
	 * The library creates arrays of contexts for threaded programs.
	 * If no gts exists in the array, the context has never been used & all
	 * statistics are implicitly 0.
	 */
	gts = gru_find_lock_gts(req.gseg);
	if (gts) {
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		gru_unlock_gts(gts);
	} else {
		memset(&req.stats, 0, sizeof(gts->ustats));
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;

	return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_context_option(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_set_context_option_req req;
	int ret = 0;

	STAT(set_context_option);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts) {
		gts = gru_alloc_locked_gts(req.gseg);
		if (IS_ERR(gts))
			return PTR_ERR(gts);
	}

	switch (req.op) {
	case sco_blade_chiplet:
		/* Select blade/chiplet for GRU context */
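		/*
		 * A blade or chiplet id of -1 means no specific placement
		 * is requested.
		 */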
		if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
		    (req.val1 >= 0 && !gru_base[req.val1]) ||
		    req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) {
			ret = -EINVAL;
		} else {
			gts->ts_user_blade_id = req.val1;
			gts->ts_user_chiplet_id = req.val0;
			gru_check_context_placement(gts);
		}
		break;
	case sco_gseg_owner:
		/* Register the current task as the GSEG owner */
		gts->ts_tgid_owner = current->tgid;
		break;
	case sco_cch_req_slice:
		/* Set the CCH slice option */
		gts->ts_cch_req_slice = req.val1 & 3;
		break;
	default:
		ret = -EINVAL;
	}
	gru_unlock_gts(gts);

	return ret;
}