/*
 * SN Platform GRU Driver
 *
 *              FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include "grutables.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/* Return codes for vtop functions */
#define VTOP_SUCCESS               0
#define VTOP_INVALID               -1
#define VTOP_RETRY                 -2

/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
        return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
        struct vm_area_struct *vma;

        vma = find_vma(current->mm, vaddr);
        if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
                return vma;
        return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *      - *gts with the mmap_sem locked for read and the GTS locked.
 *      - NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = NULL;

        down_read(&mm->mmap_sem);
        vma = gru_find_vma(vaddr);
        if (vma)
                gts = gru_find_thread_state(vma, TSID(vaddr, vma));
        if (gts)
                mutex_lock(&gts->ts_ctxlock);
        else
                up_read(&mm->mmap_sem);
        return gts;
}
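
/*
 * Same as gru_find_lock_gts(), but the thread state is allocated if it does
 * not already exist. Returns with mmap_sem held for read and the GTS locked,
 * or an ERR_PTR value on failure.
 */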
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct gru_thread_state *gts = ERR_PTR(-EINVAL);

        down_write(&mm->mmap_sem);
        vma = gru_find_vma(vaddr);
        if (!vma)
                goto err;

        gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
        if (IS_ERR(gts))
                goto err;
        mutex_lock(&gts->ts_ctxlock);
        downgrade_write(&mm->mmap_sem);
        return gts;

err:
        up_write(&mm->mmap_sem);
        return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
        mutex_unlock(&gts->ts_ctxlock);
        up_read(&current->mm->mmap_sem);
}

/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
        if (cbk) {
                cbk->istatus = CBS_ACTIVE;
        }
}

/*
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
                                struct gru_tlb_fault_map *imap,
                                struct gru_tlb_fault_map *dmap)
{
        unsigned long i, k;
        struct gru_tlb_fault_map *tfm;

        tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
        prefetchw(tfm);         /* Helps on hardware, required for emulator */
        for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
                k = tfm->fault_bits[i];
                if (k)
                        k = xchg(&tfm->fault_bits[i], 0UL);
                imap->fault_bits[i] = k;
                k = tfm->done_bits[i];
                if (k)
                        k = xchg(&tfm->done_bits[i], 0UL);
                dmap->fault_bits[i] = k;
        }

        /*
         * Not functionally required but helps performance. (Required
         * on emulator)
         */
        gru_flush_cache(tfm);
}

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 *      returns:
 *                0 - successful
 *              < 0 - error code
 *                1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
                                 unsigned long vaddr, int write,
                                 unsigned long *paddr, int *pageshift)
{
        struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
        *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
        *pageshift = PAGE_SHIFT;
#endif
        if (get_user_pages
            (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
                return -EFAULT;
        *paddr = page_to_phys(page);
        put_page(page);
        return 0;
}

/*
 * Convert a user virtual address to a physical address.
 * Only supports Intel large pages (2MB only) on x86_64.
 *      ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
        int write, unsigned long *paddr, int *pageshift)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t pte;

        pgdp = pgd_offset(vma->vm_mm, vaddr);
        if (unlikely(pgd_none(*pgdp)))
                goto err;

        pudp = pud_offset(pgdp, vaddr);
        if (unlikely(pud_none(*pudp)))
                goto err;

        pmdp = pmd_offset(pudp, vaddr);
        if (unlikely(pmd_none(*pmdp)))
                goto err;
#ifdef CONFIG_X86_64
        if (unlikely(pmd_large(*pmdp)))
                pte = *(pte_t *) pmdp;
        else
#endif
                pte = *pte_offset_kernel(pmdp, vaddr);

        if (unlikely(!pte_present(pte) ||
                     (write && (!pte_write(pte) || !pte_dirty(pte)))))
                return 1;

        *paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
        *pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
        *pageshift = PAGE_SHIFT;
#endif
        return 0;

err:
        return 1;
}
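
/*
 * Convert a user virtual address to a GRU "gpa" (global physical address).
 * Uses the fast atomic PTE lookup when possible; falls back to the sleeping
 * lookup when not in atomic context. Returns VTOP_SUCCESS, VTOP_INVALID, or
 * VTOP_RETRY (atomic lookup failed - retry in non-atomic context).
 */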
static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
                    int write, int atomic, unsigned long *gpa, int *pageshift)
{
        struct mm_struct *mm = gts->ts_mm;
        struct vm_area_struct *vma;
        unsigned long paddr;
        int ret, ps;

        vma = find_vma(mm, vaddr);
        if (!vma)
                goto inval;

        /*
         * Atomic lookup is faster & usually works even if called in non-atomic
         * context.
         */
        rmb();  /* Must check ms_range_active before loading PTEs */
        ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
        if (ret) {
                if (atomic)
                        goto upm;
                if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
                        goto inval;
        }
        if (is_gru_paddr(paddr))
                goto inval;
        paddr = paddr & ~((1UL << ps) - 1);
        *gpa = uv_soc_phys_ram_to_gpa(paddr);
        *pageshift = ps;
        return VTOP_SUCCESS;

inval:
        return VTOP_INVALID;
upm:
        return VTOP_RETRY;
}

/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
        if (unlikely(cbe)) {
                cbe->cbrexecstatus = 0;         /* make CL dirty */
                gru_flush_cache(cbe);
        }
}

/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller.
 */
static void gru_preload_tlb(struct gru_state *gru,
                        struct gru_thread_state *gts, int atomic,
                        unsigned long fault_vaddr, int asid, int write,
                        unsigned char tlb_preload_count,
                        struct gru_tlb_fault_handle *tfh,
                        struct gru_control_block_extended *cbe)
{
        unsigned long vaddr = 0, gpa;
        int ret, pageshift;

        if (cbe->opccpy != OP_BCOPY)
                return;

        if (fault_vaddr == cbe->cbe_baddr0)
                vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
        else if (fault_vaddr == cbe->cbe_baddr1)
                vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

        fault_vaddr &= PAGE_MASK;
        vaddr &= PAGE_MASK;
        vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

        while (vaddr > fault_vaddr) {
                ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
                if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
                                          GRU_PAGESIZE(pageshift)))
                        return;
                gru_dbg(grudev,
                        "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
                        atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
                        vaddr, asid, write, pageshift, gpa);
                vaddr -= PAGE_SIZE;
                STAT(tlb_preload_page);
        }
}

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 *      Input:
 *              cb      Address of user CBR. Null if not running in user context
 *      Return:
 *                0 = dropin, exception, or switch to UPM successful
 *                1 = range invalidate active
 *              < 0 = error code
 */
static int gru_try_dropin(struct gru_thread_state *gts,
                          struct gru_tlb_fault_handle *tfh,
                          struct gru_instruction_bits *cbk)
{
        struct gru_control_block_extended *cbe = NULL;
        unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
        int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
        unsigned long gpa = 0, vaddr = 0;

        /*
         * NOTE: The GRU contains magic hardware that eliminates races between
         * TLB invalidates and TLB dropins. If an invalidate occurs
         * in the window between reading the TFH and the subsequent TLB dropin,
         * the dropin is ignored. This eliminates the need for additional locks.
         */

        /*
         * Prefetch the CBE if doing TLB preloading
         */
        if (unlikely(tlb_preload_count)) {
                cbe = gru_tfh_to_cbe(tfh);
                prefetchw(cbe);
        }

        /*
         * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
         * Might be a hardware race OR a stupid user. Ignore FMM because FMM
         * is a transient state.
         */
        if (tfh->status != TFHSTATUS_EXCEPTION) {
                gru_flush_cache(tfh);
                sync_core();
                if (tfh->status != TFHSTATUS_EXCEPTION)
                        goto failnoexception;
                STAT(tfh_stale_on_fault);
        }
        if (tfh->state == TFHSTATE_IDLE)
                goto failidle;
        if (tfh->state == TFHSTATE_MISS_FMM && cbk)
                goto failfmm;

        write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
        vaddr = tfh->missvaddr;
        asid = tfh->missasid;
        indexway = tfh->indexway;
        if (asid == 0)
                goto failnoasid;

        rmb();  /* TFH must be cache resident before reading ms_range_active */

        /*
         * TFH is cache resident - at least briefly. Fail the dropin
         * if a range invalidate is active.
         */
        if (atomic_read(&gts->ts_gms->ms_range_active))
                goto failactive;

        ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
        if (ret == VTOP_INVALID)
                goto failinval;
        if (ret == VTOP_RETRY)
                goto failupm;

        if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
                gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
                if (atomic || !gru_update_cch(gts)) {
                        gts->ts_force_cch_reload = 1;
                        goto failupm;
                }
        }

        if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
                gru_preload_tlb(gts->ts_gru, gts, atomic, vaddr, asid, write,
                                tlb_preload_count, tfh, cbe);
                gru_flush_cache_cbe(cbe);
        }

        gru_cb_set_istatus_active(cbk);
        tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
                          GRU_PAGESIZE(pageshift));
        gru_dbg(grudev,
                "%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
                " rw %d, ps %d, gpa 0x%lx\n",
                atomic ? "atomic" : "non-atomic", gts->ts_gru->gs_gid, gts, tfh,
                vaddr, asid, indexway, write, pageshift, gpa);
        STAT(tlb_dropin);
        return 0;

failnoasid:
        /* No asid (delayed unload). */
        STAT(tlb_dropin_fail_no_asid);
        gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        if (!cbk)
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        return -EAGAIN;

failupm:
        /* Atomic failure switch CBR to UPM */
        tfh_user_polling_mode(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_upm);
        gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return 1;

failfmm:
        /* FMM state on UPM call */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_fmm);
        gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;

failnoexception:
        /* TFH status did not show exception pending */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        if (cbk)
                gru_flush_cache(cbk);
        STAT(tlb_dropin_fail_no_exception);
        gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
                tfh, tfh->status, tfh->state);
        return 0;

failidle:
        /* TFH state was idle - no miss pending */
        gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        if (cbk)
                gru_flush_cache(cbk);
        STAT(tlb_dropin_fail_idle);
        gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
        return 0;

failinval:
        /* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
        tfh_exception(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_invalid);
        gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
        return -EFAULT;

failactive:
        /* Range invalidate active. Switch to UPM iff atomic */
        if (!cbk)
                tfh_user_polling_mode(tfh);
        else
                gru_flush_cache(tfh);
        gru_flush_cache_cbe(cbe);
        STAT(tlb_dropin_fail_range_active);
        gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
                tfh, vaddr);
        return 1;
}

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with Linux
 * interrupt handlers.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{
        struct gru_state *gru;
        struct gru_tlb_fault_map imap, dmap;
        struct gru_thread_state *gts;
        struct gru_tlb_fault_handle *tfh = NULL;
        int cbrnum, ctxnum;

        STAT(intr);

        gru = &gru_base[blade]->bs_grus[chiplet];
        if (!gru) {
                dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
                        raw_smp_processor_id(), chiplet);
                return IRQ_NONE;
        }
        get_clear_fault_map(gru, &imap, &dmap);
        gru_dbg(grudev,
                "cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
                smp_processor_id(), chiplet, gru->gs_gid,
                imap.fault_bits[0], imap.fault_bits[1],
                dmap.fault_bits[0], dmap.fault_bits[1]);

        for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
                complete(gru->gs_blade->bs_async_wq);
                gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
                        gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
        }

        for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
                tfh = get_tfh_by_index(gru, cbrnum);
                prefetchw(tfh); /* Helps on hdw, required for emulator */

                /*
                 * When hardware sets a bit in the faultmap, it implicitly
                 * locks the GRU context so that it cannot be unloaded.
                 * The gts cannot change until a TFH start/writestart command
                 * is received.
                 */
                ctxnum = tfh->ctxnum;
                gts = gru->gs_gts[ctxnum];

                /*
                 * This is running in interrupt context. Trylock the mmap_sem.
                 * If it fails, retry the fault in user context.
                 */
                if (!gts->ts_force_cch_reload &&
                    down_read_trylock(&gts->ts_mm->mmap_sem)) {
                        gts->ustats.fmm_tlbdropin++;
                        gru_try_dropin(gts, tfh, NULL);
                        up_read(&gts->ts_mm->mmap_sem);
                } else {
                        tfh_user_polling_mode(tfh);
                        STAT(intr_mm_lock_failed);
                }
        }
        return IRQ_HANDLED;
}
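
/*
 * Per-chiplet interrupt entry points. Each resolves the local blade id and
 * dispatches to the common gru_intr() handler above.
 */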
irqreturn_t gru0_intr(int irq, void *dev_id)
{
        return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
        return gru_intr(1, uv_numa_blade_id());
}
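
/*
 * Interrupt entry point for blades that have GRUs but no local CPUs to
 * target. Both chiplets of every such blade are handled from here.
 */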
irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
        int blade;

        for_each_possible_blade(blade) {
                if (uv_blade_nr_possible_cpus(blade))
                        continue;
                gru_intr(0, blade);
                gru_intr(1, blade);
        }
        return IRQ_HANDLED;
}
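
/*
 * Handle a TLB dropin on behalf of a user "call OS" request. Waits for any
 * active range invalidates to complete, then retries the dropin until it
 * either succeeds or fails permanently.
 */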
static int gru_user_dropin(struct gru_thread_state *gts,
                           struct gru_tlb_fault_handle *tfh,
                           void *cb)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        int ret;

        gts->ustats.upm_tlbdropin++;
        while (1) {
                wait_event(gms->ms_wait_queue,
                           atomic_read(&gms->ms_range_active) == 0);
                prefetchw(tfh); /* Helps on hdw, required for emulator */
                ret = gru_try_dropin(gts, tfh, cb);
                if (ret <= 0)
                        return ret;
                STAT(call_os_wait_queue);
        }
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally means that a TLB fault has occurred.
 *      cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
        struct gru_tlb_fault_handle *tfh;
        struct gru_thread_state *gts;
        void *cbk;
        int ucbnum, cbrnum, ret = -EINVAL;

        /* sanity check the cb pointer */
        ucbnum = get_cb_number((void *)cb);
        if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
                return -EINVAL;

        gts = gru_find_lock_gts(cb);
        if (!gts)
                return -EINVAL;
        gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb,
                gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

        if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
                goto exit;

        gru_check_context_placement(gts);

        /*
         * CCH may contain stale data if ts_force_cch_reload is set.
         */
        if (gts->ts_gru && gts->ts_force_cch_reload) {
                gts->ts_force_cch_reload = 0;
                gru_update_cch(gts);
        }

        ret = -EAGAIN;
        cbrnum = thread_cbr_number(gts, ucbnum);
        if (gts->ts_gru) {
                tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
                cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
                                gts->ts_ctxnum, ucbnum);
                ret = gru_user_dropin(gts, tfh, cbk);
        }
exit:
        gru_unlock_gts(gts);
        return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
        struct control_block_extended_exc_detail excdet;
        struct gru_control_block_extended *cbe;
        struct gru_thread_state *gts;
        int ucbnum, cbrnum, ret;

        STAT(user_exception);
        if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
                return -EFAULT;

        gts = gru_find_lock_gts(excdet.cb);
        if (!gts)
                return -EINVAL;

        gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb,
                gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
        ucbnum = get_cb_number((void *)excdet.cb);
        if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
                ret = -EINVAL;
        } else if (gts->ts_gru) {
                cbrnum = thread_cbr_number(gts, ucbnum);
                cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
                gru_flush_cache(cbe);   /* CBE not coherent */
                sync_core();            /* make sure we have current data */
                excdet.opc = cbe->opccpy;
                excdet.exopc = cbe->exopccpy;
                excdet.ecause = cbe->ecause;
                excdet.exceptdet0 = cbe->idef1upd;
                excdet.exceptdet1 = cbe->idef3upd;
                excdet.cbrstate = cbe->cbrstate;
                excdet.cbrexecstatus = cbe->cbrexecstatus;
                gru_flush_cache_cbe(cbe);
                ret = 0;
        } else {
                ret = -EAGAIN;
        }
        gru_unlock_gts(gts);

        gru_dbg(grudev,
                "cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
                "exdet0 0x%lx, exdet1 0x%x\n",
                excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
                excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
        if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
                ret = -EFAULT;
        return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
        struct gru_thread_state *gts;
        struct gru_state *gru;
        int gid, ctxnum;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        foreach_gid(gid) {
                gru = GID_TO_GRU(gid);
                spin_lock(&gru->gs_lock);
                for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
                        gts = gru->gs_gts[ctxnum];
                        if (gts && mutex_trylock(&gts->ts_ctxlock)) {
                                spin_unlock(&gru->gs_lock);
                                gru_unload_context(gts, 1);
                                mutex_unlock(&gts->ts_ctxlock);
                                spin_lock(&gru->gs_lock);
                        }
                }
                spin_unlock(&gru->gs_lock);
        }
        return 0;
}

int gru_user_unload_context(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_unload_context_req req;

        STAT(user_unload_context);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

        if (!req.gseg)
                return gru_unload_all_contexts();

        gts = gru_find_lock_gts(req.gseg);
        if (!gts)
                return -EINVAL;

        if (gts->ts_gru)
                gru_unload_context(gts, 1);
        gru_unlock_gts(gts);

        return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_flush_tlb_req req;
        struct gru_mm_struct *gms;

        STAT(user_flush_tlb);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
                req.vaddr, req.len);

        gts = gru_find_lock_gts(req.gseg);
        if (!gts)
                return -EINVAL;

        gms = gts->ts_gms;
        gru_unlock_gts(gts);
        gru_flush_tlb_range(gms, req.vaddr, req.len);

        return 0;
}

/*
 * Fetch GSEG statistics
 */
long gru_get_gseg_statistics(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_get_gseg_statistics_req req;

        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        /*
         * The library creates arrays of contexts for threaded programs.
         * If no gts exists in the array, the context has never been used & all
         * statistics are implicitly 0.
         */
        gts = gru_find_lock_gts(req.gseg);
        if (gts) {
                memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
                gru_unlock_gts(gts);
        } else {
                memset(&req.stats, 0, sizeof(gts->ustats));
        }

        if (copy_to_user((void __user *)arg, &req, sizeof(req)))
                return -EFAULT;

        return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_context_option(unsigned long arg)
{
        struct gru_thread_state *gts;
        struct gru_set_context_option_req req;
        int ret = 0;

        STAT(set_context_option);
        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;
        gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

        gts = gru_find_lock_gts(req.gseg);
        if (!gts) {
                gts = gru_alloc_locked_gts(req.gseg);
                if (IS_ERR(gts))
                        return PTR_ERR(gts);
        }

        switch (req.op) {
        case sco_blade_chiplet:
                /* Select blade/chiplet for GRU context */
                if (req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
                    (req.val1 >= 0 && !gru_base[req.val1]) ||
                    req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB) {
                        ret = -EINVAL;
                } else {
                        gts->ts_user_blade_id = req.val1;
                        gts->ts_user_chiplet_id = req.val0;
                        gru_check_context_placement(gts);
                }
                break;
        case sco_gseg_owner:
                /* Register the current task as the GSEG owner */
                gts->ts_tgid_owner = current->tgid;
                break;
        case sco_cch_req_slice:
                /* Set the CCH slice option */
                gts->ts_cch_req_slice = req.val1 & 3;