/*
 * SN Platform GRU Driver
 *
 * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <asm/uv/uv_hub.h>
#include "grutables.h"
#include "gruhandles.h"
unsigned long gru_options __read_mostly;

static struct device_driver gru_driver = {
	.name = "gru"
};

static struct device gru_device = {
	.driver = &gru_driver,
};

struct device *grudev = &gru_device;
/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 *	ZZZ should "shift" be used?? Depends on HT cpu numbering
 *	ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
	return uv_blade_processor_id() % GRU_NUM_TFM;
}
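
/*
 * Illustrative example (GRU_NUM_TFM assumed to be 16 here, not taken from the
 * source above): blade-local processor ids 0..15 select fault maps 0..15, and
 * processor id 16 wraps back to fault map 0, so two such cpus share one map.
 */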
/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use. There won't be many (percentage wise) still
 *  in use. Search active contexts & determine the value of the first
 *  asid in use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit				    ^-MAX_ASID
 *
 *  All asid manipulation & context loading/unloading is protected by the
 *  gru_lock.
 */
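
/*
 * Worked example (asid values made up for illustration): after a wrap, suppose
 * active contexts still hold asids 0x40 and 0x90. The first in-use asid found
 * is 0x40, so "limit" becomes 0x40 and MIN_ASID..0x3f is the first assignable
 * block. When allocation reaches 0x40, the search resumes past it, finds 0x90
 * as the next in-use asid, and the block between them becomes assignable.
 * When MAX_ASID is finally reached, the TLB is flushed, the asid generation is
 * bumped, and allocation wraps back to MIN_ASID.
 */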
/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
	gru_dbg(grudev, "gid %d\n", gru->gs_gid);
	STAT(asid_wrap);
	gru->gs_asid_gen++;
	gru_flush_all_tlb(gru);
	return MIN_ASID;
}
/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
	int i, gid, inuse_asid, limit;

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	limit = MAX_ASID;
	if (asid >= limit)
		asid = gru_wrap_asid(gru);
	gid = gru->gs_gid;
again:
	for (i = 0; i < GRU_NUM_CCH; i++) {
		if (!gru->gs_gts[i])
			continue;
		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
			gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
			inuse_asid, i);
		if (inuse_asid == asid) {
			asid += ASID_INC;
			if (asid >= limit) {
				/*
				 * empty range: reset the range limit and
				 * start over again
				 */
				limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = gru_wrap_asid(gru);
				goto again;
			}
		}

		if ((inuse_asid > asid) && (inuse_asid < limit))
			limit = inuse_asid;
	}
	gru->gs_asid_limit = limit;
	gru->gs_asid = asid;
	gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
		asid, limit);
	return asid;
}
/* Assign a new ASID to a thread context. */
static int gru_assign_asid(struct gru_state *gru)
{
	int asid;

	spin_lock(&gru->gs_asid_lock);
	gru->gs_asid += ASID_INC;
	asid = gru->gs_asid;
	if (asid >= gru->gs_asid_limit)
		asid = gru_reset_asid_limit(gru, asid);
	spin_unlock(&gru->gs_asid_lock);

	gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
	return asid;
}
/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
				       char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		i = find_first_bit(p, mmax);
		if (i == mmax)
			BUG();
		__clear_bit(i, p);
		__set_bit(i, &bits);
		if (idx)
			*idx++ = i;
	}
	return bits;
}
unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
				       char *cbmap)
{
	return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
				 cbmap);
}
unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
				       char *dsmap)
{
	return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
				 dsmap);
}
static void reserve_gru_resources(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	gru->gs_active_contexts++;
	gts->ts_cbr_map =
		gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_cbr_idx);
	gts->ts_dsr_map =
		gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}
static void free_gru_resources(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	gru->gs_active_contexts--;
	gru->gs_cbr_map |= gts->ts_cbr_map;
	gru->gs_dsr_map |= gts->ts_dsr_map;
}
/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
			       int dsr_au_count, int max_active_contexts)
{
	return hweight64(gru->gs_cbr_map) >= cbr_au_count
		&& hweight64(gru->gs_dsr_map) >= dsr_au_count
		&& gru->gs_active_contexts < max_active_contexts;
}
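
/*
 * Usage note (describes the pattern used later in gru_assign_gru_context):
 * callers typically probe with check_gru_resources() while unlocked to pick a
 * candidate GRU, then take gs_lock and call it again before reserving, since
 * another thread may have consumed the resources in the meantime.
 */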
/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
			       struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
	unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
	int asid;

	spin_lock(&gms->ms_asid_lock);
	asid = asids->mt_asid;

	if (asid == 0 || asids->mt_asid_gen != gru->gs_asid_gen) {
		asid = gru_assign_asid(gru);
		asids->mt_asid = asid;
		asids->mt_asid_gen = gru->gs_asid_gen;
	}

	BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
	asids->mt_ctxbitmap |= ctxbitmap;
	if (!test_bit(gru->gs_gid, gms->ms_asidmap))
		__set_bit(gru->gs_gid, gms->ms_asidmap);
	spin_unlock(&gms->ms_asid_lock);

	gru_dbg(grudev,
		"gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
		gms->ms_asidmap[0]);
	return asid;
}
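
/*
 * Example of the bookkeeping above (numbers chosen for illustration): loading
 * context 3 on the GRU with gid 1 sets bit 3 in ms_asids[1].mt_ctxbitmap and
 * bit 1 in ms_asidmap, so a later TLB flush for this mm knows exactly which
 * chiplets and which contexts must be purged.
 */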
static void gru_unload_mm_tracker(struct gru_state *gru,
				  struct gru_thread_state *gts)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	struct gru_mm_tracker *asids;
	unsigned short ctxbitmap;

	asids = &gms->ms_asids[gru->gs_gid];
	ctxbitmap = (1 << gts->ts_ctxnum);
	spin_lock(&gms->ms_asid_lock);
	BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
	asids->mt_ctxbitmap ^= ctxbitmap;
	gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
		gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
	spin_unlock(&gms->ms_asid_lock);
}
/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
	if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
		gru_drop_mmu_notifier(gts->ts_gms);
		kfree(gts);
	}
}
/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
							     *vdata, int tsid)
{
	struct gru_thread_state *gts;

	list_for_each_entry(gts, &vdata->vd_head, ts_next)
		if (gts->ts_tsid == tsid)
			return gts;
	return NULL;
}
/*
 * Allocate a thread state structure.
 */
static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
					      struct gru_vma_data *vdata,
					      int tsid)
{
	struct gru_thread_state *gts;
	int bytes;

	bytes = DSR_BYTES(vdata->vd_dsr_au_count) +
		CBR_BYTES(vdata->vd_cbr_au_count);
	bytes += sizeof(struct gru_thread_state);
	gts = kzalloc(bytes, GFP_KERNEL);
	if (!gts)
		return NULL;

	atomic_set(&gts->ts_refcnt, 1);
	mutex_init(&gts->ts_ctxlock);
	gts->ts_cbr_au_count = vdata->vd_cbr_au_count;
	gts->ts_dsr_au_count = vdata->vd_dsr_au_count;
	gts->ts_user_options = vdata->vd_user_options;
	gts->ts_tsid = tsid;
	gts->ts_ctxnum = NULLCTX;
	gts->ts_mm = current->mm;
	gts->ts_vma = vma;
	gts->ts_tlb_int_select = -1;
	gts->ts_gms = gru_register_mmu_notifier();
	if (!gts->ts_gms)
		goto err;

	gru_dbg(grudev, "alloc vdata %p, new gts %p\n", vdata, gts);
	return gts;

err:
	gts_drop(gts);
	return NULL;
}
/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
	struct gru_vma_data *vdata = NULL;

	vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
	if (!vdata)
		return NULL;

	INIT_LIST_HEAD(&vdata->vd_head);
	spin_lock_init(&vdata->vd_lock);
	gru_dbg(grudev, "alloc vdata %p\n", vdata);
	return vdata;
}
/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
					       int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts;

	spin_lock(&vdata->vd_lock);
	gts = gru_find_current_gts_nolock(vdata, tsid);
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}
/*
 * Allocate a new thread state for a GSEG. Note that races may allow
 * another thread to create a gts first; if so, the existing gts is used.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
						int tsid)
{
	struct gru_vma_data *vdata = vma->vm_private_data;
	struct gru_thread_state *gts, *ngts;

	gts = gru_alloc_gts(vma, vdata, tsid);
	if (!gts)
		return NULL;

	spin_lock(&vdata->vd_lock);
	ngts = gru_find_current_gts_nolock(vdata, tsid);
	if (ngts) {
		gts_drop(gts);
		gts = ngts;
		STAT(gts_double_allocate);
	} else {
		list_add(&gts->ts_next, &vdata->vd_head);
	}
	spin_unlock(&vdata->vd_lock);
	gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
	return gts;
}
/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru;

	gru = gts->ts_gru;
	gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

	spin_lock(&gru->gs_lock);
	gru->gs_gts[gts->ts_ctxnum] = NULL;
	free_gru_resources(gru, gts);
	BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
	__clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
	gts->ts_ctxnum = NULLCTX;
	gts->ts_gru = NULL;
	spin_unlock(&gru->gs_lock);

	gts_drop(gts);
}
/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required).
 */
static void prefetch_data(void *p, int num, int stride)
{
	while (num-- > 0) {
		prefetch(p);
		p += stride;
	}
}

static inline long gru_copy_handle(void *d, void *s)
{
	memcpy(d, s, GRU_HANDLE_BYTES);
	return GRU_HANDLE_BYTES;
}
static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
				 unsigned long cbrmap, unsigned long length)
{
	int i, scr;

	prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
		      GRU_CACHE_LINE_BYTES);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
		prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
			      GRU_CACHE_LINE_BYTES);
		cb += GRU_HANDLE_STRIDE;
	}
}
static void gru_load_context_data(void *save, void *grubase, int ctxnum,
				  unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(cb, save);
		save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save);
		cb += GRU_HANDLE_STRIDE;
	}

	memcpy(gseg + GRU_DS_BASE, save, length);
}
static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
				    unsigned long cbrmap, unsigned long dsrmap)
{
	void *gseg, *cb, *cbe;
	unsigned long length;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
		save += gru_copy_handle(save, cb);
		save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
		cb += GRU_HANDLE_STRIDE;
	}

	memcpy(save, gseg + GRU_DS_BASE, length);
}
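
/*
 * Save-area layout implied by the two routines above: for each CBR allocated
 * in cbrmap, a CB handle followed by its CBE handle (GRU_HANDLE_BYTES each),
 * and after all CBR state the data-segment contents, whose size is
 * hweight64(dsrmap) * GRU_DSR_AU_BYTES.
 */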
void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int ctxnum = gts->ts_ctxnum;

	zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	gru_dbg(grudev, "gts %p\n", gts);
	lock_cch_handle(cch);
	if (cch_interrupt_sync(cch))
		BUG();
	gru_unload_mm_tracker(gru, gts);
	if (savestate)
		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
					ctxnum, gts->ts_cbr_map,
					gts->ts_dsr_map);

	if (cch_deallocate(cch))
		BUG();
	gts->ts_force_unload = 0;	/* ts_force_unload locked by CCH lock */
	unlock_cch_handle(cch);

	gru_free_gru_context(gts);
	STAT(unload_context);
}
/*
 * Load a GRU context by copying it from the thread data structure in memory
 * into the GRU.
 */
static void gru_load_context(struct gru_thread_state *gts)
{
	struct gru_state *gru = gts->ts_gru;
	struct gru_context_configuration_handle *cch;
	int err, asid, ctxnum = gts->ts_ctxnum;

	gru_dbg(grudev, "gts %p\n", gts);
	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	asid = gru_load_mm_tracker(gru, gts);
	cch->tfm_fault_bit_enable =
		(gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
		 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch->tlb_int_enable) {
		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
		cch->tlb_int_select = gts->ts_tlb_int_select;
	}
	cch->tfm_done_bit_enable = 0;
	err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map);
	if (err) {
		gru_dbg(grudev,
			"err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
			err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
		BUG();
	}

	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
			      gts->ts_cbr_map, gts->ts_dsr_map);

	if (cch_start(cch))
		BUG();
	unlock_cch_handle(cch);
}
/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- force a delayed context unload by clearing the CCH asids. This
 *	  forces TLB misses for new GRU instructions. The context is unloaded
 *	  when the next TLB miss occurs.
 */
static int gru_update_cch(struct gru_thread_state *gts, int int_select)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ctxnum = gts->ts_ctxnum, ret = 0;

	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

	lock_cch_handle(cch);
	if (cch->state == CCHSTATE_ACTIVE) {
		if (gru->gs_gts[gts->ts_ctxnum] != gts)
			goto exit;
		if (cch_interrupt(cch))
			BUG();
		if (int_select >= 0) {
			gts->ts_tlb_int_select = int_select;
			cch->tlb_int_select = int_select;
		} else {
			for (i = 0; i < 8; i++)
				cch->asid[i] = 0;
			cch->tfm_fault_bit_enable = 0;
			cch->tlb_int_enable = 0;
			gts->ts_force_unload = 1;
		}
		if (cch_start(cch))
			BUG();
		ret = 1;
	}
exit:
	unlock_cch_handle(cch);
	return ret;
}
/*
 * Update CCH tlb interrupt select. Required when all of the following are true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
	if (gts->ts_tlb_int_select < 0
	    || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
		return 0;

	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
		gru_cpu_fault_map_id());
	return gru_update_cch(gts, gru_cpu_fault_map_id());
}
/*
 * Insufficient GRU resources available on the local blade. Steal a context from
 * a process. This is a hack until a _real_ resource scheduler is written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
				 ((g)+1) : &(b)->bs_grus[0])
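
/*
 * Round-robin example (assuming GRU_NUM_CCH is 16 and two chiplets per blade;
 * those values are not taken from this file): next_ctxnum(5) == 6 and
 * next_ctxnum(14) == 0, so the scan resumes just past the last victim and
 * wraps early; next_gru() steps to the other chiplet and then back to
 * bs_grus[0]. Together with bs_lru_gru/bs_lru_ctxnum this walks every context
 * on the blade.
 */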
static void gru_steal_context(struct gru_thread_state *gts)
{
	struct gru_blade_state *blade;
	struct gru_state *gru, *gru0;
	struct gru_thread_state *ngts = NULL;
	int ctxnum, ctxnum0, flag = 0, cbr, dsr;

	cbr = gts->ts_cbr_au_count;
	dsr = gts->ts_dsr_au_count;

	blade = gru_base[uv_numa_blade_id()];
	spin_lock(&blade->bs_lock);

	ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
	gru = blade->bs_lru_gru;
	if (ctxnum == 0)
		gru = next_gru(blade, gru);
	ctxnum0 = ctxnum;
	gru0 = gru;
	while (1) {
		if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
			break;
		spin_lock(&gru->gs_lock);
		for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
			if (flag && gru == gru0 && ctxnum == ctxnum0)
				break;
			ngts = gru->gs_gts[ctxnum];
			/*
			 * We are grabbing locks out of order, so trylock is
			 * needed. GTSs are usually not locked, so the odds of
			 * success are high. If trylock fails, try to steal a
			 * different GSEG.
			 */
			if (ngts && mutex_trylock(&ngts->ts_ctxlock))
				break;
			ngts = NULL;
			flag = 1;
		}
		spin_unlock(&gru->gs_lock);
		if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
			break;
		ctxnum = 0;
		gru = next_gru(blade, gru);
	}
	blade->bs_lru_gru = gru;
	blade->bs_lru_ctxnum = ctxnum;
	spin_unlock(&blade->bs_lock);

	if (ngts) {
		ngts->ts_steal_jiffies = jiffies;
		gru_unload_context(ngts, 1);
		mutex_unlock(&ngts->ts_ctxlock);
	} else {
		STAT(steal_context_failed);
	}
	gru_dbg(grudev,
		"stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
		" avail cb %ld, ds %ld\n",
		gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
		hweight64(gru->gs_dsr_map));
}
/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
static struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
{
	struct gru_state *gru, *grux;
	int i, max_active_contexts;

again:
	gru = NULL;
	max_active_contexts = GRU_NUM_CCH;
	for_each_gru_on_blade(grux, uv_numa_blade_id(), i) {
		if (check_gru_resources(grux, gts->ts_cbr_au_count,
					gts->ts_dsr_au_count,
					max_active_contexts)) {
			gru = grux;
			max_active_contexts = grux->gs_active_contexts;
			if (max_active_contexts == 0)
				break;
		}
	}

	if (gru) {
		spin_lock(&gru->gs_lock);
		if (!check_gru_resources(gru, gts->ts_cbr_au_count,
					 gts->ts_dsr_au_count, GRU_NUM_CCH)) {
			spin_unlock(&gru->gs_lock);
			goto again;
		}
		reserve_gru_resources(gru, gts);
		gts->ts_gru = gru;
		gts->ts_ctxnum =
			find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
		BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
		atomic_inc(&gts->ts_refcnt);
		gru->gs_gts[gts->ts_ctxnum] = gts;
		__set_bit(gts->ts_ctxnum, &gru->gs_context_map);
		spin_unlock(&gru->gs_lock);

		STAT(assign_context);
		gru_dbg(grudev,
			"gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
			gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
			gts->ts_gru->gs_gid, gts->ts_ctxnum,
			gts->ts_cbr_au_count, gts->ts_dsr_au_count);
	} else {
		gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
		STAT(assign_context_failed);
	}

	return gru;
}
/*
 * Map the user's GRU segment
 *
 *	Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct gru_thread_state *gts;
	unsigned long paddr, vaddr;

	vaddr = (unsigned long)vmf->virtual_address;
	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
		vma, vaddr, GSEG_BASE(vaddr));

	/* The following check ensures vaddr is a valid address in the VMA */
	gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (!gts)
		return VM_FAULT_SIGBUS;

again:
	mutex_lock(&gts->ts_ctxlock);
	if (gts->ts_gru) {
		if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) {
			STAT(migrated_nopfn_unload);
			gru_unload_context(gts, 1);
		} else {
			if (gru_retarget_intr(gts))
				STAT(migrated_nopfn_retarget);
		}
	}

	if (!gts->ts_gru) {
		if (!gru_assign_gru_context(gts)) {
			mutex_unlock(&gts->ts_ctxlock);
			schedule_timeout(GRU_ASSIGN_DELAY);	/* true hack ZZZ */
			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
				gru_steal_context(gts);
			goto again;
		}
		gru_load_context(gts);
		paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
		remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
				paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
				vma->vm_page_prot);
	}

	mutex_unlock(&gts->ts_ctxlock);

	return VM_FAULT_NOPAGE;
}