/*
 * SN Platform GRU Driver
 *
 *            DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/list.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
unsigned long gru_options __read_mostly;
static struct device_driver gru_driver = {
        .name = "gru"
};

static struct device gru_device = {
        .init_name = "",
        .driver = &gru_driver,
};

struct device *grudev = &gru_device;
/*
 * Select a gru fault map to be used by the current cpu. Note that
 * multiple cpus may be using the same map.
 *	ZZZ should "shift" be used?? Depends on HT cpu numbering
 *	ZZZ should be inline but did not work on emulator
 */
int gru_cpu_fault_map_id(void)
{
        return uv_blade_processor_id() % GRU_NUM_TFM;
}
/*--------- ASID Management -------------------------------------------
 *
 *  Initially, assign asids sequentially from MIN_ASID .. MAX_ASID.
 *  Once MAX is reached, flush the TLB & start over. However,
 *  some asids may still be in use. There won't be many (percentage wise) still
 *  in use. Search active contexts & determine the value of the first
 *  asid in use ("x"s below). Set "limit" to this value.
 *  This defines a block of assignable asids.
 *
 *  When "limit" is reached, search forward from limit+1 and determine the
 *  next block of assignable asids.
 *
 *  Repeat until MAX_ASID is reached, then start over again.
 *
 *  Each time MAX_ASID is reached, increment the asid generation. Since
 *  the search for in-use asids only checks contexts with GRUs currently
 *  assigned, asids in some contexts will be missed. Prior to loading
 *  a context, the asid generation of the GTS asid is rechecked. If it
 *  doesn't match the current generation, a new asid will be assigned.
 *
 *	0---------------x------------x---------------------x----|
 *	  ^-next	^-limit				    ^-MAX_ASID
 *
 * All asid manipulation & context loading/unloading is protected by the
 * gs_asid_lock & gs_lock.
 */
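/*
 * Worked example (illustrative values only): suppose that after a wrap
 * the scan of loaded contexts finds asids 0x20 and 0x90 still in use.
 * "next" restarts at MIN_ASID and "limit" becomes 0x20, so the block
 * MIN_ASID..0x20 is assignable. When an assignment reaches 0x20, the
 * scan resumes past it and sets "limit" to 0x90, exposing the next
 * block, and so on until MAX_ASID forces another wrap and a new asid
 * generation.
 */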
/* Hit the asid limit. Start over */
static int gru_wrap_asid(struct gru_state *gru)
{
        gru_dbg(grudev, "gid %d\n", gru->gs_gid);
        STAT(asid_wrap);
        gru->gs_asid_gen++;
        return MIN_ASID;
}
/* Find the next chunk of unused asids */
static int gru_reset_asid_limit(struct gru_state *gru, int asid)
{
        int i, gid, inuse_asid, limit;

        gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
        STAT(asid_next);
        limit = MAX_ASID;
        if (asid >= limit)
                asid = gru_wrap_asid(gru);
        gru_flush_all_tlb(gru);
        gid = gru->gs_gid;
again:
        for (i = 0; i < GRU_NUM_CCH; i++) {
                if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
                        continue;
                inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
                gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
                        gru->gs_gid, gru->gs_gts[i], gru->gs_gts[i]->ts_gms,
                        inuse_asid, i);
                if (inuse_asid == asid) {
                        asid += ASID_INC;
                        if (asid >= limit) {
                                /*
                                 * empty range: reset the range limit and
                                 * start over
                                 */
                                limit = MAX_ASID;
                                if (asid >= MAX_ASID)
                                        asid = gru_wrap_asid(gru);
                                goto again;
                        }
                }

                if ((inuse_asid > asid) && (inuse_asid < limit))
                        limit = inuse_asid;
        }
        gru->gs_asid_limit = limit;
        gru->gs_asid = asid;
        gru_dbg(grudev, "gid %d, new asid 0x%x, new_limit 0x%x\n", gru->gs_gid,
                asid, limit);
        return asid;
}
/* Assign a new ASID to a thread context.  */
static int gru_assign_asid(struct gru_state *gru)
{
        int asid;

        gru->gs_asid += ASID_INC;
        asid = gru->gs_asid;
        if (asid >= gru->gs_asid_limit)
                asid = gru_reset_asid_limit(gru, asid);

        gru_dbg(grudev, "gid %d, asid 0x%x\n", gru->gs_gid, asid);
        return asid;
}
/*
 * Clear n bits in a word. Return a word indicating the bits that were cleared.
 * Optionally, build an array of chars that contain the bit numbers allocated.
 */
static unsigned long reserve_resources(unsigned long *p, int n, int mmax,
                                       char *idx)
{
        unsigned long bits = 0;
        int i;

        while (n--) {
                i = find_first_bit(p, mmax);
                if (i == mmax)
                        BUG();
                __clear_bit(i, p);
                __set_bit(i, &bits);
                if (idx)
                        *idx++ = i;
        }
        return bits;
}
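/*
 * Example for reserve_resources() (illustrative): with *p == 0x0f and
 * n == 2, the loop clears bits 0 and 1, leaving *p == 0x0c, and returns
 * 0x03. If idx is non-NULL, it receives the allocated bit numbers
 * {0, 1}.
 */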
unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count,
                                       char *cbmap)
{
        return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU,
                                 cbmap);
}

unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count,
                                       char *dsmap)
{
        return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU,
                                 dsmap);
}
static void reserve_gru_resources(struct gru_state *gru,
                                  struct gru_thread_state *gts)
{
        gru->gs_active_contexts++;
        gts->ts_cbr_map =
            gru_reserve_cb_resources(gru, gts->ts_cbr_au_count,
                                     gts->ts_cbr_idx);
        gts->ts_dsr_map =
            gru_reserve_ds_resources(gru, gts->ts_dsr_au_count, NULL);
}
static void free_gru_resources(struct gru_state *gru,
                               struct gru_thread_state *gts)
{
        gru->gs_active_contexts--;
        gru->gs_cbr_map |= gts->ts_cbr_map;
        gru->gs_dsr_map |= gts->ts_dsr_map;
}
/*
 * Check if a GRU has sufficient free resources to satisfy an allocation
 * request. Note: GRU locks may or may not be held when this is called. If
 * not held, recheck after acquiring the appropriate locks.
 *
 * Returns 1 if sufficient resources, 0 if not
 */
static int check_gru_resources(struct gru_state *gru, int cbr_au_count,
                               int dsr_au_count, int max_active_contexts)
{
        return hweight64(gru->gs_cbr_map) >= cbr_au_count
                && hweight64(gru->gs_dsr_map) >= dsr_au_count
                && gru->gs_active_contexts < max_active_contexts;
}
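/*
 * Typical use is a lockless check followed by a locked recheck, as in
 * gru_assign_gru_context() below (a sketch; "cbr"/"dsr" stand for the
 * caller's allocation unit counts):
 *
 *	if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH)) {
 *		spin_lock(&gru->gs_lock);
 *		if (!check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH)) {
 *			spin_unlock(&gru->gs_lock);
 *			goto again;	// resources were taken; rescan
 *		}
 *		reserve_gru_resources(gru, gts);
 *		spin_unlock(&gru->gs_lock);
 *	}
 */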
/*
 * TLB management requires tracking all GRU chiplets that have loaded a GSEG
 * context.
 */
static int gru_load_mm_tracker(struct gru_state *gru,
                               struct gru_thread_state *gts)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid];
        unsigned short ctxbitmap = (1 << gts->ts_ctxnum);
        int asid;

        spin_lock(&gms->ms_asid_lock);
        asid = asids->mt_asid;

        spin_lock(&gru->gs_asid_lock);
        if (asid == 0 || (asids->mt_ctxbitmap == 0 && asids->mt_asid_gen !=
                          gru->gs_asid_gen)) {
                asid = gru_assign_asid(gru);
                asids->mt_asid = asid;
                asids->mt_asid_gen = gru->gs_asid_gen;
                STAT(asid_new);
        } else {
                STAT(asid_reuse);
        }
        spin_unlock(&gru->gs_asid_lock);

        BUG_ON(asids->mt_ctxbitmap & ctxbitmap);
        asids->mt_ctxbitmap |= ctxbitmap;
        if (!test_bit(gru->gs_gid, gms->ms_asidmap))
                __set_bit(gru->gs_gid, gms->ms_asidmap);
        spin_unlock(&gms->ms_asid_lock);

        gru_dbg(grudev,
                "gid %d, gts %p, gms %p, ctxnum %d, asid 0x%x, asidmap 0x%lx\n",
                gru->gs_gid, gts, gms, gts->ts_ctxnum, asid,
                gms->ms_asidmap[0]);
        return asid;
}
static void gru_unload_mm_tracker(struct gru_state *gru,
                                  struct gru_thread_state *gts)
{
        struct gru_mm_struct *gms = gts->ts_gms;
        struct gru_mm_tracker *asids;
        unsigned short ctxbitmap;

        asids = &gms->ms_asids[gru->gs_gid];
        ctxbitmap = (1 << gts->ts_ctxnum);
        spin_lock(&gms->ms_asid_lock);
        spin_lock(&gru->gs_asid_lock);
        BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
        asids->mt_ctxbitmap ^= ctxbitmap;
        gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
                gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
        spin_unlock(&gru->gs_asid_lock);
        spin_unlock(&gms->ms_asid_lock);
}
/*
 * Decrement the reference count on a GTS structure. Free the structure
 * if the reference count goes to zero.
 */
void gts_drop(struct gru_thread_state *gts)
{
        if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) {
                gru_drop_mmu_notifier(gts->ts_gms);
                kfree(gts);
                STAT(gts_free);
        }
}
/*
 * Locate the GTS structure for the current thread.
 */
static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
                                                            *vdata, int tsid)
{
        struct gru_thread_state *gts;

        list_for_each_entry(gts, &vdata->vd_head, ts_next)
                if (gts->ts_tsid == tsid)
                        return gts;
        return NULL;
}
/*
 * Allocate a thread state structure.
 */
struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
                int cbr_au_count, int dsr_au_count, int options, int tsid)
{
        struct gru_thread_state *gts;
        int bytes;

        bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
        bytes += sizeof(struct gru_thread_state);
        gts = kmalloc(bytes, GFP_KERNEL);
        if (!gts)
                return NULL;

        STAT(gts_alloc);
        memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
        atomic_set(&gts->ts_refcnt, 1);
        mutex_init(&gts->ts_ctxlock);
        gts->ts_cbr_au_count = cbr_au_count;
        gts->ts_dsr_au_count = dsr_au_count;
        gts->ts_user_options = options;
        gts->ts_tsid = tsid;
        gts->ts_ctxnum = NULLCTX;
        gts->ts_tlb_int_select = -1;
        gts->ts_cch_req_slice = -1;
        gts->ts_sizeavail = GRU_SIZEAVAIL(PAGE_SHIFT);
        if (vma) {
                gts->ts_mm = current->mm;
                gts->ts_vma = vma;
                gts->ts_gms = gru_register_mmu_notifier();
                if (!gts->ts_gms)
                        goto err;
        }

        gru_dbg(grudev, "alloc gts %p\n", gts);
        return gts;

err:
        gts_drop(gts);
        return NULL;
}
/*
 * Allocate a vma private data structure.
 */
struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid)
{
        struct gru_vma_data *vdata = NULL;

        vdata = kmalloc(sizeof(*vdata), GFP_KERNEL);
        if (!vdata)
                return NULL;

        INIT_LIST_HEAD(&vdata->vd_head);
        spin_lock_init(&vdata->vd_lock);
        gru_dbg(grudev, "alloc vdata %p\n", vdata);
        return vdata;
}
/*
 * Find the thread state structure for the current thread.
 */
struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma,
                                               int tsid)
{
        struct gru_vma_data *vdata = vma->vm_private_data;
        struct gru_thread_state *gts;

        spin_lock(&vdata->vd_lock);
        gts = gru_find_current_gts_nolock(vdata, tsid);
        spin_unlock(&vdata->vd_lock);
        gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
        return gts;
}
/*
 * Allocate a new thread state for a GSEG. Note that races may allow
 * another thread to race to create a gts.
 */
struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
                                                int tsid)
{
        struct gru_vma_data *vdata = vma->vm_private_data;
        struct gru_thread_state *gts, *ngts;

        gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
                            vdata->vd_user_options, tsid);
        if (!gts)
                return NULL;

        spin_lock(&vdata->vd_lock);
        ngts = gru_find_current_gts_nolock(vdata, tsid);
        if (ngts) {
                gts_drop(gts);
                gts = ngts;
                STAT(gts_double_allocate);
        } else {
                list_add(&gts->ts_next, &vdata->vd_head);
        }
        spin_unlock(&vdata->vd_lock);
        gru_dbg(grudev, "vma %p, gts %p\n", vma, gts);
        return gts;
}
/*
 * Free the GRU context assigned to the thread state.
 */
static void gru_free_gru_context(struct gru_thread_state *gts)
{
        struct gru_state *gru;

        gru = gts->ts_gru;
        gru_dbg(grudev, "gts %p, gid %d\n", gts, gru->gs_gid);

        spin_lock(&gru->gs_lock);
        gru->gs_gts[gts->ts_ctxnum] = NULL;
        free_gru_resources(gru, gts);
        BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0);
        __clear_bit(gts->ts_ctxnum, &gru->gs_context_map);
        gts->ts_ctxnum = NULLCTX;
        gts->ts_gru = NULL;
        gts->ts_blade = -1;
        spin_unlock(&gru->gs_lock);

        gts_drop(gts);
        STAT(free_context);
}
/*
 * Prefetching cachelines helps hardware performance.
 * (Strictly a performance enhancement. Not functionally required).
 */
static void prefetch_data(void *p, int num, int stride)
{
        while (num-- > 0) {
                prefetchw(p);
                p += stride;
        }
}

static inline long gru_copy_handle(void *d, void *s)
{
        memcpy(d, s, GRU_HANDLE_BYTES);
        return GRU_HANDLE_BYTES;
}
static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
                                 unsigned long cbrmap, unsigned long length)
{
        int i, scr;

        prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES,
                      GRU_CACHE_LINE_BYTES);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES);
                prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1,
                              GRU_CACHE_LINE_BYTES);
                cb += GRU_HANDLE_STRIDE;
        }
}
static void gru_load_context_data(void *save, void *grubase, int ctxnum,
                                  unsigned long cbrmap, unsigned long dsrmap,
                                  int data_valid)
{
        void *gseg, *cb, *cbe;
        unsigned long length;
        int i, scr;

        gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
        cb = gseg + GRU_CB_BASE;
        cbe = grubase + GRU_CBE_BASE;
        length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
        gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                if (data_valid) {
                        save += gru_copy_handle(cb, save);
                        save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
                                                save);
                } else {
                        memset(cb, 0, GRU_CACHE_LINE_BYTES);
                        memset(cbe + i * GRU_HANDLE_STRIDE, 0,
                               GRU_CACHE_LINE_BYTES);
                }
                cb += GRU_HANDLE_STRIDE;
        }

        if (data_valid)
                memcpy(gseg + GRU_DS_BASE, save, length);
        else
                memset(gseg + GRU_DS_BASE, 0, length);
}
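/*
 * Layout of the context save area walked by gru_load_context_data()
 * above and gru_unload_context_data() below (inferred from the copy
 * loops, not a hardware-defined structure):
 *
 *	save: one [CB handle][CBE handle] pair per allocated CBR, in
 *	allocation-map order, followed by the DSR data
 *	(hweight64(dsrmap) * GRU_DSR_AU_BYTES bytes).
 */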
static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
                                    unsigned long cbrmap, unsigned long dsrmap)
{
        void *gseg, *cb, *cbe;
        unsigned long length;
        int i, scr;

        gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
        cb = gseg + GRU_CB_BASE;
        cbe = grubase + GRU_CBE_BASE;
        length = hweight64(dsrmap) * GRU_DSR_AU_BYTES;
        gru_prefetch_context(gseg, cb, cbe, cbrmap, length);

        for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
                save += gru_copy_handle(save, cb);
                save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE);
                cb += GRU_HANDLE_STRIDE;
        }
        memcpy(save, gseg + GRU_DS_BASE, length);
}
void gru_unload_context(struct gru_thread_state *gts, int savestate)
{
        struct gru_state *gru = gts->ts_gru;
        struct gru_context_configuration_handle *cch;
        int ctxnum = gts->ts_ctxnum;

        if (!is_kernel_context(gts))
                zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

        gru_dbg(grudev, "gts %p\n", gts);
        lock_cch_handle(cch);
        if (cch_interrupt_sync(cch))
                BUG();

        if (!is_kernel_context(gts))
                gru_unload_mm_tracker(gru, gts);
        if (savestate) {
                gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
                                        ctxnum, gts->ts_cbr_map,
                                        gts->ts_dsr_map);
                gts->ts_data_valid = 1;
        }

        if (cch_deallocate(cch))
                BUG();
        gts->ts_force_unload = 0;	/* ts_force_unload locked by CCH lock */
        unlock_cch_handle(cch);

        gru_free_gru_context(gts);
}
/*
 * Load a GRU context by copying it from the thread data structure in memory
 * to the GRU.
 */
void gru_load_context(struct gru_thread_state *gts)
{
        struct gru_state *gru = gts->ts_gru;
        struct gru_context_configuration_handle *cch;
        int i, err, asid, ctxnum = gts->ts_ctxnum;

        gru_dbg(grudev, "gts %p\n", gts);
        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

        lock_cch_handle(cch);
        cch->tfm_fault_bit_enable =
            (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
             || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
        cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
        if (cch->tlb_int_enable) {
                gts->ts_tlb_int_select = gru_cpu_fault_map_id();
                cch->tlb_int_select = gts->ts_tlb_int_select;
        }
        if (gts->ts_cch_req_slice >= 0) {
                cch->req_slice_set_enable = 1;
                cch->req_slice = gts->ts_cch_req_slice;
        } else {
                cch->req_slice_set_enable = 0;
        }
        cch->tfm_done_bit_enable = 0;
        cch->dsr_allocation_map = gts->ts_dsr_map;
        cch->cbr_allocation_map = gts->ts_cbr_map;

        if (is_kernel_context(gts)) {
                cch->unmap_enable = 1;
                cch->tfm_done_bit_enable = 1;
                cch->cb_int_enable = 1;
        } else {
                cch->unmap_enable = 0;
                cch->tfm_done_bit_enable = 0;
                cch->cb_int_enable = 0;
                asid = gru_load_mm_tracker(gru, gts);
                for (i = 0; i < 8; i++) {
                        cch->asid[i] = asid + i;
                        cch->sizeavail[i] = gts->ts_sizeavail;
                }
        }

        err = cch_allocate(cch);
        if (err) {
                gru_dbg(grudev,
                        "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n",
                        err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map);
                BUG();
        }

        gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
                              gts->ts_cbr_map, gts->ts_dsr_map,
                              gts->ts_data_valid);

        if (cch_start(cch))
                BUG();

        unlock_cch_handle(cch);
}
/*
 * Update fields in an active CCH:
 *	- retarget interrupts on local blade
 *	- update sizeavail mask
 *	- force a delayed context unload by clearing the CCH asids. This
 *	  forces TLB misses for new GRU instructions. The context is unloaded
 *	  when the next TLB miss occurs.
 */
int gru_update_cch(struct gru_thread_state *gts, int force_unload)
{
        struct gru_context_configuration_handle *cch;
        struct gru_state *gru = gts->ts_gru;
        int i, ctxnum = gts->ts_ctxnum, ret = 0;

        cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);

        lock_cch_handle(cch);
        if (cch->state == CCHSTATE_ACTIVE) {
                if (gru->gs_gts[gts->ts_ctxnum] != gts)
                        goto exit;
                if (cch_interrupt(cch))
                        BUG();
                if (!force_unload) {
                        for (i = 0; i < 8; i++)
                                cch->sizeavail[i] = gts->ts_sizeavail;
                        gts->ts_tlb_int_select = gru_cpu_fault_map_id();
                        cch->tlb_int_select = gru_cpu_fault_map_id();
                        cch->tfm_fault_bit_enable =
                            (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
                             || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
                } else {
                        for (i = 0; i < 8; i++)
                                cch->asid[i] = 0;
                        cch->tfm_fault_bit_enable = 0;
                        cch->tlb_int_enable = 0;
                        gts->ts_force_unload = 1;
                }
                if (cch_start(cch))
                        BUG();
                ret = 1;
        }
exit:
        unlock_cch_handle(cch);
        return ret;
}
/*
 * Update CCH tlb interrupt select. Required when all the following is true:
 *	- task's GRU context is loaded into a GRU
 *	- task is using interrupt notification for TLB faults
 *	- task has migrated to a different cpu on the same blade where
 *	  it was previously running.
 */
static int gru_retarget_intr(struct gru_thread_state *gts)
{
        if (gts->ts_tlb_int_select < 0
            || gts->ts_tlb_int_select == gru_cpu_fault_map_id())
                return 0;

        gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
                gru_cpu_fault_map_id());
        return gru_update_cch(gts, 0);
}
/*
 * Insufficient GRU resources available on the local blade. Steal a context
 * from a process. This is a hack until a _real_ resource scheduler is
 * written....
 */
#define next_ctxnum(n)	((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0)
#define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
				 ((g)+1) : &(b)->bs_grus[0])
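/*
 * Example (illustrative): next_gru() cycles round-robin through the
 * blade's chiplets, bs_grus[0] -> bs_grus[1] -> ... -> bs_grus[0],
 * while next_ctxnum() advances the per-blade LRU context number. The
 * saved bs_lru_gru/bs_lru_ctxnum let gru_steal_context() resume its
 * scan where the previous steal left off.
 */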
static int is_gts_stealable(struct gru_thread_state *gts,
                            struct gru_blade_state *bs)
{
        if (is_kernel_context(gts))
                return down_write_trylock(&bs->bs_kgts_sema);
        else
                return mutex_trylock(&gts->ts_ctxlock);
}
static void gts_stolen(struct gru_thread_state *gts,
                       struct gru_blade_state *bs)
{
        if (is_kernel_context(gts)) {
                up_write(&bs->bs_kgts_sema);
                STAT(steal_kernel_context);
        } else {
                mutex_unlock(&gts->ts_ctxlock);
                STAT(steal_user_context);
        }
}
void gru_steal_context(struct gru_thread_state *gts, int blade_id)
{
        struct gru_blade_state *blade;
        struct gru_state *gru, *gru0;
        struct gru_thread_state *ngts = NULL;
        int ctxnum, ctxnum0, flag = 0, cbr, dsr;

        cbr = gts->ts_cbr_au_count;
        dsr = gts->ts_dsr_au_count;

        blade = gru_base[blade_id];
        spin_lock(&blade->bs_lock);

        ctxnum = next_ctxnum(blade->bs_lru_ctxnum);
        gru = blade->bs_lru_gru;
        if (ctxnum == 0)
                gru = next_gru(blade, gru);
        ctxnum0 = ctxnum;
        gru0 = gru;
        while (1) {
                if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
                        break;
                spin_lock(&gru->gs_lock);
                for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
                        if (flag && gru == gru0 && ctxnum == ctxnum0)
                                break;
                        ngts = gru->gs_gts[ctxnum];
                        /*
                         * We are grabbing locks out of order, so trylock is
                         * needed. GTSs are usually not locked, so the odds of
                         * success are high. If trylock fails, try to steal a
                         * different GSEG.
                         */
                        if (ngts && is_gts_stealable(ngts, blade))
                                break;
                        ngts = NULL;
                        flag = 1;
                }
                spin_unlock(&gru->gs_lock);
                if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
                        break;
                ctxnum = 0;
                gru = next_gru(blade, gru);
        }
        blade->bs_lru_gru = gru;
        blade->bs_lru_ctxnum = ctxnum;
        spin_unlock(&blade->bs_lock);

        if (ngts) {
                gts->ustats.context_stolen++;
                ngts->ts_steal_jiffies = jiffies;
                gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
                gts_stolen(ngts, blade);
        } else {
                STAT(steal_context_failed);
        }
        gru_dbg(grudev,
                "stole gid %d, ctxnum %d from gts %p. Need cb %d, ds %d;"
                " avail cb %ld, ds %ld\n",
                gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map),
                hweight64(gru->gs_dsr_map));
}
/*
 * Scan the GRUs on the local blade & assign a GRU context.
 */
struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
                                         int blade)
{
        struct gru_state *gru, *grux;
        int i, max_active_contexts;

again:
        gru = NULL;
        max_active_contexts = GRU_NUM_CCH;
        for_each_gru_on_blade(grux, blade, i) {
                if (check_gru_resources(grux, gts->ts_cbr_au_count,
                                        gts->ts_dsr_au_count,
                                        max_active_contexts)) {
                        gru = grux;
                        max_active_contexts = grux->gs_active_contexts;
                        if (max_active_contexts == 0)
                                break;
                }
        }

        if (gru) {
                spin_lock(&gru->gs_lock);
                if (!check_gru_resources(gru, gts->ts_cbr_au_count,
                                         gts->ts_dsr_au_count, GRU_NUM_CCH)) {
                        spin_unlock(&gru->gs_lock);
                        goto again;
                }
                reserve_gru_resources(gru, gts);
                gts->ts_gru = gru;
                gts->ts_blade = gru->gs_blade_id;
                gts->ts_ctxnum =
                    find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
                BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
                atomic_inc(&gts->ts_refcnt);
                gru->gs_gts[gts->ts_ctxnum] = gts;
                __set_bit(gts->ts_ctxnum, &gru->gs_context_map);
                spin_unlock(&gru->gs_lock);

                STAT(assign_context);
                gru_dbg(grudev,
                        "gseg %p, gts %p, gid %d, ctx %d, cbr %d, dsr %d\n",
                        gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts,
                        gts->ts_gru->gs_gid, gts->ts_ctxnum,
                        gts->ts_cbr_au_count, gts->ts_dsr_au_count);
        } else {
                gru_dbg(grudev, "failed to allocate a GTS %s\n", "");
                STAT(assign_context_failed);
        }

        return gru;
}
/*
 * gru_nopage
 *
 * Map the user's GRU segment
 *
 *	Note: gru segments are always mmapped on GRU_GSEG_PAGESIZE boundaries.
 */
int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct gru_thread_state *gts;
        unsigned long paddr, vaddr;
        int blade_id;

        vaddr = (unsigned long)vmf->virtual_address;
        gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
                vma, vaddr, GSEG_BASE(vaddr));
        STAT(nopfn);

        /* The following check ensures vaddr is a valid address in the VMA */
        gts = gru_find_thread_state(vma, TSID(vaddr, vma));
        if (!gts)
                return VM_FAULT_SIGBUS;

again:
        mutex_lock(&gts->ts_ctxlock);
        preempt_disable();
        blade_id = uv_numa_blade_id();

        if (gts->ts_gru) {
                if (gts->ts_gru->gs_blade_id != blade_id) {
                        STAT(migrated_nopfn_unload);
                        gru_unload_context(gts, 1);
                } else {
                        if (gru_retarget_intr(gts))
                                STAT(migrated_nopfn_retarget);
                }
        }

        if (!gts->ts_gru) {
                STAT(load_user_context);
                if (!gru_assign_gru_context(gts, blade_id)) {
                        preempt_enable();
                        mutex_unlock(&gts->ts_ctxlock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
                        blade_id = uv_numa_blade_id();
                        if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
                                gru_steal_context(gts, blade_id);
                        goto again;
                }
                gru_load_context(gts);
                paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum);
                remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1),
                                paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE,
                                vma->vm_page_prot);
        }

        preempt_enable();
        mutex_unlock(&gts->ts_ctxlock);

        return VM_FAULT_NOPAGE;
}