/*
 * SN Platform GRU Driver
 *
 *              FILE OPERATIONS & DRIVER INITIALIZATION
 *
 * This file supports the user system call for file open, close, mmap, etc.
 * This also includes the driver initialization code.
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#ifdef CONFIG_X86_64
#include <asm/uv/uv_irq.h>
#endif
#include <asm/uv/uv.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"

#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>
struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
unsigned long gru_start_paddr __read_mostly;
void *gru_start_vaddr __read_mostly;
unsigned long gru_end_paddr __read_mostly;
unsigned int gru_max_gids __read_mostly;
struct gru_stats_s gru_stats;

/* Guaranteed user available resources on each node */
static int max_user_cbrs, max_user_dsr_bytes;

static struct miscdevice gru_miscdev;
/*
 * gru_vma_close
 *
 * Called when unmapping a device mapping. Frees all gru resources
 * and tables belonging to the vma.
 */
static void gru_vma_close(struct vm_area_struct *vma)
{
	struct gru_vma_data *vdata;
	struct gru_thread_state *gts;
	struct list_head *entry, *next;

	if (!vma->vm_private_data)
		return;

	vdata = vma->vm_private_data;
	vma->vm_private_data = NULL;
	gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file,
				vdata);
	list_for_each_safe(entry, next, &vdata->vd_head) {
		gts = list_entry(entry, struct gru_thread_state, ts_next);
		list_del(&gts->ts_next);
		mutex_lock(&gts->ts_ctxlock);
		if (gts->ts_gru)
			gru_unload_context(gts, 0);
		mutex_unlock(&gts->ts_ctxlock);
		gts_drop(gts);
	}
	kfree(vdata);
	STAT(vdata_free);
}
/*
 * gru_file_mmap
 *
 * Called when mmapping the device. Initializes the vma with a fault handler
 * and private data structure necessary to allocate, track, and free the
 * underlying pages.
 */
static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
		return -EPERM;

	if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
				vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
		return -EINVAL;

	vma->vm_flags |=
	    (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP |
			VM_RESERVED);
	vma->vm_page_prot = PAGE_SHARED;
	vma->vm_ops = &gru_vm_ops;

	vma->vm_private_data = gru_alloc_vma_data(vma, 0);
	if (!vma->vm_private_data)
		return -ENOMEM;

	gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n",
		file, vma->vm_start, vma, vma->vm_private_data);
	return 0;
}
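
/*
 * Minimal user-space sketch of a conforming mapping (hypothetical usage):
 * the mapping must be MAP_SHARED and writable, and both ends must be
 * GRU_GSEG_PAGESIZE aligned, so the caller passes a suitably aligned hint:
 *
 *	void *gseg = mmap(aligned_hint, GRU_GSEG_PAGESIZE,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */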
/*
 * Create a new GRU context
 */
static int gru_create_new_context(unsigned long arg)
{
	struct gru_create_context_req req;
	struct vm_area_struct *vma;
	struct gru_vma_data *vdata;
	int ret = -EINVAL;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	if (req.data_segment_bytes > max_user_dsr_bytes)
		return -EINVAL;
	if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count)
		return -EINVAL;

	if (!(req.options & GRU_OPT_MISS_MASK))
		req.options |= GRU_OPT_MISS_FMM_INTR;

	down_write(&current->mm->mmap_sem);
	vma = gru_find_vma(req.gseg);
	if (vma) {
		vdata = vma->vm_private_data;
		vdata->vd_user_options = req.options;
		vdata->vd_dsr_au_count =
			GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
		vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
		ret = 0;
	}
	up_write(&current->mm->mmap_sem);

	return ret;
}
/*
 * Get GRU configuration info (temp - for emulator testing)
 */
static long gru_get_config_info(unsigned long arg)
{
	struct gru_config_info info;
	int nodesperblade;

	if (num_online_nodes() > 1 &&
			(uv_node_to_blade_id(1) == uv_node_to_blade_id(0)))
		nodesperblade = 2;
	else
		nodesperblade = 1;
	info.cpus = num_online_cpus();
	info.nodes = num_online_nodes();
	info.blades = info.nodes / nodesperblade;
	info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades;

	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
/*
 * gru_file_unlocked_ioctl
 *
 * Called to update file attributes via IOCTL calls.
 */
static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
				    unsigned long arg)
{
	int err = -EBADRQC;

	gru_dbg(grudev, "file %p\n", file);

	switch (req) {
	case GRU_CREATE_CONTEXT:
		err = gru_create_new_context(arg);
		break;
	case GRU_SET_CONTEXT_OPTION:
		err = gru_set_context_option(arg);
		break;
	case GRU_USER_GET_EXCEPTION_DETAIL:
		err = gru_get_exception_detail(arg);
		break;
	case GRU_USER_UNLOAD_CONTEXT:
		err = gru_user_unload_context(arg);
		break;
	case GRU_USER_FLUSH_TLB:
		err = gru_user_flush_tlb(arg);
		break;
	case GRU_USER_CALL_OS:
		err = gru_handle_user_call_os(arg);
		break;
	case GRU_GET_GSEG_STATISTICS:
		err = gru_get_gseg_statistics(arg);
		break;
	case GRU_KTEST:
		err = gru_ktest(arg);
		break;
	case GRU_GET_CONFIG_INFO:
		err = gru_get_config_info(arg);
		break;
	case GRU_DUMP_CHIPLET_STATE:
		err = gru_dump_chiplet_request(arg);
		break;
	}
	return err;
}
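
/*
 * Minimal user-space sketch of the ioctl interface (hypothetical usage;
 * assumes the misc device node /dev/gru and the request layouts from the
 * user-visible GRU headers):
 *
 *	int fd = open("/dev/gru", O_RDWR);
 *	struct gru_create_context_req req;
 *
 *	req.gseg = gseg_vaddr;			(address of the mmap'ed GSEG)
 *	req.data_segment_bytes = bytes;
 *	req.control_blocks = cbrs;
 *	req.maximum_thread_count = 1;
 *	req.options = 0;
 *	if (ioctl(fd, GRU_CREATE_CONTEXT, (unsigned long)&req) < 0)
 *		perror("GRU_CREATE_CONTEXT");
 */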
/*
 * Called at init time to build tables for all GRUs that are present in the
 * system.
 */
static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
			     void *vaddr, int blade_id, int chiplet_id)
{
	spin_lock_init(&gru->gs_lock);
	spin_lock_init(&gru->gs_asid_lock);
	gru->gs_gru_base_paddr = paddr;
	gru->gs_gru_base_vaddr = vaddr;
	gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
	gru->gs_blade = gru_base[blade_id];
	gru->gs_blade_id = blade_id;
	gru->gs_chiplet_id = chiplet_id;
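	/*
	 * gs_cbr_map/gs_dsr_map are allocation-unit bitmaps with one bit per
	 * CBR/DSR allocation unit, all set at init (everything available).
	 * The GRU_CBR_AU == 64 special case avoids shifting a 64-bit value
	 * by 64, which is undefined behavior in C.
	 */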
	gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
	gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
	gru->gs_asid_limit = MAX_ASID;
	gru_tgh_flush_init(gru);
	if (gru->gs_gid >= gru_max_gids)
		gru_max_gids = gru->gs_gid + 1;
	gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
		blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
		gru->gs_gru_base_paddr);
}
static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
{
	int pnode, nid, bid, chip;
	int cbrs, dsrbytes, n;
	int order = get_order(sizeof(struct gru_blade_state));
	struct page *page;
	struct gru_state *gru;
	unsigned long paddr;
	void *vaddr;

	max_user_cbrs = GRU_NUM_CB;
	max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
	for_each_possible_blade(bid) {
		pnode = uv_blade_to_pnode(bid);
		nid = uv_blade_to_memory_nid(bid);/* -1 if no memory on blade */
		page = alloc_pages_node(nid, GFP_KERNEL, order);
		if (!page)
			goto fail;
		gru_base[bid] = page_address(page);
		memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
		gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
		spin_lock_init(&gru_base[bid]->bs_lock);
		init_rwsem(&gru_base[bid]->bs_kgts_sema);

		dsrbytes = 0;
		cbrs = 0;
		for (gru = gru_base[bid]->bs_grus, chip = 0;
				chip < GRU_CHIPLETS_PER_BLADE;
				chip++, gru++) {
			paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
			vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
			gru_init_chiplet(gru, paddr, vaddr, bid, chip);
			n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
			cbrs = max(cbrs, n);
			n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
			dsrbytes = max(dsrbytes, n);
		}
		max_user_cbrs = min(max_user_cbrs, cbrs);
		max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes);
	}
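	/*
	 * max_user_cbrs/max_user_dsr_bytes now hold the minimum across all
	 * blades, i.e. the CBR/DSR resources a user context can be
	 * guaranteed regardless of which blade it is loaded on.
	 */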
	return 0;

fail:
	for (bid--; bid >= 0; bid--)
		free_pages((unsigned long)gru_base[bid], order);
	return -ENOMEM;
}
static void gru_free_tables(void)
{
	int bid;
	int order = get_order(sizeof(struct gru_state) *
			      GRU_CHIPLETS_PER_BLADE);

	for (bid = 0; bid < GRU_MAX_BLADES; bid++)
		free_pages((unsigned long)gru_base[bid], order);
}
static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
{
	unsigned long mmr = 0;
	int core;

	/*
	 * We target the cores of a blade and not the hyperthreads themselves.
	 * There is a max of 8 cores per socket and 2 sockets per blade,
	 * making for a max total of 16 cores (i.e., 16 CPUs without
	 * hyperthreading and 32 CPUs with hyperthreading).
	 */
	core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
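	/*
	 * Worked example (assuming UV_MAX_INT_CORES is 8, per the comment
	 * above): a CPU on socket 1, core 3 yields core = 3 + 8 * 1 = 11,
	 * selecting the 12th per-core TLB interrupt MMR of the chiplet.
	 */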
	if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
		return 0;

	if (chiplet == 0) {
		mmr = UVH_GR0_TLB_INT0_CONFIG +
		    core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
	} else if (chiplet == 1) {
		mmr = UVH_GR1_TLB_INT0_CONFIG +
		    core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
	} else {
		BUG();
	}

	*corep = core;
	return mmr;
}
#ifdef CONFIG_IA64

static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];

static void gru_noop(unsigned int irq)
{
}

static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
	[0 ... GRU_CHIPLETS_PER_BLADE - 1] {
		.mask		= gru_noop,
		.unmask		= gru_noop,
		.ack		= gru_noop
	}
};

static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
			irq_handler_t irq_handler, int cpu, int blade)
{
	unsigned long mmr;
	int irq = IRQ_GRU + chiplet;
	int ret, core;

	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
	if (mmr == 0)
		return 0;

	if (gru_irq_count[chiplet] == 0) {
		gru_chip[chiplet].name = irq_name;
		ret = set_irq_chip(irq, &gru_chip[chiplet]);
		if (ret) {
			printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
			       GRU_DRIVER_ID_STR, -ret);
			return ret;
		}

		ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
		if (ret) {
			printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
			       GRU_DRIVER_ID_STR, -ret);
			return ret;
		}
	}
	gru_irq_count[chiplet]++;

	return 0;
}

static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
	unsigned long mmr;
	int core, irq = IRQ_GRU + chiplet;

	if (gru_irq_count[chiplet] == 0)
		return;

	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
	if (mmr == 0)
		return;

	if (--gru_irq_count[chiplet] == 0)
		free_irq(irq, NULL);
}
#elif defined CONFIG_X86_64

static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
			irq_handler_t irq_handler, int cpu, int blade)
{
	unsigned long mmr;
	int irq, core;
	int ret;

	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
	if (mmr == 0)
		return 0;

	irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
	if (irq < 0) {
		printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
		       GRU_DRIVER_ID_STR, -irq);
		return irq;
	}

	ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
	if (ret) {
		uv_teardown_irq(irq);
		printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
		       GRU_DRIVER_ID_STR, -ret);
		return ret;
	}
	gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
	return 0;
}

static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
{
	int irq, core;
	unsigned long mmr;

	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
	if (mmr) {
		irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
		if (irq) {
			free_irq(irq, NULL);
			uv_teardown_irq(irq);
		}
	}
}

#endif
static void gru_teardown_tlb_irqs(void)
{
	int blade;
	int cpu;

	for_each_online_cpu(cpu) {
		blade = uv_cpu_to_blade_id(cpu);
		gru_chiplet_teardown_tlb_irq(0, cpu, blade);
		gru_chiplet_teardown_tlb_irq(1, cpu, blade);
	}
	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		gru_chiplet_teardown_tlb_irq(0, 0, blade);
		gru_chiplet_teardown_tlb_irq(1, 0, blade);
	}
}
static int gru_setup_tlb_irqs(void)
{
	int blade;
	int cpu;
	int ret;

	for_each_online_cpu(cpu) {
		blade = uv_cpu_to_blade_id(cpu);
		ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
		if (ret != 0)
			goto exit1;

		ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
		if (ret != 0)
			goto exit1;
	}
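	/*
	 * Blades with no possible CPUs still contain GRU chiplets; their
	 * TLB fault interrupts are targeted at CPU 0 and routed to the
	 * gru_intr_mblade() handler.
	 */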
	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
		if (ret != 0)
			goto exit1;

		ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
		if (ret != 0)
			goto exit1;
	}

	return 0;

exit1:
	gru_teardown_tlb_irqs();
	return ret;
}
/*
 * gru_init
 *
 * Called at boot or module load time to initialize the GRUs.
 */
static int __init gru_init(void)
{
	int ret;

	if (!is_uv_system())
		return 0;

#if defined CONFIG_IA64
	gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
#else
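	/*
	 * On x86_64 UV, the GRU overlay config MMR encodes the GRU base
	 * physical address; the mask below keeps the address bits and
	 * drops the control bits above them (see uv_mmrs.h for the field
	 * layout).
	 */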
	gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
				0x7fffffffffffUL;
#endif
	gru_start_vaddr = __va(gru_start_paddr);
	gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
	printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
	       gru_start_paddr, gru_end_paddr);
	ret = misc_register(&gru_miscdev);
	if (ret) {
		printk(KERN_ERR "%s: misc_register failed\n",
		       GRU_DRIVER_ID_STR);
		goto exit0;
	}

	ret = gru_proc_init();
	if (ret) {
		printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
		goto exit1;
	}

	ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
	if (ret) {
		printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
		goto exit2;
	}

	ret = gru_setup_tlb_irqs();
	if (ret != 0)
		goto exit3;

	gru_kservices_init();

	printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
	       GRU_DRIVER_VERSION_STR);
	return 0;

exit3:
	gru_free_tables();
exit2:
	gru_proc_exit();
exit1:
	misc_deregister(&gru_miscdev);
exit0:
	return ret;
}
static void __exit gru_exit(void)
{
	if (!is_uv_system())
		return;

	gru_teardown_tlb_irqs();
	gru_kservices_exit();
	gru_free_tables();
	misc_deregister(&gru_miscdev);
	gru_proc_exit();
}
static const struct file_operations gru_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= gru_file_unlocked_ioctl,
	.mmap		= gru_file_mmap,
};

static struct miscdevice gru_miscdev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "gru",
	.fops		= &gru_fops,
};

const struct vm_operations_struct gru_vm_ops = {
	.close		= gru_vma_close,
	.fault		= gru_fault,
};
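
/*
 * When built into the kernel, initialize at fs_initcall() time so that
 * kernel-internal GRU services come up early in boot; when built as a
 * module, the normal module_init() ordering applies.
 */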
#ifndef MODULE
fs_initcall(gru_init);
#else
module_init(gru_init);
#endif
module_exit(gru_exit);

module_param(gru_options, ulong, 0644);
MODULE_PARM_DESC(gru_options, "Various debug options");

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR);
MODULE_VERSION(GRU_DRIVER_VERSION_STR);