/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>

#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"
#define CCH_LOCK_ATTEMPTS	10
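
/*
 * Copy a single GRU handle (GRU_HANDLE_BYTES) to user space and advance
 * the user buffer pointer past it. Returns 0 on success, -1 on fault.
 */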
static int gru_user_copy_handle(void __user **dp, void *s)
{
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	*dp += GRU_HANDLE_BYTES;
	return 0;
}
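
/*
 * Dump the resources of one GRU context: for each CBR allocated to the
 * context, copy the CB, its TFH, and its CBE to the user buffer, then
 * copy the data segment (DSR) cache lines if any were requested.
 */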
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt,
			int flush_cbrs)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;

	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (flush_cbrs)
			gru_flush_cache(cb);
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}
	if (dsrcnt)
		memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
	return 0;

fail:
	return -EFAULT;
}
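
/*
 * Dump the TLB fault map (TFM) handles of a GRU chiplet. Returns the
 * number of bytes written to the user buffer or a negative errno.
 */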
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i, bytes;

	bytes = GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
	if (bytes > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			goto fail;
	}
	return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i, bytes;

	bytes = GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
	if (bytes > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			goto fail;
	}
	return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}
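
/*
 * Dump the state of one GRU context to the user buffer. The record
 * written consists of a gru_dump_context_header, the CCH, and, if the
 * context is not INACTIVE, one CB/TFH/CBE triple per allocated CBR
 * followed by the DSR data. If requested, the CCH is locked (with a
 * bounded number of retries) so that the dumped state is consistent.
 * Returns the number of bytes written or a negative errno.
 */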
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch))
		goto fail;
	if (cch_locked)
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
							GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
							GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);

	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr)))
		ret = -EFAULT;

	return ret ? ret : bytes;

fail:
	if (cch_locked)
		unlock_cch_handle(cch);
	return -EFAULT;
}
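
/*
 * Entry point for the GRU "dump chiplet state" request. "arg" points to
 * a gru_dump_chiplet_state_req in user space that selects the chiplet
 * (by gid), an optional single context, and the dump options. Returns
 * the number of contexts dumped or a negative errno.
 */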
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids || req.gid < 0)
		return -EINVAL;

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch,
						req.flush_cbrs);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}