x86, delay: tsc based udelay should have rdtsc_barrier
[linux-2.6/mini2440.git] / drivers / misc / sgi-gru / grukdump.c
blob55eabfa85585eb60dd889ec09dc441f187623c7b
1 /*
2 * SN Platform GRU Driver
4 * Dump GRU State
6 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
23 #include <linux/kernel.h>
24 #include <linux/mm.h>
25 #include <linux/spinlock.h>
26 #include <linux/uaccess.h>
27 #include <linux/delay.h>
28 #include <linux/bitops.h>
29 #include <asm/uv/uv_hub.h>
30 #include "gru.h"
31 #include "grutables.h"
32 #include "gruhandles.h"
33 #include "grulib.h"
35 #define CCH_LOCK_ATTEMPTS 10
37 static int gru_user_copy_handle(void __user **dp, void *s)
39 if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
40 return -1;
41 *dp += GRU_HANDLE_BYTES;
42 return 0;
45 static int gru_dump_context_data(void *grubase,
46 struct gru_context_configuration_handle *cch,
47 void __user *ubuf, int ctxnum, int dsrcnt)
49 void *cb, *cbe, *tfh, *gseg;
50 int i, scr;
52 gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
53 cb = gseg + GRU_CB_BASE;
54 cbe = grubase + GRU_CBE_BASE;
55 tfh = grubase + GRU_TFH_BASE;
57 for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
58 if (gru_user_copy_handle(&ubuf, cb))
59 goto fail;
60 if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
61 goto fail;
62 if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
63 goto fail;
64 cb += GRU_HANDLE_STRIDE;
66 if (dsrcnt)
67 memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
68 return 0;
70 fail:
71 return -EFAULT;
74 static int gru_dump_tfm(struct gru_state *gru,
75 void __user *ubuf, void __user *ubufend)
77 struct gru_tlb_fault_map *tfm;
78 int i, ret, bytes;
80 bytes = GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
81 if (bytes > ubufend - ubuf)
82 ret = -EFBIG;
84 for (i = 0; i < GRU_NUM_TFM; i++) {
85 tfm = get_tfm(gru->gs_gru_base_vaddr, i);
86 if (gru_user_copy_handle(&ubuf, tfm))
87 goto fail;
89 return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
91 fail:
92 return -EFAULT;
95 static int gru_dump_tgh(struct gru_state *gru,
96 void __user *ubuf, void __user *ubufend)
98 struct gru_tlb_global_handle *tgh;
99 int i, ret, bytes;
101 bytes = GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
102 if (bytes > ubufend - ubuf)
103 ret = -EFBIG;
105 for (i = 0; i < GRU_NUM_TGH; i++) {
106 tgh = get_tgh(gru->gs_gru_base_vaddr, i);
107 if (gru_user_copy_handle(&ubuf, tgh))
108 goto fail;
110 return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
112 fail:
113 return -EFAULT;
116 static int gru_dump_context(struct gru_state *gru, int ctxnum,
117 void __user *ubuf, void __user *ubufend, char data_opt,
118 char lock_cch)
120 struct gru_dump_context_header hdr;
121 struct gru_dump_context_header __user *uhdr = ubuf;
122 struct gru_context_configuration_handle *cch, *ubufcch;
123 struct gru_thread_state *gts;
124 int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
125 void *grubase;
127 memset(&hdr, 0, sizeof(hdr));
128 grubase = gru->gs_gru_base_vaddr;
129 cch = get_cch(grubase, ctxnum);
130 for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
131 cch_locked = trylock_cch_handle(cch);
132 if (cch_locked)
133 break;
134 msleep(1);
137 ubuf += sizeof(hdr);
138 ubufcch = ubuf;
139 if (gru_user_copy_handle(&ubuf, cch))
140 goto fail;
141 if (cch_locked)
142 ubufcch->delresp = 0;
143 bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
145 if (cch_locked || !lock_cch) {
146 gts = gru->gs_gts[ctxnum];
147 if (gts && gts->ts_vma) {
148 hdr.pid = gts->ts_tgid_owner;
149 hdr.vaddr = gts->ts_vma->vm_start;
151 if (cch->state != CCHSTATE_INACTIVE) {
152 cbrcnt = hweight64(cch->cbr_allocation_map) *
153 GRU_CBR_AU_SIZE;
154 dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
155 GRU_DSR_AU_CL : 0;
157 bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
158 if (bytes > ubufend - ubuf)
159 ret = -EFBIG;
160 else
161 ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
162 dsrcnt);
165 if (cch_locked)
166 unlock_cch_handle(cch);
167 if (ret)
168 return ret;
170 hdr.magic = GRU_DUMP_MAGIC;
171 hdr.gid = gru->gs_gid;
172 hdr.ctxnum = ctxnum;
173 hdr.cbrcnt = cbrcnt;
174 hdr.dsrcnt = dsrcnt;
175 hdr.cch_locked = cch_locked;
176 if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr)))
177 ret = -EFAULT;
179 return ret ? ret : bytes;
181 fail:
182 unlock_cch_handle(cch);
183 return -EFAULT;
186 int gru_dump_chiplet_request(unsigned long arg)
188 struct gru_state *gru;
189 struct gru_dump_chiplet_state_req req;
190 void __user *ubuf;
191 void __user *ubufend;
192 int ctxnum, ret, cnt = 0;
194 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
195 return -EFAULT;
197 /* Currently, only dump by gid is implemented */
198 if (req.gid >= gru_max_gids || req.gid < 0)
199 return -EINVAL;
201 gru = GID_TO_GRU(req.gid);
202 ubuf = req.buf;
203 ubufend = req.buf + req.buflen;
205 ret = gru_dump_tfm(gru, ubuf, ubufend);
206 if (ret < 0)
207 goto fail;
208 ubuf += ret;
210 ret = gru_dump_tgh(gru, ubuf, ubufend);
211 if (ret < 0)
212 goto fail;
213 ubuf += ret;
215 for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
216 if (req.ctxnum == ctxnum || req.ctxnum < 0) {
217 ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
218 req.data_opt, req.lock_cch);
219 if (ret < 0)
220 goto fail;
221 ubuf += ret;
222 cnt++;
226 if (copy_to_user((void __user *)arg, &req, sizeof(req)))
227 return -EFAULT;
228 return cnt;
230 fail:
231 return ret;