gru: update to rev 0.9 of gru spec
drivers/misc/sgi-gru/grukdump.c
/*
 * SN Platform GRU Driver
 *
 *            Dump GRU State
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10
static int gru_user_copy_handle(void __user **dp, void *s)
{
	/* Copy one cache-line-sized GRU handle to user space */
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	/* Advance the caller's destination pointer past the handle */
	*dp += GRU_HANDLE_BYTES;
	return 0;
}
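
/*
 * Per-context record layout written by gru_dump_context_data(): for each
 * CBR in the context's allocation map, three cache-line-sized handles are
 * emitted back to back (CB, then TFH, then CBE), followed by "dsrcnt"
 * cache lines of data-segment contents when the caller requested data.
 */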
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;

	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}
	/* ubuf is a user-space pointer; copy_to_user, not memcpy */
	if (dsrcnt && copy_to_user(ubuf, gseg + GRU_DS_BASE,
				   dsrcnt * GRU_HANDLE_STRIDE))
		goto fail;
	return 0;

fail:
	return -EFAULT;
}
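
/*
 * The next two helpers dump the chiplet-wide TLB handles: the full array
 * of TLB fault maps (TFM) and TLB global handles (TGH), one cache line
 * per handle.  Each returns the byte count written, or a negative errno.
 */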
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i, bytes;

	bytes = GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
	if (bytes > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			goto fail;
	}
	return bytes;

fail:
	return -EFAULT;
}
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i, bytes;

	bytes = GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
	if (bytes > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			goto fail;
	}
	return bytes;

fail:
	return -EFAULT;
}
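
/*
 * Dump one context.  The CCH is trylocked up to CCH_LOCK_ATTEMPTS times;
 * if the lock cannot be taken and the caller insisted on it (lock_cch),
 * only the header and the raw CCH are dumped.  hdr.cch_locked records
 * whether the snapshot was taken under the lock.
 */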
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	if (gru_user_copy_handle(&ubuf, cch))
		goto fail;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	hdr.magic = GRU_DUMP_MAGIC;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;

fail:
	if (cch_locked)
		unlock_cch_handle(cch);
	return -EFAULT;
}
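
/*
 * Top-level handler for the chiplet-dump request: the user buffer is
 * filled with the TFM array, then the TGH array, then one header plus
 * handle/data records per selected context.  A negative req.ctxnum
 * selects every context on the chiplet.  Returns the number of contexts
 * dumped, or a negative errno.
 */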
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids || req.gid < 0)
		return -EINVAL;

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}
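
For reference, a minimal user-space sketch of driving this dump path through
the GRU character device.  The /dev/gru device node, the
GRU_DUMP_CHIPLET_STATE ioctl, and the exact layout of struct
gru_dump_chiplet_state_req are assumptions taken from the companion grulib.h
header, not something this file defines; treat it as an illustration of the
request fields the handler above consumes (gid, ctxnum, data_opt, lock_cch,
buf, buflen), not a drop-in test.

/* Hypothetical user-space caller; see the hedges in the note above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "grulib.h"	/* assumed: GRU_DUMP_CHIPLET_STATE + request struct */

int main(void)
{
	struct gru_dump_chiplet_state_req req;
	size_t buflen = 1 << 20;	/* 1 MB scratch buffer */
	void *buf = malloc(buflen);
	int fd, cnt;

	fd = open("/dev/gru", O_RDWR);	/* assumed device node */
	if (fd < 0 || !buf) {
		perror("setup");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.gid = 0;		/* first GRU chiplet */
	req.ctxnum = -1;	/* negative: dump all contexts */
	req.data_opt = 1;	/* include data-segment (DSR) lines */
	req.lock_cch = 0;	/* do not require the CCH lock */
	req.buf = buf;
	req.buflen = buflen;

	cnt = ioctl(fd, GRU_DUMP_CHIPLET_STATE, &req);
	if (cnt < 0)
		perror("GRU_DUMP_CHIPLET_STATE");
	else
		printf("dumped %d context(s)\n", cnt);

	free(buf);
	close(fd);
	return cnt < 0;
}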