drivers/misc/sgi-gru/gruhandles.c
/*
 *		GRU KERNEL MCS INSTRUCTIONS
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
/* GRU instruction timeout: 10 sec, expressed in cycles */
#ifdef CONFIG_IA64
#include <asm/processor.h>
#define GRU_OPERATION_TIMEOUT	(((cycles_t) local_cpu_data->itc_freq)*10)
#define CLKS2NSEC(c)		((c) * 1000000000 / local_cpu_data->itc_freq)
#else
#include <asm/tsc.h>
#define GRU_OPERATION_TIMEOUT	((cycles_t) tsc_khz*10*1000)
#define CLKS2NSEC(c)		((c) * 1000000 / tsc_khz)
#endif
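/*
 * CLKS2NSEC converts a get_cycles() delta into nanoseconds for the
 * statistics below, using the ITC frequency on ia64 and the TSC
 * frequency otherwise.
 */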
/* Extract the status field from a kernel handle */
#define GET_MSEG_HANDLE_STATUS(h)	(((*(unsigned long *)(h)) >> 16) & 3)
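/*
 * The status field is taken from bits [17:16] of the first word of the
 * handle; the same encoding is assumed for CCH, TGH and TFH handles,
 * since wait_instruction_complete() below compares it against
 * CCHSTATUS_ACTIVE for all of them.
 */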
struct mcs_op_statistic mcs_op_statistics[mcsop_last];

static void update_mcs_stats(enum mcs_op op, unsigned long clks)
{
	unsigned long nsec;

	nsec = CLKS2NSEC(clks);
	atomic_long_inc(&mcs_op_statistics[op].count);
	atomic_long_add(nsec, &mcs_op_statistics[op].total);
	if (mcs_op_statistics[op].max < nsec)
		mcs_op_statistics[op].max = nsec;
}
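/*
 * Launch the MCS instruction already encoded in the handle's opcode field.
 * Reading the code below: after a wmb() so the operands written by the
 * caller are globally visible first, writing 0x20001 sets bit 0 (the
 * start/CMD bit) and bit 17 (part of the status field) in word 0, and the
 * cacheline is flushed so the GRU observes the update.
 */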
static void start_instruction(void *h)
{
	unsigned long *w0 = h;

	wmb();		/* setting CMD/STATUS bits must be last */
	*w0 = *w0 | 0x20001;
	gru_flush_cache(h);
}

static void report_instruction_timeout(void *h)
{
	unsigned long goff = GSEGPOFF((unsigned long)h);
	char *id = "???";

	if (TYPE_IS(CCH, goff))
		id = "CCH";
	else if (TYPE_IS(TGH, goff))
		id = "TGH";
	else if (TYPE_IS(TFH, goff))
		id = "TFH";

	panic(KERN_ALERT "GRU %p (%s) is malfunctioning\n", h, id);
}
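/*
 * Poll (with cpu_relax()) until the handle's status field leaves
 * CCHSTATUS_ACTIVE. If a wait exceeds GRU_OPERATION_TIMEOUT (10 seconds
 * worth of cycles), report_instruction_timeout() panics the system.
 * Per-opcode latency statistics are updated when OPT_STATS is enabled.
 */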
static int wait_instruction_complete(void *h, enum mcs_op opc)
{
	int status;
	unsigned long start_time = get_cycles();

	while (1) {
		cpu_relax();
		status = GET_MSEG_HANDLE_STATUS(h);
		if (status != CCHSTATUS_ACTIVE)
			break;
		if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time)) {
			report_instruction_timeout(h);
			start_time = get_cycles();
		}
	}
	if (gru_options & OPT_STATS)
		update_mcs_stats(opc, get_cycles() - start_time);
	return status;
}
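/*
 * CCH (gru_context_configuration_handle) operations: each sets the opcode,
 * starts the instruction and waits for completion. ALLOCATE and DEALLOCATE
 * additionally execute sync_core() to stop speculation into the GSEG that
 * is being mapped or unmapped.
 */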
int cch_allocate(struct gru_context_configuration_handle *cch)
{
	int ret;

	cch->opc = CCHOP_ALLOCATE;
	start_instruction(cch);
	ret = wait_instruction_complete(cch, cchop_allocate);

	/*
	 * Stop speculation into the GSEG being mapped by the previous ALLOCATE.
	 * The GSEG memory does not exist until the ALLOCATE completes.
	 */
	sync_core();
	return ret;
}
int cch_start(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_START;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_start);
}

int cch_interrupt(struct gru_context_configuration_handle *cch)
{
	cch->opc = CCHOP_INTERRUPT;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_interrupt);
}

int cch_deallocate(struct gru_context_configuration_handle *cch)
{
	int ret;

	cch->opc = CCHOP_DEALLOCATE;
	start_instruction(cch);
	ret = wait_instruction_complete(cch, cchop_deallocate);

	/*
	 * Stop speculation into the GSEG being unmapped by the previous
	 * DEALLOCATE.
	 */
	sync_core();
	return ret;
}

int cch_interrupt_sync(struct gru_context_configuration_handle
				     *cch)
{
	cch->opc = CCHOP_INTERRUPT_SYNC;
	start_instruction(cch);
	return wait_instruction_complete(cch, cchop_interrupt_sync);
}
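/*
 * TGH (gru_tlb_global_handle): issue a TLBINV for the virtual address
 * range described by vaddr/vaddrmask in the given ASID and page size,
 * restricted to the contexts selected by ctxbitmap. Parameter meanings
 * are inferred from the handle field names, not from GRU hardware docs.
 */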
int tgh_invalidate(struct gru_tlb_global_handle *tgh,
				 unsigned long vaddr, unsigned long vaddrmask,
				 int asid, int pagesize, int global, int n,
				 unsigned short ctxbitmap)
{
	tgh->vaddr = vaddr;
	tgh->asid = asid;
	tgh->pagesize = pagesize;
	tgh->n = n;
	tgh->global = global;
	tgh->vaddrmask = vaddrmask;
	tgh->ctxbitmap = ctxbitmap;
	tgh->opc = TGHOP_TLBINV;
	start_instruction(tgh);
	return wait_instruction_complete(tgh, tghop_invalidate);
}
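/*
 * TFH (gru_tlb_fault_handle) operations: install a TLB entry mapping
 * vaddr to paddr for the given ASID and page size (WRITE_ONLY and
 * WRITE_RESTART; the latter presumably also restarts the faulting
 * instruction, as suggested by the opcode name), restart a blocked
 * context, switch it to user polling mode, or signal an exception.
 */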
int tfh_write_only(struct gru_tlb_fault_handle *tfh,
				  unsigned long paddr, int gaa,
				  unsigned long vaddr, int asid, int dirty,
				  int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_ONLY;
	start_instruction(tfh);
	return wait_instruction_complete(tfh, tfhop_write_only);
}
void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
				     unsigned long paddr, int gaa,
				     unsigned long vaddr, int asid, int dirty,
				     int pagesize)
{
	tfh->fillasid = asid;
	tfh->fillvaddr = vaddr;
	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
	tfh->gaa = gaa;
	tfh->dirty = dirty;
	tfh->pagesize = pagesize;
	tfh->opc = TFHOP_WRITE_RESTART;
	start_instruction(tfh);
}

void tfh_restart(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_RESTART;
	start_instruction(tfh);
}

void tfh_user_polling_mode(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_USER_POLLING_MODE;
	start_instruction(tfh);
}

void tfh_exception(struct gru_tlb_fault_handle *tfh)
{
	tfh->opc = TFHOP_EXCEPTION;
	start_instruction(tfh);
}