Linux Kernel 3.7.1
grukdump.c
/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10

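/*
 * Copy a single GRU handle (GRU_HANDLE_BYTES) to user space and advance
 * the caller's destination pointer past it.  Returns 0 on success or -1
 * if the copy faults.
 */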
static int gru_user_copy_handle(void __user **dp, void *s)
{
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	*dp += GRU_HANDLE_BYTES;
	return 0;
}

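/*
 * Dump the resources of one context: for each CBR allocated to the
 * context (per the CCH allocation map), copy its control block (CB),
 * TLB fault handle (TFH) and control block extended (CBE) handles to
 * user space, optionally flushing each CB from the cache first, then
 * copy the data segment if dsrcnt is non-zero.
 */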
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt,
			int flush_cbrs)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;

	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (flush_cbrs)
			gru_flush_cache(cb);
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}
	if (dsrcnt)
		memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
	return 0;

fail:
	return -EFAULT;
}

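/*
 * Copy all of the chiplet's TLB fault map (TFM) handles to user space.
 * Returns the number of bytes written or -EFAULT if a copy fails.
 */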
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i, ret, bytes;

	bytes = GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
	if (bytes > ubufend - ubuf)
		ret = -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			goto fail;
	}
	return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

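/*
 * Copy all of the chiplet's TLB global handles (TGH) to user space.
 * Returns the number of bytes written or -EFAULT if a copy fails.
 */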
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i, ret, bytes;

	bytes = GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
	if (bytes > ubufend - ubuf)
		ret = -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			goto fail;
	}
	return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}

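/*
 * Dump the state of one context: a gru_dump_context_header, the context
 * configuration handle (CCH), and, if the context is active, its CBR and
 * data-segment resources via gru_dump_context_data().  The CCH is first
 * locked (retrying up to CCH_LOCK_ATTEMPTS times) so the context cannot
 * change underneath the dump; if lock_cch is not set the dump proceeds
 * even without the lock.  Returns the number of bytes written or a
 * negative errno.
 */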
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch))
		goto fail;
	if (cch_locked)
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr)))
		ret = -EFAULT;

	return ret ? ret : bytes;

fail:
	unlock_cch_handle(cch);
	return -EFAULT;
}

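/*
 * Handle a chiplet-state dump request.  'arg' is a user pointer to a
 * struct gru_dump_chiplet_state_req selecting the GRU (gid), the contexts
 * to dump (ctxnum, negative for all) and the dump options.  The TFM and
 * TGH handles are written to req.buf first, followed by one record per
 * dumped context.  Returns the number of contexts dumped or a negative
 * errno.
 */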
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids || req.gid < 0)
		return -EINVAL;

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch,
						req.flush_cbrs);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}