24 #include <linux/slab.h>
28 #include <asm/uaccess.h>
31 #include <asm/debug.h>
/*
 * NOTE(review): this chunk is a lossy, line-wrapped extraction; the leading
 * integers (36, 51, 58, ...) are the original file's line numbers and the
 * gaps between them are lines the extraction dropped.  It looks like the
 * pseries dispatch trace log (DTL) debugfs code — confirm all annotations
 * against the upstream source before relying on them.
 */
/* fragment: trailing part of a pointer declaration — presumably the log
 * buffer field of "struct dtl"; its opening line is missing */
36 struct dtl_entry *
buf;
/* mask of DTL event types to record; 0x7 = the low three event bits.
 * Exposed via a "dtl_event_mask" debugfs file (see dtl_init fragment). */
51 static u8 dtl_event_mask = 0x7;
/* per-cpu buffer size in dtl_entry records; exposed via debugfs as
 * "dtl_buf_entries" (see dtl_init fragment) */
58 static int dtl_buf_entries = N_DISPATCH_LOG;
60 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
/* fragments: fields of the per-cpu ring bookkeeping struct accessed via
 * per_cpu(dtl_rings, cpu) below — the struct header and any remaining
 * members fall on missing lines.
 *   write_ptr: next slot consume_dtle() fills; dtl_stop() clears it to NULL
 *   buf / buf_end: ring storage bounds — presumably [buf, buf_end); verify */
63 struct dtl_entry *write_ptr;
64 struct dtl_entry *
buf;
65 struct dtl_entry *buf_end;
/*
 * consume_dtle() — consume one hypervisor-written DTL entry (@dtle, the
 * @index'th entry overall) into this cpu's private ring.
 * Fragmentary: original lines 78-79, 82-89, 91-93 and the tail are missing,
 * so the actual copy, cursor advance, and early-return bodies are not
 * visible here.
 */
77 static void consume_dtle(
struct dtl_entry *dtle,
u64 index)
/* wp: current write cursor into the ring ("dtlr" is declared on a missing
 * line — presumably the per-cpu dtl_rings bookkeeping struct) */
80 struct dtl_entry *
wp = dtlr->write_ptr;
81 struct lppaca *vpa = local_paca->lppaca_ptr;
/* looks like an overflow guard: if the hypervisor's running index has
 * advanced more than one full log past @index, this entry was already
 * overwritten — TODO confirm the (missing) branch body discards it */
90 if (index + N_DISPATCH_LOG < vpa->dtl_idx)
/* ring-wrap check (body missing; presumably resets wp to dtlr->buf) */
94 if (wp == dtlr->buf_end)
/*
 * dtl_start() (CONFIG_VIRT_CPU_ACCOUNTING variant) — arm per-cpu ring
 * logging for @dtl->cpu and install consume_dtle() as the consumer.
 * Fragmentary: original lines 104, 106, 108, 110-112, 114-115, 118 and
 * 120-121 are missing (buf_end setup, memory barriers, return value).
 */
103 static int dtl_start(
struct dtl *
dtl)
105 struct dtl_ring *dtlr = &
per_cpu(dtl_rings, dtl->
cpu);
107 dtlr->buf = dtl->
buf;
109 dtlr->write_index = 0;
/* a non-NULL write_ptr is what lets consume_dtle() start writing */
113 dtlr->write_ptr = dtl->
buf;
/* save the lppaca event mask so dtl_stop() can restore it, then OR in
 * the user-selected event bits */
116 dtlr->saved_dtl_mask = lppaca_of(dtl->
cpu).dtl_enable_mask;
117 lppaca_of(dtl->
cpu).dtl_enable_mask |= dtl_event_mask;
119 dtl_consumer = consume_dtle;
/*
 * dtl_stop() (ring variant) — disarm logging for @dtl->cpu: clear the
 * write cursor, then restore the saved lppaca event mask.
 * Fragmentary: original lines 127, 129-133 and 135-138 are missing
 * (presumably barriers, buf teardown, and dtl_consumer clearing).
 */
124 static void dtl_stop(
struct dtl *
dtl)
126 struct dtl_ring *dtlr = &
per_cpu(dtl_rings, dtl->
cpu);
/* a NULL write_ptr stops consume_dtle() from touching the ring */
128 dtlr->write_ptr =
NULL;
134 lppaca_of(dtl->
cpu).dtl_enable_mask = dtlr->saved_dtl_mask;
140 static u64 dtl_current_index(
struct dtl *dtl)
142 return per_cpu(dtl_rings, dtl->
cpu).write_index;
/*
 * dtl_start() (!CONFIG_VIRT_CPU_ACCOUNTING variant) — register the buffer
 * with the hypervisor and enable event logging in @dtl->cpu's lppaca.
 * Fragmentary: the local declarations, the "addr" computation, the head of
 * the failure printk, and all return paths fall on missing lines.
 */
147 static int dtl_start(
struct dtl *dtl)
/* the hypervisor expects the buffer size in the second 32-bit word */
154 ((
u32 *)dtl->
buf)[1] = DISPATCH_LOG_BYTES;
156 hwcpu = get_hard_smp_processor_id(dtl->
cpu);
/* "addr" is computed on a missing line — presumably __pa(dtl->buf) */
158 ret = register_dtl(hwcpu, addr);
/* tail of the registration-failure warning (format head is missing) */
161 "failed with %d\n", __func__, dtl->
cpu, hwcpu, ret);
/* reset the hypervisor-maintained index before enabling */
166 lppaca_of(dtl->
cpu).dtl_idx = 0;
/* enable the selected event types; logging effectively starts here */
173 lppaca_of(dtl->
cpu).dtl_enable_mask = dtl_event_mask;
178 static void dtl_stop(
struct dtl *dtl)
180 int hwcpu = get_hard_smp_processor_id(dtl->
cpu);
182 lppaca_of(dtl->
cpu).dtl_enable_mask = 0x0;
184 unregister_dtl(hwcpu);
187 static u64 dtl_current_index(
struct dtl *dtl)
189 return lppaca_of(dtl->
cpu).dtl_idx;
/*
 * dtl_enable() — allocate a per-cpu buffer and start logging, under
 * dtl->lock.  Almost entirely missing from this extraction: the
 * allocation, any single-reader/-EBUSY checks, the dtl_start() call, and
 * all error unwinding fall on dropped lines (194-205, 207-213, 215-224,
 * 226+).
 */
193 static int dtl_enable(
struct dtl *dtl)
/* snapshot the configured size before taking the lock — presumably so a
 * concurrent debugfs write can't change it mid-setup; verify */
206 n_entries = dtl_buf_entries;
214 spin_lock(&dtl->
lock);
225 spin_unlock(&dtl->
lock);
/*
 * dtl_disable() — stop logging for @dtl's cpu, under dtl->lock.
 * Original lines 235-238 are missing — presumably the dtl_stop() call and
 * buffer free/reset; verify against upstream.
 */
232 static void dtl_disable(
struct dtl *dtl)
234 spin_lock(&dtl->
lock);
239 spin_unlock(&dtl->
lock);
/* lone surviving line of dtl_file_open() (original ~242-254): enable
 * logging when the debugfs file is opened; the rc check and
 * filp->private_data setup fall on missing lines */
249 rc = dtl_enable(dtl);
/*
 * dtl_file_release() — debugfs release hook; the body is entirely missing
 * from this extraction (presumably dtl_disable() + return 0 — verify).
 */
257 static int dtl_file_release(
struct inode *inode,
struct file *filp)
/*
 * dtl_file_read() — debugfs read hook: copy whole dtl_entry records from
 * the per-cpu buffer out to userspace.  Heavily fragmented: the locals,
 * the last_idx window bookkeeping, the wrap detection, and the
 * copy_to_user error returns all fall on missing lines.
 */
264 static ssize_t dtl_file_read(
struct file *filp,
char __user *buf,
size_t len,
/* reads must be a whole number of entries */
271 if ((len %
sizeof(
struct dtl_entry)) != 0)
/* requested entry count */
277 n_req = len /
sizeof(
struct dtl_entry);
/* snapshot the producer index under the lock */
282 spin_lock(&dtl->
lock);
284 cur_idx = dtl_current_index(dtl);
/* clamp the request to what has actually been produced so far */
290 if (last_idx + n_req > cur_idx)
291 n_req = cur_idx - last_idx;
296 spin_unlock(&dtl->
lock);
/* first copy: tail of the ring when the read window wraps — only the
 * size argument of that copy_to_user survives here */
308 read_size *
sizeof(
struct dtl_entry));
315 buf += read_size *
sizeof(
struct dtl_entry);
/* second copy: the (possibly only) contiguous portion */
319 rc =
copy_to_user(buf, &dtl->
buf[i], n_req *
sizeof(
struct dtl_entry));
/* return bytes transferred, not entry count */
325 return n_read *
sizeof(
struct dtl_entry);
/* fragment of the debugfs file_operations table — its opening line (and
 * any .llseek initializer) is missing from this extraction */
329 .open = dtl_file_open,
330 .release = dtl_file_release,
331 .read = dtl_file_read,
/* root debugfs directory for the DTL files, created in dtl_init() */
335 static struct dentry *dtl_dir;
/*
 * dtl_setup_file() — create the per-cpu debugfs file for @dtl; the body
 * is entirely missing from this extraction (presumably a
 * debugfs_create_file() call keyed by dtl->cpu — verify).
 */
337 static int dtl_setup_file(
struct dtl *dtl)
/*
 * dtl_init() — one-time setup: bail unless running as a shared-processor
 * LPAR, create the debugfs control files, then one per-cpu entry.
 * Fragmentary: directory creation, the heads of the two debugfs_create_*
 * calls, the loop construct, and all error unwinding fall on missing
 * lines.
 */
350 static int dtl_init(
void)
352 struct dentry *event_mask_file, *buf_entries_file;
/* the DTL only exists under shared-processor LPAR firmware */
355 if (!firmware_has_feature(FW_FEATURE_SPLPAR))
/* tails of the two debugfs_create_* calls wiring up the globals
 * dtl_event_mask and dtl_buf_entries (call heads are missing) */
369 dtl_dir, &dtl_event_mask);
371 dtl_dir, &dtl_buf_entries);
373 if (!event_mask_file || !buf_entries_file) {
/* per-cpu loop body fragment: initialize each cpu's struct dtl and
 * create its debugfs file */
380 struct dtl *dtl = &
per_cpu(cpu_dtl, i);
384 rc = dtl_setup_file(dtl);