/*
 * The USB Monitor, binary ("usbmon") reader interface.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/export.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "usb_mon.h"
/* Length of a SETUP packet, per USB 2.0 clause 9.3 */
#define SETUP_LEN 8

#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
#define MON_IOCX_GETX _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)

#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#define MON_IOCX_GETX32 _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32)
#endif
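/*
 * Note on the numbering: _IOW()/_IOWR() fold sizeof(the struct argument)
 * into the command word, so MON_IOCX_GET32 differs from MON_IOCX_GET as
 * an ioctl value even though both use command number 6. That is what
 * lets mon_bin_compat_ioctl() below dispatch the 32-bit variants through
 * case labels of their own.
 */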
#define CHUNK_SIZE	PAGE_SIZE
#define CHUNK_ALIGN(x)	(((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))

#define BUFF_MAX	CHUNK_ALIGN(1200*1024)
#define BUFF_DFL	CHUNK_ALIGN(300*1024)
#define BUFF_MIN	CHUNK_ALIGN(8*1024)
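/*
 * For a sense of scale, with 4 KiB pages: BUFF_DFL is 300*1024 bytes,
 * already a page multiple, so CHUNK_ALIGN() leaves it at exactly 75
 * chunks, and BUFF_MAX is 300 chunks. The rounding matters for
 * arbitrary values fed to MON_IOCT_RING_SIZE, which are chunk-aligned
 * upward before the page vector is sized.
 */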
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;
};

/*
 * The per-event header as it is copied to user space (the documented
 * "struct usbmon_packet" ABI).
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as text API; extensible. */
	unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* gettimeofday */
	s32 ts_usec;		/* gettimeofday */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	union {
		unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
		struct iso_rec {
			int error_count;
			int numdesc;
		} iso;
	} s;
	int interval;
	int start_frame;
	unsigned int xfer_flags;
	unsigned int ndesc;	/* Actual number of ISO descriptors */
};

/* ISO descriptor as stored in the ring, 16 bytes. */
struct mon_bin_isodesc {
	int iso_status;
	unsigned int iso_off;
	unsigned int iso_len;
	u32 _pad;
};

struct mon_bin_stats {
	u32 queued;
	u32 dropped;
};

struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Can be 48 bytes or 64. */
	void __user *data;
	size_t alloc;			/* Length of data (can be zero) */
};

struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};

struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

struct mon_bin_mfetch32 {
	u32 offvec32;	/* Vector of events fetched */
	u32 nfetch32;	/* Number of events to fetch (out: fetched) */
	u32 nflush32;	/* Number of events to flush */
};
#define PKT_ALIGN	64
#define PKT_SIZE	64
#define PKT_SZ_API0	48	/* API 0 (2.6.20) size */
#define PKT_SZ_API1	64	/* API 1 size */

#define ISODESC_MAX	128	/* Same limit as usbfs */

/* max number of USB buses (minors) supported */
#define MON_BIN_MAX_MINOR 128
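/*
 * PKT_SZ_API0 covers the original 48-byte header as read() and
 * MON_IOCX_GET return it; PKT_SZ_API1 is the 64-byte header with the
 * extended fields (interval, start_frame, xfer_flags, ndesc) that
 * MON_IOCX_GETX and its compat twin copy out. Either way a full
 * PKT_SIZE slot is reserved per event in the ring.
 */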
struct mon_reader_bin {
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

#define MON_OFF2HDR(rp, offset) ((struct mon_bin_hdr *) \
    ((rp)->b_vec[(offset) / CHUNK_SIZE].ptr + (offset) % CHUNK_SIZE))

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

static unsigned char xfer_to_pipe[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

static struct class *mon_bin_class;
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);
/*
 * A "chunked memcpy": copies into the page-vector ring and returns the
 * advanced offset. It does not manipulate any counters.
 */
static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size)
			off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}
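/*
 * Worked example, assuming 4 KiB chunks: copying 100 bytes at off 4090
 * takes two iterations, 6 bytes into the tail of chunk 0 (in_page = 6),
 * then 94 bytes into the head of chunk 1; off wraps back to 0 instead
 * if the write runs past b_size.
 */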
/*
 * The user-space mirror of the above: a chunked copy_to_user().
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size)
			off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}
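/*
 * The nonzero return is treated as a plain boolean by the callers in
 * this file, which report -EFAULT to user space themselves, so the
 * -EINVAL value never escapes.
 */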
/*
 * Allocate an area that does not wrap around the end of the ring, for
 * the mmap-ed case. If the request would wrap, the dead tail is
 * consumed by a filler packet first.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}
/*
 * Return unused space at the tail of the most recent allocation.
 * Called with b_lock held; size is PKT_ALIGN-aligned.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}
static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}
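/*
 * A filler ('@') packet is an ordinary header-sized record whose
 * len_cap covers the rest of the dead area, so readers walking the
 * ring skip the unused tail like any other event.
 */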
static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{
	if (urb->setup_packet == NULL)
		return 'Z';
	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}
/*
 * Save the data from an URB into the ring. Returns the number of bytes
 * that were *not* captured, with *flag telling why ('Z': no buffer,
 * 'D': data not addressable from here).
 */
static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length,
    char *flag)
{
	int i;
	struct scatterlist *sg;
	unsigned int this_len;

	*flag = 0;
	if (urb->num_sgs == 0) {
		if (urb->transfer_buffer == NULL) {
			*flag = 'Z';
			return length;
		}
		mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
		length = 0;

	} else {
		/* If IOMMU coalescing occurred, we cannot trust sg_page */
		if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
			*flag = 'D';
			return length;
		}

		/* Copy up to the first non-addressable segment */
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			if (length == 0 || PageHighMem(sg_page(sg)))
				break;
			this_len = min_t(unsigned int, sg->length, length);
			offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
			    this_len);
			length -= this_len;
		}
		if (i == 0)
			*flag = 'D';
	}

	return length;
}
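/*
 * The value returned above is the number of bytes that were *not*
 * captured; mon_bin_event() uses it to trim len_cap and give the
 * unused tail of the allocation back via mon_buff_area_shrink().
 */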
/*
 * This is the look-ahead pass for the 'C Zi' case, when actual_length
 * cannot be used to determine the length of the contiguous buffer.
 */
static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp,
    struct urb *urb, unsigned int ndesc)
{
	struct usb_iso_packet_descriptor *fp;
	unsigned int length;

	length = 0;
	fp = urb->iso_frame_desc;
	while (ndesc-- != 0) {
		if (fp->actual_length != 0) {
			if (fp->offset + fp->actual_length > length)
				length = fp->offset + fp->actual_length;
		}
		fp++;
	}
	return length;
}
static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
{
	struct mon_bin_isodesc *dp;
	struct usb_iso_packet_descriptor *fp;

	fp = urb->iso_frame_desc;
	while (ndesc-- != 0) {
		dp = (struct mon_bin_isodesc *)
		    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
		dp->iso_status = fp->status;
		dp->iso_off = fp->offset;
		dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length;
		dp->_pad = 0;
		if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size)
			offset = 0;
		fp++;
	}
}
static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{
	const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
	struct timeval ts;
	unsigned long flags;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	unsigned int delta;
	unsigned int ndesc, lendesc;
	unsigned char dir;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	do_gettimeofday(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	if (usb_endpoint_xfer_isoc(epd)) {
		if (urb->number_of_packets < 0) {
			ndesc = 0;
		} else if (urb->number_of_packets >= ISODESC_MAX) {
			ndesc = ISODESC_MAX;
		} else {
			ndesc = urb->number_of_packets;
		}
		if (ev_type == 'C' && usb_urb_dir_in(urb))
			length = mon_bin_collate_isodesc(rp, urb, ndesc);
	} else {
		ndesc = 0;
	}
	lendesc = ndesc * sizeof(struct mon_bin_isodesc);

	/* not an issue unless there's a subtle bug in a HCD somewhere */
	if (length >= urb->transfer_buffer_length)
		length = urb->transfer_buffer_length;

	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_urb_dir_in(urb)) {
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
		/* Cannot rely on endpoint number in case of control ep.0 */
		dir = USB_DIR_IN;
	} else {
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
		dir = 0;
	}

	if (rp->mmap_active) {
		offset = mon_buff_area_alloc_contiguous(rp,
		    length + PKT_SIZE + lendesc);
	} else {
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc);
	}
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	if ((offset += PKT_SIZE) >= rp->b_size)
		offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
	ep->epnum = dir | usb_endpoint_num(epd);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_usec;
	ep->status = status;
	ep->len_urb = urb_length;
	ep->len_cap = length + lendesc;
	ep->xfer_flags = urb->transfer_flags;

	if (usb_endpoint_xfer_int(epd)) {
		ep->interval = urb->interval;
	} else if (usb_endpoint_xfer_isoc(epd)) {
		ep->interval = urb->interval;
		ep->start_frame = urb->start_frame;
		ep->s.iso.error_count = urb->error_count;
		ep->s.iso.numdesc = urb->number_of_packets;
	}

	if (usb_endpoint_xfer_control(epd) && ev_type == 'S') {
		ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type);
	} else {
		ep->flag_setup = '-';
	}

	if (ndesc != 0) {
		ep->ndesc = ndesc;
		mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc);
		if ((offset += lendesc) >= rp->b_size)
			offset -= rp->b_size;
	}

	if (length != 0) {
		length = mon_bin_get_data(rp, offset, urb, length,
		    &ep->flag_data);
		if (length > 0) {
			delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
			ep->len_cap -= length;
			delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
			mon_buff_area_shrink(rp, delta);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}
static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}
static void mon_bin_complete(void *data, struct urb *urb, int status)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C', status);
}
static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
	ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
	ep->epnum |= usb_endpoint_num(&urb->ep->desc);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}
static int mon_bin_open(struct inode *inode, struct file *file)
{
	/* ... */
	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;
	/* ... */
}
/*
 * Remove one event from the buffer and copy it out; MON_IOCX_GET,
 * MON_IOCX_GETX and their compat variants all land here.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
    void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, hdrbytes)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size)
		offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}
static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;

	/* ... deregister the reader from the bus ... */

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);
	return 0;
}
static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int hdrbytes = PKT_SZ_API0;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (rp->b_read < hdrbytes) {
		step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read));
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	if (rp->b_read >= hdrbytes) {
		step_len = ep->len_cap;
		step_len -= rp->b_read - hdrbytes;
		if (step_len > nbytes)
			step_len = nbytes;
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - hdrbytes;
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= hdrbytes + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}
/*
 * Toss events off the queue, freeing their ring space (MON_IOCH_MFLUSH
 * and the nflush leg of MON_IOCX_MFETCH land here).
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int i;

	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return 0;
}
/*
 * Fetch a batch of events and store their ring offsets into the
 * supplied vector (the nfetch leg of MON_IOCX_MFETCH).
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}
/*
 * Count queued events. Almost the same as the loop in mon_bin_fetch(),
 * but nothing is stored into a user vector and there is no maximum.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	mutex_lock(&rp->fetch_lock);

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		ep = MON_OFF2HDR(rp, cur_out);

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}
static long mon_bin_ioctl(struct file *file, unsigned int cmd,
    unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int ret = 0;

	switch (cmd) {

	case MON_IOCQ_URB_LEN:
		/*
		 * N.B. This only returns the size of data, without the header.
		 */
		spin_lock_irqsave(&rp->b_lock, flags);
		if (!MON_RING_EMPTY(rp)) {
			ep = MON_OFF2HDR(rp, rp->b_out);
			ret = ep->len_cap;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		break;

	case MON_IOCQ_RING_SIZE:
		ret = rp->b_size;
		break;

	case MON_IOCT_RING_SIZE:
		{
		int size;
		struct mon_pgmap *vec;

		if (arg < BUFF_MIN || arg > BUFF_MAX)
			return -EINVAL;
		size = CHUNK_ALIGN(arg);

		if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
		    GFP_KERNEL)) == NULL) {
			ret = -ENOMEM;
			break;
		}

		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
		if (ret < 0) {
			kfree(vec);
			break;
		}

		mutex_lock(&rp->fetch_lock);
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
		kfree(rp->b_vec);
		rp->b_vec  = vec;
		rp->b_size = size;
		rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		mutex_unlock(&rp->fetch_lock);
		}
		break;

	case MON_IOCH_MFLUSH:
		ret = mon_bin_flush(rp, arg);
		break;

	case MON_IOCX_GET:
	case MON_IOCX_GETX:
		{
		struct mon_bin_get getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get)))
			return -EFAULT;

		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
			return -EINVAL;
		ret = mon_bin_get_event(file, rp, getb.hdr,
		    (cmd == MON_IOCX_GET) ? PKT_SZ_API0 : PKT_SZ_API1,
		    getb.data, (unsigned int)getb.alloc);
		}
		break;

	case MON_IOCX_MFETCH:
		{
		struct mon_bin_mfetch mfetch;
		struct mon_bin_mfetch __user *uptr;

		uptr = (struct mon_bin_mfetch __user *)arg;
		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush) {
			ret = mon_bin_flush(rp, mfetch.nflush);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch))
			return -EFAULT;
		ret = 0;
		}
		break;

	case MON_IOCG_STATS:
		{
		struct mon_bin_stats __user *sp;
		unsigned int nevents;
		unsigned int ndropped;

		spin_lock_irqsave(&rp->b_lock, flags);
		ndropped = rp->cnt_lost;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		nevents = mon_bin_queued(rp);

		sp = (struct mon_bin_stats __user *)arg;
		if (put_user(ndropped, &sp->dropped))
			return -EFAULT;
		if (put_user(nevents, &sp->queued))
			return -EFAULT;
		}
		break;

	default:
		return -ENOTTY;
	}

	return ret;
}
#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	int ret;

	switch (cmd) {

	case MON_IOCX_GET32:
	case MON_IOCX_GETX32:
		{
		struct mon_bin_get32 getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get32)))
			return -EFAULT;

		ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32),
		    (cmd == MON_IOCX_GET32) ? PKT_SZ_API0 : PKT_SZ_API1,
		    compat_ptr(getb.data32), getb.alloc32);
		if (ret < 0)
			return ret;
		}
		return 0;

	case MON_IOCX_MFETCH32:
		{
		struct mon_bin_mfetch32 mfetch;
		struct mon_bin_mfetch32 __user *uptr;

		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush32) {
			ret = mon_bin_flush(rp, mfetch.nflush32);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush32))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
		    mfetch.nfetch32);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch32))
			return -EFAULT;
		return 0;
		}

	case MON_IOCG_STATS:
		return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg));

	case MON_IOCQ_URB_LEN:
	case MON_IOCQ_RING_SIZE:
	case MON_IOCT_RING_SIZE:
	case MON_IOCH_MFLUSH:
		return mon_bin_ioctl(file, cmd, arg);
	}

	return -ENOTTY;
}
#endif /* CONFIG_COMPAT */
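/*
 * Only the two struct-carrying commands need real translation above;
 * MON_IOCG_STATS merely has its pointer converted with compat_ptr(),
 * and the scalar commands are bit-for-bit identical, so both groups
 * funnel straight into mon_bin_ioctl().
 */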
static unsigned int mon_bin_poll(struct file *file,
    struct poll_table_struct *wait)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	if (file->f_mode & FMODE_READ)
		poll_wait(file, &rp->b_wait, wait);

	spin_lock_irqsave(&rp->b_lock, flags);
	if (!MON_RING_EMPTY(rp))
		mask |= POLLIN | POLLRDNORM;	/* readable */
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return mask;
}
/*
 * Map ring pages to user space.
 */
static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rp->b_size)
		return VM_FAULT_SIGBUS;
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	get_page(pageptr);
	vmf->page = pageptr;
	return 0;
}
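/*
 * The mapping is populated lazily from the fault handler rather than
 * with remap_pfn_range() at mmap time because the ring is a vector of
 * independently allocated pages, not one physically contiguous region;
 * get_page() keeps each page alive while user space holds it mapped.
 */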
static const struct vm_operations_struct mon_bin_vm_ops = {
	.open =		mon_bin_vma_open,
	.close =	mon_bin_vma_close,
	.fault =	mon_bin_vma_fault,
};
static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "fault" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}
static const struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.read =		mon_bin_read,
	.poll =		mon_bin_poll,
	.unlocked_ioctl = mon_bin_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	mon_bin_compat_ioctl,
#endif
	.release =	mon_bin_release,
	.mmap =		mon_bin_mmap,
};
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}
static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page((void *) vaddr);
	}
	return 0;
}
static void mon_free_buff(struct mon_pgmap *map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free_page((unsigned long) map[n].ptr);
}
int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
	unsigned minor = ubus ? ubus->busnum : 0;
	/* ... */
}
int __init mon_bin_init(void)
{
	int rc;

	mon_bin_class = class_create(THIS_MODULE, "usbmon");
	if (IS_ERR(mon_bin_class)) {
		rc = PTR_ERR(mon_bin_class);
		goto err_class;
	}
	/* ... */
	cdev_init(&mon_bin_cdev, &mon_fops_binary);
	mon_bin_cdev.owner = THIS_MODULE;
	/* ... */
}