Linux Kernel 3.7.1
socket.c
/*
 * File: socket.c
 *
 * Phonet sockets
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Authors: Sakari Ailus <[email protected]>
 *          Rémi Denis-Courmont
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <linux/phonet.h>
#include <linux/export.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/pn_dev.h>

static int pn_socket_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        if (sk) {
                sock->sk = NULL;
                sk->sk_prot->close(sk, 0);
        }
        return 0;
}

#define PN_HASHSIZE     16
#define PN_HASHMASK     (PN_HASHSIZE-1)


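/*
 * Bound sockets are kept in a small hash table, indexed by the low-order
 * bits of the bound Phonet object. The mutex serializes insertions and
 * removals; lookups are lockless under RCU (see pn_find_sock_by_sa()).
 */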
static struct {
        struct hlist_head hlist[PN_HASHSIZE];
        struct mutex lock;
} pnsocks;

void __init pn_sock_init(void)
{
        unsigned int i;

        for (i = 0; i < PN_HASHSIZE; i++)
                INIT_HLIST_HEAD(pnsocks.hlist + i);
        mutex_init(&pnsocks.lock);
}

static struct hlist_head *pn_hash_list(u16 obj)
{
        return pnsocks.hlist + (obj & PN_HASHMASK);
}

/*
 * Find address based on socket address, match only certain fields.
 * Also grab sock if it was found. Remember to sock_put it later.
 */
struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
{
        struct hlist_node *node;
        struct sock *sknode;
        struct sock *rval = NULL;
        u16 obj = pn_sockaddr_get_object(spn);
        u8 res = spn->spn_resource;
        struct hlist_head *hlist = pn_hash_list(obj);

        rcu_read_lock();
        sk_for_each_rcu(sknode, node, hlist) {
                struct pn_sock *pn = pn_sk(sknode);
                BUG_ON(!pn->sobject); /* unbound socket */

                if (!net_eq(sock_net(sknode), net))
                        continue;
                if (pn_port(obj)) {
                        /* Look up socket by port */
                        if (pn_port(pn->sobject) != pn_port(obj))
                                continue;
                } else {
                        /* If port is zero, look up by resource */
                        if (pn->resource != res)
                                continue;
                }
                if (pn_addr(pn->sobject) &&
                    pn_addr(pn->sobject) != pn_addr(obj))
                        continue;

                rval = sknode;
                sock_hold(sknode);
                break;
        }
        rcu_read_unlock();

        return rval;
}

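/*
 * Example caller pattern (illustrative sketch only, not code from this
 * file): the lookup above takes a reference that the caller must drop.
 *
 *      struct sock *sk = pn_find_sock_by_sa(net, &spn);
 *      if (sk != NULL) {
 *              ... deliver the packet or inspect the socket ...
 *              sock_put(sk);   release the reference taken by the lookup
 *      }
 */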
/* Deliver a broadcast packet (only in bottom-half) */
void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
{
        struct hlist_head *hlist = pnsocks.hlist;
        unsigned int h;

        rcu_read_lock();
        for (h = 0; h < PN_HASHSIZE; h++) {
                struct hlist_node *node;
                struct sock *sknode;

                sk_for_each(sknode, node, hlist) {
                        struct sk_buff *clone;

                        if (!net_eq(sock_net(sknode), net))
                                continue;
                        if (!sock_flag(sknode, SOCK_BROADCAST))
                                continue;

                        clone = skb_clone(skb, GFP_ATOMIC);
                        if (clone) {
                                sock_hold(sknode);
                                sk_receive_skb(sknode, clone, 0);
                        }
                }
                hlist++;
        }
        rcu_read_unlock();
}

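/*
 * pn_sock_hash()/pn_sock_unhash() below publish a bound socket to, or
 * withdraw it from, the RCU-protected hash table used by the lookups
 * above; unhashing waits for an RCU grace period so that concurrent
 * readers cannot still be walking over the removed node.
 */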
void pn_sock_hash(struct sock *sk)
{
        struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);

        mutex_lock(&pnsocks.lock);
        sk_add_node_rcu(sk, hlist);
        mutex_unlock(&pnsocks.lock);
}
EXPORT_SYMBOL(pn_sock_hash);

void pn_sock_unhash(struct sock *sk)
{
        mutex_lock(&pnsocks.lock);
        sk_del_node_init(sk);
        mutex_unlock(&pnsocks.lock);
        synchronize_rcu();
}
EXPORT_SYMBOL(pn_sock_unhash);

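/* Serializes local port allocation between bind() and pn_sock_get_port(). */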
static DEFINE_MUTEX(port_mutex);

static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
{
        struct sock *sk = sock->sk;
        struct pn_sock *pn = pn_sk(sk);
        struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
        int err;
        u16 handle;
        u8 saddr;

        if (sk->sk_prot->bind)
                return sk->sk_prot->bind(sk, addr, len);

        if (len < sizeof(struct sockaddr_pn))
                return -EINVAL;
        if (spn->spn_family != AF_PHONET)
                return -EAFNOSUPPORT;

        handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
        saddr = pn_addr(handle);
        if (saddr && phonet_address_lookup(sock_net(sk), saddr))
                return -EADDRNOTAVAIL;

        lock_sock(sk);
        if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
                err = -EINVAL; /* attempt to rebind */
                goto out;
        }
        WARN_ON(sk_hashed(sk));
        mutex_lock(&port_mutex);
        err = sk->sk_prot->get_port(sk, pn_port(handle));
        if (err)
                goto out_port;

        /* get_port() sets the port, bind() sets the address if applicable */
        pn->sobject = pn_object(saddr, pn_port(pn->sobject));
        pn->resource = spn->spn_resource;

        /* Enable RX on the socket */
        sk->sk_prot->hash(sk);
out_port:
        mutex_unlock(&port_mutex);
out:
        release_sock(sk);
        return err;
}

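/*
 * Bind to a kernel-chosen local port (address and port left at zero so
 * get_port() picks a free one). pn_socket_bind() returns -EINVAL when the
 * socket is already bound, which autobind treats as success.
 */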
static int pn_socket_autobind(struct socket *sock)
{
        struct sockaddr_pn sa;
        int err;

        memset(&sa, 0, sizeof(sa));
        sa.spn_family = AF_PHONET;
        err = pn_socket_bind(sock, (struct sockaddr *)&sa,
                                sizeof(struct sockaddr_pn));
        if (err != -EINVAL)
                return err;
        BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
        return 0; /* socket was already bound */
}

static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
                int len, int flags)
{
        struct sock *sk = sock->sk;
        struct pn_sock *pn = pn_sk(sk);
        struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
        struct task_struct *tsk = current;
        long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
        int err;

        if (pn_socket_autobind(sock))
                return -ENOBUFS;
        if (len < sizeof(struct sockaddr_pn))
                return -EINVAL;
        if (spn->spn_family != AF_PHONET)
                return -EAFNOSUPPORT;

        lock_sock(sk);

        switch (sock->state) {
        case SS_UNCONNECTED:
                if (sk->sk_state != TCP_CLOSE) {
                        err = -EISCONN;
                        goto out;
                }
                break;
        case SS_CONNECTING:
                err = -EALREADY;
                goto out;
        default:
                err = -EISCONN;
                goto out;
        }

        pn->dobject = pn_sockaddr_get_object(spn);
        pn->resource = pn_sockaddr_get_resource(spn);
        sock->state = SS_CONNECTING;

        err = sk->sk_prot->connect(sk, addr, len);
        if (err) {
                sock->state = SS_UNCONNECTED;
                pn->dobject = 0;
                goto out;
        }

        while (sk->sk_state == TCP_SYN_SENT) {
                DEFINE_WAIT(wait);

                if (!timeo) {
                        err = -EINPROGRESS;
                        goto out;
                }
                if (signal_pending(tsk)) {
                        err = sock_intr_errno(timeo);
                        goto out;
                }

                prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                                TASK_INTERRUPTIBLE);
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
                finish_wait(sk_sleep(sk), &wait);
        }

        if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
                err = 0;
        else if (sk->sk_state == TCP_CLOSE_WAIT)
                err = -ECONNRESET;
        else
                err = -ECONNREFUSED;
        sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
out:
        release_sock(sk);
        return err;
}

static int pn_socket_accept(struct socket *sock, struct socket *newsock,
                                int flags)
{
        struct sock *sk = sock->sk;
        struct sock *newsk;
        int err;

        if (unlikely(sk->sk_state != TCP_LISTEN))
                return -EINVAL;

        newsk = sk->sk_prot->accept(sk, flags, &err);
        if (!newsk)
                return err;

        lock_sock(newsk);
        sock_graft(newsk, newsock);
        newsock->state = SS_CONNECTED;
        release_sock(newsk);
        return 0;
}

static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
                                int *sockaddr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct pn_sock *pn = pn_sk(sk);

        memset(addr, 0, sizeof(struct sockaddr_pn));
        addr->sa_family = AF_PHONET;
        if (!peer) /* Race with bind() here is userland's problem. */
                pn_sockaddr_set_object((struct sockaddr_pn *)addr,
                                        pn->sobject);

        *sockaddr_len = sizeof(struct sockaddr_pn);
        return 0;
}

static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
                                        poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct pep_sock *pn = pep_sk(sk);
        unsigned int mask = 0;

        poll_wait(file, sk_sleep(sk), wait);

        if (sk->sk_state == TCP_CLOSE)
                return POLLERR;
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
        if (!skb_queue_empty(&pn->ctrlreq_queue))
                mask |= POLLPRI;
        if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
                return POLLHUP;

        if (sk->sk_state == TCP_ESTABLISHED &&
            atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
            atomic_read(&pn->tx_credits))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

        return mask;
}

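/*
 * Illustrative userspace sketch (not part of this file) for the
 * SIOCPNGETOBJECT ioctl handled below; it reads and writes back a __u16
 * object handle:
 *
 *      __u16 handle = 0;
 *      if (ioctl(fd, SIOCPNGETOBJECT, &handle) == 0)
 *              ... handle now combines a local address with the bound port ...
 */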
static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
                                unsigned long arg)
{
        struct sock *sk = sock->sk;
        struct pn_sock *pn = pn_sk(sk);

        if (cmd == SIOCPNGETOBJECT) {
                struct net_device *dev;
                u16 handle;
                u8 saddr;

                if (get_user(handle, (__u16 __user *)arg))
                        return -EFAULT;

                lock_sock(sk);
                if (sk->sk_bound_dev_if)
                        dev = dev_get_by_index(sock_net(sk),
                                                sk->sk_bound_dev_if);
                else
                        dev = phonet_device_get(sock_net(sk));
                if (dev && (dev->flags & IFF_UP))
                        saddr = phonet_address_get(dev, pn_addr(handle));
                else
                        saddr = PN_NO_ADDR;
                release_sock(sk);

                if (dev)
                        dev_put(dev);
                if (saddr == PN_NO_ADDR)
                        return -EHOSTUNREACH;

                handle = pn_object(saddr, pn_port(pn->sobject));
                return put_user(handle, (__u16 __user *)arg);
        }

        return sk->sk_prot->ioctl(sk, cmd, arg);
}

static int pn_socket_listen(struct socket *sock, int backlog)
{
        struct sock *sk = sock->sk;
        int err = 0;

        if (pn_socket_autobind(sock))
                return -ENOBUFS;

        lock_sock(sk);
        if (sock->state != SS_UNCONNECTED) {
                err = -EINVAL;
                goto out;
        }

        if (sk->sk_state != TCP_LISTEN) {
                sk->sk_state = TCP_LISTEN;
                sk->sk_ack_backlog = 0;
        }
        sk->sk_max_ack_backlog = backlog;
out:
        release_sock(sk);
        return err;
}

static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock,
                                struct msghdr *m, size_t total_len)
{
        struct sock *sk = sock->sk;

        if (pn_socket_autobind(sock))
                return -EAGAIN;

        return sk->sk_prot->sendmsg(iocb, sk, m, total_len);
}

const struct proto_ops phonet_dgram_ops = {
        .family         = AF_PHONET,
        .owner          = THIS_MODULE,
        .release        = pn_socket_release,
        .bind           = pn_socket_bind,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pn_socket_getname,
        .poll           = datagram_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = sock_no_setsockopt,
        .compat_getsockopt = sock_no_getsockopt,
#endif
        .sendmsg        = pn_socket_sendmsg,
        .recvmsg        = sock_common_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};

const struct proto_ops phonet_stream_ops = {
        .family         = AF_PHONET,
        .owner          = THIS_MODULE,
        .release        = pn_socket_release,
        .bind           = pn_socket_bind,
        .connect        = pn_socket_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = pn_socket_accept,
        .getname        = pn_socket_getname,
        .poll           = pn_socket_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = pn_socket_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_common_setsockopt,
        .getsockopt     = sock_common_getsockopt,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_sock_common_setsockopt,
        .compat_getsockopt = compat_sock_common_getsockopt,
#endif
        .sendmsg        = pn_socket_sendmsg,
        .recvmsg        = sock_common_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};
EXPORT_SYMBOL(phonet_stream_ops);

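/*
 * Note on the allocation loop below: the static cursor 'port_cur'
 * remembers the last port handed out and wraps within the configured
 * local port range; the for-loop merely bounds the search to one full
 * sweep of that range.
 */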
/* allocate port for a socket */
int pn_sock_get_port(struct sock *sk, unsigned short sport)
{
        static int port_cur;
        struct net *net = sock_net(sk);
        struct pn_sock *pn = pn_sk(sk);
        struct sockaddr_pn try_sa;
        struct sock *tmpsk;

        memset(&try_sa, 0, sizeof(struct sockaddr_pn));
        try_sa.spn_family = AF_PHONET;
        WARN_ON(!mutex_is_locked(&port_mutex));
        if (!sport) {
                /* search free port */
                int port, pmin, pmax;

                phonet_get_local_port_range(&pmin, &pmax);
                for (port = pmin; port <= pmax; port++) {
                        port_cur++;
                        if (port_cur < pmin || port_cur > pmax)
                                port_cur = pmin;

                        pn_sockaddr_set_port(&try_sa, port_cur);
                        tmpsk = pn_find_sock_by_sa(net, &try_sa);
                        if (tmpsk == NULL) {
                                sport = port_cur;
                                goto found;
                        } else
                                sock_put(tmpsk);
                }
        } else {
                /* try to find specific port */
                pn_sockaddr_set_port(&try_sa, sport);
                tmpsk = pn_find_sock_by_sa(net, &try_sa);
                if (tmpsk == NULL)
                        /* No sock there! We can use that port... */
                        goto found;
                else
                        sock_put(tmpsk);
        }
        /* the port must be in use already */
        return -EADDRINUSE;

found:
        pn->sobject = pn_object(pn_addr(pn->sobject), sport);
        return 0;
}
EXPORT_SYMBOL(pn_sock_get_port);

#ifdef CONFIG_PROC_FS
static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
{
        struct net *net = seq_file_net(seq);
        struct hlist_head *hlist = pnsocks.hlist;
        struct hlist_node *node;
        struct sock *sknode;
        unsigned int h;

        for (h = 0; h < PN_HASHSIZE; h++) {
                sk_for_each_rcu(sknode, node, hlist) {
                        if (!net_eq(net, sock_net(sknode)))
                                continue;
                        if (!pos)
                                return sknode;
                        pos--;
                }
                hlist++;
        }
        return NULL;
}

static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
{
        struct net *net = seq_file_net(seq);

        do
                sk = sk_next(sk);
        while (sk && !net_eq(net, sock_net(sk)));

        return sk;
}

static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(rcu)
{
        rcu_read_lock();
        return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock *sk;

        if (v == SEQ_START_TOKEN)
                sk = pn_sock_get_idx(seq, 0);
        else
                sk = pn_sock_get_next(seq, v);
        (*pos)++;
        return sk;
}

static void pn_sock_seq_stop(struct seq_file *seq, void *v)
        __releases(rcu)
{
        rcu_read_unlock();
}

static int pn_sock_seq_show(struct seq_file *seq, void *v)
{
        int len;

        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%s%n", "pt loc rem rs st tx_queue rx_queue "
                        " uid inode ref pointer drops", &len);
        else {
                struct sock *sk = v;
                struct pn_sock *pn = pn_sk(sk);

                seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
                        "%d %pK %d%n",
                        sk->sk_protocol, pn->sobject, pn->dobject,
                        pn->resource, sk->sk_state,
                        sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
                        from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
                        sock_i_ino(sk),
                        atomic_read(&sk->sk_refcnt), sk,
                        atomic_read(&sk->sk_drops), &len);
        }
        seq_printf(seq, "%*s\n", 127 - len, "");
        return 0;
}

static const struct seq_operations pn_sock_seq_ops = {
        .start = pn_sock_seq_start,
        .next = pn_sock_seq_next,
        .stop = pn_sock_seq_stop,
        .show = pn_sock_seq_show,
};

static int pn_sock_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &pn_sock_seq_ops,
                                sizeof(struct seq_net_private));
}

const struct file_operations pn_sock_seq_fops = {
        .owner = THIS_MODULE,
        .open = pn_sock_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};
#endif


static struct {
        struct sock *sk[256];
} pnres;

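/*
 * The pnres table above maps each of the 256 Phonet resource identifiers
 * to at most one socket (initial network namespace only). Updates are
 * serialized by resource_mutex; lookups are lockless under RCU.
 */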
/*
 * Find and hold socket based on resource.
 */
struct sock *pn_find_sock_by_res(struct net *net, u8 res)
{
        struct sock *sk;

        if (!net_eq(net, &init_net))
                return NULL;

        rcu_read_lock();
        sk = rcu_dereference(pnres.sk[res]);
        if (sk)
                sock_hold(sk);
        rcu_read_unlock();
        return sk;
}

static DEFINE_MUTEX(resource_mutex);

int pn_sock_bind_res(struct sock *sk, u8 res)
{
        int ret = -EADDRINUSE;

        if (!net_eq(sock_net(sk), &init_net))
                return -ENOIOCTLCMD;
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (pn_socket_autobind(sk->sk_socket))
                return -EAGAIN;

        mutex_lock(&resource_mutex);
        if (pnres.sk[res] == NULL) {
                sock_hold(sk);
                rcu_assign_pointer(pnres.sk[res], sk);
                ret = 0;
        }
        mutex_unlock(&resource_mutex);
        return ret;
}

int pn_sock_unbind_res(struct sock *sk, u8 res)
{
        int ret = -ENOENT;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        mutex_lock(&resource_mutex);
        if (pnres.sk[res] == sk) {
                RCU_INIT_POINTER(pnres.sk[res], NULL);
                ret = 0;
        }
        mutex_unlock(&resource_mutex);

        if (ret == 0) {
                synchronize_rcu();
                sock_put(sk);
        }
        return ret;
}

void pn_sock_unbind_all_res(struct sock *sk)
{
        unsigned int res, match = 0;

        mutex_lock(&resource_mutex);
        for (res = 0; res < 256; res++) {
                if (pnres.sk[res] == sk) {
                        RCU_INIT_POINTER(pnres.sk[res], NULL);
                        match++;
                }
        }
        mutex_unlock(&resource_mutex);

        while (match > 0) {
                __sock_put(sk);
                match--;
        }
        /* Caller is responsible for RCU sync before final sock_put() */
}

#ifdef CONFIG_PROC_FS
static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
{
        struct net *net = seq_file_net(seq);
        unsigned int i;

        if (!net_eq(net, &init_net))
                return NULL;

        for (i = 0; i < 256; i++) {
                if (pnres.sk[i] == NULL)
                        continue;
                if (!pos)
                        return pnres.sk + i;
                pos--;
        }
        return NULL;
}

static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
{
        struct net *net = seq_file_net(seq);
        unsigned int i;

        BUG_ON(!net_eq(net, &init_net));

        for (i = (sk - pnres.sk) + 1; i < 256; i++)
                if (pnres.sk[i])
                        return pnres.sk + i;
        return NULL;
}

static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(resource_mutex)
{
        mutex_lock(&resource_mutex);
        return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock **sk;

        if (v == SEQ_START_TOKEN)
                sk = pn_res_get_idx(seq, 0);
        else
                sk = pn_res_get_next(seq, v);
        (*pos)++;
        return sk;
}

static void pn_res_seq_stop(struct seq_file *seq, void *v)
        __releases(resource_mutex)
{
        mutex_unlock(&resource_mutex);
}

static int pn_res_seq_show(struct seq_file *seq, void *v)
{
        int len;

        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%s%n", "rs uid inode", &len);
        else {
                struct sock **psk = v;
                struct sock *sk = *psk;

                seq_printf(seq, "%02X %5d %lu%n",
                        (int) (psk - pnres.sk),
                        from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
                        sock_i_ino(sk), &len);
        }
        seq_printf(seq, "%*s\n", 63 - len, "");
        return 0;
}

static const struct seq_operations pn_res_seq_ops = {
        .start = pn_res_seq_start,
        .next = pn_res_seq_next,
        .stop = pn_res_seq_stop,
        .show = pn_res_seq_show,
};

static int pn_res_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &pn_res_seq_ops,
                                sizeof(struct seq_net_private));
}

const struct file_operations pn_res_seq_fops = {
        .owner = THIS_MODULE,
        .open = pn_res_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};
#endif