Linux Kernel 3.7.1
hdlc_ppp.c
/*
 * Generic HDLC support routines for Linux
 * Point-to-point protocol support
 *
 * Copyright (C) 1999 - 2008 Krzysztof Halasa <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define DEBUG_CP                0       /* also bytes# to dump */
#define DEBUG_STATE             0
#define DEBUG_HARD_HEADER       0

#define HDLC_ADDR_ALLSTATIONS   0xFF
#define HDLC_CTRL_UI            0x03

#define PID_LCP                 0xC021
#define PID_IP                  0x0021
#define PID_IPCP                0x8021
#define PID_IPV6                0x0057
#define PID_IPV6CP              0x8057

enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT};
enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ,
      CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY,
      LCP_DISC_REQ, CP_CODES};
#if DEBUG_CP
static const char *const code_names[CP_CODES] = {
        "0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq",
        "TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard"
};
static char debug_buffer[64 + 3 * DEBUG_CP];
#endif

enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5};

struct hdlc_header {
        u8 address;
        u8 control;
        __be16 protocol;
};

struct cp_header {
        u8 code;
        u8 id;
        __be16 len;
};

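/*
 * On the wire every control packet is an hdlc_header immediately followed
 * by a cp_header:
 *
 *      | 0xFF | 0x03 | protocol (2 bytes) | code | id | len (2 bytes) | data |
 *
 * "len" covers the cp_header itself plus the data that follows it (see
 * ppp_tx_cp() and the length check in ppp_rx()).
 */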

struct proto {
        struct net_device *dev;
        struct timer_list timer;
        unsigned long timeout;
        u16 pid;                /* protocol ID */
        u16 state;              /* state machine */
        u8 cr_id;               /* ID of last Configuration-Request */
        u8 restart_counter;
};

struct ppp {
        struct proto protos[IDX_COUNT];
        spinlock_t lock;
        unsigned long last_pong;
        unsigned int req_timeout, cr_retries, term_retries;
        unsigned int keepalive_interval, keepalive_timeout;
        u8 seq;                 /* local sequence number for requests */
        u8 echo_id;             /* ID of last Echo-Request (LCP) */
};

enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED,
      STATES, STATE_MASK = 0xF};
enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA,
      RUC, RXJ_GOOD, RXJ_BAD, EVENTS};
enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100,
      SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000};

#if DEBUG_STATE
static const char *const state_names[STATES] = {
        "Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent",
        "Opened"
};
static const char *const event_names[EVENTS] = {
        "Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN",
        "RTR", "RTA", "RUC", "RXJ+", "RXJ-"
};
#endif

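/*
 * Control packets built while ppp->lock is held (with interrupts disabled)
 * are queued here by ppp_tx_cp() and only handed to dev_queue_xmit() by
 * ppp_tx_flush() once the lock has been released.
 */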
static struct sk_buff_head tx_queue; /* used when holding the spin lock */

static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr);

static inline struct ppp* get_ppp(struct net_device *dev)
{
        return (struct ppp *)dev_to_hdlc(dev)->state;
}

static inline struct proto* get_proto(struct net_device *dev, u16 pid)
{
        struct ppp *ppp = get_ppp(dev);

        switch (pid) {
        case PID_LCP:
                return &ppp->protos[IDX_LCP];
        case PID_IPCP:
                return &ppp->protos[IDX_IPCP];
        case PID_IPV6CP:
                return &ppp->protos[IDX_IPV6CP];
        default:
                return NULL;
        }
}

static inline const char* proto_name(u16 pid)
{
        switch (pid) {
        case PID_LCP:
                return "LCP";
        case PID_IPCP:
                return "IPCP";
        case PID_IPV6CP:
                return "IPV6CP";
        default:
                return NULL;
        }
}


static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        struct hdlc_header *data = (struct hdlc_header*)skb->data;

        if (skb->len < sizeof(struct hdlc_header))
                return htons(ETH_P_HDLC);
        if (data->address != HDLC_ADDR_ALLSTATIONS ||
            data->control != HDLC_CTRL_UI)
                return htons(ETH_P_HDLC);

        switch (data->protocol) {
        case cpu_to_be16(PID_IP):
                skb_pull(skb, sizeof(struct hdlc_header));
                return htons(ETH_P_IP);

        case cpu_to_be16(PID_IPV6):
                skb_pull(skb, sizeof(struct hdlc_header));
                return htons(ETH_P_IPV6);

        default:
                return htons(ETH_P_HDLC);
        }
}


static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
                           u16 type, const void *daddr, const void *saddr,
                           unsigned int len)
{
        struct hdlc_header *data;
#if DEBUG_HARD_HEADER
        printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name);
#endif

        skb_push(skb, sizeof(struct hdlc_header));
        data = (struct hdlc_header*)skb->data;

        data->address = HDLC_ADDR_ALLSTATIONS;
        data->control = HDLC_CTRL_UI;
        switch (type) {
        case ETH_P_IP:
                data->protocol = htons(PID_IP);
                break;
        case ETH_P_IPV6:
                data->protocol = htons(PID_IPV6);
                break;
        case PID_LCP:
        case PID_IPCP:
        case PID_IPV6CP:
                data->protocol = htons(type);
                break;
        default:                /* unknown protocol */
                data->protocol = 0;
        }
        return sizeof(struct hdlc_header);
}
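/*
 * ppp_hard_header() has two callers: it is installed as
 * ppp_header_ops.create in ppp_ioctl() so the stack can prepend the HDLC
 * header to outgoing IP/IPv6 frames, and ppp_tx_cp() calls it directly
 * with an LCP/IPCP/IPV6CP protocol ID when building control packets.
 */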


static void ppp_tx_flush(void)
{
        struct sk_buff *skb;
        while ((skb = skb_dequeue(&tx_queue)) != NULL)
                dev_queue_xmit(skb);
}

static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
                      u8 id, unsigned int len, const void *data)
{
        struct sk_buff *skb;
        struct cp_header *cp;
        unsigned int magic_len = 0;
        static u32 magic;

#if DEBUG_CP
        int i;
        char *ptr;
#endif

        if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY))
                magic_len = sizeof(magic);

        skb = dev_alloc_skb(sizeof(struct hdlc_header) +
                            sizeof(struct cp_header) + magic_len + len);
        if (!skb) {
                netdev_warn(dev, "out of memory in ppp_tx_cp()\n");
                return;
        }
        skb_reserve(skb, sizeof(struct hdlc_header));

        cp = (struct cp_header *)skb_put(skb, sizeof(struct cp_header));
        cp->code = code;
        cp->id = id;
        cp->len = htons(sizeof(struct cp_header) + magic_len + len);

        if (magic_len)
                memcpy(skb_put(skb, magic_len), &magic, magic_len);
        if (len)
                memcpy(skb_put(skb, len), data, len);

#if DEBUG_CP
        BUG_ON(code >= CP_CODES);
        ptr = debug_buffer;
        *ptr = '\x0';
        for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) {
                sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
                ptr += strlen(ptr);
        }
        printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name,
               proto_name(pid), code_names[code], id, debug_buffer);
#endif

        ppp_hard_header(skb, dev, pid, NULL, NULL, 0);

        skb->priority = TC_PRIO_CONTROL;
        skb->dev = dev;
        skb_reset_network_header(skb);
        skb_queue_tail(&tx_queue, skb);
}


/* State transition table (compare STD-51)

     Events                                     Actions
     TO+  = Timeout with counter > 0            irc = Initialize-Restart-Count
     TO-  = Timeout with counter expired        zrc = Zero-Restart-Count

     RCR+ = Receive-Configure-Request (Good)    scr = Send-Configure-Request
     RCR- = Receive-Configure-Request (Bad)
     RCA  = Receive-Configure-Ack               sca = Send-Configure-Ack
     RCN  = Receive-Configure-Nak/Rej           scn = Send-Configure-Nak/Rej

     RTR  = Receive-Terminate-Request           str = Send-Terminate-Request
     RTA  = Receive-Terminate-Ack               sta = Send-Terminate-Ack

     RUC  = Receive-Unknown-Code                scj = Send-Code-Reject
     RXJ+ = Receive-Code-Reject (permitted)
            or Receive-Protocol-Reject
     RXJ- = Receive-Code-Reject (catastrophic)
            or Receive-Protocol-Reject
*/
static int cp_table[EVENTS][STATES] = {
        /*  CLOSED      STOPPED    STOPPING REQ_SENT ACK_RECV ACK_SENT  OPENED
               0           1           2        3        4        5        6   */
        {IRC|SCR|3,     INV     , INV ,   INV   , INV ,   INV   ,   INV   }, /* START */
        {   INV   ,      0      ,  0  ,    0    ,  0  ,    0    ,    0    }, /* STOP  */
        {   INV   ,     INV     ,STR|2,  SCR|3  ,SCR|3,  SCR|5  ,   INV   }, /* TO+   */
        {   INV   ,     INV     ,  1  ,    1    ,  1  ,    1    ,   INV   }, /* TO-   */
        {  STA|0  ,IRC|SCR|SCA|5,  2  ,  SCA|5  ,SCA|6,  SCA|5  ,SCR|SCA|5}, /* RCR+  */
        {  STA|0  ,IRC|SCR|SCN|3,  2  ,  SCN|3  ,SCN|4,  SCN|3  ,SCR|SCN|3}, /* RCR-  */
        {  STA|0  ,    STA|1    ,  2  ,  IRC|4  ,SCR|3,    6    ,  SCR|3  }, /* RCA   */
        {  STA|0  ,    STA|1    ,  2  ,IRC|SCR|3,SCR|3,IRC|SCR|5,  SCR|3  }, /* RCN   */
        {  STA|0  ,    STA|1    ,STA|2,  STA|3  ,STA|3,  STA|3  ,ZRC|STA|2}, /* RTR   */
        {    0    ,      1      ,  1  ,    3    ,  3  ,    5    ,  SCR|3  }, /* RTA   */
        {  SCJ|0  ,    SCJ|1    ,SCJ|2,  SCJ|3  ,SCJ|4,  SCJ|5  ,  SCJ|6  }, /* RUC   */
        {    0    ,      1      ,  2  ,    3    ,  3  ,    5    ,    6    }, /* RXJ+  */
        {    0    ,      1      ,  1  ,    1    ,  1  ,    1    ,IRC|STR|2}, /* RXJ-  */
};
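/*
 * Each cp_table entry packs the next state into the low nibble (STATE_MASK)
 * and the actions to perform into the remaining bits.  For example, a START
 * event in state CLOSED yields IRC|SCR|3: initialise the restart counter,
 * send a Configure-Request and move to REQ_SENT (state 3); ppp_cp_event()
 * below decodes the entries exactly this way.
 */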


/* SCA: RCR+ must supply id, len and data
   SCN: RCR- must supply code, id, len and data
   STA: RTR must supply id
   SCJ: RUC must supply CP packet len and data */
static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
                         u8 id, unsigned int len, const void *data)
{
        int old_state, action;
        struct ppp *ppp = get_ppp(dev);
        struct proto *proto = get_proto(dev, pid);

        old_state = proto->state;
        BUG_ON(old_state >= STATES);
        BUG_ON(event >= EVENTS);

#if DEBUG_STATE
        printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
               proto_name(pid), event_names[event], state_names[proto->state]);
#endif

        action = cp_table[event][old_state];

        proto->state = action & STATE_MASK;
        if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
                mod_timer(&proto->timer, proto->timeout =
                          jiffies + ppp->req_timeout * HZ);
        if (action & ZRC)
                proto->restart_counter = 0;
        if (action & IRC)
                proto->restart_counter = (proto->state == STOPPING) ?
                        ppp->term_retries : ppp->cr_retries;

        if (action & SCR)       /* send Configure-Request */
                ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
                          0, NULL);
        if (action & SCA)       /* send Configure-Ack */
                ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
        if (action & SCN)       /* send Configure-Nak/Reject */
                ppp_tx_cp(dev, pid, code, id, len, data);
        if (action & STR)       /* send Terminate-Request */
                ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
        if (action & STA)       /* send Terminate-Ack */
                ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
        if (action & SCJ)       /* send Code-Reject */
                ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);

        if (old_state != OPENED && proto->state == OPENED) {
                netdev_info(dev, "%s up\n", proto_name(pid));
                if (pid == PID_LCP) {
                        netif_dormant_off(dev);
                        ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
                        ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
                        ppp->last_pong = jiffies;
                        mod_timer(&proto->timer, proto->timeout =
                                  jiffies + ppp->keepalive_interval * HZ);
                }
        }
        if (old_state == OPENED && proto->state != OPENED) {
                netdev_info(dev, "%s down\n", proto_name(pid));
                if (pid == PID_LCP) {
                        netif_dormant_on(dev);
                        ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
                        ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
                }
        }
        if (old_state != CLOSED && proto->state == CLOSED)
                del_timer(&proto->timer);

#if DEBUG_STATE
        printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
               proto_name(pid), event_names[event], state_names[proto->state]);
#endif
}
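/*
 * LCP drives the network control protocols: when LCP reaches OPENED the
 * device is marked non-dormant, IPCP and IPV6CP are started and the LCP
 * keepalive timer is armed; when LCP leaves OPENED the NCPs are stopped
 * and the device goes dormant again.
 */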


static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
                            unsigned int req_len, const u8 *data)
{
        static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 };
        const u8 *opt;
        u8 *out;
        unsigned int len = req_len, nak_len = 0, rej_len = 0;

        if (!(out = kmalloc(len, GFP_ATOMIC))) {
                dev->stats.rx_dropped++;
                return; /* out of memory, ignore CR packet */
        }

        for (opt = data; len; len -= opt[1], opt += opt[1]) {
                if (len < 2 || len < opt[1]) {
                        dev->stats.rx_errors++;
                        kfree(out);
                        return; /* bad packet, drop silently */
                }

                if (pid == PID_LCP)
                        switch (opt[0]) {
                        case LCP_OPTION_MRU:
                                continue; /* MRU always OK and > 1500 bytes? */

                        case LCP_OPTION_ACCM: /* async control character map */
                                if (!memcmp(opt, valid_accm,
                                            sizeof(valid_accm)))
                                        continue;
                                if (!rej_len) { /* NAK it */
                                        memcpy(out + nak_len, valid_accm,
                                               sizeof(valid_accm));
                                        nak_len += sizeof(valid_accm);
                                        continue;
                                }
                                break;
                        case LCP_OPTION_MAGIC:
                                if (opt[1] != 6 || (!opt[2] && !opt[3] &&
                                                    !opt[4] && !opt[5]))
                                        break; /* reject invalid magic number */
                                continue;
                        }
                /* reject this option */
                memcpy(out + rej_len, opt, opt[1]);
                rej_len += opt[1];
        }

        if (rej_len)
                ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out);
        else if (nak_len)
                ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out);
        else
                ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);

        kfree(out);
}
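/*
 * Configure-Reject takes precedence over Configure-Nak above: if any option
 * was rejected, only the rejected options are returned in a ConfRej;
 * otherwise, if any option needed different values, a ConfNak carrying the
 * preferred values is sent; only when every option was acceptable is the
 * request acknowledged unchanged (RCR_GOOD -> ConfAck), as RFC 1661 requires.
 */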

static int ppp_rx(struct sk_buff *skb)
{
        struct hdlc_header *hdr = (struct hdlc_header*)skb->data;
        struct net_device *dev = skb->dev;
        struct ppp *ppp = get_ppp(dev);
        struct proto *proto;
        struct cp_header *cp;
        unsigned long flags;
        unsigned int len;
        u16 pid;
#if DEBUG_CP
        int i;
        char *ptr;
#endif

        spin_lock_irqsave(&ppp->lock, flags);
        /* Check HDLC header */
        if (skb->len < sizeof(struct hdlc_header))
                goto rx_error;
        cp = (struct cp_header*)skb_pull(skb, sizeof(struct hdlc_header));
        if (hdr->address != HDLC_ADDR_ALLSTATIONS ||
            hdr->control != HDLC_CTRL_UI)
                goto rx_error;

        pid = ntohs(hdr->protocol);
        proto = get_proto(dev, pid);
        if (!proto) {
                if (ppp->protos[IDX_LCP].state == OPENED)
                        ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ,
                                  ++ppp->seq, skb->len + 2, &hdr->protocol);
                goto rx_error;
        }

        len = ntohs(cp->len);
        if (len < sizeof(struct cp_header) /* no complete CP header? */ ||
            skb->len < len /* truncated packet? */)
                goto rx_error;
        skb_pull(skb, sizeof(struct cp_header));
        len -= sizeof(struct cp_header);

        /* HDLC and CP headers stripped from skb */
#if DEBUG_CP
        if (cp->code < CP_CODES)
                sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code],
                        cp->id);
        else
                sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id);
        ptr = debug_buffer + strlen(debug_buffer);
        for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) {
                sprintf(ptr, " %02X", skb->data[i]);
                ptr += strlen(ptr);
        }
        printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid),
               debug_buffer);
#endif

        /* LCP only */
        if (pid == PID_LCP)
                switch (cp->code) {
                case LCP_PROTO_REJ:
                        pid = ntohs(*(__be16*)skb->data);
                        if (pid == PID_LCP || pid == PID_IPCP ||
                            pid == PID_IPV6CP)
                                ppp_cp_event(dev, pid, RXJ_BAD, 0, 0,
                                             0, NULL);
                        goto out;

                case LCP_ECHO_REQ: /* send Echo-Reply */
                        if (len >= 4 && proto->state == OPENED)
                                ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY,
                                          cp->id, len - 4, skb->data + 4);
                        goto out;

                case LCP_ECHO_REPLY:
                        if (cp->id == ppp->echo_id)
                                ppp->last_pong = jiffies;
                        goto out;

                case LCP_DISC_REQ: /* discard */
                        goto out;
                }

        /* LCP, IPCP and IPV6CP */
        switch (cp->code) {
        case CP_CONF_REQ:
                ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
                break;

        case CP_CONF_ACK:
                if (cp->id == proto->cr_id)
                        ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL);
                break;

        case CP_CONF_REJ:
        case CP_CONF_NAK:
                if (cp->id == proto->cr_id)
                        ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL);
                break;

        case CP_TERM_REQ:
                ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL);
                break;

        case CP_TERM_ACK:
                ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL);
                break;

        case CP_CODE_REJ:
                ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL);
                break;

        default:
                len += sizeof(struct cp_header);
                if (len > dev->mtu)
                        len = dev->mtu;
                ppp_cp_event(dev, pid, RUC, 0, 0, len, cp);
                break;
        }
        goto out;

rx_error:
        dev->stats.rx_errors++;
out:
        spin_unlock_irqrestore(&ppp->lock, flags);
        dev_kfree_skb_any(skb);
        ppp_tx_flush();
        return NET_RX_DROP;
}
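/*
 * Control frames are fully handled above: the skb is always consumed and
 * NET_RX_DROP returned, and any replies generated while ppp->lock was held
 * go out through the final ppp_tx_flush() call.  Data frames are expected
 * to be classified by ppp_type_trans() and delivered to the stack by the
 * generic HDLC layer rather than reaching this handler.
 */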

static void ppp_timer(unsigned long arg)
{
        struct proto *proto = (struct proto *)arg;
        struct ppp *ppp = get_ppp(proto->dev);
        unsigned long flags;

        spin_lock_irqsave(&ppp->lock, flags);
        switch (proto->state) {
        case STOPPING:
        case REQ_SENT:
        case ACK_RECV:
        case ACK_SENT:
                if (proto->restart_counter) {
                        ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
                                     0, NULL);
                        proto->restart_counter--;
                } else
                        ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
                                     0, NULL);
                break;

        case OPENED:
                if (proto->pid != PID_LCP)
                        break;
                if (time_after(jiffies, ppp->last_pong +
                               ppp->keepalive_timeout * HZ)) {
                        netdev_info(proto->dev, "Link down\n");
                        ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL);
                        ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL);
                } else {        /* send keep-alive packet */
                        ppp->echo_id = ++ppp->seq;
                        ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ,
                                  ppp->echo_id, 0, NULL);
                        proto->timer.expires = jiffies +
                                ppp->keepalive_interval * HZ;
                        add_timer(&proto->timer);
                }
                break;
        }
        spin_unlock_irqrestore(&ppp->lock, flags);
        ppp_tx_flush();
}
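/*
 * Keepalive: while LCP is OPENED the timer sends an Echo-Request every
 * keepalive_interval seconds and records its ID in echo_id; ppp_rx() updates
 * last_pong when the matching Echo-Reply arrives.  If no reply has been seen
 * for keepalive_timeout seconds the link is declared down and LCP is
 * restarted.
 */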


static void ppp_start(struct net_device *dev)
{
        struct ppp *ppp = get_ppp(dev);
        int i;

        for (i = 0; i < IDX_COUNT; i++) {
                struct proto *proto = &ppp->protos[i];
                proto->dev = dev;
                init_timer(&proto->timer);
                proto->timer.function = ppp_timer;
                proto->timer.data = (unsigned long)proto;
                proto->state = CLOSED;
        }
        ppp->protos[IDX_LCP].pid = PID_LCP;
        ppp->protos[IDX_IPCP].pid = PID_IPCP;
        ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP;

        ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL);
}

static void ppp_stop(struct net_device *dev)
{
        ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
}

static void ppp_close(struct net_device *dev)
{
        ppp_tx_flush();
}

static struct hdlc_proto proto = {
        .start          = ppp_start,
        .stop           = ppp_stop,
        .close          = ppp_close,
        .type_trans     = ppp_type_trans,
        .ioctl          = ppp_ioctl,
        .netif_rx       = ppp_rx,
        .module         = THIS_MODULE,
};

static const struct header_ops ppp_header_ops = {
        .create = ppp_hard_header,
};

static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
        hdlc_device *hdlc = dev_to_hdlc(dev);
        struct ppp *ppp;
        int result;

        switch (ifr->ifr_settings.type) {
        case IF_GET_PROTO:
                if (dev_to_hdlc(dev)->proto != &proto)
                        return -EINVAL;
                ifr->ifr_settings.type = IF_PROTO_PPP;
                return 0; /* return protocol only, no settable parameters */

        case IF_PROTO_PPP:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;

                if (dev->flags & IFF_UP)
                        return -EBUSY;

                /* no settable parameters */

                result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
                if (result)
                        return result;

                result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp));
                if (result)
                        return result;

                ppp = get_ppp(dev);
                spin_lock_init(&ppp->lock);
                ppp->req_timeout = 2;
                ppp->cr_retries = 10;
                ppp->term_retries = 2;
                ppp->keepalive_interval = 10;
                ppp->keepalive_timeout = 60;

                dev->hard_header_len = sizeof(struct hdlc_header);
                dev->header_ops = &ppp_header_ops;
                dev->type = ARPHRD_PPP;
                netif_dormant_on(dev);
                return 0;
        }

        return -EINVAL;
}


static int __init mod_init(void)
{
        skb_queue_head_init(&tx_queue);
        register_hdlc_protocol(&proto);
        return 0;
}

static void __exit mod_exit(void)
{
        unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");