Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ks8695net.c
Go to the documentation of this file.
1 /*
2  * Micrel KS8695 (Centaur) Ethernet.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of the
7  * License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * Copyright 2008 Simtec Electronics
15  * Daniel Silverstone <[email protected]>
16  * Vincent Sanders <[email protected]>
17  */
18 
19 #include <linux/dma-mapping.h>
20 #include <linux/module.h>
21 #include <linux/ioport.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/skbuff.h>
27 #include <linux/spinlock.h>
28 #include <linux/crc32.h>
29 #include <linux/mii.h>
30 #include <linux/ethtool.h>
31 #include <linux/delay.h>
32 #include <linux/platform_device.h>
33 #include <linux/irq.h>
34 #include <linux/io.h>
35 #include <linux/slab.h>
36 
37 #include <asm/irq.h>
38 
39 #include <mach/regs-switch.h>
40 #include <mach/regs-misc.h>
41 #include <asm/mach/irq.h>
42 #include <mach/regs-irq.h>
43 
44 #include "ks8695net.h"
45 
46 #define MODULENAME "ks8695_ether"
47 #define MODULEVERSION "1.02"
48 
49 /*
50  * Transmit and device reset timeout, default 5 seconds.
51  */
52 static int watchdog = 5000;
53 
54 /* Hardware structures */
55 
63 struct rx_ring_desc {
68 };
69 
77 struct tx_ring_desc {
82 };
83 
90 struct ks8695_skbuff {
91  struct sk_buff *skb;
94 };
95 
96 /* Private device structure */
97 
98 #define MAX_TX_DESC 8
99 #define MAX_TX_DESC_MASK 0x7
100 #define MAX_RX_DESC 16
101 #define MAX_RX_DESC_MASK 0xf
102 
103 /*napi_weight have better more than rx DMA buffers*/
104 #define NAPI_WEIGHT 64
105 
106 #define MAX_RXBUF_SIZE 0x700
107 
108 #define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
109 #define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
110 #define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
111 
122 };
123 
158 struct ks8695_priv {
160  struct net_device *ndev;
161  struct device *dev;
164 
166 
169 
172 
173  void *ring_base;
175 
182 
188 
190 };
191 
192 /* Register access */
193 
/*
 * ks8695_readreg - Read from a register in the ethernet block.
 * @ksp: The device to read from
 * @reg: The register offset (KS8695_* from ks8695net.h)
 */
static inline u32
ks8695_readreg(struct ks8695_priv *ksp, int reg)
{
	return readl(ksp->io_regs + reg);
}
204 
/*
 * ks8695_writereg - Write to a register in the ethernet block.
 * @ksp: The device to write to
 * @reg: The register offset (KS8695_* from ks8695net.h)
 * @value: The 32-bit value to write
 */
static inline void
ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
{
	writel(value, ksp->io_regs + reg);
}
216 
217 /* Utility functions */
218 
226 static const char *
227 ks8695_port_type(struct ks8695_priv *ksp)
228 {
229  switch (ksp->dtype) {
230  case KS8695_DTYPE_LAN:
231  return "LAN";
232  case KS8695_DTYPE_WAN:
233  return "WAN";
234  case KS8695_DTYPE_HPNA:
235  return "HPNA";
236  }
237 
238  return "UNKNOWN";
239 }
240 
/*
 * ks8695_update_mac - Program the hardware MAC registers from the
 * net_device's dev_addr.
 * @ksp: The device to update
 *
 * The six address bytes are split big-endian style across the MAH
 * (top two bytes) and MAL (bottom four bytes) registers.
 */
static void
ks8695_update_mac(struct ks8695_priv *ksp)
{
	/* Update the HW with the MAC from the net_device */
	struct net_device *ndev = ksp->ndev;
	u32 machigh, maclow;

	maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
		  (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0));
	machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));

	ks8695_writereg(ksp, KS8695_MAL, maclow);
	ks8695_writereg(ksp, KS8695_MAH, machigh);

}
263 
273 static void
274 ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
275 {
276  /* Run around the RX ring, filling in any missing sk_buff's */
277  int buff_n;
278 
279  for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
280  if (!ksp->rx_buffers[buff_n].skb) {
281  struct sk_buff *skb =
282  netdev_alloc_skb(ksp->ndev, MAX_RXBUF_SIZE);
284 
285  ksp->rx_buffers[buff_n].skb = skb;
286  if (skb == NULL) {
287  /* Failed to allocate one, perhaps
288  * we'll try again later.
289  */
290  break;
291  }
292 
293  mapping = dma_map_single(ksp->dev, skb->data,
296  if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
297  /* Failed to DMA map this SKB, try later */
298  dev_kfree_skb_irq(skb);
299  ksp->rx_buffers[buff_n].skb = NULL;
300  break;
301  }
302  ksp->rx_buffers[buff_n].dma_ptr = mapping;
303  ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;
304 
305  /* Record this into the DMA ring */
306  ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
307  ksp->rx_ring[buff_n].length =
309 
310  wmb();
311 
312  /* And give ownership over to the hardware */
313  ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
314  }
315  }
316 }
317 
318 /* Maximum number of multicast addresses which the KS8695 HW supports */
319 #define KS8695_NR_ADDRESSES 16
320 
331 static void
332 ks8695_init_partial_multicast(struct ks8695_priv *ksp,
333  struct net_device *ndev)
334 {
335  u32 low, high;
336  int i;
337  struct netdev_hw_addr *ha;
338 
339  i = 0;
340  netdev_for_each_mc_addr(ha, ndev) {
341  /* Ran out of space in chip? */
343 
344  low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
345  (ha->addr[4] << 8) | (ha->addr[5]);
346  high = (ha->addr[0] << 8) | (ha->addr[1]);
347 
348  ks8695_writereg(ksp, KS8695_AAL_(i), low);
349  ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
350  i++;
351  }
352 
353  /* Clear the remaining Additional Station Addresses */
354  for (; i < KS8695_NR_ADDRESSES; i++) {
355  ks8695_writereg(ksp, KS8695_AAL_(i), 0);
356  ks8695_writereg(ksp, KS8695_AAH_(i), 0);
357  }
358 }
359 
360 /* Interrupt handling */
361 
/*
 * ks8695_tx_irq - TX completion interrupt handler.
 * @irq: The interrupt number (unused)
 * @dev_id: The net_device this interrupt was registered for
 *
 * Walks the whole TX ring reclaiming every descriptor the hardware
 * has finished with (owner bit cleared): accounts the packet,
 * unmaps and frees the sk_buff, and releases the ring slot. Finally
 * wakes the TX queue in case ks8695_start_xmit() had stopped it.
 */
static irqreturn_t
ks8695_tx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;

	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		if (ksp->tx_buffers[buff_n].skb &&
		    !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
			/* Order the ownership check before reading the
			 * rest of the descriptor.
			 */
			rmb();
			/* An SKB which is not owned by HW is present */
			/* Update the stats for the net_device */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;

			/* Free the packet from the ring */
			ksp->tx_ring[buff_n].data_ptr = 0;

			/* Free the sk_buff */
			dma_unmap_single(ksp->dev,
					 ksp->tx_buffers[buff_n].dma_ptr,
					 ksp->tx_buffers[buff_n].length,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
			ksp->tx_buffers[buff_n].skb = NULL;
			ksp->tx_ring_used--;
		}
	}

	netif_wake_queue(ndev);

	return IRQ_HANDLED;
}
405 
/*
 * ks8695_get_rx_enable_bit - Bit number of this port's RX interrupt
 * in the global interrupt-enable register.
 * @ksp: The device in question
 *
 * The RX IRQ number doubles as the bit position in KS8695_INTEN.
 */
static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
{
	return ksp->rx_irq;
}
424 
/*
 * ks8695_rx_irq - RX interrupt handler.
 * @irq: The interrupt number (unused)
 * @dev_id: The net_device this interrupt was registered for
 *
 * Masks this port's RX interrupt and schedules NAPI; the interrupt
 * is re-enabled by ks8695_poll() once the ring has been drained.
 * rx_lock serialises the mask/unmask against the poll routine.
 */
static irqreturn_t
ks8695_rx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);

	spin_lock(&ksp->rx_lock);

	if (napi_schedule_prep(&ksp->napi)) {
		unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
		unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
		/*disable rx interrupt*/
		status &= ~mask_bit;
		writel(status , KS8695_IRQ_VA + KS8695_INTEN);
		__napi_schedule(&ksp->napi);
	}

	spin_unlock(&ksp->rx_lock);
	return IRQ_HANDLED;
}
453 
459 static int ks8695_rx(struct ks8695_priv *ksp, int budget)
460 {
461  struct net_device *ndev = ksp->ndev;
462  struct sk_buff *skb;
463  int buff_n;
464  u32 flags;
465  int pktlen;
466  int received = 0;
467 
468  buff_n = ksp->next_rx_desc_read;
469  while (received < budget
470  && ksp->rx_buffers[buff_n].skb
471  && (!(ksp->rx_ring[buff_n].status &
472  cpu_to_le32(RDES_OWN)))) {
473  rmb();
474  flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
475 
476  /* Found an SKB which we own, this means we
477  * received a packet
478  */
479  if ((flags & (RDES_FS | RDES_LS)) !=
480  (RDES_FS | RDES_LS)) {
481  /* This packet is not the first and
482  * the last segment. Therefore it is
483  * a "spanning" packet and we can't
484  * handle it
485  */
486  goto rx_failure;
487  }
488 
489  if (flags & (RDES_ES | RDES_RE)) {
490  /* It's an error packet */
491  ndev->stats.rx_errors++;
492  if (flags & RDES_TL)
493  ndev->stats.rx_length_errors++;
494  if (flags & RDES_RF)
495  ndev->stats.rx_length_errors++;
496  if (flags & RDES_CE)
497  ndev->stats.rx_crc_errors++;
498  if (flags & RDES_RE)
499  ndev->stats.rx_missed_errors++;
500 
501  goto rx_failure;
502  }
503 
504  pktlen = flags & RDES_FLEN;
505  pktlen -= 4; /* Drop the CRC */
506 
507  /* Retrieve the sk_buff */
508  skb = ksp->rx_buffers[buff_n].skb;
509 
510  /* Clear it from the ring */
511  ksp->rx_buffers[buff_n].skb = NULL;
512  ksp->rx_ring[buff_n].data_ptr = 0;
513 
514  /* Unmap the SKB */
515  dma_unmap_single(ksp->dev,
516  ksp->rx_buffers[buff_n].dma_ptr,
517  ksp->rx_buffers[buff_n].length,
519 
520  /* Relinquish the SKB to the network layer */
521  skb_put(skb, pktlen);
522  skb->protocol = eth_type_trans(skb, ndev);
523  netif_receive_skb(skb);
524 
525  /* Record stats */
526  ndev->stats.rx_packets++;
527  ndev->stats.rx_bytes += pktlen;
528  goto rx_finished;
529 
530 rx_failure:
531  /* This ring entry is an error, but we can
532  * re-use the skb
533  */
534  /* Give the ring entry back to the hardware */
535  ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
536 rx_finished:
537  received++;
538  buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
539  }
540 
541  /* And note which RX descriptor we last did */
542  ksp->next_rx_desc_read = buff_n;
543 
544  /* And refill the buffers */
545  ks8695_refill_rxbuffers(ksp);
546 
547  /* Kick the RX DMA engine, in case it became suspended */
548  ks8695_writereg(ksp, KS8695_DRSC, 0);
549 
550  return received;
551 }
552 
553 
/*
 * ks8695_poll - NAPI poll routine.
 * @napi: The NAPI context embedded in our ks8695_priv
 * @budget: Maximum packets to process this pass
 *
 * Drains the RX ring via ks8695_rx(); if the ring emptied before the
 * budget was spent, completes NAPI and re-enables this port's RX
 * interrupt (masked in ks8695_rx_irq()). rx_lock keeps the
 * complete/unmask step atomic with respect to the IRQ handler.
 */
static int ks8695_poll(struct napi_struct *napi, int budget)
{
	struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
	unsigned long work_done;

	/* Snapshot the interrupt-enable register so the RX bit can be
	 * OR-ed back in on completion.
	 */
	unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
	unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);

	work_done = ks8695_rx(ksp, budget);

	if (work_done < budget) {
		unsigned long flags;
		spin_lock_irqsave(&ksp->rx_lock, flags);
		__napi_complete(napi);
		/*enable rx interrupt*/
		writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
		spin_unlock_irqrestore(&ksp->rx_lock, flags);
	}
	return work_done;
}
582 
/*
 * ks8695_link_irq - WAN link-change interrupt handler.
 * @irq: The interrupt number (unused)
 * @dev_id: The net_device this interrupt was registered for
 *
 * Reads the WAN MISC control register to determine the new link
 * state, updates the carrier flag accordingly, and optionally logs
 * speed/duplex when link messages are enabled.
 */
static irqreturn_t
ks8695_link_irq(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
	if (ctrl & WMC_WLS) {
		netif_carrier_on(ndev);
		if (netif_msg_link(ksp))
			dev_info(ksp->dev,
				 "%s: Link is now up (10%sMbps/%s-duplex)\n",
				 ndev->name,
				 (ctrl & WMC_WSS) ? "0" : "",
				 (ctrl & WMC_WDS) ? "Full" : "Half");
	} else {
		netif_carrier_off(ndev);
		if (netif_msg_link(ksp))
			dev_info(ksp->dev, "%s: Link is now down.\n",
				 ndev->name);
	}

	return IRQ_HANDLED;
}
616 
617 
618 /* KS8695 Device functions */
619 
/*
 * ks8695_reset - Reset the KS8695 DMA engines.
 * @ksp: The device to reset
 *
 * Issues a soft reset through the TX DMA control register, polls
 * (up to the watchdog timeout, in ms) for the reset bit to clear,
 * then programs baseline RX/TX modes. On timeout it complains but
 * continues — the subsequent settle delay is relied upon either way.
 */
static void
ks8695_reset(struct ks8695_priv *ksp)
{
	int reset_timeout = watchdog;
	/* Issue the reset via the TX DMA control register */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
	while (reset_timeout--) {
		if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
			break;
		msleep(1);
	}

	if (reset_timeout < 0) {
		dev_crit(ksp->dev,
			 "Timeout waiting for DMA engines to reset\n");
		/* And blithely carry on */
	}

	/* Definitely wait long enough before attempting to program
	 * the engines
	 */
	msleep(10);

	/* RX: unicast and broadcast */
	ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
	/* TX: pad and add CRC */
	ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
}
655 
664 static void
665 ks8695_shutdown(struct ks8695_priv *ksp)
666 {
667  u32 ctrl;
668  int buff_n;
669 
670  /* Disable packet transmission */
671  ctrl = ks8695_readreg(ksp, KS8695_DTXC);
672  ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);
673 
674  /* Disable packet reception */
675  ctrl = ks8695_readreg(ksp, KS8695_DRXC);
676  ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);
677 
678  /* Release the IRQs */
679  free_irq(ksp->rx_irq, ksp->ndev);
680  free_irq(ksp->tx_irq, ksp->ndev);
681  if (ksp->link_irq != -1)
682  free_irq(ksp->link_irq, ksp->ndev);
683 
684  /* Throw away any pending TX packets */
685  for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
686  if (ksp->tx_buffers[buff_n].skb) {
687  /* Remove this SKB from the TX ring */
688  ksp->tx_ring[buff_n].owner = 0;
689  ksp->tx_ring[buff_n].status = 0;
690  ksp->tx_ring[buff_n].data_ptr = 0;
691 
692  /* Unmap and bin this SKB */
693  dma_unmap_single(ksp->dev,
694  ksp->tx_buffers[buff_n].dma_ptr,
695  ksp->tx_buffers[buff_n].length,
696  DMA_TO_DEVICE);
697  dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
698  ksp->tx_buffers[buff_n].skb = NULL;
699  }
700  }
701 
702  /* Purge the RX buffers */
703  for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
704  if (ksp->rx_buffers[buff_n].skb) {
705  /* Remove the SKB from the RX ring */
706  ksp->rx_ring[buff_n].status = 0;
707  ksp->rx_ring[buff_n].data_ptr = 0;
708 
709  /* Unmap and bin the SKB */
710  dma_unmap_single(ksp->dev,
711  ksp->rx_buffers[buff_n].dma_ptr,
712  ksp->rx_buffers[buff_n].length,
714  dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
715  ksp->rx_buffers[buff_n].skb = NULL;
716  }
717  }
718 }
719 
720 
730 static int
731 ks8695_setup_irq(int irq, const char *irq_name,
732  irq_handler_t handler, struct net_device *ndev)
733 {
734  int ret;
735 
736  ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);
737 
738  if (ret) {
739  dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
740  return ret;
741  }
742 
743  return 0;
744 }
745 
/*
 * ks8695_init_net - Bring the network engines up.
 * @ksp: The device to initialise
 *
 * Refills the RX ring, points the DMA engines at the descriptor
 * rings, requests the RX/TX (and optional link) IRQs, resets the
 * ring indices and enables transmission and reception.
 *
 * Returns 0 on success or a negative error from IRQ setup. NOTE:
 * on a failed IRQ request, IRQs already acquired here are not
 * released; callers recover via ks8695_shutdown().
 */
static int
ks8695_init_net(struct ks8695_priv *ksp)
{
	int ret;
	u32 ctrl;

	ks8695_refill_rxbuffers(ksp);

	/* Initialise the DMA engines */
	ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
	ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);

	/* Request the IRQs */
	ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
			       ks8695_rx_irq, ksp->ndev);
	if (ret)
		return ret;
	ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
			       ks8695_tx_irq, ksp->ndev);
	if (ret)
		return ret;
	if (ksp->link_irq != -1) {
		ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
				       ks8695_link_irq, ksp->ndev);
		if (ret)
			return ret;
	}

	/* Set up the ring indices */
	ksp->next_rx_desc_read = 0;
	ksp->tx_ring_next_slot = 0;
	ksp->tx_ring_used = 0;

	/* Bring up transmission */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	/* Enable packet transmission */
	ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);

	/* Bring up the reception */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	/* Enable packet reception */
	ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);
	/* And start the DMA engine */
	ks8695_writereg(ksp, KS8695_DRSC, 0);

	/* All done */
	return 0;
}
802 
810 static void
811 ks8695_release_device(struct ks8695_priv *ksp)
812 {
813  /* Unmap the registers */
814  iounmap(ksp->io_regs);
815  if (ksp->phyiface_regs)
816  iounmap(ksp->phyiface_regs);
817 
818  /* And release the request */
820  kfree(ksp->regs_req);
821  if (ksp->phyiface_req) {
823  kfree(ksp->phyiface_req);
824  }
825 
826  /* Free the ring buffers */
828  ksp->ring_base, ksp->ring_base_dma);
829 }
830 
831 /* Ethtool support */
832 
/*
 * ks8695_get_msglevel - ethtool get_msglevel: report the current
 * netif message-level bitmask.
 * @ndev: The network device to read from
 */
static u32
ks8695_get_msglevel(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	return ksp->msg_enable;
}
844 
/*
 * ks8695_set_msglevel - ethtool set_msglevel: set the netif
 * message-level bitmask.
 * @ndev: The network device to configure
 * @value: The new bitmask (NETIF_MSG_*)
 */
static void
ks8695_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	ksp->msg_enable = value;
}
857 
863 static int
864 ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
865 {
866  struct ks8695_priv *ksp = netdev_priv(ndev);
867  u32 ctrl;
868 
869  /* All ports on the KS8695 support these... */
873  cmd->transceiver = XCVR_INTERNAL;
874 
876  cmd->port = PORT_MII;
878  cmd->phy_address = 0;
879 
880  ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
881  if ((ctrl & WMC_WAND) == 0) {
882  /* auto-negotiation is enabled */
884  if (ctrl & WMC_WANA100F)
886  if (ctrl & WMC_WANA100H)
888  if (ctrl & WMC_WANA10F)
890  if (ctrl & WMC_WANA10H)
892  if (ctrl & WMC_WANAP)
894  cmd->autoneg = AUTONEG_ENABLE;
895 
896  ethtool_cmd_speed_set(cmd,
897  (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10);
898  cmd->duplex = (ctrl & WMC_WDS) ?
900  } else {
901  /* auto-negotiation is disabled */
902  cmd->autoneg = AUTONEG_DISABLE;
903 
904  ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ?
905  SPEED_100 : SPEED_10));
906  cmd->duplex = (ctrl & WMC_WANFF) ?
908  }
909 
910  return 0;
911 }
912 
918 static int
919 ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
920 {
921  struct ks8695_priv *ksp = netdev_priv(ndev);
922  u32 ctrl;
923 
924  if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
925  return -EINVAL;
926  if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
927  return -EINVAL;
928  if (cmd->port != PORT_MII)
929  return -EINVAL;
930  if (cmd->transceiver != XCVR_INTERNAL)
931  return -EINVAL;
932  if ((cmd->autoneg != AUTONEG_DISABLE) &&
933  (cmd->autoneg != AUTONEG_ENABLE))
934  return -EINVAL;
935 
936  if (cmd->autoneg == AUTONEG_ENABLE) {
937  if ((cmd->advertising & (ADVERTISED_10baseT_Half |
941  return -EINVAL;
942 
943  ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
944 
945  ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
946  WMC_WANA10F | WMC_WANA10H);
948  ctrl |= WMC_WANA100F;
950  ctrl |= WMC_WANA100H;
952  ctrl |= WMC_WANA10F;
954  ctrl |= WMC_WANA10H;
955 
956  /* force a re-negotiation */
957  ctrl |= WMC_WANR;
958  writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
959  } else {
960  ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
961 
962  /* disable auto-negotiation */
963  ctrl |= WMC_WAND;
964  ctrl &= ~(WMC_WANF100 | WMC_WANFF);
965 
966  if (cmd->speed == SPEED_100)
967  ctrl |= WMC_WANF100;
968  if (cmd->duplex == DUPLEX_FULL)
969  ctrl |= WMC_WANFF;
970 
971  writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
972  }
973 
974  return 0;
975 }
976 
981 static int
982 ks8695_wan_nwayreset(struct net_device *ndev)
983 {
984  struct ks8695_priv *ksp = netdev_priv(ndev);
985  u32 ctrl;
986 
987  ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
988 
989  if ((ctrl & WMC_WAND) == 0)
990  writel(ctrl | WMC_WANR,
991  ksp->phyiface_regs + KS8695_WMC);
992  else
993  /* auto-negotiation not enabled */
994  return -EINVAL;
995 
996  return 0;
997 }
998 
/*
 * ks8695_wan_get_pause - ethtool get_pauseparam for the WAN port.
 * @ndev: The device to read flow-control state from
 * @param: The structure to fill out
 *
 * Pause advertisement comes from the WAN MISC register; the current
 * RX/TX flow-control enables come from the DMA control registers.
 */
static void
ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = readl(ksp->phyiface_regs + KS8695_WMC);

	/* advertise Pause */
	param->autoneg = (ctrl & WMC_WANAP);

	/* current Rx Flow-control */
	ctrl = ks8695_readreg(ksp, KS8695_DRXC);
	param->rx_pause = (ctrl & DRXC_RFCE);

	/* current Tx Flow-control */
	ctrl = ks8695_readreg(ksp, KS8695_DTXC);
	param->tx_pause = (ctrl & DTXC_TFCE);
}
1023 
/*
 * ks8695_get_drvinfo - ethtool get_drvinfo: report driver name,
 * version and bus identification.
 * @ndev: The network device in question
 * @info: The info structure to fill out
 */
static void
ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, MODULEVERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}
1037 
/* ethtool ops for LAN and HPNA ports (no PHY-backed link control) */
static const struct ethtool_ops ks8695_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_drvinfo	= ks8695_get_drvinfo,
};
1043 
/* ethtool ops for the WAN port, which exposes full link control */
static const struct ethtool_ops ks8695_wan_ethtool_ops = {
	.get_msglevel	= ks8695_get_msglevel,
	.set_msglevel	= ks8695_set_msglevel,
	.get_settings	= ks8695_wan_get_settings,
	.set_settings	= ks8695_wan_set_settings,
	.nway_reset	= ks8695_wan_nwayreset,
	.get_link	= ethtool_op_get_link,
	.get_pauseparam	= ks8695_wan_get_pause,
	.get_drvinfo	= ks8695_get_drvinfo,
};
1054 
1055 /* Network device interface functions */
1056 
/*
 * ks8695_set_mac - ndo_set_mac_address: change the interface's MAC.
 * @ndev: The device to change
 * @addr: A struct sockaddr holding the new address
 *
 * Validates the address, copies it into the net_device and pushes it
 * to the hardware registers. Returns 0 or -EADDRNOTAVAIL.
 */
static int
ks8695_set_mac(struct net_device *ndev, void *addr)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	struct sockaddr *address = addr;

	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);

	ks8695_update_mac(ksp);

	dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
		ndev->name, ndev->dev_addr);

	return 0;
}
1080 
/*
 * ks8695_set_multicast - ndo_set_rx_mode: program promiscuity and
 * multicast filtering into the RX DMA control register.
 * @ndev: The device whose flags to apply
 *
 * Falls back to receive-all-multicast when the list exceeds the
 * hardware's KS8695_NR_ADDRESSES slots; otherwise programs exact
 * match registers via ks8695_init_partial_multicast().
 */
static void
ks8695_set_multicast(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	u32 ctrl;

	ctrl = ks8695_readreg(ksp, KS8695_DRXC);

	if (ndev->flags & IFF_PROMISC) {
		/* enable promiscuous mode */
		ctrl |= DRXC_RA;
	} else if (ndev->flags & ~IFF_PROMISC) {
		/* disable promiscuous mode */
		/* NOTE(review): this condition is true whenever any other
		 * flag is set, so it behaves as a plain "else" in practice.
		 */
		ctrl &= ~DRXC_RA;
	}

	if (ndev->flags & IFF_ALLMULTI) {
		/* enable all multicast mode */
		ctrl |= DRXC_RM;
	} else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
		/* more specific multicast addresses than can be
		 * handled in hardware
		 */
		ctrl |= DRXC_RM;
	} else {
		/* enable specific multicasts */
		ctrl &= ~DRXC_RM;
		ks8695_init_partial_multicast(ksp, ndev);
	}

	ks8695_writereg(ksp, KS8695_DRXC, ctrl);
}
1120 
/*
 * ks8695_timeout - ndo_tx_timeout: recover from a stuck transmitter.
 * @ndev: The device that timed out
 *
 * Performs a full stop/reset/reinit cycle and restarts the TX queue.
 */
static void
ks8695_timeout(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	ks8695_shutdown(ksp);

	ks8695_reset(ksp);

	ks8695_update_mac(ksp);

	/* We ignore the return from this since it managed to init
	 * before it probably will be okay to init again.
	 */
	ks8695_init_net(ksp);

	/* Reconfigure promiscuity etc */
	ks8695_set_multicast(ndev);

	/* And start the TX queue once more */
	netif_start_queue(ndev);
}
1150 
1160 static int
1161 ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1162 {
1163  struct ks8695_priv *ksp = netdev_priv(ndev);
1164  int buff_n;
1165  dma_addr_t dmap;
1166 
1167  spin_lock_irq(&ksp->txq_lock);
1168 
1169  if (ksp->tx_ring_used == MAX_TX_DESC) {
1170  /* Somehow we got entered when we have no room */
1171  spin_unlock_irq(&ksp->txq_lock);
1172  return NETDEV_TX_BUSY;
1173  }
1174 
1175  buff_n = ksp->tx_ring_next_slot;
1176 
1177  BUG_ON(ksp->tx_buffers[buff_n].skb);
1178 
1179  dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
1180  if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
1181  /* Failed to DMA map this SKB, give it back for now */
1182  spin_unlock_irq(&ksp->txq_lock);
1183  dev_dbg(ksp->dev, "%s: Could not map DMA memory for "\
1184  "transmission, trying later\n", ndev->name);
1185  return NETDEV_TX_BUSY;
1186  }
1187 
1188  ksp->tx_buffers[buff_n].dma_ptr = dmap;
1189  /* Mapped okay, store the buffer pointer and length for later */
1190  ksp->tx_buffers[buff_n].skb = skb;
1191  ksp->tx_buffers[buff_n].length = skb->len;
1192 
1193  /* Fill out the TX descriptor */
1194  ksp->tx_ring[buff_n].data_ptr =
1195  cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
1196  ksp->tx_ring[buff_n].status =
1198  (skb->len & TDES_TBS));
1199 
1200  wmb();
1201 
1202  /* Hand it over to the hardware */
1203  ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);
1204 
1205  if (++ksp->tx_ring_used == MAX_TX_DESC)
1206  netif_stop_queue(ndev);
1207 
1208  /* Kick the TX DMA in case it decided to go IDLE */
1209  ks8695_writereg(ksp, KS8695_DTSC, 0);
1210 
1211  /* And update the next ring slot */
1212  ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;
1213 
1214  spin_unlock_irq(&ksp->txq_lock);
1215  return NETDEV_TX_OK;
1216 }
1217 
/*
 * ks8695_stop - ndo_stop: take the interface down.
 * @ndev: The device being stopped
 *
 * Stops the queue, disables NAPI and shuts the hardware down.
 * Always returns 0.
 */
static int
ks8695_stop(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&ksp->napi);

	ks8695_shutdown(ksp);

	return 0;
}
1237 
/*
 * ks8695_open - ndo_open: bring the interface up.
 * @ndev: The device being opened
 *
 * Resets and reprograms the hardware, then enables NAPI and the TX
 * queue. Returns 0, -EADDRNOTAVAIL for an invalid MAC, or the error
 * from ks8695_init_net() (after shutting back down).
 */
static int
ks8695_open(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int ret;

	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EADDRNOTAVAIL;

	ks8695_reset(ksp);

	ks8695_update_mac(ksp);

	ret = ks8695_init_net(ksp);
	if (ret) {
		ks8695_shutdown(ksp);
		return ret;
	}

	napi_enable(&ksp->napi);
	netif_start_queue(ndev);

	return 0;
}
1270 
1271 /* Platform device driver */
1272 
/*
 * ks8695_init_switch - Configure the KS8695 LAN switch block.
 * @ksp: The device (LAN port) to configure
 *
 * Programs SEC0 with the datasheet default, selects the LED modes,
 * enables the switch, and writes the SEC1 defaults.
 */
static void __devinit
ks8695_init_switch(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Default value for SEC0 according to datasheet */
	ctrl = 0x40819e00;

	/* LED0 = Speed LED1 = Link/Activity */
	ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
	ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);

	/* Enable Switch */
	ctrl |= SEC0_ENABLE;

	writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);

	/* Defaults for SEC1 */
	writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
}
1300 
/*
 * ks8695_init_wan_phy - Configure the KS8695 WAN PHY.
 * @ksp: The device (WAN port) to configure
 *
 * Advertises all 10/100 modes plus pause, sets the LED modes,
 * restarts auto-negotiation, and clears the PHY power-management
 * and power-save registers.
 */
static void __devinit
ks8695_init_wan_phy(struct ks8695_priv *ksp)
{
	u32 ctrl;

	/* Support auto-negotiation */
	ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
		WMC_WANA10F | WMC_WANA10H);

	/* LED0 = Activity , LED1 = Link */
	ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);

	/* Restart Auto-negotiation */
	ctrl |= WMC_WANR;

	writel(ctrl, ksp->phyiface_regs + KS8695_WMC);

	writel(0, ksp->phyiface_regs + KS8695_WPPM);
	writel(0, ksp->phyiface_regs + KS8695_PPS);
}
1328 
/* net_device operations shared by all three port types */
static const struct net_device_ops ks8695_netdev_ops = {
	.ndo_open		= ks8695_open,
	.ndo_stop		= ks8695_stop,
	.ndo_start_xmit		= ks8695_start_xmit,
	.ndo_tx_timeout		= ks8695_timeout,
	.ndo_set_mac_address	= ks8695_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= ks8695_set_multicast,
};
1338 
1352 static int __devinit
1353 ks8695_probe(struct platform_device *pdev)
1354 {
1355  struct ks8695_priv *ksp;
1356  struct net_device *ndev;
1357  struct resource *regs_res, *phyiface_res;
1358  struct resource *rxirq_res, *txirq_res, *linkirq_res;
1359  int ret = 0;
1360  int buff_n;
1361  u32 machigh, maclow;
1362 
1363  /* Initialise a net_device */
1364  ndev = alloc_etherdev(sizeof(struct ks8695_priv));
1365  if (!ndev)
1366  return -ENOMEM;
1367 
1368  SET_NETDEV_DEV(ndev, &pdev->dev);
1369 
1370  dev_dbg(&pdev->dev, "ks8695_probe() called\n");
1371 
1372  /* Configure our private structure a little */
1373  ksp = netdev_priv(ndev);
1374 
1375  ksp->dev = &pdev->dev;
1376  ksp->ndev = ndev;
1377  ksp->msg_enable = NETIF_MSG_LINK;
1378 
1379  /* Retrieve resources */
1380  regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1381  phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1382 
1383  rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1384  txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1385  linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1386 
1387  if (!(regs_res && rxirq_res && txirq_res)) {
1388  dev_err(ksp->dev, "insufficient resources\n");
1389  ret = -ENOENT;
1390  goto failure;
1391  }
1392 
1393  ksp->regs_req = request_mem_region(regs_res->start,
1394  resource_size(regs_res),
1395  pdev->name);
1396 
1397  if (!ksp->regs_req) {
1398  dev_err(ksp->dev, "cannot claim register space\n");
1399  ret = -EIO;
1400  goto failure;
1401  }
1402 
1403  ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));
1404 
1405  if (!ksp->io_regs) {
1406  dev_err(ksp->dev, "failed to ioremap registers\n");
1407  ret = -EINVAL;
1408  goto failure;
1409  }
1410 
1411  if (phyiface_res) {
1412  ksp->phyiface_req =
1413  request_mem_region(phyiface_res->start,
1414  resource_size(phyiface_res),
1415  phyiface_res->name);
1416 
1417  if (!ksp->phyiface_req) {
1418  dev_err(ksp->dev,
1419  "cannot claim switch register space\n");
1420  ret = -EIO;
1421  goto failure;
1422  }
1423 
1424  ksp->phyiface_regs = ioremap(phyiface_res->start,
1425  resource_size(phyiface_res));
1426 
1427  if (!ksp->phyiface_regs) {
1428  dev_err(ksp->dev,
1429  "failed to ioremap switch registers\n");
1430  ret = -EINVAL;
1431  goto failure;
1432  }
1433  }
1434 
1435  ksp->rx_irq = rxirq_res->start;
1436  ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
1437  ksp->tx_irq = txirq_res->start;
1438  ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
1439  ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
1440  ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
1441  linkirq_res->name : "Ethernet Link";
1442 
1443  /* driver system setup */
1444  ndev->netdev_ops = &ks8695_netdev_ops;
1446 
1447  netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
1448 
1449  /* Retrieve the default MAC addr from the chip. */
1450  /* The bootloader should have left it in there for us. */
1451 
1452  machigh = ks8695_readreg(ksp, KS8695_MAH);
1453  maclow = ks8695_readreg(ksp, KS8695_MAL);
1454 
1455  ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
1456  ndev->dev_addr[1] = machigh & 0xFF;
1457  ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
1458  ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
1459  ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
1460  ndev->dev_addr[5] = maclow & 0xFF;
1461 
1462  if (!is_valid_ether_addr(ndev->dev_addr))
1463  dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
1464  "set using ifconfig\n", ndev->name);
1465 
1466  /* In order to be efficient memory-wise, we allocate both
1467  * rings in one go.
1468  */
1470  &ksp->ring_base_dma, GFP_KERNEL);
1471  if (!ksp->ring_base) {
1472  ret = -ENOMEM;
1473  goto failure;
1474  }
1475 
1476  /* Specify the TX DMA ring buffer */
1477  ksp->tx_ring = ksp->ring_base;
1478  ksp->tx_ring_dma = ksp->ring_base_dma;
1479 
1480  /* And initialise the queue's lock */
1481  spin_lock_init(&ksp->txq_lock);
1482  spin_lock_init(&ksp->rx_lock);
1483 
1484  /* Specify the RX DMA ring buffer */
1485  ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
1487 
1488  /* Zero the descriptor rings */
1489  memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
1490  memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);
1491 
1492  /* Build the rings */
1493  for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
1494  ksp->tx_ring[buff_n].next_desc =
1495  cpu_to_le32(ksp->tx_ring_dma +
1496  (sizeof(struct tx_ring_desc) *
1497  ((buff_n + 1) & MAX_TX_DESC_MASK)));
1498  }
1499 
1500  for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
1501  ksp->rx_ring[buff_n].next_desc =
1502  cpu_to_le32(ksp->rx_ring_dma +
1503  (sizeof(struct rx_ring_desc) *
1504  ((buff_n + 1) & MAX_RX_DESC_MASK)));
1505  }
1506 
1507  /* Initialise the port (physically) */
1508  if (ksp->phyiface_regs && ksp->link_irq == -1) {
1509  ks8695_init_switch(ksp);
1510  ksp->dtype = KS8695_DTYPE_LAN;
1511  SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1512  } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
1513  ks8695_init_wan_phy(ksp);
1514  ksp->dtype = KS8695_DTYPE_WAN;
1515  SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
1516  } else {
1517  /* No initialisation since HPNA does not have a PHY */
1518  ksp->dtype = KS8695_DTYPE_HPNA;
1519  SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1520  }
1521 
1522  /* And bring up the net_device with the net core */
1523  platform_set_drvdata(pdev, ndev);
1524  ret = register_netdev(ndev);
1525 
1526  if (ret == 0) {
1527  dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
1528  ks8695_port_type(ksp), ndev->dev_addr);
1529  } else {
1530  /* Report the failure to register the net_device */
1531  dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
1532  goto failure;
1533  }
1534 
1535  /* All is well */
1536  return 0;
1537 
1538  /* Error exit path */
1539 failure:
1540  ks8695_release_device(ksp);
1541  free_netdev(ndev);
1542 
1543  return ret;
1544 }
1545 
1553 static int
1554 ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
1555 {
1556  struct net_device *ndev = platform_get_drvdata(pdev);
1557  struct ks8695_priv *ksp = netdev_priv(ndev);
1558 
1559  ksp->in_suspend = 1;
1560 
1561  if (netif_running(ndev)) {
1562  netif_device_detach(ndev);
1563  ks8695_shutdown(ksp);
1564  }
1565 
1566  return 0;
1567 }
1568 
1576 static int
1577 ks8695_drv_resume(struct platform_device *pdev)
1578 {
1579  struct net_device *ndev = platform_get_drvdata(pdev);
1580  struct ks8695_priv *ksp = netdev_priv(ndev);
1581 
1582  if (netif_running(ndev)) {
1583  ks8695_reset(ksp);
1584  ks8695_init_net(ksp);
1585  ks8695_set_multicast(ndev);
1586  netif_device_attach(ndev);
1587  }
1588 
1589  ksp->in_suspend = 0;
1590 
1591  return 0;
1592 }
1593 
1600 static int __devexit
1601 ks8695_drv_remove(struct platform_device *pdev)
1602 {
1603  struct net_device *ndev = platform_get_drvdata(pdev);
1604  struct ks8695_priv *ksp = netdev_priv(ndev);
1605 
1606  platform_set_drvdata(pdev, NULL);
1607  netif_napi_del(&ksp->napi);
1608 
1609  unregister_netdev(ndev);
1610  ks8695_release_device(ksp);
1611  free_netdev(ndev);
1612 
1613  dev_dbg(&pdev->dev, "released and freed device\n");
1614  return 0;
1615 }
1616 
/* Platform driver glue binding the probe/remove/PM callbacks */
static struct platform_driver ks8695_driver = {
	.driver = {
		.name	= MODULENAME,
		.owner	= THIS_MODULE,
	},
	.probe		= ks8695_probe,
	.remove		= __devexit_p(ks8695_drv_remove),
	.suspend	= ks8695_drv_suspend,
	.resume		= ks8695_drv_resume,
};
1627 
1628 /* Module interface */
1629 
1630 static int __init
1631 ks8695_init(void)
1632 {
1633  printk(KERN_INFO "%s Ethernet driver, V%s\n",
1635 
1636  return platform_driver_register(&ks8695_driver);
1637 }
1638 
/* Module exit point: unregister the platform driver. */
static void __exit
ks8695_cleanup(void)
{
	platform_driver_unregister(&ks8695_driver);
}
1644 
1645 module_init(ks8695_init);
1646 module_exit(ks8695_cleanup);
1647 
1648 MODULE_AUTHOR("Simtec Electronics");
1649 MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
1650 MODULE_LICENSE("GPL");
1651 MODULE_ALIAS("platform:" MODULENAME);
1652 
1653 module_param(watchdog, int, 0400);
1654 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");