25 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27 #include <linux/module.h>
28 #include <linux/kernel.h>
29 #include <linux/pci.h>
31 #include <linux/netdevice.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
40 #include <linux/ipv6.h>
41 #include <linux/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/if_vlan.h>
44 #include <linux/slab.h>
/*
 * Module parameters controlling the driver's pseudo hot-plug feature.
 * All three default to -1, meaning "not specified by the user"; the
 * driver picks the actual behaviour at probe time.
 * NOTE(review): this file is a partially elided extract; the leading
 * "48"/"49"/"50" tokens are line-number artifacts of the extraction and
 * are preserved byte-for-byte here.
 */
/* Force-enable pseudo hot-plug by the driver instead of the BIOS
 * (per its MODULE_PARM_DESC: "Enable pseudo hot-plug feature manually
 * by driver instead of BIOS."). */
48 static int force_pseudohp = -1;
/* Disable the pseudo hot-plug feature entirely — TODO confirm against
 * the full source; its MODULE_PARM_DESC is not visible in this extract. */
49 static int no_pseudohp = -1;
/* Do not use the external plug signal for pseudo hot-plug
 * (per its MODULE_PARM_DESC: "Do not use external plug signal for
 * pseudo hot-plug."). */
50 static int no_extplug = -1;
53 "Enable pseudo hot-plug feature manually by driver instead of BIOS.");
58 "Do not use external plug signal for pseudo hot-plug.");
80 pr_err(
"phy(%d) read timeout : %d\n", phy, reg);
92 int phy,
int reg,
int val)
99 smi_phy_addr(phy) | smi_reg_addr(reg));
109 pr_err(
"phy(%d) write timeout : %d\n", phy, reg);
117 jme_mdio_write(jme->
dev,
123 jme_mdio_write(jme->
dev,
128 val = jme_mdio_read(jme->
dev,
132 jme_mdio_write(jme->
dev,
204 jme_reset_250A2_workaround(
struct jme_adapter *jme)
229 u32 crc = 0xCDCDCDCD;
233 jme_reset_ghc_speed(jme);
234 jme_reset_250A2_workaround(jme);
236 jme_mac_rxclk_on(jme);
237 jme_mac_txclk_on(jme);
239 jme_assert_ghc_reset(jme);
241 jme_mac_rxclk_off(jme);
242 jme_mac_txclk_off(jme);
244 jme_clear_ghc_reset(jme);
246 jme_mac_rxclk_on(jme);
247 jme_mac_txclk_on(jme);
249 jme_mac_rxclk_off(jme);
250 jme_mac_txclk_off(jme);
264 jme_setup_wakeup_frame(jme, mask, crc, i);
300 pr_err(
"eeprom reload timeout\n");
317 macaddr[0] = (val >> 0) & 0xFF;
318 macaddr[1] = (val >> 8) & 0xFF;
319 macaddr[2] = (val >> 16) & 0xFF;
320 macaddr[3] = (val >> 24) & 0xFF;
322 macaddr[4] = (val >> 0) & 0xFF;
323 macaddr[5] = (val >> 8) & 0xFF;
366 jme_set_rx_pcc(jme,
PCC_P1);
397 phylink = jme_mdio_read(jme->
dev, jme->
mii_if.phy_id, 17);
408 jme_mdio_write(jme->
dev, jme->
mii_if.phy_id, 27, 0x0004);
414 jme_mdio_write(jme->
dev, jme->
mii_if.phy_id, 27, 0x0000);
418 jme_check_link(
struct net_device *netdev,
int testonly)
428 phylink = jme_linkstat_from_phy(jme);
440 bmcr = jme_mdio_read(jme->
dev,
447 (bmcr & BMCR_SPEED100) ?
454 strcat(linkmsg,
"Forced: ");
465 phylink = jme_linkstat_from_phy(jme);
470 pr_err(
"Waiting speed resolve timeout\n");
472 strcat(linkmsg,
"ANed: ");
491 strcat(linkmsg,
"10 Mbps, ");
495 strcat(linkmsg,
"100 Mbps, ");
499 strcat(linkmsg,
"1000 Mbps, ");
519 if (is_buggy250(jme->
pdev->device, jme->
chiprev)) {
522 if (!(phylink & PHY_LINK_DUPLEX))
524 switch (phylink & PHY_LINK_SPEED_MASK) {
526 jme_set_phyfifo_8level(jme);
530 jme_set_phyfifo_5level(jme);
534 jme_set_phyfifo_8level(jme);
542 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
589 goto err_free_txring;
627 dev_kfree_skb(txbi->
skb);
686 jme_mac_txclk_on(jme);
720 pr_err(
"Disable TX engine timeout\n");
725 jme_mac_txclk_off(jme);
729 jme_set_clean_rxdesc(
struct jme_adapter *jme,
int i)
750 jme_make_new_rx_buf(
struct jme_adapter *jme,
int i)
757 skb = netdev_alloc_skb(jme->
dev,
765 if (
unlikely(pci_dma_mapping_error(jme->
pdev, mapping))) {
775 rxbi->
len = skb_tailroom(skb);
788 pci_unmap_page(jme->
pdev,
792 dev_kfree_skb(rxbi->
skb);
808 jme_free_rx_buf(jme, i);
851 goto err_free_rxring;
859 if (
unlikely(jme_make_new_rx_buf(jme, i))) {
860 jme_free_rx_resources(jme);
864 jme_set_clean_rxdesc(jme, i);
908 jme_set_unicastaddr(jme->
dev);
909 jme_set_multi(jme->
dev);
923 jme_mac_rxclk_on(jme);
958 pr_err(
"Disable RX engine timeout\n");
963 jme_mac_rxclk_off(jme);
967 jme_udpsum(
struct sk_buff *skb)
975 skb_set_network_header(skb,
ETH_HLEN);
978 (ip_hdr(skb)->ihl << 2) +
979 sizeof(
struct udphdr)))) {
980 skb_reset_network_header(skb);
983 skb_set_transport_header(skb,
984 ETH_HLEN + (ip_hdr(skb)->ihl << 2));
985 csum = udp_hdr(skb)->check;
986 skb_reset_transport_header(skb);
987 skb_reset_network_header(skb);
1001 netif_err(jme, rx_err, jme->
dev,
"TCP Checksum error\n");
1008 netif_err(jme, rx_err, jme->
dev,
"UDP Checksum error\n");
1014 netif_err(jme, rx_err, jme->
dev,
"IPv4 Checksum error\n");
1025 struct rxdesc *rxdesc = rxring->
desc;
1034 pci_dma_sync_single_for_cpu(jme->
pdev,
1039 if (
unlikely(jme_make_new_rx_buf(jme, idx))) {
1040 pci_dma_sync_single_for_device(jme->
pdev,
1057 skb_checksum_none_assert(skb);
1062 __vlan_hwaccel_put_tag(skb, vid);
1071 NET_STAT(jme).rx_bytes += framesize;
1075 jme_set_clean_rxdesc(jme, idx);
1083 struct rxdesc *rxdesc = rxring->
desc;
1097 rxdesc = rxring->
desc;
1119 limit -= desccnt - 1;
1121 for (j = i, ccnt = desccnt ; ccnt-- ; ) {
1122 jme_set_clean_rxdesc(jme, j);
1123 j = (j + 1) & (mask);
1127 jme_alloc_and_feed_skb(jme, i);
1139 return limit > 0 ? limit : 0;
1144 jme_attempt_pcc(
struct dynpcc_info *dpi,
int atmp)
1166 jme_attempt_pcc(dpi,
PCC_P3);
1169 jme_attempt_pcc(dpi,
PCC_P2);
1171 jme_attempt_pcc(dpi,
PCC_P1);
1176 jme_set_rx_pcc(jme, dpi->
attempt);
1204 phylink = jme_linkstat_from_phy(jme);
1206 if (!(phylink & PHY_LINK_UP)) {
1216 jme_pcc_tasklet(
unsigned long arg)
1222 jme_shutdown_nic(jme);
1226 if (
unlikely(!netif_carrier_ok(netdev) ||
1229 jme_stop_pcc_timer(jme);
1234 jme_dynamic_pcc(jme);
1236 jme_start_pcc_timer(jme);
1248 jme_set_rx_pcc(jme,
PCC_P1);
1252 jme_pseudo_hotplug_enabled(
struct jme_adapter *jme)
1295 jme_link_change_tasklet(
unsigned long arg)
1308 if (jme_check_link(netdev, 1) && jme->
old_mtu == netdev->
mtu)
1312 netif_stop_queue(netdev);
1313 if (jme_pseudo_hotplug_enabled(jme))
1314 jme_stop_shutdown_timer(jme);
1316 jme_stop_pcc_timer(jme);
1321 if (netif_carrier_ok(netdev)) {
1322 jme_disable_rx_engine(jme);
1323 jme_disable_tx_engine(jme);
1324 jme_reset_mac_processor(jme);
1325 jme_free_rx_resources(jme);
1326 jme_free_tx_resources(jme);
1329 jme_polling_mode(jme);
1334 jme_check_link(netdev, 0);
1335 if (netif_carrier_ok(netdev)) {
1336 rc = jme_setup_rx_resources(jme);
1338 pr_err(
"Allocating resources for RX error, Device STOPPED!\n");
1339 goto out_enable_tasklet;
1342 rc = jme_setup_tx_resources(jme);
1344 pr_err(
"Allocating resources for TX error, Device STOPPED!\n");
1345 goto err_out_free_rx_resources;
1348 jme_enable_rx_engine(jme);
1349 jme_enable_tx_engine(jme);
1351 netif_start_queue(netdev);
1354 jme_interrupt_mode(jme);
1356 jme_start_pcc_timer(jme);
1357 }
else if (jme_pseudo_hotplug_enabled(jme)) {
1358 jme_start_shutdown_timer(jme);
1361 goto out_enable_tasklet;
1363 err_out_free_rx_resources:
1364 jme_free_rx_resources(jme);
1374 jme_rx_clean_tasklet(
unsigned long arg)
1395 jme_restart_rx_engine(jme);
1401 jme_interrupt_mode(jme);
1409 jme_rx_empty_tasklet(
unsigned long arg)
1421 jme_rx_clean_tasklet(arg);
1426 jme_restart_rx_engine(jme);
1432 jme_wake_queue_if_stopped(
struct jme_adapter *jme)
1440 netif_wake_queue(jme->
dev);
1446 jme_tx_clean_tasklet(
unsigned long arg)
1454 tx_dbg(jme,
"Into txclean\n");
1475 tx_dbg(jme,
"txclean: %d+%d@%lu\n",
1476 i, ctxbi->nr_desc, jiffies);
1480 for (j = 1 ; j < ctxbi->nr_desc ; ++
j) {
1481 ttxbi = txbi + ((i +
j) & (mask));
1482 txdesc[(i +
j) & (mask)].
dw[0] = 0;
1484 pci_unmap_page(jme->
pdev,
1493 dev_kfree_skb(ctxbi->skb);
1495 cnt += ctxbi->nr_desc;
1498 ++(
NET_STAT(jme).tx_carrier_errors);
1501 NET_STAT(jme).tx_bytes += ctxbi->len;
1506 ctxbi->start_xmit = 0;
1512 i = (i + ctxbi->nr_desc) & mask;
1517 tx_dbg(jme,
"txclean: done %d@%lu\n", i, jiffies);
1521 jme_wake_queue_if_stopped(jme);
1546 jwrite32(jme,
JME_IEVE, INTR_TMINTR);
1568 jme_polling_mode(jme);
1573 if (intrstat & INTR_RX0EMP) {
1589 jme_intr(
int irq,
void *
dev_id)
1609 jme_intr_msi(jme, intrstat);
1615 jme_msi(
int irq,
void *dev_id)
1623 jme_intr_msi(jme, intrstat);
1654 if (!pci_enable_msi(jme->
pdev)) {
1664 "Unable to request %s interrupt (return: %d)\n",
1686 jme->
dev->irq = jme->
pdev->irq;
1732 jme_new_phy_on(jme);
1745 jme_new_phy_off(jme);
1756 return jme_mdio_read(jme->
dev, jme->
mii_if.phy_id,
1775 u32 ctrl1000, phy_data;
1807 u32 phy_comm0 = 0, phy_comm1 = 0;
1814 switch (jme->
pdev->device) {
1864 (
unsigned long) jme);
1866 (
unsigned long) jme);
1868 (
unsigned long) jme);
1870 (
unsigned long) jme);
1872 rc = jme_request_irq(jme);
1880 jme_set_settings(netdev, &jme->
old_ecmd);
1882 jme_reset_phy_processor(jme);
1883 jme_phy_calibration(jme);
1885 jme_reset_link(jme);
1890 netif_stop_queue(netdev);
1915 #define JME_WAIT_LINK_TIME 2000
1922 phylink = jme_linkstat_from_phy(jme);
1923 while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
1925 phylink = jme_linkstat_from_phy(jme);
1933 jme_set_100m_half(jme);
1947 netif_stop_queue(netdev);
1960 jme_disable_rx_engine(jme);
1961 jme_disable_tx_engine(jme);
1962 jme_reset_mac_processor(jme);
1963 jme_free_rx_resources(jme);
1964 jme_free_tx_resources(jme);
1979 nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1992 jme_fill_tx_map(
struct pci_dev *pdev,
1993 struct txdesc *txdesc,
2002 dmaaddr = pci_map_page(pdev,
2008 pci_dma_sync_single_for_device(pdev,
2020 (
__u64)dmaaddr & 0xFFFFFFFFUL);
2030 struct txdesc *txdesc = txring->
desc, *ctxdesc;
2033 int i, nr_frags = skb_shinfo(skb)->nr_frags;
2038 for (i = 0 ; i < nr_frags ; ++
i) {
2039 frag = &skb_shinfo(skb)->frags[
i];
2040 ctxdesc = txdesc + ((idx + i + 2) & (mask));
2041 ctxbi = txbi + ((idx + i + 2) & (mask));
2043 jme_fill_tx_map(jme->
pdev, ctxdesc, ctxbi,
2044 skb_frag_page(frag),
2048 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
2049 ctxdesc = txdesc + ((idx + 1) & (mask));
2050 ctxbi = txbi + ((idx + 1) & (mask));
2059 if (
unlikely(skb_shinfo(skb)->gso_size &&
2060 skb_header_cloned(skb) &&
2077 struct iphdr *iph = ip_hdr(skb);
2085 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2110 ip_proto = ipv6_hdr(skb)->
nexthdr;
2125 netif_err(jme, tx_err, jme->
dev,
"Error upper layer protocol\n");
2148 txdesc = (
struct txdesc *)txring->
desc + idx;
2149 txbi = txring->
bufinf + idx;
2168 if (jme_tx_tso(skb, &txdesc->
desc1.mss, &flags))
2169 jme_tx_csum(jme, skb, &flags);
2170 jme_tx_vlan(skb, &txdesc->
desc1.vlan, &flags);
2171 jme_map_tx_skb(jme, skb, idx);
2178 txbi->
nr_desc = skb_shinfo(skb)->nr_frags + 2;
2199 netif_stop_queue(jme->
dev);
2204 netif_wake_queue(jme->
dev);
2205 netif_info(jme, tx_queued, jme->
dev,
"TX Queue Fast Waked\n");
2212 netif_stop_queue(jme->
dev);
2214 "TX Queue Stopped %d@%lu\n", idx, jiffies);
2228 if (
unlikely(jme_expand_header(jme, skb))) {
2233 idx = jme_alloc_txdesc(jme, skb);
2236 netif_stop_queue(netdev);
2238 "BUG! Tx ring full when queue awake!\n");
2243 jme_fill_tx_desc(jme, skb, idx);
2250 tx_dbg(jme,
"xmit: %d+%d@%lu\n",
2251 idx, skb_shinfo(skb)->nr_frags + 2, jiffies);
2252 jme_stop_queue_if_full(jme);
2258 jme_set_unicastaddr(
struct net_device *netdev)
2263 val = (netdev->
dev_addr[3] & 0xff) << 24 |
2264 (netdev->
dev_addr[2] & 0xff) << 16 |
2265 (netdev->
dev_addr[1] & 0xff) << 8 |
2268 val = (netdev->
dev_addr[5] & 0xff) << 8 |
2274 jme_set_macaddr(
struct net_device *netdev,
void *p)
2279 if (netif_running(netdev))
2284 jme_set_unicastaddr(netdev);
2294 u32 mc_hash[2] = {};
2311 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
2325 jme_change_mtu(
struct net_device *netdev,
int new_mtu)
2337 netdev->
mtu = new_mtu;
2340 jme_restart_rx_engine(jme);
2341 jme_reset_link(jme);
2352 jme_reset_phy_processor(jme);
2354 jme_set_settings(netdev, &jme->
old_ecmd);
2359 jme_reset_link(jme);
2362 static inline void jme_pause_rx(
struct jme_adapter *jme)
2375 static inline void jme_resume_rx(
struct jme_adapter *jme)
2388 jme_set_rx_pcc(jme,
PCC_P1);
2415 for (i = 0 ; i < len ; i += 4)
2416 p[i >> 2] = jread32(jme, reg + i);
2425 for (i = 0 ; i < reg_nr ; ++
i)
2426 p16[i] = jme_mdio_read(jme->
dev, jme->
mii_if.phy_id, i);
2470 switch (jme->
dpi.cur) {
2496 if (netif_running(netdev))
2506 jme_set_rx_pcc(jme,
PCC_P1);
2507 jme_interrupt_mode(jme);
2512 jme_interrupt_mode(jme);
2519 jme_get_pauseparam(
struct net_device *netdev,
2537 jme_set_pauseparam(
struct net_device *netdev,
2577 jme_mdio_write(jme->
dev, jme->
mii_if.phy_id,
2658 if (jme->
mii_if.force_media &&
2669 jme_reset_link(jme);
2683 unsigned int duplex_chg;
2698 jme_reset_link(jme);
2699 jme_get_settings(netdev, &jme->
old_ecmd);
2730 if (netdev->
mtu > 1900)
2751 #ifdef CONFIG_NET_POLL_CONTROLLER
2754 unsigned long flags;
2757 jme_intr(dev->
irq, dev);
2766 jme_restart_an(jme);
2771 jme_smb_read(
struct jme_adapter *jme,
unsigned int addr)
2814 while ((val & SMBCSR_BUSY) && --to) {
2831 while ((val & SMBINTF_HWCMD) && --to) {
2844 jme_get_eeprom_len(
struct net_device *netdev)
2863 for (i = 0 ; i < len ; ++
i)
2864 data[i] = jme_smb_read(jme, i + offset);
2874 int i, offset = eeprom->
offset, len = eeprom->
len;
2882 for (i = 0 ; i < len ; ++
i)
2883 jme_smb_write(jme, i + offset, data[i]);
2888 static const struct ethtool_ops jme_ethtool_ops = {
2889 .get_drvinfo = jme_get_drvinfo,
2890 .get_regs_len = jme_get_regs_len,
2891 .get_regs = jme_get_regs,
2892 .get_coalesce = jme_get_coalesce,
2893 .set_coalesce = jme_set_coalesce,
2894 .get_pauseparam = jme_get_pauseparam,
2895 .set_pauseparam = jme_set_pauseparam,
2896 .get_wol = jme_get_wol,
2897 .set_wol = jme_set_wol,
2898 .get_settings = jme_get_settings,
2899 .set_settings = jme_set_settings,
2900 .get_link = jme_get_link,
2901 .get_msglevel = jme_get_msglevel,
2902 .set_msglevel = jme_set_msglevel,
2903 .nway_reset = jme_nway_reset,
2904 .get_eeprom_len = jme_get_eeprom_len,
2905 .get_eeprom = jme_get_eeprom,
2906 .set_eeprom = jme_set_eeprom,
2910 jme_pci_dma64(
struct pci_dev *pdev)
2914 if (!pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(64)))
2919 if (!pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(40)))
2923 if (!pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32)))
2934 reg26 = jme_mdio_read(jme->
dev, jme->
mii_if.phy_id, 26);
2935 jme_mdio_write(jme->
dev, jme->
mii_if.phy_id, 26, reg26 | 0x1000);
2952 .ndo_open = jme_open,
2953 .ndo_stop = jme_close,
2955 .ndo_do_ioctl = jme_ioctl,
2956 .ndo_start_xmit = jme_start_xmit,
2957 .ndo_set_mac_address = jme_set_macaddr,
2958 .ndo_set_rx_mode = jme_set_multi,
2959 .ndo_change_mtu = jme_change_mtu,
2960 .ndo_tx_timeout = jme_tx_timeout,
2961 .ndo_fix_features = jme_fix_features,
2962 .ndo_set_features = jme_set_features,
2963 #ifdef CONFIG_NET_POLL_CONTROLLER
2964 .ndo_poll_controller = jme_netpoll,
2969 jme_init_one(
struct pci_dev *pdev,
2972 int rc = 0, using_dac,
i;
2986 pr_err(
"Cannot enable PCI device\n");
2990 using_dac = jme_pci_dma64(pdev);
2991 if (using_dac < 0) {
2992 pr_err(
"Cannot set PCI DMA Mask\n");
2994 goto err_out_disable_pdev;
2998 pr_err(
"No PCI resource region found\n");
3000 goto err_out_disable_pdev;
3005 pr_err(
"Cannot obtain PCI resource region\n");
3006 goto err_out_disable_pdev;
3014 netdev = alloc_etherdev(
sizeof(*jme));
3017 goto err_out_release_regions;
3039 pci_set_drvdata(pdev, netdev);
3044 jme = netdev_priv(netdev);
3059 pr_err(
"Mapping PCI resource region error\n");
3061 goto err_out_free_netdev;
3067 }
else if (force_pseudohp) {
3085 (
unsigned long) jme);
3103 switch (jme->mrrs) {
3118 jme_check_hw_ver(jme);
3119 jme->
mii_if.dev = netdev;
3122 for (i = 1 ; i < 32 ; ++
i) {
3123 bmcr = jme_mdio_read(netdev, i,
MII_BMCR);
3124 bmsr = jme_mdio_read(netdev, i,
MII_BMSR);
3125 if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
3131 if (!jme->
mii_if.phy_id) {
3133 pr_err(
"Can not find phy_id\n");
3142 jme->
mii_if.supports_gmii =
true;
3144 jme->
mii_if.supports_gmii =
false;
3145 jme->
mii_if.phy_id_mask = 0x1F;
3146 jme->
mii_if.reg_num_mask = 0x1F;
3147 jme->
mii_if.mdio_read = jme_mdio_read;
3148 jme->
mii_if.mdio_write = jme_mdio_write;
3154 jme_set_phyfifo_5level(jme);
3163 jme_reset_mac_processor(jme);
3164 rc = jme_reload_eeprom(jme);
3166 pr_err(
"Reload eeprom for reading MAC Address error\n");
3169 jme_load_macaddr(netdev);
3178 pr_err(
"Cannot register net device\n");
3182 netif_info(jme, probe, jme->
dev,
"%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
3184 "JMC250 Gigabit Ethernet" :
3186 "JMC260 Fast Ethernet" :
"Unknown",
3187 (jme->
fpgaver != 0) ?
" (FPGA)" :
"",
3195 err_out_free_netdev:
3196 pci_set_drvdata(pdev,
NULL);
3198 err_out_release_regions:
3200 err_out_disable_pdev:
3207 jme_remove_one(
struct pci_dev *pdev)
3209 struct net_device *netdev = pci_get_drvdata(pdev);
3214 pci_set_drvdata(pdev,
NULL);
3222 jme_shutdown(
struct pci_dev *pdev)
3224 struct net_device *netdev = pci_get_drvdata(pdev);
3227 jme_powersave_phy(jme);
3231 #ifdef CONFIG_PM_SLEEP
3236 struct net_device *netdev = pci_get_drvdata(pdev);
3239 if (!netif_running(netdev))
3245 netif_stop_queue(netdev);
3252 if (netif_carrier_ok(netdev)) {
3254 jme_polling_mode(jme);
3256 jme_stop_pcc_timer(jme);
3257 jme_disable_rx_engine(jme);
3258 jme_disable_tx_engine(jme);
3259 jme_reset_mac_processor(jme);
3260 jme_free_rx_resources(jme);
3261 jme_free_tx_resources(jme);
3270 jme_powersave_phy(jme);
3276 jme_resume(
struct device *dev)
3279 struct net_device *netdev = pci_get_drvdata(pdev);
3282 if (!netif_running(netdev))
3288 jme_set_settings(netdev, &jme->
old_ecmd);
3290 jme_reset_phy_processor(jme);
3291 jme_phy_calibration(jme);
3298 jme_reset_link(jme);
3304 #define JME_PM_OPS (&jme_pm_ops)
3308 #define JME_PM_OPS NULL
3319 .id_table = jme_pci_tbl,
3320 .probe = jme_init_one,
3322 .shutdown = jme_shutdown,
3327 jme_init_module(
void)
3330 return pci_register_driver(&jme_driver);
3334 jme_cleanup_module(
void)