Linux Kernel 3.7.1
ide-io.c
1 /*
2  * IDE I/O functions
3  *
4  * Basic PIO and command management functionality.
5  *
6  * This code was split off from ide.c. See ide.c for history and original
7  * copyrights.
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License as published by the
11  * Free Software Foundation; either version 2, or (at your option) any
12  * later version.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * General Public License for more details.
18  *
19  * For the avoidance of doubt the "preferred form" of this code is one which
20  * is in an open non patent encumbered format. Where cryptographic key signing
21  * forms part of the process of creating an executable the information
22  * including keys needed to generate an equivalently functional executable
23  * are deemed to be part of the source code.
24  */
25 
26 
27 #include <linux/module.h>
28 #include <linux/types.h>
29 #include <linux/string.h>
30 #include <linux/kernel.h>
31 #include <linux/timer.h>
32 #include <linux/mm.h>
33 #include <linux/interrupt.h>
34 #include <linux/major.h>
35 #include <linux/errno.h>
36 #include <linux/genhd.h>
37 #include <linux/blkpg.h>
38 #include <linux/slab.h>
39 #include <linux/init.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
42 #include <linux/ide.h>
43 #include <linux/completion.h>
44 #include <linux/reboot.h>
45 #include <linux/cdrom.h>
46 #include <linux/seq_file.h>
47 #include <linux/device.h>
48 #include <linux/kmod.h>
49 #include <linux/scatterlist.h>
50 #include <linux/bitops.h>
51 
52 #include <asm/byteorder.h>
53 #include <asm/irq.h>
54 #include <asm/uaccess.h>
55 #include <asm/io.h>
56 
57 int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
58  unsigned int nr_bytes)
59 {
60  /*
61  * decide whether to reenable DMA -- 3 is a random magic for now,
62  * if we DMA timeout more than 3 times, just stay in PIO
63  */
64  if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
65  drive->retry_pio <= 3) {
66  drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
67  ide_dma_on(drive);
68  }
69 
70  return blk_end_request(rq, error, nr_bytes);
71 }
72 EXPORT_SYMBOL_GPL(ide_end_rq);
73 
74 void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
75 {
76  const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
77  struct ide_taskfile *tf = &cmd->tf;
78  struct request *rq = cmd->rq;
79  u8 tf_cmd = tf->command;
80 
81  tf->error = err;
82  tf->status = stat;
83 
84  if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
85  u8 data[2];
86 
87  tp_ops->input_data(drive, cmd, data, 2);
88 
89  cmd->tf.data = data[0];
90  cmd->hob.data = data[1];
91  }
92 
93  ide_tf_readback(drive, cmd);
94 
95  if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
96  tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
97  if (tf->lbal != 0xc4) {
98  printk(KERN_ERR "%s: head unload failed!\n",
99  drive->name);
100  ide_tf_dump(drive->name, cmd);
101  } else
102  drive->dev_flags |= IDE_DFLAG_PARKED;
103  }
104 
105  if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
106  struct ide_cmd *orig_cmd = rq->special;
107 
108  if (cmd->tf_flags & IDE_TFLAG_DYN)
109  kfree(orig_cmd);
110  else
111  memcpy(orig_cmd, cmd, sizeof(*cmd));
112  }
113 }
114 
115 int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
116 {
117  ide_hwif_t *hwif = drive->hwif;
118  struct request *rq = hwif->rq;
119  int rc;
120 
121  /*
122  * if failfast is set on a request, override number of sectors
123  * and complete the whole request right now
124  */
125  if (blk_noretry_request(rq) && error <= 0)
126  nr_bytes = blk_rq_sectors(rq) << 9;
127 
128  rc = ide_end_rq(drive, rq, error, nr_bytes);
129  if (rc == 0)
130  hwif->rq = NULL;
131 
132  return rc;
133 }
134 EXPORT_SYMBOL(ide_complete_rq);
135 
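/*
 * Illustrative sketch (not part of ide-io.c, helper name is hypothetical):
 * completing 512 bytes of the request currently owned by the port.  When
 * ide_complete_rq() returns 0 the request is fully finished and hwif->rq
 * has already been cleared, so the port is free to pick up the next request.
 */
static void example_complete_one_sector(ide_drive_t *drive)
{
	if (ide_complete_rq(drive, 0, 512) == 0)
		printk(KERN_DEBUG "%s: request completed\n", drive->name);
}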
136 void ide_kill_rq(ide_drive_t *drive, struct request *rq)
137 {
138  u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk;
139  u8 media = drive->media;
140 
141  drive->failed_pc = NULL;
142 
143  if ((media == ide_floppy || media == ide_tape) && drv_req) {
144  rq->errors = 0;
145  } else {
146  if (media == ide_tape)
147  rq->errors = IDE_DRV_ERROR_GENERAL;
148  else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
149  rq->errors = -EIO;
150  }
151 
152  ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
153 }
154 
155 static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
156 {
157  tf->nsect = drive->sect;
158  tf->lbal = drive->sect;
159  tf->lbam = drive->cyl;
160  tf->lbah = drive->cyl >> 8;
161  tf->device = (drive->head - 1) | drive->select;
162  tf->command = ATA_CMD_INIT_DEV_PARAMS;
163 }
164 
165 static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
166 {
167  tf->nsect = drive->sect;
168  tf->command = ATA_CMD_RESTORE;
169 }
170 
171 static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
172 {
173  tf->nsect = drive->mult_req;
174  tf->command = ATA_CMD_SET_MULTI;
175 }
176 
185 static ide_startstop_t do_special(ide_drive_t *drive)
186 {
187  struct ide_cmd cmd;
188 
189 #ifdef DEBUG
190  printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
191  drive->special_flags);
192 #endif
193  if (drive->media != ide_disk) {
194  drive->special_flags = 0;
195  drive->mult_req = 0;
196  return ide_stopped;
197  }
198 
199  memset(&cmd, 0, sizeof(cmd));
200  cmd.protocol = ATA_PROT_NODATA;
201 
202  if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
203  drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
204  ide_tf_set_specify_cmd(drive, &cmd.tf);
205  } else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
206  drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
207  ide_tf_set_restore_cmd(drive, &cmd.tf);
208  } else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
209  drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
210  ide_tf_set_setmult_cmd(drive, &cmd.tf);
211  } else
212  BUG();
213 
214  cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
215  cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
216  cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;
217 
218  do_rw_taskfile(drive, &cmd);
219 
220  return ide_started;
221 }
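/*
 * Illustrative sketch (not part of ide-io.c, helper name is hypothetical):
 * reset/resume code elsewhere in the driver marks which settings have to be
 * replayed, and do_special() above then issues one taskfile per pass until
 * drive->special_flags is empty again.
 */
static void example_mark_settings_for_replay(ide_drive_t *drive)
{
	drive->special_flags |= IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE;
	if (drive->mult_req)	/* only if multi-sector mode is wanted */
		drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
}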
222 
223 void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
224 {
225  ide_hwif_t *hwif = drive->hwif;
226  struct scatterlist *sg = hwif->sg_table;
227  struct request *rq = cmd->rq;
228 
229  cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
230 }
231 EXPORT_SYMBOL_GPL(ide_map_sg);
232 
233 void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
234 {
235  cmd->nbytes = cmd->nleft = nr_bytes;
236  cmd->cursg_ofs = 0;
237  cmd->cursg = NULL;
238 }
239 EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
240 
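/*
 * Illustrative sketch (not part of ide-io.c, helper name is hypothetical):
 * the usual call order when preparing a data-phase command, mirroring
 * execute_drive_cmd() below -- size the transfer first, then build the
 * scatter/gather table for it.
 */
static void example_prepare_data_cmd(ide_drive_t *drive, struct ide_cmd *cmd,
				     struct request *rq)
{
	ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);	/* bytes to transfer */
	ide_map_sg(drive, cmd);				/* fill hwif->sg_table */
}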
253 static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
254  struct request *rq)
255 {
256  struct ide_cmd *cmd = rq->special;
257 
258  if (cmd) {
259  if (cmd->protocol == ATA_PROT_PIO) {
260  ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
261  ide_map_sg(drive, cmd);
262  }
263 
264  return do_rw_taskfile(drive, cmd);
265  }
266 
267  /*
268  * NULL is actually a valid way of waiting for
269  * all current requests to be flushed from the queue.
270  */
271 #ifdef DEBUG
272  printk("%s: DRIVE_CMD (null)\n", drive->name);
273 #endif
274  rq->errors = 0;
275  ide_complete_rq(drive, 0, blk_rq_bytes(rq));
276 
277  return ide_stopped;
278 }
279 
280 static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
281 {
282  u8 cmd = rq->cmd[0];
283 
284  switch (cmd) {
285  case REQ_PARK_HEADS:
286  case REQ_UNPARK_HEADS:
287  return ide_do_park_unpark(drive, rq);
288  case REQ_DEVSET_EXEC:
289  return ide_do_devset(drive, rq);
290  case REQ_DRIVE_RESET:
291  return ide_do_reset(drive);
292  default:
293  BUG();
294  }
295 }
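/*
 * Illustrative sketch (not part of ide-io.c): how a caller can queue one of
 * the special requests dispatched above, here a drive reset.  This roughly
 * mirrors the ioctl path elsewhere in the driver; the helper name is
 * hypothetical and error handling is reduced to the bare minimum.
 */
static int example_queue_drive_reset(ide_drive_t *drive)
{
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_len = 1;
	rq->cmd[0] = REQ_DRIVE_RESET;
	if (blk_execute_rq(drive->queue, NULL, rq, 1))
		ret = rq->errors;
	blk_put_request(rq);
	return ret;
}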
296 
306 static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
307 {
308  ide_startstop_t startstop;
309 
310  BUG_ON(!(rq->cmd_flags & REQ_STARTED));
311 
312 #ifdef DEBUG
313  printk("%s: start_request: current=0x%08lx\n",
314  drive->hwif->name, (unsigned long) rq);
315 #endif
316 
317  /* bail early if we've exceeded max_failures */
318  if (drive->max_failures && (drive->failures > drive->max_failures)) {
319  rq->cmd_flags |= REQ_FAILED;
320  goto kill_rq;
321  }
322 
323  if (blk_pm_request(rq))
324  ide_check_pm_state(drive, rq);
325 
326  drive->hwif->tp_ops->dev_select(drive);
327  if (ide_wait_stat(&startstop, drive, drive->ready_stat,
328      ATA_BUSY | ATA_DRQ | ATA_ERR, WAIT_READY)) {
329  printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
330  return startstop;
331  }
332 
333  if (drive->special_flags == 0) {
334  struct ide_driver *drv;
335 
336  /*
337  * We reset the drive so we need to issue a SETFEATURES.
338  * Do it _after_ do_special() restored device parameters.
339  */
340  if (drive->current_speed == 0xff)
341  ide_config_drive_speed(drive, drive->desired_speed);
342 
343  if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
344  return execute_drive_cmd(drive, rq);
345  else if (blk_pm_request(rq)) {
346  struct request_pm_state *pm = rq->special;
347 #ifdef DEBUG_PM
348  printk("%s: start_power_step(step: %d)\n",
349  drive->name, pm->pm_step);
350 #endif
351  startstop = ide_start_power_step(drive, rq);
352  if (startstop == ide_stopped &&
353  pm->pm_step == IDE_PM_COMPLETED)
354  ide_complete_pm_rq(drive, rq);
355  return startstop;
356  } else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_SPECIAL)
357  /*
358  * TODO: Once all ULDs have been modified to
359  * check for specific op codes rather than
360  * blindly accepting any special request, the
361  * check for ->rq_disk above may be replaced
362  * by a more suitable mechanism or even
363  * dropped entirely.
364  */
365  return ide_special_rq(drive, rq);
366 
367  drv = *(struct ide_driver **)rq->rq_disk->private_data;
368 
369  return drv->do_request(drive, rq, blk_rq_pos(rq));
370  }
371  return do_special(drive);
372 kill_rq:
373  ide_kill_rq(drive, rq);
374  return ide_stopped;
375 }
376 
386 void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
387 {
388  if (timeout > WAIT_WORSTCASE)
389  timeout = WAIT_WORSTCASE;
390  drive->sleep = timeout + jiffies;
391  drive->dev_flags |= IDE_DFLAG_SLEEPING;
392 }
393 EXPORT_SYMBOL(ide_stall_queue);
394 
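/*
 * Illustrative sketch (not part of ide-io.c, helper name is hypothetical):
 * a device driver that wants the port to leave the drive alone for a short
 * while can stall the queue; do_ide_request() below then keeps replugging
 * the queue until drive->sleep has passed.
 */
static void example_back_off(ide_drive_t *drive)
{
	ide_stall_queue(drive, WAIT_MIN_SLEEP);	/* short minimum delay from <linux/ide.h> */
}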
395 static inline int ide_lock_port(ide_hwif_t *hwif)
396 {
397  if (hwif->busy)
398  return 1;
399 
400  hwif->busy = 1;
401 
402  return 0;
403 }
404 
405 static inline void ide_unlock_port(ide_hwif_t *hwif)
406 {
407  hwif->busy = 0;
408 }
409 
410 static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
411 {
412  int rc = 0;
413 
414  if (host->host_flags & IDE_HFLAG_SERIALIZE) {
415  rc = test_and_set_bit_lock(0, &host->host_busy);
416  if (rc == 0) {
417  if (host->get_lock)
418  host->get_lock(ide_intr, hwif);
419  }
420  }
421  return rc;
422 }
423 
424 static inline void ide_unlock_host(struct ide_host *host)
425 {
426  if (host->host_flags & IDE_HFLAG_SERIALIZE) {
427  if (host->release_lock)
428  host->release_lock();
429  clear_bit_unlock(0, &host->host_busy);
430  }
431 }
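/*
 * Illustrative sketch (not part of ide-io.c, helper name is hypothetical):
 * the acquisition order used by do_ide_request() below -- host lock first
 * (only taken for IDE_HFLAG_SERIALIZE hosts), then hwif->lock, then the
 * per-port busy flag; releases happen in the reverse order.
 */
static int example_lock_order(struct ide_host *host, ide_hwif_t *hwif)
{
	if (ide_lock_host(host, hwif))
		return -EBUSY;			/* another port of this host is active */

	spin_lock_irq(&hwif->lock);
	if (ide_lock_port(hwif)) {		/* port already busy with a request */
		spin_unlock_irq(&hwif->lock);
		ide_unlock_host(host);
		return -EBUSY;
	}
	spin_unlock_irq(&hwif->lock);
	return 0;				/* caller now owns the port */
}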
432 
433 static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
434 {
435  if (rq)
436  blk_requeue_request(q, rq);
437  if (rq || blk_peek_request(q)) {
438  /* Use 3ms as that was the old plug delay */
439  blk_delay_queue(q, 3);
440  }
441 }
442 
443 void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
444 {
445  struct request_queue *q = drive->queue;
446  unsigned long flags;
447 
448  spin_lock_irqsave(q->queue_lock, flags);
449  __ide_requeue_and_plug(q, rq);
450  spin_unlock_irqrestore(q->queue_lock, flags);
451 }
452 
453 /*
454  * Issue a new request to a device.
455  */
456 void do_ide_request(struct request_queue *q)
457 {
458  ide_drive_t *drive = q->queuedata;
459  ide_hwif_t *hwif = drive->hwif;
460  struct ide_host *host = hwif->host;
461  struct request *rq = NULL;
462  ide_startstop_t startstop;
463  unsigned long queue_run_ms = 3; /* old plug delay */
464 
465  spin_unlock_irq(q->queue_lock);
466 
467  /* HLD do_request() callback might sleep, make sure it's okay */
468  might_sleep();
469 
470  if (ide_lock_host(host, hwif))
471  goto plug_device_2;
472 
473  spin_lock_irq(&hwif->lock);
474 
475  if (!ide_lock_port(hwif)) {
476  ide_hwif_t *prev_port;
477 
478  WARN_ON_ONCE(hwif->rq);
479 repeat:
480  prev_port = hwif->host->cur_port;
481  if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
482  time_after(drive->sleep, jiffies)) {
483  unsigned long left = drive->sleep - jiffies;
484 
485  queue_run_ms = jiffies_to_msecs(left + 1);
486  ide_unlock_port(hwif);
487  goto plug_device;
488  }
489 
490  if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
491  hwif != prev_port) {
492  ide_drive_t *cur_dev =
493  prev_port ? prev_port->cur_dev : NULL;
494 
495  /*
496  * set nIEN for previous port, drives in the
497  * quirk list may not like intr setups/cleanups
498  */
499  if (cur_dev &&
500  (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
501  prev_port->tp_ops->write_devctl(prev_port,
502  ATA_NIEN |
503      ATA_DEVCTL_OBS);
504 
505  hwif->host->cur_port = hwif;
506  }
507  hwif->cur_dev = drive;
508  drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
509 
510  spin_unlock_irq(&hwif->lock);
511  spin_lock_irq(q->queue_lock);
512  /*
513  * we know that the queue isn't empty, but this can happen
514  * if the q->prep_rq_fn() decides to kill a request
515  */
516  if (!rq)
517  rq = blk_fetch_request(drive->queue);
518 
519  spin_unlock_irq(q->queue_lock);
520  spin_lock_irq(&hwif->lock);
521 
522  if (!rq) {
523  ide_unlock_port(hwif);
524  goto out;
525  }
526 
527  /*
528  * Sanity: don't accept a request that isn't a PM request
529  * if we are currently power managed. This is very important as
530  * blk_stop_queue() doesn't prevent the blk_fetch_request()
531  * above to return us whatever is in the queue. Since we call
532  * ide_do_request() ourselves, we end up taking requests while
533  * the queue is blocked...
534  *
535  * We let requests forced at head of queue with ide-preempt
536  * though. I hope that doesn't happen too much, hopefully not
537  * unless the subdriver triggers such a thing in its own PM
538  * state machine.
539  */
540  if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
541  blk_pm_request(rq) == 0 &&
542  (rq->cmd_flags & REQ_PREEMPT) == 0) {
543  /* there should be no pending command at this point */
544  ide_unlock_port(hwif);
545  goto plug_device;
546  }
547 
548  hwif->rq = rq;
549 
550  spin_unlock_irq(&hwif->lock);
551  startstop = start_request(drive, rq);
552  spin_lock_irq(&hwif->lock);
553 
554  if (startstop == ide_stopped) {
555  rq = hwif->rq;
556  hwif->rq = NULL;
557  goto repeat;
558  }
559  } else
560  goto plug_device;
561 out:
562  spin_unlock_irq(&hwif->lock);
563  if (rq == NULL)
564  ide_unlock_host(host);
565  spin_lock_irq(q->queue_lock);
566  return;
567 
568 plug_device:
569  spin_unlock_irq(&hwif->lock);
570  ide_unlock_host(host);
571 plug_device_2:
572  spin_lock_irq(q->queue_lock);
573  __ide_requeue_and_plug(q, rq);
574 }
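/*
 * Illustrative sketch (not part of ide-io.c, helper name is hypothetical):
 * do_ide_request() is the request_fn that the probe code hands to the block
 * layer when it creates drive->queue, roughly as below; q->queuedata is what
 * the function reads back at its top.  NUMA node and error handling omitted.
 */
static struct request_queue *example_alloc_queue(ide_drive_t *drive)
{
	struct request_queue *q = blk_init_queue(do_ide_request, NULL);

	if (q)
		q->queuedata = drive;
	return q;
}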
575 
576 static int drive_is_ready(ide_drive_t *drive)
577 {
578  ide_hwif_t *hwif = drive->hwif;
579  u8 stat = 0;
580 
581  if (drive->waiting_for_dma)
582  return hwif->dma_ops->dma_test_irq(drive);
583 
584  if (hwif->io_ports.ctl_addr &&
585  (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
586  stat = hwif->tp_ops->read_altstatus(hwif);
587  else
588  /* Note: this may clear a pending IRQ!! */
589  stat = hwif->tp_ops->read_status(hwif);
590 
591  if (stat & ATA_BUSY)
592  /* drive busy: definitely not interrupting */
593  return 0;
594 
595  /* drive ready: *might* be interrupting */
596  return 1;
597 }
598 
613 void ide_timer_expiry (unsigned long data)
614 {
615  ide_hwif_t *hwif = (ide_hwif_t *)data;
616  ide_drive_t *uninitialized_var(drive);
617  ide_handler_t *handler;
618  unsigned long flags;
619  int wait = -1;
620  int plug_device = 0;
621  struct request *uninitialized_var(rq_in_flight);
622 
623  spin_lock_irqsave(&hwif->lock, flags);
624 
625  handler = hwif->handler;
626 
627  if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
628  /*
629  * Either a marginal timeout occurred
630  * (got the interrupt just as timer expired),
631  * or we were "sleeping" to give other devices a chance.
632  * Either way, we don't really want to complain about anything.
633  */
634  } else {
635  ide_expiry_t *expiry = hwif->expiry;
636  ide_startstop_t startstop = ide_stopped;
637 
638  drive = hwif->cur_dev;
639 
640  if (expiry) {
641  wait = expiry(drive);
642  if (wait > 0) { /* continue */
643  /* reset timer */
644  hwif->timer.expires = jiffies + wait;
645  hwif->req_gen_timer = hwif->req_gen;
646  add_timer(&hwif->timer);
647  spin_unlock_irqrestore(&hwif->lock, flags);
648  return;
649  }
650  }
651  hwif->handler = NULL;
652  hwif->expiry = NULL;
653  /*
654  * We need to simulate a real interrupt when invoking
655  * the handler() function, which means we need to
656  * globally mask the specific IRQ:
657  */
658  spin_unlock(&hwif->lock);
659  /* disable_irq_nosync ?? */
660  disable_irq(hwif->irq);
661  /* local CPU only, as if we were handling an interrupt */
662  local_irq_disable();
663  if (hwif->polling) {
664  startstop = handler(drive);
665  } else if (drive_is_ready(drive)) {
666  if (drive->waiting_for_dma)
667  hwif->dma_ops->dma_lost_irq(drive);
668  if (hwif->port_ops && hwif->port_ops->clear_irq)
669  hwif->port_ops->clear_irq(drive);
670 
671  printk(KERN_WARNING "%s: lost interrupt\n",
672  drive->name);
673  startstop = handler(drive);
674  } else {
675  if (drive->waiting_for_dma)
676  startstop = ide_dma_timeout_retry(drive, wait);
677  else
678  startstop = ide_error(drive, "irq timeout",
679  hwif->tp_ops->read_status(hwif));
680  }
681  spin_lock_irq(&hwif->lock);
682  enable_irq(hwif->irq);
683  if (startstop == ide_stopped && hwif->polling == 0) {
684  rq_in_flight = hwif->rq;
685  hwif->rq = NULL;
686  ide_unlock_port(hwif);
687  plug_device = 1;
688  }
689  }
690  spin_unlock_irqrestore(&hwif->lock, flags);
691 
692  if (plug_device) {
693  ide_unlock_host(hwif->host);
694  ide_requeue_and_plug(drive, rq_in_flight);
695  }
696 }
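/*
 * Illustrative sketch (not part of ide-io.c, helper name is hypothetical):
 * the PIO/DMA state machines arm this timer roughly as below, under
 * hwif->lock, when they install an interrupt handler (compare
 * __ide_set_handler() in ide-iops.c).  ide_timer_expiry() above only acts
 * when req_gen_timer still matches req_gen, i.e. no interrupt has been
 * serviced since the timer was set.
 */
static void example_arm_handler(ide_hwif_t *hwif, ide_handler_t *handler,
				unsigned int timeout)
{
	hwif->handler = handler;
	hwif->timer.expires = jiffies + timeout;
	hwif->req_gen_timer = hwif->req_gen;
	add_timer(&hwif->timer);
}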
697 
727 static void unexpected_intr(int irq, ide_hwif_t *hwif)
728 {
729  u8 stat = hwif->tp_ops->read_status(hwif);
730 
731  if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
732  /* Try to not flood the console with msgs */
733  static unsigned long last_msgtime, count;
734  ++count;
735 
736  if (time_after(jiffies, last_msgtime + HZ)) {
737  last_msgtime = jiffies;
738  printk(KERN_ERR "%s: unexpected interrupt, "
739  "status=0x%02x, count=%ld\n",
740  hwif->name, stat, count);
741  }
742  }
743 }
744 
770 irqreturn_t ide_intr (int irq, void *dev_id)
771 {
772  ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
773  struct ide_host *host = hwif->host;
774  ide_drive_t *uninitialized_var(drive);
775  ide_handler_t *handler;
776  unsigned long flags;
777  ide_startstop_t startstop;
778  irqreturn_t irq_ret = IRQ_NONE;
779  int plug_device = 0;
780  struct request *uninitialized_var(rq_in_flight);
781 
782  if (host->host_flags & IDE_HFLAG_SERIALIZE) {
783  if (hwif != host->cur_port)
784  goto out_early;
785  }
786 
787  spin_lock_irqsave(&hwif->lock, flags);
788 
789  if (hwif->port_ops && hwif->port_ops->test_irq &&
790  hwif->port_ops->test_irq(hwif) == 0)
791  goto out;
792 
793  handler = hwif->handler;
794 
795  if (handler == NULL || hwif->polling) {
796  /*
797  * Not expecting an interrupt from this drive.
798  * That means this could be:
799  * (1) an interrupt from another PCI device
800  * sharing the same PCI INT# as us.
801  * or (2) a drive just entered sleep or standby mode,
802  * and is interrupting to let us know.
803  * or (3) a spurious interrupt of unknown origin.
804  *
805  * For PCI, we cannot tell the difference,
806  * so in that case we just ignore it and hope it goes away.
807  */
808  if ((host->irq_flags & IRQF_SHARED) == 0) {
809  /*
810  * Probably not a shared PCI interrupt,
811  * so we can safely try to do something about it:
812  */
813  unexpected_intr(irq, hwif);
814  } else {
815  /*
816  * Whack the status register, just in case
817  * we have a leftover pending IRQ.
818  */
819  (void)hwif->tp_ops->read_status(hwif);
820  }
821  goto out;
822  }
823 
824  drive = hwif->cur_dev;
825 
826  if (!drive_is_ready(drive))
827  /*
828  * This happens regularly when we share a PCI IRQ with
829  * another device. Unfortunately, it can also happen
830  * with some buggy drives that trigger the IRQ before
831  * their status register is up to date. Hopefully we have
832  * enough advance overhead that the latter isn't a problem.
833  */
834  goto out;
835 
836  hwif->handler = NULL;
837  hwif->expiry = NULL;
838  hwif->req_gen++;
839  del_timer(&hwif->timer);
840  spin_unlock(&hwif->lock);
841 
842  if (hwif->port_ops && hwif->port_ops->clear_irq)
843  hwif->port_ops->clear_irq(drive);
844 
845  if (drive->dev_flags & IDE_DFLAG_UNMASK)
846   local_irq_enable_in_hardirq();
847 
848  /* service this interrupt, may set handler for next interrupt */
849  startstop = handler(drive);
850 
851  spin_lock_irq(&hwif->lock);
852  /*
853  * Note that handler() may have set things up for another
854  * interrupt to occur soon, but it cannot happen until
855  * we exit from this routine, because it will be the
856  * same irq as is currently being serviced here, and Linux
857  * won't allow another of the same (on any CPU) until we return.
858  */
859  if (startstop == ide_stopped && hwif->polling == 0) {
860  BUG_ON(hwif->handler);
861  rq_in_flight = hwif->rq;
862  hwif->rq = NULL;
863  ide_unlock_port(hwif);
864  plug_device = 1;
865  }
866  irq_ret = IRQ_HANDLED;
867 out:
868  spin_unlock_irqrestore(&hwif->lock, flags);
869 out_early:
870  if (plug_device) {
871  ide_unlock_host(hwif->host);
872  ide_requeue_and_plug(drive, rq_in_flight);
873  }
874 
875  return irq_ret;
876 }
877 EXPORT_SYMBOL_GPL(ide_intr);
878 
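/*
 * Illustrative sketch (not part of ide-io.c, helper name is hypothetical):
 * the probe code registers ide_intr() roughly like this, passing the hwif as
 * dev_id -- which is why the handler above casts dev_id straight back to
 * ide_hwif_t *.  Hosts that provide their own irq_handler differ slightly.
 */
static int example_register_port_irq(ide_hwif_t *hwif)
{
	return request_irq(hwif->irq, ide_intr, hwif->host->irq_flags,
			   hwif->name, hwif);
}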
879 void ide_pad_transfer(ide_drive_t *drive, int write, int len)
880 {
881  ide_hwif_t *hwif = drive->hwif;
882  u8 buf[4] = { 0 };
883 
884  while (len > 0) {
885  if (write)
886  hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
887  else
888  hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
889  len -= 4;
890  }
891 }
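/*
 * Illustrative sketch (not part of ide-io.c, helper and byte count are
 * hypothetical): ATAPI-style code uses this to finish a transfer whose
 * length does not match what the device expects -- reading and discarding
 * (write == 0) or sending zero padding (write == 1) for the leftover bytes.
 */
static void example_drain_leftover(ide_drive_t *drive, unsigned int bytes_left)
{
	ide_pad_transfer(drive, 0, bytes_left);	/* drain: read and discard */
}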