Linux Kernel 3.7.1
main.c
1 /*
2  *
3  * Intel Management Engine Interface (Intel MEI) Linux driver
4  * Copyright (c) 2003-2012, Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  * more details.
14  *
15  */
16 
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/device.h>
23 #include <linux/fs.h>
24 #include <linux/errno.h>
25 #include <linux/types.h>
26 #include <linux/fcntl.h>
27 #include <linux/aio.h>
28 #include <linux/pci.h>
29 #include <linux/poll.h>
30 #include <linux/init.h>
31 #include <linux/ioctl.h>
32 #include <linux/cdev.h>
33 #include <linux/sched.h>
34 #include <linux/uuid.h>
35 #include <linux/compat.h>
36 #include <linux/jiffies.h>
37 #include <linux/interrupt.h>
38 #include <linux/miscdevice.h>
39 
40 #include "mei_dev.h"
41 #include <linux/mei.h>
42 #include "interface.h"
43 
44 /* AMT device is a singleton on the platform */
45 static struct pci_dev *mei_pdev;
46 
47 /* mei_pci_tbl - PCI Device ID Table */
48 static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
82 
83  /* required last entry */
84  {0, }
85 };
86 
87 MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
88 
89 static DEFINE_MUTEX(mei_mutex);
90 
91 
105 static bool mei_clear_list(struct mei_device *dev,
106  struct file *file, struct list_head *mei_cb_list)
107 {
108  struct mei_cl_cb *cb_pos = NULL;
109  struct mei_cl_cb *cb_next = NULL;
110  struct file *file_temp;
111  bool removed = false;
112 
113  /* iterate over all list members */
114  list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, cb_list) {
115  file_temp = (struct file *)cb_pos->file_object;
116  /* check if list member associated with a file */
117  if (file_temp == file) {
118  /* remove member from the list */
119  list_del(&cb_pos->cb_list);
120  /* check if cb equal to current iamthif cb */
121  if (dev->iamthif_current_cb == cb_pos) {
122  dev->iamthif_current_cb = NULL;
123  /* send flow control to iamthif client */
124  mei_send_flow_control(dev, &dev->iamthif_cl);
125  }
126  /* free all allocated buffers */
127  mei_free_cb_private(cb_pos);
128  cb_pos = NULL;
129  removed = true;
130  }
131  }
132  return removed;
133 }
134 
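
mei_clear_list above deletes entries while it walks the list, which is only safe with the _safe iterator: it caches the next node before the current one is removed and freed. A minimal, self-contained sketch of the same pattern follows; the item/pending/owner names are illustrative and not part of this driver.

/* Illustrative sketch of the delete-while-iterating pattern used above. */
#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head link;
	void *owner;
};

static void drop_owned(struct list_head *pending, void *owner)
{
	struct item *pos, *next;

	/* _safe keeps a copy of the next pointer, so pos may be freed here */
	list_for_each_entry_safe(pos, next, pending, link) {
		if (pos->owner == owner) {
			list_del(&pos->link);
			kfree(pos);
		}
	}
}
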
146 static bool mei_clear_lists(struct mei_device *dev, struct file *file)
147 {
148  bool removed = false;
149 
150  /* remove callbacks associated with a file */
151  mei_clear_list(dev, file, &dev->amthi_cmd_list.mei_cb.cb_list);
152  if (mei_clear_list(dev, file,
153  &dev->amthi_read_complete_list.mei_cb.cb_list))
154  removed = true;
155 
156  mei_clear_list(dev, file, &dev->ctrl_rd_list.mei_cb.cb_list);
157 
158  if (mei_clear_list(dev, file, &dev->ctrl_wr_list.mei_cb.cb_list))
159  removed = true;
160 
161  if (mei_clear_list(dev, file, &dev->write_waiting_list.mei_cb.cb_list))
162  removed = true;
163 
164  if (mei_clear_list(dev, file, &dev->write_list.mei_cb.cb_list))
165  removed = true;
166 
167  /* check if iamthif_current_cb not NULL */
168  if (dev->iamthif_current_cb && !removed) {
169  /* check file and iamthif current cb association */
170  if (dev->iamthif_current_cb->file_object == file) {
171  /* remove cb */
173  dev->iamthif_current_cb = NULL;
174  removed = true;
175  }
176  }
177  return removed;
178 }
187 static struct mei_cl_cb *find_read_list_entry(
188  struct mei_device *dev,
189  struct mei_cl *cl)
190 {
191  struct mei_cl_cb *pos = NULL;
192  struct mei_cl_cb *next = NULL;
193 
194  dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
195  list_for_each_entry_safe(pos, next,
196  &dev->read_list.mei_cb.cb_list, cb_list) {
197  struct mei_cl *cl_temp;
198  cl_temp = (struct mei_cl *)pos->file_private;
199 
200  if (mei_cl_cmp_id(cl, cl_temp))
201  return pos;
202  }
203  return NULL;
204 }
205 
214 static int mei_open(struct inode *inode, struct file *file)
215 {
216  struct mei_cl *cl;
217  struct mei_device *dev;
218  unsigned long cl_id;
219  int err;
220 
221  err = -ENODEV;
222  if (!mei_pdev)
223  goto out;
224 
225  dev = pci_get_drvdata(mei_pdev);
226  if (!dev)
227  goto out;
228 
229  mutex_lock(&dev->device_lock);
230  err = -ENOMEM;
231  cl = mei_cl_allocate(dev);
232  if (!cl)
233  goto out_unlock;
234 
235  err = -ENODEV;
236  if (dev->dev_state != MEI_DEV_ENABLED) {
237  dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
238  mei_dev_state_str(dev->dev_state));
239  goto out_unlock;
240  }
241  err = -EMFILE;
242  if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
243  dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
244  MEI_MAX_OPEN_HANDLE_COUNT);
245  goto out_unlock;
246  }
247 
248  cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
249  if (cl_id >= MEI_CLIENTS_MAX) {
250  dev_err(&dev->pdev->dev, "client_id exceeded %d",
251  MEI_CLIENTS_MAX);
252  goto out_unlock;
253  }
254 
255  cl->host_client_id = cl_id;
256 
257  dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
258 
259  dev->open_handle_count++;
260 
261  list_add_tail(&cl->link, &dev->file_list);
262 
263  set_bit(cl->host_client_id, dev->host_clients_map);
264  cl->state = MEI_FILE_INITIALIZING;
265  cl->sm_state = 0;
266 
267  file->private_data = cl;
268  mutex_unlock(&dev->device_lock);
269 
270  return nonseekable_open(inode, file);
271 
272 out_unlock:
273  mutex_unlock(&dev->device_lock);
274  kfree(cl);
275 out:
276  return err;
277 }
278 
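
mei_open hands out host client ids from the dev->host_clients_map bitmap: find_first_zero_bit() picks the lowest free id, set_bit() marks it used, and mei_release later returns it with clear_bit(). A rough sketch of that allocate/free scheme follows, under the assumption of a fixed-size map and with illustrative names; the real code does all of this under dev->device_lock.

/* Illustrative sketch of bitmap-based id allocation as in mei_open/mei_release. */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define MAX_IDS 256	/* stand-in for MEI_CLIENTS_MAX */

static DECLARE_BITMAP(id_map, MAX_IDS);

static int id_alloc(void)
{
	unsigned long id = find_first_zero_bit(id_map, MAX_IDS);

	if (id >= MAX_IDS)
		return -EMFILE;		/* no free slot left */
	set_bit(id, id_map);		/* claim it */
	return id;
}

static void id_free(unsigned long id)
{
	clear_bit(id, id_map);
}
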
287 static int mei_release(struct inode *inode, struct file *file)
288 {
289  struct mei_cl *cl = file->private_data;
290  struct mei_cl_cb *cb;
291  struct mei_device *dev;
292  int rets = 0;
293 
294  if (WARN_ON(!cl || !cl->dev))
295  return -ENODEV;
296 
297  dev = cl->dev;
298 
299  mutex_lock(&dev->device_lock);
300  if (cl != &dev->iamthif_cl) {
301  if (cl->state == MEI_FILE_CONNECTED) {
302  cl->state = MEI_FILE_DISCONNECTING;
303  dev_dbg(&dev->pdev->dev,
304  "disconnecting client host client = %d, "
305  "ME client = %d\n",
306  cl->host_client_id,
307  cl->me_client_id);
308  rets = mei_disconnect_host_client(dev, cl);
309  }
311  dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
312  cl->host_client_id,
313  cl->me_client_id);
314 
315  if (dev->open_handle_count > 0) {
316  clear_bit(cl->host_client_id, dev->host_clients_map);
317  dev->open_handle_count--;
318  }
319  mei_remove_client_from_file_list(dev, cl->host_client_id);
320 
321  /* free read cb */
322  cb = NULL;
323  if (cl->read_cb) {
324  cb = find_read_list_entry(dev, cl);
325  /* Remove entry from read list */
326  if (cb)
327  list_del(&cb->cb_list);
328 
329  cb = cl->read_cb;
330  cl->read_cb = NULL;
331  }
332 
333  file->private_data = NULL;
334 
335  if (cb) {
336  mei_free_cb_private(cb);
337  cb = NULL;
338  }
339 
340  kfree(cl);
341  } else {
342  if (dev->open_handle_count > 0)
343  dev->open_handle_count--;
344 
345  if (dev->iamthif_file_object == file &&
346  dev->iamthif_state != MEI_IAMTHIF_IDLE) {
347 
348  dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
349  dev->iamthif_state);
350  dev->iamthif_canceled = true;
351  if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
352  dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
353  mei_run_next_iamthif_cmd(dev);
354  }
355  }
356 
357  if (mei_clear_lists(dev, file))
358  dev->iamthif_state = MEI_IAMTHIF_IDLE;
359 
360  }
361  mutex_unlock(&dev->device_lock);
362  return rets;
363 }
364 
365 
376 static ssize_t mei_read(struct file *file, char __user *ubuf,
377  size_t length, loff_t *offset)
378 {
379  struct mei_cl *cl = file->private_data;
380  struct mei_cl_cb *cb_pos = NULL;
381  struct mei_cl_cb *cb = NULL;
382  struct mei_device *dev;
383  int i;
384  int rets;
385  int err;
386 
387 
388  if (WARN_ON(!cl || !cl->dev))
389  return -ENODEV;
390 
391  dev = cl->dev;
392 
393  mutex_lock(&dev->device_lock);
394  if (dev->dev_state != MEI_DEV_ENABLED) {
395  rets = -ENODEV;
396  goto out;
397  }
398 
399  if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
400  /* Do not allow to read watchdog client */
401  i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
402  if (i >= 0) {
403  struct mei_me_client *me_client = &dev->me_clients[i];
404  if (cl->me_client_id == me_client->client_id) {
405  rets = -EBADF;
406  goto out;
407  }
408  }
409  } else {
410  cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
411  }
412 
413  if (cl == &dev->iamthif_cl) {
414  rets = amthi_read(dev, file, ubuf, length, offset);
415  goto out;
416  }
417 
418  if (cl->read_cb && cl->read_cb->information > *offset) {
419  cb = cl->read_cb;
420  goto copy_buffer;
421  } else if (cl->read_cb && cl->read_cb->information > 0 &&
422  cl->read_cb->information <= *offset) {
423  cb = cl->read_cb;
424  rets = 0;
425  goto free;
426  } else if ((!cl->read_cb || !cl->read_cb->information) &&
427  *offset > 0) {
428  /* offset needs to be reset for consecutive reads */
429  *offset = 0;
430  rets = 0;
431  goto out;
432  }
433 
434  err = mei_start_read(dev, cl);
435  if (err && err != -EBUSY) {
436  dev_dbg(&dev->pdev->dev,
437  "mei start read failure with status = %d\n", err);
438  rets = err;
439  goto out;
440  }
441 
442  if (MEI_READ_COMPLETE != cl->reading_state &&
443  !waitqueue_active(&cl->rx_wait)) {
444  if (file->f_flags & O_NONBLOCK) {
445  rets = -EAGAIN;
446  goto out;
447  }
448 
449  mutex_unlock(&dev->device_lock);
450 
451  if (wait_event_interruptible(cl->rx_wait,
452  (MEI_READ_COMPLETE == cl->reading_state ||
453  MEI_FILE_INITIALIZING == cl->state ||
454  MEI_FILE_DISCONNECTED == cl->state ||
455  MEI_FILE_DISCONNECTING == cl->state))) {
456  if (signal_pending(current))
457  return -EINTR;
458  return -ERESTARTSYS;
459  }
460 
461  mutex_lock(&dev->device_lock);
462  if (MEI_FILE_INITIALIZING == cl->state ||
463  MEI_FILE_DISCONNECTED == cl->state ||
464  MEI_FILE_DISCONNECTING == cl->state) {
465  rets = -EBUSY;
466  goto out;
467  }
468  }
469 
470  cb = cl->read_cb;
471 
472  if (!cb) {
473  rets = -ENODEV;
474  goto out;
475  }
476  if (cl->reading_state != MEI_READ_COMPLETE) {
477  rets = 0;
478  goto out;
479  }
480  /* now copy the data to user space */
481 copy_buffer:
482  dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
483  cb->response_buffer.size);
484  dev_dbg(&dev->pdev->dev, "cb->information - %lu\n",
485  cb->information);
486  if (length == 0 || ubuf == NULL || *offset > cb->information) {
487  rets = -EMSGSIZE;
488  goto free;
489  }
490 
491  /* length is being truncated to PAGE_SIZE, however, */
492  /* information size may be longer */
493  length = min_t(size_t, length, (cb->information - *offset));
494 
495  if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
496  rets = -EFAULT;
497  goto free;
498  }
499 
500  rets = length;
501  *offset += length;
502  if ((unsigned long)*offset < cb->information)
503  goto out;
504 
505 free:
506  cb_pos = find_read_list_entry(dev, cl);
507  /* Remove entry from read list */
508  if (cb_pos)
509  list_del(&cb_pos->cb_list);
510  mei_free_cb_private(cb);
511  cl->reading_state = MEI_IDLE;
512  cl->read_cb = NULL;
513  cl->read_pending = 0;
514 out:
515  dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
516  mutex_unlock(&dev->device_lock);
517  return rets;
518 }
519 
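
mei_read copies out at most one received message per call and advances *offset within that message; once the whole message has been consumed the callback is freed and the next read() returns 0. A hedged userspace sketch of a reader following those semantics is below; the helper name and buffer handling are illustrative, not part of the driver.

/* Illustrative userspace helper matching mei_read()'s offset semantics. */
#include <stdio.h>
#include <unistd.h>

static ssize_t read_mei_reply(int fd, unsigned char *buf, size_t len)
{
	size_t got = 0;
	ssize_t n = 0;

	/* keep reading until the current message is fully consumed (n == 0) */
	while (got < len && (n = read(fd, buf + got, len - got)) > 0)
		got += n;
	if (n < 0) {
		perror("read");
		return -1;
	}
	return got;
}
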
530 static ssize_t mei_write(struct file *file, const char __user *ubuf,
531  size_t length, loff_t *offset)
532 {
533  struct mei_cl *cl = file->private_data;
534  struct mei_cl_cb *write_cb = NULL;
535  struct mei_msg_hdr mei_hdr;
536  struct mei_device *dev;
537  unsigned long timeout = 0;
538  int rets;
539  int i;
540 
541  if (WARN_ON(!cl || !cl->dev))
542  return -ENODEV;
543 
544  dev = cl->dev;
545 
546  mutex_lock(&dev->device_lock);
547 
548  if (dev->dev_state != MEI_DEV_ENABLED) {
549  mutex_unlock(&dev->device_lock);
550  return -ENODEV;
551  }
552 
553  if (cl == &dev->iamthif_cl) {
554  write_cb = find_amthi_read_list_entry(dev, file);
555 
556  if (write_cb) {
557  timeout = write_cb->read_time +
559 
560  if (time_after(jiffies, timeout) ||
561  cl->reading_state == MEI_READ_COMPLETE) {
562  *offset = 0;
563  list_del(&write_cb->cb_list);
564  mei_free_cb_private(write_cb);
565  write_cb = NULL;
566  }
567  }
568  }
569 
570  /* free entry used in read */
571  if (cl->reading_state == MEI_READ_COMPLETE) {
572  *offset = 0;
573  write_cb = find_read_list_entry(dev, cl);
574  if (write_cb) {
575  list_del(&write_cb->cb_list);
576  mei_free_cb_private(write_cb);
577  write_cb = NULL;
578  cl->reading_state = MEI_IDLE;
579  cl->read_cb = NULL;
580  cl->read_pending = 0;
581  }
582  } else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
583  *offset = 0;
584 
585 
586  write_cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
587  if (!write_cb) {
588  mutex_unlock(&dev->device_lock);
589  return -ENOMEM;
590  }
591 
592  write_cb->file_object = file;
593  write_cb->file_private = cl;
594  write_cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
595  rets = -ENOMEM;
596  if (!write_cb->request_buffer.data)
597  goto unlock_dev;
598 
599  dev_dbg(&dev->pdev->dev, "length =%d\n", (int) length);
600 
601  rets = -EFAULT;
602  if (copy_from_user(write_cb->request_buffer.data, ubuf, length))
603  goto unlock_dev;
604 
605  cl->sm_state = 0;
606  if (length == 4 &&
607  ((memcmp(mei_wd_state_independence_msg[0],
608  write_cb->request_buffer.data, 4) == 0) ||
609  (memcmp(mei_wd_state_independence_msg[1],
610  write_cb->request_buffer.data, 4) == 0) ||
611  (memcmp(mei_wd_state_independence_msg[2],
612  write_cb->request_buffer.data, 4) == 0)))
613  cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
614 
615  INIT_LIST_HEAD(&write_cb->cb_list);
616  if (cl == &dev->iamthif_cl) {
617  write_cb->response_buffer.data =
618  kmalloc(dev->iamthif_mtu, GFP_KERNEL);
619  if (!write_cb->response_buffer.data) {
620  rets = -ENOMEM;
621  goto unlock_dev;
622  }
623  if (dev->dev_state != MEI_DEV_ENABLED) {
624  rets = -ENODEV;
625  goto unlock_dev;
626  }
627  i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
628  if (i < 0) {
629  rets = -ENODEV;
630  goto unlock_dev;
631  }
632  if (length > dev->me_clients[i].props.max_msg_length ||
633  length <= 0) {
634  rets = -EMSGSIZE;
635  goto unlock_dev;
636  }
637 
638  write_cb->response_buffer.size = dev->iamthif_mtu;
639  write_cb->major_file_operations = MEI_IOCTL;
640  write_cb->information = 0;
641  write_cb->request_buffer.size = length;
642  if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
643  rets = -ENODEV;
644  goto unlock_dev;
645  }
646 
647  if (!list_empty(&dev->amthi_cmd_list.mei_cb.cb_list) ||
648  dev->iamthif_state != MEI_IAMTHIF_IDLE) {
649  dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
650  (int) dev->iamthif_state);
651  dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
652  list_add_tail(&write_cb->cb_list,
653  &dev->amthi_cmd_list.mei_cb.cb_list);
654  rets = length;
655  } else {
656  dev_dbg(&dev->pdev->dev, "call amthi write\n");
657  rets = amthi_write(dev, write_cb);
658 
659  if (rets) {
660  dev_dbg(&dev->pdev->dev, "amthi write failed with status = %d\n",
661  rets);
662  goto unlock_dev;
663  }
664  rets = length;
665  }
666  mutex_unlock(&dev->device_lock);
667  return rets;
668  }
669 
670  write_cb->major_file_operations = MEI_WRITE;
671  /* make sure information is zero before we start */
672 
673  write_cb->information = 0;
674  write_cb->request_buffer.size = length;
675 
676  dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
677  cl->host_client_id, cl->me_client_id);
678  if (cl->state != MEI_FILE_CONNECTED) {
679  rets = -ENODEV;
680  dev_dbg(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
681  cl->host_client_id,
682  cl->me_client_id);
683  goto unlock_dev;
684  }
685  i = mei_me_cl_by_id(dev, cl->me_client_id);
686  if (i < 0) {
687  rets = -ENODEV;
688  goto unlock_dev;
689  }
690  if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
691  rets = -EINVAL;
692  goto unlock_dev;
693  }
694  write_cb->file_private = cl;
695 
696  rets = mei_flow_ctrl_creds(dev, cl);
697  if (rets < 0)
698  goto unlock_dev;
699 
700  if (rets && dev->mei_host_buffer_is_empty) {
701  rets = 0;
702  dev->mei_host_buffer_is_empty = false;
703  if (length > mei_hbuf_max_data(dev)) {
704  mei_hdr.length = mei_hbuf_max_data(dev);
705  mei_hdr.msg_complete = 0;
706  } else {
707  mei_hdr.length = length;
708  mei_hdr.msg_complete = 1;
709  }
710  mei_hdr.host_addr = cl->host_client_id;
711  mei_hdr.me_addr = cl->me_client_id;
712  mei_hdr.reserved = 0;
713  dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
714  *((u32 *) &mei_hdr));
715  if (mei_write_message(dev, &mei_hdr,
716  (unsigned char *) (write_cb->request_buffer.data),
717  mei_hdr.length)) {
718  rets = -ENODEV;
719  goto unlock_dev;
720  }
721  cl->writing_state = MEI_WRITING;
722  write_cb->information = mei_hdr.length;
723  if (mei_hdr.msg_complete) {
724  if (mei_flow_ctrl_reduce(dev, cl)) {
725  rets = -ENODEV;
726  goto unlock_dev;
727  }
728  list_add_tail(&write_cb->cb_list,
729  &dev->write_waiting_list.mei_cb.cb_list);
730  } else {
731  list_add_tail(&write_cb->cb_list,
732  &dev->write_list.mei_cb.cb_list);
733  }
734 
735  } else {
736 
737  write_cb->information = 0;
739  list_add_tail(&write_cb->cb_list,
740  &dev->write_list.mei_cb.cb_list);
741  }
742  mutex_unlock(&dev->device_lock);
743  return length;
744 
745 unlock_dev:
746  mutex_unlock(&dev->device_lock);
747  mei_free_cb_private(write_cb);
748  return rets;
749 }
750 
751 
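
The dev_dbg in mei_write above prints the header as one 32-bit word (*((u32 *) &mei_hdr)): struct mei_msg_hdr is a packed bitfield holding the ME and host client addresses, the length of this fragment and a msg_complete flag, and mei_write emits a message in host-buffer-sized fragments where only the last fragment has msg_complete set. The sketch below shows that shape; the exact field widths live in the driver's hw.h and should be treated as an assumption here.

/* Sketch of the 32-bit MEI message header implied by mei_write() above. */
#include <linux/types.h>

struct example_mei_msg_hdr {
	u32 me_addr:8;		/* ME client address */
	u32 host_addr:8;	/* host client address */
	u32 length:9;		/* payload bytes in this fragment */
	u32 reserved:6;
	u32 msg_complete:1;	/* set only on the last fragment */
} __packed;
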
761 static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
762 {
763  struct mei_device *dev;
764  struct mei_cl *cl = file->private_data;
765  struct mei_connect_client_data *connect_data = NULL;
766  int rets;
767 
768  if (cmd != IOCTL_MEI_CONNECT_CLIENT)
769  return -EINVAL;
770 
771  if (WARN_ON(!cl || !cl->dev))
772  return -ENODEV;
773 
774  dev = cl->dev;
775 
776  dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);
777 
778  mutex_lock(&dev->device_lock);
779  if (dev->dev_state != MEI_DEV_ENABLED) {
780  rets = -ENODEV;
781  goto out;
782  }
783 
784  dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
785 
786  connect_data = kzalloc(sizeof(struct mei_connect_client_data),
787  GFP_KERNEL);
788  if (!connect_data) {
789  rets = -ENOMEM;
790  goto out;
791  }
792  dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
793  if (copy_from_user(connect_data, (char __user *)data,
794  sizeof(struct mei_connect_client_data))) {
795  dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
796  rets = -EFAULT;
797  goto out;
798  }
799  rets = mei_ioctl_connect_client(file, connect_data);
800 
801  /* if all is ok, copying the data back to user. */
802  if (rets)
803  goto out;
804 
805  dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
806  if (copy_to_user((char __user *)data, connect_data,
807  sizeof(struct mei_connect_client_data))) {
808  dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
809  rets = -EFAULT;
810  goto out;
811  }
812 
813 out:
814  kfree(connect_data);
815  mutex_unlock(&dev->device_lock);
816  return rets;
817 }
818 
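
mei_ioctl implements a single command, IOCTL_MEI_CONNECT_CLIENT: userspace passes the UUID of the ME client it wants to reach and, on success, gets that client's properties (maximum message size and protocol version) back in the same structure. A hedged userspace sketch follows, assuming the UAPI definitions from <linux/mei.h> and the /dev/mei node registered below; the helper name and error handling are illustrative.

/* Illustrative: connect to an ME client from userspace via IOCTL_MEI_CONNECT_CLIENT. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mei.h>
#include <linux/uuid.h>

static int mei_connect(const uuid_le *uuid)
{
	struct mei_connect_client_data data;
	int fd = open("/dev/mei", O_RDWR);

	if (fd < 0)
		return -1;

	data.in_client_uuid = *uuid;	/* which ME client to talk to */
	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) < 0) {
		close(fd);
		return -1;
	}
	printf("max msg %u, protocol %u\n",
	       data.out_client_properties.max_msg_length,
	       data.out_client_properties.protocol_version);

	return fd;	/* write()/read() on fd now reach that client */
}
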
828 #ifdef CONFIG_COMPAT
829 static long mei_compat_ioctl(struct file *file,
830  unsigned int cmd, unsigned long data)
831 {
832  return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
833 }
834 #endif
835 
836 
845 static unsigned int mei_poll(struct file *file, poll_table *wait)
846 {
847  struct mei_cl *cl = file->private_data;
848  struct mei_device *dev;
849  unsigned int mask = 0;
850 
851  if (WARN_ON(!cl || !cl->dev))
852  return mask;
853 
854  dev = cl->dev;
855 
856  mutex_lock(&dev->device_lock);
857 
858  if (dev->dev_state != MEI_DEV_ENABLED)
859  goto out;
860 
861 
862  if (cl == &dev->iamthif_cl) {
863  mutex_unlock(&dev->device_lock);
864  poll_wait(file, &dev->iamthif_cl.wait, wait);
865  mutex_lock(&dev->device_lock);
866  if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
867  dev->iamthif_file_object == file) {
868  mask |= (POLLIN | POLLRDNORM);
869  dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
870  mei_run_next_iamthif_cmd(dev);
871  }
872  goto out;
873  }
874 
875  mutex_unlock(&dev->device_lock);
876  poll_wait(file, &cl->tx_wait, wait);
877  mutex_lock(&dev->device_lock);
878  if (MEI_WRITE_COMPLETE == cl->writing_state)
879  mask |= (POLLIN | POLLRDNORM);
880 
881 out:
882  mutex_unlock(&dev->device_lock);
883  return mask;
884 }
885 
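
Judging from the code above, mei_poll never reports POLLOUT: for the iamthif (AMTHI) client it raises POLLIN | POLLRDNORM when a queued read has completed, and for ordinary clients it waits on cl->tx_wait and signals once the pending write has completed. A minimal userspace sketch of waiting on the device; the timeout value and helper name are illustrative.

/* Illustrative: wait for /dev/mei to signal completion via poll(2). */
#include <poll.h>

static int wait_for_mei(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return n;			/* 0 = timeout, <0 = error */
	return (pfd.revents & POLLIN) ? 1 : 0;
}
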
886 /*
887  * file operations structure will be used for mei char device.
888  */
889 static const struct file_operations mei_fops = {
890  .owner = THIS_MODULE,
891  .read = mei_read,
892  .unlocked_ioctl = mei_ioctl,
893 #ifdef CONFIG_COMPAT
894  .compat_ioctl = mei_compat_ioctl,
895 #endif
896  .open = mei_open,
897  .release = mei_release,
898  .write = mei_write,
899  .poll = mei_poll,
900  .llseek = no_llseek
901 };
902 
903 
904 /*
905  * Misc Device Struct
906  */
907 static struct miscdevice mei_misc_device = {
908  .name = "mei",
909  .fops = &mei_fops,
910  .minor = MISC_DYNAMIC_MINOR,
911 };
912 
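
Registering this miscdevice (done from mei_probe() below via misc_register()) is what creates the /dev/mei character node: MISC_DYNAMIC_MINOR asks the misc core to pick a free minor under the shared misc major, and the mei_fops table above supplies the handlers behind open, read, write, ioctl and poll on that node.
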
920 static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
921  const struct pci_device_id *ent)
922 {
923  u32 reg;
924  if (ent->device == MEI_DEV_ID_PBG_1) {
925  pci_read_config_dword(pdev, 0x48, &reg);
926  /* bit 9 set with bit 10 clear means the ME interface is not usable */
927  if ((reg & 0x600) == 0x200) {
928  dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
929  return false;
930  }
931  }
932  return true;
933 }
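
For reference, the test above works on the 32-bit config word read at offset 0x48: masking with 0x600 isolates bits 9-10, and comparing against 0x200 selects exactly the "bit 9 set, bit 10 clear" case, which is the only combination that makes the probe bail out. A tiny worked sketch of the same check (the helper name is illustrative):

/* Worked example of the bit test in mei_quirk_probe(). */
#include <linux/types.h>

static bool pbg_has_no_mei(u32 reg)
{
	/* bits 9-10 == 0b01 is treated as "no usable ME interface" */
	return (reg & 0x600) == 0x200;
}

/* pbg_has_no_mei(0x200) -> true  (probe refused)
 * pbg_has_no_mei(0x600) -> false (probe continues)
 */
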
942 static int __devinit mei_probe(struct pci_dev *pdev,
943  const struct pci_device_id *ent)
944 {
945  struct mei_device *dev;
946  int err;
947 
948  mutex_lock(&mei_mutex);
949 
950  if (!mei_quirk_probe(pdev, ent)) {
951  err = -ENODEV;
952  goto end;
953  }
954 
955  if (mei_pdev) {
956  err = -EEXIST;
957  goto end;
958  }
959  /* enable pci dev */
960  err = pci_enable_device(pdev);
961  if (err) {
962  dev_err(&pdev->dev, "failed to enable pci device.\n");
963  goto end;
964  }
965  /* set PCI host mastering */
966  pci_set_master(pdev);
967  /* pci request regions for mei driver */
968  err = pci_request_regions(pdev, KBUILD_MODNAME);
969  if (err) {
970  dev_err(&pdev->dev, "failed to get pci regions.\n");
971  goto disable_device;
972  }
973  /* allocates and initializes the mei dev structure */
974  dev = mei_device_init(pdev);
975  if (!dev) {
976  err = -ENOMEM;
977  goto release_regions;
978  }
979  /* mapping IO device memory */
980  dev->mem_addr = pci_iomap(pdev, 0, 0);
981  if (!dev->mem_addr) {
982  dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
983  err = -ENOMEM;
984  goto free_device;
985  }
986  pci_enable_msi(pdev);
987 
988  /* request and enable interrupt */
989  if (pci_dev_msi_enabled(pdev))
990  err = request_threaded_irq(pdev->irq,
991  NULL,
992  mei_interrupt_thread_handler,
993  IRQF_ONESHOT, KBUILD_MODNAME, dev);
994  else
995  err = request_threaded_irq(pdev->irq,
996  mei_interrupt_quick_handler,
997  mei_interrupt_thread_handler,
998  IRQF_SHARED, KBUILD_MODNAME, dev);
999 
1000  if (err) {
1001  dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
1002  pdev->irq);
1003  goto disable_msi;
1004  }
1006  if (mei_hw_init(dev)) {
1007  dev_err(&pdev->dev, "init hw failure.\n");
1008  err = -ENODEV;
1009  goto release_irq;
1010  }
1011 
1012  err = misc_register(&mei_misc_device);
1013  if (err)
1014  goto release_irq;
1015 
1016  mei_pdev = pdev;
1017  pci_set_drvdata(pdev, dev);
1018 
1019 
1020  schedule_delayed_work(&dev->timer_work, HZ);
1021 
1022  mutex_unlock(&mei_mutex);
1023 
1024  pr_debug("initialization successful.\n");
1025 
1026  return 0;
1027 
1028 release_irq:
1029  /* disable interrupts */
1030  dev->host_hw_state = mei_hcsr_read(dev);
1031  mei_disable_interrupts(dev);
1032  flush_scheduled_work();
1033  free_irq(pdev->irq, dev);
1034 disable_msi:
1035  pci_disable_msi(pdev);
1036  pci_iounmap(pdev, dev->mem_addr);
1037 free_device:
1038  kfree(dev);
1039 release_regions:
1040  pci_release_regions(pdev);
1041 disable_device:
1042  pci_disable_device(pdev);
1043 end:
1044  mutex_unlock(&mei_mutex);
1045  dev_err(&pdev->dev, "initialization failed.\n");
1046  return err;
1047 }
1048 
1057 static void __devexit mei_remove(struct pci_dev *pdev)
1058 {
1059  struct mei_device *dev;
1060 
1061  if (mei_pdev != pdev)
1062  return;
1063 
1064  dev = pci_get_drvdata(pdev);
1065  if (!dev)
1066  return;
1067 
1068  mutex_lock(&dev->device_lock);
1069 
1070  cancel_delayed_work(&dev->timer_work);
1071 
1072  mei_wd_stop(dev);
1073 
1074  mei_pdev = NULL;
1075 
1076  if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
1077  dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
1078  mei_disconnect_host_client(dev, &dev->iamthif_cl);
1079  }
1080  if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
1081  dev->wd_cl.state = MEI_FILE_DISCONNECTING;
1082  mei_disconnect_host_client(dev, &dev->wd_cl);
1083  }
1084 
1085  /* Unregistering watchdog device */
1086  mei_watchdog_unregister(dev);
1087 
1088  /* remove entry if already in list */
1089  dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
1090  mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
1091  mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);
1092 
1093  dev->iamthif_current_cb = NULL;
1094  dev->me_clients_num = 0;
1095 
1096  mutex_unlock(&dev->device_lock);
1097 
1098  flush_scheduled_work();
1099 
1100  /* disable interrupts */
1101  mei_disable_interrupts(dev);
1102 
1103  free_irq(pdev->irq, dev);
1104  pci_disable_msi(pdev);
1105  pci_set_drvdata(pdev, NULL);
1106 
1107  if (dev->mem_addr)
1108  pci_iounmap(pdev, dev->mem_addr);
1109 
1110  kfree(dev);
1111 
1112  pci_release_regions(pdev);
1113  pci_disable_device(pdev);
1114 
1115  misc_deregister(&mei_misc_device);
1116 }
1117 #ifdef CONFIG_PM
1118 static int mei_pci_suspend(struct device *device)
1119 {
1120  struct pci_dev *pdev = to_pci_dev(device);
1121  struct mei_device *dev = pci_get_drvdata(pdev);
1122  int err;
1123 
1124  if (!dev)
1125  return -ENODEV;
1126  mutex_lock(&dev->device_lock);
1127 
1128  cancel_delayed_work(&dev->timer_work);
1129 
1130  /* Stop watchdog if exists */
1131  err = mei_wd_stop(dev);
1132  /* Set new mei state */
1133  if (dev->dev_state == MEI_DEV_ENABLED ||
1134  dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
1135  dev->dev_state = MEI_DEV_POWER_DOWN;
1136  mei_reset(dev, 0);
1137  }
1138  mutex_unlock(&dev->device_lock);
1139 
1140  free_irq(pdev->irq, dev);
1141  pci_disable_msi(pdev);
1142 
1143  return err;
1144 }
1145 
1146 static int mei_pci_resume(struct device *device)
1147 {
1148  struct pci_dev *pdev = to_pci_dev(device);
1149  struct mei_device *dev;
1150  int err;
1151 
1152  dev = pci_get_drvdata(pdev);
1153  if (!dev)
1154  return -ENODEV;
1155 
1156  pci_enable_msi(pdev);
1157 
1158  /* request and enable interrupt */
1159  if (pci_dev_msi_enabled(pdev))
1160  err = request_threaded_irq(pdev->irq,
1161  NULL,
1162  mei_interrupt_thread_handler,
1163  IRQF_ONESHOT, KBUILD_MODNAME, dev);
1164  else
1165  err = request_threaded_irq(pdev->irq,
1166  mei_interrupt_quick_handler,
1167  mei_interrupt_thread_handler,
1168  IRQF_SHARED, KBUILD_MODNAME, dev);
1169 
1170  if (err) {
1171  dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
1172  pdev->irq);
1173  return err;
1174  }
1175 
1176  mutex_lock(&dev->device_lock);
1177  dev->dev_state = MEI_DEV_POWER_UP;
1178  mei_reset(dev, 1);
1179  mutex_unlock(&dev->device_lock);
1180 
1181  /* Start timer if stopped in suspend */
1182  schedule_delayed_work(&dev->timer_work, HZ);
1183 
1184  return err;
1185 }
1186 static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
1187 #define MEI_PM_OPS (&mei_pm_ops)
1188 #else
1189 #define MEI_PM_OPS NULL
1190 #endif /* CONFIG_PM */
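
Under CONFIG_PM, SIMPLE_DEV_PM_OPS builds a dev_pm_ops structure whose system-sleep callbacks all point at mei_pci_suspend/mei_pci_resume; MEI_PM_OPS then plugs it into the PCI driver below and collapses to NULL when power management is compiled out. Roughly, the macro expands to something like the sketch below (an approximation, not a verbatim expansion of the kernel macro):

/* Approximate expansion of SIMPLE_DEV_PM_OPS(mei_pm_ops, ...) above. */
static const struct dev_pm_ops mei_pm_ops = {
	.suspend  = mei_pci_suspend,
	.resume   = mei_pci_resume,
	.freeze   = mei_pci_suspend,
	.thaw     = mei_pci_resume,
	.poweroff = mei_pci_suspend,
	.restore  = mei_pci_resume,
};
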
1191 /*
1192  * PCI driver structure
1193  */
1194 static struct pci_driver mei_driver = {
1195  .name = KBUILD_MODNAME,
1196  .id_table = mei_pci_tbl,
1197  .probe = mei_probe,
1198  .remove = __devexit_p(mei_remove),
1199  .shutdown = __devexit_p(mei_remove),
1200  .driver.pm = MEI_PM_OPS,
1201 };
1202 
1203 module_pci_driver(mei_driver);
1204 
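
module_pci_driver() replaces the usual module_init/module_exit boilerplate that registers and unregisters the PCI driver; it is roughly equivalent to the sketch below (the function names here are illustrative):

/* Roughly what module_pci_driver(mei_driver) expands to. */
static int __init mei_init_module(void)
{
	return pci_register_driver(&mei_driver);
}
module_init(mei_init_module);

static void __exit mei_exit_module(void)
{
	pci_unregister_driver(&mei_driver);
}
module_exit(mei_exit_module);
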
1205 MODULE_AUTHOR("Intel Corporation");
1206 MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
1207 MODULE_LICENSE("GPL v2");