Linux Kernel 3.7.1
sep_main.c
1 /*
2  *
3  * sep_main.c - Security Processor Driver main group of functions
4  *
5  * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6  * Contributions(c) 2009-2011 Discretix. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License as published by the Free
10  * Software Foundation; version 2 of the License.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc., 59
19  * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20  *
21  * CONTACTS:
22  *
23  * Mark Allyn [email protected]
24  * Jayant Mangalampalli [email protected]
25  *
26  * CHANGES:
27  *
28  * 2009.06.26 Initial publish
29  * 2010.09.14 Upgrade to Medfield
30  * 2011.01.21 Move to sep_main.c to allow for sep_crypto.c
31  * 2011.02.22 Enable kernel crypto operation
32  *
33  * Please note that this driver is based on information in the Discretix
34  * CryptoCell 5.2 Driver Implementation Guide; the Discretix CryptoCell 5.2
35  * Integration Intel Medfield appendix; the Discretix CryptoCell 5.2
36  * Linux Driver Integration Guide; and the Discretix CryptoCell 5.2 System
37  * Overview and Integration Guide.
38  */
39 /* #define DEBUG */
40 /* #define SEP_PERF_DEBUG */
41 
42 #include <linux/init.h>
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/miscdevice.h>
46 #include <linux/fs.h>
47 #include <linux/cdev.h>
48 #include <linux/kdev_t.h>
49 #include <linux/mutex.h>
50 #include <linux/sched.h>
51 #include <linux/mm.h>
52 #include <linux/poll.h>
53 #include <linux/wait.h>
54 #include <linux/pci.h>
55 #include <linux/pm_runtime.h>
56 #include <linux/slab.h>
57 #include <linux/ioctl.h>
58 #include <asm/current.h>
59 #include <linux/ioport.h>
60 #include <linux/io.h>
61 #include <linux/interrupt.h>
62 #include <linux/pagemap.h>
63 #include <asm/cacheflush.h>
64 #include <linux/delay.h>
65 #include <linux/jiffies.h>
66 #include <linux/async.h>
67 #include <linux/crypto.h>
68 #include <crypto/internal/hash.h>
69 #include <crypto/scatterwalk.h>
70 #include <crypto/sha.h>
71 #include <crypto/md5.h>
72 #include <crypto/aes.h>
73 #include <crypto/des.h>
74 #include <crypto/hash.h>
75 
76 #include "sep_driver_hw_defs.h"
77 #include "sep_driver_config.h"
78 #include "sep_driver_api.h"
79 #include "sep_dev.h"
80 #include "sep_crypto.h"
81 
82 #define CREATE_TRACE_POINTS
83 #include "sep_trace_events.h"
84 
85 /*
86  * Let's not spend cycles iterating over message
87  * area contents if debugging not enabled
88  */
89 #ifdef DEBUG
90 #define sep_dump_message(sep) _sep_dump_message(sep)
91 #else
92 #define sep_dump_message(sep)
93 #endif
94 
95 /*
96  * Currently, there is only one SEP device per platform;
97  * in the event that future platforms have more than one
98  * SEP device, this will become a linked list
99  */
100 struct sep_device *sep_dev;
101 
102 
103 /**
104  * sep_queue_status_remove - Removes transaction from status queue
105  * @sep: SEP device
106  * @queue_elem: pointer to the queue element to remove
107  *
108  * This function removes information about a transaction from the queue.
109  */
110 void sep_queue_status_remove(struct sep_device *sep,
111  struct sep_queue_info **queue_elem)
112 {
113  unsigned long lck_flags;
114 
115  dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove\n",
116  current->pid);
117 
118  if (!queue_elem || !(*queue_elem)) {
119  dev_dbg(&sep->pdev->dev, "PID%d %s null\n",
120  current->pid, __func__);
121  return;
122  }
123 
124  spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
125  list_del(&(*queue_elem)->list);
126  sep->sep_queue_num--;
127  spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
128 
129  kfree(*queue_elem);
130  *queue_elem = NULL;
131 
132  dev_dbg(&sep->pdev->dev, "[PID%d] sep_queue_status_remove return\n",
133  current->pid);
134  return;
135 }
136 
137 /**
138  * sep_queue_status_add - Adds transaction to status queue
139  * @sep: SEP device
140  * @opcode: transaction opcode
141  * @size: transaction size
142  * @pid: pid of current process
143  * @name: current process name
144  * @name_len: length of name (current process)
145  *
146  * This function adds information about a started transaction to the
147  * status queue.
148  */
149 struct sep_queue_info *sep_queue_status_add(
150  struct sep_device *sep,
151  u32 opcode,
152  u32 size,
153  u32 pid,
154  u8 *name, size_t name_len)
155 {
156  unsigned long lck_flags;
157  struct sep_queue_info *my_elem = NULL;
158 
159  my_elem = kzalloc(sizeof(struct sep_queue_info), GFP_KERNEL);
160 
161  if (!my_elem)
162  return NULL;
163 
164  dev_dbg(&sep->pdev->dev, "[PID%d] kzalloc ok\n", current->pid);
165 
166  my_elem->data.opcode = opcode;
167  my_elem->data.size = size;
168  my_elem->data.pid = pid;
169 
170  if (name_len > TASK_COMM_LEN)
171  name_len = TASK_COMM_LEN;
172 
173  memcpy(&my_elem->data.name, name, name_len);
174 
175  spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
176 
177  list_add_tail(&my_elem->list, &sep->sep_queue_status);
178  sep->sep_queue_num++;
179 
180  spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
181 
182  return my_elem;
183 }
184 
185 /**
186  * sep_allocate_dmatables_region - Allocates a buffer for the DMA tables
187  * @sep: SEP device
188  * @dmatables_region: destination pointer for the buffer
189  * @dma_ctx: DMA context for the transaction
190  * @table_count: number of DMA/LLI tables to create
191  *
192  * The buffer created will not work as-is for DMA operations;
193  * it needs to be copied over to the appropriate place in the shared area.
194  */
195 static int sep_allocate_dmatables_region(struct sep_device *sep,
196  void **dmatables_region,
197  struct sep_dma_context *dma_ctx,
198  const u32 table_count)
199 {
200  const size_t new_len = table_count *
201  sizeof(struct sep_lli_entry) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
202 
203  void *tmp_region = NULL;
204 
205  dev_dbg(&sep->pdev->dev, "[PID%d] dma_ctx = 0x%p\n",
206  current->pid, dma_ctx);
207  dev_dbg(&sep->pdev->dev, "[PID%d] dmatables_region = 0x%p\n",
208  current->pid, dmatables_region);
209 
210  if (!dma_ctx || !dmatables_region) {
211  dev_warn(&sep->pdev->dev,
212  "[PID%d] dma context/region uninitialized\n",
213  current->pid);
214  return -EINVAL;
215  }
216 
217  dev_dbg(&sep->pdev->dev, "[PID%d] newlen = 0x%08zX\n",
218  current->pid, new_len);
219  dev_dbg(&sep->pdev->dev, "[PID%d] oldlen = 0x%08X\n", current->pid,
220  dma_ctx->dmatables_len);
221  tmp_region = kzalloc(new_len + dma_ctx->dmatables_len, GFP_KERNEL);
222  if (!tmp_region) {
223  dev_warn(&sep->pdev->dev,
224  "[PID%d] no mem for dma tables region\n",
225  current->pid);
226  return -ENOMEM;
227  }
228 
229  /* Were there any previous tables that need to be preserved ? */
230  if (*dmatables_region) {
231  memcpy(tmp_region, *dmatables_region, dma_ctx->dmatables_len);
232  kfree(*dmatables_region);
233  *dmatables_region = NULL;
234  }
235 
236  *dmatables_region = tmp_region;
237 
238  dma_ctx->dmatables_len += new_len;
239 
240  return 0;
241 }
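/*
 * A note on the allocation strategy above: the kzalloc/memcpy/kfree
 * sequence implements grow-only reallocation of the tables region,
 * preserving prior contents and zeroing the new tail. A minimal
 * sketch of the same semantics using krealloc() (illustrative only,
 * not the driver's code):
 *
 *	void *r = krealloc(*dmatables_region,
 *			   dma_ctx->dmatables_len + new_len, GFP_KERNEL);
 *	if (!r)
 *		return -ENOMEM;
 *	memset(r + dma_ctx->dmatables_len, 0, new_len);
 *	*dmatables_region = r;
 *	dma_ctx->dmatables_len += new_len;
 */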
242 
243 /**
244  * sep_wait_transaction - Used for synchronizing transactions
245  * @sep: SEP device
246  */
247 int sep_wait_transaction(struct sep_device *sep)
248 {
249  int error = 0;
250  DEFINE_WAIT(wait);
251 
252  if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
253  &sep->in_use_flags)) {
254  dev_dbg(&sep->pdev->dev,
255  "[PID%d] no transactions, returning\n",
256  current->pid);
257  goto end_function_setpid;
258  }
259 
260  /*
261  * Looping needed even for exclusive waitq entries
262  * due to process wakeup latencies, previous process
263  * might have already created another transaction.
264  */
265  for (;;) {
266  /*
267  * Exclusive waitq entry, so that only one process is
268  * woken up from the queue at a time.
269  */
270  prepare_to_wait_exclusive(&sep->transactions_wait_queue,
271  &wait,
272  TASK_INTERRUPTIBLE);
273  if (0 == test_and_set_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
274  &sep->in_use_flags)) {
275  dev_dbg(&sep->pdev->dev,
276  "[PID%d] no transactions, breaking\n",
277  current->pid);
278  break;
279  }
280  dev_dbg(&sep->pdev->dev,
281  "[PID%d] transactions ongoing, sleeping\n",
282  current->pid);
283  schedule();
284  dev_dbg(&sep->pdev->dev, "[PID%d] woken up\n", current->pid);
285 
286  if (signal_pending(current)) {
287  dev_dbg(&sep->pdev->dev, "[PID%d] received signal\n",
288  current->pid);
289  error = -EINTR;
290  goto end_function;
291  }
292  }
293 end_function_setpid:
294  /*
295  * The pid_doing_transaction indicates that this process
296  * now owns the facilities to perform a transaction with
297  * the SEP. While this process is performing a transaction,
298  * no other process who has the SEP device open can perform
299  * any transactions. This method allows more than one process
300  * to have the device open at any given time, which provides
301  * finer granularity for device utilization by multiple
302  * processes.
303  */
304  /* Only one process is able to progress here at a time */
305  sep->pid_doing_transaction = current->pid;
306 
307 end_function:
308  finish_wait(&sep->transactions_wait_queue, &wait);
309 
310  return error;
311 }
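/*
 * The loop in sep_wait_transaction() above is the canonical
 * open-coded wait-queue sequence. A minimal sketch of the pattern,
 * assuming a hypothetical wait queue wq and lock bit MY_LOCK_BIT
 * (names are illustrative, not from this driver):
 *
 *	DEFINE_WAIT(wait);
 *	for (;;) {
 *		prepare_to_wait_exclusive(&wq, &wait, TASK_INTERRUPTIBLE);
 *		if (!test_and_set_bit(MY_LOCK_BIT, &flags))
 *			break;			// acquired
 *		schedule();			// sleep until wake_up(&wq)
 *		if (signal_pending(current))
 *			break;			// interrupted
 *	}
 *	finish_wait(&wq, &wait);
 *
 * The exclusive entry makes wake_up() rouse a single waiter, and the
 * re-test after schedule() handles another process taking the bit
 * between the wakeup and this process running again.
 */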
312 
313 /**
314  * sep_check_transaction_owner - Checks if current process owns transaction
315  * @sep: SEP device
316  */
317 static inline int sep_check_transaction_owner(struct sep_device *sep)
318 {
319  dev_dbg(&sep->pdev->dev, "[PID%d] transaction pid = %d\n",
320  current->pid,
321  sep->pid_doing_transaction);
322 
323  if ((sep->pid_doing_transaction == 0) ||
324  (current->pid != sep->pid_doing_transaction)) {
325  return -EACCES;
326  }
327 
328  /* We own the transaction */
329  return 0;
330 }
331 
332 #ifdef DEBUG
333 
334 /**
335  * _sep_dump_message - dump the message that is pending
336  * @sep: SEP device
337  *
338  * This will only print a dump if DEBUG is set
339  */
340 static void _sep_dump_message(struct sep_device *sep)
341 {
342  int count;
343 
344  u32 *p = sep->shared_addr;
345 
346  for (count = 0; count < 10 * 4; count += 4)
347  dev_dbg(&sep->pdev->dev,
348  "[PID%d] Word %d of the message is %x\n",
349  current->pid, count/4, *p++);
350 }
351 
352 #endif
353 
354 /**
355  * sep_map_and_alloc_shared_area - allocate the shared area
356  * @sep: security processor
357  * Allocates a DMA-coherent block for SEP/host messaging
358  */
359 static int sep_map_and_alloc_shared_area(struct sep_device *sep)
360 {
361  sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
362  sep->shared_size,
363  &sep->shared_bus, GFP_KERNEL);
364 
365  if (!sep->shared_addr) {
366  dev_dbg(&sep->pdev->dev,
367  "[PID%d] shared memory dma_alloc_coherent failed\n",
368  current->pid);
369  return -ENOMEM;
370  }
371  dev_dbg(&sep->pdev->dev,
372  "[PID%d] shared_addr %zx bytes @%p (bus %llx)\n",
373  current->pid,
374  sep->shared_size, sep->shared_addr,
375  (unsigned long long)sep->shared_bus);
376  return 0;
377 }
378 
379 /**
380  * sep_unmap_and_free_shared_area - free the shared area
381  * @sep: security processor
382  */
383 static void sep_unmap_and_free_shared_area(struct sep_device *sep)
384 {
385  dma_free_coherent(&sep->pdev->dev, sep->shared_size,
386  sep->shared_addr, sep->shared_bus);
387 }
388 
389 #ifdef DEBUG
390 
391 /**
392  * sep_shared_bus_to_virt - convert bus to virtual addresses
393  * @sep: pointer to struct sep_device
394  * @bus_address: address to convert
395  *
396  * Returns the virtual address inside the shared area that
397  * corresponds to the given bus address.
398  */
399 static void *sep_shared_bus_to_virt(struct sep_device *sep,
400  dma_addr_t bus_address)
401 {
402  return sep->shared_addr + (bus_address - sep->shared_bus);
403 }
404 
405 #endif
406 
407 /**
408  * sep_open - device open method
409  * @inode: inode of SEP device
410  * @filp: file handle to SEP device
411  *
412  * Open method for the SEP device; called when userspace opens
413  * the SEP device node.
414  *
415  * Returns zero on success, otherwise an error code.
416  */
417 static int sep_open(struct inode *inode, struct file *filp)
418 {
419  struct sep_device *sep;
420  struct sep_private_data *priv;
421 
422  dev_dbg(&sep_dev->pdev->dev, "[PID%d] open\n", current->pid);
423 
424  if (filp->f_flags & O_NONBLOCK)
425  return -ENOTSUPP;
426 
427  /*
428  * Get the SEP device structure and use it for the
429  * private_data field in filp for other methods
430  */
431 
432  priv = kzalloc(sizeof(*priv), GFP_KERNEL);
433  if (!priv)
434  return -ENOMEM;
435 
436  sep = sep_dev;
437  priv->device = sep;
438  filp->private_data = priv;
439 
440  dev_dbg(&sep_dev->pdev->dev, "[PID%d] priv is 0x%p\n",
441  current->pid, priv);
442 
443  /* Anyone can open; locking takes place at transaction level */
444  return 0;
445 }
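/*
 * For orientation, a plausible userspace sequence against this device
 * (hypothetical sketch; the device node name and message contents are
 * assumptions, defined elsewhere by the driver and firmware protocol):
 *
 *	int fd = open("/dev/sep0", O_RDWR);	// sep_open(); O_NONBLOCK refused
 *	void *msg = mmap(NULL, area_size,	// sep_mmap() takes transaction
 *			PROT_READ | PROT_WRITE,	// ownership for this process
 *			MAP_SHARED, fd, 0);
 *	// build the command message in msg, then issue the
 *	// send-command ioctl to raise the doorbell interrupt
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);			// sep_poll() waits for the reply
 *	// read the reply back from msg
 *	close(fd);				// sep_release() ends the transaction
 */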
446 
447 /**
448  * sep_free_dma_table_data_handler - free DMA tables
449  * @sep: pointer to struct sep_device
450  * @dma_ctx: DMA context
451  *
452  * Handles the request to free the DMA tables for synchronic actions
453  */
454 static int sep_free_dma_table_data_handler(struct sep_device *sep,
455  struct sep_dma_context **dma_ctx)
456 {
457  int count;
458  int dcb_counter;
459  /* Pointer to the current dma_resource struct */
460  struct sep_dma_resource *dma;
461 
462  dev_dbg(&sep->pdev->dev,
463  "[PID%d] sep_free_dma_table_data_handler\n",
464  current->pid);
465 
466  if (!dma_ctx || !(*dma_ctx)) {
467  /* No context or context already freed */
468  dev_dbg(&sep->pdev->dev,
469  "[PID%d] no DMA context or context already freed\n",
470  current->pid);
471 
472  return 0;
473  }
474 
475  dev_dbg(&sep->pdev->dev, "[PID%d] (*dma_ctx)->nr_dcb_creat 0x%x\n",
476  current->pid,
477  (*dma_ctx)->nr_dcb_creat);
478 
479  for (dcb_counter = 0;
480  dcb_counter < (*dma_ctx)->nr_dcb_creat; dcb_counter++) {
481  dma = &(*dma_ctx)->dma_res_arr[dcb_counter];
482 
483  /* Unmap and free input map array */
484  if (dma->in_map_array) {
485  for (count = 0; count < dma->in_num_pages; count++) {
486  dma_unmap_page(&sep->pdev->dev,
487  dma->in_map_array[count].dma_addr,
488  dma->in_map_array[count].size,
489  DMA_TO_DEVICE);
490  }
491  kfree(dma->in_map_array);
492  }
493 
494  /*
495  * Output is handled differently. If
496  * this was a secure dma into restricted memory,
497  * then we skip this step altogether as restricted
498  * memory is not available to the o/s at all.
499  */
500  if (((*dma_ctx)->secure_dma == false) &&
501  (dma->out_map_array)) {
502 
503  for (count = 0; count < dma->out_num_pages; count++) {
504  dma_unmap_page(&sep->pdev->dev,
505  dma->out_map_array[count].dma_addr,
506  dma->out_map_array[count].size,
507  DMA_FROM_DEVICE);
508  }
509  kfree(dma->out_map_array);
510  }
511 
512  /* Free page cache for input */
513  if (dma->in_page_array) {
514  for (count = 0; count < dma->in_num_pages; count++) {
515  flush_dcache_page(dma->in_page_array[count]);
516  page_cache_release(dma->in_page_array[count]);
517  }
518  kfree(dma->in_page_array);
519  }
520 
521  /* Again, we do this only for non secure dma */
522  if (((*dma_ctx)->secure_dma == false) &&
523  (dma->out_page_array)) {
524 
525  for (count = 0; count < dma->out_num_pages; count++) {
526  if (!PageReserved(dma->out_page_array[count]))
527 
528  SetPageDirty(dma->
529  out_page_array[count]);
530 
531  flush_dcache_page(dma->out_page_array[count]);
532  page_cache_release(dma->out_page_array[count]);
533  }
534  kfree(dma->out_page_array);
535  }
536 
537  /*
538  * Note that here we use in_map_num_entries because we
539  * don't have a page array; the page array is generated
540  * only in lock_user_pages, which is not called
541  * for kernel crypto, which is what the sg (scatter gather)
542  * is used for exclusively.
543  */
544  if (dma->src_sg) {
545  dma_unmap_sg(&sep->pdev->dev, dma->src_sg,
546  dma->in_map_num_entries, DMA_TO_DEVICE);
547  dma->src_sg = NULL;
548  }
549 
550  if (dma->dst_sg) {
551  dma_unmap_sg(&sep->pdev->dev, dma->dst_sg,
552  dma->out_map_num_entries, DMA_FROM_DEVICE);
553  dma->dst_sg = NULL;
554  }
555 
556  /* Reset all the values */
557  dma->in_page_array = NULL;
558  dma->out_page_array = NULL;
559  dma->in_num_pages = 0;
560  dma->out_num_pages = 0;
561  dma->in_map_array = NULL;
562  dma->out_map_array = NULL;
563  dma->in_map_num_entries = 0;
564  dma->out_map_num_entries = 0;
565  }
566 
567  (*dma_ctx)->nr_dcb_creat = 0;
568  (*dma_ctx)->num_lli_tables_created = 0;
569 
570  kfree(*dma_ctx);
571  *dma_ctx = NULL;
572 
573  dev_dbg(&sep->pdev->dev,
574  "[PID%d] sep_free_dma_table_data_handler end\n",
575  current->pid);
576 
577  return 0;
578 }
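/*
 * The teardown order above is deliberate: pages are dma_unmap_page()d
 * before being released; output pages are marked dirty (unless
 * reserved) so writeback preserves data the SEP produced; and
 * flush_dcache_page() is called before page_cache_release() drops the
 * reference taken by get_user_pages(). Scatterlists, used only by the
 * kernel-crypto path, are unmapped with dma_unmap_sg() instead.
 */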
579 
580 /**
581  * sep_end_transaction_handler - end transaction
582  * @sep: pointer to struct sep_device
583  * @dma_ctx: DMA context
584  * @call_status: call status
585  * @my_queue_elem: transaction's status queue element
586  * This API handles the end transaction request
587  */
588 static int sep_end_transaction_handler(struct sep_device *sep,
589  struct sep_dma_context **dma_ctx,
590  struct sep_call_status *call_status,
591  struct sep_queue_info **my_queue_elem)
592 {
593  dev_dbg(&sep->pdev->dev, "[PID%d] ending transaction\n", current->pid);
594 
595  /*
596  * Extraneous transaction clearing would mess up PM
597  * device usage counters and SEP would get suspended
598  * just before we send a command to SEP in the next
599  * transaction
600  */
601  if (sep_check_transaction_owner(sep)) {
602  dev_dbg(&sep->pdev->dev, "[PID%d] not transaction owner\n",
603  current->pid);
604  return 0;
605  }
606 
607  /* Update queue status */
608  sep_queue_status_remove(sep, my_queue_elem);
609 
610  /* Check that all the DMA resources were freed */
611  if (dma_ctx)
612  sep_free_dma_table_data_handler(sep, dma_ctx);
613 
614  /* Reset call status for next transaction */
615  if (call_status)
616  call_status->status = 0;
617 
618  /* Clear the message area to avoid next transaction reading
619  * sensitive results from previous transaction */
620  memset(sep->shared_addr, 0,
621  SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
622 
623  /* start suspend delay */
624 #ifdef SEP_ENABLE_RUNTIME_PM
625  if (sep->in_use) {
626  sep->in_use = 0;
627  pm_runtime_mark_last_busy(&sep->pdev->dev);
628  pm_runtime_put_autosuspend(&sep->pdev->dev);
629  }
630 #endif
631 
632  clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
633  sep->pid_doing_transaction = 0;
634 
635  /* Now it's safe for next process to proceed */
636  dev_dbg(&sep->pdev->dev, "[PID%d] waking up next transaction\n",
637  current->pid);
638  clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT, &sep->in_use_flags);
639  wake_up(&sep->transactions_wait_queue);
640 
641  return 0;
642 }
643 
644 
645 /**
646  * sep_release - close a SEP device
647  * @inode: inode of SEP device
648  * @filp: file handle being closed
649  *
650  * Called on the final close of a SEP device.
651  */
652 static int sep_release(struct inode *inode, struct file *filp)
653 {
654  struct sep_private_data * const private_data = filp->private_data;
655  struct sep_call_status *call_status = &private_data->call_status;
656  struct sep_device *sep = private_data->device;
657  struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
658  struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
659 
660  dev_dbg(&sep->pdev->dev, "[PID%d] release\n", current->pid);
661 
662  sep_end_transaction_handler(sep, dma_ctx, call_status,
663  my_queue_elem);
664 
665  kfree(filp->private_data);
666 
667  return 0;
668 }
669 
670 /**
671  * sep_mmap - maps the shared area to user space
672  * @filp: pointer to struct file
673  * @vma: pointer to vm_area_struct
674  *
675  * Called on an mmap of our space via the normal SEP device
676  */
677 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
678 {
679  struct sep_private_data * const private_data = filp->private_data;
680  struct sep_call_status *call_status = &private_data->call_status;
681  struct sep_device *sep = private_data->device;
682  struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
683  dma_addr_t bus_addr;
684  unsigned long error = 0;
685 
686  dev_dbg(&sep->pdev->dev, "[PID%d] sep_mmap\n", current->pid);
687 
688  /* Set the transaction busy (own the device) */
689  /*
690  * Problem for multithreaded applications is that here we're
691  * possibly going to sleep while holding a write lock on
692  * current->mm->mmap_sem, which will cause deadlock for ongoing
693  * transaction trying to create DMA tables
694  */
695  error = sep_wait_transaction(sep);
696  if (error)
697  /* Interrupted by signal, don't clear transaction */
698  goto end_function;
699 
700  /* Clear the message area to avoid next transaction reading
701  * sensitive results from previous transaction */
702  memset(sep->shared_addr, 0,
703  SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
704 
705  /*
706  * Check that the size of the mapped range does not exceed the size
707  * of the message shared area
708  */
709  if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
710  error = -EINVAL;
711  goto end_function_with_error;
712  }
713 
714  dev_dbg(&sep->pdev->dev, "[PID%d] shared_addr is %p\n",
715  current->pid, sep->shared_addr);
716 
717  /* Get bus address */
718  bus_addr = sep->shared_bus;
719 
720  if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
721  vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
722  dev_dbg(&sep->pdev->dev, "[PID%d] remap_pfn_range failed\n",
723  current->pid);
724  error = -EAGAIN;
725  goto end_function_with_error;
726  }
727 
728  /* Update call status */
729  set_bit(SEP_LEGACY_MMAP_DONE_OFFSET, &call_status->status);
730 
731  goto end_function;
732 
733 end_function_with_error:
734  /* Clear our transaction */
735  sep_end_transaction_handler(sep, NULL, call_status,
736  my_queue_elem);
737 
738 end_function:
739  return error;
740 }
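/*
 * Note on the remap above: remap_pfn_range() takes a page frame
 * number rather than an address, hence bus_addr >> PAGE_SHIFT. The
 * whole dma_alloc_coherent() region is mapped into the caller's
 * address space, so userspace and the SEP exchange messages through
 * the same physical buffer without copies.
 */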
741 
742 /**
743  * sep_poll - poll handler
744  * @filp: pointer to struct file
745  * @wait: poll table
746  *
747  * Called by the file_operations poll method; checks whether the
748  * SEP has completed the current transaction.
749  */
750 static unsigned int sep_poll(struct file *filp, poll_table *wait)
751 {
752  struct sep_private_data * const private_data = filp->private_data;
753  struct sep_call_status *call_status = &private_data->call_status;
754  struct sep_device *sep = private_data->device;
755  u32 mask = 0;
756  u32 retval = 0;
757  u32 retval2 = 0;
758  unsigned long lock_irq_flag;
759 
760  /* Am I the process that owns the transaction? */
761  if (sep_check_transaction_owner(sep)) {
762  dev_dbg(&sep->pdev->dev, "[PID%d] poll pid not owner\n",
763  current->pid);
764  mask = POLLERR;
765  goto end_function;
766  }
767 
768  /* Check if send command or send_reply were activated previously */
769  if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
770  &call_status->status)) {
771  dev_warn(&sep->pdev->dev, "[PID%d] sendmsg not called\n",
772  current->pid);
773  mask = POLLERR;
774  goto end_function;
775  }
776 
777 
778  /* Add the event to the polling wait table */
779  dev_dbg(&sep->pdev->dev, "[PID%d] poll: calling wait sep_event\n",
780  current->pid);
781 
782  poll_wait(filp, &sep->event_interrupt, wait);
783 
784  dev_dbg(&sep->pdev->dev,
785  "[PID%d] poll: send_ct is %lx reply ct is %lx\n",
786  current->pid, sep->send_ct, sep->reply_ct);
787 
788  /* Check if error occurred during poll */
789  retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
790  if ((retval2 != 0x0) && (retval2 != 0x8)) {
791  dev_dbg(&sep->pdev->dev, "[PID%d] poll; poll error %x\n",
792  current->pid, retval2);
793  mask |= POLLERR;
794  goto end_function;
795  }
796 
797  spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
798 
799  if (sep->send_ct == sep->reply_ct) {
800  spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
801  retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
802  dev_dbg(&sep->pdev->dev,
803  "[PID%d] poll: data ready check (GPR2) %x\n",
804  current->pid, retval);
805 
806  /* Check if printf request */
807  if ((retval >> 30) & 0x1) {
808  dev_dbg(&sep->pdev->dev,
809  "[PID%d] poll: SEP printf request\n",
810  current->pid);
811  goto end_function;
812  }
813 
814  /* Check if this is a SEP reply or a request */
815  if (retval >> 31) {
816  dev_dbg(&sep->pdev->dev,
817  "[PID%d] poll: SEP request\n",
818  current->pid);
819  } else {
820  dev_dbg(&sep->pdev->dev,
821  "[PID%d] poll: normal return\n",
822  current->pid);
823  sep_dump_message(sep);
824  dev_dbg(&sep->pdev->dev,
825  "[PID%d] poll; SEP reply POLLIN|POLLRDNORM\n",
826  current->pid);
827  mask |= POLLIN | POLLRDNORM;
828  }
829  }
830  } else {
831  spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
832  dev_dbg(&sep->pdev->dev,
833  "[PID%d] poll; no reply; returning mask of 0\n",
834  current->pid);
835  mask = 0;
836  }
837 
838 end_function:
839  return mask;
840 }
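/*
 * Sketch of the GPR status decoding applied above (bit positions
 * inferred from the shifts in this function):
 *
 *	u32 gpr2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
 *	if ((gpr2 >> 30) & 0x1)
 *		;	// bit 30: SEP printf request, not a reply
 *	else if (gpr2 >> 31)
 *		;	// bit 31: SEP-initiated request
 *	else
 *		;	// normal reply: report POLLIN | POLLRDNORM
 *
 * GPR3 is read first, and any value other than 0x0 or 0x8 is treated
 * as an error, yielding POLLERR.
 */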
841 
842 /**
843  * sep_time_address - address in SEP memory of time
844  * @sep: SEP device we want the address from
845  *
846  * Return the address of the two dwords in memory used for time
847  * setting.
848  */
849 static u32 *sep_time_address(struct sep_device *sep)
850 {
851  return sep->shared_addr +
852  SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
853 }
854 
855 /**
856  * sep_set_time - set the SEP time
857  * @sep: the SEP we are setting the time for
858  *
859  * Calculates the time and sets it at the predefined address.
860  * Called with the SEP mutex held.
861  */
862 static unsigned long sep_set_time(struct sep_device *sep)
863 {
864  struct timeval time;
865  u32 *time_addr; /* Address of time as seen by the kernel */
866 
867 
868  do_gettimeofday(&time);
869 
870  /* Set value in the SYSTEM MEMORY offset */
871  time_addr = sep_time_address(sep);
872 
873  time_addr[0] = SEP_TIME_VAL_TOKEN;
874  time_addr[1] = time.tv_sec;
875 
876  dev_dbg(&sep->pdev->dev, "[PID%d] time.tv_sec is %lu\n",
877  current->pid, time.tv_sec);
878  dev_dbg(&sep->pdev->dev, "[PID%d] time_addr is %p\n",
879  current->pid, time_addr);
880  dev_dbg(&sep->pdev->dev, "[PID%d] sep->shared_addr is %p\n",
881  current->pid, sep->shared_addr);
882 
883  return time.tv_sec;
884 }
885 
886 /**
887  * sep_send_command_handler - kick off a command
888  * @sep: SEP being signalled
889  *
890  * This function raises the interrupt to SEP that signals that it has
891  * a new command from the host
892  *
893  * Note that this function does fall under the ioctl lock
894  */
895 int sep_send_command_handler(struct sep_device *sep)
896 {
897  unsigned long lock_irq_flag;
898  u32 *msg_pool;
899  int error = 0;
900 
901  /* Basic sanity check; set msg pool to start of shared area */
902  msg_pool = (u32 *)sep->shared_addr;
903  msg_pool += 2;
904 
905  /* Look for start msg token */
906  if (*msg_pool != SEP_START_MSG_TOKEN) {
907  dev_warn(&sep->pdev->dev, "start message token not present\n");
908  error = -EPROTO;
909  goto end_function;
910  }
911 
912  /* Do we have a reasonable size? */
913  msg_pool += 1;
914  if ((*msg_pool < 2) ||
915  (*msg_pool > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)) {
916 
917  dev_warn(&sep->pdev->dev, "invalid message size\n");
918  error = -EPROTO;
919  goto end_function;
920  }
921 
922  /* Does the command look reasonable? */
923  msg_pool += 1;
924  if (*msg_pool < 2) {
925  dev_warn(&sep->pdev->dev, "invalid message opcode\n");
926  error = -EPROTO;
927  goto end_function;
928  }
929 
930 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
931  dev_dbg(&sep->pdev->dev, "[PID%d] before pm sync status 0x%X\n",
932  current->pid,
933  sep->pdev->dev.power.runtime_status);
934  sep->in_use = 1; /* device is about to be used */
935  pm_runtime_get_sync(&sep->pdev->dev);
936 #endif
937 
938  if (test_and_set_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags)) {
939  error = -EPROTO;
940  goto end_function;
941  }
942  sep->in_use = 1; /* device is about to be used */
943  sep_set_time(sep);
944 
945  sep_dump_message(sep);
946 
947  /* Update counter */
948  spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
949  sep->send_ct++;
950  spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
951 
952  dev_dbg(&sep->pdev->dev,
953  "[PID%d] sep_send_command_handler send_ct %lx reply_ct %lx\n",
954  current->pid, sep->send_ct, sep->reply_ct);
955 
956  /* Send interrupt to SEP */
957  sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
958 
959 end_function:
960  return error;
961 }
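/*
 * Layout of the shared-area message header validated above, as
 * implied by the pointer arithmetic (32-bit words; "msg_pool += 2"
 * skips the first two words without checking them):
 *
 *	word 0-1: reserved (not validated here)
 *	word 2:   SEP_START_MSG_TOKEN, the start-of-message marker
 *	word 3:   message size in bytes, must be >= 2 and
 *	          <= SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES
 *	word 4:   opcode, must be >= 2
 *
 * Only after these checks pass does the driver write GPR0 to
 * interrupt the SEP.
 */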
962 
963 /**
964  * sep_crypto_dma - DMA map a scatterlist for the SEP
965  * @sep: pointer to struct sep_device
966  * @sg: pointer to struct scatterlist
967  * @dma_maps: pointer to place a pointer to array of dma maps
968  * This is filled in; anything previously there will be lost
969  * The structure for dma maps is sep_dma_map
970  * @direction: DMA direction
971  *
972  * Returns the number of dma maps on success; negative on error
973  *
974  * This creates the dma maps from the scatterlist.
975  * It is used only for kernel crypto, as it works with the
976  * scatterlist representation of data buffers
977  */
978 static int sep_crypto_dma(
979  struct sep_device *sep,
980  struct scatterlist *sg,
981  struct sep_dma_map **dma_maps,
982  enum dma_data_direction direction)
983 {
984  struct scatterlist *temp_sg;
985 
986  u32 count_segment;
987  u32 count_mapped;
988  struct sep_dma_map *sep_dma;
989  int ct1;
990 
991  if (sg->length == 0)
992  return 0;
993 
994  /* Count the segments */
995  temp_sg = sg;
996  count_segment = 0;
997  while (temp_sg) {
998  count_segment += 1;
999  temp_sg = scatterwalk_sg_next(temp_sg);
1000  }
1001  dev_dbg(&sep->pdev->dev,
1002  "There are (hex) %x segments in sg\n", count_segment);
1003 
1004  /* DMA map segments */
1005  count_mapped = dma_map_sg(&sep->pdev->dev, sg,
1006  count_segment, direction);
1007 
1008  dev_dbg(&sep->pdev->dev,
1009  "There are (hex) %x maps in sg\n", count_mapped);
1010 
1011  if (count_mapped == 0) {
1012  dev_dbg(&sep->pdev->dev, "Cannot dma_map_sg\n");
1013  return -ENOMEM;
1014  }
1015 
1016  sep_dma = kmalloc(sizeof(struct sep_dma_map) *
1017  count_mapped, GFP_ATOMIC);
1018 
1019  if (sep_dma == NULL) {
1020  dev_dbg(&sep->pdev->dev, "Cannot allocate dma_maps\n");
1021  return -ENOMEM;
1022  }
1023 
1024  for_each_sg(sg, temp_sg, count_mapped, ct1) {
1025  sep_dma[ct1].dma_addr = sg_dma_address(temp_sg);
1026  sep_dma[ct1].size = sg_dma_len(temp_sg);
1027  dev_dbg(&sep->pdev->dev, "(all hex) map %x dma %lx len %lx\n",
1028  ct1, (unsigned long)sep_dma[ct1].dma_addr,
1029  (unsigned long)sep_dma[ct1].size);
1030  }
1031 
1032  *dma_maps = sep_dma;
1033  return count_mapped;
1034 
1035 }
1036 
1037 /**
1038  * sep_crypto_lli - get the lli array for kernel crypto
1039  * @sep: pointer to struct sep_device
1040  * @sg: pointer to struct scatterlist
1041  * @maps: pointer to place a pointer to array of dma maps
1042  * This is filled in; anything previously there will be lost
1043  * The structure for dma maps is sep_dma_map
1044  * @llis: pointer to place a pointer to array of lli entries
1045  * This is filled in; anything previously there will be lost
1046  * The structure for lli entries is sep_lli_entry
1047  * @data_size: total data size
1048  * @direction: DMA direction
1049  *
1050  * Returns the number of entries in maps and llis on success;
1051  * negative on error
1052  *
1053  * This creates the LLI array and the DMA maps from a
1054  * scatterlist; used only for kernel crypto operation
1055  */
1056 static int sep_crypto_lli(
1057  struct sep_device *sep,
1058  struct scatterlist *sg,
1059  struct sep_dma_map **maps,
1060  struct sep_lli_entry **llis,
1061  u32 data_size,
1062  enum dma_data_direction direction)
1063 {
1064 
1065  int ct1;
1066  struct sep_lli_entry *sep_lli;
1067  struct sep_dma_map *sep_map;
1068 
1069  int nbr_ents;
1070 
1071  nbr_ents = sep_crypto_dma(sep, sg, maps, direction);
1072  if (nbr_ents <= 0) {
1073  dev_dbg(&sep->pdev->dev, "crypto_dma failed %x\n",
1074  nbr_ents);
1075  return nbr_ents;
1076  }
1077 
1078  sep_map = *maps;
1079 
1080  sep_lli = kmalloc(sizeof(struct sep_lli_entry) * nbr_ents, GFP_ATOMIC);
1081 
1082  if (sep_lli == NULL) {
1083  dev_dbg(&sep->pdev->dev, "Cannot allocate lli_maps\n");
1084 
1085  kfree(*maps);
1086  *maps = NULL;
1087  return -ENOMEM;
1088  }
1089 
1090  for (ct1 = 0; ct1 < nbr_ents; ct1 += 1) {
1091  sep_lli[ct1].bus_address = (u32)sep_map[ct1].dma_addr;
1092 
1093  /* Maximum for page is total data size */
1094  if (sep_map[ct1].size > data_size)
1095  sep_map[ct1].size = data_size;
1096 
1097  sep_lli[ct1].block_size = (u32)sep_map[ct1].size;
1098  }
1099 
1100  *llis = sep_lli;
1101  return nbr_ents;
1102 }
1103 
1104 /**
1105  * sep_lock_kernel_pages - map kernel pages for DMA
1106  * @sep: pointer to struct sep_device
1107  * @kernel_virt_addr: address of the data buffer in kernel
1108  * @data_size: size of data
1109  * @lli_array_ptr: lli array
1110  * @in_out_flag: input into device or output from device
1111  * @dma_ctx: DMA context for the transaction
1112  *
1113  * This function maps the physical pages of the kernel virtual
1114  * buffer and constructs a basic lli array, where each entry holds
1115  * the physical page address and the size that the data
1116  * occupies in that physical page
1117  *
1118  * Note that all bus addresses that are passed to the SEP
1119  * are in 32 bit format; the SEP is a 32 bit device
1120  */
1121 static int sep_lock_kernel_pages(struct sep_device *sep,
1123  unsigned long kernel_virt_addr,
1124  u32 data_size,
1125  struct sep_lli_entry **lli_array_ptr,
1126  int in_out_flag,
1127  struct sep_dma_context *dma_ctx)
1128 
1129 {
1130  u32 num_pages;
1131  struct scatterlist *sg;
1132 
1133  /* Array of lli */
1134  struct sep_lli_entry *lli_array;
1135  /* Map array */
1136  struct sep_dma_map *map_array;
1137 
1138  enum dma_data_direction direction;
1139 
1140  lli_array = NULL;
1141  map_array = NULL;
1142 
1143  if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1144  direction = DMA_TO_DEVICE;
1145  sg = dma_ctx->src_sg;
1146  } else {
1147  direction = DMA_FROM_DEVICE;
1148  sg = dma_ctx->dst_sg;
1149  }
1150 
1151  num_pages = sep_crypto_lli(sep, sg, &map_array, &lli_array,
1152  data_size, direction);
1153 
1154  if (num_pages <= 0) {
1155  dev_dbg(&sep->pdev->dev, "sep_crypto_lli returned error %x\n",
1156  num_pages);
1157  return -ENOMEM;
1158  }
1159 
1160  /* Put mapped kernel sg into kernel resource array */
1161 
1162  /* Set output params according to the in_out flag */
1163  if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1164  *lli_array_ptr = lli_array;
1165  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1166  num_pages;
1167  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1168  NULL;
1169  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1170  map_array;
1171  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1172  num_pages;
1173  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg =
1174  dma_ctx->src_sg;
1175  } else {
1176  *lli_array_ptr = lli_array;
1177  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1178  num_pages;
1179  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1180  NULL;
1181  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1182  map_array;
1183  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1184  out_map_num_entries = num_pages;
1185  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg =
1186  dma_ctx->dst_sg;
1187  }
1188 
1189  return 0;
1190 }
1191 
1192 /**
1193  * sep_lock_user_pages - lock and map user pages for DMA
1194  * @sep: pointer to struct sep_device
1195  * @app_virt_addr: user memory data buffer
1196  * @data_size: size of data
1197  * @lli_array_ptr: lli array
1198  * @in_out_flag: input into device or output from device
1199  * @dma_ctx: DMA context for the transaction
1200  *
1201  * This function locks all the physical pages of the application
1202  * virtual buffer and constructs a basic lli array, where each entry
1203  * holds the physical page address and the size of data in that page
1204  */
1205 static int sep_lock_user_pages(struct sep_device *sep,
1206  u32 app_virt_addr,
1207  u32 data_size,
1208  struct sep_lli_entry **lli_array_ptr,
1209  int in_out_flag,
1210  struct sep_dma_context *dma_ctx)
1211 
1212 {
1213  int error = 0;
1214  u32 count;
1215  int result;
1216  /* The page of the end address of the user space buffer */
1217  u32 end_page;
1218  /* The page of the start address of the user space buffer */
1219  u32 start_page;
1220  /* The range in pages */
1221  u32 num_pages;
1222  /* Array of pointers to page */
1223  struct page **page_array;
1224  /* Array of lli */
1225  struct sep_lli_entry *lli_array;
1226  /* Map array */
1227  struct sep_dma_map *map_array;
1228 
1229  /* Set start and end pages and num pages */
1230  end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1231  start_page = app_virt_addr >> PAGE_SHIFT;
1232  num_pages = end_page - start_page + 1;
1233 
1234  dev_dbg(&sep->pdev->dev,
1235  "[PID%d] lock user pages app_virt_addr is %x\n",
1236  current->pid, app_virt_addr);
1237 
1238  dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1239  current->pid, data_size);
1240  dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1241  current->pid, start_page);
1242  dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1243  current->pid, end_page);
1244  dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1245  current->pid, num_pages);
1246 
1247  /* Allocate array of pages structure pointers */
1248  page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
1249  if (!page_array) {
1250  error = -ENOMEM;
1251  goto end_function;
1252  }
1253  map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
1254  if (!map_array) {
1255  dev_warn(&sep->pdev->dev,
1256  "[PID%d] kmalloc for map_array failed\n",
1257  current->pid);
1258  error = -ENOMEM;
1259  goto end_function_with_error1;
1260  }
1261 
1262  lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
1263  GFP_ATOMIC);
1264 
1265  if (!lli_array) {
1266  dev_warn(&sep->pdev->dev,
1267  "[PID%d] kmalloc for lli_array failed\n",
1268  current->pid);
1269  error = -ENOMEM;
1270  goto end_function_with_error2;
1271  }
1272 
1273  /* Convert the application virtual address into a set of physical pages */
1274  down_read(&current->mm->mmap_sem);
1275  result = get_user_pages(current, current->mm, app_virt_addr,
1276  num_pages,
1277  ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
1278  0, page_array, NULL);
1279 
1280  up_read(&current->mm->mmap_sem);
1281 
1282  /* Check the number of pages locked - if not all then exit with error */
1283  if (result != num_pages) {
1284  dev_warn(&sep->pdev->dev,
1285  "[PID%d] not all pages locked by get_user_pages, "
1286  "result 0x%X, num_pages 0x%X\n",
1287  current->pid, result, num_pages);
1288  error = -ENOMEM;
1289  goto end_function_with_error3;
1290  }
1291 
1292  dev_dbg(&sep->pdev->dev, "[PID%d] get_user_pages succeeded\n",
1293  current->pid);
1294 
1295  /*
1296  * Fill the array using page array data and
1297  * map the pages - this action will also flush the cache as needed
1298  */
1299  for (count = 0; count < num_pages; count++) {
1300  /* Fill the map array */
1301  map_array[count].dma_addr =
1302  dma_map_page(&sep->pdev->dev, page_array[count],
1303  0, PAGE_SIZE, DMA_BIDIRECTIONAL);
1304 
1305  map_array[count].size = PAGE_SIZE;
1306 
1307  /* Fill the lli array entry */
1308  lli_array[count].bus_address = (u32)map_array[count].dma_addr;
1309  lli_array[count].block_size = PAGE_SIZE;
1310 
1311  dev_dbg(&sep->pdev->dev,
1312  "[PID%d] lli_array[%x].bus_address is %08lx, "
1313  "lli_array[%x].block_size is (hex) %x\n", current->pid,
1314  count, (unsigned long)lli_array[count].bus_address,
1315  count, lli_array[count].block_size);
1316  }
1317 
1318  /* Check the offset for the first page */
1319  lli_array[0].bus_address =
1320  lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1321 
1322  /* Check that not all the data is in the first page only */
1323  if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1324  lli_array[0].block_size = data_size;
1325  else
1326  lli_array[0].block_size =
1327  PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1328 
1329  dev_dbg(&sep->pdev->dev,
1330  "[PID%d] After check if page 0 has all data\n",
1331  current->pid);
1332  dev_dbg(&sep->pdev->dev,
1333  "[PID%d] lli_array[0].bus_address is (hex) %08lx, "
1334  "lli_array[0].block_size is (hex) %x\n",
1335  current->pid,
1336  (unsigned long)lli_array[0].bus_address,
1337  lli_array[0].block_size);
1338 
1339 
1340  /* Check the size of the last page */
1341  if (num_pages > 1) {
1342  lli_array[num_pages - 1].block_size =
1343  (app_virt_addr + data_size) & (~PAGE_MASK);
1344  if (lli_array[num_pages - 1].block_size == 0)
1345  lli_array[num_pages - 1].block_size = PAGE_SIZE;
1346 
1347  dev_dbg(&sep->pdev->dev,
1348  "[PID%d] After last page size adjustment\n",
1349  current->pid);
1350  dev_dbg(&sep->pdev->dev,
1351  "[PID%d] lli_array[%x].bus_address is (hex) %08lx, "
1352  "lli_array[%x].block_size is (hex) %x\n",
1353  current->pid,
1354  num_pages - 1,
1355  (unsigned long)lli_array[num_pages - 1].bus_address,
1356  num_pages - 1,
1357  lli_array[num_pages - 1].block_size);
1358  }
1359 
1360  /* Set output params according to the in_out flag */
1361  if (in_out_flag == SEP_DRIVER_IN_FLAG) {
1362  *lli_array_ptr = lli_array;
1363  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages =
1364  num_pages;
1365  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array =
1366  page_array;
1367  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array =
1368  map_array;
1369  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_num_entries =
1370  num_pages;
1371  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].src_sg = NULL;
1372  } else {
1373  *lli_array_ptr = lli_array;
1374  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages =
1375  num_pages;
1376  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array =
1377  page_array;
1378  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array =
1379  map_array;
1380  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
1381  out_map_num_entries = num_pages;
1382  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].dst_sg = NULL;
1383  }
1384  goto end_function;
1385 
1386 end_function_with_error3:
1387  /* Free lli array */
1388  kfree(lli_array);
1389 
1390 end_function_with_error2:
1391  kfree(map_array);
1392 
1393 end_function_with_error1:
1394  /* Free page array */
1395  kfree(page_array);
1396 
1397 end_function:
1398  return error;
1399 }
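/*
 * Worked example of the first/last page adjustments above, assuming
 * PAGE_SIZE is 4096 (0x1000): for app_virt_addr = 0x10F00 and
 * data_size = 0x300, start_page = 0x10 and end_page = 0x11, so two
 * pages are pinned. The first entry gains the in-page offset 0xF00
 * and its block size becomes 0x1000 - 0xF00 = 0x100; the last entry's
 * block size becomes (0x10F00 + 0x300) & ~PAGE_MASK = 0x200; and
 * 0x100 + 0x200 = 0x300 = data_size, as required.
 */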
1400 
1401 /**
1402  * sep_lli_table_secure_dma - get the lli array for secure dma
1403  * @sep: pointer to struct sep_device
1404  * @app_virt_addr: user memory data buffer
1405  * @data_size: size of data
1406  * @lli_array_ptr: lli array
1407  * @in_out_flag: not used
1408  * @dma_ctx: pointer to struct sep_dma_context
1409  *
1410  * This function creates lli tables for outputting data to
1411  * restricted memory, which is memory that cannot be accessed by
1412  * the o/s; it is only accessible to the SEP and the output device
1413  */
1414 static int sep_lli_table_secure_dma(struct sep_device *sep,
1415  u32 app_virt_addr,
1416  u32 data_size,
1417  struct sep_lli_entry **lli_array_ptr,
1418  int in_out_flag,
1419  struct sep_dma_context *dma_ctx)
1420 
1421 {
1422  int error = 0;
1423  u32 count;
1424  /* The page of the end address of the user space buffer */
1425  u32 end_page;
1426  /* The page of the start address of the user space buffer */
1427  u32 start_page;
1428  /* The range in pages */
1429  u32 num_pages;
1430  /* Array of lli */
1431  struct sep_lli_entry *lli_array;
1432 
1433  /* Set start and end pages and num pages */
1434  end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1435  start_page = app_virt_addr >> PAGE_SHIFT;
1436  num_pages = end_page - start_page + 1;
1437 
1438  dev_dbg(&sep->pdev->dev,
1439  "[PID%d] lock user pages app_virt_addr is %x\n",
1440  current->pid, app_virt_addr);
1441 
1442  dev_dbg(&sep->pdev->dev, "[PID%d] data_size is (hex) %x\n",
1443  current->pid, data_size);
1444  dev_dbg(&sep->pdev->dev, "[PID%d] start_page is (hex) %x\n",
1445  current->pid, start_page);
1446  dev_dbg(&sep->pdev->dev, "[PID%d] end_page is (hex) %x\n",
1447  current->pid, end_page);
1448  dev_dbg(&sep->pdev->dev, "[PID%d] num_pages is (hex) %x\n",
1449  current->pid, num_pages);
1450 
1451  lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
1452  GFP_ATOMIC);
1453 
1454  if (!lli_array) {
1455  dev_warn(&sep->pdev->dev,
1456  "[PID%d] kmalloc for lli_array failed\n",
1457  current->pid);
1458  return -ENOMEM;
1459  }
1460 
1461  /*
1462  * Fill the lli_array
1463  */
1464  start_page = start_page << PAGE_SHIFT;
1465  for (count = 0; count < num_pages; count++) {
1466  /* Fill the lli array entry */
1467  lli_array[count].bus_address = start_page;
1468  lli_array[count].block_size = PAGE_SIZE;
1469 
1470  start_page += PAGE_SIZE;
1471 
1472  dev_dbg(&sep->pdev->dev,
1473  "[PID%d] lli_array[%x].bus_address is %08lx, "
1474  "lli_array[%x].block_size is (hex) %x\n",
1475  current->pid,
1476  count, (unsigned long)lli_array[count].bus_address,
1477  count, lli_array[count].block_size);
1478  }
1479 
1480  /* Check the offset for the first page */
1481  lli_array[0].bus_address =
1482  lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));
1483 
1484  /* Check that not all the data is in the first page only */
1485  if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1486  lli_array[0].block_size = data_size;
1487  else
1488  lli_array[0].block_size =
1489  PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1490 
1491  dev_dbg(&sep->pdev->dev,
1492  "[PID%d] After check if page 0 has all data\n"
1493  "lli_array[0].bus_address is (hex) %08lx, "
1494  "lli_array[0].block_size is (hex) %x\n",
1495  current->pid,
1496  (unsigned long)lli_array[0].bus_address,
1497  lli_array[0].block_size);
1498 
1499  /* Check the size of the last page */
1500  if (num_pages > 1) {
1501  lli_array[num_pages - 1].block_size =
1502  (app_virt_addr + data_size) & (~PAGE_MASK);
1503  if (lli_array[num_pages - 1].block_size == 0)
1504  lli_array[num_pages - 1].block_size = PAGE_SIZE;
1505 
1506  dev_dbg(&sep->pdev->dev,
1507  "[PID%d] After last page size adjustment\n"
1508  "lli_array[%x].bus_address is (hex) %08lx, "
1509  "lli_array[%x].block_size is (hex) %x\n",
1510  current->pid, num_pages - 1,
1511  (unsigned long)lli_array[num_pages - 1].bus_address,
1512  num_pages - 1,
1513  lli_array[num_pages - 1].block_size);
1514  }
1515  *lli_array_ptr = lli_array;
1516  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages = num_pages;
1517  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
1518  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
1519  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_num_entries = 0;
1520 
1521  return error;
1522 }
1523 
1524 /**
1525  * sep_calculate_lli_table_max_size - size the LLI table
1526  * @sep: pointer to struct sep_device
1527  * @lli_in_array_ptr: LLI entry array pointer
1528  * @num_array_entries: number of entries in the LLI array
1529  * @last_table_flag: set to 1 if this is the last DMA table
1530  *
1531  * This function calculates the size of data that can be inserted
1532  * into the lli table from this array, such that either the table
1533  * is full (all entries are entered), or there are no more entries
1534  * left in the lli array
1535  */
1536 static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
1537  struct sep_lli_entry *lli_in_array_ptr,
1538  u32 num_array_entries,
1539  u32 *last_table_flag)
1540 {
1541  u32 counter;
1542  /* Table data size */
1543  u32 table_data_size = 0;
1544  /* Data size for the next table */
1545  u32 next_table_data_size;
1546 
1547  *last_table_flag = 0;
1548 
1549  /*
1550  * Calculate the data in the out lli table till we fill the whole
1551  * table or till the data has ended
1552  */
1553  for (counter = 0;
1554  (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
1555  (counter < num_array_entries); counter++)
1556  table_data_size += lli_in_array_ptr[counter].block_size;
1557 
1558  /*
1559  * Check if we reached the last entry,
1560  * meaning this is the last table to build,
1561  * and no need to check the block alignment
1562  */
1563  if (counter == num_array_entries) {
1564  /* Set the last table flag */
1565  *last_table_flag = 1;
1566  goto end_function;
1567  }
1568 
1569  /*
1570  * Calculate the data size of the next table.
1571  * Stop if no entries left or if data size is more the DMA restriction
1572  */
1573  next_table_data_size = 0;
1574  for (; counter < num_array_entries; counter++) {
1575  next_table_data_size += lli_in_array_ptr[counter].block_size;
1576  if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1577  break;
1578  }
1579 
1580  /*
1581  * Check if the next table data size is less than the DMA restriction.
1582  * If it is, recalculate the current table size, so that the next
1583  * table data size will be adequate for DMA
1584  */
1585  if (next_table_data_size &&
1586  next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
1587 
1588  table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
1589  next_table_data_size);
1590 
1591 end_function:
1592  return table_data_size;
1593 }
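/*
 * Example of the sizing rule above (values are illustrative only):
 * suppose the current table could take 0x5000 bytes, only 0x20 bytes
 * of lli data would remain for the next table, and
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE were 0x40. The function would
 * return 0x5000 - (0x40 - 0x20) = 0x4FE0, deliberately leaving 0x40
 * bytes behind so the next table still meets the DMA minimum.
 */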
1594 
1595 /**
1596  * sep_build_lli_table - build an lli table for the given array
1597  * @sep: pointer to struct sep_device
1598  * @lli_array_ptr: pointer to lli array
1599  * @lli_table_ptr: pointer to lli table
1600  * @num_processed_entries_ptr: pointer to number of entries processed
1601  * @num_table_entries_ptr: pointer to number of table entries
1602  * @table_data_size: total data size
1603  *
1604  * Builds an lli table from the lli_array according to
1605  * the given size of data
1606  */
1607 static void sep_build_lli_table(struct sep_device *sep,
1608  struct sep_lli_entry *lli_array_ptr,
1609  struct sep_lli_entry *lli_table_ptr,
1610  u32 *num_processed_entries_ptr,
1611  u32 *num_table_entries_ptr,
1612  u32 table_data_size)
1613 {
1614  /* Current table data size */
1615  u32 curr_table_data_size;
1616  /* Counter of lli array entry */
1617  u32 array_counter;
1618 
1619  /* Init current table data size and lli array entry counter */
1620  curr_table_data_size = 0;
1621  array_counter = 0;
1622  *num_table_entries_ptr = 1;
1623 
1624  dev_dbg(&sep->pdev->dev,
1625  "[PID%d] build lli table table_data_size: (hex) %x\n",
1626  current->pid, table_data_size);
1627 
1628  /* Fill the table till table size reaches the needed amount */
1629  while (curr_table_data_size < table_data_size) {
1630  /* Update the number of entries in table */
1631  (*num_table_entries_ptr)++;
1632 
1633  lli_table_ptr->bus_address =
1634  cpu_to_le32(lli_array_ptr[array_counter].bus_address);
1635 
1636  lli_table_ptr->block_size =
1637  cpu_to_le32(lli_array_ptr[array_counter].block_size);
1638 
1639  curr_table_data_size += lli_array_ptr[array_counter].block_size;
1640 
1641  dev_dbg(&sep->pdev->dev,
1642  "[PID%d] lli_table_ptr is %p\n",
1643  current->pid, lli_table_ptr);
1644  dev_dbg(&sep->pdev->dev,
1645  "[PID%d] lli_table_ptr->bus_address: %08lx\n",
1646  current->pid,
1647  (unsigned long)lli_table_ptr->bus_address);
1648 
1649  dev_dbg(&sep->pdev->dev,
1650  "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1651  current->pid, lli_table_ptr->block_size);
1652 
1653  /* Check for overflow of the table data */
1654  if (curr_table_data_size > table_data_size) {
1655  dev_dbg(&sep->pdev->dev,
1656  "[PID%d] curr_table_data_size too large\n",
1657  current->pid);
1658 
1659  /* Update the size of block in the table */
1660  lli_table_ptr->block_size =
1661  cpu_to_le32(lli_table_ptr->block_size) -
1662  (curr_table_data_size - table_data_size);
1663 
1664  /* Update the physical address in the lli array */
1665  lli_array_ptr[array_counter].bus_address +=
1666  cpu_to_le32(lli_table_ptr->block_size);
1667 
1668  /* Update the block size left in the lli array */
1669  lli_array_ptr[array_counter].block_size =
1670  (curr_table_data_size - table_data_size);
1671  } else
1672  /* Advance to the next entry in the lli_array */
1673  array_counter++;
1674 
1675  dev_dbg(&sep->pdev->dev,
1676  "[PID%d] lli_table_ptr->bus_address is %08lx\n",
1677  current->pid,
1678  (unsigned long)lli_table_ptr->bus_address);
1679  dev_dbg(&sep->pdev->dev,
1680  "[PID%d] lli_table_ptr->block_size is (hex) %x\n",
1681  current->pid,
1682  lli_table_ptr->block_size);
1683 
1684  /* Move to the next entry in table */
1685  lli_table_ptr++;
1686  }
1687 
1688  /* Set the info entry to default */
1689  lli_table_ptr->bus_address = 0xffffffff;
1690  lli_table_ptr->block_size = 0;
1691 
1692  /* Set the output parameter */
1693  *num_processed_entries_ptr += array_counter;
1694 
1695 }
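/*
 * Each table built above ends in an "info entry" that callers later
 * patch to chain tables together: its bus_address is pointed at the
 * next table and its block_size packs that table's shape as
 *
 *	block_size = (num_entries << 24) | table_data_size;
 *
 * i.e. 8 bits of entry count and 24 bits of data size, matching the
 * "(block_size >> 24) & 0xff" and "block_size & 0xffffff" unpacking
 * in sep_debug_print_lli_tables().
 */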
1696 
1697 /**
1698  * sep_shared_area_virt_to_bus - map shared area to bus address
1699  * @sep: pointer to struct sep_device
1700  * @virt_address: virtual address to convert
1701  *
1702  * This function returns the bus address inside the shared area
1703  * according to the virtual address. It can be either on the
1704  * external RAM device (ioremapped), or on system RAM
1705  * This implementation is for the external RAM
1706  */
1707 static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
1708  void *virt_address)
1709 {
1710  dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys v %p\n",
1711  current->pid, virt_address);
1712  dev_dbg(&sep->pdev->dev, "[PID%d] sh virt to phys p %08lx\n",
1713  current->pid,
1714  (unsigned long)
1715  sep->shared_bus + (virt_address - sep->shared_addr));
1716 
1717  return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
1718 }
1719 
1720 /**
1721  * sep_shared_area_bus_to_virt - map shared area bus address to virtual
1722  * @sep: pointer to struct sep_device
1723  * @bus_address: bus address to convert
1724  *
1725  * This function returns the virtual address inside the shared area
1726  * according to the bus address. It can be either on the external
1727  * RAM device (ioremapped), or on system RAM
1728  * This implementation is for the external RAM
1729  */
1730 static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
1731  dma_addr_t bus_address)
1732 {
1733  dev_dbg(&sep->pdev->dev, "[PID%d] shared bus to virt b=%lx v=%lx\n",
1734  current->pid,
1735  (unsigned long)bus_address, (unsigned long)(sep->shared_addr +
1736  (size_t)(bus_address - sep->shared_bus)));
1737 
1738  return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
1739 }
1740 
1741 /**
1742  * sep_debug_print_lli_tables - dump the LLI tables
1743  * @sep: pointer to struct sep_device
1744  * @lli_table_ptr: pointer to sep_lli_entry
1745  * @num_table_entries: number of entries
1746  * @table_data_size: total data size
1747  *
1748  * Walk the one or more chained tables and dump the entries (debug only)
1749  */
1750 static void sep_debug_print_lli_tables(struct sep_device *sep,
1751  struct sep_lli_entry *lli_table_ptr,
1752  unsigned long num_table_entries,
1753  unsigned long table_data_size)
1754 {
1755 #ifdef DEBUG
1756  unsigned long table_count = 1;
1757  unsigned long entries_count = 0;
1758 
1759  dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables start\n",
1760  current->pid);
1761  if (num_table_entries == 0) {
1762  dev_dbg(&sep->pdev->dev, "[PID%d] no table to print\n",
1763  current->pid);
1764  return;
1765  }
1766 
1767  while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) {
1768  dev_dbg(&sep->pdev->dev,
1769  "[PID%d] lli table %08lx, "
1770  "table_data_size is (hex) %lx\n",
1771  current->pid, table_count, table_data_size);
1772  dev_dbg(&sep->pdev->dev,
1773  "[PID%d] num_table_entries is (hex) %lx\n",
1774  current->pid, num_table_entries);
1775 
1776  /* Print entries of the table (without info entry) */
1777  for (entries_count = 0; entries_count < num_table_entries;
1778  entries_count++, lli_table_ptr++) {
1779 
1780  dev_dbg(&sep->pdev->dev,
1781  "[PID%d] lli_table_ptr address is %08lx\n",
1782  current->pid,
1783  (unsigned long) lli_table_ptr);
1784 
1785  dev_dbg(&sep->pdev->dev,
1786  "[PID%d] phys address is %08lx "
1787  "block size is (hex) %x\n", current->pid,
1788  (unsigned long)lli_table_ptr->bus_address,
1789  lli_table_ptr->block_size);
1790  }
1791 
1792  /* Point to the info entry */
1793  lli_table_ptr--;
1794 
1795  dev_dbg(&sep->pdev->dev,
1796  "[PID%d] phys lli_table_ptr->block_size "
1797  "is (hex) %x\n",
1798  current->pid,
1799  lli_table_ptr->block_size);
1800 
1801  dev_dbg(&sep->pdev->dev,
1802  "[PID%d] phys lli_table_ptr->physical_address "
1803  "is %08lx\n",
1804  current->pid,
1805  (unsigned long)lli_table_ptr->bus_address);
1806 
1807 
1808  table_data_size = lli_table_ptr->block_size & 0xffffff;
1809  num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1810 
1811  dev_dbg(&sep->pdev->dev,
1812  "[PID%d] phys table_data_size is "
1813  "(hex) %lx num_table_entries is"
1814  " %lx bus_address is%lx\n",
1815  current->pid,
1816  table_data_size,
1817  num_table_entries,
1818  (unsigned long)lli_table_ptr->bus_address);
1819 
1820  if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff)
1821  lli_table_ptr = (struct sep_lli_entry *)
1822  sep_shared_bus_to_virt(sep,
1823  (unsigned long)lli_table_ptr->bus_address);
1824 
1825  table_count++;
1826  }
1827  dev_dbg(&sep->pdev->dev, "[PID%d] sep_debug_print_lli_tables end\n",
1828  current->pid);
1829 #endif
1830 }
1831 
1832 
1833 /**
1834  * sep_prepare_empty_lli_table - create a blank LLI table
1835  * @sep: pointer to struct sep_device
1836  * @lli_table_addr_ptr: pointer to the lli table bus address
1837  * @num_entries_ptr: pointer to number of entries
1838  * @table_data_size_ptr: pointer to table data size
1839  * @dmatables_region: will hold the pointer to the dma tables region
1840  * @dma_ctx: DMA context
1841  *
1842  * This function creates an empty lli table when there is no data
1843  */
1844 static void sep_prepare_empty_lli_table(struct sep_device *sep,
1845  dma_addr_t *lli_table_addr_ptr,
1846  u32 *num_entries_ptr,
1847  u32 *table_data_size_ptr,
1848  void **dmatables_region,
1849  struct sep_dma_context *dma_ctx)
1850 {
1851  struct sep_lli_entry *lli_table_ptr;
1852 
1853  /* Find the area for new table */
1854  lli_table_ptr =
1855  (struct sep_lli_entry *)(sep->shared_addr +
1856  SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
1857  dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1858  SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1859 
1860  if (dmatables_region && *dmatables_region)
1861  lli_table_ptr = *dmatables_region;
1862 
1863  lli_table_ptr->bus_address = 0;
1864  lli_table_ptr->block_size = 0;
1865 
1866  lli_table_ptr++;
1867  lli_table_ptr->bus_address = 0xFFFFFFFF;
1868  lli_table_ptr->block_size = 0;
1869 
1870  /* Set the output parameter value */
1871  *lli_table_addr_ptr = sep->shared_bus +
1872  SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
1873  dma_ctx->num_lli_tables_created *
1874  sizeof(struct sep_lli_entry) *
1875  SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1876 
1877  /* Set the num of entries and table data size for empty table */
1878  *num_entries_ptr = 2;
1879  *table_data_size_ptr = 0;
1880 
1881  /* Update the number of created tables */
1882  dma_ctx->num_lli_tables_created++;
1883 }
1884 
1885 /**
1886  * sep_prepare_input_dma_table - prepare the input DMA table
1887  * @sep: pointer to struct sep_device
1888  * @app_virt_addr: virtual address of the input buffer
1889  * @data_size: size of data
1890  * @block_size: block size of the cipher
1891  * @lli_table_ptr: returns the bus address of the first LLI table
1892  * @num_entries_ptr: number of entries in the first table
1893  * @table_data_size_ptr: data size of the first table
1894  * @is_kva: set for kernel virtual addresses (kernel crypto)
1895  * @dmatables_region: optional buffer for the DMA tables
1896  * @dma_ctx: DMA context for the transaction
1897  *
1898  * This function prepares only an input DMA table for synchronic operations
1899  */
1900 static int sep_prepare_input_dma_table(struct sep_device *sep,
1901  unsigned long app_virt_addr,
1902  u32 data_size,
1903  u32 block_size,
1904  dma_addr_t *lli_table_ptr,
1905  u32 *num_entries_ptr,
1906  u32 *table_data_size_ptr,
1907  bool is_kva,
1908  void **dmatables_region,
1909  struct sep_dma_context *dma_ctx
1910 )
1911 {
1912  int error = 0;
1913  /* Pointer to the info entry of the table - the last entry */
1914  struct sep_lli_entry *info_entry_ptr;
1915  /* Array of pointers to page */
1916  struct sep_lli_entry *lli_array_ptr;
1917  /* Points to the first entry to be processed in the lli_in_array */
1918  u32 current_entry = 0;
1919  /* Num entries in the virtual buffer */
1920  u32 sep_lli_entries = 0;
1921  /* Lli table pointer */
1922  struct sep_lli_entry *in_lli_table_ptr;
1923  /* The total data in one table */
1924  u32 table_data_size = 0;
1925  /* Flag for last table */
1926  u32 last_table_flag = 0;
1927  /* Number of entries in lli table */
1928  u32 num_entries_in_table = 0;
1929  /* Next table address */
1930  void *lli_table_alloc_addr = NULL;
1931  void *dma_lli_table_alloc_addr = NULL;
1932  void *dma_in_lli_table_ptr = NULL;
1933 
1934  dev_dbg(&sep->pdev->dev,
1935  "[PID%d] prepare intput dma tbl data size: (hex) %x\n",
1936  current->pid, data_size);
1937 
1938  dev_dbg(&sep->pdev->dev, "[PID%d] block_size is (hex) %x\n",
1939  current->pid, block_size);
1940 
1941  /* Initialize the pages pointers */
1942  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
1943  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages = 0;
1944 
1945  /* Set the kernel address for first table to be allocated */
1946  lli_table_alloc_addr = (void *)(sep->shared_addr +
1947  SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
1948  dma_ctx->num_lli_tables_created * sizeof(struct sep_lli_entry) *
1949  SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1950 
1951  if (data_size == 0) {
1952  if (dmatables_region) {
1953  error = sep_allocate_dmatables_region(sep,
1954  dmatables_region,
1955  dma_ctx,
1956  1);
1957  if (error)
1958  return error;
1959  }
1960  /* Special case - create empty table - 2 entries, zero data */
1961  sep_prepare_empty_lli_table(sep, lli_table_ptr,
1962  num_entries_ptr, table_data_size_ptr,
1963  dmatables_region, dma_ctx);
1964  goto update_dcb_counter;
1965  }
1966 
1967  /* Check if the pages are in Kernel Virtual Address layout */
1968  if (is_kva == true)
1969  error = sep_lock_kernel_pages(sep, app_virt_addr,
1970  data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1971  dma_ctx);
1972  else
1973  /*
1974  * Lock the pages of the user buffer
1975  * and translate them to pages
1976  */
1977  error = sep_lock_user_pages(sep, app_virt_addr,
1978  data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG,
1979  dma_ctx);
1980 
1981  if (error)
1982  goto end_function;
1983 
1984  dev_dbg(&sep->pdev->dev,
1985  "[PID%d] output sep_in_num_pages is (hex) %x\n",
1986  current->pid,
1987  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
1988 
1989  current_entry = 0;
1990  info_entry_ptr = NULL;
1991 
1992  sep_lli_entries =
1993  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages;
1994 
1995  dma_lli_table_alloc_addr = lli_table_alloc_addr;
1996  if (dmatables_region) {
1997  error = sep_allocate_dmatables_region(sep,
1998  dmatables_region,
1999  dma_ctx,
2000  sep_lli_entries);
2001  if (error)
2002  return error;
2003  lli_table_alloc_addr = *dmatables_region;
2004  }
2005 
2006  /* Loop till all the entries in the input array are processed */
2007  while (current_entry < sep_lli_entries) {
2008 
2009  /* Set the new input and output tables */
2010  in_lli_table_ptr =
2011  (struct sep_lli_entry *)lli_table_alloc_addr;
2012  dma_in_lli_table_ptr =
2013  (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2014 
2015  lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2016  SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2017  dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2018  SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2019 
2020  if (dma_lli_table_alloc_addr >
2021  ((void *)sep->shared_addr +
2022  SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
2023  SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2024 
2025  error = -ENOMEM;
2026  goto end_function_error;
2027 
2028  }
2029 
2030  /* Update the number of created tables */
2031  dma_ctx->num_lli_tables_created++;
2032 
2033  /* Calculate the maximum size of data for input table */
2034  table_data_size = sep_calculate_lli_table_max_size(sep,
2035  &lli_array_ptr[current_entry],
2036  (sep_lli_entries - current_entry),
2037  &last_table_flag);
2038 
2039  /*
2040  * If this is not the last table -
2041  * then align it to the block size
2042  */
2043  if (!last_table_flag)
2044  table_data_size =
2045  (table_data_size / block_size) * block_size;
2046 
2047  dev_dbg(&sep->pdev->dev,
2048  "[PID%d] output table_data_size is (hex) %x\n",
2049  current->pid,
2050  table_data_size);
2051 
2052  /* Construct input lli table */
2053  sep_build_lli_table(sep, &lli_array_ptr[current_entry],
2054  in_lli_table_ptr,
2055  &current_entry, &num_entries_in_table, table_data_size);
2056 
2057  if (info_entry_ptr == NULL) {
2058 
2059  /* Set the output parameters to physical addresses */
2060  *lli_table_ptr = sep_shared_area_virt_to_bus(sep,
2061  dma_in_lli_table_ptr);
2062  *num_entries_ptr = num_entries_in_table;
2063  *table_data_size_ptr = table_data_size;
2064 
2065  dev_dbg(&sep->pdev->dev,
2066  "[PID%d] output lli_table_in_ptr is %08lx\n",
2067  current->pid,
2068  (unsigned long)*lli_table_ptr);
2069 
2070  } else {
2071  /* Update the info entry of the previous in table */
2072  info_entry_ptr->bus_address =
2073  sep_shared_area_virt_to_bus(sep,
2074  dma_in_lli_table_ptr);
2075  info_entry_ptr->block_size =
2076  ((num_entries_in_table) << 24) |
2077  (table_data_size);
2078  }
2079  /* Save the pointer to the info entry of the current tables */
2080  info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
2081  }
2082  /* Print input tables */
2083  if (!dmatables_region) {
2084  sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
2085  sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
2086  *num_entries_ptr, *table_data_size_ptr);
2087  }
2088 
2089  /* Free the lli array built from the pages */
2090  kfree(lli_array_ptr);
2091 
2092 update_dcb_counter:
2093  /* Update DCB counter */
2094  dma_ctx->nr_dcb_creat++;
2095  goto end_function;
2096 
2097 end_function_error:
2098  /* Free all the allocated resources */
2099  kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2100  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2101  kfree(lli_array_ptr);
2102  kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2103  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2104 
2105 end_function:
2106  return error;
2107 
2108 }
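/*
 * Summary of the loop above: tables are carved consecutively out of
 * the shared area (or out of the caller-supplied dmatables_region);
 * the first table's bus address, entry count and data size are
 * returned to the caller, and each subsequent table is linked in by
 * rewriting the previous table's info entry. The same scheme is
 * extended to paired input/output tables by
 * sep_construct_dma_tables_from_lli() below.
 */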
2109 
2110 /**
2111  * sep_construct_dma_tables_from_lli - prepare input/output DMA tables
2112  * @sep: pointer to struct sep_device
2113  * @lli_in_array: lli array of the input buffer
2114  * @sep_in_lli_entries: number of entries in lli_in_array
2115  * @lli_out_array: lli array of the output buffer
2116  * @sep_out_lli_entries: number of entries in lli_out_array
2117  * @block_size: block size of the cipher
2118  * @lli_table_in_ptr: bus address of the first input LLI table
2119  * @lli_table_out_ptr: bus address of the first output LLI table
2120  * @in_num_entries_ptr: number of entries in the first input table
2121  * @out_num_entries_ptr: number of entries in the first output table
2122  * @table_data_size_ptr: data size of the first input/output table
2123  * @dmatables_region: optional buffer for the DMA tables
2124  * @dma_ctx: DMA context for the transaction
2125  *
2126  * This function creates the input and output DMA tables for
2127  * symmetric operations (AES/DES) according to the block
2128  * size from the LLI arrays
2129  */
2130 static int sep_construct_dma_tables_from_lli(
2131  struct sep_device *sep,
2132  struct sep_lli_entry *lli_in_array,
2133  u32 sep_in_lli_entries,
2134  struct sep_lli_entry *lli_out_array,
2135  u32 sep_out_lli_entries,
2136  u32 block_size,
2137  dma_addr_t *lli_table_in_ptr,
2138  dma_addr_t *lli_table_out_ptr,
2139  u32 *in_num_entries_ptr,
2140  u32 *out_num_entries_ptr,
2141  u32 *table_data_size_ptr,
2142  void **dmatables_region,
2143  struct sep_dma_context *dma_ctx)
2144 {
2145  /* Points to the area where next lli table can be allocated */
2146  void *lli_table_alloc_addr = NULL;
2147  /*
2148  * Points to the area in shared region where next lli table
2149  * can be allocated
2150  */
2151  void *dma_lli_table_alloc_addr = NULL;
2152  /* Input lli table in dmatables_region or shared region */
2153  struct sep_lli_entry *in_lli_table_ptr = NULL;
2154  /* Input lli table location in the shared region */
2155  struct sep_lli_entry *dma_in_lli_table_ptr = NULL;
2156  /* Output lli table in dmatables_region or shared region */
2157  struct sep_lli_entry *out_lli_table_ptr = NULL;
2158  /* Output lli table location in the shared region */
2159  struct sep_lli_entry *dma_out_lli_table_ptr = NULL;
2160  /* Pointer to the info entry of the table - the last entry */
2161  struct sep_lli_entry *info_in_entry_ptr = NULL;
2162  /* Pointer to the info entry of the table - the last entry */
2163  struct sep_lli_entry *info_out_entry_ptr = NULL;
2164  /* Points to the first entry to be processed in the lli_in_array */
2165  u32 current_in_entry = 0;
2166  /* Points to the first entry to be processed in the lli_out_array */
2167  u32 current_out_entry = 0;
2168  /* Max size of the input table */
2169  u32 in_table_data_size = 0;
2170  /* Max size of the output table */
2171  u32 out_table_data_size = 0;
2172  /* Flag that signifies if this is the last table being built */
2173  u32 last_table_flag = 0;
2174  /* The data size that should be in table */
2175  u32 table_data_size = 0;
2176  /* Number of entries in the input table */
2177  u32 num_entries_in_table = 0;
2178  /* Number of entries in the output table */
2179  u32 num_entries_out_table = 0;
2180 
2181  if (!dma_ctx) {
2182  dev_warn(&sep->pdev->dev, "DMA context uninitialized\n");
2183  return -EINVAL;
2184  }
2185 
2186  /* Initiate to point after the message area */
2187  lli_table_alloc_addr = (void *)(sep->shared_addr +
2188  SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
2189  (dma_ctx->num_lli_tables_created *
2190  (sizeof(struct sep_lli_entry) *
2191  SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
2192  dma_lli_table_alloc_addr = lli_table_alloc_addr;
2193 
2194  if (dmatables_region) {
2195  /* 2 for both in+out table */
2196  if (sep_allocate_dmatables_region(sep,
2197  dmatables_region,
2198  dma_ctx,
2199  2*sep_in_lli_entries))
2200  return -ENOMEM;
2201  lli_table_alloc_addr = *dmatables_region;
2202  }
2203 
2204  /* Loop until all the entries in the input array are processed */
2205  while (current_in_entry < sep_in_lli_entries) {
2206  /* Set the new input and output tables */
2207  in_lli_table_ptr =
2208  (struct sep_lli_entry *)lli_table_alloc_addr;
2209  dma_in_lli_table_ptr =
2210  (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2211 
2212  lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2213  SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2214  dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2215  SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2216 
2217  /* Set the first output tables */
2218  out_lli_table_ptr =
2219  (struct sep_lli_entry *)lli_table_alloc_addr;
2220  dma_out_lli_table_ptr =
2221  (struct sep_lli_entry *)dma_lli_table_alloc_addr;
2222 
2223  /* Check if the DMA table area limit was overrun */
2224  if ((dma_lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
2225  SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
2226  ((void *)sep->shared_addr +
2227  SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES +
2228  SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
2229 
2230  dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
2231  return -ENOMEM;
2232  }
2233 
2234  /* Update the number of the lli tables created */
2235  dma_ctx->num_lli_tables_created += 2;
2236 
2237  lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2238  SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2239  dma_lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
2240  SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
2241 
2242  /* Calculate the maximum size of data for input table */
2243  in_table_data_size =
2244  sep_calculate_lli_table_max_size(sep,
2245  &lli_in_array[current_in_entry],
2246  (sep_in_lli_entries - current_in_entry),
2247  &last_table_flag);
2248 
2249  /* Calculate the maximum size of data for output table */
2250  out_table_data_size =
2251  sep_calculate_lli_table_max_size(sep,
2252  &lli_out_array[current_out_entry],
2253  (sep_out_lli_entries - current_out_entry),
2254  &last_table_flag);
2255 
2256  if (!last_table_flag) {
2257  in_table_data_size = (in_table_data_size /
2258  block_size) * block_size;
2259  out_table_data_size = (out_table_data_size /
2260  block_size) * block_size;
2261  }
2262 
2263  table_data_size = in_table_data_size;
2264  if (table_data_size > out_table_data_size)
2265  table_data_size = out_table_data_size;
2266 
2267  dev_dbg(&sep->pdev->dev,
2268  "[PID%d] construct tables from lli"
2269  " in_table_data_size is (hex) %x\n", current->pid,
2270  in_table_data_size);
2271 
2272  dev_dbg(&sep->pdev->dev,
2273  "[PID%d] construct tables from lli"
2274  "out_table_data_size is (hex) %x\n", current->pid,
2275  out_table_data_size);
2276 
2277  /* Construct input lli table */
2278  sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2279  in_lli_table_ptr,
2280  &current_in_entry,
2281  &num_entries_in_table,
2282  table_data_size);
2283 
2284  /* Construct output lli table */
2285  sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2286  out_lli_table_ptr,
2287  &current_out_entry,
2288  &num_entries_out_table,
2289  table_data_size);
2290 
2291  /* If info entry is null - this is the first table built */
2292  if (info_in_entry_ptr == NULL) {
2293  /* Set the output parameters to physical addresses */
2294  *lli_table_in_ptr =
2295  sep_shared_area_virt_to_bus(sep, dma_in_lli_table_ptr);
2296 
2297  *in_num_entries_ptr = num_entries_in_table;
2298 
2299  *lli_table_out_ptr =
2300  sep_shared_area_virt_to_bus(sep,
2301  dma_out_lli_table_ptr);
2302 
2303  *out_num_entries_ptr = num_entries_out_table;
2304  *table_data_size_ptr = table_data_size;
2305 
2306  dev_dbg(&sep->pdev->dev,
2307  "[PID%d] output lli_table_in_ptr is %08lx\n",
2308  current->pid,
2309  (unsigned long)*lli_table_in_ptr);
2310  dev_dbg(&sep->pdev->dev,
2311  "[PID%d] output lli_table_out_ptr is %08lx\n",
2312  current->pid,
2313  (unsigned long)*lli_table_out_ptr);
2314  } else {
2315  /* Update the info entry of the previous in table */
2316  info_in_entry_ptr->bus_address =
2317  sep_shared_area_virt_to_bus(sep,
2318  dma_in_lli_table_ptr);
2319 
2320  info_in_entry_ptr->block_size =
2321  ((num_entries_in_table) << 24) |
2322  (table_data_size);
2323 
2324  /* Update the info entry of the previous out table */
2325  info_out_entry_ptr->bus_address =
2326  sep_shared_area_virt_to_bus(sep,
2327  dma_out_lli_table_ptr);
2328 
2329  info_out_entry_ptr->block_size =
2330  ((num_entries_out_table) << 24) |
2331  (table_data_size);
2332 
2333  dev_dbg(&sep->pdev->dev,
2334  "[PID%d] output lli_table_in_ptr:%08lx %08x\n",
2335  current->pid,
2336  (unsigned long)info_in_entry_ptr->bus_address,
2337  info_in_entry_ptr->block_size);
2338 
2339  dev_dbg(&sep->pdev->dev,
2340  "[PID%d] output lli_table_out_ptr:"
2341  "%08lx %08x\n",
2342  current->pid,
2343  (unsigned long)info_out_entry_ptr->bus_address,
2344  info_out_entry_ptr->block_size);
2345  }
2346 
2347  /* Save the pointer to the info entry of the current tables */
2348  info_in_entry_ptr = in_lli_table_ptr +
2349  num_entries_in_table - 1;
2350  info_out_entry_ptr = out_lli_table_ptr +
2351  num_entries_out_table - 1;
2352 
2353  dev_dbg(&sep->pdev->dev,
2354  "[PID%d] output num_entries_out_table is %x\n",
2355  current->pid,
2356  (u32)num_entries_out_table);
2357  dev_dbg(&sep->pdev->dev,
2358  "[PID%d] output info_in_entry_ptr is %lx\n",
2359  current->pid,
2360  (unsigned long)info_in_entry_ptr);
2361  dev_dbg(&sep->pdev->dev,
2362  "[PID%d] output info_out_entry_ptr is %lx\n",
2363  current->pid,
2364  (unsigned long)info_out_entry_ptr);
2365  }
2366 
2367  /* Print input tables */
2368  if (!dmatables_region) {
2369  sep_debug_print_lli_tables(
2370  sep,
2371  (struct sep_lli_entry *)
2372  sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2373  *in_num_entries_ptr,
2374  *table_data_size_ptr);
2375  }
2376 
2377  /* Print output tables */
2378  if (!dmatables_region) {
2379  sep_debug_print_lli_tables(
2380  sep,
2381  (struct sep_lli_entry *)
2382  sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2383  *out_num_entries_ptr,
2384  *table_data_size_ptr);
2385  }
2386 
2387  return 0;
2388 }
2389 
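The info entry at the end of each table doubles as a link: its bus_address points at the next table in the chain, and its block_size packs the next table's entry count into the top 8 bits and the data size into the low 24 bits. A minimal standalone sketch of that encoding; the helper names are illustrative, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Pack an info entry's block_size field: entry count of the linked
 * table in the top 8 bits (so at most 255 entries), data size in the
 * low 24 bits. */
static uint32_t pack_info_block_size(uint32_t num_entries, uint32_t data_size)
{
	return (num_entries << 24) | (data_size & 0x00FFFFFF);
}

static void unpack_info_block_size(uint32_t block_size,
				   uint32_t *num_entries, uint32_t *data_size)
{
	*num_entries = block_size >> 24;
	*data_size = block_size & 0x00FFFFFF;
}

int main(void)
{
	uint32_t packed = pack_info_block_size(8, 4096);
	uint32_t n, sz;

	unpack_info_block_size(packed, &n, &sz);
	printf("packed=0x%08x entries=%u size=%u\n", packed, n, sz);
	return 0;
}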
2409 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2410  unsigned long app_virt_in_addr,
2411  unsigned long app_virt_out_addr,
2412  u32 data_size,
2413  u32 block_size,
2414  dma_addr_t *lli_table_in_ptr,
2415  dma_addr_t *lli_table_out_ptr,
2416  u32 *in_num_entries_ptr,
2417  u32 *out_num_entries_ptr,
2418  u32 *table_data_size_ptr,
2419  bool is_kva,
2420  void **dmatables_region,
2421  struct sep_dma_context *dma_ctx)
2422 
2423 {
2424  int error = 0;
2425  /* Array of pointers of page */
2426  struct sep_lli_entry *lli_in_array;
2427  /* Array of pointers of page */
2428  struct sep_lli_entry *lli_out_array;
2429 
2430  if (!dma_ctx) {
2431  error = -EINVAL;
2432  goto end_function;
2433  }
2434 
2435  if (data_size == 0) {
2436  /* Prepare empty table for input and output */
2437  if (dmatables_region) {
2438  error = sep_allocate_dmatables_region(
2439  sep,
2440  dmatables_region,
2441  dma_ctx,
2442  2);
2443  if (error)
2444  goto end_function;
2445  }
2446  sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2447  in_num_entries_ptr, table_data_size_ptr,
2448  dmatables_region, dma_ctx);
2449 
2450  sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2451  out_num_entries_ptr, table_data_size_ptr,
2452  dmatables_region, dma_ctx);
2453 
2454  goto update_dcb_counter;
2455  }
2456 
2457  /* Initialize the pages pointers */
2458  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2459  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2460 
2461  /* Lock the pages of the buffer and translate them to pages */
2462  if (is_kva == true) {
2463  dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel input pages\n",
2464  current->pid);
2465  error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2466  data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2467  dma_ctx);
2468  if (error) {
2469  dev_warn(&sep->pdev->dev,
2470  "[PID%d] sep_lock_kernel_pages for input "
2471  "virtual buffer failed\n", current->pid);
2472 
2473  goto end_function;
2474  }
2475 
2476  dev_dbg(&sep->pdev->dev, "[PID%d] Locking kernel output pages\n",
2477  current->pid);
2478  error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2479  data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2480  dma_ctx);
2481 
2482  if (error) {
2483  dev_warn(&sep->pdev->dev,
2484  "[PID%d] sep_lock_kernel_pages for output "
2485  "virtual buffer failed\n", current->pid);
2486 
2487  goto end_function_free_lli_in;
2488  }
2489 
2490  }
2491 
2492  else {
2493  dev_dbg(&sep->pdev->dev, "[PID%d] Locking user input pages\n",
2494  current->pid);
2495  error = sep_lock_user_pages(sep, app_virt_in_addr,
2496  data_size, &lli_in_array, SEP_DRIVER_IN_FLAG,
2497  dma_ctx);
2498  if (error) {
2499  dev_warn(&sep->pdev->dev,
2500  "[PID%d] sep_lock_user_pages for input "
2501  "virtual buffer failed\n", current->pid);
2502 
2503  goto end_function;
2504  }
2505 
2506  if (dma_ctx->secure_dma == true) {
2507  /* secure_dma requires use of non accessible memory */
2508  dev_dbg(&sep->pdev->dev, "[PID%d] in secure_dma\n",
2509  current->pid);
2510  error = sep_lli_table_secure_dma(sep,
2511  app_virt_out_addr, data_size, &lli_out_array,
2512  SEP_DRIVER_OUT_FLAG, dma_ctx);
2513  if (error) {
2514  dev_warn(&sep->pdev->dev,
2515  "[PID%d] secure dma table setup "
2516  " for output virtual buffer failed\n",
2517  current->pid);
2518 
2519  goto end_function_free_lli_in;
2520  }
2521  } else {
2522  /* For normal, non-secure dma */
2523  dev_dbg(&sep->pdev->dev, "[PID%d] not in secure_dma\n",
2524  current->pid);
2525 
2526  dev_dbg(&sep->pdev->dev,
2527  "[PID%d] Locking user output pages\n",
2528  current->pid);
2529 
2530  error = sep_lock_user_pages(sep, app_virt_out_addr,
2531  data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG,
2532  dma_ctx);
2533 
2534  if (error) {
2535  dev_warn(&sep->pdev->dev,
2536  "[PID%d] sep_lock_user_pages"
2537  " for output virtual buffer failed\n",
2538  current->pid);
2539 
2540  goto end_function_free_lli_in;
2541  }
2542  }
2543  }
2544 
2545  dev_dbg(&sep->pdev->dev,
2546  "[PID%d] After lock; prep input output dma table sep_in_num_pages is (hex) %x\n",
2547  current->pid,
2548  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_num_pages);
2549 
2550  dev_dbg(&sep->pdev->dev, "[PID%d] sep_out_num_pages is (hex) %x\n",
2551  current->pid,
2552  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_num_pages);
2553 
2554  dev_dbg(&sep->pdev->dev,
2555  "[PID%d] SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is (hex) %x\n",
2556  current->pid, SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2557 
2558  /* Call the function that creates table from the lli arrays */
2559  dev_dbg(&sep->pdev->dev, "[PID%d] calling create table from lli\n",
2560  current->pid);
2561  error = sep_construct_dma_tables_from_lli(
2562  sep, lli_in_array,
2563  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2564  in_num_pages,
2565  lli_out_array,
2566  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].
2567  out_num_pages,
2568  block_size, lli_table_in_ptr, lli_table_out_ptr,
2569  in_num_entries_ptr, out_num_entries_ptr,
2570  table_data_size_ptr, dmatables_region, dma_ctx);
2571 
2572  if (error) {
2573  dev_warn(&sep->pdev->dev,
2574  "[PID%d] sep_construct_dma_tables_from_lli failed\n",
2575  current->pid);
2576  goto end_function_with_error;
2577  }
2578 
2579  kfree(lli_out_array);
2580  kfree(lli_in_array);
2581 
2582 update_dcb_counter:
2583  /* Update DCB counter */
2584  dma_ctx->nr_dcb_creat++;
2585 
2586  goto end_function;
2587 
2588 end_function_with_error:
2589  kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array);
2590  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_map_array = NULL;
2591  kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array);
2592  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].out_page_array = NULL;
2593  kfree(lli_out_array);
2594 
2595 
2596 end_function_free_lli_in:
2597  kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array);
2598  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_map_array = NULL;
2599  kfree(dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array);
2600  dma_ctx->dma_res_arr[dma_ctx->nr_dcb_creat].in_page_array = NULL;
2601  kfree(lli_in_array);
2602 
2603 end_function:
2604 
2605  return error;
2606 
2607 }
2608 
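The error paths in the function above unwind in reverse allocation order through a ladder of labels, each freeing only what was successfully set up before the failure. A condensed standalone sketch of the pattern, with illustrative names and errno-style details omitted:

#include <stdlib.h>

static int prepare_two_buffers(void **in_buf, void **out_buf)
{
	int error = 0;

	*in_buf = malloc(64);
	if (!*in_buf) {
		error = -1;
		goto end_function;
	}

	*out_buf = malloc(64);
	if (!*out_buf) {
		error = -1;
		goto end_function_free_in;	/* undo only the first step */
	}

	return 0;				/* success: caller owns both */

end_function_free_in:
	free(*in_buf);
	*in_buf = NULL;
end_function:
	return error;
}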
2626 int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2627  unsigned long app_in_address,
2628  unsigned long app_out_address,
2629  u32 data_in_size,
2630  u32 block_size,
2631  u32 tail_block_size,
2632  bool isapplet,
2633  bool is_kva,
2634  bool secure_dma,
2635  struct sep_dcblock *dcb_region,
2636  void **dmatables_region,
2637  struct sep_dma_context **dma_ctx,
2638  struct scatterlist *src_sg,
2639  struct scatterlist *dst_sg)
2640 {
2641  int error = 0;
2642  /* Size of tail */
2643  u32 tail_size = 0;
2644  /* Address of the created DCB table */
2645  struct sep_dcblock *dcb_table_ptr = NULL;
2646  /* The physical address of the first input DMA table */
2647  dma_addr_t in_first_mlli_address = 0;
2648  /* Number of entries in the first input DMA table */
2649  u32 in_first_num_entries = 0;
2650  /* The physical address of the first output DMA table */
2651  dma_addr_t out_first_mlli_address = 0;
2652  /* Number of entries in the first output DMA table */
2653  u32 out_first_num_entries = 0;
2654  /* Data in the first input/output table */
2655  u32 first_data_size = 0;
2656 
2657  dev_dbg(&sep->pdev->dev, "[PID%d] app_in_address %lx\n",
2658  current->pid, app_in_address);
2659 
2660  dev_dbg(&sep->pdev->dev, "[PID%d] app_out_address %lx\n",
2661  current->pid, app_out_address);
2662 
2663  dev_dbg(&sep->pdev->dev, "[PID%d] data_in_size %x\n",
2664  current->pid, data_in_size);
2665 
2666  dev_dbg(&sep->pdev->dev, "[PID%d] block_size %x\n",
2667  current->pid, block_size);
2668 
2669  dev_dbg(&sep->pdev->dev, "[PID%d] tail_block_size %x\n",
2670  current->pid, tail_block_size);
2671 
2672  dev_dbg(&sep->pdev->dev, "[PID%d] isapplet %x\n",
2673  current->pid, isapplet);
2674 
2675  dev_dbg(&sep->pdev->dev, "[PID%d] is_kva %x\n",
2676  current->pid, is_kva);
2677 
2678  dev_dbg(&sep->pdev->dev, "[PID%d] src_sg %p\n",
2679  current->pid, src_sg);
2680 
2681  dev_dbg(&sep->pdev->dev, "[PID%d] dst_sg %p\n",
2682  current->pid, dst_sg);
2683 
2684  if (!dma_ctx) {
2685  dev_warn(&sep->pdev->dev, "[PID%d] no DMA context pointer\n",
2686  current->pid);
2687  error = -EINVAL;
2688  goto end_function;
2689  }
2690 
2691  if (*dma_ctx) {
2692  /* In case there are multiple DCBs for this transaction */
2693  dev_dbg(&sep->pdev->dev, "[PID%d] DMA context already set\n",
2694  current->pid);
2695  } else {
2696  *dma_ctx = kzalloc(sizeof(**dma_ctx), GFP_KERNEL);
2697  if (!(*dma_ctx)) {
2698  dev_dbg(&sep->pdev->dev,
2699  "[PID%d] Not enough memory for DMA context\n",
2700  current->pid);
2701  error = -ENOMEM;
2702  goto end_function;
2703  }
2704  dev_dbg(&sep->pdev->dev,
2705  "[PID%d] Created DMA context addr at 0x%p\n",
2706  current->pid, *dma_ctx);
2707  }
2708 
2709  (*dma_ctx)->secure_dma = secure_dma;
2710 
2711  /* these are for kernel crypto only */
2712  (*dma_ctx)->src_sg = src_sg;
2713  (*dma_ctx)->dst_sg = dst_sg;
2714 
2715  if ((*dma_ctx)->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2716  /* No more DCBs to allocate */
2717  dev_dbg(&sep->pdev->dev, "[PID%d] no more DCBs available\n",
2718  current->pid);
2719  error = -ENOSPC;
2720  goto end_function_error;
2721  }
2722 
2723  /* Allocate new DCB */
2724  if (dcb_region) {
2725  dcb_table_ptr = dcb_region;
2726  } else {
2727  dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2728  SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2729  ((*dma_ctx)->nr_dcb_creat *
2730  sizeof(struct sep_dcblock)));
2731  }
2732 
2733  /* Set the default values in the DCB */
2734  dcb_table_ptr->input_mlli_address = 0;
2735  dcb_table_ptr->input_mlli_num_entries = 0;
2736  dcb_table_ptr->input_mlli_data_size = 0;
2737  dcb_table_ptr->output_mlli_address = 0;
2738  dcb_table_ptr->output_mlli_num_entries = 0;
2739  dcb_table_ptr->output_mlli_data_size = 0;
2740  dcb_table_ptr->tail_data_size = 0;
2741  dcb_table_ptr->out_vr_tail_pt = 0;
2742 
2743  if (isapplet == true) {
2744 
2745  /* Check if there is enough data for DMA operation */
2746  if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2747  if (is_kva == true) {
2748  error = -ENODEV;
2749  goto end_function_error;
2750  } else {
2751  if (copy_from_user(dcb_table_ptr->tail_data,
2752  (void __user *)app_in_address,
2753  data_in_size)) {
2754  error = -EFAULT;
2755  goto end_function_error;
2756  }
2757  }
2758 
2759  dcb_table_ptr->tail_data_size = data_in_size;
2760 
2761  /* Set the output user-space address for mem2mem op */
2762  if (app_out_address)
2763  dcb_table_ptr->out_vr_tail_pt =
2764  (aligned_u64)app_out_address;
2765 
2766  /*
2767  * Update both data length parameters in order to avoid
2768  * second data copy and allow building of empty mlli
2769  * tables
2770  */
2771  tail_size = 0x0;
2772  data_in_size = 0x0;
2773 
2774  } else {
2775  if (!app_out_address) {
2776  tail_size = data_in_size % block_size;
2777  if (!tail_size) {
2778  if (tail_block_size == block_size)
2779  tail_size = block_size;
2780  }
2781  } else {
2782  tail_size = 0;
2783  }
2784  }
2785  if (tail_size) {
2786  if (tail_size > sizeof(dcb_table_ptr->tail_data))
2787  return -EINVAL;
2788  if (is_kva == true) {
2789  error = -ENODEV;
2790  goto end_function_error;
2791  } else {
2792  /* We have tail data - copy it to DCB */
2793  if (copy_from_user(dcb_table_ptr->tail_data,
2794  (void __user *)(app_in_address +
2795  data_in_size - tail_size), tail_size)) {
2796  error = -EFAULT;
2797  goto end_function_error;
2798  }
2799  }
2800  if (app_out_address)
2801  /*
2802  * Calculate the output address
2803  * according to tail data size
2804  */
2805  dcb_table_ptr->out_vr_tail_pt =
2806  (aligned_u64)app_out_address +
2807  data_in_size - tail_size;
2808 
2809  /* Save the real tail data size */
2810  dcb_table_ptr->tail_data_size = tail_size;
2811  /*
2812  * Update the data size without the tail
2813  * data size AKA data for the dma
2814  */
2815  data_in_size = (data_in_size - tail_size);
2816  }
2817  }
2818  /* Check if we need to build only input table or input/output */
2819  if (app_out_address) {
2820  /* Prepare input/output tables */
2821  error = sep_prepare_input_output_dma_table(sep,
2822  app_in_address,
2823  app_out_address,
2824  data_in_size,
2825  block_size,
2826  &in_first_mlli_address,
2827  &out_first_mlli_address,
2828  &in_first_num_entries,
2829  &out_first_num_entries,
2830  &first_data_size,
2831  is_kva,
2832  dmatables_region,
2833  *dma_ctx);
2834  } else {
2835  /* Prepare input tables */
2836  error = sep_prepare_input_dma_table(sep,
2837  app_in_address,
2838  data_in_size,
2839  block_size,
2840  &in_first_mlli_address,
2841  &in_first_num_entries,
2842  &first_data_size,
2843  is_kva,
2844  dmatables_region,
2845  *dma_ctx);
2846  }
2847 
2848  if (error) {
2849  dev_warn(&sep->pdev->dev,
2850  "prepare DMA table call failed "
2851  "from prepare DCB call\n");
2852  goto end_function_error;
2853  }
2854 
2855  /* Set the DCB values */
2856  dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2857  dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2858  dcb_table_ptr->input_mlli_data_size = first_data_size;
2859  dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2860  dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2861  dcb_table_ptr->output_mlli_data_size = first_data_size;
2862 
2863  goto end_function;
2864 
2865 end_function_error:
2866  kfree(*dma_ctx);
2867  *dma_ctx = NULL;
2868 
2869 end_function:
2870  return error;
2871 
2872 }
2873 
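For applet transactions the function above peels off any partial trailing block and routes it through the DCB tail buffer instead of DMA. A standalone sketch of that split for the no-output-address branch; illustrative only, not driver code:

#include <stdio.h>

/* Return how many trailing bytes go through the DCB tail buffer. */
static unsigned int tail_split(unsigned int data_in_size,
			       unsigned int block_size,
			       unsigned int tail_block_size)
{
	unsigned int tail_size = data_in_size % block_size;

	/* Block-aligned data: still peel one whole block off when the
	 * requested tail block equals the cipher block size. */
	if (!tail_size && tail_block_size == block_size)
		tail_size = block_size;

	return tail_size;
}

int main(void)
{
	printf("%u\n", tail_split(100, 16, 16));	/* 4: remainder    */
	printf("%u\n", tail_split(96, 16, 16));		/* 16: whole block */
	printf("%u\n", tail_split(96, 16, 8));		/* 0: no tail      */
	return 0;
}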
2874 
2883 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2884  bool is_kva, struct sep_dma_context **dma_ctx)
2885 {
2886  struct sep_dcblock *dcb_table_ptr;
2887  unsigned long pt_hold;
2888  void *tail_pt;
2889 
2890  int i = 0;
2891  int error = 0;
2892  int error_temp = 0;
2893 
2894  dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb\n",
2895  current->pid);
2896 
2897  if (((*dma_ctx)->secure_dma == false) && (isapplet == true)) {
2898  dev_dbg(&sep->pdev->dev, "[PID%d] handling applet\n",
2899  current->pid);
2900 
2901  /* Tail data handling is only for non-secure_dma */
2902  /* Set pointer to first DCB table */
2903  dcb_table_ptr = (struct sep_dcblock *)
2904  (sep->shared_addr +
2905  SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2906 
2911  for (i = 0; dma_ctx && *dma_ctx &&
2912  i < (*dma_ctx)->nr_dcb_creat; i++, dcb_table_ptr++) {
2913  if (dcb_table_ptr->out_vr_tail_pt) {
2914  pt_hold = (unsigned long)dcb_table_ptr->
2915  out_vr_tail_pt;
2916  tail_pt = (void *)pt_hold;
2917  if (is_kva == true) {
2918  error = -ENODEV;
2919  break;
2920  } else {
2921  error_temp = copy_to_user(
2922  (void __user *)tail_pt,
2923  dcb_table_ptr->tail_data,
2924  dcb_table_ptr->tail_data_size);
2925  }
2926  if (error_temp) {
2927  /* Release the DMA resource */
2928  error = -EFAULT;
2929  break;
2930  }
2931  }
2932  }
2933  }
2934 
2935  /* Free the output pages, if any */
2936  sep_free_dma_table_data_handler(sep, dma_ctx);
2937 
2938  dev_dbg(&sep->pdev->dev, "[PID%d] sep_free_dma_tables_and_dcb end\n",
2939  current->pid);
2940 
2941  return error;
2942 }
2943 
2953 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg,
2954  bool secure_dma,
2955  struct sep_dma_context **dma_ctx)
2956 {
2957  int error;
2958  /* Command arguments */
2959  static struct build_dcb_struct command_args;
2960 
2961  /* Get the command arguments */
2962  if (copy_from_user(&command_args, (void __user *)arg,
2963  sizeof(struct build_dcb_struct))) {
2964  error = -EFAULT;
2965  goto end_function;
2966  }
2967 
2968  dev_dbg(&sep->pdev->dev,
2969  "[PID%d] prep dcb handler app_in_address is %08llx\n",
2970  current->pid, command_args.app_in_address);
2971  dev_dbg(&sep->pdev->dev,
2972  "[PID%d] app_out_address is %08llx\n",
2973  current->pid, command_args.app_out_address);
2974  dev_dbg(&sep->pdev->dev,
2975  "[PID%d] data_size is %x\n",
2976  current->pid, command_args.data_in_size);
2977  dev_dbg(&sep->pdev->dev,
2978  "[PID%d] block_size is %x\n",
2979  current->pid, command_args.block_size);
2980  dev_dbg(&sep->pdev->dev,
2981  "[PID%d] tail block_size is %x\n",
2982  current->pid, command_args.tail_block_size);
2983  dev_dbg(&sep->pdev->dev,
2984  "[PID%d] is_applet is %x\n",
2985  current->pid, command_args.is_applet);
2986 
2987  if (!command_args.app_in_address) {
2988  dev_warn(&sep->pdev->dev,
2989  "[PID%d] null app_in_address\n", current->pid);
2990  error = -EINVAL;
2991  goto end_function;
2992  }
2993 
2994  error = sep_prepare_input_output_dma_table_in_dcb(sep,
2995  (unsigned long)command_args.app_in_address,
2996  (unsigned long)command_args.app_out_address,
2997  command_args.data_in_size, command_args.block_size,
2998  command_args.tail_block_size,
2999  command_args.is_applet, false,
3000  secure_dma, NULL, NULL, dma_ctx, NULL, NULL);
3001 
3002 end_function:
3003  return error;
3004 
3005 }
3006 
3014 static int sep_free_dcb_handler(struct sep_device *sep,
3015  struct sep_dma_context **dma_ctx)
3016 {
3017  if (!dma_ctx || !(*dma_ctx)) {
3018  dev_dbg(&sep->pdev->dev,
3019  "[PID%d] no dma context defined, nothing to free\n",
3020  current->pid);
3021  return -EINVAL;
3022  }
3023 
3024  dev_dbg(&sep->pdev->dev, "[PID%d] free dcbs num of DCBs %x\n",
3025  current->pid,
3026  (*dma_ctx)->nr_dcb_creat);
3027 
3028  return sep_free_dma_tables_and_dcb(sep, false, false, dma_ctx);
3029 }
3030 
3039 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3040 {
3041  struct sep_private_data * const private_data = filp->private_data;
3042  struct sep_call_status *call_status = &private_data->call_status;
3043  struct sep_device *sep = private_data->device;
3044  struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3045  struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3046  int error = 0;
3047 
3048  dev_dbg(&sep->pdev->dev, "[PID%d] ioctl cmd 0x%x\n",
3049  current->pid, cmd);
3050  dev_dbg(&sep->pdev->dev, "[PID%d] dma context addr 0x%p\n",
3051  current->pid, *dma_ctx);
3052 
3053  /* Make sure we own this device */
3054  error = sep_check_transaction_owner(sep);
3055  if (error) {
3056  dev_dbg(&sep->pdev->dev, "[PID%d] ioctl pid is not owner\n",
3057  current->pid);
3058  goto end_function;
3059  }
3060 
3061  /* Check that sep_mmap has been called before */
3062  if (0 == test_bit(SEP_LEGACY_MMAP_DONE_OFFSET,
3063  &call_status->status)) {
3064  dev_dbg(&sep->pdev->dev,
3065  "[PID%d] mmap not called\n", current->pid);
3066  error = -EPROTO;
3067  goto end_function;
3068  }
3069 
3070  /* Check that the command is for SEP device */
3071  if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3072  error = -ENOTTY;
3073  goto end_function;
3074  }
3075 
3076  switch (cmd) {
3077  case SEP_IOCSENDSEPCOMMAND:
3078  dev_dbg(&sep->pdev->dev,
3079  "[PID%d] SEP_IOCSENDSEPCOMMAND start\n",
3080  current->pid);
3081  if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3082  &call_status->status)) {
3083  dev_warn(&sep->pdev->dev,
3084  "[PID%d] send msg already done\n",
3085  current->pid);
3086  error = -EPROTO;
3087  goto end_function;
3088  }
3089  /* Send command to SEP */
3090  error = sep_send_command_handler(sep);
3091  if (!error)
3092  set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3093  &call_status->status);
3094  dev_dbg(&sep->pdev->dev,
3095  "[PID%d] SEP_IOCSENDSEPCOMMAND end\n",
3096  current->pid);
3097  break;
3098  case SEP_IOCENDTRANSACTION:
3099  dev_dbg(&sep->pdev->dev,
3100  "[PID%d] SEP_IOCENDTRANSACTION start\n",
3101  current->pid);
3102  error = sep_end_transaction_handler(sep, dma_ctx, call_status,
3103  my_queue_elem);
3104  dev_dbg(&sep->pdev->dev,
3105  "[PID%d] SEP_IOCENDTRANSACTION end\n",
3106  current->pid);
3107  break;
3108  case SEP_IOCPREPAREDCB:
3109  dev_dbg(&sep->pdev->dev,
3110  "[PID%d] SEP_IOCPREPAREDCB start\n",
3111  current->pid);
3112  case SEP_IOCPREPAREDCB_SECURE_DMA:
3113  dev_dbg(&sep->pdev->dev,
3114  "[PID%d] SEP_IOCPREPAREDCB_SECURE_DMA start\n",
3115  current->pid);
3116  if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
3117  &call_status->status)) {
3118  dev_dbg(&sep->pdev->dev,
3119  "[PID%d] dcb prep needed before send msg\n",
3120  current->pid);
3121  error = -EPROTO;
3122  goto end_function;
3123  }
3124 
3125  if (!arg) {
3126  dev_dbg(&sep->pdev->dev,
3127  "[PID%d] dcb null arg\n", current->pid);
3128  error = -EINVAL;
3129  goto end_function;
3130  }
3131 
3132  if (cmd == SEP_IOCPREPAREDCB) {
3133  /* No secure dma */
3134  dev_dbg(&sep->pdev->dev,
3135  "[PID%d] SEP_IOCPREPAREDCB (no secure_dma)\n",
3136  current->pid);
3137 
3138  error = sep_prepare_dcb_handler(sep, arg, false,
3139  dma_ctx);
3140  } else {
3141  /* Secure dma */
3142  dev_dbg(&sep->pdev->dev,
3143  "[PID%d] SEP_IOC_POC (with secure_dma)\n",
3144  current->pid);
3145 
3146  error = sep_prepare_dcb_handler(sep, arg, true,
3147  dma_ctx);
3148  }
3149  dev_dbg(&sep->pdev->dev, "[PID%d] dcb's end\n",
3150  current->pid);
3151  break;
3152  case SEP_IOCFREEDCB:
3153  dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB start\n",
3154  current->pid);
3155  case SEP_IOCFREEDCB_SECURE_DMA:
3156  dev_dbg(&sep->pdev->dev,
3157  "[PID%d] SEP_IOCFREEDCB_SECURE_DMA start\n",
3158  current->pid);
3159  error = sep_free_dcb_handler(sep, dma_ctx);
3160  dev_dbg(&sep->pdev->dev, "[PID%d] SEP_IOCFREEDCB end\n",
3161  current->pid);
3162  break;
3163  default:
3164  error = -ENOTTY;
3165  dev_dbg(&sep->pdev->dev, "[PID%d] default end\n",
3166  current->pid);
3167  break;
3168  }
3169 
3170 end_function:
3171  dev_dbg(&sep->pdev->dev, "[PID%d] ioctl end\n", current->pid);
3172 
3173  return error;
3174 }
3175 
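A hedged user-space sketch of the legacy sequence this handler enforces: mmap() before any ioctl, DCB preparation before the message is sent, then an explicit end-of-transaction. The device node path and header location are assumptions; the ioctl names are the ones dispatched above:

#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "sep_driver_api.h"	/* assumed location of SEP_IOC* and build_dcb_struct */

int sep_transaction(struct build_dcb_struct *dcb, size_t shared_size)
{
	int fd = open("/dev/sep", O_RDWR);	/* assumed device node name */
	void *shared;
	int error = -1;

	if (fd < 0)
		return -1;

	/* Without this mmap() every ioctl below fails with -EPROTO */
	shared = mmap(NULL, shared_size, PROT_READ | PROT_WRITE,
		      MAP_SHARED, fd, 0);
	if (shared == MAP_FAILED)
		goto out_close;

	if (ioctl(fd, SEP_IOCPREPAREDCB, dcb))	/* build DCB + DMA tables */
		goto out_unmap;

	if (ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0) == 0)	/* kick the SEP */
		error = 0;

	ioctl(fd, SEP_IOCFREEDCB, 0);		/* free DCBs and MLLIs */
	ioctl(fd, SEP_IOCENDTRANSACTION, 0);	/* wake up pending waiters */
out_unmap:
	munmap(shared, shared_size);
out_close:
	close(fd);
	return error;
}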
3181 static irqreturn_t sep_inthandler(int irq, void *dev_id)
3182 {
3183  unsigned long lock_irq_flag;
3184  u32 reg_val, reg_val2 = 0;
3185  struct sep_device *sep = dev_id;
3186  irqreturn_t int_error = IRQ_HANDLED;
3187 
3188  /* Are we in power save? */
3189 #if defined(CONFIG_PM_RUNTIME) && defined(SEP_ENABLE_RUNTIME_PM)
3190  if (sep->pdev->dev.power.runtime_status != RPM_ACTIVE) {
3191  dev_dbg(&sep->pdev->dev, "interrupt during pwr save\n");
3192  return IRQ_NONE;
3193  }
3194 #endif
3195 
3196  if (test_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags) == 0) {
3197  dev_dbg(&sep->pdev->dev, "interrupt while nobody using sep\n");
3198  return IRQ_NONE;
3199  }
3200 
3201  /* Read the IRR register to check if this is SEP interrupt */
3202  reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3203 
3204  dev_dbg(&sep->pdev->dev, "sep int: IRR REG val: %x\n", reg_val);
3205 
3206  if (reg_val & (0x1 << 13)) {
3207 
3208  /* Lock and update the counter of reply messages */
3209  spin_lock_irqsave(&sep->snd_rply_lck, lock_irq_flag);
3210  sep->reply_ct++;
3211  spin_unlock_irqrestore(&sep->snd_rply_lck, lock_irq_flag);
3212 
3213  dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3214  sep->send_ct, sep->reply_ct);
3215 
3216  /* Is this a kernel client request */
3217  if (sep->in_kernel) {
3218  tasklet_schedule(&sep->finish_tasklet);
3219  goto finished_interrupt;
3220  }
3221 
3222  /* Is this printf or daemon request? */
3223  reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3224  dev_dbg(&sep->pdev->dev,
3225  "SEP Interrupt - GPR2 is %08x\n", reg_val2);
3226 
3227  clear_bit(SEP_WORKING_LOCK_BIT, &sep->in_use_flags);
3228 
3229  if ((reg_val2 >> 30) & 0x1) {
3230  dev_dbg(&sep->pdev->dev, "int: printf request\n");
3231  } else if (reg_val2 >> 31) {
3232  dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3233  } else {
3234  dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3235  wake_up(&sep->event_interrupt);
3236  }
3237  } else {
3238  dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3239  int_error = IRQ_NONE;
3240  }
3241 
3242 finished_interrupt:
3243 
3244  if (int_error == IRQ_HANDLED)
3245  sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
3246 
3247  return int_error;
3248 }
3249 
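The GPR2 classification above tests bit 30 before bit 31, so a value with both bits set is treated as a printf request. A standalone sketch of the same decoding:

#include <stdint.h>
#include <stdio.h>

static const char *classify_gpr2(uint32_t reg_val2)
{
	if ((reg_val2 >> 30) & 0x1)
		return "printf request";
	if (reg_val2 >> 31)
		return "daemon request";
	return "SEP reply";
}

int main(void)
{
	printf("%s\n", classify_gpr2(0x40000000));	/* printf request */
	printf("%s\n", classify_gpr2(0x80000000));	/* daemon request */
	printf("%s\n", classify_gpr2(0x00001234));	/* SEP reply      */
	return 0;
}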
3257 static int sep_reconfig_shared_area(struct sep_device *sep)
3258 {
3259  int ret_val;
3260 
3261  /* use to limit waiting for SEP */
3262  unsigned long end_time;
3263 
3264  /* Send the new SHARED MESSAGE AREA to the SEP */
3265  dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n",
3266  (unsigned long long)sep->shared_bus);
3267 
3268  sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3269 
3270  /* Poll for SEP response */
3271  ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3272 
3273  end_time = jiffies + (WAIT_TIME * HZ);
3274 
3275  while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) &&
3276  (ret_val != sep->shared_bus))
3277  ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3278 
3279  /* Check the return value (register) */
3280  if (ret_val != sep->shared_bus) {
3281  dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3282  dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3283  ret_val = -ENOMEM;
3284  } else
3285  ret_val = 0;
3286 
3287  dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3288 
3289  return ret_val;
3290 }
3291 
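The reconfiguration above is a bounded poll: re-read a status register until it echoes the expected value, giving up on an all-ones read or a timeout. A standalone user-space sketch of the pattern, where the callback stands in for the GPR register read:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static bool poll_until_match(uint32_t (*read_status)(void),
			     uint32_t expected, double timeout_sec)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		uint32_t val = read_status();

		if (val == expected)
			return true;
		if (val == 0xffffffff)		/* device gone or faulted */
			return false;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) +
		    (now.tv_nsec - start.tv_nsec) / 1e9 > timeout_sec)
			return false;
	}
}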
3300 static ssize_t sep_activate_dcb_dmatables_context(struct sep_device *sep,
3301  struct sep_dcblock **dcb_region,
3302  void **dmatables_region,
3303  struct sep_dma_context *dma_ctx)
3304 {
3305  void *dmaregion_free_start = NULL;
3306  void *dmaregion_free_end = NULL;
3307  void *dcbregion_free_start = NULL;
3308  void *dcbregion_free_end = NULL;
3309  ssize_t error = 0;
3310 
3311  dev_dbg(&sep->pdev->dev, "[PID%d] activating dcb/dma region\n",
3312  current->pid);
3313 
3314  if (1 > dma_ctx->nr_dcb_creat) {
3315  dev_warn(&sep->pdev->dev,
3316  "[PID%d] invalid number of dcbs to activate 0x%08X\n",
3317  current->pid, dma_ctx->nr_dcb_creat);
3318  error = -EINVAL;
3319  goto end_function;
3320  }
3321 
3322  dmaregion_free_start = sep->shared_addr
3323  + SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES;
3324  dmaregion_free_end = dmaregion_free_start
3325  + SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES - 1;
3326 
3327  if (dmaregion_free_start
3328  + dma_ctx->dmatables_len > dmaregion_free_end) {
3329  error = -ENOMEM;
3330  goto end_function;
3331  }
3332  memcpy(dmaregion_free_start,
3333  *dmatables_region,
3334  dma_ctx->dmatables_len);
3335  /* Free MLLI table copy */
3336  kfree(*dmatables_region);
3337  *dmatables_region = NULL;
3338 
3339  /* Copy thread's DCB table copy to DCB table region */
3340  dcbregion_free_start = sep->shared_addr +
3341  SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES;
3342  dcbregion_free_end = dcbregion_free_start +
3343  (SEP_MAX_NUM_SYNC_DMA_OPS *
3344  sizeof(struct sep_dcblock)) - 1;
3345 
3346  if (dcbregion_free_start
3347  + (dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock))
3348  > dcbregion_free_end) {
3349  error = -ENOMEM;
3350  goto end_function;
3351  }
3352 
3353  memcpy(dcbregion_free_start,
3354  *dcb_region,
3355  dma_ctx->nr_dcb_creat * sizeof(struct sep_dcblock));
3356 
3357  /* Print the tables */
3358  dev_dbg(&sep->pdev->dev, "activate: input table\n");
3359  sep_debug_print_lli_tables(sep,
3360  (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3361  (*dcb_region)->input_mlli_address),
3362  (*dcb_region)->input_mlli_num_entries,
3363  (*dcb_region)->input_mlli_data_size);
3364 
3365  dev_dbg(&sep->pdev->dev, "activate: output table\n");
3366  sep_debug_print_lli_tables(sep,
3367  (struct sep_lli_entry *)sep_shared_area_bus_to_virt(sep,
3368  (*dcb_region)->output_mlli_address),
3369  (*dcb_region)->output_mlli_num_entries,
3370  (*dcb_region)->output_mlli_data_size);
3371 
3372  dev_dbg(&sep->pdev->dev,
3373  "[PID%d] printing activated tables\n", current->pid);
3374 
3375 end_function:
3376  kfree(*dmatables_region);
3377  *dmatables_region = NULL;
3378 
3379  kfree(*dcb_region);
3380  *dcb_region = NULL;
3381 
3382  return error;
3383 }
3384 
3395 static ssize_t sep_create_dcb_dmatables_context(struct sep_device *sep,
3396  struct sep_dcblock **dcb_region,
3397  void **dmatables_region,
3398  struct sep_dma_context **dma_ctx,
3399  const struct build_dcb_struct __user *user_dcb_args,
3400  const u32 num_dcbs, bool secure_dma)
3401 {
3402  int error = 0;
3403  int i = 0;
3404  struct build_dcb_struct *dcb_args = NULL;
3405 
3406  dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3407  current->pid);
3408 
3409  if (!dcb_region || !dma_ctx || !dmatables_region || !user_dcb_args) {
3410  error = -EINVAL;
3411  goto end_function;
3412  }
3413 
3414  if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3415  dev_warn(&sep->pdev->dev,
3416  "[PID%d] invalid number of dcbs 0x%08X\n",
3417  current->pid, num_dcbs);
3418  error = -EINVAL;
3419  goto end_function;
3420  }
3421 
3422  dcb_args = kzalloc(num_dcbs * sizeof(struct build_dcb_struct),
3423  GFP_KERNEL);
3424  if (!dcb_args) {
3425  dev_warn(&sep->pdev->dev, "[PID%d] no memory for dcb args\n",
3426  current->pid);
3427  error = -ENOMEM;
3428  goto end_function;
3429  }
3430 
3431  if (copy_from_user(dcb_args,
3432  user_dcb_args,
3433  num_dcbs * sizeof(struct build_dcb_struct))) {
3434  error = -EINVAL;
3435  goto end_function;
3436  }
3437 
3438  /* Allocate thread-specific memory for DCB */
3439  *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3440  GFP_KERNEL);
3441  if (!(*dcb_region)) {
3442  error = -ENOMEM;
3443  goto end_function;
3444  }
3445 
3446  /* Prepare DCB and MLLI table into the allocated regions */
3447  for (i = 0; i < num_dcbs; i++) {
3448  error = sep_prepare_input_output_dma_table_in_dcb(sep,
3449  (unsigned long)dcb_args[i].app_in_address,
3450  (unsigned long)dcb_args[i].app_out_address,
3451  dcb_args[i].data_in_size,
3452  dcb_args[i].block_size,
3453  dcb_args[i].tail_block_size,
3454  dcb_args[i].is_applet,
3455  false, secure_dma,
3456  *dcb_region, dmatables_region,
3457  dma_ctx,
3458  NULL,
3459  NULL);
3460  if (error) {
3461  dev_warn(&sep->pdev->dev,
3462  "[PID%d] dma table creation failed\n",
3463  current->pid);
3464  goto end_function;
3465  }
3466 
3467  if (dcb_args[i].app_in_address != 0)
3468  (*dma_ctx)->input_data_len += dcb_args[i].data_in_size;
3469  }
3470 
3471 end_function:
3472  kfree(dcb_args);
3473  return error;
3474 
3475 }
3476 
3491 int sep_create_dcb_dmatables_context_kernel(struct sep_device *sep,
3492  struct sep_dcblock **dcb_region,
3493  void **dmatables_region,
3494  struct sep_dma_context **dma_ctx,
3495  const struct build_dcb_struct_kernel *dcb_data,
3496  const u32 num_dcbs)
3497 {
3498  int error = 0;
3499  int i = 0;
3500 
3501  dev_dbg(&sep->pdev->dev, "[PID%d] creating dcb/dma region\n",
3502  current->pid);
3503 
3504  if (!dcb_region || !dma_ctx || !dmatables_region || !dcb_data) {
3505  error = -EINVAL;
3506  goto end_function;
3507  }
3508 
3509  if (SEP_MAX_NUM_SYNC_DMA_OPS < num_dcbs) {
3510  dev_warn(&sep->pdev->dev,
3511  "[PID%d] invalid number of dcbs 0x%08X\n",
3512  current->pid, num_dcbs);
3513  error = -EINVAL;
3514  goto end_function;
3515  }
3516 
3517  dev_dbg(&sep->pdev->dev, "[PID%d] num_dcbs is %d\n",
3518  current->pid, num_dcbs);
3519 
3520  /* Allocate thread-specific memory for DCB */
3521  *dcb_region = kzalloc(num_dcbs * sizeof(struct sep_dcblock),
3522  GFP_KERNEL);
3523  if (!(*dcb_region)) {
3524  error = -ENOMEM;
3525  goto end_function;
3526  }
3527 
3528  /* Prepare DCB and MLLI table into the allocated regions */
3529  for (i = 0; i < num_dcbs; i++) {
3530  error = sep_prepare_input_output_dma_table_in_dcb(sep,
3531  (unsigned long)dcb_data->app_in_address,
3532  (unsigned long)dcb_data->app_out_address,
3533  dcb_data->data_in_size,
3534  dcb_data->block_size,
3535  dcb_data->tail_block_size,
3536  dcb_data->is_applet,
3537  true,
3538  false,
3539  *dcb_region, dmatables_region,
3540  dma_ctx,
3541  dcb_data->src_sg,
3542  dcb_data->dst_sg);
3543  if (error) {
3544  dev_warn(&sep->pdev->dev,
3545  "[PID%d] dma table creation failed\n",
3546  current->pid);
3547  goto end_function;
3548  }
3549  }
3550 
3551 end_function:
3552  return error;
3553 
3554 }
3555 
3562 static ssize_t sep_activate_msgarea_context(struct sep_device *sep,
3563  void **msg_region,
3564  const size_t msg_len)
3565 {
3566  dev_dbg(&sep->pdev->dev, "[PID%d] activating msg region\n",
3567  current->pid);
3568 
3569  if (!msg_region || !(*msg_region) ||
3570  SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len) {
3571  dev_warn(&sep->pdev->dev,
3572  "[PID%d] invalid act msgarea len 0x%08zX\n",
3573  current->pid, msg_len);
3574  return -EINVAL;
3575  }
3576 
3577  memcpy(sep->shared_addr, *msg_region, msg_len);
3578 
3579  return 0;
3580 }
3581 
3589 static ssize_t sep_create_msgarea_context(struct sep_device *sep,
3590  void **msg_region,
3591  const void __user *msg_user,
3592  const size_t msg_len)
3593 {
3594  int error = 0;
3595 
3596  dev_dbg(&sep->pdev->dev, "[PID%d] creating msg region\n",
3597  current->pid);
3598 
3599  if (!msg_region ||
3600  !msg_user ||
3601  SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > msg_len ||
3602  SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < msg_len) {
3603  dev_warn(&sep->pdev->dev,
3604  "[PID%d] invalid creat msgarea len 0x%08zX\n",
3605  current->pid, msg_len);
3606  error = -EINVAL;
3607  goto end_function;
3608  }
3609 
3610  /* Allocate thread-specific memory for message buffer */
3611  *msg_region = kzalloc(msg_len, GFP_KERNEL);
3612  if (!(*msg_region)) {
3613  dev_warn(&sep->pdev->dev,
3614  "[PID%d] no mem for msgarea context\n",
3615  current->pid);
3616  error = -ENOMEM;
3617  goto end_function;
3618  }
3619 
3620  /* Copy input data to write() to allocated message buffer */
3621  if (copy_from_user(*msg_region, msg_user, msg_len)) {
3622  error = -EINVAL;
3623  goto end_function;
3624  }
3625 
3626 end_function:
3627  if (error && msg_region) {
3628  kfree(*msg_region);
3629  *msg_region = NULL;
3630  }
3631 
3632  return error;
3633 }
3634 
3635 
3646 static ssize_t sep_read(struct file *filp,
3647  char __user *buf_user, size_t count_user,
3648  loff_t *offset)
3649 {
3650  struct sep_private_data * const private_data = filp->private_data;
3651  struct sep_call_status *call_status = &private_data->call_status;
3652  struct sep_device *sep = private_data->device;
3653  struct sep_dma_context **dma_ctx = &private_data->dma_ctx;
3654  struct sep_queue_info **my_queue_elem = &private_data->my_queue_elem;
3655  ssize_t error = 0, error_tmp = 0;
3656 
3657  /* Am I the process that owns the transaction? */
3658  error = sep_check_transaction_owner(sep);
3659  if (error) {
3660  dev_dbg(&sep->pdev->dev, "[PID%d] read pid is not owner\n",
3661  current->pid);
3662  goto end_function;
3663  }
3664 
3665  /* Check that the user has called the necessary APIs */
3666  if (0 == test_bit(SEP_FASTCALL_WRITE_DONE_OFFSET,
3667  &call_status->status)) {
3668  dev_warn(&sep->pdev->dev,
3669  "[PID%d] fastcall write not called\n",
3670  current->pid);
3671  error = -EPROTO;
3672  goto end_function_error;
3673  }
3674 
3675  if (!buf_user) {
3676  dev_warn(&sep->pdev->dev,
3677  "[PID%d] null user buffer\n",
3678  current->pid);
3679  error = -EINVAL;
3680  goto end_function_error;
3681  }
3682 
3683 
3684  /* Wait for SEP to finish */
3685  wait_event(sep->event_interrupt,
3686  test_bit(SEP_WORKING_LOCK_BIT,
3687  &sep->in_use_flags) == 0);
3688 
3689  sep_dump_message(sep);
3690 
3691  dev_dbg(&sep->pdev->dev, "[PID%d] count_user = 0x%08zX\n",
3692  current->pid, count_user);
3693 
3694  /* In case user has allocated bigger buffer */
3695  if (count_user > SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES)
3696  count_user = SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES;
3697 
3698  if (copy_to_user(buf_user, sep->shared_addr, count_user)) {
3699  error = -EFAULT;
3700  goto end_function_error;
3701  }
3702 
3703  dev_dbg(&sep->pdev->dev, "[PID%d] read succeeded\n", current->pid);
3704  error = count_user;
3705 
3706 end_function_error:
3707  /* Copy possible tail data to user and free DCB and MLLIs */
3708  error_tmp = sep_free_dcb_handler(sep, dma_ctx);
3709  if (error_tmp)
3710  dev_warn(&sep->pdev->dev, "[PID%d] dcb free failed\n",
3711  current->pid);
3712 
3713  /* End the transaction, wakeup pending ones */
3714  error_tmp = sep_end_transaction_handler(sep, dma_ctx, call_status,
3715  my_queue_elem);
3716  if (error_tmp)
3717  dev_warn(&sep->pdev->dev,
3718  "[PID%d] ending transaction failed\n",
3719  current->pid);
3720 
3721 end_function:
3722  return error;
3723 }
3724 
3732 static inline ssize_t sep_fastcall_args_get(struct sep_device *sep,
3733  struct sep_fastcall_hdr *args,
3734  const char __user *buf_user,
3735  const size_t count_user)
3736 {
3737  ssize_t error = 0;
3738  size_t actual_count = 0;
3739 
3740  if (!buf_user) {
3741  dev_warn(&sep->pdev->dev,
3742  "[PID%d] null user buffer\n",
3743  current->pid);
3744  error = -EINVAL;
3745  goto end_function;
3746  }
3747 
3748  if (count_user < sizeof(struct sep_fastcall_hdr)) {
3749  dev_warn(&sep->pdev->dev,
3750  "[PID%d] too small message size 0x%08zX\n",
3751  current->pid, count_user);
3752  error = -EINVAL;
3753  goto end_function;
3754  }
3755 
3756 
3757  if (copy_from_user(args, buf_user, sizeof(struct sep_fastcall_hdr))) {
3758  error = -EFAULT;
3759  goto end_function;
3760  }
3761 
3762  if (SEP_FC_MAGIC != args->magic) {
3763  dev_warn(&sep->pdev->dev,
3764  "[PID%d] invalid fastcall magic 0x%08X\n",
3765  current->pid, args->magic);
3766  error = -EINVAL;
3767  goto end_function;
3768  }
3769 
3770  dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr num of DCBs 0x%08X\n",
3771  current->pid, args->num_dcbs);
3772  dev_dbg(&sep->pdev->dev, "[PID%d] fastcall hdr msg len 0x%08X\n",
3773  current->pid, args->msg_len);
3774 
3775  if (SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES < args->msg_len ||
3776  SEP_DRIVER_MIN_MESSAGE_SIZE_IN_BYTES > args->msg_len) {
3777  dev_warn(&sep->pdev->dev,
3778  "[PID%d] invalid message length\n",
3779  current->pid);
3780  error = -EINVAL;
3781  goto end_function;
3782  }
3783 
3784  actual_count = sizeof(struct sep_fastcall_hdr)
3785  + args->msg_len
3786  + (args->num_dcbs * sizeof(struct build_dcb_struct));
3787 
3788  if (actual_count != count_user) {
3789  dev_warn(&sep->pdev->dev,
3790  "[PID%d] inconsistent message "
3791  "sizes 0x%08zX vs 0x%08zX\n",
3792  current->pid, actual_count, count_user);
3793  error = -EMSGSIZE;
3794  goto end_function;
3795  }
3796 
3797 end_function:
3798  return error;
3799 }
3800 
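The consistency check above means a fastcall write() must be sized exactly as header plus message plus one build_dcb_struct per DCB. A sketch of that arithmetic, with sizes passed in rather than taken from the driver's structs:

#include <stddef.h>
#include <stdint.h>

/* actual_count as computed above; a write() of any other length is
 * rejected with -EMSGSIZE. */
static size_t expected_write_size(size_t hdr_size, uint32_t msg_len,
				  uint32_t num_dcbs, size_t dcb_args_size)
{
	return hdr_size + msg_len + (size_t)num_dcbs * dcb_args_size;
}

So a 0x100-byte message accompanied by two DCBs must be written as sizeof(struct sep_fastcall_hdr) + 0x100 + 2 * sizeof(struct build_dcb_struct) bytes, no more and no less.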
3811 static ssize_t sep_write(struct file *filp,
3812  const char __user *buf_user, size_t count_user,
3813  loff_t *offset)
3814 {
3815  struct sep_private_data * const private_data = filp->private_data;
3816  struct sep_call_status *call_status = &private_data->call_status;
3817  struct sep_device *sep = private_data->device;
3818  struct sep_dma_context *dma_ctx = NULL;
3819  struct sep_fastcall_hdr call_hdr = {0};
3820  void *msg_region = NULL;
3821  void *dmatables_region = NULL;
3822  struct sep_dcblock *dcb_region = NULL;
3823  ssize_t error = 0;
3824  struct sep_queue_info *my_queue_elem = NULL;
3825  bool my_secure_dma; /* are we using secure_dma (IMR)? */
3826 
3827  dev_dbg(&sep->pdev->dev, "[PID%d] sep dev is 0x%p\n",
3828  current->pid, sep);
3829  dev_dbg(&sep->pdev->dev, "[PID%d] private_data is 0x%p\n",
3830  current->pid, private_data);
3831 
3832  error = sep_fastcall_args_get(sep, &call_hdr, buf_user, count_user);
3833  if (error)
3834  goto end_function;
3835 
3836  buf_user += sizeof(struct sep_fastcall_hdr);
3837 
3838  if (call_hdr.secure_dma == 0)
3839  my_secure_dma = false;
3840  else
3841  my_secure_dma = true;
3842 
3843  /*
3844  * Controlling driver memory usage by limiting amount of
3845  * buffers created. Only SEP_DOUBLEBUF_USERS_LIMIT number
3846  * of threads can progress further at a time
3847  */
3848  dev_dbg(&sep->pdev->dev,
3849  "[PID%d] waiting for double buffering region access\n",
3850  current->pid);
3851  error = down_interruptible(&sep->sep_doublebuf);
3852  dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region start\n",
3853  current->pid);
3854  if (error) {
3855  /* Signal received */
3856  goto end_function_error;
3857  }
3858 
3859 
3860  /*
3861  * Prepare contents of the shared area regions for
3862  * the operation into temporary buffers
3863  */
3864  if (0 < call_hdr.num_dcbs) {
3865  error = sep_create_dcb_dmatables_context(sep,
3866  &dcb_region,
3867  &dmatables_region,
3868  &dma_ctx,
3869  (const struct build_dcb_struct __user *)
3870  buf_user,
3871  call_hdr.num_dcbs, my_secure_dma);
3872  if (error)
3873  goto end_function_error_doublebuf;
3874 
3875  buf_user += call_hdr.num_dcbs * sizeof(struct build_dcb_struct);
3876  }
3877 
3878  error = sep_create_msgarea_context(sep,
3879  &msg_region,
3880  buf_user,
3881  call_hdr.msg_len);
3882  if (error)
3883  goto end_function_error_doublebuf;
3884 
3885  dev_dbg(&sep->pdev->dev, "[PID%d] updating queue status\n",
3886  current->pid);
3887  my_queue_elem = sep_queue_status_add(sep,
3888  ((struct sep_msgarea_hdr *)msg_region)->opcode,
3889  (dma_ctx) ? dma_ctx->input_data_len : 0,
3890  current->pid,
3891  current->comm, sizeof(current->comm));
3892 
3893  if (!my_queue_elem) {
3894  dev_dbg(&sep->pdev->dev,
3895  "[PID%d] updating queue status error\n", current->pid);
3896  error = -ENOMEM;
3897  goto end_function_error_doublebuf;
3898  }
3899 
3900  /* Wait until current process gets the transaction */
3901  error = sep_wait_transaction(sep);
3902 
3903  if (error) {
3904  /* Interrupted by signal, don't clear transaction */
3905  dev_dbg(&sep->pdev->dev, "[PID%d] interrupted by signal\n",
3906  current->pid);
3907  sep_queue_status_remove(sep, &my_queue_elem);
3908  goto end_function_error_doublebuf;
3909  }
3910 
3911  dev_dbg(&sep->pdev->dev, "[PID%d] saving queue element\n",
3912  current->pid);
3913  private_data->my_queue_elem = my_queue_elem;
3914 
3915  /* Activate shared area regions for the transaction */
3916  error = sep_activate_msgarea_context(sep, &msg_region,
3917  call_hdr.msg_len);
3918  if (error)
3919  goto end_function_error_clear_transact;
3920 
3921  sep_dump_message(sep);
3922 
3923  if (0 < call_hdr.num_dcbs) {
3924  error = sep_activate_dcb_dmatables_context(sep,
3925  &dcb_region,
3926  &dmatables_region,
3927  dma_ctx);
3928  if (error)
3929  goto end_function_error_clear_transact;
3930  }
3931 
3932  /* Send command to SEP */
3933  error = sep_send_command_handler(sep);
3934  if (error)
3935  goto end_function_error_clear_transact;
3936 
3937  /* Store DMA context for the transaction */
3938  private_data->dma_ctx = dma_ctx;
3939  /* Update call status */
3940  set_bit(SEP_FASTCALL_WRITE_DONE_OFFSET, &call_status->status);
3941  error = count_user;
3942 
3943  up(&sep->sep_doublebuf);
3944  dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3945  current->pid);
3946 
3947  goto end_function;
3948 
3949 end_function_error_clear_transact:
3950  sep_end_transaction_handler(sep, &dma_ctx, call_status,
3951  &private_data->my_queue_elem);
3952 
3953 end_function_error_doublebuf:
3954  up(&sep->sep_doublebuf);
3955  dev_dbg(&sep->pdev->dev, "[PID%d] double buffering region end\n",
3956  current->pid);
3957 
3958 end_function_error:
3959  if (dma_ctx)
3960  sep_free_dma_table_data_handler(sep, &dma_ctx);
3961 
3962 end_function:
3963  kfree(dcb_region);
3964  kfree(dmatables_region);
3965  kfree(msg_region);
3966 
3967  return error;
3968 }
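A hedged user-space sketch of assembling the buffer sep_write() expects: a sep_fastcall_hdr, then the DCB argument blocks, then the message. The struct layout and magic value below are illustrative stand-ins for the definitions in the driver's API header:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define SEP_FC_MAGIC 0x12345678	/* placeholder; real value is in the API header */

struct sep_fastcall_hdr {	/* stand-in layout */
	uint32_t magic;
	uint32_t secure_dma;
	uint32_t msg_len;
	uint32_t num_dcbs;
};

ssize_t sep_fastcall(int fd, const void *msg, uint32_t msg_len)
{
	struct sep_fastcall_hdr hdr = {
		.magic = SEP_FC_MAGIC,
		.secure_dma = 0,
		.msg_len = msg_len,
		.num_dcbs = 0,		/* no DMA buffers in this example */
	};
	size_t total = sizeof(hdr) + msg_len;
	uint8_t *buf = malloc(total);
	ssize_t ret;

	if (!buf)
		return -1;
	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), msg, msg_len);
	ret = write(fd, buf, total);	/* must equal total or -EMSGSIZE */
	free(buf);
	return ret;
}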
3978 static loff_t sep_seek(struct file *filp, loff_t offset, int origin)
3979 {
3980  return -ENOSYS;
3981 }
3982 
3983 
3984 
3996 static const struct file_operations sep_file_operations = {
3997  .owner = THIS_MODULE,
3998  .unlocked_ioctl = sep_ioctl,
3999  .poll = sep_poll,
4000  .open = sep_open,
4001  .release = sep_release,
4002  .mmap = sep_mmap,
4003  .read = sep_read,
4004  .write = sep_write,
4005  .llseek = sep_seek,
4006 };
4007 
4019 static ssize_t
4020 sep_sysfs_read(struct file *filp, struct kobject *kobj,
4021  struct bin_attribute *attr,
4022  char *buf, loff_t pos, size_t count)
4023 {
4024  unsigned long lck_flags;
4025  size_t nleft = count;
4026  struct sep_device *sep = sep_dev;
4027  struct sep_queue_info *queue_elem = NULL;
4028  u32 queue_num = 0;
4029  u32 i = 1;
4030 
4031  spin_lock_irqsave(&sep->sep_queue_lock, lck_flags);
4032 
4033  queue_num = sep->sep_queue_num;
4034  if (queue_num > SEP_DOUBLEBUF_USERS_LIMIT)
4035  queue_num = SEP_DOUBLEBUF_USERS_LIMIT;
4036 
4037 
4038  if (count < sizeof(queue_num)
4039  + (queue_num * sizeof(struct sep_queue_data))) {
4040  spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4041  return -EINVAL;
4042  }
4043 
4044  memcpy(buf, &queue_num, sizeof(queue_num));
4045  buf += sizeof(queue_num);
4046  nleft -= sizeof(queue_num);
4047 
4048  list_for_each_entry(queue_elem, &sep->sep_queue_status, list) {
4049  if (i++ > queue_num)
4050  break;
4051 
4052  memcpy(buf, &queue_elem->data, sizeof(queue_elem->data));
4053  nleft -= sizeof(queue_elem->data);
4054  buf += sizeof(queue_elem->data);
4055  }
4056  spin_unlock_irqrestore(&sep->sep_queue_lock, lck_flags);
4057 
4058  return count - nleft;
4059 }
4060 
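The blob returned above is a u32 element count followed by that many sep_queue_data records. A sketch of parsing it in user space; the record layout here is a stand-in for the driver's definition:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sep_queue_data {		/* stand-in for the driver's record */
	uint32_t opcode;
	uint32_t size;
	uint64_t pid;
	uint8_t name[16];
};

static void parse_queue_status(const uint8_t *buf, size_t len)
{
	uint32_t queue_num;
	size_t off = sizeof(queue_num);
	uint32_t i;

	if (len < sizeof(queue_num))
		return;
	memcpy(&queue_num, buf, sizeof(queue_num));

	for (i = 0; i < queue_num &&
	     off + sizeof(struct sep_queue_data) <= len;
	     i++, off += sizeof(struct sep_queue_data)) {
		struct sep_queue_data rec;

		memcpy(&rec, buf + off, sizeof(rec));
		printf("op=0x%x size=%u pid=%llu name=%.16s\n",
		       rec.opcode, rec.size,
		       (unsigned long long)rec.pid, (const char *)rec.name);
	}
}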
4067 static const struct bin_attribute queue_status = {
4068  .attr = {.name = "queue_status", .mode = 0444},
4069  .read = sep_sysfs_read,
4070  .size = sizeof(u32)
4071  + (SEP_DOUBLEBUF_USERS_LIMIT * sizeof(struct sep_queue_data)),
4072 };
4073 
4080 static int sep_register_driver_with_fs(struct sep_device *sep)
4081 {
4082  int ret_val;
4083 
4084  sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
4085  sep->miscdev_sep.name = SEP_DEV_NAME;
4086  sep->miscdev_sep.fops = &sep_file_operations;
4087 
4088  ret_val = misc_register(&sep->miscdev_sep);
4089  if (ret_val) {
4090  dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
4091  ret_val);
4092  return ret_val;
4093  }
4094 
4095  ret_val = device_create_bin_file(sep->miscdev_sep.this_device,
4096  &queue_status);
4097  if (ret_val) {
4098  dev_warn(&sep->pdev->dev, "sysfs attribute1 fails for SEP %x\n",
4099  ret_val);
4100  return ret_val;
4101  }
4102 
4103  return ret_val;
4104 }
4105 
4106 
4115 static int __devinit sep_probe(struct pci_dev *pdev,
4116  const struct pci_device_id *ent)
4117 {
4118  int error = 0;
4119  struct sep_device *sep = NULL;
4120 
4121  if (sep_dev != NULL) {
4122  dev_dbg(&pdev->dev, "only one SEP supported.\n");
4123  return -EBUSY;
4124  }
4125 
4126  /* Enable the device */
4127  error = pci_enable_device(pdev);
4128  if (error) {
4129  dev_warn(&pdev->dev, "error enabling pci device\n");
4130  goto end_function;
4131  }
4132 
4133  /* Allocate the sep_device structure for this device */
4134  sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
4135  if (sep_dev == NULL) {
4136  dev_warn(&pdev->dev,
4137  "can't kmalloc the sep_device structure\n");
4138  error = -ENOMEM;
4139  goto end_function_disable_device;
4140  }
4141 
4142  /*
4143  * We're going to use another variable for actually
4144  * working with the device; this way, if we have
4145  * multiple devices in the future, it would be easier
4146  * to make appropriate changes
4147  */
4148  sep = sep_dev;
4149 
4150  sep->pdev = pci_dev_get(pdev);
4151 
4152  init_waitqueue_head(&sep->event_transactions);
4153  init_waitqueue_head(&sep->event_interrupt);
4154  spin_lock_init(&sep->snd_rply_lck);
4155  spin_lock_init(&sep->sep_queue_lock);
4156  sema_init(&sep->sep_doublebuf, SEP_DOUBLEBUF_USERS_LIMIT);
4157 
4158  INIT_LIST_HEAD(&sep->sep_queue_status);
4159 
4160  dev_dbg(&sep->pdev->dev,
4161  "sep probe: PCI obtained, device being prepared\n");
4162 
4163  /* Set up our register area */
4164  sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
4165  if (!sep->reg_physical_addr) {
4166  dev_warn(&sep->pdev->dev, "Error getting register start\n");
4167  error = -ENODEV;
4168  goto end_function_free_sep_dev;
4169  }
4170 
4171  sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
4172  if (!sep->reg_physical_end) {
4173  dev_warn(&sep->pdev->dev, "Error getting register end\n");
4174  error = -ENODEV;
4175  goto end_function_free_sep_dev;
4176  }
4177 
4178  sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
4179  (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
4180  if (!sep->reg_addr) {
4181  dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
4182  error = -ENODEV;
4183  goto end_function_free_sep_dev;
4184  }
4185 
4186  dev_dbg(&sep->pdev->dev,
4187  "Register area start %llx end %llx virtual %p\n",
4188  (unsigned long long)sep->reg_physical_addr,
4189  (unsigned long long)sep->reg_physical_end,
4190  sep->reg_addr);
4191 
4192  /* Allocate the shared area */
4193  sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
4194  SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
4195  SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
4196  SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
4197  SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
4198 
4199  if (sep_map_and_alloc_shared_area(sep)) {
4200  error = -ENOMEM;
4201  /* Allocation failed */
4202  goto end_function_error;
4203  }
4204 
4205  /* Clear ICR register */
4206  sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4207 
4208  /* Set the IMR register - open only GPR 2 */
4209  sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4210 
4211  /* Read send/receive counters from SEP */
4212  sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4213  sep->reply_ct &= 0x3FFFFFFF;
4214  sep->send_ct = sep->reply_ct;
4215 
4216  /* Get the interrupt line */
4217  error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
4218  "sep_driver", sep);
4219 
4220  if (error)
4221  goto end_function_deallocate_sep_shared_area;
4222 
4223  /* The new chip requires a shared area reconfigure */
4224  error = sep_reconfig_shared_area(sep);
4225  if (error)
4226  goto end_function_free_irq;
4227 
4228  sep->in_use = 1;
4229 
4230  /* Finally magic up the device nodes */
4231  /* Register driver with the fs */
4232  error = sep_register_driver_with_fs(sep);
4233 
4234  if (error) {
4235  dev_err(&sep->pdev->dev, "error registering dev file\n");
4236  goto end_function_free_irq;
4237  }
4238 
4239  sep->in_use = 0; /* done touching the device */
4240 #ifdef SEP_ENABLE_RUNTIME_PM
4241  pm_runtime_put_noidle(&sep->pdev->dev);
4242  pm_runtime_allow(&sep->pdev->dev);
4243  pm_runtime_set_autosuspend_delay(&sep->pdev->dev,
4244  SUSPEND_DELAY);
4245  pm_runtime_use_autosuspend(&sep->pdev->dev);
4246  pm_runtime_mark_last_busy(&sep->pdev->dev);
4247  sep->power_save_setup = 1;
4248 #endif
4249  /* register kernel crypto driver */
4250 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4251  error = sep_crypto_setup();
4252  if (error) {
4253  dev_err(&sep->pdev->dev, "crypto setup failed\n");
4254  goto end_function_free_irq;
4255  }
4256 #endif
4257  goto end_function;
4258 
4259 end_function_free_irq:
4260  free_irq(pdev->irq, sep);
4261 
4262 end_function_deallocate_sep_shared_area:
4263  /* De-allocate shared area */
4264  sep_unmap_and_free_shared_area(sep);
4265 
4266 end_function_error:
4267  iounmap(sep->reg_addr);
4268 
4269 end_function_free_sep_dev:
4270  pci_dev_put(sep_dev->pdev);
4271  kfree(sep_dev);
4272  sep_dev = NULL;
4273 
4274 end_function_disable_device:
4275  pci_disable_device(pdev);
4276 
4277 end_function:
4278  return error;
4279 }
4280 
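Probe and the resume paths program the IMR with ~(1 << 13), leaving only the GPR2 interrupt source unmasked; the interrupt handler tests the same bit in the IRR. A one-line standalone sketch of that mask math:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t imr = ~(0x1u << 13);	/* mask every source but bit 13 (GPR2) */
	uint32_t irr = 0x1u << 13;	/* a pending GPR2 interrupt */

	printf("IMR = 0x%08x, GPR2 pending = %d\n",
	       imr, (irr & (0x1u << 13)) != 0);
	return 0;
}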
4289 static void sep_remove(struct pci_dev *pdev)
4290 {
4291  struct sep_device *sep = sep_dev;
4292 
4293  /* Unregister from fs */
4294  misc_deregister(&sep->miscdev_sep);
4295 
4296  /* Unregister from kernel crypto */
4297 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
4298  sep_crypto_takedown();
4299 #endif
4300  /* Free the irq */
4301  free_irq(sep->pdev->irq, sep);
4302 
4303  /* Free the shared area */
4304  sep_unmap_and_free_shared_area(sep_dev);
4305  iounmap(sep_dev->reg_addr);
4306 
4307 #ifdef SEP_ENABLE_RUNTIME_PM
4308  if (sep->in_use) {
4309  sep->in_use = 0;
4310  pm_runtime_forbid(&sep->pdev->dev);
4311  pm_runtime_get_noresume(&sep->pdev->dev);
4312  }
4313 #endif
4314  pci_dev_put(sep_dev->pdev);
4315  kfree(sep_dev);
4316  sep_dev = NULL;
4317 }
4318 
4319 /* Initialize struct pci_device_id for our driver */
4320 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
4321  {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0826)},
4322  {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08e9)},
4323  {0}
4324 };
4325 
4326 /* Export our pci_device_id structure to user space */
4327 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
4328 
4329 #ifdef SEP_ENABLE_RUNTIME_PM
4330 
4339 static int sep_pci_resume(struct device *dev)
4340 {
4341  struct sep_device *sep = sep_dev;
4342 
4343  dev_dbg(&sep->pdev->dev, "pci resume called\n");
4344 
4345  if (sep->power_state == SEP_DRIVER_POWERON)
4346  return 0;
4347 
4348  /* Clear ICR register */
4349  sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4350 
4351  /* Set the IMR register - open only GPR 2 */
4352  sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4353 
4354  /* Read send/receive counters from SEP */
4355  sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4356  sep->reply_ct &= 0x3FFFFFFF;
4357  sep->send_ct = sep->reply_ct;
4358 
4359  sep->power_state = SEP_DRIVER_POWERON;
4360 
4361  return 0;
4362 }
4363 
4372 static int sep_pci_suspend(struct device *dev)
4373 {
4374  struct sep_device *sep = sep_dev;
4375 
4376  dev_dbg(&sep->pdev->dev, "pci suspend called\n");
4377  if (sep->in_use == 1)
4378  return -EAGAIN;
4379 
4380  sep->power_state = SEP_DRIVER_POWEROFF;
4381 
4382  /* Clear ICR register */
4383  sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4384 
4385  /* Set the IMR to block all */
4386  sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0xFFFFFFFF);
4387 
4388  return 0;
4389 }
4390 
4397 static int sep_pm_runtime_resume(struct device *dev)
4398 {
4399 
4400  u32 retval2;
4401  u32 delay_count;
4402  struct sep_device *sep = sep_dev;
4403 
4404  dev_dbg(&sep->pdev->dev, "pm runtime resume called\n");
4405 
4413  retval2 = 0;
4414  delay_count = 0;
4415  while ((!retval2) && (delay_count < SCU_DELAY_MAX)) {
4416  retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
4417  retval2 &= 0x00000008;
4418  if (!retval2) {
4419  udelay(SCU_DELAY_ITERATION);
4420  delay_count += 1;
4421  }
4422  }
4423 
4424  if (!retval2) {
4425  dev_warn(&sep->pdev->dev, "scu boot bit not set at resume\n");
4426  return -EINVAL;
4427  }
4428 
4429  /* Clear ICR register */
4430  sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4431 
4432  /* Set the IMR register - open only GPR 2 */
4433  sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
4434 
4435  /* Read send/receive counters from SEP */
4436  sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
4437  sep->reply_ct &= 0x3FFFFFFF;
4438  sep->send_ct = sep->reply_ct;
4439 
4440  return 0;
4441 }
4442 
4449 static int sep_pm_runtime_suspend(struct device *dev)
4450 {
4451  struct sep_device *sep = sep_dev;
4452 
4453  dev_dbg(&sep->pdev->dev, "pm runtime suspend called\n");
4454 
4455  /* Clear ICR register */
4456  sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
4457  return 0;
4458 }
4459 
4467 static const struct dev_pm_ops sep_pm = {
4468  .runtime_resume = sep_pm_runtime_resume,
4469  .runtime_suspend = sep_pm_runtime_suspend,
4470  .resume = sep_pci_resume,
4471  .suspend = sep_pci_suspend,
4472 };
4473 #endif /* SEP_ENABLE_RUNTIME_PM */
4474 
4482 static struct pci_driver sep_pci_driver = {
4483 #ifdef SEP_ENABLE_RUNTIME_PM
4484  .driver = {
4485  .pm = &sep_pm,
4486  },
4487 #endif
4488  .name = "sep_sec_driver",
4489  .id_table = sep_pci_id_tbl,
4490  .probe = sep_probe,
4491  .remove = sep_remove
4492 };
4493 
4494 module_pci_driver(sep_pci_driver);
4495 MODULE_LICENSE("GPL");