s5p_mfc_opr_v5.c
1 /*
2  * drivers/media/platform/samsung/mfc5/s5p_mfc_opr_v5.c
3  *
4  * Samsung MFC (Multi Function Codec - FIMV) driver
5  * This file contains hw related functions.
6  *
7  * Kamil Debski, Copyright (c) 2011 Samsung Electronics
8  * http://www.samsung.com/
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14 
15 #include "s5p_mfc_common.h"
16 #include "s5p_mfc_cmd.h"
17 #include "s5p_mfc_ctrl.h"
18 #include "s5p_mfc_debug.h"
19 #include "s5p_mfc_intr.h"
20 #include "s5p_mfc_pm.h"
21 #include "s5p_mfc_opr.h"
22 #include "s5p_mfc_opr_v5.h"
23 #include <asm/cacheflush.h>
24 #include <linux/delay.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/err.h>
27 #include <linux/firmware.h>
28 #include <linux/io.h>
29 #include <linux/jiffies.h>
30 #include <linux/mm.h>
31 #include <linux/sched.h>
32 
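/*
 * OFFSETA()/OFFSETB() convert a bus address located in memory bank 1 or
 * bank 2 into the shifted, base-relative offset format that the MFC v5
 * firmware expects in its address registers: (addr - bank_base) is scaled
 * down by MFC_OFFSET_SHIFT before being written to the hardware.
 */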
33 #define OFFSETA(x) (((x) - dev->bank1) >> MFC_OFFSET_SHIFT)
34 #define OFFSETB(x) (((x) - dev->bank2) >> MFC_OFFSET_SHIFT)
35 
36 /* Allocate temporary buffers for decoding */
37 int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx)
38 {
39  struct s5p_mfc_dev *dev = ctx->dev;
40  struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
41 
42  ctx->dsc.alloc = vb2_dma_contig_memops.alloc(
43  dev->alloc_ctx[MFC_BANK1_ALLOC_CTX],
44  buf_size->dsc);
45  if (IS_ERR_VALUE((int)ctx->dsc.alloc)) {
46  ctx->dsc.alloc = NULL;
47  mfc_err("Allocating DESC buffer failed\n");
48  return -ENOMEM;
49  }
50  ctx->dsc.dma = s5p_mfc_mem_cookie(
51  dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->dsc.alloc);
52  BUG_ON(ctx->dsc.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
53  ctx->dsc.virt = vb2_dma_contig_memops.vaddr(ctx->dsc.alloc);
54  if (ctx->dsc.virt == NULL) {
55  vb2_dma_contig_memops.put(ctx->dsc.alloc);
56  ctx->dsc.dma = 0;
57  ctx->dsc.alloc = NULL;
58  mfc_err("Remapping DESC buffer failed\n");
59  return -ENOMEM;
60  }
61  memset(ctx->dsc.virt, 0, buf_size->dsc);
62  wmb();
63  return 0;
64 }
65 
66 /* Release temporary buffers for decoding */
67 void s5p_mfc_release_dec_desc_buffer_v5(struct s5p_mfc_ctx *ctx)
68 {
69  if (ctx->dsc.dma) {
70  vb2_dma_contig_memops.put(ctx->dsc.alloc);
71  ctx->dsc.alloc = NULL;
72  ctx->dsc.dma = 0;
73  }
74 }
75 
76 /* Allocate codec buffers */
77 int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
78 {
79  struct s5p_mfc_dev *dev = ctx->dev;
80  unsigned int enc_ref_y_size = 0;
81  unsigned int enc_ref_c_size = 0;
82  unsigned int guard_width, guard_height;
83 
84  if (ctx->type == MFCINST_DECODER) {
85  mfc_debug(2, "Luma size:%d Chroma size:%d MV size:%d\n",
86  ctx->luma_size, ctx->chroma_size, ctx->mv_size);
87  mfc_debug(2, "Total bufs: %d\n", ctx->total_dpb_count);
88  } else if (ctx->type == MFCINST_ENCODER) {
89  enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
90  * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
91  enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
92 
93  if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) {
94  enc_ref_c_size = ALIGN(ctx->img_width,
95  S5P_FIMV_NV12MT_HALIGN)
96  * ALIGN(ctx->img_height >> 1,
97  S5P_FIMV_NV12MT_VALIGN);
98  enc_ref_c_size = ALIGN(enc_ref_c_size,
99  S5P_FIMV_NV12MT_SALIGN);
100  } else {
101  guard_width = ALIGN(ctx->img_width + 16,
102  S5P_FIMV_NV12MT_HALIGN);
103  guard_height = ALIGN((ctx->img_height >> 1) + 4,
104  S5P_FIMV_NV12MT_VALIGN);
105  enc_ref_c_size = ALIGN(guard_width * guard_height,
106  S5P_FIMV_NV12MT_SALIGN);
107  }
108  mfc_debug(2, "recon luma size: %d chroma size: %d\n",
109  enc_ref_y_size, enc_ref_c_size);
110  } else {
111  return -EINVAL;
112  }
113  /* Codecs have different memory requirements */
114  switch (ctx->codec_mode) {
115  case S5P_MFC_CODEC_H264_DEC:
116  ctx->bank1_size =
117  ALIGN(S5P_FIMV_DEC_NB_IP_SIZE +
118  S5P_FIMV_DEC_VERT_NB_MV_SIZE,
119  S5P_FIMV_DEC_BUF_ALIGN);
120  ctx->bank2_size = ctx->total_dpb_count * ctx->mv_size;
121  break;
122  case S5P_MFC_CODEC_MPEG4_DEC:
123  ctx->bank1_size =
124  ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE +
125  S5P_FIMV_DEC_UPNB_MV_SIZE +
126  S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
127  S5P_FIMV_DEC_STX_PARSER_SIZE +
128  S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE,
129  S5P_FIMV_DEC_BUF_ALIGN);
130  ctx->bank2_size = 0;
131  break;
132  case S5P_MFC_CODEC_VC1RCV_DEC:
133  case S5P_MFC_CODEC_VC1_DEC:
134  ctx->bank1_size =
135  ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
136  S5P_FIMV_DEC_UPNB_MV_SIZE +
137  S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
138  S5P_FIMV_DEC_NB_DCAC_SIZE +
139  3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE,
140  S5P_FIMV_DEC_BUF_ALIGN);
141  ctx->bank2_size = 0;
142  break;
143  case S5P_MFC_CODEC_MPEG2_DEC:
144  ctx->bank1_size = 0;
145  ctx->bank2_size = 0;
146  break;
147  case S5P_MFC_CODEC_H263_DEC:
148  ctx->bank1_size =
149  ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE +
150  S5P_FIMV_DEC_UPNB_MV_SIZE +
151  S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE +
152  S5P_FIMV_DEC_NB_DCAC_SIZE,
153  S5P_FIMV_DEC_BUF_ALIGN);
154  ctx->bank2_size = 0;
155  break;
156  case S5P_MFC_CODEC_H264_ENC:
157  ctx->bank1_size = (enc_ref_y_size * 2) +
158  S5P_FIMV_ENC_UPMV_SIZE +
159  S5P_FIMV_ENC_COLFLG_SIZE +
160  S5P_FIMV_ENC_INTRAMD_SIZE +
161  S5P_FIMV_ENC_NBORINFO_SIZE;
162  ctx->bank2_size = (enc_ref_y_size * 2) +
163  (enc_ref_c_size * 4) +
164  S5P_FIMV_ENC_INTRAPRED_SIZE;
165  break;
166  case S5P_MFC_CODEC_MPEG4_ENC:
167  ctx->bank1_size = (enc_ref_y_size * 2) +
168  S5P_FIMV_ENC_UPMV_SIZE +
169  S5P_FIMV_ENC_COLFLG_SIZE +
170  S5P_FIMV_ENC_ACDCCOEF_SIZE;
171  ctx->bank2_size = (enc_ref_y_size * 2) +
172  (enc_ref_c_size * 4);
173  break;
174  case S5P_MFC_CODEC_H263_ENC:
175  ctx->bank1_size = (enc_ref_y_size * 2) +
176  S5P_FIMV_ENC_UPMV_SIZE +
177  S5P_FIMV_ENC_ACDCCOEF_SIZE;
178  ctx->bank2_size = (enc_ref_y_size * 2) +
179  (enc_ref_c_size * 4);
180  break;
181  default:
182  break;
183  }
184  /* Allocate only if memory from bank 1 is necessary */
185  if (ctx->bank1_size > 0) {
186  ctx->bank1_buf = vb2_dma_contig_memops.alloc(
187  dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_size);
188  if (IS_ERR(ctx->bank1_buf)) {
189  ctx->bank1_buf = NULL;
190  printk(KERN_ERR
191  "Buf alloc for decoding failed (port A)\n");
192  return -ENOMEM;
193  }
194  ctx->bank1_phys = s5p_mfc_mem_cookie(
195  dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->bank1_buf);
196  BUG_ON(ctx->bank1_phys & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
197  }
198  /* Allocate only if memory from bank 2 is necessary */
199  if (ctx->bank2_size > 0) {
200  ctx->bank2_buf = vb2_dma_contig_memops.alloc(
201  dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_size);
202  if (IS_ERR(ctx->bank2_buf)) {
203  ctx->bank2_buf = NULL;
204  mfc_err("Buf alloc for decoding failed (port B)\n");
205  return -ENOMEM;
206  }
207  ctx->bank2_phys = s5p_mfc_mem_cookie(
208  dev->alloc_ctx[MFC_BANK2_ALLOC_CTX], ctx->bank2_buf);
209  BUG_ON(ctx->bank2_phys & ((1 << MFC_BANK2_ALIGN_ORDER) - 1));
210  }
211  return 0;
212 }
213 
214 /* Release buffers allocated for codec */
215 void s5p_mfc_release_codec_buffers_v5(struct s5p_mfc_ctx *ctx)
216 {
217  if (ctx->bank1_buf) {
218  vb2_dma_contig_memops.put(ctx->bank1_buf);
219  ctx->bank1_buf = NULL;
220  ctx->bank1_phys = 0;
221  ctx->bank1_size = 0;
222  }
223  if (ctx->bank2_buf) {
224  vb2_dma_contig_memops.put(ctx->bank2_buf);
225  ctx->bank2_buf = NULL;
226  ctx->bank2_phys = 0;
227  ctx->bank2_size = 0;
228  }
229 }
230 
231 /* Allocate memory for instance data buffer */
232 int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
233 {
234  struct s5p_mfc_dev *dev = ctx->dev;
235  struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
236 
237  if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
238  ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC)
239  ctx->ctx.size = buf_size->h264_ctx;
240  else
241  ctx->ctx.size = buf_size->non_h264_ctx;
242  ctx->ctx.alloc = vb2_dma_contig_memops.alloc(
243  dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.size);
244  if (IS_ERR(ctx->ctx.alloc)) {
245  mfc_err("Allocating context buffer failed\n");
246  ctx->ctx.alloc = NULL;
247  return -ENOMEM;
248  }
249  ctx->ctx.dma = s5p_mfc_mem_cookie(
250  dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->ctx.alloc);
251  BUG_ON(ctx->ctx.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
252  ctx->ctx.ofs = OFFSETA(ctx->ctx.dma);
253  ctx->ctx.virt = vb2_dma_contig_memops.vaddr(ctx->ctx.alloc);
254  if (!ctx->ctx.virt) {
255  mfc_err("Remapping instance buffer failed\n");
256  vb2_dma_contig_memops.put(ctx->ctx.alloc);
257  ctx->ctx.alloc = NULL;
258  ctx->ctx.ofs = 0;
259  ctx->ctx.dma = 0;
260  return -ENOMEM;
261  }
262  /* Zero content of the allocated memory */
263  memset(ctx->ctx.virt, 0, ctx->ctx.size);
264  wmb();
265 
266  /* Initialize shared memory */
267  ctx->shm.alloc = vb2_dma_contig_memops.alloc(
268  dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], buf_size->shm);
269  if (IS_ERR(ctx->shm.alloc)) {
270  mfc_err("failed to allocate shared memory\n");
271  return PTR_ERR(ctx->shm.alloc);
272  }
273  /* shared memory offset only keeps the offset from base (port a) */
274  ctx->shm.ofs = s5p_mfc_mem_cookie(
275  dev->alloc_ctx[MFC_BANK1_ALLOC_CTX], ctx->shm.alloc)
276  - dev->bank1;
277  BUG_ON(ctx->shm.ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
278 
279  ctx->shm.virt = vb2_dma_contig_memops.vaddr(ctx->shm.alloc);
280  if (!ctx->shm.virt) {
281  vb2_dma_contig_memops.put(ctx->shm.alloc);
282  ctx->shm.alloc = NULL;
283  ctx->shm.ofs = 0;
284  mfc_err("failed to get virtual address of shared memory\n");
285  return -ENOMEM;
286  }
287  memset((void *)ctx->shm.virt, 0, buf_size->shm);
288  wmb();
289  return 0;
290 }
291 
292 /* Release instance buffer */
293 void s5p_mfc_release_instance_buffer_v5(struct s5p_mfc_ctx *ctx)
294 {
295  if (ctx->ctx.alloc) {
296  vb2_dma_contig_memops.put(ctx->ctx.alloc);
297  ctx->ctx.alloc = NULL;
298  ctx->ctx.ofs = 0;
299  ctx->ctx.virt = NULL;
300  ctx->ctx.dma = 0;
301  }
302  if (ctx->shm.alloc) {
303  vb2_dma_contig_memops.put(ctx->shm.alloc);
304  ctx->shm.alloc = NULL;
305  ctx->shm.ofs = 0;
306  ctx->shm.virt = NULL;
307  }
308 }
309 
310 int s5p_mfc_alloc_dev_context_buffer_v5(struct s5p_mfc_dev *dev)
311 {
312  /* NOP */
313 
314  return 0;
315 }
316 
317 void s5p_mfc_release_dev_context_buffer_v5(struct s5p_mfc_dev *dev)
318 {
319  /* NOP */
320 }
321 
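/*
 * The per-context shared memory buffer extends the register interface of
 * the v5 firmware: the two helpers below access 32-bit fields at a given
 * offset inside that buffer, with memory barriers so CPU writes are
 * visible to the codec before a command is issued and reads observe the
 * firmware's latest updates.
 */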
322 static void s5p_mfc_write_info_v5(struct s5p_mfc_ctx *ctx, unsigned int data,
323  unsigned int ofs)
324 {
325  writel(data, (ctx->shm.virt + ofs));
326  wmb();
327 }
328 
329 static unsigned int s5p_mfc_read_info_v5(struct s5p_mfc_ctx *ctx,
330  unsigned int ofs)
331 {
332  rmb();
333  return readl(ctx->shm.virt + ofs);
334 }
335 
336 void s5p_mfc_dec_calc_dpb_size_v5(struct s5p_mfc_ctx *ctx)
337 {
338  unsigned int guard_width, guard_height;
339 
340  ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN);
341  ctx->buf_height = ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
342  mfc_debug(2,
343  "SEQ Done: Movie dimensions %dx%d, buffer dimensions: %dx%d\n",
344  ctx->img_width, ctx->img_height, ctx->buf_width,
345  ctx->buf_height);
346 
347  if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) {
348  ctx->luma_size = ALIGN(ctx->buf_width * ctx->buf_height,
349  S5P_FIMV_DEC_BUF_ALIGN);
350  ctx->chroma_size = ALIGN(ctx->buf_width *
351  ALIGN((ctx->img_height >> 1),
352  S5P_FIMV_NV12MT_VALIGN),
353  S5P_FIMV_DEC_BUF_ALIGN);
354  ctx->mv_size = ALIGN(ctx->buf_width *
355  ALIGN((ctx->buf_height >> 2),
356  S5P_FIMV_NV12MT_VALIGN),
357  S5P_FIMV_DEC_BUF_ALIGN);
358  } else {
359  guard_width =
360  ALIGN(ctx->img_width + 24, S5P_FIMV_NV12MT_HALIGN);
361  guard_height =
362  ALIGN(ctx->img_height + 16, S5P_FIMV_NV12MT_VALIGN);
363  ctx->luma_size = ALIGN(guard_width * guard_height,
364  S5P_FIMV_DEC_BUF_ALIGN);
365 
366  guard_width =
367  ALIGN(ctx->img_width + 16, S5P_FIMV_NV12MT_HALIGN);
368  guard_height =
369  ALIGN((ctx->img_height >> 1) + 4,
370  S5P_FIMV_NV12MT_VALIGN);
371  ctx->chroma_size = ALIGN(guard_width * guard_height,
372  S5P_FIMV_DEC_BUF_ALIGN);
373 
374  ctx->mv_size = 0;
375  }
376 }
377 
378 void s5p_mfc_enc_calc_src_size_v5(struct s5p_mfc_ctx *ctx)
379 {
380  if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) {
382 
386  * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12M_CVALIGN);
387 
389  ctx->chroma_size =
391  } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
393 
396  ctx->chroma_size =
398  * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
399 
401  ctx->chroma_size =
403  }
404 }
405 
406 /* Set registers for decoding temporary buffers */
407 static void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx)
408 {
409  struct s5p_mfc_dev *dev = ctx->dev;
410  struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv;
411 
412  mfc_write(dev, OFFSETA(ctx->dsc.dma), S5P_FIMV_SI_CH0_DESC_ADR);
413  mfc_write(dev, buf_size->dsc, S5P_FIMV_SI_CH0_DESC_SIZE);
414 }
415 
416 /* Set registers for shared buffer */
417 static void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
418 {
419  struct s5p_mfc_dev *dev = ctx->dev;
420  mfc_write(dev, ctx->shm.ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
421 }
422 
423 /* Set registers for decoding stream buffer */
424 int s5p_mfc_set_dec_stream_buffer_v5(struct s5p_mfc_ctx *ctx, int buf_addr,
425  unsigned int start_num_byte, unsigned int buf_size)
426 {
427  struct s5p_mfc_dev *dev = ctx->dev;
428 
431  mfc_write(dev, buf_size, S5P_FIMV_SI_CH0_SB_FRM_SIZE);
432  s5p_mfc_write_info_v5(ctx, start_num_byte, START_BYTE_NUM);
433  return 0;
434 }
435 
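/*
 * The next function carves the codec-specific auxiliary buffers out of the
 * preallocated bank 1 region, then programs the luma/chroma (and, for
 * H.264, motion vector) addresses of every DPB entry, returning -ENOMEM if
 * the banks turn out to be too small for the requested number of frames.
 */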
436 /* Set decoding frame buffer */
437 int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx)
438 {
439  unsigned int frame_size, i;
440  unsigned int frame_size_ch, frame_size_mv;
441  struct s5p_mfc_dev *dev = ctx->dev;
442  unsigned int dpb;
443  size_t buf_addr1, buf_addr2;
444  int buf_size1, buf_size2;
445 
446  buf_addr1 = ctx->bank1_phys;
447  buf_size1 = ctx->bank1_size;
448  buf_addr2 = ctx->bank2_phys;
449  buf_size2 = ctx->bank2_size;
452  mfc_write(dev, ctx->total_dpb_count | dpb,
454  s5p_mfc_set_shared_buffer(ctx);
455  switch (ctx->codec_mode) {
457  mfc_write(dev, OFFSETA(buf_addr1),
459  buf_addr1 += S5P_FIMV_DEC_VERT_NB_MV_SIZE;
460  buf_size1 -= S5P_FIMV_DEC_VERT_NB_MV_SIZE;
461  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_NB_IP_ADR);
462  buf_addr1 += S5P_FIMV_DEC_NB_IP_SIZE;
463  buf_size1 -= S5P_FIMV_DEC_NB_IP_SIZE;
464  break;
466  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_NB_DCAC_ADR);
467  buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
468  buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
470  buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
471  buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
472  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SA_MV_ADR);
473  buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
474  buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
475  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SP_ADR);
476  buf_addr1 += S5P_FIMV_DEC_STX_PARSER_SIZE;
477  buf_size1 -= S5P_FIMV_DEC_STX_PARSER_SIZE;
478  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_OT_LINE_ADR);
481  break;
483  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_OT_LINE_ADR);
486  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_NB_MV_ADR);
487  buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
488  buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
489  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_SA_MV_ADR);
490  buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
491  buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
492  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_NB_DCAC_ADR);
493  buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
494  buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
495  break;
498  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_NB_DCAC_ADR);
499  buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
500  buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
501  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_OT_LINE_ADR);
504  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_UP_NB_MV_ADR);
505  buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
506  buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
507  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_SA_MV_ADR);
508  buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
509  buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
510  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE3_ADR);
511  buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
512  buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
513  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE2_ADR);
514  buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
515  buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
516  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_BITPLANE1_ADR);
517  buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
518  buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
519  break;
521  break;
522  default:
523  mfc_err("Unknown codec for decoding (%x)\n",
524  ctx->codec_mode);
525  return -EINVAL;
526  break;
527  }
528  frame_size = ctx->luma_size;
529  frame_size_ch = ctx->chroma_size;
530  frame_size_mv = ctx->mv_size;
531  mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size, frame_size_ch,
532  frame_size_mv);
533  for (i = 0; i < ctx->total_dpb_count; i++) {
534  /* Bank2 */
535  mfc_debug(2, "Luma %d: %x\n", i,
536  ctx->dst_bufs[i].cookie.raw.luma);
537  mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
538  S5P_FIMV_DEC_LUMA_ADR + i * 4);
539  mfc_debug(2, "\tChroma %d: %x\n", i,
540  ctx->dst_bufs[i].cookie.raw.chroma);
541  mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
542  S5P_FIMV_DEC_CHROMA_ADR + i * 4);
543  if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) {
544  mfc_debug(2, "\tBuf2: %x, size: %d\n",
545  buf_addr2, buf_size2);
546  mfc_write(dev, OFFSETB(buf_addr2),
547  S5P_FIMV_H264_MV_ADR + i * 4);
548  buf_addr2 += frame_size_mv;
549  buf_size2 -= frame_size_mv;
550  }
551  }
552  mfc_debug(2, "Buf1: %u, buf_size1: %d\n", buf_addr1, buf_size1);
553  mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
554  buf_size1, buf_size2, ctx->total_dpb_count);
555  if (buf_size1 < 0 || buf_size2 < 0) {
556  mfc_debug(2, "Not enough memory has been allocated\n");
557  return -ENOMEM;
558  }
559  s5p_mfc_write_info_v5(ctx, frame_size, ALLOC_LUMA_DPB_SIZE);
560  s5p_mfc_write_info_v5(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
561  if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC)
562  s5p_mfc_write_info_v5(ctx, frame_size_mv, ALLOC_MV_SIZE);
563  mfc_write(dev, ((S5P_FIMV_CH_INIT_BUFS & S5P_FIMV_CH_MASK)
564  << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
565  S5P_FIMV_SI_CH0_INST_ID);
566  return 0;
567 }
568 
569 /* Set registers for encoding stream buffer */
570 int s5p_mfc_set_enc_stream_buffer_v5(struct s5p_mfc_ctx *ctx,
571  unsigned long addr, unsigned int size)
572 {
573  struct s5p_mfc_dev *dev = ctx->dev;
574 
577  return 0;
578 }
579 
580 void s5p_mfc_set_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
581  unsigned long y_addr, unsigned long c_addr)
582 {
583  struct s5p_mfc_dev *dev = ctx->dev;
584 
587 }
588 
589 void s5p_mfc_get_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
590  unsigned long *y_addr, unsigned long *c_addr)
591 {
592  struct s5p_mfc_dev *dev = ctx->dev;
593 
594  *y_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR)
595  << MFC_OFFSET_SHIFT);
596  *c_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR)
597  << MFC_OFFSET_SHIFT);
598 }
599 
600 /* Set encoding ref & codec buffer */
601 int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx)
602 {
603  struct s5p_mfc_dev *dev = ctx->dev;
604  size_t buf_addr1, buf_addr2;
605  size_t buf_size1, buf_size2;
606  unsigned int enc_ref_y_size, enc_ref_c_size;
607  unsigned int guard_width, guard_height;
608  int i;
609 
610  buf_addr1 = ctx->bank1_phys;
611  buf_size1 = ctx->bank1_size;
612  buf_addr2 = ctx->bank2_phys;
613  buf_size2 = ctx->bank2_size;
614  enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
615  * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
616  enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
617  if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) {
618  enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
619  * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN);
620  enc_ref_c_size = ALIGN(enc_ref_c_size, S5P_FIMV_NV12MT_SALIGN);
621  } else {
622  guard_width = ALIGN(ctx->img_width + 16,
624  guard_height = ALIGN((ctx->img_height >> 1) + 4,
626  enc_ref_c_size = ALIGN(guard_width * guard_height,
628  }
629  mfc_debug(2, "buf_size1: %d, buf_size2: %d\n", buf_size1, buf_size2);
630  switch (ctx->codec_mode) {
632  for (i = 0; i < 2; i++) {
633  mfc_write(dev, OFFSETA(buf_addr1),
634  S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
635  buf_addr1 += enc_ref_y_size;
636  buf_size1 -= enc_ref_y_size;
637 
638  mfc_write(dev, OFFSETB(buf_addr2),
639  S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
640  buf_addr2 += enc_ref_y_size;
641  buf_size2 -= enc_ref_y_size;
642  }
643  for (i = 0; i < 4; i++) {
644  mfc_write(dev, OFFSETB(buf_addr2),
645  S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
646  buf_addr2 += enc_ref_c_size;
647  buf_size2 -= enc_ref_c_size;
648  }
649  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_UP_MV_ADR);
650  buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
651  buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
652  mfc_write(dev, OFFSETA(buf_addr1),
654  buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
655  buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
656  mfc_write(dev, OFFSETA(buf_addr1),
658  buf_addr1 += S5P_FIMV_ENC_INTRAMD_SIZE;
659  buf_size1 -= S5P_FIMV_ENC_INTRAMD_SIZE;
660  mfc_write(dev, OFFSETB(buf_addr2),
662  buf_addr2 += S5P_FIMV_ENC_INTRAPRED_SIZE;
663  buf_size2 -= S5P_FIMV_ENC_INTRAPRED_SIZE;
664  mfc_write(dev, OFFSETA(buf_addr1),
666  buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
667  buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
668  mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
669  buf_size1, buf_size2);
670  break;
672  for (i = 0; i < 2; i++) {
673  mfc_write(dev, OFFSETA(buf_addr1),
674  S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
675  buf_addr1 += enc_ref_y_size;
676  buf_size1 -= enc_ref_y_size;
677  mfc_write(dev, OFFSETB(buf_addr2),
678  S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
679  buf_addr2 += enc_ref_y_size;
680  buf_size2 -= enc_ref_y_size;
681  }
682  for (i = 0; i < 4; i++) {
683  mfc_write(dev, OFFSETB(buf_addr2),
684  S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
685  buf_addr2 += enc_ref_c_size;
686  buf_size2 -= enc_ref_c_size;
687  }
688  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_MV_ADR);
689  buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
690  buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
691  mfc_write(dev, OFFSETA(buf_addr1),
693  buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
694  buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
695  mfc_write(dev, OFFSETA(buf_addr1),
697  buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
698  buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
699  mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
700  buf_size1, buf_size2);
701  break;
703  for (i = 0; i < 2; i++) {
704  mfc_write(dev, OFFSETA(buf_addr1),
705  S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
706  buf_addr1 += enc_ref_y_size;
707  buf_size1 -= enc_ref_y_size;
708  mfc_write(dev, OFFSETB(buf_addr2),
709  S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
710  buf_addr2 += enc_ref_y_size;
711  buf_size2 -= enc_ref_y_size;
712  }
713  for (i = 0; i < 4; i++) {
714  mfc_write(dev, OFFSETB(buf_addr2),
715  S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
716  buf_addr2 += enc_ref_c_size;
717  buf_size2 -= enc_ref_c_size;
718  }
719  mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_MV_ADR);
720  buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
721  buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
723  buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
724  buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
725  mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
726  buf_size1, buf_size2);
727  break;
728  default:
729  mfc_err("Unknown codec set for encoding: %d\n",
730  ctx->codec_mode);
731  return -EINVAL;
732  }
733  return 0;
734 }
735 
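/*
 * Common encoder setup: s5p_mfc_set_enc_params() programs the parameters
 * shared by all codecs (frame size, GOP size, multi-slice control, padding,
 * frame-level rate control), partly through registers and partly through
 * shared memory fields written with s5p_mfc_write_info_v5(). The
 * codec-specific variants further below call it first and then add their
 * own profile, level, QP and VUI settings.
 */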
736 static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
737 {
738  struct s5p_mfc_dev *dev = ctx->dev;
739  struct s5p_mfc_enc_params *p = &ctx->enc_params;
740  unsigned int reg;
741  unsigned int shm;
742 
743  /* width */
745  /* height */
747  /* pictype : enable, IDR period */
749  reg |= (1 << 18);
750  reg &= ~(0xFFFF);
751  reg |= p->gop_size;
754  /* multi-slice control */
755  /* multi-slice MB number or bit size */
761  } else {
764  }
765  /* cyclic intra refresh */
767  /* memory structure cur. frame */
768  if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
770  else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
772  /* padding control & value */
774  if (p->pad) {
776  reg |= (1 << 31);
778  reg &= ~(0xFF << 16);
779  reg |= (p->pad_cr << 16);
781  reg &= ~(0xFF << 8);
782  reg |= (p->pad_cb << 8);
784  reg &= ~(0xFF);
785  reg |= (p->pad_luma);
786  } else {
788  reg = 0;
789  }
791  /* rate control config. */
792  reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
794  reg &= ~(0x1 << 9);
795  reg |= (p->rc_frame << 9);
797  /* bit rate */
798  if (p->rc_frame)
799  mfc_write(dev, p->rc_bitrate,
801  else
803  /* reaction coefficient */
804  if (p->rc_frame)
806  shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
807  /* seq header ctrl */
808  shm &= ~(0x1 << 3);
809  shm |= (p->seq_hdr_mode << 3);
810  /* frame skip mode */
811  shm &= ~(0x3 << 1);
812  shm |= (p->frame_skip_mode << 1);
813  s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
814  /* fixed target bit */
815  s5p_mfc_write_info_v5(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
816  return 0;
817 }
818 
819 static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
820 {
821  struct s5p_mfc_dev *dev = ctx->dev;
822  struct s5p_mfc_enc_params *p = &ctx->enc_params;
823  struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
824  unsigned int reg;
825  unsigned int shm;
826 
827  s5p_mfc_set_enc_params(ctx);
828  /* pictype : number of B */
830  /* num_b_frame - 0 ~ 2 */
831  reg &= ~(0x3 << 16);
832  reg |= (p->num_b_frame << 16);
834  /* profile & level */
835  reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
836  /* level */
837  reg &= ~(0xFF << 8);
838  reg |= (p_264->level << 8);
839  /* profile - 0 ~ 2 */
840  reg &= ~(0x3F);
841  reg |= p_264->profile;
842  mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
843  /* interlace */
845  /* height */
846  if (p_264->interlace)
847  mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX);
848  /* loopfilter ctrl */
850  /* loopfilter alpha offset */
851  if (p_264->loop_filter_alpha < 0) {
852  reg = 0x10;
853  reg |= (0xFF - p_264->loop_filter_alpha) + 1;
854  } else {
855  reg = 0x00;
856  reg |= (p_264->loop_filter_alpha & 0xF);
857  }
859  /* loopfilter beta offset */
860  if (p_264->loop_filter_beta < 0) {
861  reg = 0x10;
862  reg |= (0xFF - p_264->loop_filter_beta) + 1;
863  } else {
864  reg = 0x00;
865  reg |= (p_264->loop_filter_beta & 0xF);
866  }
867  mfc_write(dev, reg, S5P_FIMV_ENC_BETA_OFF);
868  /* entropy coding mode */
871  else
873  /* number of ref. picture */
875  /* num of ref. pictures of P */
876  reg &= ~(0x3 << 5);
877  reg |= (p_264->num_ref_pic_4p << 5);
878  /* max number of ref. pictures */
879  reg &= ~(0x1F);
880  reg |= p_264->max_ref_pic;
882  /* 8x8 transform enable */
884  /* rate control config. */
885  reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
886  /* macroblock level rate control */
887  reg &= ~(0x1 << 8);
888  reg |= (p->rc_mb << 8);
889  /* frame QP */
890  reg &= ~(0x3F);
891  reg |= p_264->rc_frame_qp;
893  /* frame rate */
894  if (p->rc_frame && p->rc_framerate_denom)
895  mfc_write(dev, p->rc_framerate_num * 1000
897  else
899  /* max & min value of QP */
900  reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
901  /* max QP */
902  reg &= ~(0x3F << 8);
903  reg |= (p_264->rc_max_qp << 8);
904  /* min QP */
905  reg &= ~(0x3F);
906  reg |= p_264->rc_min_qp;
908  /* macroblock adaptive scaling features */
909  if (p->rc_mb) {
910  reg = mfc_read(dev, S5P_FIMV_ENC_RC_MB_CTRL);
911  /* dark region */
912  reg &= ~(0x1 << 3);
913  reg |= (p_264->rc_mb_dark << 3);
914  /* smooth region */
915  reg &= ~(0x1 << 2);
916  reg |= (p_264->rc_mb_smooth << 2);
917  /* static region */
918  reg &= ~(0x1 << 1);
919  reg |= (p_264->rc_mb_static << 1);
920  /* high activity region */
921  reg &= ~(0x1);
922  reg |= p_264->rc_mb_activity;
924  }
925  if (!p->rc_frame && !p->rc_mb) {
926  shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
927  shm &= ~(0xFFF);
928  shm |= ((p_264->rc_b_frame_qp & 0x3F) << 6);
929  shm |= (p_264->rc_p_frame_qp & 0x3F);
930  s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
931  }
932  /* extended encoder ctrl */
933  shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
934  /* AR VUI control */
935  shm &= ~(0x1 << 15);
936  shm |= (p_264->vui_sar << 1);
937  s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
938  if (p_264->vui_sar) {
939  /* aspect ration IDC */
940  shm = s5p_mfc_read_info_v5(ctx, SAMPLE_ASPECT_RATIO_IDC);
941  shm &= ~(0xFF);
942  shm |= p_264->vui_sar_idc;
943  s5p_mfc_write_info_v5(ctx, shm, SAMPLE_ASPECT_RATIO_IDC);
944  if (p_264->vui_sar_idc == 0xFF) {
945  /* sample AR info */
946  shm = s5p_mfc_read_info_v5(ctx, EXTENDED_SAR);
947  shm &= ~(0xFFFFFFFF);
948  shm |= p_264->vui_ext_sar_width << 16;
949  shm |= p_264->vui_ext_sar_height;
950  s5p_mfc_write_info_v5(ctx, shm, EXTENDED_SAR);
951  }
952  }
953  /* intra picture period for H.264 */
954  shm = s5p_mfc_read_info_v5(ctx, H264_I_PERIOD);
955  /* control */
956  shm &= ~(0x1 << 16);
957  shm |= (p_264->open_gop << 16);
958  /* value */
959  if (p_264->open_gop) {
960  shm &= ~(0xFFFF);
961  shm |= p_264->open_gop_size;
962  }
963  s5p_mfc_write_info_v5(ctx, shm, H264_I_PERIOD);
964  /* extended encoder ctrl */
965  shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
966  /* vbv buffer size */
967  if (p->frame_skip_mode ==
969  shm &= ~(0xFFFF << 16);
970  shm |= (p_264->cpb_size << 16);
971  }
972  s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
973  return 0;
974 }
975 
976 static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
977 {
978  struct s5p_mfc_dev *dev = ctx->dev;
979  struct s5p_mfc_enc_params *p = &ctx->enc_params;
980  struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
981  unsigned int reg;
982  unsigned int shm;
983  unsigned int framerate;
984 
985  s5p_mfc_set_enc_params(ctx);
986  /* pictype : number of B */
988  /* num_b_frame - 0 ~ 2 */
989  reg &= ~(0x3 << 16);
990  reg |= (p->num_b_frame << 16);
992  /* profile & level */
993  reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
994  /* level */
995  reg &= ~(0xFF << 8);
996  reg |= (p_mpeg4->level << 8);
997  /* profile - 0 ~ 2 */
998  reg &= ~(0x3F);
999  reg |= p_mpeg4->profile;
1000  mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
1001  /* quarter_pixel */
1003  /* qp */
1004  if (!p->rc_frame) {
1005  shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
1006  shm &= ~(0xFFF);
1007  shm |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 6);
1008  shm |= (p_mpeg4->rc_p_frame_qp & 0x3F);
1009  s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
1010  }
1011  /* frame rate */
1012  if (p->rc_frame) {
1013  if (p->rc_framerate_denom > 0) {
1014  framerate = p->rc_framerate_num * 1000 /
1015  p->rc_framerate_denom;
1016  mfc_write(dev, framerate,
1018  shm = s5p_mfc_read_info_v5(ctx, RC_VOP_TIMING);
1019  shm &= ~(0xFFFFFFFF);
1020  shm |= (1 << 31);
1021  shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
1022  shm |= (p->rc_framerate_denom & 0xFFFF);
1023  s5p_mfc_write_info_v5(ctx, shm, RC_VOP_TIMING);
1024  }
1025  } else {
1027  }
1028  /* rate control config. */
1029  reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
1030  /* frame QP */
1031  reg &= ~(0x3F);
1032  reg |= p_mpeg4->rc_frame_qp;
1033  mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
1034  /* max & min value of QP */
1035  reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
1036  /* max QP */
1037  reg &= ~(0x3F << 8);
1038  reg |= (p_mpeg4->rc_max_qp << 8);
1039  /* min QP */
1040  reg &= ~(0x3F);
1041  reg |= p_mpeg4->rc_min_qp;
1042  mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
1043  /* extended encoder ctrl */
1044  shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
1045  /* vbv buffer size */
1046  if (p->frame_skip_mode ==
1048  shm &= ~(0xFFFF << 16);
1049  shm |= (p->vbv_size << 16);
1050  }
1051  s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
1052  return 0;
1053 }
1054 
1055 static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
1056 {
1057  struct s5p_mfc_dev *dev = ctx->dev;
1058  struct s5p_mfc_enc_params *p = &ctx->enc_params;
1059  struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
1060  unsigned int reg;
1061  unsigned int shm;
1062 
1063  s5p_mfc_set_enc_params(ctx);
1064  /* qp */
1065  if (!p->rc_frame) {
1066  shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
1067  shm &= ~(0xFFF);
1068  shm |= (p_h263->rc_p_frame_qp & 0x3F);
1069  s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
1070  }
1071  /* frame rate */
1072  if (p->rc_frame && p->rc_framerate_denom)
1073  mfc_write(dev, p->rc_framerate_num * 1000
1075  else
1077  /* rate control config. */
1078  reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
1079  /* frame QP */
1080  reg &= ~(0x3F);
1081  reg |= p_h263->rc_frame_qp;
1082  mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
1083  /* max & min value of QP */
1084  reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
1085  /* max QP */
1086  reg &= ~(0x3F << 8);
1087  reg |= (p_h263->rc_max_qp << 8);
1088  /* min QP */
1089  reg &= ~(0x3F);
1090  reg |= p_h263->rc_min_qp;
1091  mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
1092  /* extended encoder ctrl */
1093  shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
1094  /* vbv buffer size */
1095  if (p->frame_skip_mode ==
1097  shm &= ~(0xFFFF << 16);
1098  shm |= (p->vbv_size << 16);
1099  }
1100  s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
1101  return 0;
1102 }
1103 
1104 /* Initialize decoding */
1105 int s5p_mfc_init_decode_v5(struct s5p_mfc_ctx *ctx)
1106 {
1107  struct s5p_mfc_dev *dev = ctx->dev;
1108 
1109  s5p_mfc_set_shared_buffer(ctx);
1110  /* Setup loop filter, for decoding this is only valid for MPEG4 */
1111  if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_DEC)
1113  else
1120  mfc_write(dev,
1122  | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1123  return 0;
1124 }
1125 
1126 static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
1127 {
1128  struct s5p_mfc_dev *dev = ctx->dev;
1129  unsigned int dpb;
1130 
1131  if (flush)
1132  dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) | (
1134  else
1138 }
1139 
1140 /* Decode a single frame */
1141 static int s5p_mfc_decode_one_frame_v5(struct s5p_mfc_ctx *ctx,
1142  enum s5p_mfc_decode_arg last_frame)
1143 {
1144  struct s5p_mfc_dev *dev = ctx->dev;
1145 
1147  s5p_mfc_set_shared_buffer(ctx);
1148  s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
1149  /* Issue different commands to the instance depending on
1150  * whether this is the last frame or not. */
1151  switch (last_frame) {
1152  case MFC_DEC_FRAME:
1155  break;
1156  case MFC_DEC_LAST_FRAME:
1157  mfc_write(dev, ((S5P_FIMV_CH_LAST_FRAME & S5P_FIMV_CH_MASK) <<
1159  break;
1160  case MFC_DEC_RES_CHANGE:
1162  S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
1164  break;
1165  }
1166  mfc_debug(2, "Decoding a usual frame\n");
1167  return 0;
1168 }
1169 
1170 int s5p_mfc_init_encode_v5(struct s5p_mfc_ctx *ctx)
1171 {
1172  struct s5p_mfc_dev *dev = ctx->dev;
1173 
1174  if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
1175  s5p_mfc_set_enc_params_h264(ctx);
1176  else if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_ENC)
1177  s5p_mfc_set_enc_params_mpeg4(ctx);
1178  else if (ctx->codec_mode == S5P_MFC_CODEC_H263_ENC)
1179  s5p_mfc_set_enc_params_h263(ctx);
1180  else {
1181  mfc_err("Unknown codec for encoding (%x)\n",
1182  ctx->codec_mode);
1183  return -EINVAL;
1184  }
1185  s5p_mfc_set_shared_buffer(ctx);
1186  mfc_write(dev, ((S5P_FIMV_CH_SEQ_HEADER << 16) & 0x70000) |
1188  return 0;
1189 }
1190 
1191 /* Encode a single frame */
1192 int s5p_mfc_encode_one_frame_v5(struct s5p_mfc_ctx *ctx)
1193 {
1194  struct s5p_mfc_dev *dev = ctx->dev;
1195  int cmd;
1196  /* memory structure cur. frame */
1197  if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
1199  else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
1201  s5p_mfc_set_shared_buffer(ctx);
1202 
1203  if (ctx->state == MFCINST_FINISHING)
1204  cmd = S5P_FIMV_CH_LAST_FRAME;
1205  else
1207  mfc_write(dev, ((cmd & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
1208  | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
1209 
1210  return 0;
1211 }
1212 
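/*
 * Context scheduling helper: scan ctx_work_bits round-robin, starting just
 * after the context that ran last, under the condlock spinlock. Returns the
 * index of the next context with pending work, or -EAGAIN when none has.
 */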
1213 static int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
1214 {
1215  unsigned long flags;
1216  int new_ctx;
1217  int cnt;
1218 
1219  spin_lock_irqsave(&dev->condlock, flags);
1220  new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS;
1221  cnt = 0;
1222  while (!test_bit(new_ctx, &dev->ctx_work_bits)) {
1223  new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS;
1224  if (++cnt > MFC_NUM_CONTEXTS) {
1225  /* No contexts to run */
1226  spin_unlock_irqrestore(&dev->condlock, flags);
1227  return -EAGAIN;
1228  }
1229  }
1230  spin_unlock_irqrestore(&dev->condlock, flags);
1231  return new_ctx;
1232 }
1233 
1234 static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
1235 {
1236  struct s5p_mfc_dev *dev = ctx->dev;
1237 
1238  s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0);
1239  dev->curr_ctx = ctx->num;
1242 }
1243 
1244 static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
1245 {
1246  struct s5p_mfc_dev *dev = ctx->dev;
1247  struct s5p_mfc_buf *temp_vb;
1248  unsigned long flags;
1249  unsigned int index;
1250 
1251  spin_lock_irqsave(&dev->irqlock, flags);
1252  /* Frames are being decoded */
1253  if (list_empty(&ctx->src_queue)) {
1254  mfc_debug(2, "No src buffers\n");
1255  spin_unlock_irqrestore(&dev->irqlock, flags);
1256  return -EAGAIN;
1257  }
1258  /* Get the next source buffer */
1259  temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1260  temp_vb->flags |= MFC_BUF_FLAG_USED;
1262  vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
1263  ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused);
1264  spin_unlock_irqrestore(&dev->irqlock, flags);
1265  index = temp_vb->b->v4l2_buf.index;
1266  dev->curr_ctx = ctx->num;
1268  if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
1269  last_frame = MFC_DEC_LAST_FRAME;
1270  mfc_debug(2, "Setting ctx->state to FINISHING\n");
1271  ctx->state = MFCINST_FINISHING;
1272  }
1273  s5p_mfc_decode_one_frame_v5(ctx, last_frame);
1274  return 0;
1275 }
1276 
1277 static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
1278 {
1279  struct s5p_mfc_dev *dev = ctx->dev;
1280  unsigned long flags;
1281  struct s5p_mfc_buf *dst_mb;
1282  struct s5p_mfc_buf *src_mb;
1283  unsigned long src_y_addr, src_c_addr, dst_addr;
1284  unsigned int dst_size;
1285 
1286  spin_lock_irqsave(&dev->irqlock, flags);
1287  if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) {
1288  mfc_debug(2, "no src buffers\n");
1289  spin_unlock_irqrestore(&dev->irqlock, flags);
1290  return -EAGAIN;
1291  }
1292  if (list_empty(&ctx->dst_queue)) {
1293  mfc_debug(2, "no dst buffers\n");
1294  spin_unlock_irqrestore(&dev->irqlock, flags);
1295  return -EAGAIN;
1296  }
1297  if (list_empty(&ctx->src_queue)) {
1298  /* send null frame */
1299  s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2, dev->bank2);
1300  src_mb = NULL;
1301  } else {
1302  src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
1303  list);
1304  src_mb->flags |= MFC_BUF_FLAG_USED;
1305  if (src_mb->b->v4l2_planes[0].bytesused == 0) {
1306  /* send null frame */
1308  dev->bank2);
1309  ctx->state = MFCINST_FINISHING;
1310  } else {
1311  src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
1312  0);
1313  src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
1314  1);
1315  s5p_mfc_set_enc_frame_buffer_v5(ctx, src_y_addr,
1316  src_c_addr);
1317  if (src_mb->flags & MFC_BUF_FLAG_EOS)
1318  ctx->state = MFCINST_FINISHING;
1319  }
1320  }
1321  dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
1322  dst_mb->flags |= MFC_BUF_FLAG_USED;
1323  dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
1324  dst_size = vb2_plane_size(dst_mb->b, 0);
1325  s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
1326  spin_unlock_irqrestore(&dev->irqlock, flags);
1327  dev->curr_ctx = ctx->num;
1329  mfc_debug(2, "encoding buffer with index=%d state=%d",
1330  src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
1332  return 0;
1333 }
1334 
1335 static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
1336 {
1337  struct s5p_mfc_dev *dev = ctx->dev;
1338  unsigned long flags;
1339  struct s5p_mfc_buf *temp_vb;
1340 
1341  /* Initializing decoding - parsing header */
1342  spin_lock_irqsave(&dev->irqlock, flags);
1343  mfc_debug(2, "Preparing to init decoding\n");
1344  temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1345  s5p_mfc_set_dec_desc_buffer(ctx);
1346  mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
1348  vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
1349  0, temp_vb->b->v4l2_planes[0].bytesused);
1350  spin_unlock_irqrestore(&dev->irqlock, flags);
1351  dev->curr_ctx = ctx->num;
1354 }
1355 
1356 static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
1357 {
1358  struct s5p_mfc_dev *dev = ctx->dev;
1359  unsigned long flags;
1360  struct s5p_mfc_buf *dst_mb;
1361  unsigned long dst_addr;
1362  unsigned int dst_size;
1363 
1365  spin_lock_irqsave(&dev->irqlock, flags);
1366  dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
1367  dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
1368  dst_size = vb2_plane_size(dst_mb->b, 0);
1369  s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
1370  spin_unlock_irqrestore(&dev->irqlock, flags);
1371  dev->curr_ctx = ctx->num;
1374 }
1375 
1376 static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
1377 {
1378  struct s5p_mfc_dev *dev = ctx->dev;
1379  unsigned long flags;
1380  struct s5p_mfc_buf *temp_vb;
1381  int ret;
1382 
1383  /*
1384  * Header was parsed now starting processing
1385  * First set the output frame buffers
1386  */
1387  if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
1388  mfc_err("It seems that not all destination buffers were "
1389  "mmapped\nMFC requires that all destination buffers are "
1390  "mmapped before starting processing\n");
1391  return -EAGAIN;
1392  }
1393  spin_lock_irqsave(&dev->irqlock, flags);
1394  if (list_empty(&ctx->src_queue)) {
1395  mfc_err("Header has been deallocated in the middle of"
1396  " initialization\n");
1397  spin_unlock_irqrestore(&dev->irqlock, flags);
1398  return -EIO;
1399  }
1400  temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
1401  mfc_debug(2, "Header size: %d\n", temp_vb->b->v4l2_planes[0].bytesused);
1403  vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
1404  0, temp_vb->b->v4l2_planes[0].bytesused);
1405  spin_unlock_irqrestore(&dev->irqlock, flags);
1406  dev->curr_ctx = ctx->num;
1409  if (ret) {
1410  mfc_err("Failed to alloc frame mem\n");
1411  ctx->state = MFCINST_ERROR;
1412  }
1413  return ret;
1414 }
1415 
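/*
 * s5p_mfc_try_run_v5() is the scheduling entry point: it takes the hw_lock
 * bit, picks a runnable context, turns the clocks on and issues the
 * operation matching the context state (instance open/close, header
 * parsing, buffer init, frame decode/encode). If nothing could be
 * scheduled, the lock and the clock reference are dropped again, since no
 * interrupt will arrive to do it.
 */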
1416 /* Try running an operation on hardware */
1417 void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
1418 {
1419  struct s5p_mfc_ctx *ctx;
1420  int new_ctx;
1421  unsigned int ret = 0;
1422 
1423  if (test_bit(0, &dev->enter_suspend)) {
1424  mfc_debug(1, "Entering suspend so do not schedule any jobs\n");
1425  return;
1426  }
1427  /* Check whether hardware is not running */
1428  if (test_and_set_bit(0, &dev->hw_lock) != 0) {
1429  /* This is perfectly ok, the scheduled ctx should wait */
1430  mfc_debug(1, "Couldn't lock HW\n");
1431  return;
1432  }
1433  /* Choose the context to run */
1434  new_ctx = s5p_mfc_get_new_ctx(dev);
1435  if (new_ctx < 0) {
1436  /* No contexts to run */
1437  if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
1438  mfc_err("Failed to unlock hardware\n");
1439  return;
1440  }
1441  mfc_debug(1, "No ctx is scheduled to be run\n");
1442  return;
1443  }
1444  ctx = dev->ctx[new_ctx];
1445  /* Got context to run in ctx */
1446  /*
1447  * Last frame has already been sent to MFC.
1448  * Now obtaining frames from MFC buffer
1449  */
1450  s5p_mfc_clock_on();
1451  if (ctx->type == MFCINST_DECODER) {
1452  s5p_mfc_set_dec_desc_buffer(ctx);
1453  switch (ctx->state) {
1454  case MFCINST_FINISHING:
1455  s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME);
1456  break;
1457  case MFCINST_RUNNING:
1458  ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
1459  break;
1460  case MFCINST_INIT:
1462  ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
1463  ctx);
1464  break;
1465  case MFCINST_RETURN_INST:
1467  ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
1468  ctx);
1469  break;
1470  case MFCINST_GOT_INST:
1471  s5p_mfc_run_init_dec(ctx);
1472  break;
1473  case MFCINST_HEAD_PARSED:
1474  ret = s5p_mfc_run_init_dec_buffers(ctx);
1475  mfc_debug(1, "head parsed\n");
1476  break;
1478  s5p_mfc_run_res_change(ctx);
1479  break;
1481  s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME);
1482  break;
1484  mfc_debug(2, "Finished remaining frames after resolution change\n");
1485  ctx->capture_state = QUEUE_FREE;
1486  mfc_debug(2, "Will re-init the codec\n");
1487  s5p_mfc_run_init_dec(ctx);
1488  break;
1489  default:
1490  ret = -EAGAIN;
1491  }
1492  } else if (ctx->type == MFCINST_ENCODER) {
1493  switch (ctx->state) {
1494  case MFCINST_FINISHING:
1495  case MFCINST_RUNNING:
1496  ret = s5p_mfc_run_enc_frame(ctx);
1497  break;
1498  case MFCINST_INIT:
1500  ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd,
1501  ctx);
1502  break;
1503  case MFCINST_RETURN_INST:
1505  ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd,
1506  ctx);
1507  break;
1508  case MFCINST_GOT_INST:
1509  s5p_mfc_run_init_enc(ctx);
1510  break;
1511  default:
1512  ret = -EAGAIN;
1513  }
1514  } else {
1515  mfc_err("Invalid context type: %d\n", ctx->type);
1516  ret = -EAGAIN;
1517  }
1518 
1519  if (ret) {
1520  /* Free hardware lock */
1521  if (test_and_clear_bit(0, &dev->hw_lock) == 0)
1522  mfc_err("Failed to unlock hardware\n");
1523 
1524  /* This is indeed important: since no operation has been
1525  * scheduled, drop the clock reference here, because no
1526  * interrupt related to this try_run will ever come from
1527  * the hardware to do it. */
1528  s5p_mfc_clock_off();
1529  }
1530 }
1531 
1532 
1533 void s5p_mfc_cleanup_queue_v5(struct list_head *lh, struct vb2_queue *vq)
1534 {
1535  struct s5p_mfc_buf *b;
1536  int i;
1537 
1538  while (!list_empty(lh)) {
1539  b = list_entry(lh->next, struct s5p_mfc_buf, list);
1540  for (i = 0; i < b->b->num_planes; i++)
1541  vb2_set_plane_payload(b->b, i, 0);
1542  vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR);
1543  list_del(&b->list);
1544  }
1545 }
1546 
1547 void s5p_mfc_clear_int_flags_v5(struct s5p_mfc_dev *dev)
1548 {
1549  mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
1550  mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
1551  mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
1552 }
1553 
1554 int s5p_mfc_get_dspl_y_adr_v5(struct s5p_mfc_dev *dev)
1555 {
1556  return mfc_read(dev, S5P_FIMV_SI_DISPLAY_Y_ADR) << MFC_OFFSET_SHIFT;
1557 }
1558 
1559 int s5p_mfc_get_dec_y_adr_v5(struct s5p_mfc_dev *dev)
1560 {
1561  return mfc_read(dev, S5P_FIMV_SI_DECODE_Y_ADR) << MFC_OFFSET_SHIFT;
1562 }
1563 
1564 int s5p_mfc_get_dspl_status_v5(struct s5p_mfc_dev *dev)
1565 {
1566  return mfc_read(dev, S5P_FIMV_SI_DISPLAY_STATUS);
1567 }
1568 
1569 int s5p_mfc_get_dec_status_v5(struct s5p_mfc_dev *dev)
1570 {
1571  return mfc_read(dev, S5P_FIMV_SI_DECODE_STATUS);
1572 }
1573 
1574 int s5p_mfc_get_dec_frame_type_v5(struct s5p_mfc_dev *dev)
1575 {
1576  return mfc_read(dev, S5P_FIMV_DECODE_FRAME_TYPE) &
1577  S5P_FIMV_DECODE_FRAME_MASK;
1578 }
1579 
1580 int s5p_mfc_get_disp_frame_type_v5(struct s5p_mfc_ctx *ctx)
1581 {
1582  return (s5p_mfc_read_info_v5(ctx, DISP_PIC_FRAME_TYPE) >>
1583  S5P_FIMV_SHARED_DISP_FRAME_TYPE_SHIFT) &
1584  S5P_FIMV_DECODE_FRAME_MASK;
1585 }
1586 
1587 int s5p_mfc_get_consumed_stream_v5(struct s5p_mfc_dev *dev)
1588 {
1589  return mfc_read(dev, S5P_FIMV_SI_CONSUMED_BYTES);
1590 }
1591 
1592 int s5p_mfc_get_int_reason_v5(struct s5p_mfc_dev *dev)
1593 {
1594  int reason;
1595  reason = mfc_read(dev, S5P_FIMV_RISC2HOST_CMD) &
1596  S5P_FIMV_RISC2HOST_CMD_MASK;
1597  switch (reason) {
1598  case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET:
1599  reason = S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET;
1600  break;
1601  case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET:
1602  reason = S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET;
1603  break;
1604  case S5P_FIMV_R2H_CMD_SEQ_DONE_RET:
1605  reason = S5P_MFC_R2H_CMD_SEQ_DONE_RET;
1606  break;
1607  case S5P_FIMV_R2H_CMD_FRAME_DONE_RET:
1608  reason = S5P_MFC_R2H_CMD_FRAME_DONE_RET;
1609  break;
1610  case S5P_FIMV_R2H_CMD_SLICE_DONE_RET:
1611  reason = S5P_MFC_R2H_CMD_SLICE_DONE_RET;
1612  break;
1613  case S5P_FIMV_R2H_CMD_SYS_INIT_RET:
1614  reason = S5P_MFC_R2H_CMD_SYS_INIT_RET;
1615  break;
1616  case S5P_FIMV_R2H_CMD_FW_STATUS_RET:
1617  reason = S5P_MFC_R2H_CMD_FW_STATUS_RET;
1618  break;
1619  case S5P_FIMV_R2H_CMD_SLEEP_RET:
1620  reason = S5P_MFC_R2H_CMD_SLEEP_RET;
1621  break;
1622  case S5P_FIMV_R2H_CMD_WAKEUP_RET:
1623  reason = S5P_MFC_R2H_CMD_WAKEUP_RET;
1624  break;
1625  case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET:
1626  reason = S5P_MFC_R2H_CMD_INIT_BUFFERS_RET;
1627  break;
1628  case S5P_FIMV_R2H_CMD_EDFU_INIT_RET:
1629  reason = S5P_MFC_R2H_CMD_EDFU_INIT_RET;
1630  break;
1631  case S5P_FIMV_R2H_CMD_ERR_RET:
1632  reason = S5P_MFC_R2H_CMD_ERR_RET;
1633  break;
1634  default:
1635  reason = S5P_MFC_R2H_CMD_EMPTY;
1636  };
1637  return reason;
1638 }
1639 
1640 int s5p_mfc_get_int_err_v5(struct s5p_mfc_dev *dev)
1641 {
1642  return mfc_read(dev, S5P_FIMV_RISC2HOST_ARG2);
1643 }
1644 
1645 int s5p_mfc_err_dec_v5(unsigned int err)
1646 {
1647  return err & S5P_FIMV_ERR_DEC_MASK;
1648 }
1649 
1650 int s5p_mfc_err_dspl_v5(unsigned int err)
1651 {
1652  return (err & S5P_FIMV_ERR_DSPL_MASK) >> S5P_FIMV_ERR_DSPL_SHIFT;
1653 }
1654 
1655 int s5p_mfc_get_img_width_v5(struct s5p_mfc_dev *dev)
1656 {
1657  return mfc_read(dev, S5P_FIMV_SI_HRESOL);
1658 }
1659 
1660 int s5p_mfc_get_img_height_v5(struct s5p_mfc_dev *dev)
1661 {
1662  return mfc_read(dev, S5P_FIMV_SI_VRESOL);
1663 }
1664 
1665 int s5p_mfc_get_dpb_count_v5(struct s5p_mfc_dev *dev)
1666 {
1667  return mfc_read(dev, S5P_FIMV_SI_BUF_NUMBER);
1668 }
1669 
1670 int s5p_mfc_get_mv_count_v5(struct s5p_mfc_dev *dev)
1671 {
1672  /* NOP */
1673  return -1;
1674 }
1675 
1676 int s5p_mfc_get_inst_no_v5(struct s5p_mfc_dev *dev)
1677 {
1678  return mfc_read(dev, S5P_FIMV_RISC2HOST_ARG1);
1679 }
1680 
1681 int s5p_mfc_get_enc_strm_size_v5(struct s5p_mfc_dev *dev)
1682 {
1683  return mfc_read(dev, S5P_FIMV_ENC_SI_STRM_SIZE);
1684 }
1685 
1686 int s5p_mfc_get_enc_slice_type_v5(struct s5p_mfc_dev *dev)
1687 {
1688  return mfc_read(dev, S5P_FIMV_ENC_SI_SLICE_TYPE);
1689 }
1690 
1691 int s5p_mfc_get_enc_dpb_count_v5(struct s5p_mfc_dev *dev)
1692 {
1693  return -1;
1694 }
1695 
1696 int s5p_mfc_get_enc_pic_count_v5(struct s5p_mfc_dev *dev)
1697 {
1698  return mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT);
1699 }
1700 
1701 int s5p_mfc_get_sei_avail_status_v5(struct s5p_mfc_ctx *ctx)
1702 {
1703  return s5p_mfc_read_info_v5(ctx, FRAME_PACK_SEI_AVAIL);
1704 }
1705 
1706 int s5p_mfc_get_mvc_num_views_v5(struct s5p_mfc_dev *dev)
1707 {
1708  return -1;
1709 }
1710 
1711 int s5p_mfc_get_mvc_view_id_v5(struct s5p_mfc_dev *dev)
1712 {
1713  return -1;
1714 }
1715 
1716 unsigned int s5p_mfc_get_pic_type_top_v5(struct s5p_mfc_ctx *ctx)
1717 {
1718  return s5p_mfc_read_info_v5(ctx, PIC_TIME_TOP);
1719 }
1720 
1721 unsigned int s5p_mfc_get_pic_type_bot_v5(struct s5p_mfc_ctx *ctx)
1722 {
1723  return s5p_mfc_read_info_v5(ctx, PIC_TIME_BOT);
1724 }
1725 
1726 unsigned int s5p_mfc_get_crop_info_h_v5(struct s5p_mfc_ctx *ctx)
1727 {
1728  return s5p_mfc_read_info_v5(ctx, CROP_INFO_H);
1729 }
1730 
1731 unsigned int s5p_mfc_get_crop_info_v_v5(struct s5p_mfc_ctx *ctx)
1732 {
1733  return s5p_mfc_read_info_v5(ctx, CROP_INFO_V);
1734 }
1735 
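/*
 * The s5p_mfc_hw_ops table below is how the version-independent core of
 * the driver reaches these v5 routines (through the s5p_mfc_hw_call()
 * wrappers); a parallel table in s5p_mfc_opr_v6.c serves the v6 hardware.
 */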
1736 /* Initialize opr function pointers for MFC v5 */
1737 static struct s5p_mfc_hw_ops s5p_mfc_ops_v5 = {
1738  .alloc_dec_temp_buffers = s5p_mfc_alloc_dec_temp_buffers_v5,
1739  .release_dec_desc_buffer = s5p_mfc_release_dec_desc_buffer_v5,
1740  .alloc_codec_buffers = s5p_mfc_alloc_codec_buffers_v5,
1741  .release_codec_buffers = s5p_mfc_release_codec_buffers_v5,
1742  .alloc_instance_buffer = s5p_mfc_alloc_instance_buffer_v5,
1743  .release_instance_buffer = s5p_mfc_release_instance_buffer_v5,
1744  .alloc_dev_context_buffer = s5p_mfc_alloc_dev_context_buffer_v5,
1745  .release_dev_context_buffer = s5p_mfc_release_dev_context_buffer_v5,
1746  .dec_calc_dpb_size = s5p_mfc_dec_calc_dpb_size_v5,
1747  .enc_calc_src_size = s5p_mfc_enc_calc_src_size_v5,
1748  .set_dec_stream_buffer = s5p_mfc_set_dec_stream_buffer_v5,
1749  .set_dec_frame_buffer = s5p_mfc_set_dec_frame_buffer_v5,
1750  .set_enc_stream_buffer = s5p_mfc_set_enc_stream_buffer_v5,
1751  .set_enc_frame_buffer = s5p_mfc_set_enc_frame_buffer_v5,
1752  .get_enc_frame_buffer = s5p_mfc_get_enc_frame_buffer_v5,
1753  .set_enc_ref_buffer = s5p_mfc_set_enc_ref_buffer_v5,
1754  .init_decode = s5p_mfc_init_decode_v5,
1755  .init_encode = s5p_mfc_init_encode_v5,
1756  .encode_one_frame = s5p_mfc_encode_one_frame_v5,
1757  .try_run = s5p_mfc_try_run_v5,
1758  .cleanup_queue = s5p_mfc_cleanup_queue_v5,
1759  .clear_int_flags = s5p_mfc_clear_int_flags_v5,
1760  .write_info = s5p_mfc_write_info_v5,
1761  .read_info = s5p_mfc_read_info_v5,
1762  .get_dspl_y_adr = s5p_mfc_get_dspl_y_adr_v5,
1763  .get_dec_y_adr = s5p_mfc_get_dec_y_adr_v5,
1764  .get_dspl_status = s5p_mfc_get_dspl_status_v5,
1765  .get_dec_status = s5p_mfc_get_dec_status_v5,
1766  .get_dec_frame_type = s5p_mfc_get_dec_frame_type_v5,
1767  .get_disp_frame_type = s5p_mfc_get_disp_frame_type_v5,
1768  .get_consumed_stream = s5p_mfc_get_consumed_stream_v5,
1769  .get_int_reason = s5p_mfc_get_int_reason_v5,
1770  .get_int_err = s5p_mfc_get_int_err_v5,
1771  .err_dec = s5p_mfc_err_dec_v5,
1772  .err_dspl = s5p_mfc_err_dspl_v5,
1773  .get_img_width = s5p_mfc_get_img_width_v5,
1774  .get_img_height = s5p_mfc_get_img_height_v5,
1775  .get_dpb_count = s5p_mfc_get_dpb_count_v5,
1776  .get_mv_count = s5p_mfc_get_mv_count_v5,
1777  .get_inst_no = s5p_mfc_get_inst_no_v5,
1778  .get_enc_strm_size = s5p_mfc_get_enc_strm_size_v5,
1779  .get_enc_slice_type = s5p_mfc_get_enc_slice_type_v5,
1780  .get_enc_dpb_count = s5p_mfc_get_enc_dpb_count_v5,
1781  .get_enc_pic_count = s5p_mfc_get_enc_pic_count_v5,
1782  .get_sei_avail_status = s5p_mfc_get_sei_avail_status_v5,
1783  .get_mvc_num_views = s5p_mfc_get_mvc_num_views_v5,
1784  .get_mvc_view_id = s5p_mfc_get_mvc_view_id_v5,
1785  .get_pic_type_top = s5p_mfc_get_pic_type_top_v5,
1786  .get_pic_type_bot = s5p_mfc_get_pic_type_bot_v5,
1787  .get_crop_info_h = s5p_mfc_get_crop_info_h_v5,
1788  .get_crop_info_v = s5p_mfc_get_crop_info_v_v5,
1789 };
1790 
1791 struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v5(void)
1792 {
1793  return &s5p_mfc_ops_v5;
1794 }