2 * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
4 * Copyright (c) 2013 Texas Instruments Inc.
5 * David Griego, <dagriego@biglakesoftware.com>
6 * Dale Farnsworth, <dale@farnsworth.org>
7 * Archit Taneja, <archit@ti.com>
9 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
10 * Pawel Osciak, <pawel@osciak.com>
11 * Marek Szyprowski, <m.szyprowski@samsung.com>
13 * Based on the virtual v4l2-mem2mem example device
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License version 2 as published by
17 * the Free Software Foundation
20 #include <linux/delay.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/err.h>
24 #include <linux/interrupt.h>
26 #include <linux/ioctl.h>
27 #include <linux/module.h>
28 #include <linux/platform_device.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/sched.h>
31 #include <linux/slab.h>
32 #include <linux/videodev2.h>
34 #include <media/v4l2-common.h>
35 #include <media/v4l2-ctrls.h>
36 #include <media/v4l2-device.h>
37 #include <media/v4l2-event.h>
38 #include <media/v4l2-ioctl.h>
39 #include <media/v4l2-mem2mem.h>
40 #include <media/videobuf2-core.h>
41 #include <media/videobuf2-dma-contig.h>
46 #define VPE_MODULE_NAME "vpe"
48 /* minimum and maximum frame sizes */
54 /* required alignments */
55 #define S_ALIGN 0 /* multiple of 1 */
56 #define H_ALIGN 1 /* multiple of 2 */
57 #define W_ALIGN 1 /* multiple of 2 */
59 /* multiple of 128 bits, line stride, 16 bytes */
62 /* flags that indicate a format can be used for capture/output */
63 #define VPE_FMT_TYPE_CAPTURE (1 << 0)
64 #define VPE_FMT_TYPE_OUTPUT (1 << 1)
66 /* used as plane indices */
67 #define VPE_MAX_PLANES 2
71 /* per m2m context info */
72 #define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */
75 * each VPE context can need up to 3 config descriptors, 7 input descriptors,
76 * 3 output descriptors, and 10 control descriptors
78 #define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \
79 13 * VPDMA_CFD_CTD_DESC_SIZE)
81 #define vpe_dbg(vpedev, fmt, arg...) \
82 dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
83 #define vpe_err(vpedev, fmt, arg...) \
84 dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
/*
 * struct vpe_us_coeffs - one complete set of upsampler filter coefficients.
 *
 * Four anchor taps and four interpolation taps for each of the two fields,
 * laid out in the order the values are programmed into the US1/US2/US3
 * shadow registers (C0..C3).
 */
struct vpe_us_coeffs {
	unsigned short	anchor_fid0_c0;
	unsigned short	anchor_fid0_c1;
	unsigned short	anchor_fid0_c2;
	unsigned short	anchor_fid0_c3;
	unsigned short	interp_fid0_c0;
	unsigned short	interp_fid0_c1;
	unsigned short	interp_fid0_c2;
	unsigned short	interp_fid0_c3;
	unsigned short	anchor_fid1_c0;
	unsigned short	anchor_fid1_c1;
	unsigned short	anchor_fid1_c2;
	unsigned short	anchor_fid1_c3;
	unsigned short	interp_fid1_c0;
	unsigned short	interp_fid1_c1;
	unsigned short	interp_fid1_c2;
	unsigned short	interp_fid1_c3;
};
106 * Default upsampler coefficients
108 static const struct vpe_us_coeffs us_coeffs
[] = {
110 /* Coefficients for progressive input */
111 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
112 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
117 * The port_data structure contains per-port data.
119 struct vpe_port_data
{
120 enum vpdma_channel channel
; /* VPDMA channel */
121 u8 vb_part
; /* plane index for co-panar formats */
125 * Define indices into the port_data tables
127 #define VPE_PORT_LUMA1_IN 0
128 #define VPE_PORT_CHROMA1_IN 1
129 #define VPE_PORT_LUMA_OUT 8
130 #define VPE_PORT_CHROMA_OUT 9
131 #define VPE_PORT_RGB_OUT 10
133 static const struct vpe_port_data port_data
[11] = {
134 [VPE_PORT_LUMA1_IN
] = {
135 .channel
= VPE_CHAN_LUMA1_IN
,
138 [VPE_PORT_CHROMA1_IN
] = {
139 .channel
= VPE_CHAN_CHROMA1_IN
,
140 .vb_part
= VPE_CHROMA
,
142 [VPE_PORT_LUMA_OUT
] = {
143 .channel
= VPE_CHAN_LUMA_OUT
,
146 [VPE_PORT_CHROMA_OUT
] = {
147 .channel
= VPE_CHAN_CHROMA_OUT
,
148 .vb_part
= VPE_CHROMA
,
150 [VPE_PORT_RGB_OUT
] = {
151 .channel
= VPE_CHAN_RGB_OUT
,
157 /* driver info for each of the supported video formats */
159 char *name
; /* human-readable name */
160 u32 fourcc
; /* standard format identifier */
161 u8 types
; /* CAPTURE and/or OUTPUT */
162 u8 coplanar
; /* set for unpacked Luma and Chroma */
163 /* vpdma format info for each plane */
164 struct vpdma_data_format
const *vpdma_fmt
[VPE_MAX_PLANES
];
167 static struct vpe_fmt vpe_formats
[] = {
169 .name
= "YUV 422 co-planar",
170 .fourcc
= V4L2_PIX_FMT_NV16
,
171 .types
= VPE_FMT_TYPE_CAPTURE
| VPE_FMT_TYPE_OUTPUT
,
173 .vpdma_fmt
= { &vpdma_yuv_fmts
[VPDMA_DATA_FMT_Y444
],
174 &vpdma_yuv_fmts
[VPDMA_DATA_FMT_C444
],
178 .name
= "YUV 420 co-planar",
179 .fourcc
= V4L2_PIX_FMT_NV12
,
180 .types
= VPE_FMT_TYPE_CAPTURE
| VPE_FMT_TYPE_OUTPUT
,
182 .vpdma_fmt
= { &vpdma_yuv_fmts
[VPDMA_DATA_FMT_Y420
],
183 &vpdma_yuv_fmts
[VPDMA_DATA_FMT_C420
],
187 .name
= "YUYV 422 packed",
188 .fourcc
= V4L2_PIX_FMT_YUYV
,
189 .types
= VPE_FMT_TYPE_CAPTURE
| VPE_FMT_TYPE_OUTPUT
,
191 .vpdma_fmt
= { &vpdma_yuv_fmts
[VPDMA_DATA_FMT_YC422
],
195 .name
= "UYVY 422 packed",
196 .fourcc
= V4L2_PIX_FMT_UYVY
,
197 .types
= VPE_FMT_TYPE_CAPTURE
| VPE_FMT_TYPE_OUTPUT
,
199 .vpdma_fmt
= { &vpdma_yuv_fmts
[VPDMA_DATA_FMT_CY422
],
205 * per-queue, driver-specific private data.
206 * there is one source queue and one destination queue for each m2m context.
209 unsigned int width
; /* frame width */
210 unsigned int height
; /* frame height */
211 unsigned int bytesperline
[VPE_MAX_PLANES
]; /* bytes per line in memory */
212 enum v4l2_colorspace colorspace
;
214 unsigned int sizeimage
[VPE_MAX_PLANES
]; /* image size in memory */
215 struct v4l2_rect c_rect
; /* crop/compose rectangle */
216 struct vpe_fmt
*fmt
; /* format info */
219 /* vpe_q_data flag bits */
220 #define Q_DATA_FRAME_1D (1 << 0)
221 #define Q_DATA_MODE_TILED (1 << 1)
228 /* find our format description corresponding to the passed v4l2_format */
229 static struct vpe_fmt
*find_format(struct v4l2_format
*f
)
234 for (k
= 0; k
< ARRAY_SIZE(vpe_formats
); k
++) {
235 fmt
= &vpe_formats
[k
];
236 if (fmt
->fourcc
== f
->fmt
.pix
.pixelformat
)
244 * there is one vpe_dev structure in the driver, it is shared by
248 struct v4l2_device v4l2_dev
;
249 struct video_device vfd
;
250 struct v4l2_m2m_dev
*m2m_dev
;
252 atomic_t num_instances
; /* count of driver instances */
253 dma_addr_t loaded_mmrs
; /* shadow mmrs in device */
254 struct mutex dev_mutex
;
260 struct vb2_alloc_ctx
*alloc_ctx
;
261 struct vpdma_data
*vpdma
; /* vpdma data handle */
265 * There is one vpe_ctx structure for each m2m context.
270 struct v4l2_m2m_ctx
*m2m_ctx
;
271 struct v4l2_ctrl_handler hdl
;
273 unsigned int sequence
; /* current frame/field seq */
274 unsigned int aborting
; /* abort after next irq */
276 unsigned int bufs_per_job
; /* input buffers per batch */
277 unsigned int bufs_completed
; /* bufs done in this batch */
279 struct vpe_q_data q_data
[2]; /* src & dst queue data */
280 struct vb2_buffer
*src_vb
;
281 struct vb2_buffer
*dst_vb
;
283 struct vpdma_buf mmr_adb
; /* shadow reg addr/data block */
284 struct vpdma_desc_list desc_list
; /* DMA descriptor list */
286 bool load_mmrs
; /* have new shadow reg values */
291 * M2M devices get 2 queues.
292 * Return the queue given the type.
294 static struct vpe_q_data
*get_q_data(struct vpe_ctx
*ctx
,
295 enum v4l2_buf_type type
)
298 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
:
299 return &ctx
->q_data
[Q_DATA_SRC
];
300 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
:
301 return &ctx
->q_data
[Q_DATA_DST
];
308 static u32
read_reg(struct vpe_dev
*dev
, int offset
)
310 return ioread32(dev
->base
+ offset
);
313 static void write_reg(struct vpe_dev
*dev
, int offset
, u32 value
)
315 iowrite32(value
, dev
->base
+ offset
);
318 /* register field read/write helpers */
319 static int get_field(u32 value
, u32 mask
, int shift
)
321 return (value
& (mask
<< shift
)) >> shift
;
324 static int read_field_reg(struct vpe_dev
*dev
, int offset
, u32 mask
, int shift
)
326 return get_field(read_reg(dev
, offset
), mask
, shift
);
329 static void write_field(u32
*valp
, u32 field
, u32 mask
, int shift
)
333 val
&= ~(mask
<< shift
);
334 val
|= (field
& mask
) << shift
;
338 static void write_field_reg(struct vpe_dev
*dev
, int offset
, u32 field
,
341 u32 val
= read_reg(dev
, offset
);
343 write_field(&val
, field
, mask
, shift
);
345 write_reg(dev
, offset
, val
);
349 * DMA address/data block for the shadow registers
352 struct vpdma_adb_hdr out_fmt_hdr
;
355 struct vpdma_adb_hdr us1_hdr
;
357 struct vpdma_adb_hdr us2_hdr
;
359 struct vpdma_adb_hdr us3_hdr
;
361 struct vpdma_adb_hdr dei_hdr
;
364 struct vpdma_adb_hdr sc_hdr
;
367 struct vpdma_adb_hdr csc_hdr
;
372 #define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \
373 VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
375 * Set the headers for all of the address/data block structures.
377 static void init_adb_hdrs(struct vpe_ctx
*ctx
)
379 VPE_SET_MMR_ADB_HDR(ctx
, out_fmt_hdr
, out_fmt_reg
, VPE_CLK_FORMAT_SELECT
);
380 VPE_SET_MMR_ADB_HDR(ctx
, us1_hdr
, us1_regs
, VPE_US1_R0
);
381 VPE_SET_MMR_ADB_HDR(ctx
, us2_hdr
, us2_regs
, VPE_US2_R0
);
382 VPE_SET_MMR_ADB_HDR(ctx
, us3_hdr
, us3_regs
, VPE_US3_R0
);
383 VPE_SET_MMR_ADB_HDR(ctx
, dei_hdr
, dei_regs
, VPE_DEI_FRAME_SIZE
);
384 VPE_SET_MMR_ADB_HDR(ctx
, sc_hdr
, sc_regs
, VPE_SC_MP_SC0
);
385 VPE_SET_MMR_ADB_HDR(ctx
, csc_hdr
, csc_regs
, VPE_CSC_CSC00
);
389 * Enable or disable the VPE clocks
391 static void vpe_set_clock_enable(struct vpe_dev
*dev
, bool on
)
396 val
= VPE_DATA_PATH_CLK_ENABLE
| VPE_VPEDMA_CLK_ENABLE
;
397 write_reg(dev
, VPE_CLK_ENABLE
, val
);
400 static void vpe_top_reset(struct vpe_dev
*dev
)
403 write_field_reg(dev
, VPE_CLK_RESET
, 1, VPE_DATA_PATH_CLK_RESET_MASK
,
404 VPE_DATA_PATH_CLK_RESET_SHIFT
);
406 usleep_range(100, 150);
408 write_field_reg(dev
, VPE_CLK_RESET
, 0, VPE_DATA_PATH_CLK_RESET_MASK
,
409 VPE_DATA_PATH_CLK_RESET_SHIFT
);
412 static void vpe_top_vpdma_reset(struct vpe_dev
*dev
)
414 write_field_reg(dev
, VPE_CLK_RESET
, 1, VPE_VPDMA_CLK_RESET_MASK
,
415 VPE_VPDMA_CLK_RESET_SHIFT
);
417 usleep_range(100, 150);
419 write_field_reg(dev
, VPE_CLK_RESET
, 0, VPE_VPDMA_CLK_RESET_MASK
,
420 VPE_VPDMA_CLK_RESET_SHIFT
);
424 * Load the correct set of upsampler coefficients into the shadow MMRs
426 static void set_us_coefficients(struct vpe_ctx
*ctx
)
428 struct vpe_mmr_adb
*mmr_adb
= ctx
->mmr_adb
.addr
;
429 u32
*us1_reg
= &mmr_adb
->us1_regs
[0];
430 u32
*us2_reg
= &mmr_adb
->us2_regs
[0];
431 u32
*us3_reg
= &mmr_adb
->us3_regs
[0];
432 const unsigned short *cp
, *end_cp
;
434 cp
= &us_coeffs
[0].anchor_fid0_c0
;
436 end_cp
= cp
+ sizeof(us_coeffs
[0]) / sizeof(*cp
);
438 while (cp
< end_cp
) {
439 write_field(us1_reg
, *cp
++, VPE_US_C0_MASK
, VPE_US_C0_SHIFT
);
440 write_field(us1_reg
, *cp
++, VPE_US_C1_MASK
, VPE_US_C1_SHIFT
);
441 *us2_reg
++ = *us1_reg
;
442 *us3_reg
++ = *us1_reg
++;
444 ctx
->load_mmrs
= true;
448 * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
450 static void set_cfg_and_line_modes(struct vpe_ctx
*ctx
)
452 struct vpe_fmt
*fmt
= ctx
->q_data
[Q_DATA_SRC
].fmt
;
453 struct vpe_mmr_adb
*mmr_adb
= ctx
->mmr_adb
.addr
;
454 u32
*us1_reg0
= &mmr_adb
->us1_regs
[0];
455 u32
*us2_reg0
= &mmr_adb
->us2_regs
[0];
456 u32
*us3_reg0
= &mmr_adb
->us3_regs
[0];
461 * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
462 * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
465 if (fmt
->fourcc
== V4L2_PIX_FMT_NV12
) {
467 line_mode
= 0; /* double lines to line buffer */
470 write_field(us1_reg0
, cfg_mode
, VPE_US_MODE_MASK
, VPE_US_MODE_SHIFT
);
471 write_field(us2_reg0
, cfg_mode
, VPE_US_MODE_MASK
, VPE_US_MODE_SHIFT
);
472 write_field(us3_reg0
, cfg_mode
, VPE_US_MODE_MASK
, VPE_US_MODE_SHIFT
);
475 vpdma_set_line_mode(ctx
->dev
->vpdma
, line_mode
, VPE_CHAN_CHROMA1_IN
);
477 /* frame start for input luma */
478 vpdma_set_frame_start_event(ctx
->dev
->vpdma
, VPDMA_FSEVENT_CHANNEL_ACTIVE
,
481 /* frame start for input chroma */
482 vpdma_set_frame_start_event(ctx
->dev
->vpdma
, VPDMA_FSEVENT_CHANNEL_ACTIVE
,
483 VPE_CHAN_CHROMA1_IN
);
485 ctx
->load_mmrs
= true;
489 * Set the shadow registers that are modified when the source
/*
 * Set the shadow registers that are modified when the source
 * format changes.
 */
static void set_src_registers(struct vpe_ctx *ctx)
{
	set_us_coefficients(ctx);
}
498 * Set the shadow registers that are modified when the destination
501 static void set_dst_registers(struct vpe_ctx
*ctx
)
503 struct vpe_mmr_adb
*mmr_adb
= ctx
->mmr_adb
.addr
;
504 struct vpe_fmt
*fmt
= ctx
->q_data
[Q_DATA_DST
].fmt
;
507 /* select RGB path when color space conversion is supported in future */
508 if (fmt
->fourcc
== V4L2_PIX_FMT_RGB24
)
509 val
|= VPE_RGB_OUT_SELECT
| VPE_CSC_SRC_DEI_SCALER
;
510 else if (fmt
->fourcc
== V4L2_PIX_FMT_NV16
)
511 val
|= VPE_COLOR_SEPARATE_422
;
513 /* The source of CHR_DS is always the scaler, whether it's used or not */
514 val
|= VPE_DS_SRC_DEI_SCALER
;
516 if (fmt
->fourcc
!= V4L2_PIX_FMT_NV12
)
517 val
|= VPE_DS_BYPASS
;
519 mmr_adb
->out_fmt_reg
[0] = val
;
521 ctx
->load_mmrs
= true;
525 * Set the de-interlacer shadow register values
527 static void set_dei_regs_bypass(struct vpe_ctx
*ctx
)
529 struct vpe_mmr_adb
*mmr_adb
= ctx
->mmr_adb
.addr
;
530 struct vpe_q_data
*s_q_data
= &ctx
->q_data
[Q_DATA_SRC
];
531 unsigned int src_h
= s_q_data
->c_rect
.height
;
532 unsigned int src_w
= s_q_data
->c_rect
.width
;
533 u32
*dei_mmr0
= &mmr_adb
->dei_regs
[0];
537 * according to TRM, we should set DEI in progressive bypass mode when
538 * the input content is progressive, however, DEI is bypassed correctly
539 * for both progressive and interlace content in interlace bypass mode.
540 * It has been recommended not to use progressive bypass mode.
542 val
= VPE_DEI_INTERLACE_BYPASS
;
544 val
|= (src_h
<< VPE_DEI_HEIGHT_SHIFT
) |
545 (src_w
<< VPE_DEI_WIDTH_SHIFT
) |
550 ctx
->load_mmrs
= true;
553 static void set_csc_coeff_bypass(struct vpe_ctx
*ctx
)
555 struct vpe_mmr_adb
*mmr_adb
= ctx
->mmr_adb
.addr
;
556 u32
*shadow_csc_reg5
= &mmr_adb
->csc_regs
[5];
558 *shadow_csc_reg5
|= VPE_CSC_BYPASS
;
560 ctx
->load_mmrs
= true;
563 static void set_sc_regs_bypass(struct vpe_ctx
*ctx
)
565 struct vpe_mmr_adb
*mmr_adb
= ctx
->mmr_adb
.addr
;
566 u32
*sc_reg0
= &mmr_adb
->sc_regs
[0];
569 val
|= VPE_SC_BYPASS
;
572 ctx
->load_mmrs
= true;
576 * Set the shadow registers whose values are modified when either the
577 * source or destination format is changed.
579 static int set_srcdst_params(struct vpe_ctx
*ctx
)
583 set_cfg_and_line_modes(ctx
);
584 set_dei_regs_bypass(ctx
);
585 set_csc_coeff_bypass(ctx
);
586 set_sc_regs_bypass(ctx
);
592 * Return the vpe_ctx structure for a given struct file
594 static struct vpe_ctx
*file2ctx(struct file
*file
)
596 return container_of(file
->private_data
, struct vpe_ctx
, fh
);
604 * job_ready() - check whether an instance is ready to be scheduled to run
606 static int job_ready(void *priv
)
608 struct vpe_ctx
*ctx
= priv
;
609 int needed
= ctx
->bufs_per_job
;
611 if (v4l2_m2m_num_src_bufs_ready(ctx
->m2m_ctx
) < needed
)
617 static void job_abort(void *priv
)
619 struct vpe_ctx
*ctx
= priv
;
621 /* Will cancel the transaction in the next interrupt handler */
626 * Lock access to the device
628 static void vpe_lock(void *priv
)
630 struct vpe_ctx
*ctx
= priv
;
631 struct vpe_dev
*dev
= ctx
->dev
;
632 mutex_lock(&dev
->dev_mutex
);
635 static void vpe_unlock(void *priv
)
637 struct vpe_ctx
*ctx
= priv
;
638 struct vpe_dev
*dev
= ctx
->dev
;
639 mutex_unlock(&dev
->dev_mutex
);
642 static void vpe_dump_regs(struct vpe_dev
*dev
)
644 #define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))
646 vpe_dbg(dev
, "VPE Registers:\n");
650 DUMPREG(INT0_STATUS0_RAW
);
651 DUMPREG(INT0_STATUS0
);
652 DUMPREG(INT0_ENABLE0
);
653 DUMPREG(INT0_STATUS1_RAW
);
654 DUMPREG(INT0_STATUS1
);
655 DUMPREG(INT0_ENABLE1
);
658 DUMPREG(CLK_FORMAT_SELECT
);
659 DUMPREG(CLK_RANGE_MAP
);
684 DUMPREG(DEI_FRAME_SIZE
);
686 DUMPREG(MDT_SF_THRESHOLD
);
688 DUMPREG(DEI_EDI_LUT_R0
);
689 DUMPREG(DEI_EDI_LUT_R1
);
690 DUMPREG(DEI_EDI_LUT_R2
);
691 DUMPREG(DEI_EDI_LUT_R3
);
692 DUMPREG(DEI_FMD_WINDOW_R0
);
693 DUMPREG(DEI_FMD_WINDOW_R1
);
694 DUMPREG(DEI_FMD_CONTROL_R0
);
695 DUMPREG(DEI_FMD_CONTROL_R1
);
696 DUMPREG(DEI_FMD_STATUS_R0
);
697 DUMPREG(DEI_FMD_STATUS_R1
);
698 DUMPREG(DEI_FMD_STATUS_R2
);
730 static void add_out_dtd(struct vpe_ctx
*ctx
, int port
)
732 struct vpe_q_data
*q_data
= &ctx
->q_data
[Q_DATA_DST
];
733 const struct vpe_port_data
*p_data
= &port_data
[port
];
734 struct vb2_buffer
*vb
= ctx
->dst_vb
;
735 struct v4l2_rect
*c_rect
= &q_data
->c_rect
;
736 struct vpe_fmt
*fmt
= q_data
->fmt
;
737 const struct vpdma_data_format
*vpdma_fmt
;
738 int plane
= fmt
->coplanar
? p_data
->vb_part
: 0;
742 vpdma_fmt
= fmt
->vpdma_fmt
[plane
];
743 dma_addr
= vb2_dma_contig_plane_dma_addr(vb
, plane
);
746 "acquiring output buffer(%d) dma_addr failed\n",
751 if (q_data
->flags
& Q_DATA_FRAME_1D
)
752 flags
|= VPDMA_DATA_FRAME_1D
;
753 if (q_data
->flags
& Q_DATA_MODE_TILED
)
754 flags
|= VPDMA_DATA_MODE_TILED
;
756 vpdma_add_out_dtd(&ctx
->desc_list
, c_rect
, vpdma_fmt
, dma_addr
,
757 p_data
->channel
, flags
);
760 static void add_in_dtd(struct vpe_ctx
*ctx
, int port
)
762 struct vpe_q_data
*q_data
= &ctx
->q_data
[Q_DATA_SRC
];
763 const struct vpe_port_data
*p_data
= &port_data
[port
];
764 struct vb2_buffer
*vb
= ctx
->src_vb
;
765 struct v4l2_rect
*c_rect
= &q_data
->c_rect
;
766 struct vpe_fmt
*fmt
= q_data
->fmt
;
767 const struct vpdma_data_format
*vpdma_fmt
;
768 int plane
= fmt
->coplanar
? p_data
->vb_part
: 0;
773 vpdma_fmt
= fmt
->vpdma_fmt
[plane
];
775 dma_addr
= vb2_dma_contig_plane_dma_addr(vb
, plane
);
778 "acquiring input buffer(%d) dma_addr failed\n",
783 if (q_data
->flags
& Q_DATA_FRAME_1D
)
784 flags
|= VPDMA_DATA_FRAME_1D
;
785 if (q_data
->flags
& Q_DATA_MODE_TILED
)
786 flags
|= VPDMA_DATA_MODE_TILED
;
788 vpdma_add_in_dtd(&ctx
->desc_list
, q_data
->width
, q_data
->height
,
789 c_rect
, vpdma_fmt
, dma_addr
, p_data
->channel
, field
, flags
);
793 * Enable the expected IRQ sources
795 static void enable_irqs(struct vpe_ctx
*ctx
)
797 write_reg(ctx
->dev
, VPE_INT0_ENABLE0_SET
, VPE_INT0_LIST0_COMPLETE
);
798 write_reg(ctx
->dev
, VPE_INT0_ENABLE1_SET
, VPE_DS1_UV_ERROR_INT
);
800 vpdma_enable_list_complete_irq(ctx
->dev
->vpdma
, 0, true);
803 static void disable_irqs(struct vpe_ctx
*ctx
)
805 write_reg(ctx
->dev
, VPE_INT0_ENABLE0_CLR
, 0xffffffff);
806 write_reg(ctx
->dev
, VPE_INT0_ENABLE1_CLR
, 0xffffffff);
808 vpdma_enable_list_complete_irq(ctx
->dev
->vpdma
, 0, false);
811 /* device_run() - prepares and starts the device
813 * This function is only called when both the source and destination
814 * buffers are in place.
816 static void device_run(void *priv
)
818 struct vpe_ctx
*ctx
= priv
;
819 struct vpe_q_data
*d_q_data
= &ctx
->q_data
[Q_DATA_DST
];
821 ctx
->src_vb
= v4l2_m2m_src_buf_remove(ctx
->m2m_ctx
);
822 WARN_ON(ctx
->src_vb
== NULL
);
823 ctx
->dst_vb
= v4l2_m2m_dst_buf_remove(ctx
->m2m_ctx
);
824 WARN_ON(ctx
->dst_vb
== NULL
);
826 /* config descriptors */
827 if (ctx
->dev
->loaded_mmrs
!= ctx
->mmr_adb
.dma_addr
|| ctx
->load_mmrs
) {
828 vpdma_map_desc_buf(ctx
->dev
->vpdma
, &ctx
->mmr_adb
);
829 vpdma_add_cfd_adb(&ctx
->desc_list
, CFD_MMR_CLIENT
, &ctx
->mmr_adb
);
830 ctx
->dev
->loaded_mmrs
= ctx
->mmr_adb
.dma_addr
;
831 ctx
->load_mmrs
= false;
834 add_out_dtd(ctx
, VPE_PORT_LUMA_OUT
);
835 if (d_q_data
->fmt
->coplanar
)
836 add_out_dtd(ctx
, VPE_PORT_CHROMA_OUT
);
838 add_in_dtd(ctx
, VPE_PORT_LUMA1_IN
);
839 add_in_dtd(ctx
, VPE_PORT_CHROMA1_IN
);
841 /* sync on channel control descriptors for input ports */
842 vpdma_add_sync_on_channel_ctd(&ctx
->desc_list
, VPE_CHAN_LUMA1_IN
);
843 vpdma_add_sync_on_channel_ctd(&ctx
->desc_list
, VPE_CHAN_CHROMA1_IN
);
845 /* sync on channel control descriptors for output ports */
846 vpdma_add_sync_on_channel_ctd(&ctx
->desc_list
, VPE_CHAN_LUMA_OUT
);
847 if (d_q_data
->fmt
->coplanar
)
848 vpdma_add_sync_on_channel_ctd(&ctx
->desc_list
, VPE_CHAN_CHROMA_OUT
);
852 vpdma_map_desc_buf(ctx
->dev
->vpdma
, &ctx
->desc_list
.buf
);
853 vpdma_submit_descs(ctx
->dev
->vpdma
, &ctx
->desc_list
);
856 static void ds1_uv_error(struct vpe_ctx
*ctx
)
858 dev_warn(ctx
->dev
->v4l2_dev
.dev
,
859 "received downsampler error interrupt\n");
862 static irqreturn_t
vpe_irq(int irq_vpe
, void *data
)
864 struct vpe_dev
*dev
= (struct vpe_dev
*)data
;
866 struct vb2_buffer
*s_vb
, *d_vb
;
867 struct v4l2_buffer
*s_buf
, *d_buf
;
871 irqst0
= read_reg(dev
, VPE_INT0_STATUS0
);
873 write_reg(dev
, VPE_INT0_STATUS0_CLR
, irqst0
);
874 vpe_dbg(dev
, "INT0_STATUS0 = 0x%08x\n", irqst0
);
877 irqst1
= read_reg(dev
, VPE_INT0_STATUS1
);
879 write_reg(dev
, VPE_INT0_STATUS1_CLR
, irqst1
);
880 vpe_dbg(dev
, "INT0_STATUS1 = 0x%08x\n", irqst1
);
883 ctx
= v4l2_m2m_get_curr_priv(dev
->m2m_dev
);
885 vpe_err(dev
, "instance released before end of transaction\n");
889 if (irqst1
& VPE_DS1_UV_ERROR_INT
) {
890 irqst1
&= ~VPE_DS1_UV_ERROR_INT
;
895 if (irqst0
& VPE_INT0_LIST0_COMPLETE
)
896 vpdma_clear_list_stat(ctx
->dev
->vpdma
);
898 irqst0
&= ~(VPE_INT0_LIST0_COMPLETE
);
901 if (irqst0
| irqst1
) {
902 dev_warn(dev
->v4l2_dev
.dev
, "Unexpected interrupt: "
903 "INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
909 vpdma_unmap_desc_buf(dev
->vpdma
, &ctx
->desc_list
.buf
);
910 vpdma_unmap_desc_buf(dev
->vpdma
, &ctx
->mmr_adb
);
912 vpdma_reset_desc_list(&ctx
->desc_list
);
919 s_buf
= &s_vb
->v4l2_buf
;
920 d_buf
= &d_vb
->v4l2_buf
;
922 d_buf
->timestamp
= s_buf
->timestamp
;
923 if (s_buf
->flags
& V4L2_BUF_FLAG_TIMECODE
) {
924 d_buf
->flags
|= V4L2_BUF_FLAG_TIMECODE
;
925 d_buf
->timecode
= s_buf
->timecode
;
928 d_buf
->sequence
= ctx
->sequence
;
932 spin_lock_irqsave(&dev
->lock
, flags
);
933 v4l2_m2m_buf_done(s_vb
, VB2_BUF_STATE_DONE
);
934 v4l2_m2m_buf_done(d_vb
, VB2_BUF_STATE_DONE
);
935 spin_unlock_irqrestore(&dev
->lock
, flags
);
937 ctx
->bufs_completed
++;
938 if (ctx
->bufs_completed
< ctx
->bufs_per_job
) {
944 vpe_dbg(ctx
->dev
, "finishing transaction\n");
945 ctx
->bufs_completed
= 0;
946 v4l2_m2m_job_finish(dev
->m2m_dev
, ctx
->m2m_ctx
);
954 static int vpe_querycap(struct file
*file
, void *priv
,
955 struct v4l2_capability
*cap
)
957 strncpy(cap
->driver
, VPE_MODULE_NAME
, sizeof(cap
->driver
) - 1);
958 strncpy(cap
->card
, VPE_MODULE_NAME
, sizeof(cap
->card
) - 1);
959 strlcpy(cap
->bus_info
, VPE_MODULE_NAME
, sizeof(cap
->bus_info
));
960 cap
->device_caps
= V4L2_CAP_VIDEO_M2M
| V4L2_CAP_STREAMING
;
961 cap
->capabilities
= cap
->device_caps
| V4L2_CAP_DEVICE_CAPS
;
965 static int __enum_fmt(struct v4l2_fmtdesc
*f
, u32 type
)
968 struct vpe_fmt
*fmt
= NULL
;
971 for (i
= 0; i
< ARRAY_SIZE(vpe_formats
); ++i
) {
972 if (vpe_formats
[i
].types
& type
) {
973 if (index
== f
->index
) {
974 fmt
= &vpe_formats
[i
];
984 strncpy(f
->description
, fmt
->name
, sizeof(f
->description
) - 1);
985 f
->pixelformat
= fmt
->fourcc
;
989 static int vpe_enum_fmt(struct file
*file
, void *priv
,
990 struct v4l2_fmtdesc
*f
)
992 if (V4L2_TYPE_IS_OUTPUT(f
->type
))
993 return __enum_fmt(f
, VPE_FMT_TYPE_OUTPUT
);
995 return __enum_fmt(f
, VPE_FMT_TYPE_CAPTURE
);
998 static int vpe_g_fmt(struct file
*file
, void *priv
, struct v4l2_format
*f
)
1000 struct v4l2_pix_format_mplane
*pix
= &f
->fmt
.pix_mp
;
1001 struct vpe_ctx
*ctx
= file2ctx(file
);
1002 struct vb2_queue
*vq
;
1003 struct vpe_q_data
*q_data
;
1006 vq
= v4l2_m2m_get_vq(ctx
->m2m_ctx
, f
->type
);
1010 q_data
= get_q_data(ctx
, f
->type
);
1012 pix
->width
= q_data
->width
;
1013 pix
->height
= q_data
->height
;
1014 pix
->pixelformat
= q_data
->fmt
->fourcc
;
1016 if (V4L2_TYPE_IS_OUTPUT(f
->type
)) {
1017 pix
->colorspace
= q_data
->colorspace
;
1019 struct vpe_q_data
*s_q_data
;
1021 /* get colorspace from the source queue */
1022 s_q_data
= get_q_data(ctx
, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
);
1024 pix
->colorspace
= s_q_data
->colorspace
;
1027 pix
->num_planes
= q_data
->fmt
->coplanar
? 2 : 1;
1029 for (i
= 0; i
< pix
->num_planes
; i
++) {
1030 pix
->plane_fmt
[i
].bytesperline
= q_data
->bytesperline
[i
];
1031 pix
->plane_fmt
[i
].sizeimage
= q_data
->sizeimage
[i
];
1037 static int __vpe_try_fmt(struct vpe_ctx
*ctx
, struct v4l2_format
*f
,
1038 struct vpe_fmt
*fmt
, int type
)
1040 struct v4l2_pix_format_mplane
*pix
= &f
->fmt
.pix_mp
;
1041 struct v4l2_plane_pix_format
*plane_fmt
;
1044 if (!fmt
|| !(fmt
->types
& type
)) {
1045 vpe_err(ctx
->dev
, "Fourcc format (0x%08x) invalid.\n",
1050 pix
->field
= V4L2_FIELD_NONE
;
1052 v4l_bound_align_image(&pix
->width
, MIN_W
, MAX_W
, W_ALIGN
,
1053 &pix
->height
, MIN_H
, MAX_H
, H_ALIGN
,
1056 pix
->num_planes
= fmt
->coplanar
? 2 : 1;
1057 pix
->pixelformat
= fmt
->fourcc
;
1059 if (type
== VPE_FMT_TYPE_CAPTURE
) {
1060 struct vpe_q_data
*s_q_data
;
1062 /* get colorspace from the source queue */
1063 s_q_data
= get_q_data(ctx
, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
);
1065 pix
->colorspace
= s_q_data
->colorspace
;
1067 if (!pix
->colorspace
)
1068 pix
->colorspace
= V4L2_COLORSPACE_SMPTE240M
;
1071 for (i
= 0; i
< pix
->num_planes
; i
++) {
1074 plane_fmt
= &pix
->plane_fmt
[i
];
1075 depth
= fmt
->vpdma_fmt
[i
]->depth
;
1078 plane_fmt
->bytesperline
=
1079 round_up((pix
->width
* depth
) >> 3,
1082 plane_fmt
->bytesperline
= pix
->width
;
1084 plane_fmt
->sizeimage
=
1085 (pix
->height
* pix
->width
* depth
) >> 3;
1091 static int vpe_try_fmt(struct file
*file
, void *priv
, struct v4l2_format
*f
)
1093 struct vpe_ctx
*ctx
= file2ctx(file
);
1094 struct vpe_fmt
*fmt
= find_format(f
);
1096 if (V4L2_TYPE_IS_OUTPUT(f
->type
))
1097 return __vpe_try_fmt(ctx
, f
, fmt
, VPE_FMT_TYPE_OUTPUT
);
1099 return __vpe_try_fmt(ctx
, f
, fmt
, VPE_FMT_TYPE_CAPTURE
);
1102 static int __vpe_s_fmt(struct vpe_ctx
*ctx
, struct v4l2_format
*f
)
1104 struct v4l2_pix_format_mplane
*pix
= &f
->fmt
.pix_mp
;
1105 struct v4l2_plane_pix_format
*plane_fmt
;
1106 struct vpe_q_data
*q_data
;
1107 struct vb2_queue
*vq
;
1110 vq
= v4l2_m2m_get_vq(ctx
->m2m_ctx
, f
->type
);
1114 if (vb2_is_busy(vq
)) {
1115 vpe_err(ctx
->dev
, "queue busy\n");
1119 q_data
= get_q_data(ctx
, f
->type
);
1123 q_data
->fmt
= find_format(f
);
1124 q_data
->width
= pix
->width
;
1125 q_data
->height
= pix
->height
;
1126 q_data
->colorspace
= pix
->colorspace
;
1128 for (i
= 0; i
< pix
->num_planes
; i
++) {
1129 plane_fmt
= &pix
->plane_fmt
[i
];
1131 q_data
->bytesperline
[i
] = plane_fmt
->bytesperline
;
1132 q_data
->sizeimage
[i
] = plane_fmt
->sizeimage
;
1135 q_data
->c_rect
.left
= 0;
1136 q_data
->c_rect
.top
= 0;
1137 q_data
->c_rect
.width
= q_data
->width
;
1138 q_data
->c_rect
.height
= q_data
->height
;
1140 vpe_dbg(ctx
->dev
, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
1141 f
->type
, q_data
->width
, q_data
->height
, q_data
->fmt
->fourcc
,
1142 q_data
->bytesperline
[VPE_LUMA
]);
1143 if (q_data
->fmt
->coplanar
)
1144 vpe_dbg(ctx
->dev
, " bpl_uv %d\n",
1145 q_data
->bytesperline
[VPE_CHROMA
]);
1150 static int vpe_s_fmt(struct file
*file
, void *priv
, struct v4l2_format
*f
)
1153 struct vpe_ctx
*ctx
= file2ctx(file
);
1155 ret
= vpe_try_fmt(file
, priv
, f
);
1159 ret
= __vpe_s_fmt(ctx
, f
);
1163 if (V4L2_TYPE_IS_OUTPUT(f
->type
))
1164 set_src_registers(ctx
);
1166 set_dst_registers(ctx
);
1168 return set_srcdst_params(ctx
);
1171 static int vpe_reqbufs(struct file
*file
, void *priv
,
1172 struct v4l2_requestbuffers
*reqbufs
)
1174 struct vpe_ctx
*ctx
= file2ctx(file
);
1176 return v4l2_m2m_reqbufs(file
, ctx
->m2m_ctx
, reqbufs
);
1179 static int vpe_querybuf(struct file
*file
, void *priv
, struct v4l2_buffer
*buf
)
1181 struct vpe_ctx
*ctx
= file2ctx(file
);
1183 return v4l2_m2m_querybuf(file
, ctx
->m2m_ctx
, buf
);
1186 static int vpe_qbuf(struct file
*file
, void *priv
, struct v4l2_buffer
*buf
)
1188 struct vpe_ctx
*ctx
= file2ctx(file
);
1190 return v4l2_m2m_qbuf(file
, ctx
->m2m_ctx
, buf
);
1193 static int vpe_dqbuf(struct file
*file
, void *priv
, struct v4l2_buffer
*buf
)
1195 struct vpe_ctx
*ctx
= file2ctx(file
);
1197 return v4l2_m2m_dqbuf(file
, ctx
->m2m_ctx
, buf
);
1200 static int vpe_streamon(struct file
*file
, void *priv
, enum v4l2_buf_type type
)
1202 struct vpe_ctx
*ctx
= file2ctx(file
);
1204 return v4l2_m2m_streamon(file
, ctx
->m2m_ctx
, type
);
1207 static int vpe_streamoff(struct file
*file
, void *priv
, enum v4l2_buf_type type
)
1209 struct vpe_ctx
*ctx
= file2ctx(file
);
1211 vpe_dump_regs(ctx
->dev
);
1212 vpdma_dump_regs(ctx
->dev
->vpdma
);
1214 return v4l2_m2m_streamoff(file
, ctx
->m2m_ctx
, type
);
1218 * defines number of buffers/frames a context can process with VPE before
1219 * switching to a different context. default value is 1 buffer per context
1221 #define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0)
1223 static int vpe_s_ctrl(struct v4l2_ctrl
*ctrl
)
1225 struct vpe_ctx
*ctx
=
1226 container_of(ctrl
->handler
, struct vpe_ctx
, hdl
);
1229 case V4L2_CID_VPE_BUFS_PER_JOB
:
1230 ctx
->bufs_per_job
= ctrl
->val
;
1234 vpe_err(ctx
->dev
, "Invalid control\n");
1241 static const struct v4l2_ctrl_ops vpe_ctrl_ops
= {
1242 .s_ctrl
= vpe_s_ctrl
,
1245 static const struct v4l2_ioctl_ops vpe_ioctl_ops
= {
1246 .vidioc_querycap
= vpe_querycap
,
1248 .vidioc_enum_fmt_vid_cap_mplane
= vpe_enum_fmt
,
1249 .vidioc_g_fmt_vid_cap_mplane
= vpe_g_fmt
,
1250 .vidioc_try_fmt_vid_cap_mplane
= vpe_try_fmt
,
1251 .vidioc_s_fmt_vid_cap_mplane
= vpe_s_fmt
,
1253 .vidioc_enum_fmt_vid_out_mplane
= vpe_enum_fmt
,
1254 .vidioc_g_fmt_vid_out_mplane
= vpe_g_fmt
,
1255 .vidioc_try_fmt_vid_out_mplane
= vpe_try_fmt
,
1256 .vidioc_s_fmt_vid_out_mplane
= vpe_s_fmt
,
1258 .vidioc_reqbufs
= vpe_reqbufs
,
1259 .vidioc_querybuf
= vpe_querybuf
,
1261 .vidioc_qbuf
= vpe_qbuf
,
1262 .vidioc_dqbuf
= vpe_dqbuf
,
1264 .vidioc_streamon
= vpe_streamon
,
1265 .vidioc_streamoff
= vpe_streamoff
,
1266 .vidioc_subscribe_event
= v4l2_ctrl_subscribe_event
,
1267 .vidioc_unsubscribe_event
= v4l2_event_unsubscribe
,
1273 static int vpe_queue_setup(struct vb2_queue
*vq
,
1274 const struct v4l2_format
*fmt
,
1275 unsigned int *nbuffers
, unsigned int *nplanes
,
1276 unsigned int sizes
[], void *alloc_ctxs
[])
1279 struct vpe_ctx
*ctx
= vb2_get_drv_priv(vq
);
1280 struct vpe_q_data
*q_data
;
1282 q_data
= get_q_data(ctx
, vq
->type
);
1284 *nplanes
= q_data
->fmt
->coplanar
? 2 : 1;
1286 for (i
= 0; i
< *nplanes
; i
++) {
1287 sizes
[i
] = q_data
->sizeimage
[i
];
1288 alloc_ctxs
[i
] = ctx
->dev
->alloc_ctx
;
1291 vpe_dbg(ctx
->dev
, "get %d buffer(s) of size %d", *nbuffers
,
1293 if (q_data
->fmt
->coplanar
)
1294 vpe_dbg(ctx
->dev
, " and %d\n", sizes
[VPE_CHROMA
]);
1299 static int vpe_buf_prepare(struct vb2_buffer
*vb
)
1301 struct vpe_ctx
*ctx
= vb2_get_drv_priv(vb
->vb2_queue
);
1302 struct vpe_q_data
*q_data
;
1305 vpe_dbg(ctx
->dev
, "type: %d\n", vb
->vb2_queue
->type
);
1307 q_data
= get_q_data(ctx
, vb
->vb2_queue
->type
);
1308 num_planes
= q_data
->fmt
->coplanar
? 2 : 1;
1310 for (i
= 0; i
< num_planes
; i
++) {
1311 if (vb2_plane_size(vb
, i
) < q_data
->sizeimage
[i
]) {
1313 "data will not fit into plane (%lu < %lu)\n",
1314 vb2_plane_size(vb
, i
),
1315 (long) q_data
->sizeimage
[i
]);
1320 for (i
= 0; i
< num_planes
; i
++)
1321 vb2_set_plane_payload(vb
, i
, q_data
->sizeimage
[i
]);
1326 static void vpe_buf_queue(struct vb2_buffer
*vb
)
1328 struct vpe_ctx
*ctx
= vb2_get_drv_priv(vb
->vb2_queue
);
1329 v4l2_m2m_buf_queue(ctx
->m2m_ctx
, vb
);
/* Drop the device lock before videobuf2 sleeps waiting for a buffer. */
static void vpe_wait_prepare(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);

	vpe_unlock(ctx);
}
/* Re-acquire the device lock after videobuf2 finishes waiting. */
static void vpe_wait_finish(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);

	vpe_lock(ctx);
}
1344 static struct vb2_ops vpe_qops
= {
1345 .queue_setup
= vpe_queue_setup
,
1346 .buf_prepare
= vpe_buf_prepare
,
1347 .buf_queue
= vpe_buf_queue
,
1348 .wait_prepare
= vpe_wait_prepare
,
1349 .wait_finish
= vpe_wait_finish
,
1352 static int queue_init(void *priv
, struct vb2_queue
*src_vq
,
1353 struct vb2_queue
*dst_vq
)
1355 struct vpe_ctx
*ctx
= priv
;
1358 memset(src_vq
, 0, sizeof(*src_vq
));
1359 src_vq
->type
= V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
;
1360 src_vq
->io_modes
= VB2_MMAP
;
1361 src_vq
->drv_priv
= ctx
;
1362 src_vq
->buf_struct_size
= sizeof(struct v4l2_m2m_buffer
);
1363 src_vq
->ops
= &vpe_qops
;
1364 src_vq
->mem_ops
= &vb2_dma_contig_memops
;
1365 src_vq
->timestamp_type
= V4L2_BUF_FLAG_TIMESTAMP_COPY
;
1367 ret
= vb2_queue_init(src_vq
);
1371 memset(dst_vq
, 0, sizeof(*dst_vq
));
1372 dst_vq
->type
= V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
;
1373 dst_vq
->io_modes
= VB2_MMAP
;
1374 dst_vq
->drv_priv
= ctx
;
1375 dst_vq
->buf_struct_size
= sizeof(struct v4l2_m2m_buffer
);
1376 dst_vq
->ops
= &vpe_qops
;
1377 dst_vq
->mem_ops
= &vb2_dma_contig_memops
;
1378 dst_vq
->timestamp_type
= V4L2_BUF_FLAG_TIMESTAMP_COPY
;
1380 return vb2_queue_init(dst_vq
);
1383 static const struct v4l2_ctrl_config vpe_bufs_per_job
= {
1384 .ops
= &vpe_ctrl_ops
,
1385 .id
= V4L2_CID_VPE_BUFS_PER_JOB
,
1386 .name
= "Buffers Per Transaction",
1387 .type
= V4L2_CTRL_TYPE_INTEGER
,
1388 .def
= VPE_DEF_BUFS_PER_JOB
,
1390 .max
= VIDEO_MAX_FRAME
,
/*
 * V4L2 file open: allocate a per-file VPE context and set up its VPDMA
 * descriptor list, shadow-register (MMR ADB) buffer, control handler,
 * default source/destination formats and the mem2mem context.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is unwound via the goto chain at the bottom.
 */
static int vpe_open(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = NULL;
	struct vpe_q_data *s_q_data;
	struct v4l2_ctrl_handler *hdl;
	int ret;

	vpe_dbg(dev, "vpe_open\n");

	/* the device is unusable until the VPDMA firmware has been loaded */
	if (!dev->vpdma->ready) {
		vpe_err(dev, "vpdma firmware not loaded\n");
		return -ENODEV;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;

	if (mutex_lock_interruptible(&dev->dev_mutex)) {
		ret = -ERESTARTSYS;
		goto free_ctx;
	}

	ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
			VPDMA_LIST_TYPE_NORMAL);
	if (ret != 0)
		goto unlock;

	ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
	if (ret != 0)
		goto free_desc_list;

	init_adb_hdrs(ctx);

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;

	hdl = &ctx->hdl;
	v4l2_ctrl_handler_init(hdl, 1);
	v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
	if (hdl->error) {
		ret = hdl->error;
		goto exit_fh;
	}
	ctx->fh.ctrl_handler = hdl;
	v4l2_ctrl_handler_setup(hdl);

	/* default source format: 1920x1080, vpe_formats[2] */
	s_q_data = &ctx->q_data[Q_DATA_SRC];
	s_q_data->fmt = &vpe_formats[2];
	s_q_data->width = 1920;
	s_q_data->height = 1080;
	/* depth is in bits per pixel, hence the >> 3 to get bytes */
	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
	s_q_data->colorspace = V4L2_COLORSPACE_SMPTE240M;
	/* crop rectangle defaults to the full frame */
	s_q_data->c_rect.left = 0;
	s_q_data->c_rect.top = 0;
	s_q_data->c_rect.width = s_q_data->width;
	s_q_data->c_rect.height = s_q_data->height;
	s_q_data->flags = 0;

	/* destination queue starts out identical to the source queue */
	ctx->q_data[Q_DATA_DST] = *s_q_data;

	set_src_registers(ctx);
	set_dst_registers(ctx);
	ret = set_srcdst_params(ctx);
	if (ret)
		goto exit_fh;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);

	if (IS_ERR(ctx->m2m_ctx)) {
		ret = PTR_ERR(ctx->m2m_ctx);
		goto exit_fh;
	}

	v4l2_fh_add(&ctx->fh);

	/*
	 * for now, just report the creation of the first instance, we can later
	 * optimize the driver to enable or disable clocks when the first
	 * instance is created or the last instance released
	 */
	if (atomic_inc_return(&dev->num_instances) == 1)
		vpe_dbg(dev, "first instance created\n");

	ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;

	/* force the shadow MMRs to be loaded on the context's first job */
	ctx->load_mmrs = true;

	vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
		ctx, ctx->m2m_ctx);

	mutex_unlock(&dev->dev_mutex);

	return 0;
exit_fh:
	v4l2_ctrl_handler_free(hdl);
	v4l2_fh_exit(&ctx->fh);
	vpdma_free_desc_buf(&ctx->mmr_adb);
free_desc_list:
	vpdma_free_desc_list(&ctx->desc_list);
unlock:
	mutex_unlock(&dev->dev_mutex);
free_ctx:
	kfree(ctx);
	return ret;
}
/*
 * V4L2 file release: undo everything vpe_open() set up for this context —
 * VPDMA descriptor resources, file handle, control handler and the
 * mem2mem context — then free the context itself.
 */
static int vpe_release(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = file2ctx(file);

	vpe_dbg(dev, "releasing instance %p\n", ctx);

	mutex_lock(&dev->dev_mutex);
	vpdma_free_desc_list(&ctx->desc_list);
	vpdma_free_desc_buf(&ctx->mmr_adb);

	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->hdl);
	v4l2_m2m_ctx_release(ctx->m2m_ctx);

	kfree(ctx);

	/*
	 * for now, just report the release of the last instance, we can later
	 * optimize the driver to enable or disable clocks when the first
	 * instance is created or the last instance released
	 */
	if (atomic_dec_return(&dev->num_instances) == 0)
		vpe_dbg(dev, "last instance released\n");

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
1539 static unsigned int vpe_poll(struct file
*file
,
1540 struct poll_table_struct
*wait
)
1542 struct vpe_ctx
*ctx
= file2ctx(file
);
1543 struct vpe_dev
*dev
= ctx
->dev
;
1546 mutex_lock(&dev
->dev_mutex
);
1547 ret
= v4l2_m2m_poll(file
, ctx
->m2m_ctx
, wait
);
1548 mutex_unlock(&dev
->dev_mutex
);
1552 static int vpe_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1554 struct vpe_ctx
*ctx
= file2ctx(file
);
1555 struct vpe_dev
*dev
= ctx
->dev
;
1558 if (mutex_lock_interruptible(&dev
->dev_mutex
))
1559 return -ERESTARTSYS
;
1560 ret
= v4l2_m2m_mmap(file
, ctx
->m2m_ctx
, vma
);
1561 mutex_unlock(&dev
->dev_mutex
);
/* file operations for the VPE video device node */
static const struct v4l2_file_operations vpe_fops = {
	.owner		= THIS_MODULE,
	.open		= vpe_open,
	.release	= vpe_release,
	.poll		= vpe_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= vpe_mmap,
};
/* template video_device, copied into dev->vfd at probe time */
static struct video_device vpe_videodev = {
	.name		= VPE_MODULE_NAME,
	.fops		= &vpe_fops,
	.ioctl_ops	= &vpe_ioctl_ops,
	.minor		= -1,	/* let the core pick the minor number */
	.release	= video_device_release,
	.vfl_dir	= VFL_DIR_M2M,
};
/* mem2mem framework callbacks for job scheduling and locking */
static struct v4l2_m2m_ops m2m_ops = {
	.device_run	= device_run,
	.job_ready	= job_ready,
	.job_abort	= job_abort,
	.lock		= vpe_lock,
	.unlock		= vpe_unlock,
};
1591 static int vpe_runtime_get(struct platform_device
*pdev
)
1595 dev_dbg(&pdev
->dev
, "vpe_runtime_get\n");
1597 r
= pm_runtime_get_sync(&pdev
->dev
);
1599 return r
< 0 ? r
: 0;
1602 static void vpe_runtime_put(struct platform_device
*pdev
)
1607 dev_dbg(&pdev
->dev
, "vpe_runtime_put\n");
1609 r
= pm_runtime_put_sync(&pdev
->dev
);
1610 WARN_ON(r
< 0 && r
!= -ENOSYS
);
1613 static int vpe_probe(struct platform_device
*pdev
)
1615 struct vpe_dev
*dev
;
1616 struct video_device
*vfd
;
1617 struct resource
*res
;
1620 dev
= devm_kzalloc(&pdev
->dev
, sizeof(*dev
), GFP_KERNEL
);
1622 return PTR_ERR(dev
);
1624 spin_lock_init(&dev
->lock
);
1626 ret
= v4l2_device_register(&pdev
->dev
, &dev
->v4l2_dev
);
1630 atomic_set(&dev
->num_instances
, 0);
1631 mutex_init(&dev
->dev_mutex
);
1633 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "vpe_top");
1635 * HACK: we get resource info from device tree in the form of a list of
1636 * VPE sub blocks, the driver currently uses only the base of vpe_top
1637 * for register access, the driver should be changed later to access
1638 * registers based on the sub block base addresses
1640 dev
->base
= devm_ioremap(&pdev
->dev
, res
->start
, SZ_32K
);
1641 if (IS_ERR(dev
->base
)) {
1642 ret
= PTR_ERR(dev
->base
);
1643 goto v4l2_dev_unreg
;
1646 irq
= platform_get_irq(pdev
, 0);
1647 ret
= devm_request_irq(&pdev
->dev
, irq
, vpe_irq
, 0, VPE_MODULE_NAME
,
1650 goto v4l2_dev_unreg
;
1652 platform_set_drvdata(pdev
, dev
);
1654 dev
->alloc_ctx
= vb2_dma_contig_init_ctx(&pdev
->dev
);
1655 if (IS_ERR(dev
->alloc_ctx
)) {
1656 vpe_err(dev
, "Failed to alloc vb2 context\n");
1657 ret
= PTR_ERR(dev
->alloc_ctx
);
1658 goto v4l2_dev_unreg
;
1661 dev
->m2m_dev
= v4l2_m2m_init(&m2m_ops
);
1662 if (IS_ERR(dev
->m2m_dev
)) {
1663 vpe_err(dev
, "Failed to init mem2mem device\n");
1664 ret
= PTR_ERR(dev
->m2m_dev
);
1668 pm_runtime_enable(&pdev
->dev
);
1670 ret
= vpe_runtime_get(pdev
);
1674 /* Perform clk enable followed by reset */
1675 vpe_set_clock_enable(dev
, 1);
1679 func
= read_field_reg(dev
, VPE_PID
, VPE_PID_FUNC_MASK
,
1680 VPE_PID_FUNC_SHIFT
);
1681 vpe_dbg(dev
, "VPE PID function %x\n", func
);
1683 vpe_top_vpdma_reset(dev
);
1685 dev
->vpdma
= vpdma_create(pdev
);
1686 if (IS_ERR(dev
->vpdma
))
1690 *vfd
= vpe_videodev
;
1691 vfd
->lock
= &dev
->dev_mutex
;
1692 vfd
->v4l2_dev
= &dev
->v4l2_dev
;
1694 ret
= video_register_device(vfd
, VFL_TYPE_GRABBER
, 0);
1696 vpe_err(dev
, "Failed to register video device\n");
1700 video_set_drvdata(vfd
, dev
);
1701 snprintf(vfd
->name
, sizeof(vfd
->name
), "%s", vpe_videodev
.name
);
1702 dev_info(dev
->v4l2_dev
.dev
, "Device registered as /dev/video%d\n",
1708 vpe_runtime_put(pdev
);
1710 pm_runtime_disable(&pdev
->dev
);
1711 v4l2_m2m_release(dev
->m2m_dev
);
1713 vb2_dma_contig_cleanup_ctx(dev
->alloc_ctx
);
1715 v4l2_device_unregister(&dev
->v4l2_dev
);
1720 static int vpe_remove(struct platform_device
*pdev
)
1722 struct vpe_dev
*dev
=
1723 (struct vpe_dev
*) platform_get_drvdata(pdev
);
1725 v4l2_info(&dev
->v4l2_dev
, "Removing " VPE_MODULE_NAME
);
1727 v4l2_m2m_release(dev
->m2m_dev
);
1728 video_unregister_device(&dev
->vfd
);
1729 v4l2_device_unregister(&dev
->v4l2_dev
);
1730 vb2_dma_contig_cleanup_ctx(dev
->alloc_ctx
);
1732 vpe_set_clock_enable(dev
, 0);
1733 vpe_runtime_put(pdev
);
1734 pm_runtime_disable(&pdev
->dev
);
#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
	{
		.compatible = "ti,vpe",
	},
	{},
};
/* export the OF table so udev can autoload the module from its modalias */
MODULE_DEVICE_TABLE(of, vpe_of_match);
#else
#define vpe_of_match NULL
#endif
/* platform driver glue: binds vpe_probe/vpe_remove to the "ti,vpe" node */
static struct platform_driver vpe_pdrv = {
	.probe		= vpe_probe,
	.remove		= vpe_remove,
	.driver		= {
		.name	= VPE_MODULE_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= vpe_of_match,
	},
};
/* Module exit: unregister the VPE platform driver. */
static void __exit vpe_exit(void)
{
	platform_driver_unregister(&vpe_pdrv);
}
/* Module init: register the VPE platform driver. */
static int __init vpe_init(void)
{
	return platform_driver_register(&vpe_pdrv);
}
module_init(vpe_init);
module_exit(vpe_exit);

/* module metadata */
MODULE_DESCRIPTION("TI VPE driver");
MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
MODULE_LICENSE("GPL");