[FFmpeg-devel] [PATCH 15/25] avcodec/v4l2_m2m: add support for AV_PIX_FMT_DRM_PRIME

Aman Gupta ffmpeg at tmm1.net
Tue Sep 3 04:02:20 EEST 2019


From: Aman Gupta <aman at tmm1.net>

Based on a patch originally developed by Lukas Rusak <lorusak at gmail.com>

    This allows zero-copy output by exporting the V4L2 buffer as a dmabuf and wrapping that
    buffer in an AVDRMFrameDescriptor, as is done in rkmpp.

    This has been in use for quite some time with great success on many platforms, including:
     - Amlogic S905
     - Raspberry Pi
     - i.MX6
     - Dragonboard 410c

    This was developed in conjunction with Kodi to allow zero-copy buffer rendering.
    A simple utility for testing is also available here: https://github.com/BayLibre/ffmpeg-drm
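
    For reviewers, here is a minimal consumer-side sketch of the zero-copy path described
    above. It is not part of the patch: the dump_drm_frame helper is hypothetical and only
    shows how a client unpacks the AVDRMFrameDescriptor that the decoder stores in
    frame->data[0]; a real client such as the utility linked above would instead hand the
    dmabuf fds to drmPrimeFDToHandle()/drmModeAddFB2() or an EGL dma-buf import.

    #include <inttypes.h>
    #include <stdio.h>
    #include <libavutil/frame.h>
    #include <libavutil/hwcontext_drm.h>

    /* Hypothetical helper: report the dmabuf layout carried by a DRM_PRIME frame. */
    static void dump_drm_frame(const AVFrame *frame)
    {
        const AVDRMFrameDescriptor *desc;

        if (frame->format != AV_PIX_FMT_DRM_PRIME)
            return;

        /* the decoder places a pointer to the descriptor in data[0] */
        desc = (const AVDRMFrameDescriptor *)frame->data[0];

        /* one object per exported V4L2 plane (dmabuf fd, size, modifier) */
        for (int i = 0; i < desc->nb_objects; i++)
            printf("object %d: dmabuf fd %d, size %zu, modifier 0x%llx\n",
                   i, desc->objects[i].fd, desc->objects[i].size,
                   (unsigned long long)desc->objects[i].format_modifier);

        /* one layer describing the pixel format, with per-plane offset/pitch */
        for (int i = 0; i < desc->nb_layers; i++) {
            const AVDRMLayerDescriptor *layer = &desc->layers[i];

            printf("layer %d: fourcc 0x%08" PRIx32 ", %d plane(s)\n",
                   i, layer->format, layer->nb_planes);
            for (int j = 0; j < layer->nb_planes; j++)
                printf("  plane %d: object %d, offset %td, pitch %td\n",
                       j, layer->planes[j].object_index,
                       layer->planes[j].offset, layer->planes[j].pitch);
        }
    }

    Note that the exported fds remain owned by the decoder: the client has to keep its
    AVFrame reference alive while the buffer is on screen, because dropping the last
    reference returns the buffer to the V4L2 capture queue.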

Signed-off-by: Aman Gupta <aman at tmm1.net>
---
 configure                 |   1 +
 libavcodec/v4l2_buffers.c | 188 ++++++++++++++++++++++++++++++++++++--
 libavcodec/v4l2_buffers.h |   4 +
 libavcodec/v4l2_context.c |  73 +++++++++++++--
 libavcodec/v4l2_context.h |   6 ++
 libavcodec/v4l2_m2m.c     |  10 +-
 libavcodec/v4l2_m2m.h     |   3 +
 7 files changed, 267 insertions(+), 18 deletions(-)

diff --git a/configure b/configure
index 4c77e1cab1..c8a72e1760 100755
--- a/configure
+++ b/configure
@@ -2995,6 +2995,7 @@ qsvenc_select="qsv"
 qsvvpp_select="qsv"
 vaapi_encode_deps="vaapi"
 v4l2_m2m_deps="linux_videodev2_h sem_timedwait"
+v4l2_m2m_suggest="libdrm"
 
 hwupload_cuda_filter_deps="ffnvcodec"
 scale_npp_filter_deps="ffnvcodec libnpp"
diff --git a/libavcodec/v4l2_buffers.c b/libavcodec/v4l2_buffers.c
index 46c9f11d7b..2a1eac7a35 100644
--- a/libavcodec/v4l2_buffers.c
+++ b/libavcodec/v4l2_buffers.c
@@ -21,6 +21,11 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include "config.h"
+#if CONFIG_LIBDRM
+#include <drm_fourcc.h>
+#endif
+
 #include <linux/videodev2.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
@@ -29,6 +34,8 @@
 #include <poll.h>
 #include "libavcodec/avcodec.h"
 #include "libavcodec/internal.h"
+#include "libavutil/avassert.h"
+#include "libavutil/hwcontext.h"
 #include "libavutil/pixdesc.h"
 #include "v4l2_context.h"
 #include "v4l2_buffers.h"
@@ -248,6 +255,127 @@ static int v4l2_buf_increase_ref(V4L2Buffer *in)
     return 0;
 }
 
+#if CONFIG_LIBDRM
+static uint8_t *v4l2_get_drm_frame(V4L2Buffer *avbuf)
+{
+    AVDRMFrameDescriptor *drm_desc = &avbuf->drm_frame;
+    AVDRMLayerDescriptor *layer;
+
+    /* fill the DRM frame descriptor */
+    drm_desc->nb_objects = avbuf->num_planes;
+    drm_desc->nb_layers = 1;
+
+    layer = &drm_desc->layers[0];
+    layer->nb_planes = avbuf->num_planes;
+
+    for (int i = 0; i < avbuf->num_planes; i++) {
+        layer->planes[i].object_index = i;
+        layer->planes[i].offset = 0;
+        layer->planes[i].pitch = avbuf->plane_info[i].bytesperline;
+    }
+
+    switch (avbuf->context->av_pix_fmt) {
+    case AV_PIX_FMT_YUYV422:
+        layer->format = DRM_FORMAT_YUYV;
+        layer->nb_planes = 1;
+
+        break;
+
+    case AV_PIX_FMT_NV12:
+    case AV_PIX_FMT_NV21:
+        layer->format = avbuf->context->av_pix_fmt == AV_PIX_FMT_NV12 ?
+            DRM_FORMAT_NV12 : DRM_FORMAT_NV21;
+
+        if (avbuf->num_planes > 1)
+            break;
+
+        layer->nb_planes = 2;
+
+        layer->planes[1].object_index = 0;
+        layer->planes[1].offset = avbuf->plane_info[0].bytesperline *
+            avbuf->context->format.fmt.pix.height;
+        layer->planes[1].pitch = avbuf->plane_info[0].bytesperline;
+        break;
+
+    case AV_PIX_FMT_YUV420P:
+        layer->format = DRM_FORMAT_YUV420;
+
+        if (avbuf->num_planes > 1)
+            break;
+
+        layer->nb_planes = 3;
+
+        layer->planes[1].object_index = 0;
+        layer->planes[1].offset = avbuf->plane_info[0].bytesperline *
+            avbuf->context->format.fmt.pix.height;
+        layer->planes[1].pitch = avbuf->plane_info[0].bytesperline >> 1;
+
+        layer->planes[2].object_index = 0;
+        layer->planes[2].offset = layer->planes[1].offset +
+            ((avbuf->plane_info[0].bytesperline *
+              avbuf->context->format.fmt.pix.height) >> 2);
+        layer->planes[2].pitch = avbuf->plane_info[0].bytesperline >> 1;
+        break;
+
+    default:
+        drm_desc->nb_layers = 0;
+        break;
+    }
+
+    return (uint8_t *)drm_desc;
+}
+
+static int v4l2_buffer_export_drm(V4L2Buffer* avbuf)
+{
+    struct v4l2_exportbuffer expbuf;
+    int i, ret;
+
+    for (i = 0; i < avbuf->num_planes; i++) {
+        memset(&expbuf, 0, sizeof(expbuf));
+
+        expbuf.index = avbuf->buf.index;
+        expbuf.type = avbuf->buf.type;
+        expbuf.plane = i;
+
+        ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_EXPBUF, &expbuf);
+        if (ret < 0)
+            return AVERROR(errno);
+
+        if (V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type)) {
+            /* per-plane dmabuf object for the DRM frame descriptor */
+            avbuf->drm_frame.objects[i].size = avbuf->buf.m.planes[i].length;
+            avbuf->drm_frame.objects[i].fd = expbuf.fd;
+            avbuf->drm_frame.objects[i].format_modifier = DRM_FORMAT_MOD_LINEAR;
+        } else {
+            /* single-planar: one dmabuf object covers the whole buffer */
+            avbuf->drm_frame.objects[0].size = avbuf->buf.length;
+            avbuf->drm_frame.objects[0].fd = expbuf.fd;
+            avbuf->drm_frame.objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR;
+        }
+    }
+
+    return 0;
+}
+
+static int v4l2_buf_to_bufref_drm(V4L2Buffer *in, AVBufferRef **buf)
+{
+    int ret;
+
+    *buf = av_buffer_create((uint8_t *) &in->drm_frame,
+                            sizeof(in->drm_frame),
+                            v4l2_free_buffer,
+                            in, AV_BUFFER_FLAG_READONLY);
+    if (!*buf)
+        return AVERROR(ENOMEM);
+
+    ret = v4l2_buf_increase_ref(in);
+    if (ret)
+         av_buffer_unref(buf);
+
+    return ret;
+}
+#endif
+
 static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
 {
     int ret;
@@ -255,7 +383,7 @@ static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
     if (plane >= in->num_planes)
         return AVERROR(EINVAL);
 
-    /* even though most encoders return 0 in data_offset encoding vp8 does require this value */
+    /* most encoders return 0 in data_offset but vp8 does require this value */
     *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
                             in->plane_info[plane].length, v4l2_free_buffer, in, 0);
     if (!*buf)
@@ -399,6 +527,21 @@ int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
 {
     v4l2_set_pts(out, frame->pts);
 
+    if (frame->format == AV_PIX_FMT_DRM_PRIME) {
+        AVDRMFrameDescriptor *drm_desc = (AVDRMFrameDescriptor *)frame->data[0];
+        int i;
+        av_assert0(out->buf.memory == V4L2_MEMORY_DMABUF);
+
+        if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
+            for (i = 0; i < drm_desc->nb_objects; i++) {
+                out->buf.m.planes[i].m.fd = drm_desc->objects[i].fd;
+            }
+        } else {
+            out->buf.m.fd = drm_desc->objects[0].fd;
+        }
+        return 0;
+    }
+
     return v4l2_buffer_swframe_to_buf(frame, out);
 }
 
@@ -409,9 +552,21 @@ int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
     av_frame_unref(frame);
 
     /* 1. get references to the actual data */
-    ret = v4l2_buffer_buf_to_swframe(frame, avbuf);
-    if (ret)
-        return ret;
+    if (avbuf->context->av_pix_fmt == AV_PIX_FMT_DRM_PRIME) {
+#if CONFIG_LIBDRM
+        ret = v4l2_buf_to_bufref_drm(avbuf, &frame->buf[0]);
+        if (ret)
+            return ret;
+
+        frame->data[0] = (uint8_t *)v4l2_get_drm_frame(avbuf);
+        frame->format = AV_PIX_FMT_DRM_PRIME;
+        frame->hw_frames_ctx = av_buffer_ref(avbuf->context->frames_ref);
+#endif
+    } else {
+        ret = v4l2_buffer_buf_to_swframe(frame, avbuf);
+        if (ret)
+            return ret;
+    }
 
     /* 2. get frame information */
     frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
@@ -481,7 +636,8 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
     V4L2Context *ctx = avbuf->context;
     int ret, i;
 
-    avbuf->buf.memory = V4L2_MEMORY_MMAP;
+    avbuf->buf.memory = ctx->av_pix_fmt == AV_PIX_FMT_DRM_PRIME && V4L2_TYPE_IS_OUTPUT(ctx->type) ?
+        V4L2_MEMORY_DMABUF : V4L2_MEMORY_MMAP;
     avbuf->buf.type = ctx->type;
     avbuf->buf.index = index;
 
@@ -514,11 +670,19 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
 
         if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
             avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
+
+            if (ctx->av_pix_fmt == AV_PIX_FMT_DRM_PRIME)
+                continue;
+
             avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
                                            PROT_READ | PROT_WRITE, MAP_SHARED,
                                            buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
         } else {
             avbuf->plane_info[i].length = avbuf->buf.length;
+
+            if (ctx->av_pix_fmt == AV_PIX_FMT_DRM_PRIME)
+                continue;
+
             avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
                                           PROT_READ | PROT_WRITE, MAP_SHARED,
                                           buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
@@ -530,9 +694,6 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
 
     avbuf->status = V4L2BUF_AVAILABLE;
 
-    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
-        return 0;
-
     if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
         avbuf->buf.m.planes = avbuf->planes;
         avbuf->buf.length   = avbuf->num_planes;
@@ -542,6 +703,17 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
         avbuf->buf.length    = avbuf->planes[0].length;
     }
 
+    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
+        return 0;
+
+    if (ctx->av_pix_fmt == AV_PIX_FMT_DRM_PRIME) {
+#if CONFIG_LIBDRM
+        ret = v4l2_buffer_export_drm(avbuf);
+        if (ret)
+            return ret;
+#endif
+    }
+
     return ff_v4l2_buffer_enqueue(avbuf);
 }
 
diff --git a/libavcodec/v4l2_buffers.h b/libavcodec/v4l2_buffers.h
index 8dbc7fc104..037e667997 100644
--- a/libavcodec/v4l2_buffers.h
+++ b/libavcodec/v4l2_buffers.h
@@ -27,6 +27,7 @@
 #include <stdatomic.h>
 #include <linux/videodev2.h>
 
+#include "libavutil/hwcontext_drm.h"
 #include "avcodec.h"
 
 enum V4L2Buffer_status {
@@ -42,6 +43,9 @@ typedef struct V4L2Buffer {
     /* each buffer needs to have a reference to its context */
     struct V4L2Context *context;
 
+    /* DRM descriptor */
+    AVDRMFrameDescriptor drm_frame;
+
     /* This object is refcounted per-plane, so we need to keep track
      * of how many context-refs we are holding. */
     AVBufferRef *context_ref;
diff --git a/libavcodec/v4l2_context.c b/libavcodec/v4l2_context.c
index b02142a5d6..32246cf564 100644
--- a/libavcodec/v4l2_context.c
+++ b/libavcodec/v4l2_context.c
@@ -29,6 +29,7 @@
 #include <poll.h>
 #include "libavcodec/avcodec.h"
 #include "libavcodec/internal.h"
+#include "libavutil/avassert.h"
 #include "v4l2_buffers.h"
 #include "v4l2_fmt.h"
 #include "v4l2_m2m.h"
@@ -108,8 +109,12 @@ static inline void v4l2_save_to_context(V4L2Context* ctx, struct v4l2_format_upd
 {
     ctx->format.type = ctx->type;
 
-    if (fmt->update_avfmt)
-        ctx->av_pix_fmt = fmt->av_fmt;
+    if (fmt->update_avfmt) {
+        if (ctx->av_pix_fmt == AV_PIX_FMT_DRM_PRIME)
+            ctx->sw_pix_fmt = fmt->av_fmt;
+        else
+            ctx->av_pix_fmt = fmt->av_fmt;
+    }
 
     if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
         /* update the sizes to handle the reconfiguration of the capture stream at runtime */
@@ -406,22 +411,47 @@ static int v4l2_release_buffers(V4L2Context* ctx)
     struct v4l2_requestbuffers req = {
         .memory = V4L2_MEMORY_MMAP,
         .type = ctx->type,
-        .count = 0, /* 0 -> unmaps buffers from the driver */
+        .count = 0, /* 0 -> unmap all buffers from the driver */
     };
-    int i, j;
+    int ret, i, j;
 
     for (i = 0; i < ctx->num_buffers; i++) {
         V4L2Buffer *buffer = &ctx->buffers[i];
 
         for (j = 0; j < buffer->num_planes; j++) {
             struct V4L2Plane_info *p = &buffer->plane_info[j];
+
+            if (ctx->av_pix_fmt == AV_PIX_FMT_DRM_PRIME && !V4L2_TYPE_IS_OUTPUT(ctx->type)) {
+                /* close the dmabuf fds exported into the DRM frame descriptor */
+                if (buffer->drm_frame.objects[j].fd >= 0) {
+                    if (close(buffer->drm_frame.objects[j].fd) < 0) {
+                        av_log(logger(ctx), AV_LOG_ERROR, "%s close drm fd [buffer=%2d, plane=%d, fd=%2d] - %s \n",
+                            ctx->name, i, j, buffer->drm_frame.objects[j].fd,
+                            av_err2str(AVERROR(errno)));
+                    }
+                }
+            }
+
             if (p->mm_addr && p->length)
                 if (munmap(p->mm_addr, p->length) < 0)
                     av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane (%s))\n", ctx->name, av_err2str(AVERROR(errno)));
         }
     }
 
-    return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req);
+    ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req);
+    if (ret < 0) {
+        av_log(logger(ctx), AV_LOG_ERROR, "release all %s buffers (%s)\n",
+               ctx->name, av_err2str(AVERROR(errno)));
+
+        if (ctx->av_pix_fmt == AV_PIX_FMT_DRM_PRIME)
+            av_log(logger(ctx), AV_LOG_ERROR,
+                   "Make sure the DRM client releases all FB/GEM objects before closing the codec (i.e.):\n"
+                   "for all buffers:\n"
+                   "  1. drmModeRmFB(..)\n"
+                   "  2. drmIoctl(.., DRM_IOCTL_GEM_CLOSE, ...)\n");
+    }
+
+    return ret;
 }
 
 static inline int v4l2_try_raw_format(V4L2Context* ctx, enum AVPixelFormat pixfmt)
@@ -457,6 +487,8 @@ static int v4l2_get_raw_format(V4L2Context* ctx, enum AVPixelFormat *p)
     memset(&fdesc, 0, sizeof(fdesc));
     fdesc.type = ctx->type;
 
+    if (pixfmt == AV_PIX_FMT_DRM_PRIME)
+        pixfmt = ctx->sw_pix_fmt;
     if (pixfmt != AV_PIX_FMT_NONE) {
         ret = v4l2_try_raw_format(ctx, pixfmt);
         if (!ret)
@@ -658,6 +690,7 @@ int ff_v4l2_context_set_format(V4L2Context* ctx)
 
 void ff_v4l2_context_release(V4L2Context* ctx)
 {
+    V4L2m2mContext *s = ctx_to_m2mctx(ctx);
     int ret;
 
     if (!ctx->buffers)
@@ -669,6 +702,9 @@ void ff_v4l2_context_release(V4L2Context* ctx)
 
     av_free(ctx->buffers);
     ctx->buffers = NULL;
+
+    av_buffer_unref(&ctx->frames_ref);
+    av_buffer_unref(&s->device_ref);
 }
 
 int ff_v4l2_context_init(V4L2Context* ctx)
@@ -682,13 +718,35 @@ int ff_v4l2_context_init(V4L2Context* ctx)
         return AVERROR_PATCHWELCOME;
     }
 
+    if (ctx->av_pix_fmt == AV_PIX_FMT_DRM_PRIME) {
+        AVHWFramesContext *hwframes;
+
+        av_assert0(s->device_ref);
+        av_buffer_unref(&ctx->frames_ref);
+        ctx->frames_ref = av_hwframe_ctx_alloc(s->device_ref);
+        if (!ctx->frames_ref) {
+            ret = AVERROR(ENOMEM);
+            return ret;
+        }
+
+        hwframes = (AVHWFramesContext*)ctx->frames_ref->data;
+        hwframes->format = ctx->av_pix_fmt;
+        hwframes->sw_format = ctx->sw_pix_fmt;
+        hwframes->width = ctx->width;
+        hwframes->height = ctx->height;
+        ret = av_hwframe_ctx_init(ctx->frames_ref);
+        if (ret < 0)
+            return ret;
+    }
+
     ret = ioctl(s->fd, VIDIOC_G_FMT, &ctx->format);
     if (ret)
         av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", ctx->name);
 
     memset(&req, 0, sizeof(req));
     req.count = ctx->num_buffers;
-    req.memory = V4L2_MEMORY_MMAP;
+    req.memory = ctx->av_pix_fmt == AV_PIX_FMT_DRM_PRIME && V4L2_TYPE_IS_OUTPUT(ctx->type) ?
+        V4L2_MEMORY_DMABUF : V4L2_MEMORY_MMAP;
     req.type = ctx->type;
     ret = ioctl(s->fd, VIDIOC_REQBUFS, &req);
     if (ret < 0) {
@@ -712,9 +770,10 @@ int ff_v4l2_context_init(V4L2Context* ctx)
         }
     }
 
-    av_log(logger(ctx), AV_LOG_DEBUG, "%s: %s %02d buffers initialized: %04ux%04u, sizeimage %08u, bytesperline %08u\n", ctx->name,
+    av_log(logger(ctx), AV_LOG_DEBUG, "%s: %s %02d %s buffers initialized: %04ux%04u, sizeimage %08u, bytesperline %08u\n", ctx->name,
         V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? av_fourcc2str(ctx->format.fmt.pix_mp.pixelformat) : av_fourcc2str(ctx->format.fmt.pix.pixelformat),
         req.count,
+        req.memory == V4L2_MEMORY_DMABUF ? "DMA" : "MMAP",
         v4l2_get_width(&ctx->format),
         v4l2_get_height(&ctx->format),
         V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage : ctx->format.fmt.pix.sizeimage,
diff --git a/libavcodec/v4l2_context.h b/libavcodec/v4l2_context.h
index ee08f3de41..f9d02b748f 100644
--- a/libavcodec/v4l2_context.h
+++ b/libavcodec/v4l2_context.h
@@ -51,6 +51,7 @@ typedef struct V4L2Context {
      * AV_PIX_FMT_NONE means this is an encoded stream.
      */
     enum AVPixelFormat av_pix_fmt;
+    enum AVPixelFormat sw_pix_fmt;
 
     /**
      * AVCodecID corresponding to this buffer context.
@@ -91,6 +92,11 @@ typedef struct V4L2Context {
      */
     int done;
 
+    /**
+     * Reference to the AVHWFramesContext describing the AV_PIX_FMT_DRM_PRIME
+     * frames produced by this context.
+     */
+    AVBufferRef *frames_ref;
 } V4L2Context;
 
 /**
diff --git a/libavcodec/v4l2_m2m.c b/libavcodec/v4l2_m2m.c
index 358f587797..363bfab548 100644
--- a/libavcodec/v4l2_m2m.c
+++ b/libavcodec/v4l2_m2m.c
@@ -149,13 +149,15 @@ static int v4l2_configure_contexts(V4L2m2mContext* s)
 
     ofmt = s->output.format;
     cfmt = s->capture.format;
-    av_log(log_ctx, AV_LOG_INFO, "requesting formats: output=%s capture=%s\n",
+    av_log(log_ctx, AV_LOG_INFO, "requesting formats: output=%s/%s capture=%s/%s\n",
                                  av_fourcc2str(V4L2_TYPE_IS_MULTIPLANAR(ofmt.type) ?
                                                ofmt.fmt.pix_mp.pixelformat :
                                                ofmt.fmt.pix.pixelformat),
+                                 av_get_pix_fmt_name(s->output.av_pix_fmt) ?: "none",
                                  av_fourcc2str(V4L2_TYPE_IS_MULTIPLANAR(cfmt.type) ?
                                                cfmt.fmt.pix_mp.pixelformat :
-                                               cfmt.fmt.pix.pixelformat));
+                                               cfmt.fmt.pix.pixelformat),
+                                 av_get_pix_fmt_name(s->capture.av_pix_fmt) ?: "none");
 
     ret = ff_v4l2_context_set_format(&s->output);
     if (ret) {
@@ -175,7 +177,9 @@ static int v4l2_configure_contexts(V4L2m2mContext* s)
         goto error;
     }
 
-    /* decoder's buffers need to be updated at a later stage */
+    /* the decoder's capture buffers are updated in v4l2_try_start, once a
+     * valid capture format has been negotiated.
+     */
     if (!s->avctx || !av_codec_is_decoder(s->avctx->codec)) {
         ret = ff_v4l2_context_init(&s->capture);
         if (ret) {
diff --git a/libavcodec/v4l2_m2m.h b/libavcodec/v4l2_m2m.h
index c860e96ef5..97848363ef 100644
--- a/libavcodec/v4l2_m2m.h
+++ b/libavcodec/v4l2_m2m.h
@@ -62,6 +62,9 @@ typedef struct V4L2m2mContext {
 
     /* reference back to V4L2m2mPriv */
     void *priv;
+
+    /* reference to the AVHWDeviceContext used to create the AVHWFramesContext for DRM_PRIME frames */
+    AVBufferRef *device_ref;
 } V4L2m2mContext;
 
 typedef struct V4L2m2mPriv {
-- 
2.20.1
