kernel-brax3-ubuntu-touch/drivers/media/platform/mtk-vcodec/venc/venc_vcp_if.c
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 MediaTek Inc.
* Author: Chia-Mao Hung<chia-mao.hung@mediatek.com>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/soc/mediatek/mtk_tinysys_ipi.h>
#include <linux/mtk_vcu_controls.h>
#include <linux/delay.h>
#include <soc/mediatek/smi.h>
#include <media/v4l2-mem2mem.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include "../mtk_vcodec_drv.h"
#include "../mtk_vcodec_util.h"
#include "../mtk_vcodec_enc.h"
#include "../venc_drv_base.h"
#include "../venc_ipi_msg.h"
#include "../venc_vcu_if.h"
#include "mtk_vcodec_enc_pm.h"
#include "vcp_ipi_pin.h"
#include "vcp_mbox_layout.h" /* for IPI mbox size */
#include "vcp_helper.h"
// TODO: need to remove ISR IPIs
#include "mtk_vcodec_intr.h"
#ifdef CONFIG_MTK_ENG_BUILD
#define IPI_TIMEOUT_MS (10000U)
#else
#define IPI_TIMEOUT_MS (5000U + ((mtk_vcodec_dbg | mtk_v4l2_dbg_level) ? 5000U : 0U))
#endif
#define IPI_FIRST_VENC_SETPARAM_TIMEOUT_MS (60000U)
#define IPI_POLLING_INTERVAL_US 10
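/* Bookkeeping node for a buffer allocated on behalf of the VCP firmware, so the
 * same attachment/sg_table can be looked up again and released on ENC_MEM_FREE
 * or at instance deinit.
 */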
struct vcp_enc_mem_list {
struct vcodec_mem_obj mem;
struct dma_buf_attachment *attach;
struct sg_table *sgt;
struct list_head list;
};
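/* ENC_INIT_DONE handling: translate the instance address reported by VCP into a
 * kernel virtual address inside the VENC reserved shared memory, remember it as
 * the per-instance VSI, and set up dev->tf_info in the same reserved region.
 */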
static void handle_enc_init_msg(struct mtk_vcodec_dev *dev, struct venc_vcu_inst *vcu, void *data)
{
struct venc_vcu_ipi_msg_init *msg = data;
__u64 shmem_pa_start = (__u64)vcp_get_reserve_mem_phys(VENC_MEM_ID);
__u64 inst_offset = ((msg->vcu_inst_addr & 0x0FFFFFFF) - (shmem_pa_start & 0x0FFFFFFF));
if (vcu == NULL)
return;
mtk_vcodec_debug(vcu, "+ venc_inst = 0x%lx, vcu_inst_addr = 0x%x, id = %d",
(uintptr_t)msg->ap_inst_addr, msg->vcu_inst_addr, msg->msg_id);
vcu->inst_addr = msg->vcu_inst_addr;
vcu->vsi = (void *)((__u64)vcp_get_reserve_mem_virt(VENC_MEM_ID) + inst_offset);
dev->tf_info = (struct mtk_tf_info *)
((__u64)vcp_get_reserve_mem_virt(VENC_MEM_ID) + VENC_TF_INFO_OFFSET);
}
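/* QUERY_CAP ack: copy the capability tables (supported formats / frame sizes)
 * from the VENC reserved shared memory into the buffer the AP passed in
 * ap_data_addr.
 */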
static void handle_query_cap_ack_msg(struct venc_vcu_ipi_query_cap_ack *msg)
{
struct venc_vcu_inst *vcu = (struct venc_vcu_inst *)msg->ap_inst_addr;
void *data;
int size = 0;
__u64 shmem_pa_start = (__u64)vcp_get_reserve_mem_phys(VENC_MEM_ID);
__u64 data_offset = ((msg->vcu_data_addr & 0x0FFFFFFF) - (shmem_pa_start & 0x0FFFFFFF));
if (vcu == NULL)
return;
mtk_vcodec_debug(vcu, "+ ap_inst_addr = 0x%lx, vcu_data_addr = 0x%x, id = %d",
(uintptr_t)msg->ap_inst_addr, msg->vcu_data_addr, msg->id);
/* mapping vcp address to kernel virtual address */
data = (void *)((__u64)vcp_get_reserve_mem_virt(VENC_MEM_ID) + data_offset);
if (data == NULL)
return;
switch (msg->id) {
case GET_PARAM_VENC_CAP_SUPPORTED_FORMATS:
size = sizeof(struct mtk_video_fmt);
memcpy((void *)msg->ap_data_addr, data,
size * MTK_MAX_ENC_CODECS_SUPPORT);
break;
case GET_PARAM_VENC_CAP_FRAME_SIZES:
size = sizeof(struct mtk_codec_framesizes);
memcpy((void *)msg->ap_data_addr, data,
size * MTK_MAX_ENC_CODECS_SUPPORT);
break;
default:
break;
}
mtk_vcodec_debug(vcu, "- vcu_inst_addr = 0x%llx", vcu->inst_addr);
}
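/* Pick the device whose IOMMU a buffer of the given type must be mapped through:
 * the VCP 256MB work regions (alternating by context id) for SW buffers, the
 * secure VCP IOMMU for secure SW buffers, and the venc platform device for HW
 * working buffers.
 */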
static struct device *get_dev_by_mem_type(struct venc_inst *inst, struct vcodec_mem_obj *mem)
{
if (mem->type == MEM_TYPE_FOR_SW) {
if (inst->ctx->id & 1)
return vcp_get_io_device(VCP_IOMMU_WORK_256MB2);
else
return vcp_get_io_device(VCP_IOMMU_256MB1);
} else if (mem->type == MEM_TYPE_FOR_SEC_SW)
return vcp_get_io_device(VCP_IOMMU_SEC);
else if (mem->type == MEM_TYPE_FOR_HW || mem->type == MEM_TYPE_FOR_SEC_HW
|| mem->type == MEM_TYPE_FOR_SEC_WFD_HW)
return &inst->vcu_inst.ctx->dev->plat_dev->dev;
else
return NULL;
}
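/* Send one IPI message to the VCP VENC service and, unless it is an ack, block
 * until the matching *_DONE message signals this instance (busy-polling instead
 * of sleeping when called from atomic context). On timeout or failure, trigger a
 * VCP halt/restart and mark the instance aborted.
 */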
static int venc_vcp_ipi_send(struct venc_inst *inst, void *msg, int len, bool is_ack)
{
int ret, ipi_size;
unsigned long timeout = 0;
struct share_obj obj;
unsigned int suspend_block_cnt = 0;
int ipi_wait_type = IPI_SEND_WAIT;
struct venc_ap_ipi_msg_set_param *ap_out_msg;
if (preempt_count())
ipi_wait_type = IPI_SEND_POLLING;
if (!is_ack) {
mutex_lock(&inst->ctx->dev->ipi_mutex);
if (*(u32 *)msg != AP_IPIMSG_ENC_BACKUP) {
while (inst->ctx->dev->is_codec_suspending == 1) {
mutex_unlock(&inst->ctx->dev->ipi_mutex);
suspend_block_cnt++;
if (suspend_block_cnt > SUSPEND_TIMEOUT_CNT) {
mtk_v4l2_debug(0, "VENC blocked by suspend\n");
suspend_block_cnt = 0;
}
usleep_range(10000, 20000);
mutex_lock(&inst->ctx->dev->ipi_mutex);
}
}
}
if (inst->vcu_inst.abort || inst->vcu_inst.daemon_pid != get_vcp_generation())
goto ipi_err_unlock;
while (!is_vcp_ready(VCP_A_ID)) {
mtk_v4l2_debug((((timeout % 20) == 10) ? 0 : 4), "[VCP] wait ready %lu ms", timeout);
mdelay(1);
timeout++;
if (timeout > VCP_SYNC_TIMEOUT_MS) {
mtk_vcodec_err(inst, "VCP_A_ID not ready");
mtk_smi_dbg_hang_detect("VENC VCP");
#if IS_ENABLED(CONFIG_MTK_EMI)
mtk_emidbg_dump();
#endif
//BUG_ON(1);
goto ipi_err_unlock;
}
}
if (len > (sizeof(struct share_obj) - sizeof(int32_t) - sizeof(uint32_t))) {
mtk_vcodec_err(inst, "ipi data size wrong %d > %zu", len, sizeof(struct share_obj));
goto ipi_err_unlock;
}
memset(&obj, 0, sizeof(obj));
memcpy(obj.share_buf, msg, len);
obj.id = inst->vcu_inst.id;
obj.len = len;
ipi_size = ((sizeof(u32) * 2) + len + 3) / 4;
inst->vcu_inst.failure = 0;
inst->ctx->err_msg = *(__u32 *)msg;
mtk_v4l2_debug(2, "id %d len %d msg 0x%x is_ack %d %d", obj.id, obj.len, *(u32 *)msg,
is_ack, inst->vcu_inst.signaled);
ret = mtk_ipi_send(&vcp_ipidev, IPI_OUT_VENC_0, ipi_wait_type, &obj,
ipi_size, IPI_TIMEOUT_MS);
if (is_ack)
return 0;
if (ret != IPI_ACTION_DONE) {
mtk_vcodec_err(inst, "mtk_ipi_send %X fail %d", *(u32 *)msg, ret);
goto ipi_err_wait_and_unlock;
}
if (!is_ack) {
/* wait for VCP's ACK */
timeout = msecs_to_jiffies(IPI_TIMEOUT_MS);
if (*(__u32 *)msg == AP_IPIMSG_ENC_SET_PARAM &&
inst->ctx->state == MTK_STATE_INIT) {
ap_out_msg = (struct venc_ap_ipi_msg_set_param *) msg;
if (ap_out_msg->param_id == VENC_SET_PARAM_ENC)
timeout = msecs_to_jiffies(IPI_FIRST_VENC_SETPARAM_TIMEOUT_MS);
}
if (ipi_wait_type == IPI_SEND_POLLING) {
ret = IPI_TIMEOUT_MS * 1000;
while (inst->vcu_inst.signaled == false) {
udelay(IPI_POLLING_INTERVAL_US);
ret -= IPI_POLLING_INTERVAL_US;
if (ret < 0) {
ret = 0;
break;
}
}
} else
ret = wait_event_timeout(inst->vcu_inst.wq_hd,
inst->vcu_inst.signaled, timeout);
inst->vcu_inst.signaled = false;
if (ret == 0 || inst->vcu_inst.failure) {
mtk_vcodec_err(inst, "wait vcp ipi %X ack time out or fail!%d %d",
*(u32 *)msg, ret, inst->vcu_inst.failure);
goto ipi_err_wait_and_unlock;
}
}
mutex_unlock(&inst->ctx->dev->ipi_mutex);
return 0;
ipi_err_wait_and_unlock:
timeout = 0;
if (inst->vcu_inst.daemon_pid == get_vcp_generation()) {
trigger_vcp_halt(VCP_A_ID);
while (inst->vcu_inst.daemon_pid == get_vcp_generation()) {
if (timeout > VCP_SYNC_TIMEOUT_MS) {
mtk_v4l2_debug(0, "halt restart timeout %x\n",
inst->vcu_inst.daemon_pid);
break;
}
usleep_range(10000, 20000);
timeout += 10;
}
}
inst->vcu_inst.failure = VENC_IPI_MSG_STATUS_FAIL;
inst->ctx->err_msg = *(__u32 *)msg;
ipi_err_unlock:
inst->vcu_inst.abort = 1;
if (!is_ack)
mutex_unlock(&inst->ctx->dev->ipi_mutex);
return -EIO;
}
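/* VCU_IPIMSG_ENC_MEM_ALLOC: either report the static reserved shared memory
 * (MEM_TYPE_FOR_SHM) or allocate a new buffer on the device matching the
 * requested type and track it on the instance list so it can be freed later.
 */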
static void handle_venc_mem_alloc(struct venc_vcu_ipi_mem_op *msg)
{
struct venc_vcu_inst *vcu = (struct venc_vcu_inst *)msg->ap_inst_addr;
struct venc_inst *inst = NULL;
struct device *dev = NULL;
struct vcp_enc_mem_list *tmp = NULL;
struct dma_buf_attachment *attach = NULL;
struct sg_table *sgt = NULL;
if (msg->mem.type == MEM_TYPE_FOR_SHM) {
msg->status = 0;
msg->mem.va = (__u64)vcp_get_reserve_mem_virt(VENC_MEM_ID);
msg->mem.pa = (__u64)vcp_get_reserve_mem_phys(VENC_MEM_ID);
msg->mem.len = (__u64)vcp_get_reserve_mem_size(VENC_MEM_ID);
msg->mem.iova = msg->mem.pa;
mtk_v4l2_debug(4, "va 0x%llx pa 0x%llx iova 0x%llx len %d type %d size of %d %d\n",
msg->mem.va, msg->mem.pa, msg->mem.iova, msg->mem.len, msg->mem.type,
sizeof(msg->mem), sizeof(*msg));
} else {
if (IS_ERR_OR_NULL(vcu))
return;
inst = container_of(vcu, struct venc_inst, vcu_inst);
dev = get_dev_by_mem_type(inst, &msg->mem);
msg->status = mtk_vcodec_alloc_mem(&msg->mem, dev, &attach, &sgt);
mtk_vcodec_debug(vcu, "va 0x%llx pa 0x%llx iova 0x%llx len %d type %d\n",
msg->mem.va, msg->mem.pa, msg->mem.iova, msg->mem.len, msg->mem.type);
}
if (msg->status) {
mtk_vcodec_err(vcu, "fail %d, va 0x%llx pa 0x%llx iova 0x%llx len %d type %d",
msg->status, msg->mem.va, msg->mem.pa,
msg->mem.iova, msg->mem.len, msg->mem.type);
/* reset to prevent a VCP translation fault */
msg->mem.pa = 0;
msg->mem.iova = 0;
} else if (msg->mem.type != MEM_TYPE_FOR_SHM) {
tmp = kmalloc(sizeof(struct vcp_enc_mem_list), GFP_KERNEL);
if (tmp) {
mutex_lock(vcu->ctx_ipi_lock);
tmp->attach = attach;
tmp->sgt = sgt;
tmp->mem = msg->mem;
list_add_tail(&tmp->list, &vcu->bufs);
mutex_unlock(vcu->ctx_ipi_lock);
}
}
}
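/* VCU_IPIMSG_ENC_MEM_FREE: look up the buffer on the instance list and free it;
 * reject the request if it was never allocated through handle_venc_mem_alloc().
 */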
static void handle_venc_mem_free(struct venc_vcu_ipi_mem_op *msg)
{
struct venc_vcu_inst *vcu = (struct venc_vcu_inst *)msg->ap_inst_addr;
struct venc_inst *inst = NULL;
struct device *dev = NULL;
struct vcp_enc_mem_list *tmp = NULL;
struct list_head *p, *q;
bool found = 0;
if (IS_ERR_OR_NULL(vcu))
return;
mutex_lock(vcu->ctx_ipi_lock);
list_for_each_safe(p, q, &vcu->bufs) {
tmp = list_entry(p, struct vcp_enc_mem_list, list);
if (!memcmp(&tmp->mem, &msg->mem, sizeof(struct vcodec_mem_obj))) {
found = 1;
list_del(p);
break;
}
}
mutex_unlock(vcu->ctx_ipi_lock);
if (!found) {
mtk_vcodec_err(vcu, "not found %d, va 0x%llx pa 0x%llx iova 0x%llx len %d type %d",
msg->status, msg->mem.va, msg->mem.pa,
msg->mem.iova, msg->mem.len, msg->mem.type);
return;
}
mtk_vcodec_debug(vcu, "va 0x%llx pa 0x%llx iova 0x%llx len %d type %d\n",
msg->mem.va, msg->mem.pa, msg->mem.iova, msg->mem.len, msg->mem.type);
inst = container_of(vcu, struct venc_inst, vcu_inst);
dev = get_dev_by_mem_type(inst, &msg->mem);
msg->status = mtk_vcodec_free_mem(&msg->mem, dev, tmp->attach, tmp->sgt);
kfree(tmp);
if (msg->status)
mtk_vcodec_err(vcu, "fail %d, va 0x%llx pa 0x%llx iova 0x%llx len %d type %d",
msg->status, msg->mem.va, msg->mem.pa,
msg->mem.iova, msg->mem.len, msg->mem.type);
}
static void handle_enc_waitisr_msg(struct venc_vcu_inst *vcu,
void *data, uint32_t timeout)
{
struct venc_vcu_ipi_msg_waitisr *msg = data;
struct mtk_vcodec_ctx *ctx = vcu->ctx;
msg->irq_status = ctx->irq_status;
msg->timeout = timeout;
}
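/* Verify that the codec id carried in an IPI message matches the fourcc currently
 * configured on the capture (bitstream) queue.
 */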
static int check_codec_id(struct venc_vcu_ipi_msg_common *msg, unsigned int fmt)
{
int codec_id = 0, ret = 0;
switch (fmt) {
case V4L2_PIX_FMT_H264:
codec_id = VENC_H264;
break;
case V4L2_PIX_FMT_VP8:
codec_id = VENC_VP8;
break;
case V4L2_PIX_FMT_MPEG4:
codec_id = VENC_MPEG4;
break;
case V4L2_PIX_FMT_H263:
codec_id = VENC_H263;
break;
case V4L2_PIX_FMT_H265:
codec_id = VENC_H265;
break;
case V4L2_PIX_FMT_HEIF:
codec_id = VENC_HEIF;
break;
default:
pr_info("%s fourcc not supported", __func__);
break;
}
if (codec_id == 0) {
mtk_v4l2_err("[error] venc unsupported fourcc\n");
ret = -1;
} else if (msg->codec_id == codec_id) {
pr_info("%s ipi id %d is correct\n", __func__, msg->codec_id);
ret = 0;
} else {
mtk_v4l2_debug(2, "[Info] ipi id %d is incorrect\n", msg->codec_id);
ret = -1;
}
return ret;
}
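/* VCU_IPIMSG_ENC_GET_BS_BUFFER: wait until a bitstream (capture) buffer is
 * available, then report its dma address and size back to the VCP through the
 * reply message and the VSI.
 */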
static int handle_enc_get_bs_buf(struct venc_vcu_inst *vcu, void *data)
{
struct mtk_vcodec_mem *pbs_buf;
struct mtk_vcodec_ctx *ctx = vcu->ctx;
struct venc_vsi *vsi = (struct venc_vsi *)vcu->vsi;
struct venc_vcu_ipi_msg_get_bs *msg = (struct venc_vcu_ipi_msg_get_bs *)data;
long timeout_jiff;
int ret = 1;
pbs_buf = mtk_vcodec_get_bs(ctx);
timeout_jiff = msecs_to_jiffies(1000);
while (pbs_buf == NULL) {
ret = wait_event_interruptible_timeout(
vcu->ctx->bs_wq,
v4l2_m2m_num_dst_bufs_ready(
vcu->ctx->m2m_ctx) > 0 ||
vcu->ctx->state == MTK_STATE_FLUSH,
timeout_jiff);
pbs_buf = mtk_vcodec_get_bs(ctx);
}
vsi->venc.venc_bs_va = (u64)(uintptr_t)pbs_buf;
msg->bs_addr = pbs_buf->dma_addr;
msg->bs_size = pbs_buf->size;
pbs_buf->buf_fd = msg->bs_fd;
return 1;
}
static void venc_vcp_free_mq_node(struct mtk_vcodec_dev *dev,
struct mtk_vcodec_msg_node *mq_node)
{
unsigned long flags;
spin_lock_irqsave(&dev->mq.lock, flags);
list_add(&mq_node->list, &dev->mq.nodes);
spin_unlock_irqrestore(&dev->mq.lock, flags);
}
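/* Kernel thread that drains the IPI message queue filled by venc_vcp_ipi_isr():
 * it validates the target instance against the device context list and dispatches
 * each VCU_IPIMSG_* to its handler, sending the corresponding *_DONE ack where
 * needed.
 */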
int vcp_enc_ipi_handler(void *arg)
{
struct mtk_vcodec_dev *dev = (struct mtk_vcodec_dev *)arg;
struct share_obj *obj = NULL;
struct venc_vcu_ipi_msg_common *msg = NULL;
struct venc_inst *inst = NULL;
struct venc_vcu_inst *vcu;
struct venc_vsi *vsi = NULL;
struct mtk_vcodec_ctx *ctx;
int ret = 0;
struct mtk_vcodec_msg_node *mq_node;
struct venc_vcu_ipi_mem_op *shem_msg;
unsigned long flags;
struct list_head *p, *q;
struct mtk_vcodec_ctx *temp_ctx;
int msg_valid = 0;
struct sched_param sched_p = { .sched_priority = MTK_VCODEC_IPI_THREAD_PRIORITY };
mtk_v4l2_debug_enter();
BUILD_BUG_ON(sizeof(struct venc_ap_ipi_msg_init) > SHARE_BUF_SIZE);
BUILD_BUG_ON(sizeof(struct venc_ap_ipi_query_cap) > SHARE_BUF_SIZE);
BUILD_BUG_ON(sizeof(struct venc_ap_ipi_msg_set_param) > SHARE_BUF_SIZE);
BUILD_BUG_ON(sizeof(struct venc_ap_ipi_msg_enc) > SHARE_BUF_SIZE);
BUILD_BUG_ON(sizeof(struct venc_ap_ipi_msg_deinit) > SHARE_BUF_SIZE);
BUILD_BUG_ON(
sizeof(struct venc_vcu_ipi_query_cap_ack) > SHARE_BUF_SIZE);
BUILD_BUG_ON(sizeof(struct venc_vcu_ipi_msg_common) > SHARE_BUF_SIZE);
BUILD_BUG_ON(sizeof(struct venc_vcu_ipi_msg_init) > SHARE_BUF_SIZE);
BUILD_BUG_ON(
sizeof(struct venc_vcu_ipi_msg_set_param) > SHARE_BUF_SIZE);
BUILD_BUG_ON(sizeof(struct venc_vcu_ipi_msg_enc) > SHARE_BUF_SIZE);
BUILD_BUG_ON(sizeof(struct venc_vcu_ipi_msg_deinit) > SHARE_BUF_SIZE);
BUILD_BUG_ON(sizeof(struct venc_vcu_ipi_msg_waitisr) > SHARE_BUF_SIZE);
BUILD_BUG_ON(
sizeof(struct venc_vcu_ipi_mem_op) > SHARE_BUF_SIZE);
sched_setscheduler(current, SCHED_FIFO, &sched_p);
do {
ret = wait_event_interruptible(dev->mq.wq, atomic_read(&dev->mq.cnt) > 0);
if (ret < 0) {
mtk_v4l2_debug(0, "wait event return %d (suspending %d)\n",
ret, atomic_read(&dev->mq.cnt));
continue;
}
spin_lock_irqsave(&dev->mq.lock, flags);
mq_node = list_entry(dev->mq.head.next, struct mtk_vcodec_msg_node, list);
list_del(&(mq_node->list));
atomic_dec(&dev->mq.cnt);
spin_unlock_irqrestore(&dev->mq.lock, flags);
obj = &mq_node->ipi_data;
msg = (struct venc_vcu_ipi_msg_common *)obj->share_buf;
if (msg == NULL ||
(struct venc_vcu_inst *)(unsigned long)msg->ap_inst_addr == NULL) {
mtk_v4l2_err(" msg invalid %p\n", msg);
venc_vcp_free_mq_node(dev, mq_node);
continue;
}
/* handle VSI (shared memory) preparation when the VCP service starts, before any instance exists */
if (msg->msg_id == VCU_IPIMSG_ENC_MEM_ALLOC) {
shem_msg = (struct venc_vcu_ipi_mem_op *)obj->share_buf;
if (shem_msg->mem.type == MEM_TYPE_FOR_SHM) {
handle_venc_mem_alloc((void *)shem_msg);
shem_msg->vcp_addr[0] = (__u32)VCP_PACK_IOVA(
vcp_get_reserve_mem_phys(VENC_SET_PROP_MEM_ID));
shem_msg->vcp_addr[1] = (__u32)VCP_PACK_IOVA(
vcp_get_reserve_mem_phys(VENC_VCP_LOG_INFO_ID));
shem_msg->msg_id = AP_IPIMSG_ENC_MEM_ALLOC_DONE;
ret = mtk_ipi_send(&vcp_ipidev, IPI_OUT_VENC_0, IPI_SEND_WAIT, obj,
PIN_OUT_SIZE_VENC, 100);
if (ret != IPI_ACTION_DONE)
mtk_v4l2_err("mtk_ipi_send fail %d", ret);
venc_vcp_free_mq_node(dev, mq_node);
continue;
}
}
vcu = (struct venc_vcu_inst *)(unsigned long)msg->ap_inst_addr;
/* Check IPI inst is valid */
mutex_lock(&dev->ctx_mutex);
msg_valid = 0;
list_for_each_safe(p, q, &dev->ctx_list) {
temp_ctx = list_entry(p, struct mtk_vcodec_ctx, list);
inst = (struct venc_inst *)temp_ctx->drv_handle;
if (inst != NULL && vcu == &inst->vcu_inst && vcu->ctx == temp_ctx) {
msg_valid = 1;
break;
}
}
if (!msg_valid) {
mtk_v4l2_err(" msg vcu not exist %p\n", vcu);
mutex_unlock(&dev->ctx_mutex);
venc_vcp_free_mq_node(dev, mq_node);
continue;
}
if (vcu->abort || vcu->daemon_pid != get_vcp_generation()) {
mtk_v4l2_err(" [%d] msg vcu abort %d %d\n",
vcu->ctx->id, vcu->daemon_pid, get_vcp_generation());
mutex_unlock(&dev->ctx_mutex);
venc_vcp_free_mq_node(dev, mq_node);
continue;
}
inst = container_of(vcu, struct venc_inst, vcu_inst);
mtk_v4l2_debug(2, "+ pop msg_id %X, ml_cnt %d, vcu %lx, status %d",
msg->msg_id, atomic_read(&dev->mq.cnt), (unsigned long)vcu, msg->status);
ctx = vcu->ctx;
msg->ctx_id = ctx->id;
vsi = (struct venc_vsi *)vcu->vsi;
switch (msg->msg_id) {
case VCU_IPIMSG_ENC_INIT_DONE:
handle_enc_init_msg(dev, vcu, (void *)obj->share_buf);
if (msg->status != VENC_IPI_MSG_STATUS_OK)
vcu->failure = VENC_IPI_MSG_STATUS_FAIL;
else
vcu->ctx->state = MTK_STATE_INIT;
fallthrough;
case VCU_IPIMSG_ENC_SET_PARAM_DONE:
case VCU_IPIMSG_ENC_ENCODE_DONE:
case VCU_IPIMSG_ENC_DEINIT_DONE:
case VCU_IPIMSG_ENC_BACKUP_DONE:
vcu->signaled = true;
wake_up(&vcu->wq_hd);
break;
case VCU_IPIMSG_ENC_QUERY_CAP_DONE:
handle_query_cap_ack_msg((void *)obj->share_buf);
vcu->signaled = true;
wake_up(&vcu->wq_hd);
break;
case VCU_IPIMSG_ENC_PUT_BUFFER:
mtk_enc_put_buf(ctx);
msg->msg_id = AP_IPIMSG_ENC_PUT_BUFFER_DONE;
venc_vcp_ipi_send(inst, msg, sizeof(*msg), 1);
break;
case VCU_IPIMSG_ENC_MEM_ALLOC:
handle_venc_mem_alloc((void *)obj->share_buf);
msg->msg_id = AP_IPIMSG_ENC_MEM_ALLOC_DONE;
venc_vcp_ipi_send(inst, msg, sizeof(struct venc_vcu_ipi_mem_op), 1);
break;
case VCU_IPIMSG_ENC_MEM_FREE:
handle_venc_mem_free((void *)obj->share_buf);
msg->msg_id = AP_IPIMSG_ENC_MEM_FREE_DONE;
venc_vcp_ipi_send(inst, msg, sizeof(struct venc_vcu_ipi_mem_op), 1);
break;
// TODO: need to remove HW lock / power IPIs
case VCU_IPIMSG_ENC_WAIT_ISR:
if (msg->status == MTK_VENC_CORE_0)
vcodec_trace_count("VENC_HW_CORE_0", 2);
else
vcodec_trace_count("VENC_HW_CORE_1", 2);
if (-1 == mtk_vcodec_wait_for_done_ctx(ctx, msg->status,
MTK_INST_IRQ_RECEIVED,
WAIT_INTR_TIMEOUT_MS)) {
handle_enc_waitisr_msg(vcu, msg, 1);
mtk_vcodec_debug(vcu,
"irq_status %x <-", ctx->irq_status);
} else
handle_enc_waitisr_msg(vcu, msg, 0);
if (msg->status == MTK_VENC_CORE_0)
vcodec_trace_count("VENC_HW_CORE_0", 1);
else
vcodec_trace_count("VENC_HW_CORE_1", 1);
msg->msg_id = AP_IPIMSG_ENC_WAIT_ISR_DONE;
venc_vcp_ipi_send(inst, msg, sizeof(*msg), 1);
break;
case VCU_IPIMSG_ENC_POWER_ON:
ctx->sysram_enable = vsi->config.sysram_enable;
venc_encode_prepare(ctx, msg->status, &flags);
msg->msg_id = AP_IPIMSG_ENC_POWER_ON_DONE;
venc_vcp_ipi_send(inst, msg, sizeof(*msg), 1);
break;
case VCU_IPIMSG_ENC_POWER_OFF:
ctx->sysram_enable = vsi->config.sysram_enable;
venc_encode_unprepare(ctx, msg->status, &flags);
msg->msg_id = AP_IPIMSG_ENC_POWER_OFF_DONE;
venc_vcp_ipi_send(inst, msg, sizeof(*msg), 1);
break;
case VCU_IPIMSG_ENC_TRACE:
{
struct venc_vcu_ipi_msg_trace *trace_msg =
(struct venc_vcu_ipi_msg_trace *)obj->share_buf;
char buf[16];
snprintf(buf, sizeof(buf), "VENC_TRACE_%d", trace_msg->trace_id);
vcodec_trace_count(buf, trace_msg->flag);
}
break;
case VCU_IPIMSG_ENC_CHECK_CODEC_ID:
{
if (check_codec_id(msg, ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc) == 0)
msg->status = 0;
else
msg->status = -1;
msg->msg_id = AP_IPIMSG_ENC_CHECK_CODEC_ID_DONE;
venc_vcp_ipi_send(inst, msg, sizeof(*msg), 1);
}
break;
case VCU_IPIMSG_ENC_GET_BS_BUFFER:
{
handle_enc_get_bs_buf(vcu, (void *)obj->share_buf);
msg->msg_id = AP_IPIMSG_ENC_GET_BS_BUFFER_DONE;
venc_vcp_ipi_send(inst, msg, sizeof(*msg), 1);
}
break;
default:
mtk_vcodec_err(vcu, "unknown msg id %x", msg->msg_id);
break;
}
mtk_vcodec_debug(vcu, "- id=%X", msg->msg_id);
mutex_unlock(&dev->ctx_mutex);
venc_vcp_free_mq_node(dev, mq_node);
} while (!kthread_should_stop());
mtk_v4l2_debug_leave();
return ret;
}
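/* IPI receive callback: may run in interrupt context, so it only copies the
 * message into a free queue node under the mq lock and wakes the handler thread.
 */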
static int venc_vcp_ipi_isr(unsigned int id, void *prdata, void *data, unsigned int len)
{
struct mtk_vcodec_dev *dev = (struct mtk_vcodec_dev *)prdata;
struct venc_vcu_ipi_msg_common *msg = NULL;
struct share_obj *obj = (struct share_obj *)data;
struct mtk_vcodec_msg_node *mq_node;
unsigned long flags;
msg = (struct venc_vcu_ipi_msg_common *)obj->share_buf;
// add to ipi msg list
spin_lock_irqsave(&dev->mq.lock, flags);
if (!list_empty(&dev->mq.nodes)) {
mq_node = list_entry(dev->mq.nodes.next, struct mtk_vcodec_msg_node, list);
memcpy(&mq_node->ipi_data, obj, sizeof(struct share_obj));
list_move_tail(&mq_node->list, &dev->mq.head);
atomic_inc(&dev->mq.cnt);
spin_unlock_irqrestore(&dev->mq.lock, flags);
mtk_v4l2_debug(8, "push ipi_id %x msg_id %x, ml_cnt %d",
obj->id, msg->msg_id, atomic_read(&dev->mq.cnt));
wake_up(&dev->mq.wq);
} else {
spin_unlock_irqrestore(&dev->mq.lock, flags);
mtk_v4l2_err("mq no free nodes\n");
}
return 0;
}
static int venc_vcp_backup(struct venc_inst *inst)
{
struct venc_vcu_ipi_msg_common msg;
int err = 0;
if (!inst)
return -EINVAL;
mtk_vcodec_debug_enter(inst);
memset(&msg, 0, sizeof(msg));
msg.msg_id = AP_IPIMSG_ENC_BACKUP;
msg.ap_inst_addr = inst->vcu_inst.inst_addr;
msg.ctx_id = inst->ctx->id;
err = venc_vcp_ipi_send(inst, &msg, sizeof(msg), 0);
mtk_vcodec_debug(inst, "- ret=%d", err);
return err;
}
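/* VCP event notifier: on VCP_EVENT_STOP abort every active context; on
 * VCP_EVENT_SUSPEND wait for in-flight IPIs and ask the firmware to back up its
 * state via AP_IPIMSG_ENC_BACKUP.
 */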
static int vcp_venc_notify_callback(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct mtk_vcodec_dev *dev;
struct list_head *p, *q;
struct mtk_vcodec_ctx *ctx;
int timeout = 0;
bool backup = false;
struct venc_inst *inst = NULL;
if (!mtk_vcodec_is_vcp(MTK_INST_ENCODER))
return 0;
dev = container_of(this, struct mtk_vcodec_dev, vcp_notify);
switch (event) {
case VCP_EVENT_STOP:
timeout = 0;
while (atomic_read(&dev->mq.cnt)) {
timeout++;
mdelay(1);
if (timeout > VCP_SYNC_TIMEOUT_MS) {
mtk_v4l2_debug(0, "VCP_EVENT_STOP timeout\n");
break;
}
}
mutex_lock(&dev->ctx_mutex);
// check and release any ctx hw locks still held
list_for_each_safe(p, q, &dev->ctx_list) {
ctx = list_entry(p, struct mtk_vcodec_ctx, list);
if (ctx != NULL && ctx->state != MTK_STATE_ABORT) {
ctx->state = MTK_STATE_ABORT;
inst = (struct venc_inst *)(ctx->drv_handle);
if (inst != NULL) {
inst->vcu_inst.failure = VENC_IPI_MSG_STATUS_FAIL;
inst->vcu_inst.abort = 1;
}
venc_check_release_lock(ctx);
mtk_venc_queue_error_event(ctx);
}
}
mutex_unlock(&dev->ctx_mutex);
break;
case VCP_EVENT_SUSPEND:
dev->is_codec_suspending = 1;
// check no more ipi in progress
mutex_lock(&dev->ipi_mutex);
mutex_unlock(&dev->ipi_mutex);
// send backup ipi to vcp via any one of the active instances
mutex_lock(&dev->ctx_mutex);
list_for_each_safe(p, q, &dev->ctx_list) {
ctx = list_entry(p, struct mtk_vcodec_ctx, list);
if (ctx != NULL && ctx->drv_handle != 0 &&
ctx->state < MTK_STATE_ABORT && ctx->state > MTK_STATE_FREE) {
mutex_unlock(&dev->ctx_mutex);
backup = true;
venc_vcp_backup((struct venc_inst *)ctx->drv_handle);
break;
}
}
if (!backup)
mutex_unlock(&dev->ctx_mutex);
while (atomic_read(&dev->mq.cnt)) {
timeout += 20;
usleep_range(10000, 20000);
if (timeout > VCP_SYNC_TIMEOUT_MS) {
mtk_v4l2_debug(0, "VCP_EVENT_SUSPEND timeout\n");
break;
}
}
break;
}
return NOTIFY_DONE;
}
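/* Set up the IPI message queue, register the receive callback and the handler
 * thread, and hook the VCP event notifier.
 */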
void venc_vcp_probe(struct mtk_vcodec_dev *dev)
{
int ret, i;
struct mtk_vcodec_msg_node *mq_node;
mtk_v4l2_debug_enter();
INIT_LIST_HEAD(&dev->mq.head);
spin_lock_init(&dev->mq.lock);
init_waitqueue_head(&dev->mq.wq);
atomic_set(&dev->mq.cnt, 0);
INIT_LIST_HEAD(&dev->mq.nodes);
for (i = 0; i < MTK_VCODEC_MAX_MQ_NODE_CNT; i++) {
mq_node = kmalloc(sizeof(struct mtk_vcodec_msg_node), GFP_DMA | GFP_ATOMIC);
if (!mq_node)
continue;
list_add(&mq_node->list, &dev->mq.nodes);
}
if (!VCU_FPTR(vcu_load_firmware))
mtk_vcodec_vcp |= 1 << MTK_INST_ENCODER;
ret = mtk_ipi_register(&vcp_ipidev, IPI_IN_VENC_0,
venc_vcp_ipi_isr, dev, &dev->enc_ipi_data);
if (ret)
mtk_v4l2_debug(0, " ipi_register, ret %d\n", ret);
kthread_run(vcp_enc_ipi_handler, dev, "venc_ipi_recv");
dev->vcp_notify.notifier_call = vcp_venc_notify_callback;
vcp_A_register_notify(&dev->vcp_notify);
mtk_v4l2_debug_leave();
}
void venc_vcp_remove(struct mtk_vcodec_dev *dev)
{
int timeout = 0;
struct mtk_vcodec_msg_node *mq_node, *next;
unsigned long flags;
while (atomic_read(&dev->mq.cnt)) {
timeout++;
mdelay(1);
if (timeout > VCP_SYNC_TIMEOUT_MS) {
mtk_v4l2_err("wait msgq empty timeout\n");
break;
}
}
spin_lock_irqsave(&dev->mq.lock, flags);
list_for_each_entry_safe(mq_node, next, &dev->mq.nodes, list) {
list_del(&(mq_node->list));
kfree(mq_node);
}
spin_unlock_irqrestore(&dev->mq.lock, flags);
}
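/* The following helpers translate V4L2 HEVC/MPEG-4 profile and level control
 * values into the numeric values passed to the encoder firmware.
 */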
static unsigned int venc_h265_get_profile(struct venc_inst *inst,
unsigned int profile)
{
switch (profile) {
case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
return 1;
case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
return 2;
case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
return 4;
default:
mtk_vcodec_debug(inst, "unsupported profile %d", profile);
return 1;
}
}
static unsigned int venc_h265_get_level(struct venc_inst *inst,
unsigned int level, unsigned int tier)
{
switch (level) {
case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 2 : 3;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 8 : 9;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 10 : 11;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 13 : 14;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 15 : 16;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 18 : 19;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 20 : 21;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 23 : 24;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 25 : 26;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 27 : 28;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 29 : 30;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 31 : 32;
case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
return (tier == V4L2_MPEG_VIDEO_HEVC_TIER_MAIN) ? 33 : 34;
default:
mtk_vcodec_debug(inst, "unsupported level %d", level);
return 25;
}
}
static unsigned int venc_mpeg4_get_profile(struct venc_inst *inst,
unsigned int profile)
{
switch (profile) {
case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE:
return 0;
case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE:
return 1;
case V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE:
return 2;
case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE:
return 3;
case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY:
return 4;
default:
mtk_vcodec_debug(inst, "unsupported mpeg4 profile %d", profile);
return 100;
}
}
static unsigned int venc_mpeg4_get_level(struct venc_inst *inst,
unsigned int level)
{
switch (level) {
case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0:
return 0;
case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B:
return 1;
case V4L2_MPEG_VIDEO_MPEG4_LEVEL_1:
return 2;
case V4L2_MPEG_VIDEO_MPEG4_LEVEL_2:
return 3;
case V4L2_MPEG_VIDEO_MPEG4_LEVEL_3:
return 4;
case V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B:
return 5;
case V4L2_MPEG_VIDEO_MPEG4_LEVEL_4:
return 6;
case V4L2_MPEG_VIDEO_MPEG4_LEVEL_5:
return 7;
default:
mtk_vcodec_debug(inst, "unsupported mpeg4 level %d", level);
return 4;
}
}
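/* Fill the per-instance VSI with frame/bitstream buffer addresses and metadata,
 * handle SLB cache request/release when the global mtk_venc_slb_cb state asks for
 * it, then issue AP_IPIMSG_ENC_ENCODE for the requested bs_mode.
 */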
int vcp_enc_encode(struct venc_inst *inst, unsigned int bs_mode,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
unsigned int *bs_size)
{
struct venc_ap_ipi_msg_enc out;
struct venc_ap_ipi_msg_set_param out_slb;
struct venc_vsi *vsi = (struct venc_vsi *)inst->vcu_inst.vsi;
unsigned int i, ret, ret_slb;
mtk_vcodec_debug(inst, "bs_mode %d ->", bs_mode);
if (sizeof(out) > SHARE_BUF_SIZE) {
mtk_vcodec_err(inst, "venc_ap_ipi_msg_enc cannot be larger than %d sizeof(out):%zu",
SHARE_BUF_SIZE, sizeof(out));
return -EINVAL;
}
memset(&out, 0, sizeof(out));
out.msg_id = AP_IPIMSG_ENC_ENCODE;
out.vcu_inst_addr = inst->vcu_inst.inst_addr;
out.ctx_id = inst->ctx->id;
out.bs_mode = bs_mode;
if (frm_buf) {
out.fb_num_planes = frm_buf->num_planes;
for (i = 0; i < frm_buf->num_planes; i++) {
vsi->venc.input_addr[i] =
frm_buf->fb_addr[i].dma_addr;
vsi->venc.fb_dma[i] =
frm_buf->fb_addr[i].dma_addr;
out.input_size[i] =
frm_buf->fb_addr[i].size;
out.data_offset[i] =
frm_buf->fb_addr[i].data_offset;
}
if (frm_buf->has_meta) {
vsi->meta_size = sizeof(struct mtk_hdr_dynamic_info);
vsi->meta_addr = frm_buf->meta_addr;
} else {
vsi->meta_size = 0;
vsi->meta_addr = 0;
}
if (frm_buf->has_qpmap) {
vsi->qpmap_addr = frm_buf->qpmap_dma_addr;
vsi->qpmap_size = frm_buf->qpmap_dma->size;
} else {
vsi->qpmap_addr = 0;
vsi->qpmap_size = 0;
}
if (frm_buf->dyparams_dma) {
vsi->dynamicparams_addr = frm_buf->dyparams_dma_addr;
vsi->dynamicparams_size = sizeof(struct inputqueue_dynamic_info);
mtk_vcodec_debug(inst, "vsi dynamic params addr %llx size%d",
vsi->dynamicparams_addr,
vsi->dynamicparams_size);
} else {
vsi->dynamicparams_addr = 0;
vsi->dynamicparams_size = 0;
}
mtk_vcodec_debug(inst, " num_planes = %d input (dmabuf:%lx), size %d %llx",
frm_buf->num_planes,
(unsigned long)frm_buf->fb_addr[0].dmabuf,
vsi->meta_size,
vsi->meta_addr);
mtk_vcodec_debug(inst, "vsi qpmap addr %llx size%d",
vsi->qpmap_addr, vsi->qpmap_size);
}
if (bs_buf) {
vsi->venc.bs_addr = bs_buf->dma_addr;
vsi->venc.bs_dma = bs_buf->dma_addr;
out.bs_size = bs_buf->size;
mtk_vcodec_debug(inst, " output (dma:%lx)",
(unsigned long)bs_buf->dmabuf);
}
if (inst->ctx->use_slbc && atomic_read(&mtk_venc_slb_cb.release_slbc)) {
memset(&out_slb, 0, sizeof(out_slb));
out_slb.msg_id = AP_IPIMSG_ENC_SET_PARAM;
out_slb.vcu_inst_addr = inst->vcu_inst.inst_addr;
out_slb.ctx_id = inst->ctx->id;
out_slb.param_id = VENC_SET_PARAM_RELEASE_SLB;
out_slb.data_item = 2;
out_slb.data[0] = 1; //release_slb 1
out_slb.data[1] = 0x0; //slbc_addr
ret_slb = venc_vcp_ipi_send(inst, &out_slb, sizeof(out_slb), 0);
if (ret_slb)
mtk_vcodec_err(inst, "set VENC_SET_PARAM_RELEASE_SLB fail %d", ret_slb);
else {
mtk_v4l2_debug(0, "slbc_release, %p\n", &inst->ctx->sram_data);
slbc_release(&inst->ctx->sram_data);
inst->ctx->use_slbc = 0;
atomic_inc(&mtk_venc_slb_cb.later_cnt);
if (inst->ctx->enc_params.slbc_encode_performance)
atomic_dec(&mtk_venc_slb_cb.perf_used_cnt);
mtk_v4l2_debug(0, "slbc_release ref %d\n", inst->ctx->sram_data.ref);
if (inst->ctx->sram_data.ref <= 0)
atomic_set(&mtk_venc_slb_cb.release_slbc, 0);
}
mtk_v4l2_debug(0, "slb_cb %d/%d perf %d cnt %d/%d",
atomic_read(&mtk_venc_slb_cb.release_slbc),
atomic_read(&mtk_venc_slb_cb.request_slbc),
inst->ctx->enc_params.slbc_encode_performance,
atomic_read(&mtk_venc_slb_cb.perf_used_cnt),
atomic_read(&mtk_venc_slb_cb.later_cnt));
} else if (!inst->ctx->use_slbc && atomic_read(&mtk_venc_slb_cb.request_slbc)) {
if (slbc_request(&inst->ctx->sram_data) >= 0) {
inst->ctx->use_slbc = 1;
inst->ctx->slbc_addr = (unsigned int)(unsigned long)
inst->ctx->sram_data.paddr;
} else {
mtk_vcodec_err(inst, "slbc_request fail\n");
inst->ctx->use_slbc = 0;
}
if (inst->ctx->slbc_addr % 256 != 0 || inst->ctx->slbc_addr == 0) {
mtk_vcodec_err(inst, "slbc_addr error 0x%x\n", inst->ctx->slbc_addr);
inst->ctx->use_slbc = 0;
}
if (inst->ctx->use_slbc == 1) {
if (inst->ctx->enc_params.slbc_encode_performance)
atomic_inc(&mtk_venc_slb_cb.perf_used_cnt);
atomic_dec(&mtk_venc_slb_cb.later_cnt);
if (atomic_read(&mtk_venc_slb_cb.later_cnt) <= 0)
atomic_set(&mtk_venc_slb_cb.request_slbc, 0);
memset(&out_slb, 0, sizeof(out_slb));
out_slb.msg_id = AP_IPIMSG_ENC_SET_PARAM;
out_slb.vcu_inst_addr = inst->vcu_inst.inst_addr;
out_slb.ctx_id = inst->ctx->id;
out_slb.param_id = VENC_SET_PARAM_RELEASE_SLB;
out_slb.data_item = 2;
out_slb.data[0] = 0; //release_slb 0
out_slb.data[1] = inst->ctx->slbc_addr;
ret_slb = venc_vcp_ipi_send(inst, &out_slb, sizeof(out_slb), 0);
if (ret_slb) {
mtk_vcodec_err(inst, "set VENC_SET_PARAM_RELEASE_SLB fail %d",
ret_slb);
}
}
mtk_v4l2_debug(0, "slbc_request %d, 0x%x, 0x%llx\n",
inst->ctx->use_slbc, inst->ctx->slbc_addr, inst->ctx->sram_data.paddr);
mtk_v4l2_debug(0, "slb_cb %d/%d perf %d cnt %d/%d",
atomic_read(&mtk_venc_slb_cb.release_slbc),
atomic_read(&mtk_venc_slb_cb.request_slbc),
inst->ctx->enc_params.slbc_encode_performance,
atomic_read(&mtk_venc_slb_cb.perf_used_cnt),
atomic_read(&mtk_venc_slb_cb.later_cnt));
}
ret = venc_vcp_ipi_send(inst, &out, sizeof(out), 0);
if (ret) {
mtk_vcodec_err(inst, "AP_IPIMSG_ENC_ENCODE %d fail %d",
bs_mode, ret);
return ret;
}
mtk_vcodec_debug(inst, "bs_mode %d size %d key_frm %d <-",
bs_mode, inst->vcu_inst.bs_size, inst->vcu_inst.is_key_frm);
return 0;
}
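/* Thin wrappers that store the bitstream/frame buffer cookies in the VSI and then
 * run vcp_enc_encode() in header, frame or frame-final bitstream mode.
 */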
static int venc_encode_header(struct venc_inst *inst,
struct mtk_vcodec_mem *bs_buf,
unsigned int *bs_size)
{
int ret = 0;
mtk_vcodec_debug_enter(inst);
if (bs_buf == NULL)
inst->vsi->venc.venc_bs_va = 0;
else
inst->vsi->venc.venc_bs_va = (u64)(uintptr_t)bs_buf;
inst->vsi->venc.venc_fb_va = 0;
mtk_vcodec_debug(inst, "vsi venc_bs_va 0x%llx",
inst->vsi->venc.venc_bs_va);
ret = vcp_enc_encode(inst, VENC_BS_MODE_SEQ_HDR, NULL,
bs_buf, bs_size);
return ret;
}
static int venc_encode_frame(struct venc_inst *inst,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
unsigned int *bs_size)
{
int ret = 0;
unsigned int fm_fourcc = inst->ctx->q_data[MTK_Q_DATA_SRC].fmt->fourcc;
unsigned int bs_fourcc = inst->ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc;
mtk_vcodec_debug_enter(inst);
if (bs_buf == NULL)
inst->vsi->venc.venc_bs_va = 0;
else
inst->vsi->venc.venc_bs_va = (u64)(uintptr_t)bs_buf;
if (frm_buf == NULL)
inst->vsi->venc.venc_fb_va = 0;
else {
inst->vsi->venc.venc_fb_va = (u64)(uintptr_t)frm_buf;
inst->vsi->venc.timestamp = frm_buf->timestamp;
}
ret = vcp_enc_encode(inst, VENC_BS_MODE_FRAME, frm_buf,
bs_buf, bs_size);
if (ret)
return ret;
++inst->frm_cnt;
mtk_vcodec_debug(inst,
"Format: frame_va %llx (%c%c%c%c) bs_va:%llx (%c%c%c%c)",
inst->vsi->venc.venc_fb_va,
fm_fourcc & 0xFF, (fm_fourcc >> 8) & 0xFF,
(fm_fourcc >> 16) & 0xFF, (fm_fourcc >> 24) & 0xFF,
inst->vsi->venc.venc_bs_va,
bs_fourcc & 0xFF, (bs_fourcc >> 8) & 0xFF,
(bs_fourcc >> 16) & 0xFF, (bs_fourcc >> 24) & 0xFF);
return ret;
}
static int venc_encode_frame_final(struct venc_inst *inst,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
unsigned int *bs_size)
{
int ret = 0;
if (inst == NULL || inst->vsi == NULL)
return -EINVAL;
mtk_v4l2_debug(4, "check inst->vsi %p +", inst->vsi);
if (bs_buf == NULL)
inst->vsi->venc.venc_bs_va = 0;
else
inst->vsi->venc.venc_bs_va = (u64)(uintptr_t)bs_buf;
if (frm_buf == NULL)
inst->vsi->venc.venc_fb_va = 0;
else
inst->vsi->venc.venc_fb_va = (u64)(uintptr_t)frm_buf;
ret = vcp_enc_encode(inst, VENC_BS_MODE_FRAME_FINAL, frm_buf,
bs_buf, bs_size);
if (ret)
return ret;
*bs_size = inst->vcu_inst.bs_size;
mtk_vcodec_debug(inst, "bs size %d <-", *bs_size);
return ret;
}
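/* Allocate the encoder instance, add the context to the device list, and send
 * AP_IPIMSG_ENC_INIT; the VSI pointer becomes valid once the init ack has been
 * handled.
 */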
static int venc_vcp_init(struct mtk_vcodec_ctx *ctx, unsigned long *handle)
{
int ret = 0;
struct venc_inst *inst;
struct venc_ap_ipi_msg_init out;
inst = kzalloc(sizeof(*inst), GFP_KERNEL);
if (!inst) {
*handle = (unsigned long)NULL;
return -ENOMEM;
}
inst->ctx = ctx;
inst->vcu_inst.ctx = ctx;
inst->vcu_inst.dev = ctx->dev->vcu_plat_dev;
inst->vcu_inst.signaled = false;
inst->vcu_inst.id = IPI_VENC_COMMON;
inst->hw_base = mtk_vcodec_get_enc_reg_addr(inst->ctx, VENC_SYS);
mtk_vcodec_debug_enter(inst);
init_waitqueue_head(&inst->vcu_inst.wq_hd);
inst->vcu_inst.ctx_ipi_lock = kzalloc(sizeof(struct mutex),
GFP_KERNEL);
if (!inst->vcu_inst.ctx_ipi_lock) {
kfree(inst);
*handle = (unsigned long)NULL;
return -ENOMEM;
}
mutex_init(inst->vcu_inst.ctx_ipi_lock);
INIT_LIST_HEAD(&inst->vcu_inst.bufs);
memset(&out, 0, sizeof(out));
out.msg_id = AP_IPIMSG_ENC_INIT;
out.ctx_id = inst->ctx->id;
out.ap_inst_addr = (unsigned long)&inst->vcu_inst;
(*handle) = (unsigned long)inst;
inst->vcu_inst.daemon_pid = get_vcp_generation();
mtk_vcodec_add_ctx_list(ctx);
ret = venc_vcp_ipi_send(inst, &out, sizeof(out), 0);
inst->vsi = (struct venc_vsi *)inst->vcu_inst.vsi;
mtk_vcodec_debug_leave(inst);
if (ret) {
mtk_vcodec_del_ctx_list(ctx);
kfree(inst->vcu_inst.ctx_ipi_lock);
kfree(inst);
(*handle) = (unsigned long)NULL;
}
return ret;
}
static int venc_vcp_encode(unsigned long handle,
enum venc_start_opt opt,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
struct venc_done_result *result)
{
int ret = 0;
struct venc_inst *inst = (struct venc_inst *)handle;
if (inst == NULL || inst->vsi == NULL)
return -EINVAL;
mtk_vcodec_debug(inst, "opt %d ->", opt);
switch (opt) {
case VENC_START_OPT_ENCODE_SEQUENCE_HEADER: {
unsigned int bs_size_hdr = 0;
ret = venc_encode_header(inst, bs_buf, &bs_size_hdr);
if (ret)
goto encode_err;
result->bs_size = bs_size_hdr;
result->is_key_frm = false;
break;
}
case VENC_START_OPT_ENCODE_FRAME: {
/* only run in the worker context before sending the ipi;
 * the VPU flush cmd is bound to this ctx & handle,
 * otherwise the cmd callback hits a ctx error
 */
ret = venc_encode_frame(inst, frm_buf, bs_buf,
&result->bs_size);
if (ret)
goto encode_err;
result->is_key_frm = inst->vcu_inst.is_key_frm;
break;
}
case VENC_START_OPT_ENCODE_FRAME_FINAL: {
ret = venc_encode_frame_final(inst,
frm_buf, bs_buf, &result->bs_size);
if (ret)
goto encode_err;
result->is_key_frm = inst->vcu_inst.is_key_frm;
break;
}
default:
mtk_vcodec_err(inst, "venc_start_opt %d not supported", opt);
ret = -EINVAL;
break;
}
encode_err:
mtk_vcodec_debug(inst, "opt %d <-", opt);
return ret;
}
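/* Pop one finished buffer entry from the VSI ring list shared with the VCP,
 * repairing the ring bookkeeping first if the counters look corrupted.
 */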
static void venc_get_free_buffers(struct venc_inst *inst,
struct ring_input_list *list,
struct venc_done_result *pResult)
{
if (list->count < 0 || list->count >= VENC_MAX_FB_NUM) {
mtk_vcodec_err(inst, "list count %d invalid ! (write_idx %d, read_idx %d)",
list->count, list->write_idx, list->read_idx);
if (list->write_idx < 0 || list->write_idx >= VENC_MAX_FB_NUM ||
list->read_idx < 0 || list->read_idx >= VENC_MAX_FB_NUM)
list->write_idx = list->read_idx = 0;
if (list->write_idx >= list->read_idx)
list->count = list->write_idx - list->read_idx;
else
list->count = list->write_idx + VENC_MAX_FB_NUM - list->read_idx;
}
if (list->count == 0) {
mtk_vcodec_debug(inst, "[FB] there are no free buffers");
pResult->bs_va = 0;
pResult->frm_va = 0;
pResult->is_key_frm = false;
pResult->bs_size = 0;
return;
}
pResult->bs_size = list->bs_size[list->read_idx];
pResult->is_key_frm = list->is_key_frm[list->read_idx];
pResult->bs_va = list->venc_bs_va_list[list->read_idx];
pResult->frm_va = list->venc_fb_va_list[list->read_idx];
pResult->is_last_slc = list->is_last_slice[list->read_idx];
pResult->flags = list->flags[list->read_idx];
mtk_vcodec_debug(inst, "bsva %lx frva %lx bssize %d iskey %d is_last_slc=%d flags 0x%x",
pResult->bs_va,
pResult->frm_va,
pResult->bs_size,
pResult->is_key_frm,
pResult->is_last_slc,
pResult->flags);
list->read_idx = (list->read_idx == VENC_MAX_FB_NUM - 1U) ?
0U : list->read_idx + 1U;
list->count--;
}
static void venc_get_resolution_change(struct venc_inst *inst,
struct venc_vcu_config *Config,
struct venc_resolution_change *pResChange)
{
pResChange->width = Config->pic_w;
pResChange->height = Config->pic_h;
pResChange->framerate = Config->framerate;
pResChange->resolutionchange = Config->resolutionChange;
if (Config->resolutionChange)
Config->resolutionChange = 0;
mtk_vcodec_debug(inst, "get reschange %d %d %d %d\n",
pResChange->width,
pResChange->height,
pResChange->framerate,
pResChange->resolutionchange);
}
static int venc_vcp_get_param(unsigned long handle,
enum venc_get_param_type type,
void *out)
{
int ret = 0;
struct venc_inst *inst = (struct venc_inst *)handle;
struct venc_ap_ipi_query_cap msg;
if (inst == NULL)
return -EINVAL;
mtk_vcodec_debug(inst, "%s: %d", __func__, type);
inst->vcu_inst.ctx = inst->ctx;
switch (type) {
case GET_PARAM_VENC_CAP_FRAME_SIZES:
case GET_PARAM_VENC_CAP_SUPPORTED_FORMATS:
inst->vcu_inst.id = (inst->vcu_inst.id == IPI_VCU_INIT) ?
IPI_VENC_COMMON : inst->vcu_inst.id;
init_waitqueue_head(&inst->vcu_inst.wq_hd);
memset(&msg, 0, sizeof(msg));
msg.msg_id = AP_IPIMSG_ENC_QUERY_CAP;
msg.id = type;
msg.ap_inst_addr = (uintptr_t)&inst->vcu_inst;
msg.ap_data_addr = (uintptr_t)out;
msg.ctx_id = inst->ctx->id;
inst->vcu_inst.daemon_pid = get_vcp_generation();
ret = venc_vcp_ipi_send(inst, &msg, sizeof(msg), 0);
break;
case GET_PARAM_FREE_BUFFERS:
if (inst->vsi == NULL)
return -EINVAL;
venc_get_free_buffers(inst, &inst->vsi->list_free, out);
break;
case GET_PARAM_ROI_RC_QP: {
if (inst->vsi == NULL || out == NULL)
return -EINVAL;
*(int *)out = inst->vsi->config.roi_rc_qp;
break;
}
case GET_PARAM_RESOLUTION_CHANGE:
if (inst->vsi == NULL)
return -EINVAL;
venc_get_resolution_change(inst, &inst->vsi->config, out);
break;
default:
mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
ret = -EINVAL;
break;
}
return ret;
}
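/* Copy a property/log string into the reserved memory region identified by id and
 * tell the firmware where to find it through an AP_IPIMSG_ENC_SET_PARAM message.
 */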
void set_venc_vcp_data(struct venc_inst *inst, enum vcp_reserve_mem_id_t id, void *string)
{
struct venc_ap_ipi_msg_set_param msg;
void *string_va = (void *)(__u64)vcp_get_reserve_mem_virt(id);
void *string_pa = (void *)(__u64)vcp_get_reserve_mem_phys(id);
__u64 mem_size = (__u64)vcp_get_reserve_mem_size(id);
int string_len = strlen((char *)string);
mtk_vcodec_debug(inst, "mem_size 0x%llx, string_va 0x%llx, string_pa 0x%llx\n",
mem_size, string_va, string_pa);
mtk_vcodec_debug(inst, "string: %s\n", (char *)string);
mtk_vcodec_debug(inst, "string_len:%d\n", string_len);
if (string_len <= (mem_size-1))
memcpy(string_va, (char *)string, string_len + 1);
inst->vcu_inst.ctx = inst->ctx;
inst->vcu_inst.id =
(inst->vcu_inst.id == IPI_VCU_INIT) ? IPI_VENC_COMMON : inst->vcu_inst.id;
memset(&msg, 0, sizeof(msg));
msg.msg_id = AP_IPIMSG_ENC_SET_PARAM;
if (id == VENC_SET_PROP_MEM_ID)
msg.param_id = VENC_SET_PARAM_PROPERTY;
else if (id == VENC_VCP_LOG_INFO_ID)
msg.param_id = VENC_SET_PARAM_VCP_LOG_INFO;
else
mtk_vcodec_err(inst, "unknown id (%d)", id);
msg.vcu_inst_addr = inst->vcu_inst.inst_addr;
msg.data[0] = (__u32)((__u64)string_pa & 0xFFFFFFFF);
msg.data[1] = (__u32)((__u64)string_pa >> 32);
inst->vcu_inst.daemon_pid = get_vcp_generation();
mtk_vcodec_debug(inst, "msg.param_id %d msg.data[0]:0x%08x, msg.data[1]:0x%08x vcu_inst_addr=%llx\n",
msg.param_id, msg.data[0], msg.data[1], msg.vcu_inst_addr);
/* deliver the prepared set-param message to the VCP encoder service */
venc_vcp_ipi_send(inst, &msg, sizeof(msg), 0);
}
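/* Translate a venc_set_param_type plus its arguments into an
 * AP_IPIMSG_ENC_SET_PARAM message and send it synchronously.
 */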
int vcp_enc_set_param(struct venc_inst *inst,
enum venc_set_param_type id,
struct venc_enc_param *enc_param)
{
struct venc_ap_ipi_msg_set_param out;
mtk_vcodec_debug(inst, "id %d ->", id);
if (sizeof(out) > SHARE_BUF_SIZE) {
mtk_vcodec_err(inst, "venc_ap_ipi_msg_set_param cannot be larger than %d",
SHARE_BUF_SIZE);
return -EINVAL;
}
memset(&out, 0, sizeof(out));
out.msg_id = AP_IPIMSG_ENC_SET_PARAM;
out.vcu_inst_addr = inst->vcu_inst.inst_addr;
out.ctx_id = inst->ctx->id;
out.param_id = id;
switch (id) {
case VENC_SET_PARAM_ENC:
out.data_item = 0;
break;
case VENC_SET_PARAM_FORCE_INTRA:
out.data_item = 0;
break;
case VENC_SET_PARAM_ADJUST_BITRATE:
out.data_item = 1;
out.data[0] = enc_param->bitrate;
break;
case VENC_SET_PARAM_ADJUST_FRAMERATE:
out.data_item = 1;
out.data[0] = enc_param->frm_rate;
break;
case VENC_SET_PARAM_GOP_SIZE:
out.data_item = 1;
out.data[0] = enc_param->gop_size;
break;
case VENC_SET_PARAM_INTRA_PERIOD:
out.data_item = 1;
out.data[0] = enc_param->intra_period;
break;
case VENC_SET_PARAM_SKIP_FRAME:
out.data_item = 0;
break;
case VENC_SET_PARAM_PREPEND_HEADER:
out.data_item = 0;
break;
case VENC_SET_PARAM_SCENARIO:
out.data_item = 1;
out.data[0] = enc_param->scenario;
break;
case VENC_SET_PARAM_NONREFP:
out.data_item = 1;
out.data[0] = enc_param->nonrefp;
break;
case VENC_SET_PARAM_NONREFPFREQ:
out.data_item = 1;
out.data[0] = enc_param->nonrefpfreq;
break;
case VENC_SET_PARAM_DETECTED_FRAMERATE:
out.data_item = 1;
out.data[0] = enc_param->detectframerate;
break;
case VENC_SET_PARAM_RFS_ON:
out.data_item = 1;
out.data[0] = enc_param->rfs;
break;
case VENC_SET_PARAM_PREPEND_SPSPPS_TO_IDR:
out.data_item = 1;
out.data[0] = enc_param->prependheader;
break;
case VENC_SET_PARAM_OPERATION_RATE:
out.data_item = 1;
out.data[0] = enc_param->operationrate;
break;
case VENC_SET_PARAM_BITRATE_MODE:
out.data_item = 1;
out.data[0] = enc_param->bitratemode;
break;
case VENC_SET_PARAM_ROI_ON:
out.data_item = 1;
out.data[0] = enc_param->roion;
break;
case VENC_SET_PARAM_HEIF_GRID_SIZE:
out.data_item = 1;
out.data[0] = enc_param->heif_grid_size;
break;
case VENC_SET_PARAM_COLOR_DESC:
out.data_item = 0; // passed via vsi
break;
case VENC_SET_PARAM_SEC_MODE:
out.data_item = 1;
out.data[0] = enc_param->svp_mode;
break;
case VENC_SET_PARAM_TSVC:
out.data_item = 1;
out.data[0] = enc_param->tsvc;
break;
case VENC_SET_PARAM_ENABLE_HIGHQUALITY:
out.data_item = 1;
out.data[0] = enc_param->highquality;
break;
case VENC_SET_PARAM_ADJUST_MAX_QP:
out.data_item = 1;
out.data[0] = enc_param->max_qp;
break;
case VENC_SET_PARAM_ADJUST_MIN_QP:
out.data_item = 1;
out.data[0] = enc_param->min_qp;
break;
case VENC_SET_PARAM_ADJUST_I_P_QP_DELTA:
out.data_item = 1;
out.data[0] = enc_param->ip_qpdelta;
break;
case VENC_SET_PARAM_ADJUST_FRAME_LEVEL_QP:
out.data_item = 1;
out.data[0] = enc_param->framelvl_qp;
break;
case VENC_SET_PARAM_ADJUST_QP_CONTROL_MODE:
out.data_item = 1;
out.data[0] = enc_param->qp_control_mode;
break;
case VENC_SET_PARAM_ENABLE_DUMMY_NAL:
out.data_item = 1;
out.data[0] = enc_param->dummynal;
break;
case VENC_SET_PARAM_TEMPORAL_LAYER_CNT:
out.data_item = 2;
out.data[0] = enc_param->temporal_layer_pcount;
out.data[1] = enc_param->temporal_layer_bcount;
break;
default:
mtk_vcodec_err(inst, "id %d not supported", id);
return -EINVAL;
}
if (venc_vcp_ipi_send(inst, &out, sizeof(out), 0)) {
mtk_vcodec_err(inst,
"AP_IPIMSG_ENC_SET_PARAM %d fail", id);
return -EINVAL;
}
mtk_vcodec_debug(inst, "id %d <-", id);
return 0;
}
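/* set_param entry point of the interface: VENC_SET_PARAM_ENC copies the full
 * encode configuration into the VSI before notifying the firmware; most other
 * ids forward directly to vcp_enc_set_param().
 */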
static int venc_vcp_set_param(unsigned long handle,
enum venc_set_param_type type,
struct venc_enc_param *enc_prm)
{
int i;
int ret = 0;
struct venc_inst *inst = (struct venc_inst *)handle;
unsigned int fmt = 0;
if (inst == NULL)
return -EINVAL;
mtk_vcodec_debug(inst, "->type=%d", type);
switch (type) {
case VENC_SET_PARAM_ENC:
if (inst->vsi == NULL)
return -EINVAL;
inst->vsi->config.input_fourcc = enc_prm->input_yuv_fmt;
inst->vsi->config.bitrate = enc_prm->bitrate;
inst->vsi->config.pic_w = enc_prm->width;
inst->vsi->config.pic_h = enc_prm->height;
inst->vsi->config.buf_w = enc_prm->buf_width;
inst->vsi->config.buf_h = enc_prm->buf_height;
inst->vsi->config.gop_size = enc_prm->gop_size;
inst->vsi->config.framerate = enc_prm->frm_rate;
inst->vsi->config.intra_period = enc_prm->intra_period;
inst->vsi->config.operationrate = enc_prm->operationrate;
inst->vsi->config.bitratemode = enc_prm->bitratemode;
inst->vsi->config.roion = enc_prm->roion;
inst->vsi->config.scenario = enc_prm->scenario;
inst->vsi->config.prependheader = enc_prm->prependheader;
inst->vsi->config.heif_grid_size = enc_prm->heif_grid_size;
inst->vsi->config.max_w = enc_prm->max_w;
inst->vsi->config.max_h = enc_prm->max_h;
inst->vsi->config.num_b_frame = enc_prm->num_b_frame;
inst->vsi->config.slbc_ready = enc_prm->slbc_ready;
inst->vsi->config.slbc_addr = enc_prm->slbc_addr;
inst->vsi->config.i_qp = enc_prm->i_qp;
inst->vsi->config.p_qp = enc_prm->p_qp;
inst->vsi->config.b_qp = enc_prm->b_qp;
inst->vsi->config.svp_mode = enc_prm->svp_mode;
inst->vsi->config.tsvc = enc_prm->tsvc;
inst->vsi->config.highquality = enc_prm->highquality;
inst->vsi->config.max_qp = enc_prm->max_qp;
inst->vsi->config.min_qp = enc_prm->min_qp;
inst->vsi->config.i_p_qp_delta = enc_prm->ip_qpdelta;
inst->vsi->config.qp_control_mode = enc_prm->qp_control_mode;
inst->vsi->config.frame_level_qp = enc_prm->framelvl_qp;
inst->vsi->config.dummynal = enc_prm->dummynal;
inst->vsi->config.hier_ref_layer = enc_prm->hier_ref_layer;
inst->vsi->config.hier_ref_type = enc_prm->hier_ref_type;
inst->vsi->config.temporal_layer_pcount = enc_prm->temporal_layer_pcount;
inst->vsi->config.temporal_layer_bcount = enc_prm->temporal_layer_bcount;
inst->vsi->config.max_ltr_num = enc_prm->max_ltr_num;
inst->vsi->config.cb_qp_offset = enc_prm->cb_qp_offset;
inst->vsi->config.cr_qp_offset = enc_prm->cr_qp_offset;
if (enc_prm->color_desc) {
memcpy(&inst->vsi->config.color_desc,
enc_prm->color_desc,
sizeof(struct mtk_color_desc));
}
if (enc_prm->multi_ref) {
memcpy(&inst->vsi->config.multi_ref,
enc_prm->multi_ref,
sizeof(struct mtk_venc_multi_ref));
}
if (enc_prm->vui_info) {
memcpy(&inst->vsi->config.vui_info,
enc_prm->vui_info,
sizeof(struct mtk_venc_vui_info));
}
inst->vsi->config.slice_header_spacing =
enc_prm->slice_header_spacing;
fmt = inst->ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc;
mtk_vcodec_debug(inst, "fmt:%u", fmt);
if (fmt == V4L2_PIX_FMT_H264) {
inst->vsi->config.profile = enc_prm->profile;
inst->vsi->config.level = enc_prm->level;
} else if (fmt == V4L2_PIX_FMT_H265 ||
fmt == V4L2_PIX_FMT_HEIF) {
inst->vsi->config.profile =
venc_h265_get_profile(inst, enc_prm->profile);
inst->vsi->config.level =
venc_h265_get_level(inst, enc_prm->level,
enc_prm->tier);
} else if (fmt == V4L2_PIX_FMT_MPEG4) {
inst->vsi->config.profile =
venc_mpeg4_get_profile(inst, enc_prm->profile);
inst->vsi->config.level =
venc_mpeg4_get_level(inst, enc_prm->level);
}
inst->vsi->config.wfd = 0;
ret = vcp_enc_set_param(inst, type, enc_prm);
if (ret)
break;
for (i = 0; i < MTK_VCODEC_MAX_PLANES; i++) {
enc_prm->sizeimage[i] =
inst->vsi->sizeimage[i];
mtk_vcodec_debug(inst, "sizeimage[%d] size=0x%x", i,
enc_prm->sizeimage[i]);
}
inst->ctx->async_mode = !(inst->vsi->sync_mode);
break;
case VENC_SET_PARAM_PREPEND_HEADER:
inst->prepend_hdr = 1;
ret = vcp_enc_set_param(inst, type, enc_prm);
break;
case VENC_SET_PARAM_COLOR_DESC:
if (inst->vsi == NULL)
return -EINVAL;
memcpy(&inst->vsi->config.color_desc, enc_prm->color_desc,
sizeof(struct mtk_color_desc));
ret = vcp_enc_set_param(inst, type, enc_prm);
break;
case VENC_SET_PARAM_PROPERTY:
mtk_vcodec_debug(inst, "enc_prm->set_vcp_buf:%s", enc_prm->set_vcp_buf);
set_venc_vcp_data(inst, VENC_SET_PROP_MEM_ID, enc_prm->set_vcp_buf);
break;
case VENC_SET_PARAM_VCP_LOG_INFO:
mtk_vcodec_debug(inst, "enc_prm->set_vcp_buf:%s", enc_prm->set_vcp_buf);
set_venc_vcp_data(inst, VENC_VCP_LOG_INFO_ID, enc_prm->set_vcp_buf);
break;
default:
if (inst->vsi == NULL)
return -EINVAL;
ret = vcp_enc_set_param(inst, type, enc_prm);
inst->ctx->async_mode = !(inst->vsi->sync_mode);
break;
}
mtk_vcodec_debug_leave(inst);
return ret;
}
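/* Send AP_IPIMSG_ENC_DEINIT, drop the context from the device list, release any
 * buffers the firmware left allocated, and free the instance.
 */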
static int venc_vcp_deinit(unsigned long handle)
{
int ret = 0;
struct venc_inst *inst = (struct venc_inst *)handle;
struct venc_ap_ipi_msg_deinit out;
struct vcp_enc_mem_list *tmp = NULL;
struct list_head *p, *q;
struct device *dev = NULL;
memset(&out, 0, sizeof(out));
out.msg_id = AP_IPIMSG_ENC_DEINIT;
out.vcu_inst_addr = inst->vcu_inst.inst_addr;
out.ctx_id = inst->ctx->id;
mtk_vcodec_debug_enter(inst);
ret = venc_vcp_ipi_send(inst, &out, sizeof(out), 0);
mtk_vcodec_debug_leave(inst);
mtk_vcodec_del_ctx_list(inst->ctx);
mutex_lock(inst->vcu_inst.ctx_ipi_lock);
list_for_each_safe(p, q, &inst->vcu_inst.bufs) {
tmp = list_entry(p, struct vcp_enc_mem_list, list);
dev = get_dev_by_mem_type(inst, &tmp->mem);
mtk_vcodec_free_mem(&tmp->mem, dev, tmp->attach, tmp->sgt);
mtk_v4l2_debug(0, "[%d] leak free va 0x%llx pa 0x%llx iova 0x%llx len %d type %d",
inst->ctx->id, tmp->mem.va, tmp->mem.pa,
tmp->mem.iova, tmp->mem.len, tmp->mem.type);
list_del(p);
kfree(tmp);
}
mutex_unlock(inst->vcu_inst.ctx_ipi_lock);
mutex_destroy(inst->vcu_inst.ctx_ipi_lock);
kfree(inst->vcu_inst.ctx_ipi_lock);
kfree(inst);
return ret;
}
static const struct venc_common_if venc_vcp_if = {
.init = venc_vcp_init,
.encode = venc_vcp_encode,
.get_param = venc_vcp_get_param,
.set_param = venc_vcp_set_param,
.deinit = venc_vcp_deinit,
};
const struct venc_common_if *get_enc_vcp_if(void)
{
return &venc_vcp_if;
}