dwc3_probe
static int dwc3_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res, dwc_res;
struct dwc3 *dwc;
int ret;
void __iomem *regs;
int irq;
char dma_ipc_log_ctx_name[40];
if (count >= DWC_CTRL_COUNT) {
dev_err(dev, "Err dwc instance %d >= %d available\n",
count, DWC_CTRL_COUNT);
ret = -EINVAL;
return ret;
}
dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
if (!dwc)
return -ENOMEM;
dwc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "missing memory resource\n");
return -ENODEV;
}
dwc->reg_phys = res->start;
dwc->xhci_resources[0].start = res->start;
dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
DWC3_XHCI_REGS_END;
dwc->xhci_resources[0].flags = res->flags;
dwc->xhci_resources[0].name = res->name;
irq = platform_get_irq(to_platform_device(dwc->dev), 0);
if (irq < 0)
return irq;
ret = devm_request_irq(dev, irq, dwc3_interrupt, IRQF_SHARED, "dwc3",
dwc);
if (ret) {
dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
irq, ret);
return -ENODEV;
}
if (notify_event)
/* will be enabled in dwc3_msm_resume() */
disable_irq(irq);
dwc->irq = irq;
/*
* Request memory region but exclude xHCI regs,
* since it will be requested by the xhci-plat driver.
*/
dwc_res = *res;
dwc_res.start += DWC3_GLOBALS_REGS_START;
regs = devm_ioremap_resource(dev, &dwc_res);
if (IS_ERR(regs))
return PTR_ERR(regs);
dwc->dwc_wq = alloc_ordered_workqueue("dwc_wq", WQ_HIGHPRI);
if (!dwc->dwc_wq) {
dev_err(dev,
"%s: Unable to create workqueue dwc_wq\n", __func__);
ret = -ENOMEM;
goto err0;
}
INIT_WORK(&dwc->bh_work, dwc3_bh_work);
dwc->regs = regs;
dwc->regs_size = resource_size(&dwc_res);
dwc3_get_properties(dwc);
dwc->reset = devm_reset_control_array_get(dev, true, true);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
if (dev->of_node) {
ret = devm_clk_bulk_get_all(dev, &dwc->clks);
if (ret == -EPROBE_DEFER)
goto err0;
/*
* Clocks are optional, but new DT platforms should support all
* clocks as required by the DT-binding.
*/
if (ret < 0)
dwc->num_clks = 0;
else
dwc->num_clks = ret;
}
ret = dwc3_extract_num_phys(dwc);
if (ret) {
dev_err(dwc->dev, "Unable to extract number of PHYs\n");
goto err0;
}
dwc->usb2_phy = devm_kzalloc(dwc->dev,
sizeof(*dwc->usb2_phy) * dwc->num_hsphy, GFP_KERNEL);
dwc->usb3_phy = devm_kzalloc(dwc->dev,
sizeof(*dwc->usb3_phy) * dwc->num_ssphy, GFP_KERNEL);
if (!dwc->usb2_phy || !dwc->usb3_phy) {
ret = -ENOMEM;
goto err0;
}
ret = reset_control_deassert(dwc->reset);
if (ret)
goto err0;
ret = clk_bulk_prepare_enable(dwc->num_clks, dwc->clks);
if (ret)
goto assert_reset;
platform_set_drvdata(pdev, dwc);
init_waitqueue_head(&dwc->wait_linkstate);
spin_lock_init(&dwc->lock);
pm_runtime_no_callbacks(dev);
pm_runtime_set_active(dev);
if (dwc->enable_bus_suspend) {
pm_runtime_set_autosuspend_delay(dev,
DWC3_DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
}
pm_runtime_enable(dev);
pm_runtime_forbid(dev);
/* allocate the event buffers and their DMA addresses */
ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
if (ret) {
dev_err(dwc->dev, "failed to allocate event buffers\n");
ret = -ENOMEM;
goto err1;
}
ret = dwc3_alloc_scratch_buffers(dwc);
if (ret)
goto err2;
dwc3_debugfs_init(dwc);
if (!notify_event) {
ret = dwc3_core_init(dwc);
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to initialize core: %d\n",
ret);
goto err3;
}
/* write the event buffer's DMA address into the controller registers */
ret = dwc3_event_buffers_setup(dwc);
if (ret) {
dev_err(dwc->dev, "failed to setup event buffers\n");
goto err3;
}
ret = dwc3_core_init_mode(dwc);
if (ret) {
dwc3_event_buffers_cleanup(dwc);
goto err3;
}
} else if (dwc->dr_mode == USB_DR_MODE_OTG ||
dwc->dr_mode == USB_DR_MODE_PERIPHERAL) {
ret = dwc3_gadget_init(dwc);
if (ret) {
dev_err(dwc->dev, "gadget init failed %d\n", ret);
goto err3;
}
}
dwc->dwc_ipc_log_ctxt = ipc_log_context_create(NUM_LOG_PAGES,
dev_name(dwc->dev), 0);
if (!dwc->dwc_ipc_log_ctxt)
dev_dbg(dwc->dev, "ipc_log_ctxt is not available\n");
snprintf(dma_ipc_log_ctx_name, sizeof(dma_ipc_log_ctx_name),
"%s.ep_events", dev_name(dwc->dev));
dwc->dwc_dma_ipc_log_ctxt = ipc_log_context_create(2 * NUM_LOG_PAGES,
dma_ipc_log_ctx_name, 0);
if (!dwc->dwc_dma_ipc_log_ctxt)
dev_dbg(dwc->dev, "ipc_log_ctxt for ep_events is not available\n");
dwc3_instance[count] = dwc;
dwc->index = count;
count++;
pm_runtime_allow(dev);
return 0;
err3:
dwc3_debugfs_exit(dwc);
dwc3_free_scratch_buffers(dwc);
err2:
dwc3_free_event_buffers(dwc);
err1:
pm_runtime_allow(&pdev->dev);
pm_runtime_disable(&pdev->dev);
clk_bulk_disable_unprepare(dwc->num_clks, dwc->clks);
assert_reset:
reset_control_assert(dwc->reset);
destroy_workqueue(dwc->dwc_wq);
err0:
return ret;
}
dwc3_alloc_event_buffers
/**
* dwc3_alloc_event_buffers - Allocates the event buffer of size @length
* @dwc: pointer to our controller context structure
* @length: size of event buffer
*
* Returns 0 on success otherwise negative errno. In the error case, dwc
* may contain some buffers allocated but not all which were requested.
*/
static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
{
struct dwc3_event_buffer *evt;
evt = dwc3_alloc_one_event_buffer(dwc, length);
if (IS_ERR(evt)) {
dev_err(dwc->dev, "can't allocate event buffer\n");
return PTR_ERR(evt);
}
dwc->ev_buf = evt;
/* alloc GSI related event buffers */
dwc3_notify_event(dwc, DWC3_GSI_EVT_BUF_ALLOC, 0);
return 0;
}
dwc3_alloc_one_event_buffer
Creates the DMA buffer mapping: evt->buf receives the event buffer's CPU virtual address, evt->dma its DMA (bus) address.
/**
* dwc3_alloc_one_event_buffer - Allocates one event buffer structure
* @dwc: Pointer to our controller context structure
* @length: size of the event buffer
*
* Returns a pointer to the allocated event buffer structure on success
* otherwise ERR_PTR(errno).
*/
static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
unsigned length)
{
struct dwc3_event_buffer *evt;
evt = devm_kzalloc(dwc->dev, sizeof(*evt), GFP_KERNEL);
if (!evt)
return ERR_PTR(-ENOMEM);
evt->dwc = dwc;
evt->length = length;
evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
if (!evt->cache)
return ERR_PTR(-ENOMEM);
/* map a coherent DMA buffer: evt->buf is the virtual address, evt->dma the DMA address */
evt->buf = dma_alloc_coherent(dwc->sysdev, length,
&evt->dma, GFP_KERNEL);
if (!evt->buf)
return ERR_PTR(-ENOMEM);
return evt;
}
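For reference, the coherent DMA API used above always pairs an allocation with a matching free. A minimal self-contained sketch of the pattern (the device pointer and size are whatever the caller provides; this is not dwc3 code):

#include <linux/dma-mapping.h>

/* hypothetical helper: allocate and release one coherent buffer */
static int example_coherent_alloc(struct device *dev, size_t size)
{
	dma_addr_t dma;
	void *cpu;

	/* cpu is the kernel virtual address, dma the address the device uses */
	cpu = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* ... program 'dma' into the device, access the data through 'cpu' ... */

	dma_free_coherent(dev, size, cpu, dma);
	return 0;
}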
dwc3_event_buffers_setup
Writes the event buffer's DMA address into the controller registers.
/**
* dwc3_event_buffers_setup - setup our allocated event buffers
* @dwc: pointer to our controller context structure
*
* Returns 0 on success otherwise negative errno.
*/
int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
evt = dwc->ev_buf;
evt->lpos = 0;
/* program the event buffer's DMA address into the GEVNTADR registers */
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
lower_32_bits(evt->dma));
dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
upper_32_bits(evt->dma));
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
DWC3_GEVNTSIZ_SIZE(evt->length));
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
/* setup GSI related event buffers */
dwc3_notify_event(dwc, DWC3_GSI_EVT_BUF_SETUP, 0);
return 0;
}
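On the consumer side, the interrupt handler reads back how many event bytes the controller has produced and acknowledges them through the same register block. A simplified sketch, loosely modeled on dwc3's event-processing loop (event decoding and error handling omitted; the bulk advance of lpos is a simplification):

static void example_drain_event_buffer(struct dwc3 *dwc)
{
	struct dwc3_event_buffer *evt = dwc->ev_buf;
	u32 count;

	/* number of valid event bytes the controller has written */
	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
	count &= DWC3_GEVNTCOUNT_MASK;
	if (!count)
		return;

	/* ... decode 4-byte events from evt->buf starting at evt->lpos ... */

	/* advance the software read pointer past the consumed events */
	evt->lpos = (evt->lpos + count) % evt->length;

	/* writing the processed byte count back acknowledges the events */
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
}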
__dwc3_msm_ep_queue
Binds the request's DMA address to the endpoint (dwc3_ep).
static inline dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
struct dwc3_trb *trb)
{
u32 offset = (char *) trb - (char *) dep->trb_pool;
return dep->trb_pool_dma + offset;
}
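For example, if dep->trb_pool_dma is 0x1000 and trb points at the third entry of the pool, the offset is 2 * sizeof(struct dwc3_trb) = 32 bytes, so the function returns 0x1020.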
/**
* Helper function.
* See the header of the dwc3_msm_ep_queue function.
*
* @dwc3_ep - pointer to dwc3_ep instance.
* @req - pointer to dwc3_request instance.
*
* @return int - 0 on success, negative on error.
*/
static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
struct dwc3_trb *trb;
struct dwc3_trb *trb_link;
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
int ret = 0;
/* We push the request to the dep->started_list list to indicate that
* this request is issued with start transfer. The request will be out
* from this list in 2 cases. The first is that the transfer will be
* completed (not if the transfer is endless, using circular TRBs with
* a link TRB). The second case is an option to do a stop transfer,
* this can be initiated by the function driver when calling dequeue.
*/
req->status = DWC3_REQUEST_STATUS_STARTED;
/* link the request into dep->started_list */
list_add_tail(&req->list, &dep->started_list);
/* take the next free TRB from the endpoint's trb_pool */
/* First, prepare a normal TRB, point to the fake buffer */
trb = &dep->trb_pool[dep->trb_enqueue];
if (++dep->trb_enqueue == (DWC3_TRB_NUM - 1))
dep->trb_enqueue = 0;
memset(trb, 0, sizeof(*trb));
/* attach the TRB to the request */
req->trb = trb;
req->num_trbs = 1;
trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
/* the TRB's DMA address is dep->trb_pool_dma + offset */
req->trb_dma = dwc3_trb_dma_offset(dep, trb);
/* Second, prepare a Link TRB that points to the first TRB*/
trb_link = &dep->trb_pool[dep->trb_enqueue];
if (++dep->trb_enqueue == (DWC3_TRB_NUM - 1))
dep->trb_enqueue = 0;
memset(trb_link, 0, sizeof(*trb_link));
trb_link->bpl = lower_32_bits(req->trb_dma);
trb_link->bph = upper_32_bits(req->trb_dma) | DBM_TRB_BIT |
DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
trb_link->size = 0;
trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
/*
* Now start the transfer
*/
memset(&params, 0, sizeof(params));
params.param0 = upper_32_bits(req->trb_dma); /* TDAddr High */
params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
/* DBM requires IOC to be set */
cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
/* the START TRANSFER command writes the request's TRB DMA address to the controller */
ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
if (ret < 0) {
dev_dbg(dep->dwc->dev,
"%s: failed to send STARTTRANSFER command\n",
__func__);
list_del(&req->list);
return ret;
}
return ret;
}
dwc3_gadget_ep_cleanup_completed_requests
When the completion interrupt arrives, requests are taken straight from dep->started_list.
static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event, int status)
{
struct dwc3_request *req;
while (!list_empty(&dep->started_list)) {
int ret;
req = next_request(&dep->started_list);
ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
req, status);
if (ret)
break;
}
}
DPDK's IP fragmentation implementation uses a technique known as zero-copy, and the mechanism underneath it is scatter-gather DMA. DPDK manages fragmented packets as a chain: the data of a single packet is scattered across non-contiguous blocks (mbuf structures), so one DMA operation has to move data from several non-contiguous blocks. The transmit path of the e1000 driver is a classic example (code not reproduced here).
scatter-gather DMA vs. block DMA
A traditional block DMA engine can only transfer one physically contiguous block per operation and raises an interrupt when each transfer completes. Scatter-gather DMA can move several physically non-contiguous blocks in a single operation and raises only one interrupt at the end.
(Figures contrasting the two DMA models omitted.)
Source: https://blog.csdn.net/weixin_38006908/article/details/87375404
The benefit is clear: far fewer interrupts and much more efficient data transfer.
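In the kernel, this maps onto the streaming DMA API for scatterlists. A minimal sketch of the pattern, assuming dev, sgl and nents come from the caller (the descriptor-programming step is device-specific and elided):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* hypothetical helper: map a scatterlist and hand each segment to hardware */
static int example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -EFAULT;

	/* one descriptor per mapped segment; segments need not be contiguous */
	for_each_sg(sgl, sg, mapped, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* ... program (addr, len) into the device's descriptor ring ... */
	}

	/* unmap with the original nents, not the mapped count */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}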
/* ------------------------------------------------------------------------- */
#ifdef CONFIG_HAS_DMA
int usb_gadget_map_request_by_dev(struct device *dev,
struct usb_request *req, int is_in)
{
if (req->length == 0)
return 0;
if (req->num_sgs) {
int mapped;
mapped = dma_map_sg(dev, req->sg, req->num_sgs,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (mapped == 0) {
dev_err(dev, "failed to map SGs\n");
return -EFAULT;
}
req->num_mapped_sgs = mapped;
} else {
if (is_vmalloc_addr(req->buf)) {
dev_err(dev, "buffer is not dma capable\n");
return -EFAULT;
} else if (object_is_on_stack(req->buf)) {
dev_err(dev, "buffer is on stack\n");
return -EFAULT;
}
req->dma = dma_map_single(dev, req->buf, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (dma_mapping_error(dev, req->dma)) {
dev_err(dev, "failed to map buffer\n");
return -EFAULT;
}
req->dma_mapped = 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(usb_gadget_map_request_by_dev);
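It is typically the UDC driver, not the function driver, that calls this helper: the request is mapped when it is queued and unmapped when it is given back. A minimal sketch of that pairing, loosely modeled on dwc3's queue/giveback paths (simplified; the surrounding steps are elided):

/* sketch: map on queue, unmap on giveback (simplified) */
static int example_udc_queue(struct dwc3 *dwc, struct dwc3_ep *dep,
			     struct dwc3_request *req)
{
	int ret;

	/* fills req->request.dma, or maps req->request.sg for SG requests */
	ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
					    dep->direction);
	if (ret)
		return ret;

	/* ... build TRBs from the mapped DMA addresses, start the transfer ... */
	return 0;
}

/* on completion, the giveback path undoes the mapping */
static void example_udc_giveback(struct dwc3 *dwc, struct dwc3_ep *dep,
				 struct dwc3_request *req)
{
	usb_gadget_unmap_request_by_dev(dwc->sysdev, &req->request,
					dep->direction);
	/* ... invoke the request's completion callback ... */
}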
dwc3_gadget_ep_cleanup_completed_request
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
const struct dwc3_event_depevt *event,
struct dwc3_request *req, int status)
{
struct dwc3 *dwc = dep->dwc;
int request_status;
int ret;
/*
* If the HWO is set, it implies the TRB is still being
* processed by the core. Hence do not reclaim it until
* it is processed by the core.
*/
if (req->trb->ctrl & DWC3_TRB_CTRL_HWO) {
dbg_event(0xFF, "PEND TRB", dep->number);
return 1;
}
if (req->request.num_mapped_sgs)
ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
status);
else
ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
status);
req->request.actual = req->request.length - req->remaining;
if (!dwc3_gadget_ep_request_completed(req))
goto out;
if (req->needs_extra_trb) {
unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
status);
/* Reclaim MPS padding TRB for ZLP */
if (!req->direction && req->request.zero && req->request.length &&
!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
(IS_ALIGNED(req->request.length, maxp)))
ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status);
req->needs_extra_trb = false;
}
/*
* The event status only reflects the status of the TRB with IOC set.
* For the requests that don't set interrupt on completion, the driver
* needs to check and return the status of the completed TRBs associated
* with the request. Use the status of the last TRB of the request.
*/
if (req->request.no_interrupt) {
struct dwc3_trb *trb;
trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
case DWC3_TRBSTS_MISSED_ISOC:
/* Isoc endpoint only */
request_status = -EXDEV;
break;
case DWC3_TRB_STS_XFER_IN_PROG:
/* Applicable when End Transfer with ForceRM=0 */
case DWC3_TRBSTS_SETUP_PENDING:
/* Control endpoint only */
case DWC3_TRBSTS_OK:
default:
request_status = 0;
break;
}
} else {
request_status = status;
}
dwc3_gadget_giveback(dep, req, request_status);
out:
return ret;
}
Dequeue (VIDIOC_DQBUF)
/*
* I somehow feel that synchronisation won't be easy to achieve here. We have
* three events that control USB requests submission:
*
* - USB request completion: the completion handler will resubmit the request
* if a video buffer is available.
*
* - USB interface setting selection: in response to a SET_INTERFACE request,
* the handler will start streaming if a video buffer is available and if
* video is not currently streaming.
*
* - V4L2 buffer queueing: the driver will start streaming if video is not
* currently streaming.
*
* Race conditions between those 3 events might lead to deadlocks or other
* nasty side effects.
*
* The "video currently streaming" condition can't be detected by the irqqueue
* being empty, as a request can still be in flight. A separate "queue paused"
* flag is thus needed.
*
* The paused flag will be set when we try to retrieve the irqqueue head if the
* queue is empty, and cleared when we queue a buffer.
*
* The USB request completion handler will get the buffer at the irqqueue head
* under protection of the queue spinlock. If the queue is empty, the streaming
* paused flag will be set. Right after releasing the spinlock a userspace
* application can queue a buffer. The flag will then be cleared, and the ioctl
* handler will restart the video stream.
*/
static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
struct uvc_video *video = req->context;
struct uvc_video_queue *queue = &video->queue;
struct uvc_buffer *buf;
unsigned long flags;
int ret;
switch (req->status) {
case 0:
break;
case -ESHUTDOWN: /* disconnect from host. */
uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
uvcg_queue_cancel(queue, 1);
goto requeue;
default:
uvcg_warn(&video->uvc->func,
"VS request completed with status %d.\n",
req->status);
uvcg_queue_cancel(queue, 0);
goto requeue;
}
spin_lock_irqsave(&video->queue.irqlock, flags);
/* fetch the next video buffer from the irqqueue */
buf = uvcg_queue_head(&video->queue);
if (buf == NULL) {
spin_unlock_irqrestore(&video->queue.irqlock, flags);
goto requeue;
}
/* encode: copy the buffer's data into req->buf */
video->encode(req, video, buf);
ret = uvcg_video_ep_queue(video, req);
spin_unlock_irqrestore(&video->queue.irqlock, flags);
if (ret < 0) {
uvcg_queue_cancel(queue, 0);
goto requeue;
}
return;
requeue:
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
}
uvcg_queue_head
Fetches a uvc_buffer from queue->irqqueue.
struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
{
struct uvc_buffer *buf = NULL;
if (!list_empty(&queue->irqqueue))
buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue);
else
queue->flags |= UVC_QUEUE_PAUSED;
return buf;
}
uvc_video_encode_bulk
Copies the uvc_buffer's memory into req->buf.
static void
uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
struct uvc_buffer *buf)
{
void *mem = req->buf;
int len = video->req_size;
int ret;
/* Add a header at the beginning of the payload. */
if (video->payload_size == 0) {
ret = uvc_video_encode_header(video, buf, mem, len);
video->payload_size += ret;
mem += ret;
len -= ret;
}
/* Process video data. */
len = min((int)(video->max_payload_size - video->payload_size), len);
/* copy the data from buf into req->buf */
ret = uvc_video_encode_data(video, buf, mem, len);
video->payload_size += ret;
len -= ret;
req->length = video->req_size - len;
req->zero = video->payload_size == video->max_payload_size;
if (buf->bytesused == video->queue.buf_used) {
video->queue.buf_used = 0;
buf->state = UVC_BUF_STATE_DONE;
/* a fully consumed buffer moves from the queued list to the done list */
uvcg_queue_next_buffer(&video->queue, buf);
video->fid ^= UVC_STREAM_FID;
video->payload_size = 0;
}
if (video->payload_size == video->max_payload_size ||
buf->bytesused == video->queue.buf_used)
video->payload_size = 0;
}
uvc_video_encode_data
static int
uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
u8 *data, int len)
{
struct uvc_video_queue *queue = &video->queue;
unsigned int nbytes;
void *mem;
/* Copy video data to the USB buffer. */
mem = buf->mem + queue->buf_used;
nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);
/* copy from the uvc_buffer into req->buf */
memcpy(data, mem, nbytes);
/* advance the offset at which the next copy starts */
queue->buf_used += nbytes;
return nbytes;
}
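As a worked example: with buf->bytesused = 10000, queue->buf_used = 8000 and len = 3072, nbytes = min(3072, 2000) = 2000, so this request carries the final 2000 bytes of the video buffer and the next request starts on a fresh buffer.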
uvcg_queue_next_buffer
/* Called with &queue_irqlock held. */
struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf)
{
struct uvc_buffer *nextbuf;
if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
buf->length != buf->bytesused) {
buf->state = UVC_BUF_STATE_QUEUED;
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
return buf;
}
list_del(&buf->queue);
if (!list_empty(&queue->irqqueue))
nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue);
else
nextbuf = NULL;
buf->buf.field = V4L2_FIELD_NONE;
buf->buf.sequence = queue->sequence++;
buf->buf.vb2_buf.timestamp = ktime_get_ns();
vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
return nextbuf;
}
vb2_buffer_done
Adds vb->done_entry to q->done_list and wakes q->done_wq, which wakes __vb2_wait_for_done_vb on the dequeue path so the data can be delivered to the application.
Reference: "v4l2 buffer queueing (VIDIOC_QBUF) traced from userspace through the vivi driver" (CSDN blog). The dequeue path:
- vidioc_dqbuf
- vb2_ioctl_dqbuf
- vb2_dqbuf
- vb2_internal_dqbuf
- __vb2_get_done_vb
- __vb2_wait_for_done_vb
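From userspace, the same path is driven by the standard V4L2 ioctls. A minimal sketch of the queue/dequeue cycle against a UVC gadget video node (fd, buffer setup and streaming state are assumed to exist):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* hypothetical helper: cycle one MMAP buffer through a UVC gadget node */
static int example_qbuf_dqbuf(int fd)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; /* the gadget side outputs video */
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = 0;

	/* VIDIOC_QBUF ends up in uvc_buffer_queue(), adding buf to the irqqueue */
	if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
		return -1;

	/* VIDIOC_DQBUF blocks in __vb2_wait_for_done_vb() until
	 * vb2_buffer_done() moves the buffer to done_list and wakes done_wq */
	if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
		return -1;

	return 0;
}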
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
struct vb2_queue *q = vb->vb2_queue;
unsigned long flags;
unsigned int plane;
if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
return;
if (WARN_ON(state != VB2_BUF_STATE_DONE &&
state != VB2_BUF_STATE_ERROR &&
state != VB2_BUF_STATE_QUEUED))
state = VB2_BUF_STATE_ERROR;
#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
* Although this is not a callback, it still does have to balance
* with the buf_queue op. So update this counter manually.
*/
vb->cnt_buf_done++;
#endif
dprintk(4, "done processing on buffer %d, state: %d\n",
vb->index, state);
if (state != VB2_BUF_STATE_QUEUED) {
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish, vb->planes[plane].mem_priv);
vb->synced = 0;
}
spin_lock_irqsave(&q->done_lock, flags);
if (state == VB2_BUF_STATE_QUEUED) {
vb->state = VB2_BUF_STATE_QUEUED;
} else {
/* Add the buffer to the done buffers list */
list_add_tail(&vb->done_entry, &q->done_list);
vb->state = state;
}
atomic_dec(&q->owned_by_drv_count);
if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
media_request_object_unbind(&vb->req_obj);
media_request_object_put(&vb->req_obj);
}
spin_unlock_irqrestore(&q->done_lock, flags);
trace_vb2_buf_done(q, vb);
switch (state) {
case VB2_BUF_STATE_QUEUED:
return;
default:
/* Inform any processes that may be waiting for buffers */
wake_up(&q->done_wq);
break;
}
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
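The counterpart on the dequeue side sleeps on q->done_wq until this wake_up() fires. A simplified model of what __vb2_wait_for_done_vb does (locking and the many corner cases omitted):

#include <linux/wait.h>
#include <media/videobuf2-core.h>

/* simplified model of the dequeue-side wait */
static int example_wait_for_done(struct vb2_queue *q, bool nonblocking)
{
	if (!list_empty(&q->done_list))
		return 0;
	if (nonblocking)
		return -EAGAIN;

	/* sleeps until vb2_buffer_done() adds a buffer and wakes done_wq */
	return wait_event_interruptible(q->done_wq,
					!list_empty(&q->done_list));
}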
Queueing a buffer (VIDIOC_QBUF): vidioc_qbuf
ioctl -> uvc_v4l2_fops (v4l2_file_operations in /drivers/usb/gadget/function/uvc_v4l2.c) -> video_ioctl2 (in /drivers/media/v4l2-core/v4l2-ioctl.c) -> video_usercopy -> __video_do_ioctl -> v4l2_ioctls (a table; the handler matching the cmd is invoked) -> v4l_qbuf (the queue ioctl) -> ops->vidioc_qbuf(file, fh, p) -> uvc_v4l2_qbuf
int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
unsigned long flags;
int ret;
ret = vb2_qbuf(&queue->queue, NULL, buf);
if (ret < 0)
return ret;
spin_lock_irqsave(&queue->irqlock, flags);
ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
queue->flags &= ~UVC_QUEUE_PAUSED;
spin_unlock_irqrestore(&queue->irqlock, flags);
return ret;
}
From vb2_qbuf onwards (same CSDN reference as above):
-> vb2_qbuf
-> vb2_queue_or_prepare_buf
-> vb2_core_qbuf
-> __enqueue_in_driver
/*
* __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
*/
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
vb->state = VB2_BUF_STATE_ACTIVE;
atomic_inc(&q->owned_by_drv_count);
trace_vb2_buf_queue(q, vb);
call_void_vb_qop(vb, buf_queue, vb);
}
uvc_buffer_queue
Adds buf->queue to the queue's irqqueue.
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
unsigned long flags;
spin_lock_irqsave(&queue->irqlock, flags);
if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
/* add buf->queue to the irqqueue */
list_add_tail(&buf->queue, &queue->irqqueue);
} else {
/* If the device is disconnected return the buffer to userspace
* directly. The next QBUF call will fail with -ENODEV.
*/
buf->state = UVC_BUF_STATE_ERROR;
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&queue->irqlock, flags);
}