
Commit 39601f4

Mani-Sadhasivam authored and gregkh committed
bus: mhi: ep: Add support for async DMA read operation
[ Upstream commit 2547beb ]

As with the async DMA write operation, add support for an async DMA read
operation. In the async path, data is read from the transfer ring
continuously, and when the controller driver notifies the stack through the
completion callback (mhi_ep_read_completion), the client driver is notified
of the read data and the completion event is sent to the host for the
respective ring element (if requested by the host).

Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Stable-dep-of: c7d0b2d ("bus: mhi: ep: Do not allocate memory for MHI objects from DMA zone")
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent b6af3a9 commit 39601f4
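For readers unfamiliar with the read_async contract this patch depends on: the endpoint stack fills a struct mhi_ep_buf_info (host address, destination buffer, size, completion callback) and hands it to the controller driver, which performs the DMA and then invokes buf_info->cb, i.e. the new mhi_ep_read_completion(). The sketch below shows that contract in its simplest possible form; it is not the actual controller implementation, and the names my_ctrl_read_async(), my_map_host_addr() and my_unmap_host_addr() are hypothetical.

/*
 * Illustrative sketch only (not part of this commit): a minimal
 * controller-side read_async() hook. my_map_host_addr() and
 * my_unmap_host_addr() are made-up helpers; real controller drivers
 * (e.g. the PCI EPF glue) start a hardware DMA transfer here and call
 * buf_info->cb() from their DMA completion handler instead of inline.
 */
#include <linux/io.h>
#include <linux/mhi_ep.h>

static int my_ctrl_read_async(struct mhi_ep_cntrl *mhi_cntrl,
			      struct mhi_ep_buf_info *buf_info)
{
	void __iomem *src;

	/* Map the host-side buffer described by the TRE (hypothetical helper) */
	src = my_map_host_addr(mhi_cntrl, buf_info->host_addr, buf_info->size);
	if (!src)
		return -ENOMEM;

	/* Move the data into the buffer allocated by the MHI EP stack */
	memcpy_fromio(buf_info->dev_addr, src, buf_info->size);
	my_unmap_host_addr(mhi_cntrl, src);

	/*
	 * Signal completion: for reads this ends up in mhi_ep_read_completion(),
	 * which calls the client's xfer_cb(), sends the EOB/EOT completion event
	 * to the host (unless buf_info->code is MHI_EV_CC_OVERFLOW) and frees
	 * the per-transfer buffer.
	 */
	if (buf_info->cb)
		buf_info->cb(buf_info);

	return 0;
}

With such a hook wired into mhi_cntrl->read_async, the loop in mhi_ep_read_channel() can queue one TRE-sized buffer per transfer and return immediately; freeing of that buffer then happens in the completion path instead of in mhi_ep_process_ch_ring(), which is why the allocation and cleanup move in the diff below.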

File tree

1 file changed (+89, -73 lines)

drivers/bus/mhi/ep/main.c

@@ -318,17 +318,81 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
 }
 EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
 
+static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info)
+{
+	struct mhi_ep_device *mhi_dev = buf_info->mhi_dev;
+	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan;
+	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+	struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset];
+	struct mhi_result result = {};
+	int ret;
+
+	if (mhi_chan->xfer_cb) {
+		result.buf_addr = buf_info->cb_buf;
+		result.dir = mhi_chan->dir;
+		result.bytes_xferd = buf_info->size;
+
+		mhi_chan->xfer_cb(mhi_dev, &result);
+	}
+
+	/*
+	 * The host will split the data packet into multiple TREs if it can't fit
+	 * the packet in a single TRE. In that case, CHAIN flag will be set by the
+	 * host for all TREs except the last one.
+	 */
+	if (buf_info->code != MHI_EV_CC_OVERFLOW) {
+		if (MHI_TRE_DATA_GET_CHAIN(el)) {
+			/*
+			 * IEOB (Interrupt on End of Block) flag will be set by the host if
+			 * it expects the completion event for all TREs of a TD.
+			 */
+			if (MHI_TRE_DATA_GET_IEOB(el)) {
+				ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+								   MHI_TRE_DATA_GET_LEN(el),
+								   MHI_EV_CC_EOB);
+				if (ret < 0) {
+					dev_err(&mhi_chan->mhi_dev->dev,
+						"Error sending transfer compl. event\n");
+					goto err_free_tre_buf;
+				}
+			}
+		} else {
+			/*
+			 * IEOT (Interrupt on End of Transfer) flag will be set by the host
+			 * for the last TRE of the TD and expects the completion event for
+			 * the same.
+			 */
+			if (MHI_TRE_DATA_GET_IEOT(el)) {
+				ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
+								   MHI_TRE_DATA_GET_LEN(el),
+								   MHI_EV_CC_EOT);
+				if (ret < 0) {
+					dev_err(&mhi_chan->mhi_dev->dev,
+						"Error sending transfer compl. event\n");
+					goto err_free_tre_buf;
+				}
+			}
+		}
+	}
+
+	mhi_ep_ring_inc_index(ring);
+
+err_free_tre_buf:
+	kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf);
+}
+
 static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
-			       struct mhi_ep_ring *ring,
-			       struct mhi_result *result,
-			       u32 len)
+			       struct mhi_ep_ring *ring)
 {
 	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	size_t tr_len, read_offset, write_offset;
 	struct mhi_ep_buf_info buf_info = {};
+	u32 len = MHI_EP_DEFAULT_MTU;
 	struct mhi_ring_element *el;
 	bool tr_done = false;
+	void *buf_addr;
 	u32 buf_left;
 	int ret;
 
@@ -358,83 +422,50 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
 		write_offset = len - buf_left;
 
+		buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+		if (!buf_addr)
+			return -ENOMEM;
+
 		buf_info.host_addr = mhi_chan->tre_loc + read_offset;
-		buf_info.dev_addr = result->buf_addr + write_offset;
+		buf_info.dev_addr = buf_addr + write_offset;
 		buf_info.size = tr_len;
+		buf_info.cb = mhi_ep_read_completion;
+		buf_info.cb_buf = buf_addr;
+		buf_info.mhi_dev = mhi_chan->mhi_dev;
+
+		if (mhi_chan->tre_bytes_left - tr_len)
+			buf_info.code = MHI_EV_CC_OVERFLOW;
 
 		dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
-		ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
+		ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info);
 		if (ret < 0) {
 			dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
-			return ret;
+			goto err_free_buf_addr;
 		}
 
 		buf_left -= tr_len;
 		mhi_chan->tre_bytes_left -= tr_len;
 
-		/*
-		 * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
-		 * read completely:
-		 *
-		 * 1. Send completion event to the host based on the flags set in TRE.
-		 * 2. Increment the local read offset of the transfer ring.
-		 */
 		if (!mhi_chan->tre_bytes_left) {
-			/*
-			 * The host will split the data packet into multiple TREs if it can't fit
-			 * the packet in a single TRE. In that case, CHAIN flag will be set by the
-			 * host for all TREs except the last one.
-			 */
-			if (MHI_TRE_DATA_GET_CHAIN(el)) {
-				/*
-				 * IEOB (Interrupt on End of Block) flag will be set by the host if
-				 * it expects the completion event for all TREs of a TD.
-				 */
-				if (MHI_TRE_DATA_GET_IEOB(el)) {
-					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
-									   MHI_TRE_DATA_GET_LEN(el),
-									   MHI_EV_CC_EOB);
-					if (ret < 0) {
-						dev_err(&mhi_chan->mhi_dev->dev,
-							"Error sending transfer compl. event\n");
-						return ret;
-					}
-				}
-			} else {
-				/*
-				 * IEOT (Interrupt on End of Transfer) flag will be set by the host
-				 * for the last TRE of the TD and expects the completion event for
-				 * the same.
-				 */
-				if (MHI_TRE_DATA_GET_IEOT(el)) {
-					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
-									   MHI_TRE_DATA_GET_LEN(el),
-									   MHI_EV_CC_EOT);
-					if (ret < 0) {
-						dev_err(&mhi_chan->mhi_dev->dev,
-							"Error sending transfer compl. event\n");
-						return ret;
-					}
-				}
-
+			if (MHI_TRE_DATA_GET_IEOT(el))
 				tr_done = true;
-			}
 
 			mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
-			mhi_ep_ring_inc_index(ring);
 		}
-
-		result->bytes_xferd += tr_len;
 	} while (buf_left && !tr_done);
 
 	return 0;
+
+err_free_buf_addr:
+	kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr);
+
+	return ret;
 }
 
-static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
+static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
 {
 	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
 	struct mhi_result result = {};
-	u32 len = MHI_EP_DEFAULT_MTU;
 	struct mhi_ep_chan *mhi_chan;
 	int ret;
 
@@ -455,27 +486,15 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
 		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
 	} else {
 		/* UL channel */
-		result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
-		if (!result.buf_addr)
-			return -ENOMEM;
-
 		do {
-			ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+			ret = mhi_ep_read_channel(mhi_cntrl, ring);
 			if (ret < 0) {
 				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
-				kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
 				return ret;
 			}
 
-			result.dir = mhi_chan->dir;
-			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
-			result.bytes_xferd = 0;
-			memset(result.buf_addr, 0, len);
-
 			/* Read until the ring becomes empty */
 		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
-
-		kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
 	}
 
 	return 0;
@@ -781,7 +800,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
 	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	struct mhi_ep_ring_item *itr, *tmp;
-	struct mhi_ring_element *el;
 	struct mhi_ep_ring *ring;
 	struct mhi_ep_chan *chan;
 	unsigned long flags;
@@ -826,10 +844,8 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
 			continue;
 		}
 
-		el = &ring->ring_cache[ring->rd_offset];
-
 		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
-		ret = mhi_ep_process_ch_ring(ring, el);
+		ret = mhi_ep_process_ch_ring(ring);
 		if (ret) {
 			dev_err(dev, "Error processing ring for channel (%u): %d\n",
 				ring->ch_id, ret);
