Skip to content

Commit 26d57c9

Browse files
committed
dma_mcux_edma: Split reload function into helpers
There are two completely different types of reload modes happening here, so we should split this function into two separate functions, because it was getting large and hard to read. This also removes one level of indentation. Signed-off-by: Declan Snyder <[email protected]>
1 parent 9c5bae7 commit 26d57c9

File tree

1 file changed

+126
-109
lines changed

1 file changed

+126
-109
lines changed

drivers/dma/dma_mcux_edma.c

Lines changed: 126 additions & 109 deletions
Original file line numberDiff line numberDiff line change
@@ -627,7 +627,7 @@ static void dma_mcux_edma_update_hw_tcd(const struct device *dev, uint32_t chann
627627
EDMA_HW_TCD_CSR(dev, channel) |= DMA_CSR_DREQ(1U);
628628
}
629629

630-
static int dma_mcux_edma_reload(const struct device *dev, uint32_t channel,
630+
static int edma_reload_loop(const struct device *dev, uint32_t channel,
631631
uint32_t src, uint32_t dst, size_t size)
632632
{
633633
struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
@@ -636,138 +636,155 @@ static int dma_mcux_edma_reload(const struct device *dev, uint32_t channel,
636636
uint32_t hw_id, sw_id;
637637
uint8_t pre_idx;
638638

639-
/* Lock the channel configuration */
640-
const unsigned int key = irq_lock();
641-
int ret = 0;
642-
643-
if (!data->transfer_settings.valid) {
644-
LOG_ERR("Invalid EDMA settings on initial config. Configure DMA before reload.");
645-
ret = -EFAULT;
646-
goto cleanup;
639+
if (data->transfer_settings.empty_tcds == 0) {
640+
LOG_ERR("TCD list is full in loop mode.");
641+
return -ENOBUFS;
647642
}
648643

649-
if (data->transfer_settings.cyclic) {
650-
if (data->transfer_settings.empty_tcds == 0) {
651-
LOG_ERR("TCD list is full in loop mode.");
652-
ret = -ENOBUFS;
653-
goto cleanup;
654-
}
644+
/* Convert size into major loop count */
645+
size = size / data->transfer_settings.dest_data_size;
655646

656-
/* Convert size into major loop count */
657-
size = size / data->transfer_settings.dest_data_size;
658-
659-
/* Previous TCD index in circular list */
660-
pre_idx = data->transfer_settings.write_idx - 1;
661-
if (pre_idx >= CONFIG_DMA_TCD_QUEUE_SIZE) {
662-
pre_idx = CONFIG_DMA_TCD_QUEUE_SIZE - 1;
663-
}
647+
/* Previous TCD index in circular list */
648+
pre_idx = data->transfer_settings.write_idx - 1;
649+
if (pre_idx >= CONFIG_DMA_TCD_QUEUE_SIZE) {
650+
pre_idx = CONFIG_DMA_TCD_QUEUE_SIZE - 1;
651+
}
664652

665-
/* Configure a TCD for the transfer */
666-
tcd = &(DEV_CFG(dev)->tcdpool[channel][data->transfer_settings.write_idx]);
667-
pre_tcd = &(DEV_CFG(dev)->tcdpool[channel][pre_idx]);
653+
/* Configure a TCD for the transfer */
654+
tcd = &(DEV_CFG(dev)->tcdpool[channel][data->transfer_settings.write_idx]);
655+
pre_tcd = &(DEV_CFG(dev)->tcdpool[channel][pre_idx]);
668656

669657
#if defined(FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET) && FSL_FEATURE_MEMORY_HAS_ADDRESS_OFFSET
670-
EDMA_TCD_SADDR(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) =
671-
MEMORY_ConvertMemoryMapAddress(src, kMEMORY_Local2DMA);
672-
EDMA_TCD_DADDR(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) =
673-
MEMORY_ConvertMemoryMapAddress(dst, kMEMORY_Local2DMA);
658+
EDMA_TCD_SADDR(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) =
659+
MEMORY_ConvertMemoryMapAddress(src, kMEMORY_Local2DMA);
660+
EDMA_TCD_DADDR(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) =
661+
MEMORY_ConvertMemoryMapAddress(dst, kMEMORY_Local2DMA);
674662
#else
675-
EDMA_TCD_SADDR(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) = src;
676-
EDMA_TCD_DADDR(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) = dst;
663+
EDMA_TCD_SADDR(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) = src;
664+
EDMA_TCD_DADDR(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) = dst;
677665
#endif
678-
EDMA_TCD_BITER(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) = size;
679-
EDMA_TCD_CITER(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) = size;
680-
/* Enable automatically stop */
681-
EDMA_TCD_CSR(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) |= DMA_CSR_DREQ(1U);
682-
sw_id = EDMA_TCD_DLAST_SGA(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev)));
683-
684-
/* Block the peripheral's hardware request trigger to prevent
685-
* starting the DMA before updating the TCDs. Make sure the
686-
* code between EDMA_DisableChannelRequest() and
687-
* EDMA_EnableChannelRequest() is minimum.
688-
*/
689-
EDMA_DisableChannelRequest(DEV_BASE(dev), channel);
666+
EDMA_TCD_BITER(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) = size;
667+
EDMA_TCD_CITER(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) = size;
668+
/* Enable automatically stop */
669+
EDMA_TCD_CSR(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) |= DMA_CSR_DREQ(1U);
670+
sw_id = EDMA_TCD_DLAST_SGA(tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev)));
671+
672+
/* Block the peripheral's hardware request trigger to prevent
673+
* starting the DMA before updating the TCDs. Make sure the
674+
* code between EDMA_DisableChannelRequest() and
675+
* EDMA_EnableChannelRequest() is minimum.
676+
*/
677+
EDMA_DisableChannelRequest(DEV_BASE(dev), channel);
678+
679+
/* Wait for the DMA to be inactive before updating the TCDs.
680+
* The CSR[ACTIVE] bit will deassert quickly after the EDMA's
681+
* minor loop burst completes.
682+
*/
683+
while (EDMA_HW_TCD_CSR(dev, channel) & EDMA_HW_TCD_CH_ACTIVE_MASK) {
684+
;
685+
}
690686

691-
/* Wait for the DMA to be inactive before updating the TCDs.
692-
* The CSR[ACTIVE] bit will deassert quickly after the EDMA's
693-
* minor loop burst completes.
687+
/* Identify the current active TCD. Use DLAST_SGA as the HW ID */
688+
hw_id = EDMA_GetNextTCDAddress(DEV_EDMA_HANDLE(dev, channel));
689+
if (data->transfer_settings.empty_tcds >= CONFIG_DMA_TCD_QUEUE_SIZE ||
690+
hw_id == sw_id) {
691+
/* All transfers have been done.DMA is stopped automatically,
692+
* invalid TCD has been loaded into the HW, update HW.
694693
*/
695-
while (EDMA_HW_TCD_CSR(dev, channel) & EDMA_HW_TCD_CH_ACTIVE_MASK) {
696-
;
697-
}
694+
dma_mcux_edma_update_hw_tcd(dev, channel, src, dst, size);
695+
LOG_DBG("Transfer done,auto stop");
698696

699-
/* Identify the current active TCD. Use DLAST_SGA as the HW ID */
700-
hw_id = EDMA_GetNextTCDAddress(DEV_EDMA_HANDLE(dev, channel));
701-
if (data->transfer_settings.empty_tcds >= CONFIG_DMA_TCD_QUEUE_SIZE ||
702-
hw_id == sw_id) {
703-
/* All transfers have been done.DMA is stopped automatically,
704-
* invalid TCD has been loaded into the HW, update HW.
705-
*/
706-
dma_mcux_edma_update_hw_tcd(dev, channel, src, dst, size);
707-
LOG_DBG("Transfer done,auto stop");
697+
} else {
698+
/* Previous TCD can automatically start this TCD.
699+
* Enable the peripheral DMA request in the previous TCD
700+
*/
701+
EDMA_TCD_CSR(pre_tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) &=
702+
~DMA_CSR_DREQ(1U);
708703

709-
} else {
710-
/* Previous TCD can automatically start this TCD.
711-
* Enable the peripheral DMA request in the previous TCD
704+
if (data->transfer_settings.empty_tcds == CONFIG_DMA_TCD_QUEUE_SIZE - 1 ||
705+
hw_id == (uint32_t)tcd) {
706+
/* DMA is running on last transfer. HW has loaded the last one,
707+
* we need ensure it's DREQ is cleared.
712708
*/
713-
EDMA_TCD_CSR(pre_tcd, EDMA_TCD_TYPE((void *)DEV_BASE(dev))) &=
714-
~DMA_CSR_DREQ(1U);
715-
716-
if (data->transfer_settings.empty_tcds == CONFIG_DMA_TCD_QUEUE_SIZE - 1 ||
717-
hw_id == (uint32_t)tcd) {
718-
/* DMA is running on last transfer. HW has loaded the last one,
719-
* we need ensure it's DREQ is cleared.
720-
*/
721-
EDMA_EnableAutoStopRequest(DEV_BASE(dev), channel, false);
722-
LOG_DBG("Last transfer.");
723-
}
724-
LOG_DBG("Manu stop");
709+
EDMA_EnableAutoStopRequest(DEV_BASE(dev), channel, false);
710+
LOG_DBG("Last transfer.");
725711
}
712+
LOG_DBG("Manu stop");
713+
}
726714

727715
#ifdef CONFIG_DMA_MCUX_EDMA
728-
/* It seems that there is HW issue which may cause ESG bit is cleared.
729-
* This is a workaround. Clear the DONE bit before setting ESG bit.
730-
*/
731-
EDMA_ClearChannelStatusFlags(DEV_BASE(dev), channel, kEDMA_DoneFlag);
732-
EDMA_HW_TCD_CSR(dev, channel) |= DMA_CSR_ESG_MASK;
716+
/* It seems that there is HW issue which may cause ESG bit is cleared.
717+
* This is a workaround. Clear the DONE bit before setting ESG bit.
718+
*/
719+
EDMA_ClearChannelStatusFlags(DEV_BASE(dev), channel, kEDMA_DoneFlag);
720+
EDMA_HW_TCD_CSR(dev, channel) |= DMA_CSR_ESG_MASK;
733721
#elif (CONFIG_DMA_MCUX_EDMA_V3 || CONFIG_DMA_MCUX_EDMA_V4 || CONFIG_DMA_MCUX_EDMA_V5)
734-
/*We have not verified if this issue exist on V3/V4 HW, jut place a holder here. */
722+
/*We have not verified if this issue exist on V3/V4 HW, jut place a holder here. */
735723
#endif
736-
/* TCDs are configured. Resume DMA */
737-
EDMA_EnableChannelRequest(DEV_BASE(dev), channel);
724+
/* TCDs are configured. Resume DMA */
725+
EDMA_EnableChannelRequest(DEV_BASE(dev), channel);
738726

739-
/* Update the write index and available TCD numbers. */
740-
data->transfer_settings.write_idx =
741-
(data->transfer_settings.write_idx + 1) % CONFIG_DMA_TCD_QUEUE_SIZE;
742-
data->transfer_settings.empty_tcds--;
727+
/* Update the write index and available TCD numbers. */
728+
data->transfer_settings.write_idx =
729+
(data->transfer_settings.write_idx + 1) % CONFIG_DMA_TCD_QUEUE_SIZE;
730+
data->transfer_settings.empty_tcds--;
743731

744-
LOG_DBG("w_idx:%d no:%d(ch:%d)", data->transfer_settings.write_idx,
745-
data->transfer_settings.empty_tcds, channel);
732+
LOG_DBG("w_idx:%d no:%d(ch:%d)", data->transfer_settings.write_idx,
733+
data->transfer_settings.empty_tcds, channel);
746734

747-
} else {
748-
/* Dynamice Scatter/Gather mode:
749-
* If the tcdPool is not in use (no s/g) then only a single TCD
750-
* can be active at once.
751-
*/
752-
if (data->busy && data->edma_handle.tcdPool == NULL) {
753-
LOG_ERR("EDMA busy. Wait until the transfer completes before reloading.");
754-
ret = -EBUSY;
755-
goto cleanup;
756-
}
735+
return 0;
736+
}
757737

758-
EDMA_PrepareTransfer(&(data->transferConfig), (void *)src,
759-
data->transfer_settings.source_data_size, (void *)dst,
760-
data->transfer_settings.dest_data_size,
761-
data->transfer_settings.source_burst_length, size,
762-
data->transfer_settings.transfer_type);
738+
static int edma_reload_dynamic(const struct device *dev, uint32_t channel,
739+
uint32_t src, uint32_t dst, size_t size)
740+
{
741+
struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
763742

764-
const status_t submit_status =
765-
EDMA_SubmitTransfer(DEV_EDMA_HANDLE(dev, channel), &(data->transferConfig));
743+
/* Dynamice Scatter/Gather mode:
744+
* If the tcdPool is not in use (no s/g) then only a single TCD
745+
* can be active at once.
746+
*/
747+
if (data->busy && data->edma_handle.tcdPool == NULL) {
748+
LOG_ERR("EDMA busy. Wait until the transfer completes before reloading.");
749+
return -EBUSY;
750+
}
766751

767-
if (submit_status != kStatus_Success) {
768-
LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
769-
ret = -EFAULT;
770-
}
752+
EDMA_PrepareTransfer(&(data->transferConfig), (void *)src,
753+
data->transfer_settings.source_data_size, (void *)dst,
754+
data->transfer_settings.dest_data_size,
755+
data->transfer_settings.source_burst_length, size,
756+
data->transfer_settings.transfer_type);
757+
758+
const status_t submit_status =
759+
EDMA_SubmitTransfer(DEV_EDMA_HANDLE(dev, channel), &(data->transferConfig));
760+
761+
if (submit_status != kStatus_Success) {
762+
LOG_ERR("Error submitting EDMA Transfer: 0x%x", submit_status);
763+
return -EFAULT;
764+
}
765+
766+
return 0;
767+
}
768+
769+
static int dma_mcux_edma_reload(const struct device *dev, uint32_t channel,
770+
uint32_t src, uint32_t dst, size_t size)
771+
{
772+
struct call_back *data = DEV_CHANNEL_DATA(dev, channel);
773+
int ret = 0;
774+
775+
/* Lock the channel configuration */
776+
const unsigned int key = irq_lock();
777+
778+
if (!data->transfer_settings.valid) {
779+
LOG_ERR("Invalid EDMA settings on initial config. Configure DMA before reload.");
780+
ret = -EFAULT;
781+
goto cleanup;
782+
}
783+
784+
if (data->transfer_settings.cyclic) {
785+
ret = edma_reload_loop(dev, channel, src, dst, size);
786+
} else {
787+
ret = edma_reload_dynamic(dev, channel, src, dst, size);
771788
}
772789

773790
cleanup:

0 commit comments

Comments
 (0)