dma_map_sg

dma_map_sg()

#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir,
                                   unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        int i, ents;
        struct scatterlist *s;

        for_each_sg(sg, s, nents, i)
                kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}
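
A minimal usage sketch (not from udrm; my_hw_queue_seg() and my_hw_run() are hypothetical helpers) showing the contract a driver has to follow, including unmapping with the original nents:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int my_start_dma(struct device *dev, struct scatterlist *sgl, int nents)
{
        struct scatterlist *s;
        int i, ents;

        ents = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        if (!ents)
                return -ENOMEM;         /* 0 means failure, never negative */

        /* An IOMMU may merge entries, so iterate over the returned
         * count, not nents. */
        for_each_sg(sgl, s, ents, i)
                my_hw_queue_seg(dev, sg_dma_address(s), sg_dma_len(s));

        my_hw_run(dev);                 /* hypothetical: perform the transfer */

        /* Unmap with the nents originally passed in, not ents. */
        dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        return 0;
}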

arm_dma_map_sg()

struct dma_map_ops arm_dma_ops = {
        .map_sg                 = arm_dma_map_sg,
        .map_page               = arm_dma_map_page,
};

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        struct scatterlist *s;
        int i, j;

        for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                s->dma_length = s->length;
#endif
                s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
                                                s->length, dir, attrs);
                if (dma_mapping_error(dev, s->dma_address))
                        goto bad_mapping;
        }
        return nents;

 bad_mapping:
        for_each_sg(sg, s, i, j)
                ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
        return 0;
}
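
Note the unwind path: on a mapping error, for_each_sg(sg, s, i, j) walks only the i entries mapped so far, unmaps each one, and the function returns 0 as the dma_map_sg() contract above requires. Also, since every entry goes through ops->map_page() individually, the returned count here always equals nents on success; there is no entry merging in this path.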

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_page_cpu_to_dev(page, offset, size, dir);
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
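
The bus address is computed with pfn_to_dma(). For reference, the ARM implementation of this era (arch/arm/include/asm/dma-mapping.h) applies the per-device dma_pfn_offset, which the Raspberry Pi device tree establishes via dma-ranges so that peripherals address SDRAM through the 0xC0000000 bus alias:

static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        if (dev)
                pfn -= dev->dma_pfn_offset;
        return (dma_addr_t)__pfn_to_bus(pfn);
}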

Cache control

__dma_page_cpu_to_dev()

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr;

        dma_cache_maint_page(page, off, size, dir, dmac_map_area);

        paddr = page_to_phys(page) + off;
        if (dir == DMA_FROM_DEVICE) {
                outer_inv_range(paddr, paddr + size);
        } else {
                outer_clean_range(paddr, paddr + size);
        }
        /* FIXME: non-speculating: flush on bidirectional mappings? */
}
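
The dma_sync_* helpers the comment refers to funnel into this same function. A minimal sketch of the ownership handshake for a long-lived DMA_FROM_DEVICE buffer (my_hw_start_rx() and my_hw_wait_rx() are hypothetical):

#include <linux/dma-mapping.h>

static void my_rx_once(struct device *dev, dma_addr_t handle, size_t len)
{
        /* Give the buffer to the device; for DMA_FROM_DEVICE this ends
         * up in __dma_page_cpu_to_dev(), invalidating the range. */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

        my_hw_start_rx(dev, handle, len);
        my_hw_wait_rx(dev);

        /* Take ownership back before the CPU reads the buffer. */
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
}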

static void dma_cache_maint_page(struct page *page, unsigned long offset,
        size_t size, enum dma_data_direction dir,
        void (*op)(const void *, size_t, int))
{
        unsigned long pfn;
        size_t left = size;

        pfn = page_to_pfn(page) + offset / PAGE_SIZE;
        offset %= PAGE_SIZE;

        /*
         * A single sg entry may refer to multiple physically contiguous
         * pages.  But we still need to process highmem pages individually.
         * If highmem is not configured then the bulk of this loop gets
         * optimized out.
         */
        do {
                size_t len = left;
                void *vaddr;

                page = pfn_to_page(pfn);

                if (PageHighMem(page)) {
<snip>
                } else {
                        vaddr = page_address(page) + offset;
                        op(vaddr, len, dir);
                }
                offset = 0;
                pfn++;
                left -= len;
        } while (left);
}
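
In the lowmem path shown, len stays equal to left, so the whole remaining area is handed to op in one call and the loop exits. The elided highmem branch instead clamps len to the current page and maps it temporarily (kmap_atomic()) before applying op, since highmem pages have no permanent kernel mapping.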

outer_inv_range() and outer_clean_range() are no-ops on the Raspberry Pi: no outer cache controller is registered, so all maintenance happens through the CP15 data-cache operations selected below.

  • Raspberry Pi 1: CONFIG_CPU_V6
  • Raspberry Pi 2/3: CONFIG_CPU_V7
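
The cache model is selected at build time in arch/arm/include/asm/glue-cache.h: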

/*
 *      Cache Model
 *      ===========
 */
#undef _CACHE
#undef MULTI_CACHE


#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v6
# endif
#endif

#if defined(CONFIG_CPU_V7)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v7
# endif
#endif

dmac_map_area()

#ifndef MULTI_CACHE
#define dmac_map_area                   __glue(_CACHE,_dma_map_area)
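
With a single cache model configured, __glue() pastes the two tokens together at preprocessing time, so dmac_map_area resolves directly to v7_dma_map_area with no indirection. A standalone user-space demo of the mechanism (the paste macros match arch/arm/include/asm/glue.h; the printf body is invented):

#include <stdio.h>

/* Two-level paste: the extra level forces _CACHE to expand to v7
 * before ## concatenates the tokens. */
#define ____glue(name, fn)      name##fn
#define __glue(name, fn)        ____glue(name, fn)

#define _CACHE v7
#define dmac_map_area           __glue(_CACHE, _dma_map_area)

static void v7_dma_map_area(const void *start, unsigned long size, int dir)
{
        printf("v7_dma_map_area(%p, %lu, %d)\n", start, size, dir);
}

int main(void)
{
        char buf[64];

        dmac_map_area(buf, sizeof(buf), 0);     /* expands to v7_dma_map_area(...) */
        return 0;
}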

arch/arm/mm/cache-v7.S

/*
 *      dma_map_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
ENTRY(v7_dma_map_area)
        add     r1, r1, r0
        teq     r2, #DMA_FROM_DEVICE
        beq     v7_dma_inv_range
        b       v7_dma_clean_range
ENDPROC(v7_dma_map_area)

ARM assembler:

  • TEQ tests whether two values are equal without affecting the V or C flags (unlike CMP, which updates them).
  • beq: branch if equal (Z flag set)

In pseudocode, v7_dma_map_area is:

	If dir == DMA_FROM_DEVICE
		Invalidate the data cache over the range
	Else
		Clean the data cache over the range
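
The same decision rendered in C for readability (a sketch only; the real entry point stays in assembly and receives virtual addresses):

static void v7_dma_map_area_in_c(const void *start, size_t size,
                                 enum dma_data_direction dir)
{
        const void *end = start + size;         /* add r1, r1, r0 */

        if (dir == DMA_FROM_DEVICE)             /* teq r2, #DMA_FROM_DEVICE */
                v7_dma_inv_range(start, end);   /* device writes: discard stale lines */
        else
                v7_dma_clean_range(start, end); /* device reads: write back dirty lines */
}
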
/*
 *      v7_dma_inv_range(start,end)
 *
 *      Invalidate the data cache within the specified region; we will
 *      be performing a DMA operation in this region and we want to
 *      purge old data in the cache.
 *
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 */
v7_dma_inv_range:
        dcache_line_size r2, r3
        sub     r3, r2, #1
        tst     r0, r3
        bic     r0, r0, r3
        mcrne   p15, 0, r0, c7, c14, 1          @ clean & invalidate D / U line

        tst     r1, r3
        bic     r1, r1, r3
        mcrne   p15, 0, r1, c7, c14, 1          @ clean & invalidate D / U line
1:
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7_dma_inv_range)

/*
 *      v7_dma_clean_range(start,end)
 *      - start   - virtual start address of region
 *      - end     - virtual end address of region
 */
v7_dma_clean_range:
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        mcr     p15, 0, r0, c7, c10, 1          @ clean D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb     st
        ret     lr
ENDPROC(v7_dma_clean_range)
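
Both routines walk the region one cache line at a time with MCR cache-maintenance instructions. Only the invalidate path has to special-case partially covered lines at either end: those are cleaned and invalidated so that neighbouring data sharing the line is written back rather than destroyed. A rough C model (dcache_line_size() and the *_line()/dsb() helpers are stand-ins for the assembly):

static void dma_inv_range_model(unsigned long start, unsigned long end)
{
        unsigned long line = dcache_line_size();        /* e.g. 64 bytes */
        unsigned long mask = line - 1;

        if (start & mask) {                     /* partial line at the head */
                start &= ~mask;
                clean_and_inv_dcache_line(start);
        }
        if (end & mask) {                       /* partial line at the tail */
                end &= ~mask;
                clean_and_inv_dcache_line(end);
        }
        for (; start < end; start += line)
                inv_dcache_line(start);         /* discard whole lines */
        dsb();                                  /* order against the DMA master */
}

static void dma_clean_range_model(unsigned long start, unsigned long end)
{
        unsigned long line = dcache_line_size();
        unsigned long mask = line - 1;

        for (start &= ~mask; start < end; start += line)
                clean_dcache_line(start);       /* write back, lines stay valid */
        dsb();
}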