Mirror of https://github.com/Fishwaldo/Star64_linux.git, synced 2025-06-27 00:51:35 +00:00
dma-pool: add additional coherent pools to map to gfp mask
The single atomic pool is allocated from the lowest zone possible since
it is guaranteed to be applicable for any DMA allocation.

Devices may allocate through the DMA API but not have a strict reliance
on GFP_DMA memory. Since the atomic pool will be used for all
non-blockable allocations, returning all memory from ZONE_DMA may
unnecessarily deplete the zone.

Provision for multiple atomic pools that will map to the optimal gfp
mask of the device. When allocating non-blockable memory, determine the
optimal gfp mask of the device and use the appropriate atomic pool.

The coherent DMA mask will remain the same between allocation and free
and, thus, memory will be freed to the same atomic pool it was
allocated from.

__dma_atomic_pool_init() will be changed to return struct gen_pool *
later once dynamic expansion is added.

Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
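To make the pool mapping concrete, here is a minimal userspace sketch of the idea: one atomic pool per zone restriction, chosen from the device's coherent DMA mask. All sk_* names and the 24-bit ZONE_DMA width are illustrative assumptions of the sketch, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for the per-zone atomic pools */
enum sk_pool { SK_POOL_DMA, SK_POOL_DMA32, SK_POOL_KERNEL };

/* pick the most restrictive pool the device actually needs */
static enum sk_pool sk_pool_for_device(uint64_t coherent_dma_mask)
{
	if (coherent_dma_mask <= 0x00ffffffULL)	/* assumes 24-bit ZONE_DMA */
		return SK_POOL_DMA;
	if (coherent_dma_mask <= 0xffffffffULL)	/* 32-bit addressing only */
		return SK_POOL_DMA32;
	return SK_POOL_KERNEL;			/* no zone restriction */
}

int main(void)
{
	/* a device that can only address 32-bit physical memory */
	printf("pool %d\n", sk_pool_for_device(0xffffffffULL));
	return 0;
}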
commit c84dc6e68a
parent e860c299ac
5 changed files with 91 additions and 54 deletions
kernel/dma/direct.c:

@@ -45,8 +45,8 @@ u64 dma_direct_get_required_mask(struct device *dev)
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
-static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
-		u64 *phys_limit)
+gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+		u64 *phys_limit)
 {
 	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);
 
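The hunk above only changes the helper's linkage and name (dropping static and the __ prefix) so the atomic-pool code can call it. As a hedged sketch of what such a helper computes, translating a DMA addressing limit into the cheapest usable zone flag; the 1:1 dma-to-phys mapping and the 24-bit ZONE_DMA width are assumptions of the sketch:

#include <stdint.h>

#define SK_GFP_DMA	(1u << 0)	/* illustrative flag bits */
#define SK_GFP_DMA32	(1u << 1)

static uint64_t sk_bit_mask(unsigned int bits)
{
	return bits >= 64 ? ~0ULL : (1ULL << bits) - 1;
}

/* map a device's DMA mask to the least restrictive sufficient zone */
static unsigned int sk_optimal_gfp_mask(uint64_t dma_mask, uint64_t *phys_limit)
{
	*phys_limit = dma_mask;			/* assumes dma addr == phys addr */
	if (*phys_limit <= sk_bit_mask(24))	/* assumes 24-bit ZONE_DMA */
		return SK_GFP_DMA;
	if (*phys_limit <= sk_bit_mask(32))
		return SK_GFP_DMA32;
	return 0;				/* any zone will do */
}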
@@ -89,8 +89,8 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	/* we always manually zero the memory once we are done: */
 	gfp &= ~__GFP_ZERO;
-	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
-			&phys_limit);
+	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+			&phys_limit);
 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, alloc_size);
@@ -128,7 +128,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_alloc_need_uncached(dev, attrs) &&
 	    !gfpflags_allow_blocking(gfp)) {
-		ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+		ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
 		if (!ret)
 			return NULL;
 		goto done;
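With more than one pool, the allocator can no longer be device-agnostic, which is why dma_alloc_from_pool() gains the dev argument above: the callee must derive the right pool from the device itself. A hedged sketch of that dispatch, with hypothetical sk_* names:

#include <stdint.h>
#include <stdlib.h>

struct sk_device { uint64_t coherent_dma_mask; };

static int sk_pool_index(uint64_t coherent_dma_mask)
{
	if (coherent_dma_mask <= 0x00ffffffULL)  return 0;	/* DMA   */
	if (coherent_dma_mask <= 0xffffffffULL)  return 1;	/* DMA32 */
	return 2;						/* no restriction */
}

/* stand-in for allocating from one of the per-zone backing pools */
static void *sk_pool_alloc(int pool_idx, size_t size)
{
	(void)pool_idx;		/* a real pool allocator would use this */
	return malloc(size);
}

/* the dev argument lets the callee pick the pool on its own */
static void *sk_alloc_from_pool(struct sk_device *dev, size_t size)
{
	return sk_pool_alloc(sk_pool_index(dev->coherent_dma_mask), size);
}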
@@ -212,7 +212,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
+	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
 	if (force_dma_unencrypted(dev))
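The free side mirrors this: dma_free_from_pool() also receives the device, and because dev->coherent_dma_mask is unchanged since allocation, recomputing the pool lands the memory back where it came from. A short sketch of that symmetry (hypothetical names again):

#include <assert.h>
#include <stdint.h>

struct sk_device { uint64_t coherent_dma_mask; };

static int sk_pool_index(uint64_t mask)
{
	return mask <= 0x00ffffffULL ? 0 : mask <= 0xffffffffULL ? 1 : 2;
}

int main(void)
{
	struct sk_device dev = { .coherent_dma_mask = 0xffffffffULL };

	int allocated_from = sk_pool_index(dev.coherent_dma_mask);
	int freed_to = sk_pool_index(dev.coherent_dma_mask);

	/* same mask at both ends, hence the same pool */
	assert(allocated_from == freed_to);
	return 0;
}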