Skip to content
Closed
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
76 changes: 68 additions & 8 deletions src/driver/amdxdna/amdxdna_gem_of.c
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,14 @@ static void amdxdna_gem_dma_obj_free(struct drm_gem_object *gobj)
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);

XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
drm_gem_dma_object_free(gobj);

if (abo->mem.kva && abo->mem.dev_addr) {
dma_free_coherent(gobj->dev->dev, abo->mem.size, abo->mem.kva, abo->mem.dev_addr);
abo->mem.kva = NULL;
}

drm_gem_object_release(gobj);
kfree(abo);
}

static const struct drm_gem_object_funcs amdxdna_gem_dma_funcs = {
Expand Down Expand Up @@ -77,29 +84,82 @@ struct drm_gem_object *amdxdna_gem_create_object_cb(struct drm_device *dev, size
return to_gobj(abo);
}

/*
 * amdxdna_cma_create - allocate and initialize a bare amdxdna GEM object
 * @dev:  DRM device the object belongs to
 * @size: requested object size in bytes
 *
 * Allocates the full amdxdna_gem_obj container, initializes the embedded
 * GEM object with the driver's DMA funcs and creates its mmap offset.
 * The coherent backing memory is NOT allocated here; the caller is
 * expected to do that and to stash the address/kva in the object.
 *
 * Return: the embedded drm_gem_dma_object on success, ERR_PTR() on failure.
 * On success the caller owns the object and must dispose of it through
 * the GEM free callback.
 */
static struct drm_gem_dma_object *amdxdna_cma_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_dma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	/*
	 * The drm_gem_dma_object is embedded inside the larger
	 * amdxdna_gem_obj, so allocate the whole container here.
	 * NOTE(review): this relies on the GEM base living at offset 0 of
	 * the container (kzalloc result is used directly as gem_obj) —
	 * TODO confirm the struct layout guarantees this.
	 */
	gem_obj = kzalloc(sizeof(struct amdxdna_gem_obj), GFP_KERNEL);
	if (!gem_obj)
		return ERR_PTR(-ENOMEM);

	cma_obj = container_of(gem_obj, struct drm_gem_dma_object, base);

	gem_obj->funcs = &amdxdna_gem_dma_funcs;

	/* Manually init the GEM object; no helper covers this layout. */
	ret = drm_gem_object_init(dev, gem_obj, size);
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret)
		goto err_release;

	return cma_obj;

err_release:
	drm_gem_object_release(gem_obj);
err_free:
	kfree(gem_obj);
	return ERR_PTR(ret);
}

static struct amdxdna_gem_obj *amdxdna_drm_create_dma_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
struct drm_file *filp)
{
struct drm_gem_dma_object *dma;
struct drm_gem_dma_object *cma_obj;
struct amdxdna_gem_obj *abo;
size_t size = args->size;
dma_addr_t dma_addr;
void *vaddr;

/* Round up to more than 4K to ensure to allocate memory from CMA always */
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you briefly explain why the CMA allocation can be determined by size?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @houlz0507
We observed one issue: for smaller buffer sizes, Linux randomly allocates CMA memory from some other region rather than from the dedicated CMA memory region. Rounding the smaller buffer sizes up to a page size guarantees that CMA memory is allocated from the dedicated CMA memory region.

This creates an issue for some tests. In some use cases, kernels do not have access/connectivity to all memory regions but only to the CMA memory region. If Linux allocates CMA memory from some other region, then the test fails.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmm... how do you guarantee that >=8K memory will come from the CMA region in all circumstances?

Copy link
Contributor Author

@bisingha-xilinx bisingha-xilinx Aug 6, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi,
This is the lowest size I tested repeatedly, with 1000/2000 iterations in each test, and I didn't observe the issue. If I round up only to 4K (for the smaller buffer sizes), then I saw the mentioned issue.

So, we agreed to round up to 2 * page_size for smaller sizes and to round up to page_size for larger sizes.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What if the CMA region is used up? I think alloc_coherent will try to allocate memory from other places instead of returning an error. I did not see a way to make sure the memory is from CMA regions by using alloc_coherent.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @houlz0507
If the CMA region is used up, alloc_coherent does not allocate from other places; it returns an error. I've observed this behavior in my tests. Let me confirm again.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please try small buffer e.g. 8k for your experiment.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @houlz0507 thank you for the review.
Sure.

if (size <= PAGE_SIZE)
size = round_up(size, 2 * PAGE_SIZE);
else
size = round_up(size, PAGE_SIZE);

cma_obj = amdxdna_cma_create(dev, size);
if (IS_ERR(cma_obj))
return ERR_PTR(-ENOMEM);

dma = drm_gem_dma_create(dev, size);
if (IS_ERR(dma))
return ERR_CAST(dma);
vaddr = dma_alloc_coherent(dev->dev, size, &dma_addr, GFP_KERNEL);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

gem_dma bo was used which alloc non-coherent or wc buffer. Why do you need coherent buffer here? Non-coherent is preferred if possible.

if (!vaddr)
goto error_free_gem;

abo = to_xdna_obj(&dma->base);
cma_obj->dma_addr = dma_addr;
cma_obj->vaddr = vaddr;

abo->mem.dev_addr = dma->dma_addr;
abo->mem.kva = dma->vaddr;
abo = to_xdna_obj(&cma_obj->base);

abo->mem.dev_addr = cma_obj->dma_addr;
abo->mem.kva = cma_obj->vaddr;
abo->type = args->type;
abo->mem.size = size;

mutex_init(&abo->lock);
abo->mem.userptr = AMDXDNA_INVALID_ADDR;

return abo;

error_free_gem:
drm_gem_object_release(&cma_obj->base);
kfree(to_xdna_obj(&cma_obj->base));
return ERR_PTR(-ENOMEM);
}

int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
Expand Down