mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-03-16 12:14:06 +00:00
drm/virtio: Support sync objects
Add sync object DRM UAPI support to VirtIO-GPU driver. Sync objects support is needed by native context VirtIO-GPU Mesa drivers, it also will be used by Venus and Virgl contexts. Reviewed-by: Emil Velikov <emil.velikov@collabora.com> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com> # amdgpu nctx Tested-by: Rob Clark <robdclark@gmail.com> # freedreno nctx Reviewed-by: Rob Clark <robdclark@gmail.com> Acked-by: Gurchetan Singh <gurchetansingh@chromium.org> Acked-by: Gerd Hoffmann <kraxel@redhat.com> Link: https://patchwork.freedesktop.org/patch/msgid/20230416115237.798604-4-dmitry.osipenko@collabora.com
This commit is contained in:
parent
744d35d343
commit
7cb8d1ab8c
3 changed files with 241 additions and 2 deletions
|
@ -176,7 +176,8 @@ static const struct drm_driver driver = {
|
|||
* If KMS is disabled DRIVER_MODESET and DRIVER_ATOMIC are masked
|
||||
* out via drm_device::driver_features:
|
||||
*/
|
||||
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
|
||||
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
|
||||
DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
|
||||
.open = virtio_gpu_driver_open,
|
||||
.postclose = virtio_gpu_driver_postclose,
|
||||
|
||||
|
|
|
@ -14,11 +14,24 @@
|
|||
#include <linux/uaccess.h>
|
||||
|
||||
#include <drm/drm_file.h>
|
||||
#include <drm/drm_syncobj.h>
|
||||
#include <drm/virtgpu_drm.h>
|
||||
|
||||
#include "virtgpu_drv.h"
|
||||
|
||||
struct virtio_gpu_submit_post_dep {
|
||||
struct drm_syncobj *syncobj;
|
||||
struct dma_fence_chain *chain;
|
||||
u64 point;
|
||||
};
|
||||
|
||||
struct virtio_gpu_submit {
|
||||
struct virtio_gpu_submit_post_dep *post_deps;
|
||||
unsigned int num_out_syncobjs;
|
||||
|
||||
struct drm_syncobj **in_syncobjs;
|
||||
unsigned int num_in_syncobjs;
|
||||
|
||||
struct virtio_gpu_object_array *buflist;
|
||||
struct drm_virtgpu_execbuffer *exbuf;
|
||||
struct virtio_gpu_fence *out_fence;
|
||||
|
@ -59,6 +72,203 @@ static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
|
||||
u32 nr_syncobjs)
|
||||
{
|
||||
u32 i = nr_syncobjs;
|
||||
|
||||
while (i--) {
|
||||
if (syncobjs[i])
|
||||
drm_syncobj_put(syncobjs[i]);
|
||||
}
|
||||
|
||||
kvfree(syncobjs);
|
||||
}
|
||||
|
||||
static int
|
||||
virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
|
||||
{
|
||||
struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
|
||||
struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
|
||||
size_t syncobj_stride = exbuf->syncobj_stride;
|
||||
u32 num_in_syncobjs = exbuf->num_in_syncobjs;
|
||||
struct drm_syncobj **syncobjs;
|
||||
int ret = 0, i;
|
||||
|
||||
if (!num_in_syncobjs)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* kvalloc at first tries to allocate memory using kmalloc and
|
||||
* falls back to vmalloc only on failure. It also uses __GFP_NOWARN
|
||||
* internally for allocations larger than a page size, preventing
|
||||
* storm of KMSG warnings.
|
||||
*/
|
||||
syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL);
|
||||
if (!syncobjs)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < num_in_syncobjs; i++) {
|
||||
u64 address = exbuf->in_syncobjs + i * syncobj_stride;
|
||||
struct dma_fence *fence;
|
||||
|
||||
memset(&syncobj_desc, 0, sizeof(syncobj_desc));
|
||||
|
||||
if (copy_from_user(&syncobj_desc,
|
||||
u64_to_user_ptr(address),
|
||||
min(syncobj_stride, sizeof(syncobj_desc)))) {
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
|
||||
syncobj_desc.point, 0, &fence);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
ret = virtio_gpu_dma_fence_wait(submit, fence);
|
||||
|
||||
dma_fence_put(fence);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
|
||||
syncobjs[i] = drm_syncobj_find(submit->file,
|
||||
syncobj_desc.handle);
|
||||
if (!syncobjs[i]) {
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
virtio_gpu_free_syncobjs(syncobjs, i);
|
||||
return ret;
|
||||
}
|
||||
|
||||
submit->num_in_syncobjs = num_in_syncobjs;
|
||||
submit->in_syncobjs = syncobjs;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
|
||||
u32 nr_syncobjs)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
for (i = 0; i < nr_syncobjs; i++) {
|
||||
if (syncobjs[i])
|
||||
drm_syncobj_replace_fence(syncobjs[i], NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
|
||||
u32 nr_syncobjs)
|
||||
{
|
||||
u32 i = nr_syncobjs;
|
||||
|
||||
while (i--) {
|
||||
kfree(post_deps[i].chain);
|
||||
drm_syncobj_put(post_deps[i].syncobj);
|
||||
}
|
||||
|
||||
kvfree(post_deps);
|
||||
}
|
||||
|
||||
static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
|
||||
{
|
||||
struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
|
||||
struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
|
||||
struct virtio_gpu_submit_post_dep *post_deps;
|
||||
u32 num_out_syncobjs = exbuf->num_out_syncobjs;
|
||||
size_t syncobj_stride = exbuf->syncobj_stride;
|
||||
int ret = 0, i;
|
||||
|
||||
if (!num_out_syncobjs)
|
||||
return 0;
|
||||
|
||||
post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
|
||||
if (!post_deps)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < num_out_syncobjs; i++) {
|
||||
u64 address = exbuf->out_syncobjs + i * syncobj_stride;
|
||||
|
||||
memset(&syncobj_desc, 0, sizeof(syncobj_desc));
|
||||
|
||||
if (copy_from_user(&syncobj_desc,
|
||||
u64_to_user_ptr(address),
|
||||
min(syncobj_stride, sizeof(syncobj_desc)))) {
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
post_deps[i].point = syncobj_desc.point;
|
||||
|
||||
if (syncobj_desc.flags) {
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (syncobj_desc.point) {
|
||||
post_deps[i].chain = dma_fence_chain_alloc();
|
||||
if (!post_deps[i].chain) {
|
||||
ret = -ENOMEM;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
post_deps[i].syncobj = drm_syncobj_find(submit->file,
|
||||
syncobj_desc.handle);
|
||||
if (!post_deps[i].syncobj) {
|
||||
kfree(post_deps[i].chain);
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
virtio_gpu_free_post_deps(post_deps, i);
|
||||
return ret;
|
||||
}
|
||||
|
||||
submit->num_out_syncobjs = num_out_syncobjs;
|
||||
submit->post_deps = post_deps;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit)
|
||||
{
|
||||
struct virtio_gpu_submit_post_dep *post_deps = submit->post_deps;
|
||||
|
||||
if (post_deps) {
|
||||
struct dma_fence *fence = &submit->out_fence->f;
|
||||
u32 i;
|
||||
|
||||
for (i = 0; i < submit->num_out_syncobjs; i++) {
|
||||
if (post_deps[i].chain) {
|
||||
drm_syncobj_add_point(post_deps[i].syncobj,
|
||||
post_deps[i].chain,
|
||||
fence, post_deps[i].point);
|
||||
post_deps[i].chain = NULL;
|
||||
} else {
|
||||
drm_syncobj_replace_fence(post_deps[i].syncobj,
|
||||
fence);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int virtio_gpu_fence_event_create(struct drm_device *dev,
|
||||
struct drm_file *file,
|
||||
struct virtio_gpu_fence *fence,
|
||||
|
@ -118,6 +328,10 @@ static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
|
|||
|
||||
static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
|
||||
{
|
||||
virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
|
||||
virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
|
||||
virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);
|
||||
|
||||
if (!IS_ERR(submit->buf))
|
||||
kvfree(submit->buf);
|
||||
|
||||
|
@ -172,6 +386,7 @@ static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
|
|||
drm_fence_event = false;
|
||||
|
||||
if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
|
||||
exbuf->num_out_syncobjs ||
|
||||
exbuf->num_bo_handles ||
|
||||
drm_fence_event)
|
||||
out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
|
||||
|
@ -291,6 +506,14 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
|
|||
if (ret)
|
||||
goto cleanup;
|
||||
|
||||
ret = virtio_gpu_parse_post_deps(&submit);
|
||||
if (ret)
|
||||
goto cleanup;
|
||||
|
||||
ret = virtio_gpu_parse_deps(&submit);
|
||||
if (ret)
|
||||
goto cleanup;
|
||||
|
||||
/*
|
||||
* Await in-fences in the end of the job submission path to
|
||||
* optimize the path by proceeding directly to the submission
|
||||
|
@ -311,6 +534,7 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
|
|||
* the job submission path.
|
||||
*/
|
||||
virtio_gpu_install_out_fence_fd(&submit);
|
||||
virtio_gpu_process_post_deps(&submit);
|
||||
virtio_gpu_complete_submit(&submit);
|
||||
cleanup:
|
||||
virtio_gpu_cleanup_submit(&submit);
|
||||
|
|
|
@ -64,6 +64,16 @@ struct drm_virtgpu_map {
|
|||
__u32 pad;
|
||||
};
|
||||
|
||||
/* In-syncobj flag: reset (drop the fence of) the syncobj after its fence
 * has been collected for the submission.
 */
#define VIRTGPU_EXECBUF_SYNCOBJ_RESET 0x01
/* Mask of all currently-valid in-syncobj descriptor flags. */
#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
		VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
		0)
/* Per-syncobj descriptor referenced by drm_virtgpu_execbuffer's
 * in_syncobjs/out_syncobjs arrays; entries are spaced syncobj_stride
 * bytes apart.
 */
struct drm_virtgpu_execbuffer_syncobj {
	__u32 handle; /* DRM syncobj handle */
	__u32 flags;  /* VIRTGPU_EXECBUF_SYNCOBJ_x flags (in-syncobjs only) */
	__u64 point;  /* timeline point; non-zero selects timeline semantics */
};
|
||||
|
||||
/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
|
||||
struct drm_virtgpu_execbuffer {
|
||||
__u32 flags;
|
||||
|
@ -73,7 +83,11 @@ struct drm_virtgpu_execbuffer {
|
|||
__u32 num_bo_handles;
|
||||
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
|
||||
__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
|
||||
__u32 pad;
|
||||
__u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
|
||||
__u32 num_in_syncobjs;
|
||||
__u32 num_out_syncobjs;
|
||||
__u64 in_syncobjs;
|
||||
__u64 out_syncobjs;
|
||||
};
|
||||
|
||||
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
|
||||
|
|
Loading…
Add table
Reference in a new issue