From d4db619690abe58b29052f1584a5e4d9e47d78a3 Mon Sep 17 00:00:00 2001
From: Myy <myy@miouyouyou.fr>
Date: Fri, 10 Feb 2017 14:48:54 +0000
Subject: [PATCH 2/3] Renamed Kernel DMA Fence structures and functions

DMA Fence structures and functions were renamed in post-4.9 development
kernels (the rename first shipped in Linux 4.10). See this commit:

    dma-buf: Rename struct fence to dma_fence
    f54d1867005c3323f5d8ad83eed823e84226c429

Kernel: 4.9.0-rc

Signed-off-by: Myy <myy@miouyouyou.fr>
---
 drivers/gpu/arm/midgard/mali_kbase_defs.h       |  2 +-
 drivers/gpu/arm/midgard/mali_kbase_dma_fence.c  | 51 ++++++++++++-------------
 drivers/gpu/arm/midgard/mali_kbase_dma_fence.h  |  7 ++--
 drivers/gpu/arm/midgard/mali_kbase_jd.c         |  2 +-
 drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c |  8 ++--
 5 files changed, 33 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/arm/midgard/mali_kbase_defs.h b/drivers/gpu/arm/midgard/mali_kbase_defs.h
index 290d460..587cfb3 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_defs.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_defs.h
@@ -453,7 +453,7 @@ struct kbase_jd_atom {
 		 * regardless of the event_code of the katom (signal also on
 		 * failure).
 		 */
-		struct fence *fence;
+		struct dma_fence *fence;
 		/* The dma-buf fence context number for this atom. A unique
 		 * context number is allocated to each katom in the context on
 		 * context creation.
diff --git a/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c
index 97bb6c5..82160ac 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.c
@@ -22,11 +22,8 @@
 #include "mali_kbase_dma_fence.h"
 
 #include <linux/atomic.h>
-#include <linux/fence.h>
-#include <linux/list.h>
 #include <linux/lockdep.h>
 #include <linux/mutex.h>
-#include <linux/reservation.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
@@ -56,19 +53,19 @@ kbase_dma_fence_waiters_remove(struct kbase_jd_atom *katom)
 }
 
 static const char *
-kbase_dma_fence_get_driver_name(struct fence *fence)
+kbase_dma_fence_get_driver_name(struct dma_fence *fence)
 {
 	return kbase_drv_name;
 }
 
 static const char *
-kbase_dma_fence_get_timeline_name(struct fence *fence)
+kbase_dma_fence_get_timeline_name(struct dma_fence *fence)
 {
 	return kbase_timeline_name;
 }
 
 static bool
-kbase_dma_fence_enable_signaling(struct fence *fence)
+kbase_dma_fence_enable_signaling(struct dma_fence *fence)
 {
 	/* If in the future we need to add code here remember to
 	 * to get a reference to the fence and release it when signaling
@@ -78,30 +75,30 @@ kbase_dma_fence_enable_signaling(struct fence *fence)
 }
 
 static void
-kbase_dma_fence_fence_value_str(struct fence *fence, char *str, int size)
+kbase_dma_fence_fence_value_str(struct dma_fence *fence, char *str, int size)
 {
 	snprintf(str, size, "%u", fence->seqno);
 }
 
-static const struct fence_ops kbase_dma_fence_ops = {
+static const struct dma_fence_ops kbase_dma_fence_ops = {
 	.get_driver_name = kbase_dma_fence_get_driver_name,
 	.get_timeline_name = kbase_dma_fence_get_timeline_name,
 	.enable_signaling = kbase_dma_fence_enable_signaling,
 	/* Use the default wait */
-	.wait = fence_default_wait,
+	.wait = dma_fence_default_wait,
 	.fence_value_str = kbase_dma_fence_fence_value_str,
 };
 
-static struct fence *
+static struct dma_fence *
 kbase_dma_fence_new(unsigned int context, unsigned int seqno)
 {
-	struct fence *fence;
+	struct dma_fence *fence;
 
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 	if (!fence)
 		return NULL;
 
-	fence_init(fence,
+	dma_fence_init(fence,
 		   &kbase_dma_fence_ops,
 		   &kbase_dma_fence_lock,
 		   context,
@@ -212,7 +209,7 @@ kbase_dma_fence_free_callbacks(struct kbase_jd_atom *katom, bool queue_worker)
 		bool ret;
 
 		/* Cancel callbacks that hasn't been called yet. */
-		ret = fence_remove_callback(cb->fence, &cb->fence_cb);
+		ret = dma_fence_remove_callback(cb->fence, &cb->fence_cb);
 		if (ret) {
 			int ret;
 
@@ -235,7 +232,7 @@ kbase_dma_fence_free_callbacks(struct kbase_jd_atom *katom, bool queue_worker)
 		 * Release the reference taken in
 		 * kbase_dma_fence_add_callback().
 		 */
-		fence_put(cb->fence);
+		dma_fence_put(cb->fence);
 		list_del(&cb->node);
 		kfree(cb);
 	}
@@ -329,8 +326,8 @@ kbase_dma_fence_work(struct work_struct *pwork)
  */
 static int
 kbase_dma_fence_add_callback(struct kbase_jd_atom *katom,
-			     struct fence *fence,
-			     fence_func_t callback)
+			     struct dma_fence *fence,
+			     dma_fence_func_t callback)
 {
 	int err = 0;
 	struct kbase_dma_fence_cb *kbase_fence_cb;
@@ -343,7 +340,7 @@ kbase_dma_fence_add_callback(struct kbase_jd_atom *katom,
 	kbase_fence_cb->katom = katom;
 	INIT_LIST_HEAD(&kbase_fence_cb->node);
 
-	err = fence_add_callback(fence, &kbase_fence_cb->fence_cb, callback);
+	err = dma_fence_add_callback(fence, &kbase_fence_cb->fence_cb, callback);
 	if (err == -ENOENT) {
 		/* Fence signaled, clear the error and return */
 		err = 0;
@@ -356,7 +353,7 @@ kbase_dma_fence_add_callback(struct kbase_jd_atom *katom,
 		 * Get reference to fence that will be kept until callback gets
 		 * cleaned up in kbase_dma_fence_free_callbacks().
 		 */
-		fence_get(fence);
+		dma_fence_get(fence);
 		atomic_inc(&katom->dma_fence.dep_count);
 		/* Add callback to katom's list of callbacks */
 		list_add(&kbase_fence_cb->node, &katom->dma_fence.callbacks);
@@ -366,7 +363,7 @@ kbase_dma_fence_add_callback(struct kbase_jd_atom *katom,
 }
 
 static void
-kbase_dma_fence_cb(struct fence *fence, struct fence_cb *cb)
+kbase_dma_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct kbase_dma_fence_cb *kcb = container_of(cb,
 				struct kbase_dma_fence_cb,
@@ -386,8 +383,8 @@ kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom,
 					 struct reservation_object *resv,
 					 bool exclusive)
 {
-	struct fence *excl_fence = NULL;
-	struct fence **shared_fences = NULL;
+	struct dma_fence *excl_fence = NULL;
+	struct dma_fence **shared_fences = NULL;
 	unsigned int shared_count = 0;
 	int err, i;
 
@@ -408,7 +405,7 @@ kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom,
 		 * and it's the fence's owner is responsible for singling the fence
 		 * before allowing it to disappear.
 		 */
-		fence_put(excl_fence);
+		dma_fence_put(excl_fence);
 
 		if (err)
 			goto out;
@@ -431,7 +428,7 @@ kbase_dma_fence_add_reservation_callback(struct kbase_jd_atom *katom,
 	 */
 out:
 	for (i = 0; i < shared_count; i++)
-		fence_put(shared_fences[i]);
+		dma_fence_put(shared_fences[i]);
 	kfree(shared_fences);
 
 	if (err) {
@@ -468,7 +465,7 @@ int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
 			 struct kbase_dma_fence_resv_info *info)
 {
 	int err, i;
-	struct fence *fence;
+	struct dma_fence *fence;
 	struct ww_acquire_ctx ww_ctx;
 
 	lockdep_assert_held(&katom->kctx->jctx.lock);
@@ -490,7 +487,7 @@ int kbase_dma_fence_wait(struct kbase_jd_atom *katom,
 		dev_err(katom->kctx->kbdev->dev,
 			"Error %d locking reservations.\n", err);
 		atomic_set(&katom->dma_fence.dep_count, -1);
-		fence_put(fence);
+		dma_fence_put(fence);
 		return err;
 	}
 
@@ -580,8 +577,8 @@ void kbase_dma_fence_signal(struct kbase_jd_atom *katom)
 	KBASE_DEBUG_ASSERT(atomic_read(&katom->dma_fence.dep_count) == -1);
 
 	/* Signal the atom's fence. */
-	fence_signal(katom->dma_fence.fence);
-	fence_put(katom->dma_fence.fence);
+	dma_fence_signal(katom->dma_fence.fence);
+	dma_fence_put(katom->dma_fence.fence);
 	katom->dma_fence.fence = NULL;
 
 	kbase_dma_fence_free_callbacks(katom, false);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h
index 3b0a69b..19b343e 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_dma_fence.h
@@ -20,10 +20,9 @@
 
 #ifdef CONFIG_MALI_DMA_FENCE
 
-#include <linux/fence.h>
 #include <linux/list.h>
 #include <linux/reservation.h>
-
+#include <linux/dma-fence.h>
 
 /* Forward declaration from mali_kbase_defs.h */
 struct kbase_jd_atom;
@@ -37,9 +36,9 @@ struct kbase_context;
  * @node: List head for linking this callback to the katom
  */
 struct kbase_dma_fence_cb {
-	struct fence_cb fence_cb;
+	struct dma_fence_cb fence_cb;
 	struct kbase_jd_atom *katom;
-	struct fence *fence;
+	struct dma_fence *fence;
 	struct list_head node;
 };
 
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd.c b/drivers/gpu/arm/midgard/mali_kbase_jd.c
index 596b047..e3ea5f8 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_jd.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_jd.c
@@ -1847,7 +1847,7 @@ int kbase_jd_init(struct kbase_context *kctx)
 		kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED;
 
 #ifdef CONFIG_MALI_DMA_FENCE
-		kctx->jctx.atoms[i].dma_fence.context = fence_context_alloc(1);
+		kctx->jctx.atoms[i].dma_fence.context = dma_fence_context_alloc(1);
 		atomic_set(&kctx->jctx.atoms[i].dma_fence.seqno, 0);
 		INIT_LIST_HEAD(&kctx->jctx.atoms[i].dma_fence.callbacks);
 #endif
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c
index 47ea426..e2d4df8 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_jd_debugfs.c
@@ -71,7 +71,7 @@ static void kbase_jd_debugfs_fence_info(struct kbase_jd_atom *atom,
 	struct kbase_dma_fence_cb *cb;
 
 	if (atom->dma_fence.fence) {
-		struct fence *fence = atom->dma_fence.fence;
+		struct dma_fence *fence = atom->dma_fence.fence;
 
 		seq_printf(sfile,
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
@@ -81,13 +81,13 @@ static void kbase_jd_debugfs_fence_info(struct kbase_jd_atom *atom,
 #endif
 				fence->context,
 				fence->seqno,
-				fence_is_signaled(fence) ?
+				dma_fence_is_signaled(fence) ?
 					"signaled" : "active");
 	}
 
 	list_for_each_entry(cb, &atom->dma_fence.callbacks, node) {
-		struct fence *fence = cb->fence;
+		struct dma_fence *fence = cb->fence;
 
 		seq_printf(sfile,
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
@@ -97,7 +97,7 @@ static void kbase_jd_debugfs_fence_info(struct kbase_jd_atom *atom,
 #endif
 				fence->context,
 				fence->seqno,
-				fence_is_signaled(fence) ?
+				dma_fence_is_signaled(fence) ?
 					"signaled" : "active");
 	}
 }
-- 
2.10.2
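As an alternative to renaming every call site the way this patch does, an out-of-tree module that still has to build against pre-rename kernels can hide the rename behind a small compatibility header. The sketch below is illustrative only and not part of the patch: the header name mali_kbase_fence_compat.h is made up, and the KERNEL_VERSION(4, 10, 0) cutoff matches mainline, so vendor trees that backported the rename may need a different check.

/*
 * Minimal compatibility sketch (hypothetical, not part of this patch):
 * lets code written against the new dma_fence_* function names build on
 * kernels that still ship the old fence API.
 */
#ifndef _MALI_KBASE_FENCE_COMPAT_H_
#define _MALI_KBASE_FENCE_COMPAT_H_

#include <linux/version.h>

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
#include <linux/fence.h>

/* Only function names are mapped here. The struct tag itself is not:
 * an object-like "#define dma_fence fence" would also rewrite struct
 * members literally named dma_fence (e.g. katom->dma_fence.fence) and
 * break those accesses.
 */
#define dma_fence_context_alloc(n)		fence_context_alloc(n)
#define dma_fence_init(f, ops, lock, ctx, seq)	fence_init(f, ops, lock, ctx, seq)
#define dma_fence_get(f)			fence_get(f)
#define dma_fence_put(f)			fence_put(f)
#define dma_fence_signal(f)			fence_signal(f)
#define dma_fence_is_signaled(f)		fence_is_signaled(f)
#define dma_fence_add_callback(f, cb, fn)	fence_add_callback(f, cb, fn)
#define dma_fence_remove_callback(f, cb)	fence_remove_callback(f, cb)
#define dma_fence_default_wait			fence_default_wait

#else
#include <linux/dma-fence.h>
#endif

#endif /* _MALI_KBASE_FENCE_COMPAT_H_ */

Type names are the part such a shim cannot cover cleanly: declarations like struct dma_fence * or struct dma_fence_ops would still need explicit LINUX_VERSION_CODE guards on older kernels, precisely because the dma_fence identifier also appears as a struct member name in kbase. Renaming every call site, as done above, avoids that complication at the cost of requiring a post-rename kernel.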