mirror of
https://github.com/Fishwaldo/linux-bl808.git
synced 2025-06-05 06:05:06 +00:00
drm/i915: Make GEM object create and create from data take dev_priv

Makes all GEM object constructors consistent.

v2: Fix compilation in GVT code.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> (v1)
This commit is contained in:
parent
187685cb90
commit
12d79d7828
12 changed files with 32 additions and 30 deletions
|
@ -1602,7 +1602,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
|
|||
return -ENOMEM;
|
||||
|
||||
entry_obj->obj =
|
||||
i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
|
||||
i915_gem_object_create(s->vgpu->gvt->dev_priv,
|
||||
roundup(bb_size, PAGE_SIZE));
|
||||
if (IS_ERR(entry_obj->obj)) {
|
||||
ret = PTR_ERR(entry_obj->obj);
|
||||
|
@ -2665,14 +2665,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
|
|||
|
||||
static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
|
||||
{
|
||||
struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
|
||||
int ctx_size = wa_ctx->indirect_ctx.size;
|
||||
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
|
||||
struct drm_i915_gem_object *obj;
|
||||
int ret = 0;
|
||||
void *map;
|
||||
|
||||
obj = i915_gem_object_create(dev,
|
||||
obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
|
||||
roundup(ctx_size + CACHELINE_BYTES,
|
||||
PAGE_SIZE));
|
||||
if (IS_ERR(obj))
|
||||
|
|
|
@ -2985,10 +2985,11 @@ void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
|
|||
void i915_gem_object_free(struct drm_i915_gem_object *obj);
|
||||
void i915_gem_object_init(struct drm_i915_gem_object *obj,
|
||||
const struct drm_i915_gem_object_ops *ops);
|
||||
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
|
||||
u64 size);
|
||||
struct drm_i915_gem_object *i915_gem_object_create_from_data(
|
||||
struct drm_device *dev, const void *data, size_t size);
|
||||
struct drm_i915_gem_object *
|
||||
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
|
||||
struct drm_i915_gem_object *
|
||||
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
|
||||
const void *data, size_t size);
|
||||
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
|
||||
void i915_gem_free_object(struct drm_gem_object *obj);
|
||||
|
||||
|
|
|
@ -635,7 +635,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
|
|||
|
||||
static int
|
||||
i915_gem_create(struct drm_file *file,
|
||||
struct drm_device *dev,
|
||||
struct drm_i915_private *dev_priv,
|
||||
uint64_t size,
|
||||
uint32_t *handle_p)
|
||||
{
|
||||
|
@ -648,7 +648,7 @@ i915_gem_create(struct drm_file *file,
|
|||
return -EINVAL;
|
||||
|
||||
/* Allocate the new object */
|
||||
obj = i915_gem_object_create(dev, size);
|
||||
obj = i915_gem_object_create(dev_priv, size);
|
||||
if (IS_ERR(obj))
|
||||
return PTR_ERR(obj);
|
||||
|
||||
|
@ -670,7 +670,7 @@ i915_gem_dumb_create(struct drm_file *file,
|
|||
/* have to work out size/pitch and return them */
|
||||
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
|
||||
args->size = args->pitch * args->height;
|
||||
return i915_gem_create(file, dev,
|
||||
return i915_gem_create(file, to_i915(dev),
|
||||
args->size, &args->handle);
|
||||
}
|
||||
|
||||
|
@ -684,11 +684,12 @@ int
|
|||
i915_gem_create_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_gem_create *args = data;
|
||||
|
||||
i915_gem_flush_free_objects(to_i915(dev));
|
||||
i915_gem_flush_free_objects(dev_priv);
|
||||
|
||||
return i915_gem_create(file, dev,
|
||||
return i915_gem_create(file, dev_priv,
|
||||
args->size, &args->handle);
|
||||
}
|
||||
|
||||
|
@ -3970,9 +3971,8 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
|
|||
(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
|
||||
|
||||
struct drm_i915_gem_object *
|
||||
i915_gem_object_create(struct drm_device *dev, u64 size)
|
||||
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct address_space *mapping;
|
||||
gfp_t mask;
|
||||
|
@ -3993,7 +3993,7 @@ i915_gem_object_create(struct drm_device *dev, u64 size)
|
|||
if (obj == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ret = drm_gem_object_init(dev, &obj->base, size);
|
||||
ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
|
@ -4749,7 +4749,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
|
|||
|
||||
/* Allocate a new GEM object and fill it with the supplied data */
|
||||
struct drm_i915_gem_object *
|
||||
i915_gem_object_create_from_data(struct drm_device *dev,
|
||||
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
|
||||
const void *data, size_t size)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
@ -4757,7 +4757,7 @@ i915_gem_object_create_from_data(struct drm_device *dev,
|
|||
size_t bytes;
|
||||
int ret;
|
||||
|
||||
obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
|
||||
obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
|
||||
if (IS_ERR(obj))
|
||||
return obj;
|
||||
|
||||
|
|
|
@ -169,12 +169,13 @@ void i915_gem_context_free(struct kref *ctx_ref)
|
|||
static struct drm_i915_gem_object *
|
||||
alloc_context_obj(struct drm_device *dev, u64 size)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&dev->struct_mutex);
|
||||
|
||||
obj = i915_gem_object_create(dev, size);
|
||||
obj = i915_gem_object_create(dev_priv, size);
|
||||
if (IS_ERR(obj))
|
||||
return obj;
|
||||
|
||||
|
@ -193,7 +194,7 @@ alloc_context_obj(struct drm_device *dev, u64 size)
|
|||
* This is only applicable for Ivy Bridge devices since
|
||||
* later platforms don't have L3 control bits in the PTE.
|
||||
*/
|
||||
if (IS_IVYBRIDGE(to_i915(dev))) {
|
||||
if (IS_IVYBRIDGE(dev_priv)) {
|
||||
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
|
||||
/* Failure shouldn't ever happen this early */
|
||||
if (WARN_ON(ret)) {
|
||||
|
|
|
@ -574,7 +574,7 @@ static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
|
|||
struct i915_vma *vma;
|
||||
int ret;
|
||||
|
||||
obj = i915_gem_object_create(&dev_priv->drm, size);
|
||||
obj = i915_gem_object_create(dev_priv, size);
|
||||
if (IS_ERR(obj))
|
||||
return ERR_CAST(obj);
|
||||
|
||||
|
|
|
@ -773,7 +773,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
|
|||
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
|
||||
BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
|
||||
|
||||
bo = i915_gem_object_create(&dev_priv->drm, OA_BUFFER_SIZE);
|
||||
bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
|
||||
if (IS_ERR(bo)) {
|
||||
DRM_ERROR("Failed to allocate OA buffer\n");
|
||||
ret = PTR_ERR(bo);
|
||||
|
|
|
@ -10998,7 +10998,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
|
|||
struct drm_i915_gem_object *obj;
|
||||
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
|
||||
|
||||
obj = i915_gem_object_create(dev,
|
||||
obj = i915_gem_object_create(to_i915(dev),
|
||||
intel_framebuffer_size_for_mode(mode, bpp));
|
||||
if (IS_ERR(obj))
|
||||
return ERR_CAST(obj);
|
||||
|
|
|
@ -147,7 +147,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
|
|||
if (size * 2 < ggtt->stolen_usable_size)
|
||||
obj = i915_gem_object_create_stolen(dev_priv, size);
|
||||
if (obj == NULL)
|
||||
obj = i915_gem_object_create(dev, size);
|
||||
obj = i915_gem_object_create(dev_priv, size);
|
||||
if (IS_ERR(obj)) {
|
||||
DRM_ERROR("failed to allocate framebuffer\n");
|
||||
ret = PTR_ERR(obj);
|
||||
|
|
|
@ -590,6 +590,7 @@ fail:
|
|||
|
||||
static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct pci_dev *pdev = dev->pdev;
|
||||
struct drm_i915_gem_object *obj;
|
||||
const struct firmware *fw = NULL;
|
||||
|
@ -648,7 +649,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
|
|||
|
||||
/* Header and uCode will be loaded to WOPCM. Size of the two. */
|
||||
size = guc_fw->header_size + guc_fw->ucode_size;
|
||||
if (size > guc_wopcm_size(to_i915(dev))) {
|
||||
if (size > guc_wopcm_size(dev_priv)) {
|
||||
DRM_NOTE("Firmware is too large to fit in WOPCM\n");
|
||||
goto fail;
|
||||
}
|
||||
|
@ -676,7 +677,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
|
|||
guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
|
||||
obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
if (IS_ERR_OR_NULL(obj)) {
|
||||
err = obj ? PTR_ERR(obj) : -ENOMEM;
|
||||
|
|
|
@ -1243,7 +1243,7 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
|
|||
struct i915_vma *vma;
|
||||
int err;
|
||||
|
||||
obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size));
|
||||
obj = i915_gem_object_create(engine->i915, PAGE_ALIGN(size));
|
||||
if (IS_ERR(obj))
|
||||
return PTR_ERR(obj);
|
||||
|
||||
|
@ -2242,7 +2242,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
|
|||
/* One extra page as the sharing data between driver and GuC */
|
||||
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
|
||||
|
||||
ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
|
||||
ctx_obj = i915_gem_object_create(ctx->i915, context_size);
|
||||
if (IS_ERR(ctx_obj)) {
|
||||
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
|
||||
return PTR_ERR(ctx_obj);
|
||||
|
|
|
@ -1393,7 +1393,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
|
|||
if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
|
||||
reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
|
||||
if (reg_bo == NULL)
|
||||
reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
|
||||
reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
|
||||
if (IS_ERR(reg_bo))
|
||||
goto out_free;
|
||||
overlay->reg_bo = reg_bo;
|
||||
|
|
|
@ -1871,7 +1871,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
|
|||
|
||||
obj = i915_gem_object_create_stolen(dev_priv, size);
|
||||
if (!obj)
|
||||
obj = i915_gem_object_create(&dev_priv->drm, size);
|
||||
obj = i915_gem_object_create(dev_priv, size);
|
||||
if (IS_ERR(obj))
|
||||
return ERR_CAST(obj);
|
||||
|
||||
|
@ -2452,7 +2452,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
|
|||
if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
|
||||
struct i915_vma *vma;
|
||||
|
||||
obj = i915_gem_object_create(&dev_priv->drm, 4096);
|
||||
obj = i915_gem_object_create(dev_priv, 4096);
|
||||
if (IS_ERR(obj))
|
||||
goto err;
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue