From a8c21a5451d831e67b7a6fb910f9ca8bc7b43554 Mon Sep 17 00:00:00 2001
From: The etnaviv authors <dri-devel@lists.freedesktop.org>
Date: Thu, 3 Dec 2015 18:21:29 +0100
Subject: [PATCH] drm/etnaviv: add initial etnaviv DRM driver

This adds the etnaviv DRM driver and hooks it up in Makefiles
and Kconfig.

Signed-off-by: Christian Gmeiner <christian.gmeiner@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/Kconfig | 2 +
 drivers/gpu/drm/Makefile | 1 +
 drivers/gpu/drm/etnaviv/Kconfig | 20 +
 drivers/gpu/drm/etnaviv/Makefile | 14 +
 drivers/gpu/drm/etnaviv/cmdstream.xml.h | 218 ++++
 drivers/gpu/drm/etnaviv/common.xml.h | 249 ++++
 drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 268 +++++
 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c | 209 ++++
 drivers/gpu/drm/etnaviv/etnaviv_drv.c | 707 +++++++++++
 drivers/gpu/drm/etnaviv/etnaviv_drv.h | 161 +++
 drivers/gpu/drm/etnaviv/etnaviv_dump.c | 227 ++++
 drivers/gpu/drm/etnaviv/etnaviv_dump.h | 54 +
 drivers/gpu/drm/etnaviv/etnaviv_gem.c | 897 ++++++++++++++
 drivers/gpu/drm/etnaviv/etnaviv_gem.h | 117 ++
 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 122 ++
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 443 +++++++
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 1644 ++++++++++++++++++++++++++
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 209 ++++
 drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 240 ++++
 drivers/gpu/drm/etnaviv/etnaviv_iommu.h | 28 +
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 33 +
 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h | 25 +
 drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 299 +++++
 drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 71 ++
 drivers/gpu/drm/etnaviv/state.xml.h | 351 ++++++
 drivers/gpu/drm/etnaviv/state_hi.xml.h | 407 +++++++
 include/uapi/drm/etnaviv_drm.h | 222 ++++
 27 files changed, 7238 insertions(+)
 create mode 100644 drivers/gpu/drm/etnaviv/Kconfig
 create mode 100644 drivers/gpu/drm/etnaviv/Makefile
 create mode 100644 drivers/gpu/drm/etnaviv/cmdstream.xml.h
 create mode 100644 drivers/gpu/drm/etnaviv/common.xml.h
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_buffer.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_drv.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_drv.h
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_dump.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_dump.h
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gem.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gem.h
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gpu.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_gpu.h
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu.h
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_mmu.c
 create mode 100644 drivers/gpu/drm/etnaviv/etnaviv_mmu.h
 create mode 100644 drivers/gpu/drm/etnaviv/state.xml.h
 create mode 100644 drivers/gpu/drm/etnaviv/state_hi.xml.h
 create mode 100644 include/uapi/drm/etnaviv_drm.h

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c4bf9a1..b02ac62 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -266,3 +266,5 @@ source "drivers/gpu/drm/amd/amdkfd/Kconfig"
 source "drivers/gpu/drm/imx/Kconfig"
 
 source "drivers/gpu/drm/vc4/Kconfig"
+
+source "drivers/gpu/drm/etnaviv/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1e9ff4c..f858aa2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -75,3 +75,4 @@ obj-y += i2c/
 obj-y += panel/
 obj-y += bridge/
 obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
+obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
new file mode 100644
index 0000000..2cde7a5
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -0,0 +1,20 @@
+
+config DRM_ETNAVIV
+	tristate "ETNAVIV (DRM support for Vivante GPU IP cores)"
+	depends on DRM
+	depends on ARCH_MXC || ARCH_DOVE
+	select SHMEM
+	select TMPFS
+	select IOMMU_API
+	select IOMMU_SUPPORT
+	select WANT_DEV_COREDUMP
+	help
+	  DRM driver for Vivante GPUs.
+
+config DRM_ETNAVIV_REGISTER_LOGGING
+	bool "enable ETNAVIV register logging"
+	depends on DRM_ETNAVIV
+	help
+	  Compile in support for logging register reads/writes in a format
+	  that can be parsed by envytools demsm tool. If enabled, register
+	  logging can be switched on via etnaviv.reglog=y module param.
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
new file mode 100644
index 0000000..1086e98
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -0,0 +1,14 @@
+etnaviv-y := \
+	etnaviv_buffer.o \
+	etnaviv_cmd_parser.o \
+	etnaviv_drv.o \
+	etnaviv_dump.o \
+	etnaviv_gem_prime.o \
+	etnaviv_gem_submit.o \
+	etnaviv_gem.o \
+	etnaviv_gpu.o \
+	etnaviv_iommu_v2.o \
+	etnaviv_iommu.o \
+	etnaviv_mmu.o
+
+obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
diff --git a/drivers/gpu/drm/etnaviv/cmdstream.xml.h b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
|
|
new file mode 100644
|
|
index 0000000..8c44ba9
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/cmdstream.xml.h
|
|
@@ -0,0 +1,218 @@
|
|
+#ifndef CMDSTREAM_XML
|
|
+#define CMDSTREAM_XML
|
|
+
|
|
+/* Autogenerated file, DO NOT EDIT manually!
|
|
+
|
|
+This file was generated by the rules-ng-ng headergen tool in this git repository:
|
|
+http://0x04.net/cgit/index.cgi/rules-ng-ng
|
|
+git clone git://0x04.net/rules-ng-ng
|
|
+
|
|
+The rules-ng-ng source files this header was generated from are:
|
|
+- cmdstream.xml ( 12589 bytes, from 2014-02-17 14:57:56)
|
|
+- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
|
|
+
|
|
+Copyright (C) 2014
|
|
+*/
|
|
+
|
|
+
|
|
+#define FE_OPCODE_LOAD_STATE 0x00000001
|
|
+#define FE_OPCODE_END 0x00000002
|
|
+#define FE_OPCODE_NOP 0x00000003
|
|
+#define FE_OPCODE_DRAW_2D 0x00000004
|
|
+#define FE_OPCODE_DRAW_PRIMITIVES 0x00000005
|
|
+#define FE_OPCODE_DRAW_INDEXED_PRIMITIVES 0x00000006
|
|
+#define FE_OPCODE_WAIT 0x00000007
|
|
+#define FE_OPCODE_LINK 0x00000008
|
|
+#define FE_OPCODE_STALL 0x00000009
|
|
+#define FE_OPCODE_CALL 0x0000000a
|
|
+#define FE_OPCODE_RETURN 0x0000000b
|
|
+#define FE_OPCODE_CHIP_SELECT 0x0000000d
|
|
+#define PRIMITIVE_TYPE_POINTS 0x00000001
|
|
+#define PRIMITIVE_TYPE_LINES 0x00000002
|
|
+#define PRIMITIVE_TYPE_LINE_STRIP 0x00000003
|
|
+#define PRIMITIVE_TYPE_TRIANGLES 0x00000004
|
|
+#define PRIMITIVE_TYPE_TRIANGLE_STRIP 0x00000005
|
|
+#define PRIMITIVE_TYPE_TRIANGLE_FAN 0x00000006
|
|
+#define PRIMITIVE_TYPE_LINE_LOOP 0x00000007
|
|
+#define PRIMITIVE_TYPE_QUADS 0x00000008
|
|
+#define VIV_FE_LOAD_STATE 0x00000000
|
|
+
|
|
+#define VIV_FE_LOAD_STATE_HEADER 0x00000000
|
|
+#define VIV_FE_LOAD_STATE_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_LOAD_STATE_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE 0x08000000
|
|
+#define VIV_FE_LOAD_STATE_HEADER_FIXP 0x04000000
|
|
+#define VIV_FE_LOAD_STATE_HEADER_COUNT__MASK 0x03ff0000
|
|
+#define VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT 16
|
|
+#define VIV_FE_LOAD_STATE_HEADER_COUNT(x) (((x) << VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT) & VIV_FE_LOAD_STATE_HEADER_COUNT__MASK)
|
|
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK 0x0000ffff
|
|
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT 0
|
|
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET(x) (((x) << VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT) & VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK)
|
|
+#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR 2
|
|
+
|
|
+#define VIV_FE_END 0x00000000
|
|
+
|
|
+#define VIV_FE_END_HEADER 0x00000000
|
|
+#define VIV_FE_END_HEADER_EVENT_ID__MASK 0x0000001f
|
|
+#define VIV_FE_END_HEADER_EVENT_ID__SHIFT 0
|
|
+#define VIV_FE_END_HEADER_EVENT_ID(x) (((x) << VIV_FE_END_HEADER_EVENT_ID__SHIFT) & VIV_FE_END_HEADER_EVENT_ID__MASK)
|
|
+#define VIV_FE_END_HEADER_EVENT_ENABLE 0x00000100
|
|
+#define VIV_FE_END_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_END_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_END_HEADER_OP_END 0x10000000
|
|
+
|
|
+#define VIV_FE_NOP 0x00000000
|
|
+
|
|
+#define VIV_FE_NOP_HEADER 0x00000000
|
|
+#define VIV_FE_NOP_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_NOP_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_NOP_HEADER_OP_NOP 0x18000000
|
|
+
|
|
+#define VIV_FE_DRAW_2D 0x00000000
|
|
+
|
|
+#define VIV_FE_DRAW_2D_HEADER 0x00000000
|
|
+#define VIV_FE_DRAW_2D_HEADER_COUNT__MASK 0x0000ff00
|
|
+#define VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT 8
|
|
+#define VIV_FE_DRAW_2D_HEADER_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_COUNT__MASK)
|
|
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK 0x07ff0000
|
|
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT 16
|
|
+#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT(x) (((x) << VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK)
|
|
+#define VIV_FE_DRAW_2D_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_DRAW_2D_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_DRAW_2D_HEADER_OP_DRAW_2D 0x20000000
|
|
+
|
|
+#define VIV_FE_DRAW_2D_TOP_LEFT 0x00000008
|
|
+#define VIV_FE_DRAW_2D_TOP_LEFT_X__MASK 0x0000ffff
|
|
+#define VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT 0
|
|
+#define VIV_FE_DRAW_2D_TOP_LEFT_X(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_X__MASK)
|
|
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK 0xffff0000
|
|
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT 16
|
|
+#define VIV_FE_DRAW_2D_TOP_LEFT_Y(x) (((x) << VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK)
|
|
+
|
|
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT 0x0000000c
|
|
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK 0x0000ffff
|
|
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT 0
|
|
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK)
|
|
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK 0xffff0000
|
|
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT 16
|
|
+#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y(x) (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK)
|
|
+
|
|
+#define VIV_FE_DRAW_PRIMITIVES 0x00000000
|
|
+
|
|
+#define VIV_FE_DRAW_PRIMITIVES_HEADER 0x00000000
|
|
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP_DRAW_PRIMITIVES 0x28000000
|
|
+
|
|
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND 0x00000004
|
|
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff
|
|
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT 0
|
|
+#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK)
|
|
+
|
|
+#define VIV_FE_DRAW_PRIMITIVES_START 0x00000008
|
|
+
|
|
+#define VIV_FE_DRAW_PRIMITIVES_COUNT 0x0000000c
|
|
+
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES 0x00000000
|
|
+
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER 0x00000000
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP_DRAW_INDEXED_PRIMITIVES 0x30000000
|
|
+
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND 0x00000004
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK 0x000000ff
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT 0
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE(x) (((x) << VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK)
|
|
+
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_START 0x00000008
|
|
+
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COUNT 0x0000000c
|
|
+
|
|
+#define VIV_FE_DRAW_INDEXED_PRIMITIVES_OFFSET 0x00000010
|
|
+
|
|
+#define VIV_FE_WAIT 0x00000000
|
|
+
|
|
+#define VIV_FE_WAIT_HEADER 0x00000000
|
|
+#define VIV_FE_WAIT_HEADER_DELAY__MASK 0x0000ffff
|
|
+#define VIV_FE_WAIT_HEADER_DELAY__SHIFT 0
|
|
+#define VIV_FE_WAIT_HEADER_DELAY(x) (((x) << VIV_FE_WAIT_HEADER_DELAY__SHIFT) & VIV_FE_WAIT_HEADER_DELAY__MASK)
|
|
+#define VIV_FE_WAIT_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_WAIT_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_WAIT_HEADER_OP_WAIT 0x38000000
|
|
+
|
|
+#define VIV_FE_LINK 0x00000000
|
|
+
|
|
+#define VIV_FE_LINK_HEADER 0x00000000
|
|
+#define VIV_FE_LINK_HEADER_PREFETCH__MASK 0x0000ffff
|
|
+#define VIV_FE_LINK_HEADER_PREFETCH__SHIFT 0
|
|
+#define VIV_FE_LINK_HEADER_PREFETCH(x) (((x) << VIV_FE_LINK_HEADER_PREFETCH__SHIFT) & VIV_FE_LINK_HEADER_PREFETCH__MASK)
|
|
+#define VIV_FE_LINK_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_LINK_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_LINK_HEADER_OP_LINK 0x40000000
|
|
+
|
|
+#define VIV_FE_LINK_ADDRESS 0x00000004
|
|
+
|
|
+#define VIV_FE_STALL 0x00000000
|
|
+
|
|
+#define VIV_FE_STALL_HEADER 0x00000000
|
|
+#define VIV_FE_STALL_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_STALL_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_STALL_HEADER_OP_STALL 0x48000000
|
|
+
|
|
+#define VIV_FE_STALL_TOKEN 0x00000004
|
|
+#define VIV_FE_STALL_TOKEN_FROM__MASK 0x0000001f
|
|
+#define VIV_FE_STALL_TOKEN_FROM__SHIFT 0
|
|
+#define VIV_FE_STALL_TOKEN_FROM(x) (((x) << VIV_FE_STALL_TOKEN_FROM__SHIFT) & VIV_FE_STALL_TOKEN_FROM__MASK)
|
|
+#define VIV_FE_STALL_TOKEN_TO__MASK 0x00001f00
|
|
+#define VIV_FE_STALL_TOKEN_TO__SHIFT 8
|
|
+#define VIV_FE_STALL_TOKEN_TO(x) (((x) << VIV_FE_STALL_TOKEN_TO__SHIFT) & VIV_FE_STALL_TOKEN_TO__MASK)
|
|
+
|
|
+#define VIV_FE_CALL 0x00000000
|
|
+
|
|
+#define VIV_FE_CALL_HEADER 0x00000000
|
|
+#define VIV_FE_CALL_HEADER_PREFETCH__MASK 0x0000ffff
|
|
+#define VIV_FE_CALL_HEADER_PREFETCH__SHIFT 0
|
|
+#define VIV_FE_CALL_HEADER_PREFETCH(x) (((x) << VIV_FE_CALL_HEADER_PREFETCH__SHIFT) & VIV_FE_CALL_HEADER_PREFETCH__MASK)
|
|
+#define VIV_FE_CALL_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_CALL_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_CALL_HEADER_OP_CALL 0x50000000
|
|
+
|
|
+#define VIV_FE_CALL_ADDRESS 0x00000004
|
|
+
|
|
+#define VIV_FE_CALL_RETURN_PREFETCH 0x00000008
|
|
+
|
|
+#define VIV_FE_CALL_RETURN_ADDRESS 0x0000000c
|
|
+
|
|
+#define VIV_FE_RETURN 0x00000000
|
|
+
|
|
+#define VIV_FE_RETURN_HEADER 0x00000000
|
|
+#define VIV_FE_RETURN_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_RETURN_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_RETURN_HEADER_OP_RETURN 0x58000000
|
|
+
|
|
+#define VIV_FE_CHIP_SELECT 0x00000000
|
|
+
|
|
+#define VIV_FE_CHIP_SELECT_HEADER 0x00000000
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_OP__MASK 0xf8000000
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_OP__SHIFT 27
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_OP_CHIP_SELECT 0x68000000
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP15 0x00008000
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP14 0x00004000
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP13 0x00002000
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP12 0x00001000
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP11 0x00000800
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP10 0x00000400
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP9 0x00000200
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP8 0x00000100
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP7 0x00000080
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP6 0x00000040
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP5 0x00000020
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP4 0x00000010
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP3 0x00000008
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP2 0x00000004
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1 0x00000002
|
|
+#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0 0x00000001
|
|
+
|
|
+
|
|
+#endif /* CMDSTREAM_XML */
|
|
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h
|
|
new file mode 100644
|
|
index 0000000..9e585d5
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/common.xml.h
|
|
@@ -0,0 +1,249 @@
|
|
+#ifndef COMMON_XML
|
|
+#define COMMON_XML
|
|
+
|
|
+/* Autogenerated file, DO NOT EDIT manually!
|
|
+
|
|
+This file was generated by the rules-ng-ng headergen tool in this git repository:
|
|
+http://0x04.net/cgit/index.cgi/rules-ng-ng
|
|
+git clone git://0x04.net/rules-ng-ng
|
|
+
|
|
+The rules-ng-ng source files this header was generated from are:
|
|
+- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01)
|
|
+- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
|
|
+
|
|
+Copyright (C) 2015
|
|
+*/
|
|
+
|
|
+
|
|
+#define PIPE_ID_PIPE_3D 0x00000000
|
|
+#define PIPE_ID_PIPE_2D 0x00000001
|
|
+#define SYNC_RECIPIENT_FE 0x00000001
|
|
+#define SYNC_RECIPIENT_RA 0x00000005
|
|
+#define SYNC_RECIPIENT_PE 0x00000007
|
|
+#define SYNC_RECIPIENT_DE 0x0000000b
|
|
+#define SYNC_RECIPIENT_VG 0x0000000f
|
|
+#define SYNC_RECIPIENT_TESSELATOR 0x00000010
|
|
+#define SYNC_RECIPIENT_VG2 0x00000011
|
|
+#define SYNC_RECIPIENT_TESSELATOR2 0x00000012
|
|
+#define SYNC_RECIPIENT_VG3 0x00000013
|
|
+#define SYNC_RECIPIENT_TESSELATOR3 0x00000014
|
|
+#define ENDIAN_MODE_NO_SWAP 0x00000000
|
|
+#define ENDIAN_MODE_SWAP_16 0x00000001
|
|
+#define ENDIAN_MODE_SWAP_32 0x00000002
|
|
+#define chipModel_GC300 0x00000300
|
|
+#define chipModel_GC320 0x00000320
|
|
+#define chipModel_GC350 0x00000350
|
|
+#define chipModel_GC355 0x00000355
|
|
+#define chipModel_GC400 0x00000400
|
|
+#define chipModel_GC410 0x00000410
|
|
+#define chipModel_GC420 0x00000420
|
|
+#define chipModel_GC450 0x00000450
|
|
+#define chipModel_GC500 0x00000500
|
|
+#define chipModel_GC530 0x00000530
|
|
+#define chipModel_GC600 0x00000600
|
|
+#define chipModel_GC700 0x00000700
|
|
+#define chipModel_GC800 0x00000800
|
|
+#define chipModel_GC860 0x00000860
|
|
+#define chipModel_GC880 0x00000880
|
|
+#define chipModel_GC1000 0x00001000
|
|
+#define chipModel_GC2000 0x00002000
|
|
+#define chipModel_GC2100 0x00002100
|
|
+#define chipModel_GC4000 0x00004000
|
|
+#define RGBA_BITS_R 0x00000001
|
|
+#define RGBA_BITS_G 0x00000002
|
|
+#define RGBA_BITS_B 0x00000004
|
|
+#define RGBA_BITS_A 0x00000008
|
|
+#define chipFeatures_FAST_CLEAR 0x00000001
|
|
+#define chipFeatures_SPECIAL_ANTI_ALIASING 0x00000002
|
|
+#define chipFeatures_PIPE_3D 0x00000004
|
|
+#define chipFeatures_DXT_TEXTURE_COMPRESSION 0x00000008
|
|
+#define chipFeatures_DEBUG_MODE 0x00000010
|
|
+#define chipFeatures_Z_COMPRESSION 0x00000020
|
|
+#define chipFeatures_YUV420_SCALER 0x00000040
|
|
+#define chipFeatures_MSAA 0x00000080
|
|
+#define chipFeatures_DC 0x00000100
|
|
+#define chipFeatures_PIPE_2D 0x00000200
|
|
+#define chipFeatures_ETC1_TEXTURE_COMPRESSION 0x00000400
|
|
+#define chipFeatures_FAST_SCALER 0x00000800
|
|
+#define chipFeatures_HIGH_DYNAMIC_RANGE 0x00001000
|
|
+#define chipFeatures_YUV420_TILER 0x00002000
|
|
+#define chipFeatures_MODULE_CG 0x00004000
|
|
+#define chipFeatures_MIN_AREA 0x00008000
|
|
+#define chipFeatures_NO_EARLY_Z 0x00010000
|
|
+#define chipFeatures_NO_422_TEXTURE 0x00020000
|
|
+#define chipFeatures_BUFFER_INTERLEAVING 0x00040000
|
|
+#define chipFeatures_BYTE_WRITE_2D 0x00080000
|
|
+#define chipFeatures_NO_SCALER 0x00100000
|
|
+#define chipFeatures_YUY2_AVERAGING 0x00200000
|
|
+#define chipFeatures_HALF_PE_CACHE 0x00400000
|
|
+#define chipFeatures_HALF_TX_CACHE 0x00800000
|
|
+#define chipFeatures_YUY2_RENDER_TARGET 0x01000000
|
|
+#define chipFeatures_MEM32 0x02000000
|
|
+#define chipFeatures_PIPE_VG 0x04000000
|
|
+#define chipFeatures_VGTS 0x08000000
|
|
+#define chipFeatures_FE20 0x10000000
|
|
+#define chipFeatures_BYTE_WRITE_3D 0x20000000
|
|
+#define chipFeatures_RS_YUV_TARGET 0x40000000
|
|
+#define chipFeatures_32_BIT_INDICES 0x80000000
|
|
+#define chipMinorFeatures0_FLIP_Y 0x00000001
|
|
+#define chipMinorFeatures0_DUAL_RETURN_BUS 0x00000002
|
|
+#define chipMinorFeatures0_ENDIANNESS_CONFIG 0x00000004
|
|
+#define chipMinorFeatures0_TEXTURE_8K 0x00000008
|
|
+#define chipMinorFeatures0_CORRECT_TEXTURE_CONVERTER 0x00000010
|
|
+#define chipMinorFeatures0_SPECIAL_MSAA_LOD 0x00000020
|
|
+#define chipMinorFeatures0_FAST_CLEAR_FLUSH 0x00000040
|
|
+#define chipMinorFeatures0_2DPE20 0x00000080
|
|
+#define chipMinorFeatures0_CORRECT_AUTO_DISABLE 0x00000100
|
|
+#define chipMinorFeatures0_RENDERTARGET_8K 0x00000200
|
|
+#define chipMinorFeatures0_2BITPERTILE 0x00000400
|
|
+#define chipMinorFeatures0_SEPARATE_TILE_STATUS_WHEN_INTERLEAVED 0x00000800
|
|
+#define chipMinorFeatures0_SUPER_TILED 0x00001000
|
|
+#define chipMinorFeatures0_VG_20 0x00002000
|
|
+#define chipMinorFeatures0_TS_EXTENDED_COMMANDS 0x00004000
|
|
+#define chipMinorFeatures0_COMPRESSION_FIFO_FIXED 0x00008000
|
|
+#define chipMinorFeatures0_HAS_SIGN_FLOOR_CEIL 0x00010000
|
|
+#define chipMinorFeatures0_VG_FILTER 0x00020000
|
|
+#define chipMinorFeatures0_VG_21 0x00040000
|
|
+#define chipMinorFeatures0_SHADER_HAS_W 0x00080000
|
|
+#define chipMinorFeatures0_HAS_SQRT_TRIG 0x00100000
|
|
+#define chipMinorFeatures0_MORE_MINOR_FEATURES 0x00200000
|
|
+#define chipMinorFeatures0_MC20 0x00400000
|
|
+#define chipMinorFeatures0_MSAA_SIDEBAND 0x00800000
|
|
+#define chipMinorFeatures0_BUG_FIXES0 0x01000000
|
|
+#define chipMinorFeatures0_VAA 0x02000000
|
|
+#define chipMinorFeatures0_BYPASS_IN_MSAA 0x04000000
|
|
+#define chipMinorFeatures0_HZ 0x08000000
|
|
+#define chipMinorFeatures0_NEW_TEXTURE 0x10000000
|
|
+#define chipMinorFeatures0_2D_A8_TARGET 0x20000000
|
|
+#define chipMinorFeatures0_CORRECT_STENCIL 0x40000000
|
|
+#define chipMinorFeatures0_ENHANCE_VR 0x80000000
|
|
+#define chipMinorFeatures1_RSUV_SWIZZLE 0x00000001
|
|
+#define chipMinorFeatures1_V2_COMPRESSION 0x00000002
|
|
+#define chipMinorFeatures1_VG_DOUBLE_BUFFER 0x00000004
|
|
+#define chipMinorFeatures1_EXTRA_EVENT_STATES 0x00000008
|
|
+#define chipMinorFeatures1_NO_STRIPING_NEEDED 0x00000010
|
|
+#define chipMinorFeatures1_TEXTURE_STRIDE 0x00000020
|
|
+#define chipMinorFeatures1_BUG_FIXES3 0x00000040
|
|
+#define chipMinorFeatures1_AUTO_DISABLE 0x00000080
|
|
+#define chipMinorFeatures1_AUTO_RESTART_TS 0x00000100
|
|
+#define chipMinorFeatures1_DISABLE_PE_GATING 0x00000200
|
|
+#define chipMinorFeatures1_L2_WINDOWING 0x00000400
|
|
+#define chipMinorFeatures1_HALF_FLOAT 0x00000800
|
|
+#define chipMinorFeatures1_PIXEL_DITHER 0x00001000
|
|
+#define chipMinorFeatures1_TWO_STENCIL_REFERENCE 0x00002000
|
|
+#define chipMinorFeatures1_EXTENDED_PIXEL_FORMAT 0x00004000
|
|
+#define chipMinorFeatures1_CORRECT_MIN_MAX_DEPTH 0x00008000
|
|
+#define chipMinorFeatures1_2D_DITHER 0x00010000
|
|
+#define chipMinorFeatures1_BUG_FIXES5 0x00020000
|
|
+#define chipMinorFeatures1_NEW_2D 0x00040000
|
|
+#define chipMinorFeatures1_NEW_FP 0x00080000
|
|
+#define chipMinorFeatures1_TEXTURE_HALIGN 0x00100000
|
|
+#define chipMinorFeatures1_NON_POWER_OF_TWO 0x00200000
|
|
+#define chipMinorFeatures1_LINEAR_TEXTURE_SUPPORT 0x00400000
|
|
+#define chipMinorFeatures1_HALTI0 0x00800000
|
|
+#define chipMinorFeatures1_CORRECT_OVERFLOW_VG 0x01000000
|
|
+#define chipMinorFeatures1_NEGATIVE_LOG_FIX 0x02000000
|
|
+#define chipMinorFeatures1_RESOLVE_OFFSET 0x04000000
|
|
+#define chipMinorFeatures1_OK_TO_GATE_AXI_CLOCK 0x08000000
|
|
+#define chipMinorFeatures1_MMU_VERSION 0x10000000
|
|
+#define chipMinorFeatures1_WIDE_LINE 0x20000000
|
|
+#define chipMinorFeatures1_BUG_FIXES6 0x40000000
|
|
+#define chipMinorFeatures1_FC_FLUSH_STALL 0x80000000
|
|
+#define chipMinorFeatures2_LINE_LOOP 0x00000001
|
|
+#define chipMinorFeatures2_LOGIC_OP 0x00000002
|
|
+#define chipMinorFeatures2_UNK2 0x00000004
|
|
+#define chipMinorFeatures2_SUPERTILED_TEXTURE 0x00000008
|
|
+#define chipMinorFeatures2_UNK4 0x00000010
|
|
+#define chipMinorFeatures2_RECT_PRIMITIVE 0x00000020
|
|
+#define chipMinorFeatures2_COMPOSITION 0x00000040
|
|
+#define chipMinorFeatures2_CORRECT_AUTO_DISABLE_COUNT 0x00000080
|
|
+#define chipMinorFeatures2_UNK8 0x00000100
|
|
+#define chipMinorFeatures2_UNK9 0x00000200
|
|
+#define chipMinorFeatures2_UNK10 0x00000400
|
|
+#define chipMinorFeatures2_SAMPLERBASE_16 0x00000800
|
|
+#define chipMinorFeatures2_UNK12 0x00001000
|
|
+#define chipMinorFeatures2_UNK13 0x00002000
|
|
+#define chipMinorFeatures2_UNK14 0x00004000
|
|
+#define chipMinorFeatures2_EXTRA_TEXTURE_STATE 0x00008000
|
|
+#define chipMinorFeatures2_FULL_DIRECTFB 0x00010000
|
|
+#define chipMinorFeatures2_2D_TILING 0x00020000
|
|
+#define chipMinorFeatures2_THREAD_WALKER_IN_PS 0x00040000
|
|
+#define chipMinorFeatures2_TILE_FILLER 0x00080000
|
|
+#define chipMinorFeatures2_UNK20 0x00100000
|
|
+#define chipMinorFeatures2_2D_MULTI_SOURCE_BLIT 0x00200000
|
|
+#define chipMinorFeatures2_UNK22 0x00400000
|
|
+#define chipMinorFeatures2_UNK23 0x00800000
|
|
+#define chipMinorFeatures2_UNK24 0x01000000
|
|
+#define chipMinorFeatures2_MIXED_STREAMS 0x02000000
|
|
+#define chipMinorFeatures2_2D_420_L2CACHE 0x04000000
|
|
+#define chipMinorFeatures2_UNK27 0x08000000
|
|
+#define chipMinorFeatures2_2D_NO_INDEX8_BRUSH 0x10000000
|
|
+#define chipMinorFeatures2_TEXTURE_TILED_READ 0x20000000
|
|
+#define chipMinorFeatures2_UNK30 0x40000000
|
|
+#define chipMinorFeatures2_UNK31 0x80000000
|
|
+#define chipMinorFeatures3_ROTATION_STALL_FIX 0x00000001
|
|
+#define chipMinorFeatures3_UNK1 0x00000002
|
|
+#define chipMinorFeatures3_2D_MULTI_SOURCE_BLT_EX 0x00000004
|
|
+#define chipMinorFeatures3_UNK3 0x00000008
|
|
+#define chipMinorFeatures3_UNK4 0x00000010
|
|
+#define chipMinorFeatures3_UNK5 0x00000020
|
|
+#define chipMinorFeatures3_UNK6 0x00000040
|
|
+#define chipMinorFeatures3_UNK7 0x00000080
|
|
+#define chipMinorFeatures3_UNK8 0x00000100
|
|
+#define chipMinorFeatures3_UNK9 0x00000200
|
|
+#define chipMinorFeatures3_BUG_FIXES10 0x00000400
|
|
+#define chipMinorFeatures3_UNK11 0x00000800
|
|
+#define chipMinorFeatures3_BUG_FIXES11 0x00001000
|
|
+#define chipMinorFeatures3_UNK13 0x00002000
|
|
+#define chipMinorFeatures3_UNK14 0x00004000
|
|
+#define chipMinorFeatures3_UNK15 0x00008000
|
|
+#define chipMinorFeatures3_UNK16 0x00010000
|
|
+#define chipMinorFeatures3_UNK17 0x00020000
|
|
+#define chipMinorFeatures3_UNK18 0x00040000
|
|
+#define chipMinorFeatures3_UNK19 0x00080000
|
|
+#define chipMinorFeatures3_UNK20 0x00100000
|
|
+#define chipMinorFeatures3_UNK21 0x00200000
|
|
+#define chipMinorFeatures3_UNK22 0x00400000
|
|
+#define chipMinorFeatures3_UNK23 0x00800000
|
|
+#define chipMinorFeatures3_UNK24 0x01000000
|
|
+#define chipMinorFeatures3_UNK25 0x02000000
|
|
+#define chipMinorFeatures3_UNK26 0x04000000
|
|
+#define chipMinorFeatures3_UNK27 0x08000000
|
|
+#define chipMinorFeatures3_UNK28 0x10000000
|
|
+#define chipMinorFeatures3_UNK29 0x20000000
|
|
+#define chipMinorFeatures3_UNK30 0x40000000
|
|
+#define chipMinorFeatures3_UNK31 0x80000000
|
|
+#define chipMinorFeatures4_UNK0 0x00000001
|
|
+#define chipMinorFeatures4_UNK1 0x00000002
|
|
+#define chipMinorFeatures4_UNK2 0x00000004
|
|
+#define chipMinorFeatures4_UNK3 0x00000008
|
|
+#define chipMinorFeatures4_UNK4 0x00000010
|
|
+#define chipMinorFeatures4_UNK5 0x00000020
|
|
+#define chipMinorFeatures4_UNK6 0x00000040
|
|
+#define chipMinorFeatures4_UNK7 0x00000080
|
|
+#define chipMinorFeatures4_UNK8 0x00000100
|
|
+#define chipMinorFeatures4_UNK9 0x00000200
|
|
+#define chipMinorFeatures4_UNK10 0x00000400
|
|
+#define chipMinorFeatures4_UNK11 0x00000800
|
|
+#define chipMinorFeatures4_UNK12 0x00001000
|
|
+#define chipMinorFeatures4_UNK13 0x00002000
|
|
+#define chipMinorFeatures4_UNK14 0x00004000
|
|
+#define chipMinorFeatures4_UNK15 0x00008000
|
|
+#define chipMinorFeatures4_UNK16 0x00010000
|
|
+#define chipMinorFeatures4_UNK17 0x00020000
|
|
+#define chipMinorFeatures4_UNK18 0x00040000
|
|
+#define chipMinorFeatures4_UNK19 0x00080000
|
|
+#define chipMinorFeatures4_UNK20 0x00100000
|
|
+#define chipMinorFeatures4_UNK21 0x00200000
|
|
+#define chipMinorFeatures4_UNK22 0x00400000
|
|
+#define chipMinorFeatures4_UNK23 0x00800000
|
|
+#define chipMinorFeatures4_UNK24 0x01000000
|
|
+#define chipMinorFeatures4_UNK25 0x02000000
|
|
+#define chipMinorFeatures4_UNK26 0x04000000
|
|
+#define chipMinorFeatures4_UNK27 0x08000000
|
|
+#define chipMinorFeatures4_UNK28 0x10000000
|
|
+#define chipMinorFeatures4_UNK29 0x20000000
|
|
+#define chipMinorFeatures4_UNK30 0x40000000
|
|
+#define chipMinorFeatures4_UNK31 0x80000000
|
|
+
|
|
+#endif /* COMMON_XML */
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
|
|
new file mode 100644
|
|
index 0000000..332c55e
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
|
|
@@ -0,0 +1,268 @@
|
|
+/*
|
|
+ * Copyright (C) 2014 Etnaviv Project
|
|
+ * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#include "etnaviv_gpu.h"
|
|
+#include "etnaviv_gem.h"
|
|
+#include "etnaviv_mmu.h"
|
|
+
|
|
+#include "common.xml.h"
|
|
+#include "state.xml.h"
|
|
+#include "cmdstream.xml.h"
|
|
+
|
|
+/*
|
|
+ * Command Buffer helper:
|
|
+ */
|
|
+
|
|
+
|
|
+static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
|
|
+{
|
|
+ u32 *vaddr = (u32 *)buffer->vaddr;
|
|
+
|
|
+ BUG_ON(buffer->user_size >= buffer->size);
|
|
+
|
|
+ vaddr[buffer->user_size / 4] = data;
|
|
+ buffer->user_size += 4;
|
|
+}
|
|
+
|
|
+static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
|
|
+ u32 reg, u32 value)
|
|
+{
|
|
+ u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
|
|
+
|
|
+ buffer->user_size = ALIGN(buffer->user_size, 8);
|
|
+
|
|
+ /* write a register via cmd stream */
|
|
+ OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
|
|
+ VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
|
|
+ VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
|
|
+ OUT(buffer, value);
|
|
+}
|
|
+
|
|
+static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
|
|
+{
|
|
+ buffer->user_size = ALIGN(buffer->user_size, 8);
|
|
+
|
|
+ OUT(buffer, VIV_FE_END_HEADER_OP_END);
|
|
+}
|
|
+
|
|
+static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
|
|
+{
|
|
+ buffer->user_size = ALIGN(buffer->user_size, 8);
|
|
+
|
|
+ OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
|
|
+}
|
|
+
|
|
+static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
|
|
+ u16 prefetch, u32 address)
|
|
+{
|
|
+ buffer->user_size = ALIGN(buffer->user_size, 8);
|
|
+
|
|
+ OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
|
|
+ VIV_FE_LINK_HEADER_PREFETCH(prefetch));
|
|
+ OUT(buffer, address);
|
|
+}
|
|
+
|
|
+static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
|
|
+ u32 from, u32 to)
|
|
+{
|
|
+ buffer->user_size = ALIGN(buffer->user_size, 8);
|
|
+
|
|
+ OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
|
|
+ OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
|
|
+}
|
|
+
|
|
+static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
|
|
+{
|
|
+ u32 flush;
|
|
+ u32 stall;
|
|
+
|
|
+ /*
|
|
+ * This assumes that if we're switching to 2D, we're switching
|
|
+ * away from 3D, and vice versa. Hence, if we're switching to
|
|
+ * the 2D core, we need to flush the 3D depth and color caches,
|
|
+ * otherwise we need to flush the 2D pixel engine cache.
|
|
+ */
|
|
+ if (pipe == ETNA_PIPE_2D)
|
|
+ flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
|
|
+ else
|
|
+ flush = VIVS_GL_FLUSH_CACHE_PE2D;
|
|
+
|
|
+ stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
|
|
+ VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
|
|
+
|
|
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
|
|
+ CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);
|
|
+
|
|
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
|
|
+
|
|
+ CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
|
|
+ VIVS_GL_PIPE_SELECT_PIPE(pipe));
|
|
+}
|
|
+
|
|
+static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
|
|
+{
|
|
+ return buf->paddr - gpu->memory_base;
|
|
+}
|
|
+
|
|
+static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
|
|
+ struct etnaviv_cmdbuf *buf, u32 off, u32 len)
|
|
+{
|
|
+ u32 size = buf->size;
|
|
+ u32 *ptr = buf->vaddr + off;
|
|
+
|
|
+ dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
|
|
+ ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);
|
|
+
|
|
+ print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
|
|
+ ptr, len * 4, 0);
|
|
+}
|
|
+
|
|
+u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ struct etnaviv_cmdbuf *buffer = gpu->buffer;
|
|
+
|
|
+ /* initialize buffer */
|
|
+ buffer->user_size = 0;
|
|
+
|
|
+ CMD_WAIT(buffer);
|
|
+ CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);
|
|
+
|
|
+ return buffer->user_size / 8;
|
|
+}
|
|
+
|
|
+void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ struct etnaviv_cmdbuf *buffer = gpu->buffer;
|
|
+
|
|
+ /* Replace the last WAIT with an END */
|
|
+ buffer->user_size -= 16;
|
|
+
|
|
+ CMD_END(buffer);
|
|
+ mb();
|
|
+}
|
|
+
|
|
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
|
|
+ struct etnaviv_cmdbuf *cmdbuf)
|
|
+{
|
|
+ struct etnaviv_cmdbuf *buffer = gpu->buffer;
|
|
+ u32 *lw = buffer->vaddr + buffer->user_size - 16;
|
|
+ u32 back, link_target, link_size, reserve_size, extra_size = 0;
|
|
+
|
|
+ if (drm_debug & DRM_UT_DRIVER)
|
|
+ etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
|
|
+
|
|
+ /*
|
|
+ * If we need to flush the MMU prior to submitting this buffer, we
|
|
+ * will need to append a mmu flush load state, followed by a new
|
|
+ * link to this buffer - a total of four additional words.
|
|
+ */
|
|
+ if (gpu->mmu->need_flush || gpu->switch_context) {
|
|
+ /* link command */
|
|
+ extra_size += 2;
|
|
+ /* flush command */
|
|
+ if (gpu->mmu->need_flush)
|
|
+ extra_size += 2;
|
|
+ /* pipe switch commands */
|
|
+ if (gpu->switch_context)
|
|
+ extra_size += 8;
|
|
+ }
|
|
+
|
|
+ reserve_size = (6 + extra_size) * 4;
|
|
+
|
|
+ /*
|
|
+ * if we are going to completely overflow the buffer, we need to wrap.
|
|
+ */
|
|
+ if (buffer->user_size + reserve_size > buffer->size)
|
|
+ buffer->user_size = 0;
|
|
+
|
|
+ /* save offset back into main buffer */
|
|
+ back = buffer->user_size + reserve_size - 6 * 4;
|
|
+ link_target = gpu_va(gpu, buffer) + buffer->user_size;
|
|
+ link_size = 6;
|
|
+
|
|
+ /* Skip over any extra instructions */
|
|
+ link_target += extra_size * sizeof(u32);
|
|
+
|
|
+ if (drm_debug & DRM_UT_DRIVER)
|
|
+ pr_info("stream link to 0x%08x @ 0x%08x %p\n",
|
|
+ link_target, gpu_va(gpu, cmdbuf), cmdbuf->vaddr);
|
|
+
|
|
+ /* jump back from cmd to main buffer */
|
|
+ CMD_LINK(cmdbuf, link_size, link_target);
|
|
+
|
|
+ link_target = gpu_va(gpu, cmdbuf);
|
|
+ link_size = cmdbuf->size / 8;
|
|
+
|
|
+
|
|
+
|
|
+ if (drm_debug & DRM_UT_DRIVER) {
|
|
+ print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
|
|
+ cmdbuf->vaddr, cmdbuf->size, 0);
|
|
+
|
|
+ pr_info("link op: %p\n", lw);
|
|
+ pr_info("link addr: %p\n", lw + 1);
|
|
+ pr_info("addr: 0x%08x\n", link_target);
|
|
+ pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
|
|
+ pr_info("event: %d\n", event);
|
|
+ }
|
|
+
|
|
+ if (gpu->mmu->need_flush || gpu->switch_context) {
|
|
+ u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;
|
|
+
|
|
+ if (gpu->mmu->need_flush) {
|
|
+ /* Add the MMU flush */
|
|
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
|
|
+ VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
|
|
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
|
|
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
|
|
+ VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
|
|
+ VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
|
|
+
|
|
+ gpu->mmu->need_flush = false;
|
|
+ }
|
|
+
|
|
+ if (gpu->switch_context) {
|
|
+ etnaviv_cmd_select_pipe(buffer, cmdbuf->exec_state);
|
|
+ gpu->switch_context = false;
|
|
+ }
|
|
+
|
|
+ /* And the link to the first buffer */
|
|
+ CMD_LINK(buffer, link_size, link_target);
|
|
+
|
|
+ /* Update the link target to point to above instructions */
|
|
+ link_target = new_target;
|
|
+ link_size = extra_size;
|
|
+ }
|
|
+
|
|
+ /* trigger event */
|
|
+ CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
|
|
+ VIVS_GL_EVENT_FROM_PE);
|
|
+
|
|
+ /* append WAIT/LINK to main buffer */
|
|
+ CMD_WAIT(buffer);
|
|
+ CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));
|
|
+
|
|
+ /* Change WAIT into a LINK command; write the address first. */
|
|
+ *(lw + 1) = link_target;
|
|
+ mb();
|
|
+ *(lw) = VIV_FE_LINK_HEADER_OP_LINK |
|
|
+ VIV_FE_LINK_HEADER_PREFETCH(link_size);
|
|
+ mb();
|
|
+
|
|
+ if (drm_debug & DRM_UT_DRIVER)
|
|
+ etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
|
|
+}
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
|
|
new file mode 100644
|
|
index 0000000..dcfd565
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
|
|
@@ -0,0 +1,209 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+
|
|
+#include "etnaviv_gem.h"
|
|
+#include "etnaviv_gpu.h"
|
|
+
|
|
+#include "cmdstream.xml.h"
|
|
+
|
|
+#define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT)
|
|
+
|
|
+struct etna_validation_state {
|
|
+ struct etnaviv_gpu *gpu;
|
|
+ const struct drm_etnaviv_gem_submit_reloc *relocs;
|
|
+ unsigned int num_relocs;
|
|
+ u32 *start;
|
|
+};
|
|
+
|
|
+static const struct {
|
|
+ u16 offset;
|
|
+ u16 size;
|
|
+} etnaviv_sensitive_states[] __initconst = {
|
|
+#define ST(start, num) { (start) >> 2, (num) }
|
|
+ /* 2D */
|
|
+ ST(0x1200, 1),
|
|
+ ST(0x1228, 1),
|
|
+ ST(0x1238, 1),
|
|
+ ST(0x1284, 1),
|
|
+ ST(0x128c, 1),
|
|
+ ST(0x1304, 1),
|
|
+ ST(0x1310, 1),
|
|
+ ST(0x1318, 1),
|
|
+ ST(0x12800, 4),
|
|
+ ST(0x128a0, 4),
|
|
+ ST(0x128c0, 4),
|
|
+ ST(0x12970, 4),
|
|
+ ST(0x12a00, 8),
|
|
+ ST(0x12b40, 8),
|
|
+ ST(0x12b80, 8),
|
|
+ ST(0x12ce0, 8),
|
|
+ /* 3D */
|
|
+ ST(0x0644, 1),
|
|
+ ST(0x064c, 1),
|
|
+ ST(0x0680, 8),
|
|
+ ST(0x1410, 1),
|
|
+ ST(0x1430, 1),
|
|
+ ST(0x1458, 1),
|
|
+ ST(0x1460, 8),
|
|
+ ST(0x1480, 8),
|
|
+ ST(0x1500, 8),
|
|
+ ST(0x1520, 8),
|
|
+ ST(0x1608, 1),
|
|
+ ST(0x1610, 1),
|
|
+ ST(0x1658, 1),
|
|
+ ST(0x165c, 1),
|
|
+ ST(0x1664, 1),
|
|
+ ST(0x1668, 1),
|
|
+ ST(0x16a4, 1),
|
|
+ ST(0x16c0, 8),
|
|
+ ST(0x16e0, 8),
|
|
+ ST(0x1740, 8),
|
|
+ ST(0x2400, 14 * 16),
|
|
+ ST(0x10800, 32 * 16),
|
|
+#undef ST
|
|
+};
|
|
+
|
|
+#define ETNAVIV_STATES_SIZE (VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK + 1u)
|
|
+static DECLARE_BITMAP(etnaviv_states, ETNAVIV_STATES_SIZE);
|
|
+
|
|
+void __init etnaviv_validate_init(void)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(etnaviv_sensitive_states); i++)
|
|
+ bitmap_set(etnaviv_states, etnaviv_sensitive_states[i].offset,
|
|
+ etnaviv_sensitive_states[i].size);
|
|
+}
|
|
+
|
|
+static void etnaviv_warn_if_non_sensitive(struct etna_validation_state *state,
|
|
+ unsigned int buf_offset, unsigned int state_addr)
|
|
+{
|
|
+ if (state->num_relocs && state->relocs->submit_offset < buf_offset) {
|
|
+ dev_warn_once(state->gpu->dev,
|
|
+ "%s: relocation for non-sensitive state 0x%x at offset %u\n",
|
|
+ __func__, state_addr,
|
|
+ state->relocs->submit_offset);
|
|
+ while (state->num_relocs &&
|
|
+ state->relocs->submit_offset < buf_offset) {
|
|
+ state->relocs++;
|
|
+ state->num_relocs--;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static bool etnaviv_validate_load_state(struct etna_validation_state *state,
|
|
+ u32 *ptr, unsigned int state_offset, unsigned int num)
|
|
+{
|
|
+ unsigned int size = min(ETNAVIV_STATES_SIZE, state_offset + num);
|
|
+ unsigned int st_offset = state_offset, buf_offset;
|
|
+
|
|
+ for_each_set_bit_from(st_offset, etnaviv_states, size) {
|
|
+ buf_offset = (ptr - state->start +
|
|
+ st_offset - state_offset) * 4;
|
|
+
|
|
+ etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4);
|
|
+ if (state->num_relocs &&
|
|
+ state->relocs->submit_offset == buf_offset) {
|
|
+ state->relocs++;
|
|
+ state->num_relocs--;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ dev_warn_ratelimited(state->gpu->dev,
|
|
+ "%s: load state touches restricted state 0x%x at offset %u\n",
|
|
+ __func__, st_offset * 4, buf_offset);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ if (state->num_relocs) {
|
|
+ buf_offset = (ptr - state->start + num) * 4;
|
|
+ etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4 +
|
|
+ state->relocs->submit_offset -
|
|
+ buf_offset);
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static uint8_t cmd_length[32] = {
|
|
+ [FE_OPCODE_DRAW_PRIMITIVES] = 4,
|
|
+ [FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
|
|
+ [FE_OPCODE_NOP] = 2,
|
|
+ [FE_OPCODE_STALL] = 2,
|
|
+};
|
|
+
|
|
+bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, u32 *stream,
|
|
+ unsigned int size,
|
|
+ struct drm_etnaviv_gem_submit_reloc *relocs,
|
|
+ unsigned int reloc_size)
|
|
+{
|
|
+ struct etna_validation_state state;
|
|
+ u32 *buf = stream;
|
|
+ u32 *end = buf + size;
|
|
+
|
|
+ state.gpu = gpu;
|
|
+ state.relocs = relocs;
|
|
+ state.num_relocs = reloc_size;
|
|
+ state.start = stream;
|
|
+
|
|
+ while (buf < end) {
|
|
+ u32 cmd = *buf;
|
|
+ unsigned int len, n, off;
|
|
+ unsigned int op = cmd >> 27;
|
|
+
|
|
+ switch (op) {
|
|
+ case FE_OPCODE_LOAD_STATE:
|
|
+ n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT);
|
|
+ len = ALIGN(1 + n, 2);
|
|
+ if (buf + len > end)
|
|
+ break;
|
|
+
|
|
+ off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET);
|
|
+ if (!etnaviv_validate_load_state(&state, buf + 1,
|
|
+ off, n))
|
|
+ return false;
|
|
+ break;
|
|
+
|
|
+ case FE_OPCODE_DRAW_2D:
|
|
+ n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT);
|
|
+ if (n == 0)
|
|
+ n = 256;
|
|
+ len = 2 + n * 2;
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ len = cmd_length[op];
|
|
+ if (len == 0) {
|
|
+ dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n",
|
|
+ __func__, op, buf - state.start);
|
|
+ return false;
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ buf += len;
|
|
+ }
|
|
+
|
|
+ if (buf > end) {
|
|
+ dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n",
|
|
+ __func__, buf - state.start, size);
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+}
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
|
|
new file mode 100644
|
|
index 0000000..5c89ebb
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
|
|
@@ -0,0 +1,707 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#include <linux/component.h>
|
|
+#include <linux/of_platform.h>
|
|
+
|
|
+#include "etnaviv_drv.h"
|
|
+#include "etnaviv_gpu.h"
|
|
+#include "etnaviv_gem.h"
|
|
+#include "etnaviv_mmu.h"
|
|
+#include "etnaviv_gem.h"
|
|
+
|
|
+#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
|
|
+static bool reglog;
|
|
+MODULE_PARM_DESC(reglog, "Enable register read/write logging");
|
|
+module_param(reglog, bool, 0600);
|
|
+#else
|
|
+#define reglog 0
|
|
+#endif
|
|
+
|
|
+void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
|
|
+ const char *dbgname)
|
|
+{
|
|
+ struct resource *res;
|
|
+ void __iomem *ptr;
|
|
+
|
|
+ if (name)
|
|
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
|
|
+ else
|
|
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
+
|
|
+ ptr = devm_ioremap_resource(&pdev->dev, res);
|
|
+ if (IS_ERR(ptr)) {
|
|
+ dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
|
|
+ PTR_ERR(ptr));
|
|
+ return ptr;
|
|
+ }
|
|
+
|
|
+ if (reglog)
|
|
+ dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
|
|
+ dbgname, ptr, (size_t)resource_size(res));
|
|
+
|
|
+ return ptr;
|
|
+}
|
|
+
|
|
+void etnaviv_writel(u32 data, void __iomem *addr)
|
|
+{
|
|
+ if (reglog)
|
|
+ printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
|
|
+
|
|
+ writel(data, addr);
|
|
+}
|
|
+
|
|
+u32 etnaviv_readl(const void __iomem *addr)
|
|
+{
|
|
+ u32 val = readl(addr);
|
|
+
|
|
+ if (reglog)
|
|
+ printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
|
|
+
|
|
+ return val;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * DRM operations:
|
|
+ */
|
|
+
|
|
+
|
|
+static void load_gpu(struct drm_device *dev)
|
|
+{
|
|
+ struct etnaviv_drm_private *priv = dev->dev_private;
|
|
+ unsigned int i;
|
|
+
|
|
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
|
|
+ struct etnaviv_gpu *g = priv->gpu[i];
|
|
+
|
|
+ if (g) {
|
|
+ int ret;
|
|
+
|
|
+ ret = etnaviv_gpu_init(g);
|
|
+ if (ret) {
|
|
+ dev_err(g->dev, "hw init failed: %d\n", ret);
|
|
+ priv->gpu[i] = NULL;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
|
|
+{
|
|
+ struct etnaviv_file_private *ctx;
|
|
+
|
|
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
|
|
+ if (!ctx)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ file->driver_priv = ctx;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
|
|
+{
|
|
+ struct etnaviv_drm_private *priv = dev->dev_private;
|
|
+ struct etnaviv_file_private *ctx = file->driver_priv;
|
|
+ unsigned int i;
|
|
+
|
|
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
|
|
+ struct etnaviv_gpu *gpu = priv->gpu[i];
|
|
+
|
|
+ if (gpu) {
|
|
+ mutex_lock(&gpu->lock);
|
|
+ if (gpu->lastctx == ctx)
|
|
+ gpu->lastctx = NULL;
|
|
+ mutex_unlock(&gpu->lock);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ kfree(ctx);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * DRM debugfs:
|
|
+ */
|
|
+
|
|
+#ifdef CONFIG_DEBUG_FS
|
|
+static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
|
|
+{
|
|
+ struct etnaviv_drm_private *priv = dev->dev_private;
|
|
+
|
|
+ etnaviv_gem_describe_objects(priv, m);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ read_lock(&dev->vma_offset_manager->vm_lock);
|
|
+ ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
|
|
+ read_unlock(&dev->vma_offset_manager->vm_lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
|
|
+{
|
|
+ seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
|
|
+
|
|
+ mutex_lock(&gpu->mmu->lock);
|
|
+ drm_mm_dump_table(m, &gpu->mmu->mm);
|
|
+ mutex_unlock(&gpu->mmu->lock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
|
|
+{
|
|
+ struct etnaviv_cmdbuf *buf = gpu->buffer;
|
|
+ u32 size = buf->size;
|
|
+ u32 *ptr = buf->vaddr;
|
|
+ u32 i;
|
|
+
|
|
+ seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
|
|
+ buf->vaddr, (u64)buf->paddr, size - buf->user_size);
|
|
+
|
|
+ for (i = 0; i < size / 4; i++) {
|
|
+ if (i && !(i % 4))
|
|
+ seq_puts(m, "\n");
|
|
+ if (i % 4 == 0)
|
|
+ seq_printf(m, "\t0x%p: ", ptr + i);
|
|
+ seq_printf(m, "%08x ", *(ptr + i));
|
|
+ }
|
|
+ seq_puts(m, "\n");
|
|
+}
|
|
+
|
|
+static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
|
|
+{
|
|
+ seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));
|
|
+
|
|
+ mutex_lock(&gpu->lock);
|
|
+ etnaviv_buffer_dump(gpu, m);
|
|
+ mutex_unlock(&gpu->lock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int show_unlocked(struct seq_file *m, void *arg)
|
|
+{
|
|
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
|
|
+ struct drm_device *dev = node->minor->dev;
|
|
+ int (*show)(struct drm_device *dev, struct seq_file *m) =
|
|
+ node->info_ent->data;
|
|
+
|
|
+ return show(dev, m);
|
|
+}
|
|
+
|
|
+static int show_each_gpu(struct seq_file *m, void *arg)
|
|
+{
|
|
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
|
|
+ struct drm_device *dev = node->minor->dev;
|
|
+ struct etnaviv_drm_private *priv = dev->dev_private;
|
|
+ struct etnaviv_gpu *gpu;
|
|
+ int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
|
|
+ node->info_ent->data;
|
|
+ unsigned int i;
|
|
+ int ret = 0;
|
|
+
|
|
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
|
|
+ gpu = priv->gpu[i];
|
|
+ if (!gpu)
|
|
+ continue;
|
|
+
|
|
+ ret = show(gpu, m);
|
|
+ if (ret < 0)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static struct drm_info_list etnaviv_debugfs_list[] = {
|
|
+ {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
|
|
+ {"gem", show_unlocked, 0, etnaviv_gem_show},
|
|
+ { "mm", show_unlocked, 0, etnaviv_mm_show },
|
|
+ {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
|
|
+ {"ring", show_each_gpu, 0, etnaviv_ring_show},
|
|
+};
|
|
+
|
|
+static int etnaviv_debugfs_init(struct drm_minor *minor)
|
|
+{
|
|
+ struct drm_device *dev = minor->dev;
|
|
+ int ret;
|
|
+
|
|
+ ret = drm_debugfs_create_files(etnaviv_debugfs_list,
|
|
+ ARRAY_SIZE(etnaviv_debugfs_list),
|
|
+ minor->debugfs_root, minor);
|
|
+
|
|
+ if (ret) {
|
|
+ dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
|
|
+{
|
|
+ drm_debugfs_remove_files(etnaviv_debugfs_list,
|
|
+ ARRAY_SIZE(etnaviv_debugfs_list), minor);
|
|
+}
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * DRM ioctls:
|
|
+ */
|
|
+
|
|
+static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
|
|
+ struct drm_file *file)
|
|
+{
|
|
+ struct etnaviv_drm_private *priv = dev->dev_private;
|
|
+ struct drm_etnaviv_param *args = data;
|
|
+ struct etnaviv_gpu *gpu;
|
|
+
|
|
+ if (args->pipe >= ETNA_MAX_PIPES)
|
|
+ return -EINVAL;
|
|
+
|
|
+ gpu = priv->gpu[args->pipe];
|
|
+ if (!gpu)
|
|
+ return -ENXIO;
|
|
+
|
|
+ return etnaviv_gpu_get_param(gpu, args->param, &args->value);
|
|
+}
|
|
+
|
|
+static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
|
|
+ struct drm_file *file)
|
|
+{
|
|
+ struct drm_etnaviv_gem_new *args = data;
|
|
+
|
|
+ if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
|
|
+ ETNA_BO_FORCE_MMU))
|
|
+ return -EINVAL;
|
|
+
|
|
+ return etnaviv_gem_new_handle(dev, file, args->size,
|
|
+ args->flags, &args->handle);
|
|
+}
|
|
+
|
|
+#define TS(t) ((struct timespec){ \
|
|
+ .tv_sec = (t).tv_sec, \
|
|
+ .tv_nsec = (t).tv_nsec \
|
|
+})
|
|
+
|
|
+static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
|
|
+ struct drm_file *file)
|
|
+{
|
|
+ struct drm_etnaviv_gem_cpu_prep *args = data;
|
|
+ struct drm_gem_object *obj;
|
|
+ int ret;
|
|
+
|
|
+ if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
|
|
+ return -EINVAL;
|
|
+
|
|
+ obj = drm_gem_object_lookup(dev, file, args->handle);
|
|
+ if (!obj)
|
|
+ return -ENOENT;
|
|
+
|
|
+ ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
|
|
+
|
|
+ drm_gem_object_unreference_unlocked(obj);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
|
|
+ struct drm_file *file)
|
|
+{
|
|
+ struct drm_etnaviv_gem_cpu_fini *args = data;
|
|
+ struct drm_gem_object *obj;
|
|
+ int ret;
|
|
+
|
|
+ if (args->flags)
|
|
+ return -EINVAL;
|
|
+
|
|
+ obj = drm_gem_object_lookup(dev, file, args->handle);
|
|
+ if (!obj)
|
|
+ return -ENOENT;
|
|
+
|
|
+ ret = etnaviv_gem_cpu_fini(obj);
|
|
+
|
|
+ drm_gem_object_unreference_unlocked(obj);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
|
|
+ struct drm_file *file)
|
|
+{
|
|
+ struct drm_etnaviv_gem_info *args = data;
|
|
+ struct drm_gem_object *obj;
|
|
+ int ret;
|
|
+
|
|
+ if (args->pad)
|
|
+ return -EINVAL;
|
|
+
|
|
+ obj = drm_gem_object_lookup(dev, file, args->handle);
|
|
+ if (!obj)
|
|
+ return -ENOENT;
|
|
+
|
|
+ ret = etnaviv_gem_mmap_offset(obj, &args->offset);
|
|
+ drm_gem_object_unreference_unlocked(obj);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
|
|
+ struct drm_file *file)
|
|
+{
|
|
+ struct drm_etnaviv_wait_fence *args = data;
|
|
+ struct etnaviv_drm_private *priv = dev->dev_private;
|
|
+ struct timespec *timeout = &TS(args->timeout);
|
|
+ struct etnaviv_gpu *gpu;
|
|
+
|
|
+ if (args->flags & ~(ETNA_WAIT_NONBLOCK))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (args->pipe >= ETNA_MAX_PIPES)
|
|
+ return -EINVAL;
|
|
+
|
|
+ gpu = priv->gpu[args->pipe];
|
|
+ if (!gpu)
|
|
+ return -ENXIO;
|
|
+
|
|
+ if (args->flags & ETNA_WAIT_NONBLOCK)
|
|
+ timeout = NULL;
|
|
+
|
|
+ return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
|
|
+ timeout);
|
|
+}
|
|
+
|
|
+static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
|
|
+ struct drm_file *file)
|
|
+{
|
|
+ struct drm_etnaviv_gem_userptr *args = data;
|
|
+ int access;
|
|
+
|
|
+ if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
|
|
+ args->flags == 0)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (offset_in_page(args->user_ptr | args->user_size) ||
|
|
+ (uintptr_t)args->user_ptr != args->user_ptr ||
|
|
+ (u32)args->user_size != args->user_size ||
|
|
+ args->user_ptr & ~PAGE_MASK)
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (args->flags & ETNA_USERPTR_WRITE)
|
|
+ access = VERIFY_WRITE;
|
|
+ else
|
|
+ access = VERIFY_READ;
|
|
+
|
|
+ if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
|
|
+ args->user_size))
|
|
+ return -EFAULT;
|
|
+
|
|
+ return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
|
|
+ args->user_size, args->flags,
|
|
+ &args->handle);
|
|
+}
|
|
+
|
|
+static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
|
|
+ struct drm_file *file)
|
|
+{
|
|
+ struct etnaviv_drm_private *priv = dev->dev_private;
|
|
+ struct drm_etnaviv_gem_wait *args = data;
|
|
+ struct timespec *timeout = &TS(args->timeout);
|
|
+ struct drm_gem_object *obj;
|
|
+ struct etnaviv_gpu *gpu;
|
|
+ int ret;
|
|
+
|
|
+ if (args->flags & ~(ETNA_WAIT_NONBLOCK))
|
|
+ return -EINVAL;
|
|
+
|
|
+ if (args->pipe >= ETNA_MAX_PIPES)
|
|
+ return -EINVAL;
|
|
+
|
|
+ gpu = priv->gpu[args->pipe];
|
|
+ if (!gpu)
|
|
+ return -ENXIO;
|
|
+
|
|
+ obj = drm_gem_object_lookup(dev, file, args->handle);
|
|
+ if (!obj)
|
|
+ return -ENOENT;
|
|
+
|
|
+ if (args->flags & ETNA_WAIT_NONBLOCK)
|
|
+ timeout = NULL;
|
|
+
|
|
+ ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
|
|
+
|
|
+ drm_gem_object_unreference_unlocked(obj);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static const struct drm_ioctl_desc etnaviv_ioctls[] = {
|
|
+#define ETNA_IOCTL(n, func, flags) \
|
|
+ DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
|
|
+ ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
+ ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
+ ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
+ ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
+ ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
+ ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
+ ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
+ ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
+ ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
|
|
+};
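+
+ /*
+ * All etnaviv ioctls are exposed on render nodes (DRM_RENDER_ALLOW) and,
+ * on the primary node, restricted to authenticated clients (DRM_AUTH).
+ * A user space caller fills one of the drm_etnaviv_* structs from the
+ * UAPI header and issues the matching ioctl. Illustrative sketch only,
+ * assuming the libdrm drmCommandWriteRead() helper and the command
+ * index macro from include/uapi/drm/etnaviv_drm.h:
+ *
+ * struct drm_etnaviv_wait_fence req = { .pipe = 0, .fence = fence };
+ * ... fill req.timeout ...
+ * drmCommandWriteRead(fd, DRM_ETNAVIV_WAIT_FENCE, &req, sizeof(req));
+ */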
+
|
|
+static const struct vm_operations_struct vm_ops = {
|
|
+ .fault = etnaviv_gem_fault,
|
|
+ .open = drm_gem_vm_open,
|
|
+ .close = drm_gem_vm_close,
|
|
+};
|
|
+
|
|
+static const struct file_operations fops = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .open = drm_open,
|
|
+ .release = drm_release,
|
|
+ .unlocked_ioctl = drm_ioctl,
|
|
+#ifdef CONFIG_COMPAT
|
|
+ .compat_ioctl = drm_compat_ioctl,
|
|
+#endif
|
|
+ .poll = drm_poll,
|
|
+ .read = drm_read,
|
|
+ .llseek = no_llseek,
|
|
+ .mmap = etnaviv_gem_mmap,
|
|
+};
|
|
+
|
|
+static struct drm_driver etnaviv_drm_driver = {
|
|
+ .driver_features = DRIVER_HAVE_IRQ |
|
|
+ DRIVER_GEM |
|
|
+ DRIVER_PRIME |
|
|
+ DRIVER_RENDER,
|
|
+ .open = etnaviv_open,
|
|
+ .preclose = etnaviv_preclose,
|
|
+ .set_busid = drm_platform_set_busid,
|
|
+ .gem_free_object = etnaviv_gem_free_object,
|
|
+ .gem_vm_ops = &vm_ops,
|
|
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
|
|
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
|
|
+ .gem_prime_export = drm_gem_prime_export,
|
|
+ .gem_prime_import = drm_gem_prime_import,
|
|
+ .gem_prime_pin = etnaviv_gem_prime_pin,
|
|
+ .gem_prime_unpin = etnaviv_gem_prime_unpin,
|
|
+ .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
|
|
+ .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
|
|
+ .gem_prime_vmap = etnaviv_gem_prime_vmap,
|
|
+ .gem_prime_vunmap = etnaviv_gem_prime_vunmap,
|
|
+#ifdef CONFIG_DEBUG_FS
|
|
+ .debugfs_init = etnaviv_debugfs_init,
|
|
+ .debugfs_cleanup = etnaviv_debugfs_cleanup,
|
|
+#endif
|
|
+ .ioctls = etnaviv_ioctls,
|
|
+ .num_ioctls = DRM_ETNAVIV_NUM_IOCTLS,
|
|
+ .fops = &fops,
|
|
+ .name = "etnaviv",
|
|
+ .desc = "etnaviv DRM",
|
|
+ .date = "20151214",
|
|
+ .major = 1,
|
|
+ .minor = 0,
|
|
+};
|
|
+
|
|
+/*
|
|
+ * Platform driver:
|
|
+ */
|
|
+static int etnaviv_bind(struct device *dev)
|
|
+{
|
|
+ struct etnaviv_drm_private *priv;
|
|
+ struct drm_device *drm;
|
|
+ int ret;
|
|
+
|
|
+ drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
|
|
+ if (!drm)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ drm->platformdev = to_platform_device(dev);
|
|
+
|
|
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
|
+ if (!priv) {
|
|
+ dev_err(dev, "failed to allocate private data\n");
|
|
+ ret = -ENOMEM;
|
|
+ goto out_unref;
|
|
+ }
|
|
+ drm->dev_private = priv;
|
|
+
|
|
+ priv->wq = alloc_ordered_workqueue("etnaviv", 0);
|
|
+ if (!priv->wq) {
|
|
+ ret = -ENOMEM;
|
|
+ goto out_wq;
|
|
+ }
|
|
+
|
|
+ mutex_init(&priv->gem_lock);
|
|
+ INIT_LIST_HEAD(&priv->gem_list);
|
|
+ priv->num_gpus = 0;
|
|
+
|
|
+ dev_set_drvdata(dev, drm);
|
|
+
|
|
+ ret = component_bind_all(dev, drm);
|
|
+ if (ret < 0)
|
|
+ goto out_bind;
|
|
+
|
|
+ load_gpu(drm);
|
|
+
|
|
+ ret = drm_dev_register(drm, 0);
|
|
+ if (ret)
|
|
+ goto out_register;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+out_register:
|
|
+ component_unbind_all(dev, drm);
|
|
+out_bind:
|
|
+ flush_workqueue(priv->wq);
|
|
+ destroy_workqueue(priv->wq);
|
|
+out_wq:
|
|
+ kfree(priv);
|
|
+out_unref:
|
|
+ drm_dev_unref(drm);
|
|
+
|
|
+ return ret;
|
|
+}
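+
+ /*
+ * Bind order matters here: the GPU core components are bound first
+ * (their bind callbacks in etnaviv_gpu.c fill priv->gpu[]), load_gpu()
+ * then brings the cores up, and only after that is the DRM device
+ * registered, so user space never sees a partially initialised device.
+ */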
+
|
|
+static void etnaviv_unbind(struct device *dev)
|
|
+{
|
|
+ struct drm_device *drm = dev_get_drvdata(dev);
|
|
+ struct etnaviv_drm_private *priv = drm->dev_private;
|
|
+
|
|
+ drm_dev_unregister(drm);
|
|
+
|
|
+ flush_workqueue(priv->wq);
|
|
+ destroy_workqueue(priv->wq);
|
|
+
|
|
+ component_unbind_all(dev, drm);
|
|
+
|
|
+ drm->dev_private = NULL;
|
|
+ kfree(priv);
|
|
+
|
|
+ drm_put_dev(drm);
|
|
+}
|
|
+
|
|
+static const struct component_master_ops etnaviv_master_ops = {
|
|
+ .bind = etnaviv_bind,
|
|
+ .unbind = etnaviv_unbind,
|
|
+};
|
|
+
|
|
+static int compare_of(struct device *dev, void *data)
|
|
+{
|
|
+ struct device_node *np = data;
|
|
+
|
|
+ return dev->of_node == np;
|
|
+}
|
|
+
|
|
+static int compare_str(struct device *dev, void *data)
|
|
+{
|
|
+ return !strcmp(dev_name(dev), data);
|
|
+}
|
|
+
|
|
+static int etnaviv_pdev_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct device_node *node = dev->of_node;
|
|
+ struct component_match *match = NULL;
|
|
+
|
|
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
|
|
+
|
|
+ if (node) {
|
|
+ struct device_node *core_node;
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; ; i++) {
|
|
+ core_node = of_parse_phandle(node, "cores", i);
|
|
+ if (!core_node)
|
|
+ break;
|
|
+
|
|
+ component_match_add(&pdev->dev, &match, compare_of,
|
|
+ core_node);
|
|
+ of_node_put(core_node);
|
|
+ }
|
|
+ } else if (dev->platform_data) {
|
|
+ char **names = dev->platform_data;
|
|
+ unsigned i;
|
|
+
|
|
+ for (i = 0; names[i]; i++)
|
|
+ component_match_add(dev, &match, compare_str, names[i]);
|
|
+ }
|
|
+
|
|
+ return component_master_add_with_match(dev, &etnaviv_master_ops, match);
|
|
+}
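+
+ /*
+ * The subsystem node only carries phandles to the actual GPU core
+ * nodes; each core is bound as a component and the DRM device is
+ * created once all of them have probed. A matching device tree
+ * fragment would look roughly like this (node names are made up for
+ * illustration; only the compatible string and the "cores" property
+ * come from this driver):
+ *
+ * gpu-subsystem {
+ * compatible = "fsl,imx-gpu-subsystem";
+ * cores = <&gpu_3d>, <&gpu_2d>;
+ * };
+ */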
+
|
|
+static int etnaviv_pdev_remove(struct platform_device *pdev)
|
|
+{
|
|
+ component_master_del(&pdev->dev, &etnaviv_master_ops);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static const struct of_device_id dt_match[] = {
|
|
+ { .compatible = "fsl,imx-gpu-subsystem" },
|
|
+ { .compatible = "marvell,dove-gpu-subsystem" },
|
|
+ {}
|
|
+};
|
|
+MODULE_DEVICE_TABLE(of, dt_match);
|
|
+
|
|
+static struct platform_driver etnaviv_platform_driver = {
|
|
+ .probe = etnaviv_pdev_probe,
|
|
+ .remove = etnaviv_pdev_remove,
|
|
+ .driver = {
|
|
+ .owner = THIS_MODULE,
|
|
+ .name = "etnaviv",
|
|
+ .of_match_table = dt_match,
|
|
+ },
|
|
+};
|
|
+
|
|
+static int __init etnaviv_init(void)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ etnaviv_validate_init();
|
|
+
|
|
+ ret = platform_driver_register(&etnaviv_gpu_driver);
|
|
+ if (ret != 0)
|
|
+ return ret;
|
|
+
|
|
+ ret = platform_driver_register(&etnaviv_platform_driver);
|
|
+ if (ret != 0)
|
|
+ platform_driver_unregister(&etnaviv_gpu_driver);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+module_init(etnaviv_init);
|
|
+
|
|
+static void __exit etnaviv_exit(void)
|
|
+{
|
|
+ platform_driver_unregister(&etnaviv_gpu_driver);
|
|
+ platform_driver_unregister(&etnaviv_platform_driver);
|
|
+}
|
|
+module_exit(etnaviv_exit);
|
|
+
|
|
+MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
|
|
+MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
|
|
+MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
|
|
+MODULE_DESCRIPTION("etnaviv DRM Driver");
|
|
+MODULE_LICENSE("GPL v2");
|
|
+MODULE_ALIAS("platform:etnaviv");
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
|
|
new file mode 100644
|
|
index 0000000..d6bd438
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
|
|
@@ -0,0 +1,161 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#ifndef __ETNAVIV_DRV_H__
|
|
+#define __ETNAVIV_DRV_H__
|
|
+
|
|
+#include <linux/kernel.h>
|
|
+#include <linux/clk.h>
|
|
+#include <linux/cpufreq.h>
|
|
+#include <linux/module.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/pm.h>
|
|
+#include <linux/pm_runtime.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/list.h>
|
|
+#include <linux/iommu.h>
|
|
+#include <linux/types.h>
|
|
+#include <linux/sizes.h>
|
|
+
|
|
+#include <drm/drmP.h>
|
|
+#include <drm/drm_crtc_helper.h>
|
|
+#include <drm/drm_fb_helper.h>
|
|
+#include <drm/drm_gem.h>
|
|
+#include <drm/etnaviv_drm.h>
|
|
+
|
|
+struct etnaviv_cmdbuf;
|
|
+struct etnaviv_gpu;
|
|
+struct etnaviv_mmu;
|
|
+struct etnaviv_gem_object;
|
|
+struct etnaviv_gem_submit;
|
|
+
|
|
+struct etnaviv_file_private {
|
|
+ /* currently we don't do anything useful with this.. but when
|
|
+ * per-context address spaces are supported we'd keep track of
|
|
+ * the context's page-tables here.
|
|
+ */
|
|
+ int dummy;
|
|
+};
|
|
+
|
|
+struct etnaviv_drm_private {
|
|
+ int num_gpus;
|
|
+ struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
|
|
+
|
|
+ /* list of GEM objects: */
|
|
+ struct mutex gem_lock;
|
|
+ struct list_head gem_list;
|
|
+
|
|
+ struct workqueue_struct *wq;
|
|
+};
|
|
+
|
|
+static inline void etnaviv_queue_work(struct drm_device *dev,
|
|
+ struct work_struct *w)
|
|
+{
|
|
+ struct etnaviv_drm_private *priv = dev->dev_private;
|
|
+
|
|
+ queue_work(priv->wq, w);
|
|
+}
|
|
+
|
|
+int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
|
|
+ struct drm_file *file);
|
|
+
|
|
+int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
|
|
+int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
|
|
+int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
|
|
+int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
|
|
+ struct drm_gem_object *obj, u32 *iova);
|
|
+void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj);
|
|
+struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
|
|
+void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
|
|
+void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
|
|
+struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
|
|
+ struct dma_buf_attachment *attach, struct sg_table *sg);
|
|
+int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
|
|
+void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
|
|
+void *etnaviv_gem_vaddr(struct drm_gem_object *obj);
|
|
+int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
|
|
+ struct timespec *timeout);
|
|
+int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
|
|
+void etnaviv_gem_free_object(struct drm_gem_object *obj);
|
|
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
|
|
+ u32 size, u32 flags, u32 *handle);
|
|
+struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev,
|
|
+ u32 size, u32 flags);
|
|
+struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
|
|
+ u32 size, u32 flags);
|
|
+int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
|
|
+ uintptr_t ptr, u32 size, u32 flags, u32 *handle);
|
|
+u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
|
|
+void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
|
|
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
|
|
+ struct etnaviv_cmdbuf *cmdbuf);
|
|
+void etnaviv_validate_init(void);
|
|
+bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
|
|
+ u32 *stream, unsigned int size,
|
|
+ struct drm_etnaviv_gem_submit_reloc *relocs, unsigned int reloc_size);
|
|
+
|
|
+#ifdef CONFIG_DEBUG_FS
|
|
+void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
|
|
+ struct seq_file *m);
|
|
+#endif
|
|
+
|
|
+void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
|
|
+ const char *dbgname);
|
|
+void etnaviv_writel(u32 data, void __iomem *addr);
|
|
+u32 etnaviv_readl(const void __iomem *addr);
|
|
+
|
|
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
|
|
+#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
|
|
+
|
|
+/*
|
|
+ * Return the storage size of a structure with a variable length array.
|
|
+ * The array is nelem elements of elem_size, where the base structure
|
|
+ * is defined by base. If the size overflows size_t, return zero.
|
|
+ */
|
|
+static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
|
|
+{
|
|
+ if (elem_size && nelem > (SIZE_MAX - base) / elem_size)
|
|
+ return 0;
|
|
+ return base + nelem * elem_size;
|
|
+}
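+
+ /*
+ * Example: the submit ioctl sizes its tracking structure with
+ * size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit)) in
+ * submit_create(), so a huge nr_bos from user space cannot wrap the
+ * size_t computation and lead to an undersized allocation.
+ */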
+
|
|
+/* returns true if fence a comes after fence b */
|
|
+static inline bool fence_after(u32 a, u32 b)
|
|
+{
|
|
+ return (s32)(a - b) > 0;
|
|
+}
|
|
+
|
|
+static inline bool fence_after_eq(u32 a, u32 b)
|
|
+{
|
|
+ return (s32)(a - b) >= 0;
|
|
+}
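+
+ /*
+ * Fence numbers are 32-bit and may wrap. Comparing the signed
+ * difference keeps the ordering correct across the wrap point as long
+ * as the two values are less than 2^31 apart, e.g.
+ * fence_after(1, 0xffffffff) evaluates (s32)(1 - 0xffffffff) == 2 > 0,
+ * so fence 1 is correctly treated as newer than fence 0xffffffff.
+ */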
+
|
|
+static inline unsigned long etnaviv_timeout_to_jiffies(
|
|
+ const struct timespec *timeout)
|
|
+{
|
|
+ unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
|
|
+ unsigned long start_jiffies = jiffies;
|
|
+ unsigned long remaining_jiffies;
|
|
+
|
|
+ if (time_after(start_jiffies, timeout_jiffies))
|
|
+ remaining_jiffies = 0;
|
|
+ else
|
|
+ remaining_jiffies = timeout_jiffies - start_jiffies;
|
|
+
|
|
+ return remaining_jiffies;
|
|
+}
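+
+ /*
+ * Converts the timespec handed in by the wait ioctls to jiffies and
+ * returns the number of jiffies still remaining relative to the current
+ * jiffies value; a return of zero means the timeout has already expired.
+ */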
+
|
|
+#endif /* __ETNAVIV_DRV_H__ */
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
|
|
new file mode 100644
|
|
index 0000000..bf8fa85
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
|
|
@@ -0,0 +1,227 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#include <linux/devcoredump.h>
|
|
+#include "etnaviv_dump.h"
|
|
+#include "etnaviv_gem.h"
|
|
+#include "etnaviv_gpu.h"
|
|
+#include "etnaviv_mmu.h"
|
|
+#include "state.xml.h"
|
|
+#include "state_hi.xml.h"
|
|
+
|
|
+struct core_dump_iterator {
|
|
+ void *start;
|
|
+ struct etnaviv_dump_object_header *hdr;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+static const unsigned short etnaviv_dump_registers[] = {
|
|
+ VIVS_HI_AXI_STATUS,
|
|
+ VIVS_HI_CLOCK_CONTROL,
|
|
+ VIVS_HI_IDLE_STATE,
|
|
+ VIVS_HI_AXI_CONFIG,
|
|
+ VIVS_HI_INTR_ENBL,
|
|
+ VIVS_HI_CHIP_IDENTITY,
|
|
+ VIVS_HI_CHIP_FEATURE,
|
|
+ VIVS_HI_CHIP_MODEL,
|
|
+ VIVS_HI_CHIP_REV,
|
|
+ VIVS_HI_CHIP_DATE,
|
|
+ VIVS_HI_CHIP_TIME,
|
|
+ VIVS_HI_CHIP_MINOR_FEATURE_0,
|
|
+ VIVS_HI_CACHE_CONTROL,
|
|
+ VIVS_HI_AXI_CONTROL,
|
|
+ VIVS_PM_POWER_CONTROLS,
|
|
+ VIVS_PM_MODULE_CONTROLS,
|
|
+ VIVS_PM_MODULE_STATUS,
|
|
+ VIVS_PM_PULSE_EATER,
|
|
+ VIVS_MC_MMU_FE_PAGE_TABLE,
|
|
+ VIVS_MC_MMU_TX_PAGE_TABLE,
|
|
+ VIVS_MC_MMU_PE_PAGE_TABLE,
|
|
+ VIVS_MC_MMU_PEZ_PAGE_TABLE,
|
|
+ VIVS_MC_MMU_RA_PAGE_TABLE,
|
|
+ VIVS_MC_DEBUG_MEMORY,
|
|
+ VIVS_MC_MEMORY_BASE_ADDR_RA,
|
|
+ VIVS_MC_MEMORY_BASE_ADDR_FE,
|
|
+ VIVS_MC_MEMORY_BASE_ADDR_TX,
|
|
+ VIVS_MC_MEMORY_BASE_ADDR_PEZ,
|
|
+ VIVS_MC_MEMORY_BASE_ADDR_PE,
|
|
+ VIVS_MC_MEMORY_TIMING_CONTROL,
|
|
+ VIVS_MC_BUS_CONFIG,
|
|
+ VIVS_FE_DMA_STATUS,
|
|
+ VIVS_FE_DMA_DEBUG_STATE,
|
|
+ VIVS_FE_DMA_ADDRESS,
|
|
+ VIVS_FE_DMA_LOW,
|
|
+ VIVS_FE_DMA_HIGH,
|
|
+ VIVS_FE_AUTO_FLUSH,
|
|
+};
|
|
+
|
|
+static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
|
|
+ u32 type, void *data_end)
|
|
+{
|
|
+ struct etnaviv_dump_object_header *hdr = iter->hdr;
|
|
+
|
|
+ hdr->magic = cpu_to_le32(ETDUMP_MAGIC);
|
|
+ hdr->type = cpu_to_le32(type);
|
|
+ hdr->file_offset = cpu_to_le32(iter->data - iter->start);
|
|
+ hdr->file_size = cpu_to_le32(data_end - iter->data);
|
|
+
|
|
+ iter->hdr++;
|
|
+ iter->data += hdr->file_size;
|
|
+}
|
|
+
|
|
+static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
|
|
+ struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ struct etnaviv_dump_registers *reg = iter->data;
|
|
+ unsigned int i;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
|
|
+ reg->reg = etnaviv_dump_registers[i];
|
|
+ reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
|
|
+ }
|
|
+
|
|
+ etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
|
|
+}
|
|
+
|
|
+static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
|
|
+ struct etnaviv_gpu *gpu, size_t mmu_size)
|
|
+{
|
|
+ etnaviv_iommu_dump(gpu->mmu, iter->data);
|
|
+
|
|
+ etnaviv_core_dump_header(iter, ETDUMP_BUF_MMU, iter->data + mmu_size);
|
|
+}
|
|
+
|
|
+static void etnaviv_core_dump_mem(struct core_dump_iterator *iter, u32 type,
|
|
+ void *ptr, size_t size, u64 iova)
|
|
+{
|
|
+ memcpy(iter->data, ptr, size);
|
|
+
|
|
+ iter->hdr->iova = cpu_to_le64(iova);
|
|
+
|
|
+ etnaviv_core_dump_header(iter, type, iter->data + size);
|
|
+}
|
|
+
|
|
+void etnaviv_core_dump(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ struct core_dump_iterator iter;
|
|
+ struct etnaviv_vram_mapping *vram;
|
|
+ struct etnaviv_gem_object *obj;
|
|
+ struct etnaviv_cmdbuf *cmd;
|
|
+ unsigned int n_obj, n_bomap_pages;
|
|
+ size_t file_size, mmu_size;
|
|
+ __le64 *bomap, *bomap_start;
|
|
+
|
|
+ mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
|
|
+
|
|
+ /* We always dump registers, mmu, ring and end marker */
|
|
+ n_obj = 4;
|
|
+ n_bomap_pages = 0;
|
|
+ file_size = ARRAY_SIZE(etnaviv_dump_registers) *
|
|
+ sizeof(struct etnaviv_dump_registers) +
|
|
+ mmu_size + gpu->buffer->size;
|
|
+
|
|
+ /* Add in the active command buffers */
|
|
+ list_for_each_entry(cmd, &gpu->active_cmd_list, node) {
|
|
+ file_size += cmd->size;
|
|
+ n_obj++;
|
|
+ }
|
|
+
|
|
+ /* Add in the active buffer objects */
|
|
+ list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
|
|
+ if (!vram->use)
|
|
+ continue;
|
|
+
|
|
+ obj = vram->object;
|
|
+ file_size += obj->base.size;
|
|
+ n_bomap_pages += obj->base.size >> PAGE_SHIFT;
|
|
+ n_obj++;
|
|
+ }
|
|
+
|
|
+ /* If we have any buffer objects, add a bomap object */
|
|
+ if (n_bomap_pages) {
|
|
+ file_size += n_bomap_pages * sizeof(__le64);
|
|
+ n_obj++;
|
|
+ }
|
|
+
|
|
+ /* Add the size of the headers */
|
|
+ file_size += sizeof(*iter.hdr) * n_obj;
|
|
+
|
|
+ /* Allocate the file in vmalloc memory, it's likely to be big */
|
|
+ iter.start = vmalloc(file_size);
|
|
+ if (!iter.start) {
|
|
+ dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ /* Point the data member after the headers */
|
|
+ iter.hdr = iter.start;
|
|
+ iter.data = &iter.hdr[n_obj];
|
|
+
|
|
+ memset(iter.hdr, 0, iter.data - iter.start);
|
|
+
|
|
+ etnaviv_core_dump_registers(&iter, gpu);
|
|
+ etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
|
|
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
|
|
+ gpu->buffer->size, gpu->buffer->paddr);
|
|
+
|
|
+ list_for_each_entry(cmd, &gpu->active_cmd_list, node)
|
|
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
|
|
+ cmd->size, cmd->paddr);
|
|
+
|
|
+ /* Reserve space for the bomap */
|
|
+ if (n_bomap_pages) {
|
|
+ bomap_start = bomap = iter.data;
|
|
+ memset(bomap, 0, sizeof(*bomap) * n_bomap_pages);
|
|
+ etnaviv_core_dump_header(&iter, ETDUMP_BUF_BOMAP,
|
|
+ bomap + n_bomap_pages);
|
|
+ } else {
|
|
+ /* Silence warning */
|
|
+ bomap_start = bomap = NULL;
|
|
+ }
|
|
+
|
|
+ list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
|
|
+ struct page **pages;
|
|
+ void *vaddr;
|
|
+
|
|
+ if (vram->use == 0)
|
|
+ continue;
|
|
+
|
|
+ obj = vram->object;
|
|
+
|
|
+ pages = etnaviv_gem_get_pages(obj);
|
|
+ if (pages) {
|
|
+ int j;
|
|
+
|
|
+ iter.hdr->data[0] = bomap - bomap_start;
|
|
+
|
|
+ for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
|
|
+ *bomap++ = cpu_to_le64(page_to_phys(*pages++));
|
|
+ }
|
|
+
|
|
+ iter.hdr->iova = cpu_to_le64(vram->iova);
|
|
+
|
|
+ vaddr = etnaviv_gem_vaddr(&obj->base);
|
|
+ if (vaddr && !IS_ERR(vaddr))
|
|
+ memcpy(iter.data, vaddr, obj->base.size);
|
|
+
|
|
+ etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
|
|
+ obj->base.size);
|
|
+ }
|
|
+
|
|
+ etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
|
|
+
|
|
+ dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
|
|
+}
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.h b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
|
|
new file mode 100644
|
|
index 0000000..97f2f8d
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.h
|
|
@@ -0,0 +1,54 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ * Etnaviv devcoredump file definitions
|
|
+ */
|
|
+#ifndef ETNAVIV_DUMP_H
|
|
+#define ETNAVIV_DUMP_H
|
|
+
|
|
+#include <linux/types.h>
|
|
+
|
|
+enum {
|
|
+ ETDUMP_MAGIC = 0x414e5445,
|
|
+ ETDUMP_BUF_REG = 0,
|
|
+ ETDUMP_BUF_MMU,
|
|
+ ETDUMP_BUF_RING,
|
|
+ ETDUMP_BUF_CMD,
|
|
+ ETDUMP_BUF_BOMAP,
|
|
+ ETDUMP_BUF_BO,
|
|
+ ETDUMP_BUF_END,
|
|
+};
|
|
+
|
|
+struct etnaviv_dump_object_header {
|
|
+ __le32 magic;
|
|
+ __le32 type;
|
|
+ __le32 file_offset;
|
|
+ __le32 file_size;
|
|
+ __le64 iova;
|
|
+ __le32 data[2];
|
|
+};
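+
+ /*
+ * A dump file produced by etnaviv_core_dump() starts with an array of
+ * these headers, directly followed by the data blobs they describe;
+ * file_offset/file_size locate each blob within the file and iova gives
+ * its GPU virtual address where that is meaningful (ring, command
+ * buffers and BOs). The list is terminated by an ETDUMP_BUF_END header
+ * with an empty blob.
+ */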
+
|
|
+/* Registers object, an array of these */
|
|
+struct etnaviv_dump_registers {
|
|
+ __le32 reg;
|
|
+ __le32 value;
|
|
+};
|
|
+
|
|
+#ifdef __KERNEL__
|
|
+struct etnaviv_gpu;
|
|
+void etnaviv_core_dump(struct etnaviv_gpu *gpu);
|
|
+#endif
|
|
+
|
|
+#endif
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
|
|
new file mode 100644
|
|
index 0000000..8d6f859
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
|
|
@@ -0,0 +1,897 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#include <linux/spinlock.h>
|
|
+#include <linux/shmem_fs.h>
|
|
+
|
|
+#include "etnaviv_drv.h"
|
|
+#include "etnaviv_gem.h"
|
|
+#include "etnaviv_gpu.h"
|
|
+#include "etnaviv_mmu.h"
|
|
+
|
|
+static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
|
|
+{
|
|
+ struct drm_device *dev = etnaviv_obj->base.dev;
|
|
+ struct sg_table *sgt = etnaviv_obj->sgt;
|
|
+
|
|
+ /*
|
|
+ * For non-cached buffers, ensure the new pages are clean
|
|
+ * because display controller, GPU, etc. are not coherent.
|
|
+ */
|
|
+ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
|
|
+ dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
|
|
+}
|
|
+
|
|
+static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
|
|
+{
|
|
+ struct drm_device *dev = etnaviv_obj->base.dev;
|
|
+ struct sg_table *sgt = etnaviv_obj->sgt;
|
|
+
|
|
+ /*
|
|
+ * For non-cached buffers, ensure the new pages are clean
|
|
+ * because display controller, GPU, etc. are not coherent:
|
|
+ *
|
|
+ * WARNING: The DMA API does not support concurrent CPU
|
|
+ * and device access to the memory area. With BIDIRECTIONAL,
|
|
+ * we will clean the cache lines which overlap the region,
|
|
+ * and invalidate all cache lines (partially) contained in
|
|
+ * the region.
|
|
+ *
|
|
+ * If you have dirty data in the overlapping cache lines,
|
|
+ * that will corrupt the GPU-written data. If you have
|
|
+ * written into the remainder of the region, this can
|
|
+ * discard those writes.
|
|
+ */
|
|
+ if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
|
|
+ dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
|
|
+}
|
|
+
|
|
+/* called with etnaviv_obj->lock held */
|
|
+static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
|
|
+{
|
|
+ struct drm_device *dev = etnaviv_obj->base.dev;
|
|
+ struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
|
|
+
|
|
+ if (IS_ERR(p)) {
|
|
+ dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
|
|
+ return PTR_ERR(p);
|
|
+ }
|
|
+
|
|
+ etnaviv_obj->pages = p;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
|
|
+{
|
|
+ if (etnaviv_obj->sgt) {
|
|
+ etnaviv_gem_scatterlist_unmap(etnaviv_obj);
|
|
+ sg_free_table(etnaviv_obj->sgt);
|
|
+ kfree(etnaviv_obj->sgt);
|
|
+ etnaviv_obj->sgt = NULL;
|
|
+ }
|
|
+ if (etnaviv_obj->pages) {
|
|
+ drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
|
|
+ true, false);
|
|
+
|
|
+ etnaviv_obj->pages = NULL;
|
|
+ }
|
|
+}
|
|
+
|
|
+struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ lockdep_assert_held(&etnaviv_obj->lock);
|
|
+
|
|
+ if (!etnaviv_obj->pages) {
|
|
+ ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
|
|
+ if (ret < 0)
|
|
+ return ERR_PTR(ret);
|
|
+ }
|
|
+
|
|
+ if (!etnaviv_obj->sgt) {
|
|
+ struct drm_device *dev = etnaviv_obj->base.dev;
|
|
+ int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
|
|
+ struct sg_table *sgt;
|
|
+
|
|
+ sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
|
|
+ if (IS_ERR(sgt)) {
|
|
+ dev_err(dev->dev, "failed to allocate sgt: %ld\n",
|
|
+ PTR_ERR(sgt));
|
|
+ return ERR_CAST(sgt);
|
|
+ }
|
|
+
|
|
+ etnaviv_obj->sgt = sgt;
|
|
+
|
|
+ etnaviv_gem_scatter_map(etnaviv_obj);
|
|
+ }
|
|
+
|
|
+ return etnaviv_obj->pages;
|
|
+}
|
|
+
|
|
+void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
|
|
+{
|
|
+ lockdep_assert_held(&etnaviv_obj->lock);
|
|
+ /* when we start tracking the pin count, then do something here */
|
|
+}
|
|
+
|
|
+static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
|
|
+ struct vm_area_struct *vma)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+ pgprot_t vm_page_prot;
|
|
+
|
|
+ vma->vm_flags &= ~VM_PFNMAP;
|
|
+ vma->vm_flags |= VM_MIXEDMAP;
|
|
+
|
|
+ vm_page_prot = vm_get_page_prot(vma->vm_flags);
|
|
+
|
|
+ if (etnaviv_obj->flags & ETNA_BO_WC) {
|
|
+ vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
|
|
+ } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
|
|
+ vma->vm_page_prot = pgprot_noncached(vm_page_prot);
|
|
+ } else {
|
|
+ /*
|
|
+ * Shunt off cached objs to shmem file so they have their own
|
|
+ * address_space (so unmap_mapping_range does what we want,
|
|
+ * in particular in the case of mmap'd dmabufs)
|
|
+ */
|
|
+ fput(vma->vm_file);
|
|
+ get_file(obj->filp);
|
|
+ vma->vm_pgoff = 0;
|
|
+ vma->vm_file = obj->filp;
|
|
+
|
|
+ vma->vm_page_prot = vm_page_prot;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
|
|
+{
|
|
+ struct etnaviv_gem_object *obj;
|
|
+ int ret;
|
|
+
|
|
+ ret = drm_gem_mmap(filp, vma);
|
|
+ if (ret) {
|
|
+ DBG("mmap failed: %d", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ obj = to_etnaviv_bo(vma->vm_private_data);
+ return etnaviv_gem_mmap_obj(&obj->base, vma);
|
|
+}
|
|
+
|
|
+int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|
+{
|
|
+ struct drm_gem_object *obj = vma->vm_private_data;
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+ struct page **pages, *page;
|
|
+ pgoff_t pgoff;
|
|
+ int ret;
|
|
+
|
|
+ /*
|
|
+ * Make sure we don't parallel update on a fault, nor move or remove
|
|
+ * something from beneath our feet. Note that vm_insert_page() is
|
|
+ * specifically coded to take care of this, so we don't have to.
|
|
+ */
|
|
+ ret = mutex_lock_interruptible(&etnaviv_obj->lock);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+
|
|
+ /* make sure we have pages attached now */
|
|
+ pages = etnaviv_gem_get_pages(etnaviv_obj);
|
|
+ mutex_unlock(&etnaviv_obj->lock);
|
|
+
|
|
+ if (IS_ERR(pages)) {
|
|
+ ret = PTR_ERR(pages);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /* We don't use vmf->pgoff since that has the fake offset: */
|
|
+ pgoff = ((unsigned long)vmf->virtual_address -
|
|
+ vma->vm_start) >> PAGE_SHIFT;
|
|
+
|
|
+ page = pages[pgoff];
|
|
+
|
|
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
|
|
+ page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
|
|
+
|
|
+ ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
|
|
+
|
|
+out:
|
|
+ switch (ret) {
|
|
+ case -EAGAIN:
|
|
+ case 0:
|
|
+ case -ERESTARTSYS:
|
|
+ case -EINTR:
|
|
+ case -EBUSY:
|
|
+ /*
|
|
+ * EBUSY is ok: this just means that another thread
|
|
+ * already did the job.
|
|
+ */
|
|
+ return VM_FAULT_NOPAGE;
|
|
+ case -ENOMEM:
|
|
+ return VM_FAULT_OOM;
|
|
+ default:
|
|
+ return VM_FAULT_SIGBUS;
|
|
+ }
|
|
+}
|
|
+
|
|
+int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ /* Make it mmapable */
|
|
+ ret = drm_gem_create_mmap_offset(obj);
|
|
+ if (ret)
|
|
+ dev_err(obj->dev->dev, "could not allocate mmap offset\n");
|
|
+ else
|
|
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static struct etnaviv_vram_mapping *
|
|
+etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
|
|
+ struct etnaviv_iommu *mmu)
|
|
+{
|
|
+ struct etnaviv_vram_mapping *mapping;
|
|
+
|
|
+ list_for_each_entry(mapping, &obj->vram_list, obj_node) {
|
|
+ if (mapping->mmu == mmu)
|
|
+ return mapping;
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
|
|
+ struct drm_gem_object *obj, u32 *iova)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+ struct etnaviv_vram_mapping *mapping;
|
|
+ struct page **pages;
|
|
+ int ret = 0;
|
|
+
|
|
+ mutex_lock(&etnaviv_obj->lock);
|
|
+ mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
|
|
+ if (mapping) {
|
|
+ /*
|
|
+ * Holding the object lock prevents the use count changing
|
|
+ * beneath us. If the use count is zero, the MMU might be
|
|
+ * reaping this object, so take the lock and re-check that
|
|
+ * the MMU owns this mapping to close this race.
|
|
+ */
|
|
+ if (mapping->use == 0) {
|
|
+ mutex_lock(&gpu->mmu->lock);
|
|
+ if (mapping->mmu == gpu->mmu)
|
|
+ mapping->use += 1;
|
|
+ else
|
|
+ mapping = NULL;
|
|
+ mutex_unlock(&gpu->mmu->lock);
|
|
+ if (mapping)
|
|
+ goto out;
|
|
+ } else {
|
|
+ mapping->use += 1;
|
|
+ goto out;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ pages = etnaviv_gem_get_pages(etnaviv_obj);
|
|
+ if (IS_ERR(pages)) {
|
|
+ ret = PTR_ERR(pages);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * See if we have a reaped vram mapping we can re-use before
|
|
+ * allocating a fresh mapping.
|
|
+ */
|
|
+ mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
|
|
+ if (!mapping) {
|
|
+ mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
|
|
+ if (!mapping) {
+ /* exit through the unlock path to avoid leaking the object lock */
+ ret = -ENOMEM;
+ goto out;
+ }
|
|
+
|
|
+ INIT_LIST_HEAD(&mapping->scan_node);
|
|
+ mapping->object = etnaviv_obj;
|
|
+ } else {
|
|
+ list_del(&mapping->obj_node);
|
|
+ }
|
|
+
|
|
+ mapping->mmu = gpu->mmu;
|
|
+ mapping->use = 1;
|
|
+
|
|
+ ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
|
|
+ mapping);
|
|
+ if (ret < 0)
|
|
+ kfree(mapping);
|
|
+ else
|
|
+ list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
|
|
+
|
|
+out:
|
|
+ mutex_unlock(&etnaviv_obj->lock);
|
|
+
|
|
+ if (!ret) {
|
|
+ /* Take a reference on the object */
|
|
+ drm_gem_object_reference(obj);
|
|
+ *iova = mapping->iova;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
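+
+ /*
+ * Each successful etnaviv_gem_get_iova() call holds both a reference on
+ * the GEM object and a use count on the per-MMU mapping; the matching
+ * etnaviv_gem_put_iova() below drops them again. Mappings with a use
+ * count of zero stay cached on the object and may be reaped by the MMU
+ * when it needs address space, which is why the zero-use case above
+ * re-checks mapping->mmu under the MMU lock.
+ */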
+
|
|
+void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+ struct etnaviv_vram_mapping *mapping;
|
|
+
|
|
+ mutex_lock(&etnaviv_obj->lock);
|
|
+ mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
|
|
+
|
|
+ WARN_ON(mapping->use == 0);
|
|
+ mapping->use -= 1;
|
|
+ mutex_unlock(&etnaviv_obj->lock);
|
|
+
|
|
+ drm_gem_object_unreference_unlocked(obj);
|
|
+}
|
|
+
|
|
+void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+
|
|
+ mutex_lock(&etnaviv_obj->lock);
|
|
+ if (!etnaviv_obj->vaddr) {
|
|
+ struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
|
|
+
|
|
+ if (IS_ERR(pages))
|
|
+ return ERR_CAST(pages);
|
|
+
|
|
+ etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
|
|
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
|
|
+ }
|
|
+ mutex_unlock(&etnaviv_obj->lock);
|
|
+
|
|
+ return etnaviv_obj->vaddr;
|
|
+}
|
|
+
|
|
+static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
|
|
+{
|
|
+ if (op & ETNA_PREP_READ)
|
|
+ return DMA_FROM_DEVICE;
|
|
+ else if (op & ETNA_PREP_WRITE)
|
|
+ return DMA_TO_DEVICE;
|
|
+ else
|
|
+ return DMA_BIDIRECTIONAL;
|
|
+}
|
|
+
|
|
+int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
|
|
+ struct timespec *timeout)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+ struct drm_device *dev = obj->dev;
|
|
+ bool write = !!(op & ETNA_PREP_WRITE);
|
|
+ int ret;
|
|
+
|
|
+ if (op & ETNA_PREP_NOSYNC) {
|
|
+ if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
|
|
+ write))
|
|
+ return -EBUSY;
|
|
+ } else {
|
|
+ unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
|
|
+
|
|
+ ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
|
|
+ write, true, remain);
|
|
+ if (ret <= 0)
|
|
+ return ret == 0 ? -ETIMEDOUT : ret;
|
|
+ }
|
|
+
|
|
+ if (etnaviv_obj->flags & ETNA_BO_CACHED) {
|
|
+ if (!etnaviv_obj->sgt) {
|
|
+ void *ret;
|
|
+
|
|
+ mutex_lock(&etnaviv_obj->lock);
|
|
+ ret = etnaviv_gem_get_pages(etnaviv_obj);
|
|
+ mutex_unlock(&etnaviv_obj->lock);
|
|
+ if (IS_ERR(ret))
|
|
+ return PTR_ERR(ret);
|
|
+ }
|
|
+
|
|
+ dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
|
|
+ etnaviv_obj->sgt->nents,
|
|
+ etnaviv_op_to_dma_dir(op));
|
|
+ etnaviv_obj->last_cpu_prep_op = op;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
|
|
+{
|
|
+ struct drm_device *dev = obj->dev;
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+
|
|
+ if (etnaviv_obj->flags & ETNA_BO_CACHED) {
|
|
+ /* fini without a prep is almost certainly a userspace error */
|
|
+ WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
|
|
+ dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
|
|
+ etnaviv_obj->sgt->nents,
|
|
+ etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
|
|
+ etnaviv_obj->last_cpu_prep_op = 0;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
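+
+ /*
+ * For cacheable (ETNA_BO_CACHED) buffers, user space is expected to
+ * bracket direct CPU access with the CPU_PREP and CPU_FINI ioctls:
+ * prep waits for the GPU and syncs the pages towards the CPU for the
+ * requested ETNA_PREP_* direction, fini hands ownership back to the
+ * device with the inverse sync. WC and uncached buffers need no cache
+ * maintenance, so for them CPU_PREP reduces to the implicit-fence wait
+ * and CPU_FINI is a no-op.
+ */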
+
|
|
+int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
|
|
+ struct timespec *timeout)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+
|
|
+ return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_DEBUG_FS
|
|
+static void etnaviv_gem_describe_fence(struct fence *fence,
|
|
+ const char *type, struct seq_file *m)
|
|
+{
|
|
+ if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
|
|
+ seq_printf(m, "\t%9s: %s %s seq %u\n",
|
|
+ type,
|
|
+ fence->ops->get_driver_name(fence),
|
|
+ fence->ops->get_timeline_name(fence),
|
|
+ fence->seqno);
|
|
+}
|
|
+
|
|
+static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+ struct reservation_object *robj = etnaviv_obj->resv;
|
|
+ struct reservation_object_list *fobj;
|
|
+ struct fence *fence;
|
|
+ unsigned long off = drm_vma_node_start(&obj->vma_node);
|
|
+
|
|
+ seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
|
|
+ etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
|
|
+ obj->name, obj->refcount.refcount.counter,
|
|
+ off, etnaviv_obj->vaddr, obj->size);
|
|
+
|
|
+ rcu_read_lock();
|
|
+ fobj = rcu_dereference(robj->fence);
|
|
+ if (fobj) {
|
|
+ unsigned int i, shared_count = fobj->shared_count;
|
|
+
|
|
+ for (i = 0; i < shared_count; i++) {
|
|
+ fence = rcu_dereference(fobj->shared[i]);
|
|
+ etnaviv_gem_describe_fence(fence, "Shared", m);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ fence = rcu_dereference(robj->fence_excl);
|
|
+ if (fence)
|
|
+ etnaviv_gem_describe_fence(fence, "Exclusive", m);
|
|
+ rcu_read_unlock();
|
|
+}
|
|
+
|
|
+void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
|
|
+ struct seq_file *m)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj;
|
|
+ int count = 0;
|
|
+ size_t size = 0;
|
|
+
|
|
+ mutex_lock(&priv->gem_lock);
|
|
+ list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
|
|
+ struct drm_gem_object *obj = &etnaviv_obj->base;
|
|
+
|
|
+ seq_puts(m, " ");
|
|
+ etnaviv_gem_describe(obj, m);
|
|
+ count++;
|
|
+ size += obj->size;
|
|
+ }
|
|
+ mutex_unlock(&priv->gem_lock);
|
|
+
|
|
+ seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
|
|
+}
|
|
+#endif
|
|
+
|
|
+static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
|
|
+{
|
|
+ if (etnaviv_obj->vaddr)
|
|
+ vunmap(etnaviv_obj->vaddr);
|
|
+ put_pages(etnaviv_obj);
|
|
+}
|
|
+
|
|
+static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
|
|
+ .get_pages = etnaviv_gem_shmem_get_pages,
|
|
+ .release = etnaviv_gem_shmem_release,
|
|
+};
|
|
+
|
|
+void etnaviv_gem_free_object(struct drm_gem_object *obj)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+ struct etnaviv_vram_mapping *mapping, *tmp;
|
|
+
|
|
+ /* object should not be active */
|
|
+ WARN_ON(is_active(etnaviv_obj));
|
|
+
|
|
+ list_del(&etnaviv_obj->gem_node);
|
|
+
|
|
+ list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
|
|
+ obj_node) {
|
|
+ struct etnaviv_iommu *mmu = mapping->mmu;
|
|
+
|
|
+ WARN_ON(mapping->use);
|
|
+
|
|
+ if (mmu)
|
|
+ etnaviv_iommu_unmap_gem(mmu, mapping);
|
|
+
|
|
+ list_del(&mapping->obj_node);
|
|
+ kfree(mapping);
|
|
+ }
|
|
+
|
|
+ drm_gem_free_mmap_offset(obj);
|
|
+ etnaviv_obj->ops->release(etnaviv_obj);
|
|
+ if (etnaviv_obj->resv == &etnaviv_obj->_resv)
|
|
+ reservation_object_fini(&etnaviv_obj->_resv);
|
|
+ drm_gem_object_release(obj);
|
|
+
|
|
+ kfree(etnaviv_obj);
|
|
+}
|
|
+
|
|
+int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
|
|
+{
|
|
+ struct etnaviv_drm_private *priv = dev->dev_private;
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+
|
|
+ mutex_lock(&priv->gem_lock);
|
|
+ list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
|
|
+ mutex_unlock(&priv->gem_lock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
|
|
+ struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
|
|
+ struct drm_gem_object **obj)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj;
|
|
+ unsigned sz = sizeof(*etnaviv_obj);
|
|
+ bool valid = true;
|
|
+
|
|
+ /* validate flags */
|
|
+ switch (flags & ETNA_BO_CACHE_MASK) {
|
|
+ case ETNA_BO_UNCACHED:
|
|
+ case ETNA_BO_CACHED:
|
|
+ case ETNA_BO_WC:
|
|
+ break;
|
|
+ default:
|
|
+ valid = false;
|
|
+ }
|
|
+
|
|
+ if (!valid) {
|
|
+ dev_err(dev->dev, "invalid cache flag: %x\n",
|
|
+ (flags & ETNA_BO_CACHE_MASK));
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ etnaviv_obj = kzalloc(sz, GFP_KERNEL);
|
|
+ if (!etnaviv_obj)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ etnaviv_obj->flags = flags;
|
|
+ etnaviv_obj->ops = ops;
|
|
+ if (robj) {
|
|
+ etnaviv_obj->resv = robj;
|
|
+ } else {
|
|
+ etnaviv_obj->resv = &etnaviv_obj->_resv;
|
|
+ reservation_object_init(&etnaviv_obj->_resv);
|
|
+ }
|
|
+
|
|
+ mutex_init(&etnaviv_obj->lock);
|
|
+ INIT_LIST_HEAD(&etnaviv_obj->vram_list);
|
|
+
|
|
+ *obj = &etnaviv_obj->base;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
|
|
+ u32 size, u32 flags)
|
|
+{
|
|
+ struct drm_gem_object *obj = NULL;
|
|
+ int ret;
|
|
+
|
|
+ size = PAGE_ALIGN(size);
|
|
+
|
|
+ ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
|
|
+ &etnaviv_gem_shmem_ops, &obj);
|
|
+ if (ret)
|
|
+ goto fail;
|
|
+
|
|
+ ret = drm_gem_object_init(dev, obj, size);
|
|
+ if (ret == 0) {
|
|
+ struct address_space *mapping;
|
|
+
|
|
+ /*
|
|
+ * Our buffers are kept pinned, so allocating them
|
|
+ * from the MOVABLE zone is a really bad idea, and
|
|
+ * conflicts with CMA. See comments above new_inode()
|
|
+ * why this is required _and_ expected if you're
|
|
+ * going to pin these pages.
|
|
+ */
|
|
+ mapping = file_inode(obj->filp)->i_mapping;
|
|
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
|
|
+ }
|
|
+
|
|
+ if (ret)
|
|
+ goto fail;
|
|
+
|
|
+ return obj;
|
|
+
|
|
+fail:
|
|
+ if (obj)
|
|
+ drm_gem_object_unreference_unlocked(obj);
|
|
+
|
|
+ return ERR_PTR(ret);
|
|
+}
|
|
+
|
|
+/* convenience method to construct a GEM buffer object, and userspace handle */
|
|
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
|
|
+ u32 size, u32 flags, u32 *handle)
|
|
+{
|
|
+ struct drm_gem_object *obj;
|
|
+ int ret;
|
|
+
|
|
+ obj = __etnaviv_gem_new(dev, size, flags);
|
|
+ if (IS_ERR(obj))
|
|
+ return PTR_ERR(obj);
|
|
+
|
|
+ ret = etnaviv_gem_obj_add(dev, obj);
|
|
+ if (ret < 0) {
|
|
+ drm_gem_object_unreference_unlocked(obj);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = drm_gem_handle_create(file, obj, handle);
|
|
+
|
|
+ /* drop reference from allocate - handle holds it now */
|
|
+ drm_gem_object_unreference_unlocked(obj);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
|
|
+ u32 size, u32 flags)
|
|
+{
|
|
+ struct drm_gem_object *obj;
|
|
+ int ret;
|
|
+
|
|
+ obj = __etnaviv_gem_new(dev, size, flags);
|
|
+ if (IS_ERR(obj))
|
|
+ return obj;
|
|
+
|
|
+ ret = etnaviv_gem_obj_add(dev, obj);
|
|
+ if (ret < 0) {
|
|
+ drm_gem_object_unreference_unlocked(obj);
|
|
+ return ERR_PTR(ret);
|
|
+ }
|
|
+
|
|
+ return obj;
|
|
+}
|
|
+
|
|
+int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
|
|
+ struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
|
|
+ struct etnaviv_gem_object **res)
|
|
+{
|
|
+ struct drm_gem_object *obj;
|
|
+ int ret;
|
|
+
|
|
+ ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ drm_gem_private_object_init(dev, obj, size);
|
|
+
|
|
+ *res = to_etnaviv_bo(obj);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+struct get_pages_work {
|
|
+ struct work_struct work;
|
|
+ struct mm_struct *mm;
|
|
+ struct task_struct *task;
|
|
+ struct etnaviv_gem_object *etnaviv_obj;
|
|
+};
|
|
+
|
|
+static struct page **etnaviv_gem_userptr_do_get_pages(
|
|
+ struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
|
|
+{
|
|
+ int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
|
|
+ struct page **pvec;
|
|
+ uintptr_t ptr;
|
|
+
|
|
+ pvec = drm_malloc_ab(npages, sizeof(struct page *));
|
|
+ if (!pvec)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ pinned = 0;
|
|
+ ptr = etnaviv_obj->userptr.ptr;
|
|
+
|
|
+ down_read(&mm->mmap_sem);
|
|
+ while (pinned < npages) {
|
|
+ ret = get_user_pages(task, mm, ptr, npages - pinned,
|
|
+ !etnaviv_obj->userptr.ro, 0,
|
|
+ pvec + pinned, NULL);
|
|
+ if (ret < 0)
|
|
+ break;
|
|
+
|
|
+ ptr += ret * PAGE_SIZE;
|
|
+ pinned += ret;
|
|
+ }
|
|
+ up_read(&mm->mmap_sem);
|
|
+
|
|
+ if (ret < 0) {
|
|
+ release_pages(pvec, pinned, 0);
|
|
+ drm_free_large(pvec);
|
|
+ return ERR_PTR(ret);
|
|
+ }
|
|
+
|
|
+ return pvec;
|
|
+}
|
|
+
|
|
+static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
|
|
+{
|
|
+ struct get_pages_work *work = container_of(_work, typeof(*work), work);
|
|
+ struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
|
|
+ struct page **pvec;
|
|
+
|
|
+ pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);
|
|
+
|
|
+ mutex_lock(&etnaviv_obj->lock);
|
|
+ if (IS_ERR(pvec)) {
|
|
+ etnaviv_obj->userptr.work = ERR_CAST(pvec);
|
|
+ } else {
|
|
+ etnaviv_obj->userptr.work = NULL;
|
|
+ etnaviv_obj->pages = pvec;
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&etnaviv_obj->lock);
|
|
+ drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
|
|
+
|
|
+ mmput(work->mm);
|
|
+ put_task_struct(work->task);
|
|
+ kfree(work);
|
|
+}
|
|
+
|
|
+static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
|
|
+{
|
|
+ struct page **pvec = NULL;
|
|
+ struct get_pages_work *work;
|
|
+ struct mm_struct *mm;
|
|
+ int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
|
|
+
|
|
+ if (etnaviv_obj->userptr.work) {
|
|
+ if (IS_ERR(etnaviv_obj->userptr.work)) {
|
|
+ ret = PTR_ERR(etnaviv_obj->userptr.work);
|
|
+ etnaviv_obj->userptr.work = NULL;
|
|
+ } else {
|
|
+ ret = -EAGAIN;
|
|
+ }
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ mm = get_task_mm(etnaviv_obj->userptr.task);
|
|
+ pinned = 0;
|
|
+ if (mm == current->mm) {
|
|
+ pvec = drm_malloc_ab(npages, sizeof(struct page *));
|
|
+ if (!pvec) {
|
|
+ mmput(mm);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
|
|
+ !etnaviv_obj->userptr.ro, pvec);
|
|
+ if (pinned < 0) {
|
|
+ drm_free_large(pvec);
|
|
+ mmput(mm);
|
|
+ return pinned;
|
|
+ }
|
|
+
|
|
+ if (pinned == npages) {
|
|
+ etnaviv_obj->pages = pvec;
|
|
+ mmput(mm);
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ release_pages(pvec, pinned, 0);
|
|
+ drm_free_large(pvec);
|
|
+
|
|
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
|
|
+ if (!work) {
|
|
+ mmput(mm);
|
|
+ return -ENOMEM;
|
|
+ }
|
|
+
|
|
+ get_task_struct(current);
|
|
+ drm_gem_object_reference(&etnaviv_obj->base);
|
|
+
|
|
+ work->mm = mm;
|
|
+ work->task = current;
|
|
+ work->etnaviv_obj = etnaviv_obj;
|
|
+
|
|
+ etnaviv_obj->userptr.work = &work->work;
|
|
+ INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);
|
|
+
|
|
+ etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);
|
|
+
|
|
+ return -EAGAIN;
|
|
+}
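+
+ /*
+ * Fast path: if the pages belong to the calling process they are pinned
+ * directly with __get_user_pages_fast(). Otherwise the pinning is
+ * punted to a worker (which may sleep taking the mmap_sem of a foreign
+ * mm) and -EAGAIN is returned; the caller is expected to retry, and the
+ * retry either finds the pages installed or picks up the error left in
+ * userptr.work by the worker.
+ */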
+
|
|
+static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
|
|
+{
|
|
+ if (etnaviv_obj->sgt) {
|
|
+ etnaviv_gem_scatterlist_unmap(etnaviv_obj);
|
|
+ sg_free_table(etnaviv_obj->sgt);
|
|
+ kfree(etnaviv_obj->sgt);
|
|
+ }
|
|
+ if (etnaviv_obj->pages) {
|
|
+ int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
|
|
+
|
|
+ release_pages(etnaviv_obj->pages, npages, 0);
|
|
+ drm_free_large(etnaviv_obj->pages);
|
|
+ }
|
|
+ put_task_struct(etnaviv_obj->userptr.task);
|
|
+}
|
|
+
|
|
+static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
|
|
+ .get_pages = etnaviv_gem_userptr_get_pages,
|
|
+ .release = etnaviv_gem_userptr_release,
|
|
+};
|
|
+
|
|
+int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
|
|
+ uintptr_t ptr, u32 size, u32 flags, u32 *handle)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj;
|
|
+ int ret;
|
|
+
|
|
+ ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
|
|
+ &etnaviv_gem_userptr_ops, &etnaviv_obj);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ etnaviv_obj->userptr.ptr = ptr;
|
|
+ etnaviv_obj->userptr.task = current;
|
|
+ etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
|
|
+ get_task_struct(current);
|
|
+
|
|
+ ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
|
|
+ if (ret) {
|
|
+ drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
|
|
+
|
|
+ /* drop reference from allocate - handle holds it now */
|
|
+ drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
|
|
new file mode 100644
|
|
index 0000000..a300b4b
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
|
|
@@ -0,0 +1,117 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#ifndef __ETNAVIV_GEM_H__
|
|
+#define __ETNAVIV_GEM_H__
|
|
+
|
|
+#include <linux/reservation.h>
|
|
+#include "etnaviv_drv.h"
|
|
+
|
|
+struct etnaviv_gem_ops;
|
|
+struct etnaviv_gem_object;
|
|
+
|
|
+struct etnaviv_gem_userptr {
|
|
+ uintptr_t ptr;
|
|
+ struct task_struct *task;
|
|
+ struct work_struct *work;
|
|
+ bool ro;
|
|
+};
|
|
+
|
|
+struct etnaviv_vram_mapping {
|
|
+ struct list_head obj_node;
|
|
+ struct list_head scan_node;
|
|
+ struct list_head mmu_node;
|
|
+ struct etnaviv_gem_object *object;
|
|
+ struct etnaviv_iommu *mmu;
|
|
+ struct drm_mm_node vram_node;
|
|
+ unsigned int use;
|
|
+ u32 iova;
|
|
+};
|
|
+
|
|
+struct etnaviv_gem_object {
|
|
+ struct drm_gem_object base;
|
|
+ const struct etnaviv_gem_ops *ops;
|
|
+ struct mutex lock;
|
|
+
|
|
+ u32 flags;
|
|
+
|
|
+ struct list_head gem_node;
|
|
+ struct etnaviv_gpu *gpu; /* non-null if active */
|
|
+ atomic_t gpu_active;
|
|
+ u32 access;
|
|
+
|
|
+ struct page **pages;
|
|
+ struct sg_table *sgt;
|
|
+ void *vaddr;
|
|
+
|
|
+ /* normally (resv == &_resv) except for imported bo's */
|
|
+ struct reservation_object *resv;
|
|
+ struct reservation_object _resv;
|
|
+
|
|
+ struct list_head vram_list;
|
|
+
|
|
+ /* cache maintenance */
|
|
+ u32 last_cpu_prep_op;
|
|
+
|
|
+ struct etnaviv_gem_userptr userptr;
|
|
+};
|
|
+
|
|
+static inline
|
|
+struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
|
|
+{
|
|
+ return container_of(obj, struct etnaviv_gem_object, base);
|
|
+}
|
|
+
|
|
+struct etnaviv_gem_ops {
|
|
+ int (*get_pages)(struct etnaviv_gem_object *);
|
|
+ void (*release)(struct etnaviv_gem_object *);
|
|
+};
|
|
+
|
|
+static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
|
|
+{
|
|
+ return atomic_read(&etnaviv_obj->gpu_active) != 0;
|
|
+}
|
|
+
|
|
+#define MAX_CMDS 4
|
|
+
|
|
+/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
|
|
+ * associated with the cmdstream submission for synchronization (and
|
|
+ * make it easier to unwind when things go wrong, etc). This only
|
|
+ * lasts for the duration of the submit-ioctl.
|
|
+ */
|
|
+struct etnaviv_gem_submit {
|
|
+ struct drm_device *dev;
|
|
+ struct etnaviv_gpu *gpu;
|
|
+ struct ww_acquire_ctx ticket;
|
|
+ u32 fence;
|
|
+ unsigned int nr_bos;
|
|
+ struct {
|
|
+ u32 flags;
|
|
+ struct etnaviv_gem_object *obj;
|
|
+ u32 iova;
|
|
+ } bos[0];
|
|
+};
|
|
+
|
|
+int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
|
|
+ struct timespec *timeout);
|
|
+int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
|
|
+ struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
|
|
+ struct etnaviv_gem_object **res);
|
|
+int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
|
|
+struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
|
|
+void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
|
|
+
|
|
+#endif /* __ETNAVIV_GEM_H__ */
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
|
|
new file mode 100644
|
|
index 0000000..e94db4f
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
|
|
@@ -0,0 +1,122 @@
|
|
+/*
|
|
+ * Copyright (C) 2013 Red Hat
|
|
+ * Author: Rob Clark <robdclark@gmail.com>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#include <linux/dma-buf.h>
|
|
+#include "etnaviv_drv.h"
|
|
+#include "etnaviv_gem.h"
|
|
+
|
|
+
|
|
+struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+
|
|
+ BUG_ON(!etnaviv_obj->sgt); /* should have already pinned! */
|
|
+
|
|
+ return etnaviv_obj->sgt;
|
|
+}
|
|
+
|
|
+void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
|
|
+{
|
|
+ return etnaviv_gem_vaddr(obj);
|
|
+}
|
|
+
|
|
+void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
|
|
+{
|
|
+ /* TODO msm_gem_vunmap() */
|
|
+}
|
|
+
|
|
+int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
|
|
+{
|
|
+ if (!obj->import_attach) {
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+
|
|
+ mutex_lock(&etnaviv_obj->lock);
|
|
+ etnaviv_gem_get_pages(etnaviv_obj);
|
|
+ mutex_unlock(&etnaviv_obj->lock);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
|
|
+{
|
|
+ if (!obj->import_attach) {
|
|
+ struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
|
|
+
|
|
+ mutex_lock(&etnaviv_obj->lock);
|
|
+ etnaviv_gem_put_pages(to_etnaviv_bo(obj));
|
|
+ mutex_unlock(&etnaviv_obj->lock);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
|
|
+{
|
|
+ if (etnaviv_obj->vaddr)
|
|
+ dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf,
|
|
+ etnaviv_obj->vaddr);
|
|
+
|
|
+ /* Don't drop the pages for imported dmabuf, as they are not
|
|
+ * ours, just free the array we allocated:
|
|
+ */
|
|
+ if (etnaviv_obj->pages)
|
|
+ drm_free_large(etnaviv_obj->pages);
|
|
+
|
|
+ drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
|
|
+}
|
|
+
|
|
+static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
|
|
+ /* .get_pages should never be called */
|
|
+ .release = etnaviv_gem_prime_release,
|
|
+};
|
|
+
|
|
+struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
|
|
+ struct dma_buf_attachment *attach, struct sg_table *sgt)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj;
|
|
+ size_t size = PAGE_ALIGN(attach->dmabuf->size);
|
|
+ int ret, npages;
|
|
+
|
|
+ ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
|
|
+ attach->dmabuf->resv,
|
|
+ &etnaviv_gem_prime_ops, &etnaviv_obj);
|
|
+ if (ret < 0)
|
|
+ return ERR_PTR(ret);
|
|
+
|
|
+ npages = size / PAGE_SIZE;
|
|
+
|
|
+ etnaviv_obj->sgt = sgt;
|
|
+ etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
|
|
+ if (!etnaviv_obj->pages) {
|
|
+ ret = -ENOMEM;
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
|
|
+ NULL, npages);
|
|
+ if (ret)
|
|
+ goto fail;
|
|
+
|
|
+ ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
|
|
+ if (ret)
|
|
+ goto fail;
|
|
+
|
|
+ return &etnaviv_obj->base;
|
|
+
|
|
+fail:
|
|
+ drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
|
|
+
|
|
+ return ERR_PTR(ret);
|
|
+}
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
new file mode 100644
index 0000000..1aba01a
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -0,0 +1,443 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#include <linux/reservation.h>
|
|
+#include "etnaviv_drv.h"
|
|
+#include "etnaviv_gpu.h"
|
|
+#include "etnaviv_gem.h"
|
|
+
|
|
+/*
|
|
+ * Cmdstream submission:
|
|
+ */
|
|
+
|
|
+#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
|
|
+/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
|
|
+#define BO_LOCKED 0x4000
|
|
+#define BO_PINNED 0x2000
|
|
+
|
|
+static inline void __user *to_user_ptr(u64 address)
|
|
+{
|
|
+ return (void __user *)(uintptr_t)address;
|
|
+}
|
|
+
|
|
+static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
|
|
+ struct etnaviv_gpu *gpu, size_t nr)
|
|
+{
|
|
+ struct etnaviv_gem_submit *submit;
|
|
+ size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));
|
|
+
|
|
+ submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
|
|
+ if (submit) {
|
|
+ submit->dev = dev;
|
|
+ submit->gpu = gpu;
|
|
+
|
|
+ /* initially, until copy_from_user() and bo lookup succeeds: */
|
|
+ submit->nr_bos = 0;
|
|
+
|
|
+ ww_acquire_init(&submit->ticket, &reservation_ww_class);
|
|
+ }
|
|
+
|
|
+ return submit;
|
|
+}
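
size_vstruct() comes from etnaviv_drv.h and is not visible in this hunk; presumably it is the usual helper that sizes a structure with a trailing variable-length array, along these lines (an assumption, shown only to make the allocation above easier to follow):

/* Presumed shape of size_vstruct(): the fixed part of the struct plus
 * nelem trailing array elements. */
static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
{
	return base + nelem * elem_size;
}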
+
|
|
+static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
|
|
+ struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
|
|
+ unsigned nr_bos)
|
|
+{
|
|
+ struct drm_etnaviv_gem_submit_bo *bo;
|
|
+ unsigned i;
|
|
+ int ret = 0;
|
|
+
|
|
+ spin_lock(&file->table_lock);
|
|
+
|
|
+ for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
|
|
+ struct drm_gem_object *obj;
|
|
+
|
|
+ if (bo->flags & BO_INVALID_FLAGS) {
|
|
+ DRM_ERROR("invalid flags: %x\n", bo->flags);
|
|
+ ret = -EINVAL;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+
|
|
+ submit->bos[i].flags = bo->flags;
|
|
+
|
|
+ /* normally use drm_gem_object_lookup(), but for bulk lookup
|
|
+ * all under single table_lock just hit object_idr directly:
|
|
+ */
|
|
+ obj = idr_find(&file->object_idr, bo->handle);
|
|
+ if (!obj) {
|
|
+ DRM_ERROR("invalid handle %u at index %u\n",
|
|
+ bo->handle, i);
|
|
+ ret = -EINVAL;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Take a refcount on the object. The file table lock
|
|
+ * prevents the object_idr's refcount on this being dropped.
|
|
+ */
|
|
+ drm_gem_object_reference(obj);
|
|
+
|
|
+ submit->bos[i].obj = to_etnaviv_bo(obj);
|
|
+ }
|
|
+
|
|
+out_unlock:
|
|
+ submit->nr_bos = i;
|
|
+ spin_unlock(&file->table_lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
|
|
+{
|
|
+ if (submit->bos[i].flags & BO_LOCKED) {
|
|
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
|
|
+
|
|
+ ww_mutex_unlock(&etnaviv_obj->resv->lock);
|
|
+ submit->bos[i].flags &= ~BO_LOCKED;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int submit_lock_objects(struct etnaviv_gem_submit *submit)
|
|
+{
|
|
+ int contended, slow_locked = -1, i, ret = 0;
|
|
+
|
|
+retry:
|
|
+ for (i = 0; i < submit->nr_bos; i++) {
|
|
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
|
|
+
|
|
+ if (slow_locked == i)
|
|
+ slow_locked = -1;
|
|
+
|
|
+ contended = i;
|
|
+
|
|
+ if (!(submit->bos[i].flags & BO_LOCKED)) {
|
|
+ ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
|
|
+ &submit->ticket);
|
|
+ if (ret == -EALREADY)
|
|
+ DRM_ERROR("BO at index %u already on submit list\n",
|
|
+ i);
|
|
+ if (ret)
|
|
+ goto fail;
|
|
+ submit->bos[i].flags |= BO_LOCKED;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ww_acquire_done(&submit->ticket);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+fail:
|
|
+ for (; i >= 0; i--)
|
|
+ submit_unlock_object(submit, i);
|
|
+
|
|
+ if (slow_locked > 0)
|
|
+ submit_unlock_object(submit, slow_locked);
|
|
+
|
|
+ if (ret == -EDEADLK) {
|
|
+ struct etnaviv_gem_object *etnaviv_obj;
|
|
+
|
|
+ etnaviv_obj = submit->bos[contended].obj;
|
|
+
|
|
+ /* we lost out in a seqno race, lock and retry.. */
|
|
+ ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
|
|
+ &submit->ticket);
|
|
+ if (!ret) {
|
|
+ submit->bos[contended].flags |= BO_LOCKED;
|
|
+ slow_locked = contended;
|
|
+ goto retry;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
|
|
+{
|
|
+ unsigned int context = submit->gpu->fence_context;
|
|
+ int i, ret = 0;
|
|
+
|
|
+ for (i = 0; i < submit->nr_bos; i++) {
|
|
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
|
|
+ bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
|
|
+
|
|
+ ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
|
|
+ if (ret)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < submit->nr_bos; i++) {
|
|
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
|
|
+
|
|
+ if (submit->bos[i].flags & BO_PINNED)
|
|
+ etnaviv_gem_put_iova(submit->gpu, &etnaviv_obj->base);
|
|
+
|
|
+ submit->bos[i].iova = 0;
|
|
+ submit->bos[i].flags &= ~BO_PINNED;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int submit_pin_objects(struct etnaviv_gem_submit *submit)
|
|
+{
|
|
+ int i, ret = 0;
|
|
+
|
|
+ for (i = 0; i < submit->nr_bos; i++) {
|
|
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
|
|
+ u32 iova;
|
|
+
|
|
+ ret = etnaviv_gem_get_iova(submit->gpu, &etnaviv_obj->base,
|
|
+ &iova);
|
|
+ if (ret)
|
|
+ break;
|
|
+
|
|
+ submit->bos[i].flags |= BO_PINNED;
|
|
+ submit->bos[i].iova = iova;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
|
|
+ struct etnaviv_gem_object **obj, u32 *iova)
|
|
+{
|
|
+ if (idx >= submit->nr_bos) {
|
|
+ DRM_ERROR("invalid buffer index: %u (out of %u)\n",
|
|
+ idx, submit->nr_bos);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (obj)
|
|
+ *obj = submit->bos[idx].obj;
|
|
+ if (iova)
|
|
+ *iova = submit->bos[idx].iova;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* process the relocs and patch up the cmdstream as needed: */
|
|
+static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
|
|
+ u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
|
|
+ u32 nr_relocs)
|
|
+{
|
|
+ u32 i, last_offset = 0;
|
|
+ u32 *ptr = stream;
|
|
+ int ret;
|
|
+
|
|
+ for (i = 0; i < nr_relocs; i++) {
|
|
+ const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
|
|
+ struct etnaviv_gem_object *bobj;
|
|
+ u32 iova, off;
|
|
+
|
|
+ if (unlikely(r->flags)) {
|
|
+ DRM_ERROR("invalid reloc flags\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (r->submit_offset % 4) {
|
|
+ DRM_ERROR("non-aligned reloc offset: %u\n",
|
|
+ r->submit_offset);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /* offset in dwords: */
|
|
+ off = r->submit_offset / 4;
|
|
+
|
|
+ if ((off >= size ) ||
|
|
+ (off < last_offset)) {
|
|
+ DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ ret = submit_bo(submit, r->reloc_idx, &bobj, &iova);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (r->reloc_offset >=
|
|
+ bobj->base.size - sizeof(*ptr)) {
|
|
+ DRM_ERROR("relocation %u outside object", i);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ ptr[off] = iova + r->reloc_offset;
|
|
+
|
|
+ last_offset = off;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void submit_cleanup(struct etnaviv_gem_submit *submit)
|
|
+{
|
|
+ unsigned i;
|
|
+
|
|
+ for (i = 0; i < submit->nr_bos; i++) {
|
|
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
|
|
+
|
|
+ submit_unlock_object(submit, i);
|
|
+ drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
|
|
+ }
|
|
+
|
|
+ ww_acquire_fini(&submit->ticket);
|
|
+ kfree(submit);
|
|
+}
|
|
+
|
|
+int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
|
|
+ struct drm_file *file)
|
|
+{
|
|
+ struct etnaviv_drm_private *priv = dev->dev_private;
|
|
+ struct drm_etnaviv_gem_submit *args = data;
|
|
+ struct drm_etnaviv_gem_submit_reloc *relocs;
|
|
+ struct drm_etnaviv_gem_submit_bo *bos;
|
|
+ struct etnaviv_gem_submit *submit;
|
|
+ struct etnaviv_cmdbuf *cmdbuf;
|
|
+ struct etnaviv_gpu *gpu;
|
|
+ void *stream;
|
|
+ int ret;
|
|
+
|
|
+ if (args->pipe >= ETNA_MAX_PIPES)
|
|
+ return -EINVAL;
|
|
+
|
|
+ gpu = priv->gpu[args->pipe];
|
|
+ if (!gpu)
|
|
+ return -ENXIO;
|
|
+
|
|
+ if (args->stream_size % 4) {
|
|
+ DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
|
|
+ args->stream_size);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (args->exec_state != ETNA_PIPE_3D &&
|
|
+ args->exec_state != ETNA_PIPE_2D &&
|
|
+ args->exec_state != ETNA_PIPE_VG) {
|
|
+ DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Copy the command submission and bo array to kernel space in
|
|
+ * one go, and do this outside of any locks.
|
|
+ */
|
|
+ bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
|
|
+ relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
|
|
+ stream = drm_malloc_ab(1, args->stream_size);
|
|
+ cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8,
|
|
+ args->nr_bos);
|
|
+ if (!bos || !relocs || !stream || !cmdbuf) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_submit_cmds;
|
|
+ }
|
|
+
|
|
+ cmdbuf->exec_state = args->exec_state;
|
|
+ cmdbuf->ctx = file->driver_priv;
|
|
+
|
|
+ ret = copy_from_user(bos, to_user_ptr(args->bos),
|
|
+ args->nr_bos * sizeof(*bos));
|
|
+ if (ret) {
|
|
+ ret = -EFAULT;
|
|
+ goto err_submit_cmds;
|
|
+ }
|
|
+
|
|
+ ret = copy_from_user(relocs, to_user_ptr(args->relocs),
|
|
+ args->nr_relocs * sizeof(*relocs));
|
|
+ if (ret) {
|
|
+ ret = -EFAULT;
|
|
+ goto err_submit_cmds;
|
|
+ }
|
|
+
|
|
+ ret = copy_from_user(stream, to_user_ptr(args->stream),
|
|
+ args->stream_size);
|
|
+ if (ret) {
|
|
+ ret = -EFAULT;
|
|
+ goto err_submit_cmds;
|
|
+ }
|
|
+
|
|
+ submit = submit_create(dev, gpu, args->nr_bos);
|
|
+ if (!submit) {
|
|
+ ret = -ENOMEM;
|
|
+ goto err_submit_cmds;
|
|
+ }
|
|
+
|
|
+ ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
|
|
+ if (ret)
|
|
+ goto err_submit_objects;
|
|
+
|
|
+ ret = submit_lock_objects(submit);
|
|
+ if (ret)
|
|
+ goto err_submit_objects;
|
|
+
|
|
+ if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
|
|
+ relocs, args->nr_relocs)) {
|
|
+ ret = -EINVAL;
|
|
+ goto err_submit_objects;
|
|
+ }
|
|
+
|
|
+ ret = submit_fence_sync(submit);
|
|
+ if (ret)
|
|
+ goto err_submit_objects;
|
|
+
|
|
+ ret = submit_pin_objects(submit);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+
|
|
+ ret = submit_reloc(submit, stream, args->stream_size / 4,
|
|
+ relocs, args->nr_relocs);
|
|
+ if (ret)
|
|
+ goto out;
|
|
+
|
|
+ memcpy(cmdbuf->vaddr, stream, args->stream_size);
|
|
+ cmdbuf->user_size = ALIGN(args->stream_size, 8);
|
|
+
|
|
+ ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
|
|
+ if (ret == 0)
|
|
+ cmdbuf = NULL;
|
|
+
|
|
+ args->fence = submit->fence;
|
|
+
|
|
+out:
|
|
+ submit_unpin_objects(submit);
|
|
+
|
|
+ /*
|
|
+ * If we're returning -EAGAIN, it may be due to the userptr code
|
|
+ * wanting to run its workqueue outside of any locks. Flush our
|
|
+ * workqueue to ensure that it is run in a timely manner.
|
|
+ */
|
|
+ if (ret == -EAGAIN)
|
|
+ flush_workqueue(priv->wq);
|
|
+
|
|
+err_submit_objects:
|
|
+ submit_cleanup(submit);
|
|
+
|
|
+err_submit_cmds:
|
|
+ /* if we still own the cmdbuf */
|
|
+ if (cmdbuf)
|
|
+ etnaviv_gpu_cmdbuf_free(cmdbuf);
|
|
+ if (stream)
|
|
+ drm_free_large(stream);
|
|
+ if (bos)
|
|
+ drm_free_large(bos);
|
|
+ if (relocs)
|
|
+ drm_free_large(relocs);
|
|
+
|
|
+ return ret;
|
|
+}
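
For context, a hedged userspace sketch of driving this ioctl. The structure fields mirror exactly what etnaviv_ioctl_gem_submit() reads above; the DRM_IOCTL_ETNAVIV_GEM_SUBMIT macro and struct definitions come from the uapi header added elsewhere in this patch, and the handle and command stream below are placeholders:

#include <stdint.h>
#include <xf86drm.h>
#include "etnaviv_drm.h"	/* uapi header added by this patch */

/* Illustrative only: submit a small command stream that references one BO. */
int submit_stream(int drm_fd, uint32_t bo_handle,
		  uint32_t *stream, uint32_t stream_size)
{
	struct drm_etnaviv_gem_submit_bo bo = {
		.handle = bo_handle,
		.flags = ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE,
	};
	struct drm_etnaviv_gem_submit req = {
		.pipe = 0,			/* first GPU core exposed by the driver */
		.exec_state = ETNA_PIPE_2D,
		.stream = (uintptr_t)stream,
		.stream_size = stream_size,	/* bytes, must be a multiple of 4 */
		.bos = (uintptr_t)&bo,
		.nr_bos = 1,
		/* relocs/nr_relocs left at zero: no relocations in this sketch */
	};
	int ret = drmIoctl(drm_fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, &req);

	/* on success, req.fence holds the fence number to wait on */
	return ret;
}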
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
new file mode 100644
index 0000000..d39093d
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -0,0 +1,1644 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#include <linux/component.h>
|
|
+#include <linux/fence.h>
|
|
+#include <linux/moduleparam.h>
|
|
+#include <linux/of_device.h>
|
|
+#include "etnaviv_dump.h"
|
|
+#include "etnaviv_gpu.h"
|
|
+#include "etnaviv_gem.h"
|
|
+#include "etnaviv_mmu.h"
|
|
+#include "etnaviv_iommu.h"
|
|
+#include "etnaviv_iommu_v2.h"
|
|
+#include "common.xml.h"
|
|
+#include "state.xml.h"
|
|
+#include "state_hi.xml.h"
|
|
+#include "cmdstream.xml.h"
|
|
+
|
|
+static const struct platform_device_id gpu_ids[] = {
|
|
+ { .name = "etnaviv-gpu,2d" },
|
|
+ { },
|
|
+};
|
|
+
|
|
+static bool etnaviv_dump_core = true;
|
|
+module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
|
|
+
|
|
+/*
|
|
+ * Driver functions:
|
|
+ */
|
|
+
|
|
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
|
|
+{
|
|
+ switch (param) {
|
|
+ case ETNAVIV_PARAM_GPU_MODEL:
|
|
+ *value = gpu->identity.model;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_REVISION:
|
|
+ *value = gpu->identity.revision;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_FEATURES_0:
|
|
+ *value = gpu->identity.features;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_FEATURES_1:
|
|
+ *value = gpu->identity.minor_features0;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_FEATURES_2:
|
|
+ *value = gpu->identity.minor_features1;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_FEATURES_3:
|
|
+ *value = gpu->identity.minor_features2;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_FEATURES_4:
|
|
+ *value = gpu->identity.minor_features3;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_STREAM_COUNT:
|
|
+ *value = gpu->identity.stream_count;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_REGISTER_MAX:
|
|
+ *value = gpu->identity.register_max;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_THREAD_COUNT:
|
|
+ *value = gpu->identity.thread_count;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
|
|
+ *value = gpu->identity.vertex_cache_size;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
|
|
+ *value = gpu->identity.shader_core_count;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
|
|
+ *value = gpu->identity.pixel_pipes;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
|
|
+ *value = gpu->identity.vertex_output_buffer_size;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
|
|
+ *value = gpu->identity.buffer_size;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
|
|
+ *value = gpu->identity.instruction_count;
|
|
+ break;
|
|
+
|
|
+ case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
|
|
+ *value = gpu->identity.num_constants;
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
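
The matching userspace query goes through the driver's GET_PARAM ioctl (the wrapper lives in etnaviv_drv.c, outside this hunk). A hedged sketch, assuming the struct drm_etnaviv_param and DRM_IOCTL_ETNAVIV_GET_PARAM definitions from the uapi header added by this patch:

#include <stdint.h>
#include <xf86drm.h>
#include "etnaviv_drm.h"	/* uapi header added by this patch */

/* Illustrative only: read one of the identity values returned by
 * etnaviv_gpu_get_param() above. */
static int get_gpu_model(int drm_fd, uint64_t *model)
{
	struct drm_etnaviv_param req = {
		.pipe = 0,
		.param = ETNAVIV_PARAM_GPU_MODEL,
	};
	int ret = drmIoctl(drm_fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req);

	if (!ret)
		*model = req.value;
	return ret;
}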
+
|
|
+static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ if (gpu->identity.minor_features0 &
|
|
+ chipMinorFeatures0_MORE_MINOR_FEATURES) {
|
|
+ u32 specs[2];
|
|
+
|
|
+ specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
|
|
+ specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
|
|
+
|
|
+ gpu->identity.stream_count =
|
|
+ (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
|
|
+ >> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
|
|
+ gpu->identity.register_max =
|
|
+ (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
|
|
+ >> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
|
|
+ gpu->identity.thread_count =
|
|
+ (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
|
|
+ >> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
|
|
+ gpu->identity.vertex_cache_size =
|
|
+ (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
|
|
+ >> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
|
|
+ gpu->identity.shader_core_count =
|
|
+ (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
|
|
+ >> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
|
|
+ gpu->identity.pixel_pipes =
|
|
+ (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
|
|
+ >> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
|
|
+ gpu->identity.vertex_output_buffer_size =
|
|
+ (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
|
|
+ >> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;
|
|
+
|
|
+ gpu->identity.buffer_size =
|
|
+ (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
|
|
+ >> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
|
|
+ gpu->identity.instruction_count =
|
|
+ (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
|
|
+ >> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
|
|
+ gpu->identity.num_constants =
|
|
+ (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
|
|
+ >> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
|
|
+ }
|
|
+
|
|
+ /* Fill in the stream count if not specified */
|
|
+ if (gpu->identity.stream_count == 0) {
|
|
+ if (gpu->identity.model >= 0x1000)
|
|
+ gpu->identity.stream_count = 4;
|
|
+ else
|
|
+ gpu->identity.stream_count = 1;
|
|
+ }
|
|
+
|
|
+ /* Convert the register max value */
|
|
+ if (gpu->identity.register_max)
|
|
+ gpu->identity.register_max = 1 << gpu->identity.register_max;
|
|
+ else if (gpu->identity.model == 0x0400)
|
|
+ gpu->identity.register_max = 32;
|
|
+ else
|
|
+ gpu->identity.register_max = 64;
|
|
+
|
|
+ /* Convert thread count */
|
|
+ if (gpu->identity.thread_count)
|
|
+ gpu->identity.thread_count = 1 << gpu->identity.thread_count;
|
|
+ else if (gpu->identity.model == 0x0400)
|
|
+ gpu->identity.thread_count = 64;
|
|
+ else if (gpu->identity.model == 0x0500 ||
|
|
+ gpu->identity.model == 0x0530)
|
|
+ gpu->identity.thread_count = 128;
|
|
+ else
|
|
+ gpu->identity.thread_count = 256;
|
|
+
|
|
+ if (gpu->identity.vertex_cache_size == 0)
|
|
+ gpu->identity.vertex_cache_size = 8;
|
|
+
|
|
+ if (gpu->identity.shader_core_count == 0) {
|
|
+ if (gpu->identity.model >= 0x1000)
|
|
+ gpu->identity.shader_core_count = 2;
|
|
+ else
|
|
+ gpu->identity.shader_core_count = 1;
|
|
+ }
|
|
+
|
|
+ if (gpu->identity.pixel_pipes == 0)
|
|
+ gpu->identity.pixel_pipes = 1;
|
|
+
|
|
+ /* Convert vertex buffer size */
|
|
+ if (gpu->identity.vertex_output_buffer_size) {
|
|
+ gpu->identity.vertex_output_buffer_size =
|
|
+ 1 << gpu->identity.vertex_output_buffer_size;
|
|
+ } else if (gpu->identity.model == 0x0400) {
|
|
+ if (gpu->identity.revision < 0x4000)
|
|
+ gpu->identity.vertex_output_buffer_size = 512;
|
|
+ else if (gpu->identity.revision < 0x4200)
|
|
+ gpu->identity.vertex_output_buffer_size = 256;
|
|
+ else
|
|
+ gpu->identity.vertex_output_buffer_size = 128;
|
|
+ } else {
|
|
+ gpu->identity.vertex_output_buffer_size = 512;
|
|
+ }
|
|
+
|
|
+ switch (gpu->identity.instruction_count) {
|
|
+ case 0:
|
|
+ if ((gpu->identity.model == 0x2000 &&
|
|
+ gpu->identity.revision == 0x5108) ||
|
|
+ gpu->identity.model == 0x880)
|
|
+ gpu->identity.instruction_count = 512;
|
|
+ else
|
|
+ gpu->identity.instruction_count = 256;
|
|
+ break;
|
|
+
|
|
+ case 1:
|
|
+ gpu->identity.instruction_count = 1024;
|
|
+ break;
|
|
+
|
|
+ case 2:
|
|
+ gpu->identity.instruction_count = 2048;
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ gpu->identity.instruction_count = 256;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (gpu->identity.num_constants == 0)
|
|
+ gpu->identity.num_constants = 168;
|
|
+}
|
|
+
|
|
+static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ u32 chipIdentity;
|
|
+
|
|
+ chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
|
|
+
|
|
+ /* Special case for older graphics cores. */
|
|
+ if (VIVS_HI_CHIP_IDENTITY_FAMILY(chipIdentity) == 0x01) {
|
|
+ gpu->identity.model = 0x500; /* gc500 */
|
|
+ gpu->identity.revision = VIVS_HI_CHIP_IDENTITY_REVISION(chipIdentity);
|
|
+ } else {
|
|
+
|
|
+ gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
|
|
+ gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
|
|
+
|
|
+ /*
|
|
+ * !!!! HACK ALERT !!!!
|
|
+ * Because people change device IDs without letting software
|
|
+ * know about it - here is the hack to make it all look the
|
|
+ * same. Only for GC400 family.
|
|
+ */
|
|
+ if ((gpu->identity.model & 0xff00) == 0x0400 &&
|
|
+ gpu->identity.model != 0x0420) {
|
|
+ gpu->identity.model = gpu->identity.model & 0x0400;
|
|
+ }
|
|
+
|
|
+ /* Another special case */
|
|
+ if (gpu->identity.model == 0x300 &&
|
|
+ gpu->identity.revision == 0x2201) {
|
|
+ u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
|
|
+ u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
|
|
+
|
|
+ if (chipDate == 0x20080814 && chipTime == 0x12051100) {
|
|
+ /*
|
|
+ * This IP has an ECO; put the correct
|
|
+ * revision in it.
|
|
+ */
|
|
+ gpu->identity.revision = 0x1051;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ dev_info(gpu->dev, "model: GC%x, revision: %x\n",
|
|
+ gpu->identity.model, gpu->identity.revision);
|
|
+
|
|
+ gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
|
|
+
|
|
+ /* Disable fast clear on GC700. */
|
|
+ if (gpu->identity.model == 0x700)
|
|
+ gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
|
|
+
|
|
+ if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
|
|
+ (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {
|
|
+
|
|
+ /*
|
|
+ * GC500 rev 1.x and GC300 rev < 2.0 don't have these
|
|
+ * registers.
|
|
+ */
|
|
+ gpu->identity.minor_features0 = 0;
|
|
+ gpu->identity.minor_features1 = 0;
|
|
+ gpu->identity.minor_features2 = 0;
|
|
+ gpu->identity.minor_features3 = 0;
|
|
+ } else
|
|
+ gpu->identity.minor_features0 =
|
|
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
|
|
+
|
|
+ if (gpu->identity.minor_features0 &
|
|
+ chipMinorFeatures0_MORE_MINOR_FEATURES) {
|
|
+ gpu->identity.minor_features1 =
|
|
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
|
|
+ gpu->identity.minor_features2 =
|
|
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
|
|
+ gpu->identity.minor_features3 =
|
|
+ gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
|
|
+ }
|
|
+
|
|
+ /* GC600 idle register reports zero bits where modules aren't present */
|
|
+ if (gpu->identity.model == chipModel_GC600) {
|
|
+ gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
|
|
+ VIVS_HI_IDLE_STATE_RA |
|
|
+ VIVS_HI_IDLE_STATE_SE |
|
|
+ VIVS_HI_IDLE_STATE_PA |
|
|
+ VIVS_HI_IDLE_STATE_SH |
|
|
+ VIVS_HI_IDLE_STATE_PE |
|
|
+ VIVS_HI_IDLE_STATE_DE |
|
|
+ VIVS_HI_IDLE_STATE_FE;
|
|
+ } else {
|
|
+ gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
|
|
+ }
|
|
+
|
|
+ etnaviv_hw_specs(gpu);
|
|
+}
|
|
+
|
|
+static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
|
|
+{
|
|
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
|
|
+ VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
|
|
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
|
|
+}
|
|
+
|
|
+static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ u32 control, idle;
|
|
+ unsigned long timeout;
|
|
+ bool failed = true;
|
|
+
|
|
+ /* TODO
|
|
+ *
|
|
+ * - clock gating
|
|
+ * - pulse eater
|
|
+ * - what about VG?
|
|
+ */
|
|
+
|
|
+ /* We hope that the GPU resets in under one second */
|
|
+ timeout = jiffies + msecs_to_jiffies(1000);
|
|
+
|
|
+ while (time_is_after_jiffies(timeout)) {
|
|
+ control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
|
|
+ VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
|
|
+
|
|
+ /* enable clock */
|
|
+ etnaviv_gpu_load_clock(gpu, control);
|
|
+
|
|
+ /* Wait for stable clock. Vivante's code waited for 1ms */
|
|
+ usleep_range(1000, 10000);
|
|
+
|
|
+ /* isolate the GPU. */
|
|
+ control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
|
|
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
|
|
+
|
|
+ /* set soft reset. */
|
|
+ control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
|
|
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
|
|
+
|
|
+ /* wait for reset. */
|
|
+ msleep(1);
|
|
+
|
|
+ /* reset soft reset bit. */
|
|
+ control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
|
|
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
|
|
+
|
|
+ /* reset GPU isolation. */
|
|
+ control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
|
|
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
|
|
+
|
|
+ /* read idle register. */
|
|
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
|
|
+
|
|
+ /* try resetting again if the FE is not idle */
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
|
|
+ dev_dbg(gpu->dev, "FE is not idle\n");
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* read reset register. */
|
|
+ control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
|
|
+
|
|
+ /* is the GPU idle? */
|
|
+ if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
|
|
+ ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
|
|
+ dev_dbg(gpu->dev, "GPU is not idle\n");
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ failed = false;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (failed) {
|
|
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
|
|
+ control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
|
|
+
|
|
+ dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
|
|
+ idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
|
|
+ control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
|
|
+ control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
|
|
+
|
|
+ return -EBUSY;
|
|
+ }
|
|
+
|
|
+ /* We rely on the GPU running, so program the clock */
|
|
+ control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
|
|
+ VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
|
|
+
|
|
+ /* enable clock */
|
|
+ etnaviv_gpu_load_clock(gpu, control);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ u16 prefetch;
|
|
+
|
|
+ if (gpu->identity.model == chipModel_GC320 &&
|
|
+ gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
|
|
+ (gpu->identity.revision == 0x5007 ||
|
|
+ gpu->identity.revision == 0x5220)) {
|
|
+ u32 mc_memory_debug;
|
|
+
|
|
+ mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
|
|
+
|
|
+ if (gpu->identity.revision == 0x5007)
|
|
+ mc_memory_debug |= 0x0c;
|
|
+ else
|
|
+ mc_memory_debug |= 0x08;
|
|
+
|
|
+ gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * Update GPU AXI cache attribute to "cacheable, no allocate".
|
|
+ * This is necessary to prevent the iMX6 SoC locking up.
|
|
+ */
|
|
+ gpu_write(gpu, VIVS_HI_AXI_CONFIG,
|
|
+ VIVS_HI_AXI_CONFIG_AWCACHE(2) |
|
|
+ VIVS_HI_AXI_CONFIG_ARCACHE(2));
|
|
+
|
|
+ /* GC2000 rev 5108 needs a special bus config */
|
|
+ if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) {
|
|
+ u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
|
|
+ bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
|
|
+ VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
|
|
+ bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
|
|
+ VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
|
|
+ gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
|
|
+ }
|
|
+
|
|
+ /* set base addresses */
|
|
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
|
|
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
|
|
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
|
|
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
|
|
+ gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
|
|
+
|
|
+ /* setup the MMU page table pointers */
|
|
+ etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
|
|
+
|
|
+ /* Start command processor */
|
|
+ prefetch = etnaviv_buffer_init(gpu);
|
|
+
|
|
+ gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
|
|
+ gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
|
|
+ gpu->buffer->paddr - gpu->memory_base);
|
|
+ gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
|
|
+ VIVS_FE_COMMAND_CONTROL_ENABLE |
|
|
+ VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
|
|
+}
|
|
+
|
|
+int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ int ret, i;
|
|
+ struct iommu_domain *iommu;
|
|
+ enum etnaviv_iommu_version version;
|
|
+ bool mmuv2;
|
|
+
|
|
+ ret = pm_runtime_get_sync(gpu->dev);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ etnaviv_hw_identify(gpu);
|
|
+
|
|
+ if (gpu->identity.model == 0) {
|
|
+ dev_err(gpu->dev, "Unknown GPU model\n");
|
|
+ pm_runtime_put_autosuspend(gpu->dev);
|
|
+ return -ENXIO;
|
|
+ }
|
|
+
|
|
+ ret = etnaviv_hw_reset(gpu);
|
|
+ if (ret)
|
|
+ goto fail;
|
|
+
|
|
+ /* Set up the IOMMU... eventually we will (I think) do this once per context
|
|
+ * and have separate page tables per context. For now, to keep things
|
|
+ * simple and to get something working, just use a single address space:
|
|
+ */
|
|
+ mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
|
|
+ dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
|
|
+
|
|
+ if (!mmuv2) {
|
|
+ iommu = etnaviv_iommu_domain_alloc(gpu);
|
|
+ version = ETNAVIV_IOMMU_V1;
|
|
+ } else {
|
|
+ iommu = etnaviv_iommu_v2_domain_alloc(gpu);
|
|
+ version = ETNAVIV_IOMMU_V2;
|
|
+ }
|
|
+
|
|
+ if (!iommu) {
|
|
+ ret = -ENOMEM;
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ /* TODO: we will leak memory here - fix it! */
|
|
+
|
|
+ gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
|
|
+ if (!gpu->mmu) {
|
|
+ ret = -ENOMEM;
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ /* Create buffer: */
|
|
+ gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
|
|
+ if (!gpu->buffer) {
|
|
+ ret = -ENOMEM;
|
|
+ dev_err(gpu->dev, "could not create command buffer\n");
|
|
+ goto fail;
|
|
+ }
|
|
+ if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
|
|
+ ret = -EINVAL;
|
|
+ dev_err(gpu->dev,
|
|
+ "command buffer outside valid memory window\n");
|
|
+ goto free_buffer;
|
|
+ }
|
|
+
|
|
+ /* Setup event management */
|
|
+ spin_lock_init(&gpu->event_spinlock);
|
|
+ init_completion(&gpu->event_free);
|
|
+ for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
|
|
+ gpu->event[i].used = false;
|
|
+ complete(&gpu->event_free);
|
|
+ }
|
|
+
|
|
+ /* Now program the hardware */
|
|
+ mutex_lock(&gpu->lock);
|
|
+ etnaviv_gpu_hw_init(gpu);
|
|
+ mutex_unlock(&gpu->lock);
|
|
+
|
|
+ pm_runtime_mark_last_busy(gpu->dev);
|
|
+ pm_runtime_put_autosuspend(gpu->dev);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+free_buffer:
|
|
+ etnaviv_gpu_cmdbuf_free(gpu->buffer);
|
|
+ gpu->buffer = NULL;
|
|
+fail:
|
|
+ pm_runtime_mark_last_busy(gpu->dev);
|
|
+ pm_runtime_put_autosuspend(gpu->dev);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_DEBUG_FS
|
|
+struct dma_debug {
|
|
+ u32 address[2];
|
|
+ u32 state[2];
|
|
+};
|
|
+
|
|
+static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
|
|
+{
|
|
+ u32 i;
|
|
+
|
|
+ debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
|
|
+ debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
|
|
+
|
|
+ for (i = 0; i < 500; i++) {
|
|
+ debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
|
|
+ debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
|
|
+
|
|
+ if (debug->address[0] != debug->address[1])
|
|
+ break;
|
|
+
|
|
+ if (debug->state[0] != debug->state[1])
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
|
|
+{
|
|
+ struct dma_debug debug;
|
|
+ u32 dma_lo, dma_hi, axi, idle;
|
|
+ int ret;
|
|
+
|
|
+ seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
|
|
+
|
|
+ ret = pm_runtime_get_sync(gpu->dev);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
|
|
+ dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
|
|
+ axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
|
|
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
|
|
+
|
|
+ verify_dma(gpu, &debug);
|
|
+
|
|
+ seq_puts(m, "\tfeatures\n");
|
|
+ seq_printf(m, "\t minor_features0: 0x%08x\n",
|
|
+ gpu->identity.minor_features0);
|
|
+ seq_printf(m, "\t minor_features1: 0x%08x\n",
|
|
+ gpu->identity.minor_features1);
|
|
+ seq_printf(m, "\t minor_features2: 0x%08x\n",
|
|
+ gpu->identity.minor_features2);
|
|
+ seq_printf(m, "\t minor_features3: 0x%08x\n",
|
|
+ gpu->identity.minor_features3);
|
|
+
|
|
+ seq_puts(m, "\tspecs\n");
|
|
+ seq_printf(m, "\t stream_count: %d\n",
|
|
+ gpu->identity.stream_count);
|
|
+ seq_printf(m, "\t register_max: %d\n",
|
|
+ gpu->identity.register_max);
|
|
+ seq_printf(m, "\t thread_count: %d\n",
|
|
+ gpu->identity.thread_count);
|
|
+ seq_printf(m, "\t vertex_cache_size: %d\n",
|
|
+ gpu->identity.vertex_cache_size);
|
|
+ seq_printf(m, "\t shader_core_count: %d\n",
|
|
+ gpu->identity.shader_core_count);
|
|
+ seq_printf(m, "\t pixel_pipes: %d\n",
|
|
+ gpu->identity.pixel_pipes);
|
|
+ seq_printf(m, "\t vertex_output_buffer_size: %d\n",
|
|
+ gpu->identity.vertex_output_buffer_size);
|
|
+ seq_printf(m, "\t buffer_size: %d\n",
|
|
+ gpu->identity.buffer_size);
|
|
+ seq_printf(m, "\t instruction_count: %d\n",
|
|
+ gpu->identity.instruction_count);
|
|
+ seq_printf(m, "\t num_constants: %d\n",
|
|
+ gpu->identity.num_constants);
|
|
+
|
|
+ seq_printf(m, "\taxi: 0x%08x\n", axi);
|
|
+ seq_printf(m, "\tidle: 0x%08x\n", idle);
|
|
+ idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
|
|
+ seq_puts(m, "\t FE is not idle\n");
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
|
|
+ seq_puts(m, "\t DE is not idle\n");
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
|
|
+ seq_puts(m, "\t PE is not idle\n");
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
|
|
+ seq_puts(m, "\t SH is not idle\n");
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
|
|
+ seq_puts(m, "\t PA is not idle\n");
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
|
|
+ seq_puts(m, "\t SE is not idle\n");
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
|
|
+ seq_puts(m, "\t RA is not idle\n");
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
|
|
+ seq_puts(m, "\t TX is not idle\n");
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
|
|
+ seq_puts(m, "\t VG is not idle\n");
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
|
|
+ seq_puts(m, "\t IM is not idle\n");
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
|
|
+ seq_puts(m, "\t FP is not idle\n");
|
|
+ if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
|
|
+ seq_puts(m, "\t TS is not idle\n");
|
|
+ if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
|
|
+ seq_puts(m, "\t AXI low power mode\n");
|
|
+
|
|
+ if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
|
|
+ u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
|
|
+ u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
|
|
+ u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
|
|
+
|
|
+ seq_puts(m, "\tMC\n");
|
|
+ seq_printf(m, "\t read0: 0x%08x\n", read0);
|
|
+ seq_printf(m, "\t read1: 0x%08x\n", read1);
|
|
+ seq_printf(m, "\t write: 0x%08x\n", write);
|
|
+ }
|
|
+
|
|
+ seq_puts(m, "\tDMA ");
|
|
+
|
|
+ if (debug.address[0] == debug.address[1] &&
|
|
+ debug.state[0] == debug.state[1]) {
|
|
+ seq_puts(m, "seems to be stuck\n");
|
|
+ } else if (debug.address[0] == debug.address[1]) {
|
|
+ seq_puts(m, "adress is constant\n");
|
|
+ } else {
|
|
+ seq_puts(m, "is runing\n");
|
|
+ }
|
|
+
|
|
+ seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
|
|
+ seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
|
|
+ seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
|
|
+ seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
|
|
+ seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
|
|
+ dma_lo, dma_hi);
|
|
+
|
|
+ ret = 0;
|
|
+
|
|
+ pm_runtime_mark_last_busy(gpu->dev);
|
|
+ pm_runtime_put_autosuspend(gpu->dev);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+#endif
|
|
+
|
|
+/*
|
|
+ * Power Management:
|
|
+ */
|
|
+static int enable_clk(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ if (gpu->clk_core)
|
|
+ clk_prepare_enable(gpu->clk_core);
|
|
+ if (gpu->clk_shader)
|
|
+ clk_prepare_enable(gpu->clk_shader);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int disable_clk(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ if (gpu->clk_core)
|
|
+ clk_disable_unprepare(gpu->clk_core);
|
|
+ if (gpu->clk_shader)
|
|
+ clk_disable_unprepare(gpu->clk_shader);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int enable_axi(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ if (gpu->clk_bus)
|
|
+ clk_prepare_enable(gpu->clk_bus);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int disable_axi(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ if (gpu->clk_bus)
|
|
+ clk_disable_unprepare(gpu->clk_bus);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Hangcheck detection for a locked-up GPU:
|
|
+ */
|
|
+static void recover_worker(struct work_struct *work)
|
|
+{
|
|
+ struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
|
|
+ recover_work);
|
|
+ unsigned long flags;
|
|
+ unsigned int i;
|
|
+
|
|
+ dev_err(gpu->dev, "hangcheck recover!\n");
|
|
+
|
|
+ if (pm_runtime_get_sync(gpu->dev) < 0)
|
|
+ return;
|
|
+
|
|
+ mutex_lock(&gpu->lock);
|
|
+
|
|
+ /* Only catch the first event, or when manually re-armed */
|
|
+ if (etnaviv_dump_core) {
|
|
+ etnaviv_core_dump(gpu);
|
|
+ etnaviv_dump_core = false;
|
|
+ }
|
|
+
|
|
+ etnaviv_hw_reset(gpu);
|
|
+
|
|
+ /* complete all events, the GPU won't do it after the reset */
|
|
+ spin_lock_irqsave(&gpu->event_spinlock, flags);
|
|
+ for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
|
|
+ if (!gpu->event[i].used)
|
|
+ continue;
|
|
+ fence_signal(gpu->event[i].fence);
|
|
+ gpu->event[i].fence = NULL;
|
|
+ gpu->event[i].used = false;
|
|
+ complete(&gpu->event_free);
|
|
+ /*
|
|
+ * Decrement the PM count for each stuck event. This is safe
|
|
+ * even in atomic context as we use ASYNC RPM here.
|
|
+ */
|
|
+ pm_runtime_put_autosuspend(gpu->dev);
|
|
+ }
|
|
+ spin_unlock_irqrestore(&gpu->event_spinlock, flags);
|
|
+ gpu->completed_fence = gpu->active_fence;
|
|
+
|
|
+ etnaviv_gpu_hw_init(gpu);
|
|
+ gpu->switch_context = true;
|
|
+
|
|
+ mutex_unlock(&gpu->lock);
|
|
+ pm_runtime_mark_last_busy(gpu->dev);
|
|
+ pm_runtime_put_autosuspend(gpu->dev);
|
|
+
|
|
+ /* Retire the buffer objects in a work */
|
|
+ etnaviv_queue_work(gpu->drm, &gpu->retire_work);
|
|
+}
|
|
+
|
|
+static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ DBG("%s", dev_name(gpu->dev));
|
|
+ mod_timer(&gpu->hangcheck_timer,
|
|
+ round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
|
|
+}
|
|
+
|
|
+static void hangcheck_handler(unsigned long data)
|
|
+{
|
|
+ struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
|
|
+ u32 fence = gpu->completed_fence;
|
|
+ bool progress = false;
|
|
+
|
|
+ if (fence != gpu->hangcheck_fence) {
|
|
+ gpu->hangcheck_fence = fence;
|
|
+ progress = true;
|
|
+ }
|
|
+
|
|
+ if (!progress) {
|
|
+ u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
|
|
+ int change = dma_addr - gpu->hangcheck_dma_addr;
|
|
+
|
|
+ if (change < 0 || change > 16) {
|
|
+ gpu->hangcheck_dma_addr = dma_addr;
|
|
+ progress = true;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!progress && fence_after(gpu->active_fence, fence)) {
|
|
+ dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
|
|
+ dev_err(gpu->dev, " completed fence: %u\n", fence);
|
|
+ dev_err(gpu->dev, " active fence: %u\n",
|
|
+ gpu->active_fence);
|
|
+ etnaviv_queue_work(gpu->drm, &gpu->recover_work);
|
|
+ }
|
|
+
|
|
+ /* if still more pending work, reset the hangcheck timer: */
|
|
+ if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
|
|
+ hangcheck_timer_reset(gpu);
|
|
+}
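
The hangcheck logic above orders fence numbers with fence_after(), declared in etnaviv_gpu.h and not part of this hunk; presumably it is the usual wrap-safe sequence-number comparison:

/* Presumed shape of fence_after() (assumption, not from this hunk):
 * true if fence a was issued after fence b, tolerating u32 wrap-around. */
static inline bool fence_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}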
+
|
|
+static void hangcheck_disable(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ del_timer_sync(&gpu->hangcheck_timer);
|
|
+ cancel_work_sync(&gpu->recover_work);
|
|
+}
|
|
+
|
|
+/* fence object management */
|
|
+struct etnaviv_fence {
|
|
+ struct etnaviv_gpu *gpu;
|
|
+ struct fence base;
|
|
+};
|
|
+
|
|
+static inline struct etnaviv_fence *to_etnaviv_fence(struct fence *fence)
|
|
+{
|
|
+ return container_of(fence, struct etnaviv_fence, base);
|
|
+}
|
|
+
|
|
+static const char *etnaviv_fence_get_driver_name(struct fence *fence)
|
|
+{
|
|
+ return "etnaviv";
|
|
+}
|
|
+
|
|
+static const char *etnaviv_fence_get_timeline_name(struct fence *fence)
|
|
+{
|
|
+ struct etnaviv_fence *f = to_etnaviv_fence(fence);
|
|
+
|
|
+ return dev_name(f->gpu->dev);
|
|
+}
|
|
+
|
|
+static bool etnaviv_fence_enable_signaling(struct fence *fence)
|
|
+{
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool etnaviv_fence_signaled(struct fence *fence)
|
|
+{
|
|
+ struct etnaviv_fence *f = to_etnaviv_fence(fence);
|
|
+
|
|
+ return fence_completed(f->gpu, f->base.seqno);
|
|
+}
|
|
+
|
|
+static void etnaviv_fence_release(struct fence *fence)
|
|
+{
|
|
+ struct etnaviv_fence *f = to_etnaviv_fence(fence);
|
|
+
|
|
+ kfree_rcu(f, base.rcu);
|
|
+}
|
|
+
|
|
+static const struct fence_ops etnaviv_fence_ops = {
|
|
+ .get_driver_name = etnaviv_fence_get_driver_name,
|
|
+ .get_timeline_name = etnaviv_fence_get_timeline_name,
|
|
+ .enable_signaling = etnaviv_fence_enable_signaling,
|
|
+ .signaled = etnaviv_fence_signaled,
|
|
+ .wait = fence_default_wait,
|
|
+ .release = etnaviv_fence_release,
|
|
+};
|
|
+
|
|
+static struct fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ struct etnaviv_fence *f;
|
|
+
|
|
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
|
|
+ if (!f)
|
|
+ return NULL;
|
|
+
|
|
+ f->gpu = gpu;
|
|
+
|
|
+ fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
|
|
+ gpu->fence_context, ++gpu->next_fence);
|
|
+
|
|
+ return &f->base;
|
|
+}
|
|
+
|
|
+int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
|
|
+ unsigned int context, bool exclusive)
|
|
+{
|
|
+ struct reservation_object *robj = etnaviv_obj->resv;
|
|
+ struct reservation_object_list *fobj;
|
|
+ struct fence *fence;
|
|
+ int i, ret;
|
|
+
|
|
+ if (!exclusive) {
|
|
+ ret = reservation_object_reserve_shared(robj);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * If we have any shared fences, then the exclusive fence
|
|
+ * should be ignored as it will already have been signalled.
|
|
+ */
|
|
+ fobj = reservation_object_get_list(robj);
|
|
+ if (!fobj || fobj->shared_count == 0) {
|
|
+ /* Wait on any existing exclusive fence which isn't our own */
|
|
+ fence = reservation_object_get_excl(robj);
|
|
+ if (fence && fence->context != context) {
|
|
+ ret = fence_wait(fence, true);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!exclusive || !fobj)
|
|
+ return 0;
|
|
+
|
|
+ for (i = 0; i < fobj->shared_count; i++) {
|
|
+ fence = rcu_dereference_protected(fobj->shared[i],
|
|
+ reservation_object_held(robj));
|
|
+ if (fence->context != context) {
|
|
+ ret = fence_wait(fence, true);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * event management:
|
|
+ */
|
|
+
|
|
+static unsigned int event_alloc(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ unsigned long ret, flags;
|
|
+ unsigned int i, event = ~0U;
|
|
+
|
|
+ ret = wait_for_completion_timeout(&gpu->event_free,
|
|
+ msecs_to_jiffies(10 * 10000));
|
|
+ if (!ret)
|
|
+ dev_err(gpu->dev, "wait_for_completion_timeout failed");
|
|
+
|
|
+ spin_lock_irqsave(&gpu->event_spinlock, flags);
|
|
+
|
|
+ /* find first free event */
|
|
+ for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
|
|
+ if (gpu->event[i].used == false) {
|
|
+ gpu->event[i].used = true;
|
|
+ event = i;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ spin_unlock_irqrestore(&gpu->event_spinlock, flags);
|
|
+
|
|
+ return event;
|
|
+}
|
|
+
|
|
+static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
|
|
+{
|
|
+ unsigned long flags;
|
|
+
|
|
+ spin_lock_irqsave(&gpu->event_spinlock, flags);
|
|
+
|
|
+ if (gpu->event[event].used == false) {
|
|
+ dev_warn(gpu->dev, "event %u is already marked as free",
|
|
+ event);
|
|
+ spin_unlock_irqrestore(&gpu->event_spinlock, flags);
|
|
+ } else {
|
|
+ gpu->event[event].used = false;
|
|
+ spin_unlock_irqrestore(&gpu->event_spinlock, flags);
|
|
+
|
|
+ complete(&gpu->event_free);
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Cmdstream submission/retirement:
|
|
+ */
|
|
+
|
|
+struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size,
|
|
+ size_t nr_bos)
|
|
+{
|
|
+ struct etnaviv_cmdbuf *cmdbuf;
|
|
+ size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
|
|
+ sizeof(*cmdbuf));
|
|
+
|
|
+ cmdbuf = kzalloc(sz, GFP_KERNEL);
|
|
+ if (!cmdbuf)
|
|
+ return NULL;
|
|
+
|
|
+ cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
|
|
+ GFP_KERNEL);
|
|
+ if (!cmdbuf->vaddr) {
|
|
+ kfree(cmdbuf);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ cmdbuf->gpu = gpu;
|
|
+ cmdbuf->size = size;
|
|
+
|
|
+ return cmdbuf;
|
|
+}
|
|
+
|
|
+void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
|
|
+{
|
|
+ dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
|
|
+ cmdbuf->vaddr, cmdbuf->paddr);
|
|
+ kfree(cmdbuf);
|
|
+}
|
|
+
|
|
+static void retire_worker(struct work_struct *work)
|
|
+{
|
|
+ struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
|
|
+ retire_work);
|
|
+ u32 fence = gpu->completed_fence;
|
|
+ struct etnaviv_cmdbuf *cmdbuf, *tmp;
|
|
+ unsigned int i;
|
|
+
|
|
+ mutex_lock(&gpu->lock);
|
|
+ list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
|
|
+ if (!fence_is_signaled(cmdbuf->fence))
|
|
+ break;
|
|
+
|
|
+ list_del(&cmdbuf->node);
|
|
+ fence_put(cmdbuf->fence);
|
|
+
|
|
+ for (i = 0; i < cmdbuf->nr_bos; i++) {
|
|
+ struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];
|
|
+
|
|
+ atomic_dec(&etnaviv_obj->gpu_active);
|
|
+ /* drop the refcount taken in etnaviv_gpu_submit */
|
|
+ etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
|
|
+ }
|
|
+
|
|
+ etnaviv_gpu_cmdbuf_free(cmdbuf);
|
|
+ }
|
|
+
|
|
+ gpu->retired_fence = fence;
|
|
+
|
|
+ mutex_unlock(&gpu->lock);
|
|
+
|
|
+ wake_up_all(&gpu->fence_event);
|
|
+}
|
|
+
|
|
+int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
|
|
+ u32 fence, struct timespec *timeout)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ if (fence_after(fence, gpu->next_fence)) {
|
|
+ DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
|
|
+ fence, gpu->next_fence);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (!timeout) {
|
|
+ /* No timeout was requested: just test for completion */
|
|
+ ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
|
|
+ } else {
|
|
+ unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
|
|
+
|
|
+ ret = wait_event_interruptible_timeout(gpu->fence_event,
|
|
+ fence_completed(gpu, fence),
|
|
+ remaining);
|
|
+ if (ret == 0) {
|
|
+ DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
|
|
+ fence, gpu->retired_fence,
|
|
+ gpu->completed_fence);
|
|
+ ret = -ETIMEDOUT;
|
|
+ } else if (ret != -ERESTARTSYS) {
|
|
+ ret = 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Wait for an object to become inactive. This, on its own, is not race
|
|
+ * free: the object is moved by the retire worker off the active list, and
|
|
+ * then the iova is put. Moreover, the object could be re-submitted just
|
|
+ * after we notice that it's become inactive.
|
|
+ *
|
|
+ * Although the retirement happens under the gpu lock, we don't want to hold
|
|
+ * that lock in this function while waiting.
|
|
+ */
|
|
+int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
|
|
+ struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
|
|
+{
|
|
+ unsigned long remaining;
|
|
+ long ret;
|
|
+
|
|
+ if (!timeout)
|
|
+ return !is_active(etnaviv_obj) ? 0 : -EBUSY;
|
|
+
|
|
+ remaining = etnaviv_timeout_to_jiffies(timeout);
|
|
+
|
|
+ ret = wait_event_interruptible_timeout(gpu->fence_event,
|
|
+ !is_active(etnaviv_obj),
|
|
+ remaining);
|
|
+ if (ret > 0) {
|
|
+ struct etnaviv_drm_private *priv = gpu->drm->dev_private;
|
|
+
|
|
+ /* Synchronise with the retire worker */
|
|
+ flush_workqueue(priv->wq);
|
|
+ return 0;
|
|
+ } else if (ret == -ERESTARTSYS) {
|
|
+ return -ERESTARTSYS;
|
|
+ } else {
|
|
+ return -ETIMEDOUT;
|
|
+ }
|
|
+}
|
|
+
|
|
+int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ return pm_runtime_get_sync(gpu->dev);
|
|
+}
|
|
+
|
|
+void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ pm_runtime_mark_last_busy(gpu->dev);
|
|
+ pm_runtime_put_autosuspend(gpu->dev);
|
|
+}
|
|
+
|
|
+/* add bo's to gpu's ring, and kick gpu: */
|
|
+int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
|
|
+ struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
|
|
+{
|
|
+ struct fence *fence;
|
|
+ unsigned int event, i;
|
|
+ int ret;
|
|
+
|
|
+ ret = etnaviv_gpu_pm_get_sync(gpu);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ mutex_lock(&gpu->lock);
|
|
+
|
|
+ /*
|
|
+ * TODO
|
|
+ *
|
|
+ * - flush
|
|
+ * - data endian
|
|
+ * - prefetch
|
|
+ *
|
|
+ */
|
|
+
|
|
+ event = event_alloc(gpu);
|
|
+ if (unlikely(event == ~0U)) {
|
|
+ DRM_ERROR("no free event\n");
|
|
+ ret = -EBUSY;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+
|
|
+ fence = etnaviv_gpu_fence_alloc(gpu);
|
|
+ if (!fence) {
|
|
+ event_free(gpu, event);
|
|
+ ret = -ENOMEM;
|
|
+ goto out_unlock;
|
|
+ }
|
|
+
|
|
+ gpu->event[event].fence = fence;
|
|
+ submit->fence = fence->seqno;
|
|
+ gpu->active_fence = submit->fence;
|
|
+
|
|
+ if (gpu->lastctx != cmdbuf->ctx) {
|
|
+ gpu->mmu->need_flush = true;
|
|
+ gpu->switch_context = true;
|
|
+ gpu->lastctx = cmdbuf->ctx;
|
|
+ }
|
|
+
|
|
+ etnaviv_buffer_queue(gpu, event, cmdbuf);
|
|
+
|
|
+ cmdbuf->fence = fence;
|
|
+ list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
|
|
+
|
|
+ /* We're committed to adding this command buffer, hold a PM reference */
|
|
+ pm_runtime_get_noresume(gpu->dev);
|
|
+
|
|
+ for (i = 0; i < submit->nr_bos; i++) {
|
|
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
|
|
+ u32 iova;
|
|
+
|
|
+ /* Each cmdbuf takes a refcount on the iova */
|
|
+ etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
|
|
+ cmdbuf->bo[i] = etnaviv_obj;
|
|
+ atomic_inc(&etnaviv_obj->gpu_active);
|
|
+
|
|
+ if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
|
|
+ reservation_object_add_excl_fence(etnaviv_obj->resv,
|
|
+ fence);
|
|
+ else
|
|
+ reservation_object_add_shared_fence(etnaviv_obj->resv,
|
|
+ fence);
|
|
+ }
|
|
+ cmdbuf->nr_bos = submit->nr_bos;
|
|
+ hangcheck_timer_reset(gpu);
|
|
+ ret = 0;
|
|
+
|
|
+out_unlock:
|
|
+ mutex_unlock(&gpu->lock);
|
|
+
|
|
+ etnaviv_gpu_pm_put(gpu);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * Init/Cleanup:
|
|
+ */
|
|
+static irqreturn_t irq_handler(int irq, void *data)
|
|
+{
|
|
+ struct etnaviv_gpu *gpu = data;
|
|
+ irqreturn_t ret = IRQ_NONE;
|
|
+
|
|
+ u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
|
|
+
|
|
+ if (intr != 0) {
|
|
+ int event;
|
|
+
|
|
+ pm_runtime_mark_last_busy(gpu->dev);
|
|
+
|
|
+ dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
|
|
+
|
|
+ if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
|
|
+ dev_err(gpu->dev, "AXI bus error\n");
|
|
+ intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
|
|
+ }
|
|
+
|
|
+ while ((event = ffs(intr)) != 0) {
|
|
+ struct fence *fence;
|
|
+
|
|
+ event -= 1;
|
|
+
|
|
+ intr &= ~(1 << event);
|
|
+
|
|
+ dev_dbg(gpu->dev, "event %u\n", event);
|
|
+
|
|
+ fence = gpu->event[event].fence;
|
|
+ gpu->event[event].fence = NULL;
|
|
+ fence_signal(fence);
|
|
+
|
|
+ /*
|
|
+ * Events can be processed out of order. Eg,
|
|
+ * - allocate and queue event 0
|
|
+ * - allocate event 1
|
|
+ * - event 0 completes, we process it
|
|
+ * - allocate and queue event 0
|
|
+ * - event 1 and event 0 complete
|
|
+ * we can end up processing event 0 first, then 1.
|
|
+ */
|
|
+ if (fence_after(fence->seqno, gpu->completed_fence))
|
|
+ gpu->completed_fence = fence->seqno;
|
|
+
|
|
+ event_free(gpu, event);
|
|
+
|
|
+ /*
|
|
+ * We need to balance the runtime PM count caused by
|
|
+ * each submission. Upon submission, we increment
|
|
+ * the runtime PM counter, and allocate one event.
|
|
+ * So here, we put the runtime PM count for each
|
|
+ * completed event.
|
|
+ */
|
|
+ pm_runtime_put_autosuspend(gpu->dev);
|
|
+ }
|
|
+
|
|
+ /* Retire the buffer objects in a work */
|
|
+ etnaviv_queue_work(gpu->drm, &gpu->retire_work);
|
|
+
|
|
+ ret = IRQ_HANDLED;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = enable_clk(gpu);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = enable_axi(gpu);
|
|
+ if (ret) {
|
|
+ disable_clk(gpu);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ ret = disable_axi(gpu);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ret = disable_clk(gpu);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ if (gpu->buffer) {
|
|
+ unsigned long timeout;
|
|
+
|
|
+ /* Replace the last WAIT with END */
|
|
+ etnaviv_buffer_end(gpu);
|
|
+
|
|
+ /*
|
|
+ * We know that only the FE is busy here; this should
|
|
+ * happen quickly (as the WAIT is only 200 cycles). If
|
|
+ * we fail, just warn and continue.
|
|
+ */
|
|
+ timeout = jiffies + msecs_to_jiffies(100);
|
|
+ do {
|
|
+ u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
|
|
+
|
|
+ if ((idle & gpu->idle_mask) == gpu->idle_mask)
|
|
+ break;
|
|
+
|
|
+ if (time_is_before_jiffies(timeout)) {
|
|
+ dev_warn(gpu->dev,
|
|
+ "timed out waiting for idle: idle=0x%x\n",
|
|
+ idle);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ udelay(5);
|
|
+ } while (1);
|
|
+ }
|
|
+
|
|
+ return etnaviv_gpu_clk_disable(gpu);
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ u32 clock;
|
|
+ int ret;
|
|
+
|
|
+ ret = mutex_lock_killable(&gpu->lock);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
|
|
+ VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
|
|
+
|
|
+ etnaviv_gpu_load_clock(gpu, clock);
|
|
+ etnaviv_gpu_hw_init(gpu);
|
|
+
|
|
+ gpu->switch_context = true;
|
|
+
|
|
+ mutex_unlock(&gpu->lock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static int etnaviv_gpu_bind(struct device *dev, struct device *master,
|
|
+ void *data)
|
|
+{
|
|
+ struct drm_device *drm = data;
|
|
+ struct etnaviv_drm_private *priv = drm->dev_private;
|
|
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
|
|
+ int ret;
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+ ret = pm_runtime_get_sync(gpu->dev);
|
|
+#else
|
|
+ ret = etnaviv_gpu_clk_enable(gpu);
|
|
+#endif
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ gpu->drm = drm;
|
|
+ gpu->fence_context = fence_context_alloc(1);
|
|
+ spin_lock_init(&gpu->fence_spinlock);
|
|
+
|
|
+ INIT_LIST_HEAD(&gpu->active_cmd_list);
|
|
+ INIT_WORK(&gpu->retire_work, retire_worker);
|
|
+ INIT_WORK(&gpu->recover_work, recover_worker);
|
|
+ init_waitqueue_head(&gpu->fence_event);
|
|
+
|
|
+ setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
|
|
+ (unsigned long)gpu);
|
|
+
|
|
+ priv->gpu[priv->num_gpus++] = gpu;
|
|
+
|
|
+ pm_runtime_mark_last_busy(gpu->dev);
|
|
+ pm_runtime_put_autosuspend(gpu->dev);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
|
|
+ void *data)
|
|
+{
|
|
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
|
|
+
|
|
+ DBG("%s", dev_name(gpu->dev));
|
|
+
|
|
+ hangcheck_disable(gpu);
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+ pm_runtime_get_sync(gpu->dev);
|
|
+ pm_runtime_put_sync_suspend(gpu->dev);
|
|
+#else
|
|
+ etnaviv_gpu_hw_suspend(gpu);
|
|
+#endif
|
|
+
|
|
+ if (gpu->buffer) {
|
|
+ etnaviv_gpu_cmdbuf_free(gpu->buffer);
|
|
+ gpu->buffer = NULL;
|
|
+ }
|
|
+
|
|
+ if (gpu->mmu) {
|
|
+ etnaviv_iommu_destroy(gpu->mmu);
|
|
+ gpu->mmu = NULL;
|
|
+ }
|
|
+
|
|
+ gpu->drm = NULL;
|
|
+}
|
|
+
|
|
+static const struct component_ops gpu_ops = {
|
|
+ .bind = etnaviv_gpu_bind,
|
|
+ .unbind = etnaviv_gpu_unbind,
|
|
+};
|
|
+
|
|
+static const struct of_device_id etnaviv_gpu_match[] = {
|
|
+ {
|
|
+ .compatible = "vivante,gc"
|
|
+ },
|
|
+ { /* sentinel */ }
|
|
+};
|
|
+
|
|
+static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
|
|
+{
|
|
+ struct device *dev = &pdev->dev;
|
|
+ struct etnaviv_gpu *gpu;
|
|
+ int err = 0;
|
|
+
|
|
+ gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
|
|
+ if (!gpu)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ gpu->dev = &pdev->dev;
|
|
+ mutex_init(&gpu->lock);
|
|
+
|
|
+ /*
|
|
+ * Set the GPU base address to the start of physical memory. This
|
|
+ * ensures that if we have up to 2GB, the v1 MMU can address the
|
|
+ * highest memory. This is important as command buffers may be
|
|
+ * allocated outside of this limit.
|
|
+ */
|
|
+ gpu->memory_base = PHYS_OFFSET;
|
|
+
|
|
+ /* Map registers: */
|
|
+ gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
|
|
+ if (IS_ERR(gpu->mmio))
|
|
+ return PTR_ERR(gpu->mmio);
|
|
+
|
|
+ /* Get Interrupt: */
|
|
+ gpu->irq = platform_get_irq(pdev, 0);
|
|
+ if (gpu->irq < 0) {
|
|
+ err = gpu->irq;
|
|
+ dev_err(dev, "failed to get irq: %d\n", err);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
|
|
+ dev_name(gpu->dev), gpu);
|
|
+ if (err) {
|
|
+ dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ /* Get Clocks: */
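+ /* All clocks are optional; a failed lookup just leaves the pointer NULL */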
|
|
+ gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
|
|
+ DBG("clk_bus: %p", gpu->clk_bus);
|
|
+ if (IS_ERR(gpu->clk_bus))
|
|
+ gpu->clk_bus = NULL;
|
|
+
|
|
+ gpu->clk_core = devm_clk_get(&pdev->dev, "core");
|
|
+ DBG("clk_core: %p", gpu->clk_core);
|
|
+ if (IS_ERR(gpu->clk_core))
|
|
+ gpu->clk_core = NULL;
|
|
+
|
|
+ gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
|
|
+ DBG("clk_shader: %p", gpu->clk_shader);
|
|
+ if (IS_ERR(gpu->clk_shader))
|
|
+ gpu->clk_shader = NULL;
|
|
+
|
|
+ /* TODO: figure out max mapped size */
|
|
+ dev_set_drvdata(dev, gpu);
|
|
+
|
|
+ /*
|
|
+ * We treat the device as initially suspended. The runtime PM
|
|
+ * autosuspend delay is rather arbitrary: no measurements have
|
|
+ * yet been performed to determine an appropriate value.
|
|
+ */
|
|
+ pm_runtime_use_autosuspend(gpu->dev);
|
|
+ pm_runtime_set_autosuspend_delay(gpu->dev, 200);
|
|
+ pm_runtime_enable(gpu->dev);
|
|
+
|
|
+ err = component_add(&pdev->dev, &gpu_ops);
|
|
+ if (err < 0) {
|
|
+ dev_err(&pdev->dev, "failed to register component: %d\n", err);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+fail:
|
|
+ return err;
|
|
+}
|
|
+
|
|
+static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
|
|
+{
|
|
+ component_del(&pdev->dev, &gpu_ops);
|
|
+ pm_runtime_disable(&pdev->dev);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_PM
|
|
+static int etnaviv_gpu_rpm_suspend(struct device *dev)
|
|
+{
|
|
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
|
|
+ u32 idle, mask;
|
|
+
|
|
+ /* If we have outstanding fences, we're not idle */
|
|
+ if (gpu->completed_fence != gpu->active_fence)
|
|
+ return -EBUSY;
|
|
+
|
|
+ /* Check whether the hardware (except FE) is idle */
|
|
+ mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
|
|
+ idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
|
|
+ if (idle != mask)
|
|
+ return -EBUSY;
|
|
+
|
|
+ return etnaviv_gpu_hw_suspend(gpu);
|
|
+}
|
|
+
|
|
+static int etnaviv_gpu_rpm_resume(struct device *dev)
|
|
+{
|
|
+ struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
|
|
+ int ret;
|
|
+
|
|
+ ret = etnaviv_gpu_clk_enable(gpu);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ /* Re-initialise the basic hardware state */
|
|
+ if (gpu->drm && gpu->buffer) {
|
|
+ ret = etnaviv_gpu_hw_resume(gpu);
|
|
+ if (ret) {
|
|
+ etnaviv_gpu_clk_disable(gpu);
|
|
+ return ret;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
|
|
+ SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
|
|
+ NULL)
|
|
+};
|
|
+
|
|
+struct platform_driver etnaviv_gpu_driver = {
|
|
+ .driver = {
|
|
+ .name = "etnaviv-gpu",
|
|
+ .owner = THIS_MODULE,
|
|
+ .pm = &etnaviv_gpu_pm_ops,
|
|
+ .of_match_table = etnaviv_gpu_match,
|
|
+ },
|
|
+ .probe = etnaviv_gpu_platform_probe,
|
|
+ .remove = etnaviv_gpu_platform_remove,
|
|
+ .id_table = gpu_ids,
|
|
+};
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
|
|
new file mode 100644
|
|
index 0000000..c75d503
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
|
|
@@ -0,0 +1,209 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#ifndef __ETNAVIV_GPU_H__
|
|
+#define __ETNAVIV_GPU_H__
|
|
+
|
|
+#include <linux/clk.h>
|
|
+#include <linux/regulator/consumer.h>
|
|
+
|
|
+#include "etnaviv_drv.h"
|
|
+
|
|
+struct etnaviv_gem_submit;
|
|
+
|
|
+struct etnaviv_chip_identity {
|
|
+ /* Chip model. */
|
|
+ u32 model;
|
|
+
|
|
+ /* Revision value.*/
|
|
+ u32 revision;
|
|
+
|
|
+ /* Supported feature fields. */
|
|
+ u32 features;
|
|
+
|
|
+ /* Supported minor feature fields. */
|
|
+ u32 minor_features0;
|
|
+
|
|
+ /* Supported minor feature 1 fields. */
|
|
+ u32 minor_features1;
|
|
+
|
|
+ /* Supported minor feature 2 fields. */
|
|
+ u32 minor_features2;
|
|
+
|
|
+ /* Supported minor feature 3 fields. */
|
|
+ u32 minor_features3;
|
|
+
|
|
+ /* Number of streams supported. */
|
|
+ u32 stream_count;
|
|
+
|
|
+ /* Total number of temporary registers per thread. */
|
|
+ u32 register_max;
|
|
+
|
|
+ /* Maximum number of threads. */
|
|
+ u32 thread_count;
|
|
+
|
|
+ /* Number of shader cores. */
|
|
+ u32 shader_core_count;
|
|
+
|
|
+ /* Size of the vertex cache. */
|
|
+ u32 vertex_cache_size;
|
|
+
|
|
+ /* Number of entries in the vertex output buffer. */
|
|
+ u32 vertex_output_buffer_size;
|
|
+
|
|
+ /* Number of pixel pipes. */
|
|
+ u32 pixel_pipes;
|
|
+
|
|
+ /* Number of instructions. */
|
|
+ u32 instruction_count;
|
|
+
|
|
+ /* Number of constants. */
|
|
+ u32 num_constants;
|
|
+
|
|
+ /* Buffer size */
|
|
+ u32 buffer_size;
|
|
+};
|
|
+
|
|
+struct etnaviv_event {
|
|
+ bool used;
|
|
+ struct fence *fence;
|
|
+};
|
|
+
|
|
+struct etnaviv_cmdbuf;
|
|
+
|
|
+struct etnaviv_gpu {
|
|
+ struct drm_device *drm;
|
|
+ struct device *dev;
|
|
+ struct mutex lock;
|
|
+ struct etnaviv_chip_identity identity;
|
|
+ struct etnaviv_file_private *lastctx;
|
|
+ bool switch_context;
|
|
+
|
|
+ /* 'ring'-buffer: */
|
|
+ struct etnaviv_cmdbuf *buffer;
|
|
+
|
|
+ /* bus base address of memory */
|
|
+ u32 memory_base;
|
|
+
|
|
+ /* event management: */
|
|
+ struct etnaviv_event event[30];
|
|
+ struct completion event_free;
|
|
+ spinlock_t event_spinlock;
|
|
+
|
|
+ /* list of currently in-flight command buffers */
|
|
+ struct list_head active_cmd_list;
|
|
+
|
|
+ u32 idle_mask;
|
|
+
|
|
+ /* Fencing support */
|
|
+ u32 next_fence;
|
|
+ u32 active_fence;
|
|
+ u32 completed_fence;
|
|
+ u32 retired_fence;
|
|
+ wait_queue_head_t fence_event;
|
|
+ unsigned int fence_context;
|
|
+ spinlock_t fence_spinlock;
|
|
+
|
|
+ /* worker for handling active-list retiring: */
|
|
+ struct work_struct retire_work;
|
|
+
|
|
+ void __iomem *mmio;
|
|
+ int irq;
|
|
+
|
|
+ struct etnaviv_iommu *mmu;
|
|
+
|
|
+ /* Power Control: */
|
|
+ struct clk *clk_bus;
|
|
+ struct clk *clk_core;
|
|
+ struct clk *clk_shader;
|
|
+
|
|
+ /* Hang Detection: */
|
|
+#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */
|
|
+#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD)
|
|
+ struct timer_list hangcheck_timer;
|
|
+ u32 hangcheck_fence;
|
|
+ u32 hangcheck_dma_addr;
|
|
+ struct work_struct recover_work;
|
|
+};
|
|
+
|
|
+struct etnaviv_cmdbuf {
|
|
+ /* device this cmdbuf is allocated for */
|
|
+ struct etnaviv_gpu *gpu;
|
|
+ /* user context key, must be unique between all active users */
|
|
+ struct etnaviv_file_private *ctx;
|
|
+ /* cmdbuf properties */
|
|
+ void *vaddr;
|
|
+ dma_addr_t paddr;
|
|
+ u32 size;
|
|
+ u32 user_size;
|
|
+ /* fence after which this buffer is to be disposed */
|
|
+ struct fence *fence;
|
|
+ /* target exec state */
|
|
+ u32 exec_state;
|
|
+ /* per GPU in-flight list */
|
|
+ struct list_head node;
|
|
+ /* BOs attached to this command buffer */
|
|
+ unsigned int nr_bos;
|
|
+ struct etnaviv_gem_object *bo[0];
|
|
+};
|
|
+
|
|
+static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
|
|
+{
|
|
+ etnaviv_writel(data, gpu->mmio + reg);
|
|
+}
|
|
+
|
|
+static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
|
|
+{
|
|
+ return etnaviv_readl(gpu->mmio + reg);
|
|
+}
|
|
+
|
|
+static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
|
|
+{
|
|
+ return fence_after_eq(gpu->completed_fence, fence);
|
|
+}
|
|
+
|
|
+static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence)
|
|
+{
|
|
+ return fence_after_eq(gpu->retired_fence, fence);
|
|
+}
|
|
+
|
|
+int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
|
|
+
|
|
+int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
|
|
+
|
|
+#ifdef CONFIG_DEBUG_FS
|
|
+int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
|
|
+#endif
|
|
+
|
|
+int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
|
|
+ unsigned int context, bool exclusive);
|
|
+
|
|
+void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
|
|
+int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
|
|
+ u32 fence, struct timespec *timeout);
|
|
+int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
|
|
+ struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
|
|
+int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
|
|
+ struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
|
|
+struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
|
|
+ u32 size, size_t nr_bos);
|
|
+void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
|
|
+int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
|
|
+void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
|
|
+
|
|
+extern struct platform_driver etnaviv_gpu_driver;
|
|
+
|
|
+#endif /* __ETNAVIV_GPU_H__ */
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
|
|
new file mode 100644
|
|
index 0000000..522cfd4
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
|
|
@@ -0,0 +1,240 @@
|
|
+/*
|
|
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#include <linux/iommu.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/sizes.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/bitops.h>
|
|
+
|
|
+#include "etnaviv_gpu.h"
|
|
+#include "etnaviv_mmu.h"
|
|
+#include "etnaviv_iommu.h"
|
|
+#include "state_hi.xml.h"
|
|
+
|
|
+#define PT_SIZE SZ_2M
|
|
+#define PT_ENTRIES (PT_SIZE / sizeof(u32))
|
|
+
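+/* start of the GPU virtual address range covered by the v1 MMU page table */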
|
|
+#define GPU_MEM_START 0x80000000
|
|
+
|
|
+struct etnaviv_iommu_domain_pgtable {
|
|
+ u32 *pgtable;
|
|
+ dma_addr_t paddr;
|
|
+};
|
|
+
|
|
+struct etnaviv_iommu_domain {
|
|
+ struct iommu_domain domain;
|
|
+ struct device *dev;
|
|
+ void *bad_page_cpu;
|
|
+ dma_addr_t bad_page_dma;
|
|
+ struct etnaviv_iommu_domain_pgtable pgtable;
|
|
+ spinlock_t map_lock;
|
|
+};
|
|
+
|
|
+static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
|
|
+{
|
|
+ return container_of(domain, struct etnaviv_iommu_domain, domain);
|
|
+}
|
|
+
|
|
+static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
|
|
+ size_t size)
|
|
+{
|
|
+ pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
|
|
+ if (!pgtable->pgtable)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
|
|
+ size_t size)
|
|
+{
|
|
+ dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
|
|
+}
|
|
+
|
|
+static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
|
|
+ unsigned long iova)
|
|
+{
|
|
+ /* calculate index into page table */
|
|
+ unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
|
|
+ phys_addr_t paddr;
|
|
+
|
|
+ paddr = pgtable->pgtable[index];
|
|
+
|
|
+ return paddr;
|
|
+}
|
|
+
|
|
+static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
|
|
+ unsigned long iova, phys_addr_t paddr)
|
|
+{
|
|
+ /* calculate index into page table */
|
|
+ unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
|
|
+
|
|
+ pgtable->pgtable[index] = paddr;
|
|
+}
|
|
+
|
|
+static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
|
|
+{
|
|
+ u32 *p;
|
|
+ int ret, i;
|
|
+
|
|
+ etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
|
|
+ SZ_4K,
|
|
+ &etnaviv_domain->bad_page_dma,
|
|
+ GFP_KERNEL);
|
|
+ if (!etnaviv_domain->bad_page_cpu)
|
|
+ return -ENOMEM;
|
|
+
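+ /* fill the scratch page with a marker so stray GPU accesses are easy to spot */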
|
|
+ p = etnaviv_domain->bad_page_cpu;
|
|
+ for (i = 0; i < SZ_4K / 4; i++)
|
|
+ *p++ = 0xdead55aa;
|
|
+
|
|
+ ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
|
|
+ if (ret < 0) {
|
|
+ dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
|
+ etnaviv_domain->bad_page_cpu,
|
|
+ etnaviv_domain->bad_page_dma);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < PT_ENTRIES; i++)
|
|
+ etnaviv_domain->pgtable.pgtable[i] =
|
|
+ etnaviv_domain->bad_page_dma;
|
|
+
|
|
+ spin_lock_init(&etnaviv_domain->map_lock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void etnaviv_domain_free(struct iommu_domain *domain)
|
|
+{
|
|
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
|
+
|
|
+ pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
|
|
+
|
|
+ dma_free_coherent(etnaviv_domain->dev, SZ_4K,
|
|
+ etnaviv_domain->bad_page_cpu,
|
|
+ etnaviv_domain->bad_page_dma);
|
|
+
|
|
+ kfree(etnaviv_domain);
|
|
+}
|
|
+
|
|
+static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
|
|
+ phys_addr_t paddr, size_t size, int prot)
|
|
+{
|
|
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
|
+
|
|
+ if (size != SZ_4K)
|
|
+ return -EINVAL;
|
|
+
|
|
+ spin_lock(&etnaviv_domain->map_lock);
|
|
+ pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
|
|
+ spin_unlock(&etnaviv_domain->map_lock);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
|
|
+ unsigned long iova, size_t size)
|
|
+{
|
|
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
|
+
|
|
+ if (size != SZ_4K)
|
|
+ return -EINVAL;
|
|
+
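+ /* point the entry back at the scratch page instead of leaving a stale mapping */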
|
|
+ spin_lock(&etnaviv_domain->map_lock);
|
|
+ pgtable_write(&etnaviv_domain->pgtable, iova,
|
|
+ etnaviv_domain->bad_page_dma);
|
|
+ spin_unlock(&etnaviv_domain->map_lock);
|
|
+
|
|
+ return SZ_4K;
|
|
+}
|
|
+
|
|
+static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
|
|
+ dma_addr_t iova)
|
|
+{
|
|
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
|
+
|
|
+ return pgtable_read(&etnaviv_domain->pgtable, iova);
|
|
+}
|
|
+
|
|
+static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
|
|
+{
|
|
+ return PT_SIZE;
|
|
+}
|
|
+
|
|
+static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
|
|
+{
|
|
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
|
+
|
|
+ memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
|
|
+}
|
|
+
|
|
+static struct etnaviv_iommu_ops etnaviv_iommu_ops = {
|
|
+ .ops = {
|
|
+ .domain_free = etnaviv_domain_free,
|
|
+ .map = etnaviv_iommuv1_map,
|
|
+ .unmap = etnaviv_iommuv1_unmap,
|
|
+ .iova_to_phys = etnaviv_iommu_iova_to_phys,
|
|
+ .pgsize_bitmap = SZ_4K,
|
|
+ },
|
|
+ .dump_size = etnaviv_iommuv1_dump_size,
|
|
+ .dump = etnaviv_iommuv1_dump,
|
|
+};
|
|
+
|
|
+void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
|
|
+ struct iommu_domain *domain)
|
|
+{
|
|
+ struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
|
|
+ u32 pgtable;
|
|
+
|
|
+ /* set page table address in MC */
|
|
+ pgtable = (u32)etnaviv_domain->pgtable.paddr;
|
|
+
|
|
+ gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
|
|
+ gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
|
|
+ gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
|
|
+ gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
|
|
+ gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
|
|
+}
|
|
+
|
|
+struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ struct etnaviv_iommu_domain *etnaviv_domain;
|
|
+ int ret;
|
|
+
|
|
+ etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
|
|
+ if (!etnaviv_domain)
|
|
+ return NULL;
|
|
+
|
|
+ etnaviv_domain->dev = gpu->dev;
|
|
+
|
|
+ etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
|
|
+ etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
|
|
+ etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
|
|
+ etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
|
|
+
|
|
+ ret = __etnaviv_iommu_init(etnaviv_domain);
|
|
+ if (ret)
|
|
+ goto out_free;
|
|
+
|
|
+ return &etnaviv_domain->domain;
|
|
+
|
|
+out_free:
|
|
+ kfree(etnaviv_domain);
|
|
+ return NULL;
|
|
+}
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
|
|
new file mode 100644
|
|
index 0000000..cf45503
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.h
|
|
@@ -0,0 +1,28 @@
|
|
+/*
|
|
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#ifndef __ETNAVIV_IOMMU_H__
|
|
+#define __ETNAVIV_IOMMU_H__
|
|
+
|
|
+#include <linux/iommu.h>
|
|
+struct etnaviv_gpu;
|
|
+
|
|
+struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu);
|
|
+void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
|
|
+ struct iommu_domain *domain);
|
|
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
|
|
+
|
|
+#endif /* __ETNAVIV_IOMMU_H__ */
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
|
|
new file mode 100644
|
|
index 0000000..fbb4aed
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
|
|
@@ -0,0 +1,33 @@
|
|
+/*
|
|
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#include <linux/iommu.h>
|
|
+#include <linux/platform_device.h>
|
|
+#include <linux/sizes.h>
|
|
+#include <linux/slab.h>
|
|
+#include <linux/dma-mapping.h>
|
|
+#include <linux/bitops.h>
|
|
+
|
|
+#include "etnaviv_gpu.h"
|
|
+#include "etnaviv_iommu.h"
|
|
+#include "state_hi.xml.h"
|
|
+
|
|
+
|
|
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu)
|
|
+{
|
|
+ /* TODO */
|
|
+ return NULL;
|
|
+}
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
|
|
new file mode 100644
|
|
index 0000000..603ea41
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.h
|
|
@@ -0,0 +1,25 @@
|
|
+/*
|
|
+ * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#ifndef __ETNAVIV_IOMMU_V2_H__
|
|
+#define __ETNAVIV_IOMMU_V2_H__
|
|
+
|
|
+#include <linux/iommu.h>
|
|
+struct etnaviv_gpu;
|
|
+
|
|
+struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
|
|
+
|
|
+#endif /* __ETNAVIV_IOMMU_V2_H__ */
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
|
|
new file mode 100644
|
|
index 0000000..6743bc6
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
|
|
@@ -0,0 +1,299 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#include "etnaviv_drv.h"
|
|
+#include "etnaviv_gem.h"
|
|
+#include "etnaviv_gpu.h"
|
|
+#include "etnaviv_mmu.h"
|
|
+
|
|
+static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
|
|
+ unsigned long iova, int flags, void *arg)
|
|
+{
|
|
+ DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
|
|
+ struct sg_table *sgt, unsigned len, int prot)
|
|
+{
|
|
+ struct iommu_domain *domain = iommu->domain;
|
|
+ struct scatterlist *sg;
|
|
+ unsigned int da = iova;
|
|
+ unsigned int i, j;
|
|
+ int ret;
|
|
+
|
|
+ if (!domain || !sgt)
|
|
+ return -EINVAL;
|
|
+
|
|
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
|
|
+ u32 pa = sg_dma_address(sg) - sg->offset;
|
|
+ size_t bytes = sg_dma_len(sg) + sg->offset;
|
|
+
|
|
+ VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
|
|
+
|
|
+ ret = iommu_map(domain, da, pa, bytes, prot);
|
|
+ if (ret)
|
|
+ goto fail;
|
|
+
|
|
+ da += bytes;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+fail:
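+ /* roll back any mappings created before the failure */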
|
|
+ da = iova;
|
|
+
|
|
+ for_each_sg(sgt->sgl, sg, i, j) {
|
|
+ size_t bytes = sg_dma_len(sg) + sg->offset;
|
|
+
|
|
+ iommu_unmap(domain, da, bytes);
|
|
+ da += bytes;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
|
|
+ struct sg_table *sgt, unsigned len)
|
|
+{
|
|
+ struct iommu_domain *domain = iommu->domain;
|
|
+ struct scatterlist *sg;
|
|
+ unsigned int da = iova;
|
|
+ int i;
|
|
+
|
|
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
|
|
+ size_t bytes = sg_dma_len(sg) + sg->offset;
|
|
+ size_t unmapped;
|
|
+
|
|
+ unmapped = iommu_unmap(domain, da, bytes);
|
|
+ if (unmapped < bytes)
|
|
+ return unmapped;
|
|
+
|
|
+ VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
|
|
+
|
|
+ BUG_ON(!PAGE_ALIGNED(bytes));
|
|
+
|
|
+ da += bytes;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
|
|
+ struct etnaviv_vram_mapping *mapping)
|
|
+{
|
|
+ struct etnaviv_gem_object *etnaviv_obj = mapping->object;
|
|
+
|
|
+ etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
|
|
+ etnaviv_obj->sgt, etnaviv_obj->base.size);
|
|
+ drm_mm_remove_node(&mapping->vram_node);
|
|
+}
|
|
+
|
|
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
|
|
+ struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
|
|
+ struct etnaviv_vram_mapping *mapping)
|
|
+{
|
|
+ struct etnaviv_vram_mapping *free = NULL;
|
|
+ struct sg_table *sgt = etnaviv_obj->sgt;
|
|
+ struct drm_mm_node *node;
|
|
+ int ret;
|
|
+
|
|
+ lockdep_assert_held(&etnaviv_obj->lock);
|
|
+
|
|
+ mutex_lock(&mmu->lock);
|
|
+
|
|
+ /* v1 MMU can optimize single entry (contiguous) scatterlists */
|
|
+ if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
|
|
+ u32 iova;
|
|
+
|
|
+ iova = sg_dma_address(sgt->sgl) - memory_base;
|
|
+ if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
|
|
+ mapping->iova = iova;
|
|
+ list_add_tail(&mapping->mmu_node, &mmu->mappings);
|
|
+ mutex_unlock(&mmu->lock);
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ node = &mapping->vram_node;
|
|
+ while (1) {
|
|
+ struct etnaviv_vram_mapping *m, *n;
|
|
+ struct list_head list;
|
|
+ bool found;
|
|
+
|
|
+ ret = drm_mm_insert_node_in_range(&mmu->mm, node,
|
|
+ etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
|
|
+ DRM_MM_SEARCH_DEFAULT);
|
|
+
|
|
+ if (ret != -ENOSPC)
|
|
+ break;
|
|
+
|
|
+ /*
|
|
+ * If we did not search from the start of the MMU region,
|
|
+ * try again in case there are free slots.
|
|
+ */
|
|
+ if (mmu->last_iova) {
|
|
+ mmu->last_iova = 0;
|
|
+ mmu->need_flush = true;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* Try to retire some entries */
|
|
+ drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);
|
|
+
|
|
+ found = 0;
|
|
+ INIT_LIST_HEAD(&list);
|
|
+ list_for_each_entry(free, &mmu->mappings, mmu_node) {
|
|
+ /* If this vram node has not been used, skip this. */
|
|
+ if (!free->vram_node.mm)
|
|
+ continue;
|
|
+
|
|
+ /*
|
|
+ * If the iova is pinned, then it's in-use,
|
|
+ * so we must keep its mapping.
|
|
+ */
|
|
+ if (free->use)
|
|
+ continue;
|
|
+
|
|
+ list_add(&free->scan_node, &list);
|
|
+ if (drm_mm_scan_add_block(&free->vram_node)) {
|
|
+ found = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!found) {
|
|
+ /* Nothing found, clean up and fail */
|
|
+ list_for_each_entry_safe(m, n, &list, scan_node)
|
|
+ BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * drm_mm does not allow any other operations while
|
|
+ * scanning, so we have to remove all blocks first.
|
|
+ * If drm_mm_scan_remove_block() returns false, we
|
|
+ * can leave the block pinned.
|
|
+ */
|
|
+ list_for_each_entry_safe(m, n, &list, scan_node)
|
|
+ if (!drm_mm_scan_remove_block(&m->vram_node))
|
|
+ list_del_init(&m->scan_node);
|
|
+
|
|
+ /*
|
|
+ * Unmap the blocks which need to be reaped from the MMU.
|
|
+ * Clear the mmu pointer to prevent get_iova() from finding
|
|
+ * this mapping.
|
|
+ */
|
|
+ list_for_each_entry_safe(m, n, &list, scan_node) {
|
|
+ etnaviv_iommu_remove_mapping(mmu, m);
|
|
+ m->mmu = NULL;
|
|
+ list_del_init(&m->mmu_node);
|
|
+ list_del_init(&m->scan_node);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * We removed enough mappings so that the new allocation will
|
|
+ * succeed. Ensure that the MMU will be flushed before the
|
|
+ * associated commit requesting this mapping, and retry the
|
|
+ * allocation one more time.
|
|
+ */
|
|
+ mmu->need_flush = true;
|
|
+ }
|
|
+
|
|
+ if (ret < 0) {
|
|
+ mutex_unlock(&mmu->lock);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ mmu->last_iova = node->start + etnaviv_obj->base.size;
|
|
+ mapping->iova = node->start;
|
|
+ ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
|
|
+ IOMMU_READ | IOMMU_WRITE);
|
|
+
|
|
+ if (ret < 0) {
|
|
+ drm_mm_remove_node(node);
|
|
+ mutex_unlock(&mmu->lock);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ list_add_tail(&mapping->mmu_node, &mmu->mappings);
|
|
+ mutex_unlock(&mmu->lock);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
|
|
+ struct etnaviv_vram_mapping *mapping)
|
|
+{
|
|
+ WARN_ON(mapping->use);
|
|
+
|
|
+ mutex_lock(&mmu->lock);
|
|
+
|
|
+ /* If the vram node is on the mm, unmap and remove the node */
|
|
+ if (mapping->vram_node.mm == &mmu->mm)
|
|
+ etnaviv_iommu_remove_mapping(mmu, mapping);
|
|
+
|
|
+ list_del(&mapping->mmu_node);
|
|
+ mutex_unlock(&mmu->lock);
|
|
+}
|
|
+
|
|
+void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
|
|
+{
|
|
+ drm_mm_takedown(&mmu->mm);
|
|
+ iommu_domain_free(mmu->domain);
|
|
+ kfree(mmu);
|
|
+}
|
|
+
|
|
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
|
|
+ struct iommu_domain *domain, enum etnaviv_iommu_version version)
|
|
+{
|
|
+ struct etnaviv_iommu *mmu;
|
|
+
|
|
+ mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
|
|
+ if (!mmu)
|
|
+ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ mmu->domain = domain;
|
|
+ mmu->gpu = gpu;
|
|
+ mmu->version = version;
|
|
+ mutex_init(&mmu->lock);
|
|
+ INIT_LIST_HEAD(&mmu->mappings);
|
|
+
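+ /* manage the GPU virtual address space with a drm_mm covering the IOMMU aperture */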
|
|
+ drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
|
|
+ domain->geometry.aperture_end -
|
|
+ domain->geometry.aperture_start + 1);
|
|
+
|
|
+ iommu_set_fault_handler(domain, etnaviv_fault_handler, gpu->dev);
|
|
+
|
|
+ return mmu;
|
|
+}
|
|
+
|
|
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
|
|
+{
|
|
+ struct etnaviv_iommu_ops *ops;
|
|
+
|
|
+ ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
|
|
+
|
|
+ return ops->dump_size(iommu->domain);
|
|
+}
|
|
+
|
|
+void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
|
|
+{
|
|
+ struct etnaviv_iommu_ops *ops;
|
|
+
|
|
+ ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
|
|
+
|
|
+ ops->dump(iommu->domain, buf);
|
|
+}
|
|
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
|
|
new file mode 100644
|
|
index 0000000..fff215a
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h
|
|
@@ -0,0 +1,71 @@
|
|
+/*
|
|
+ * Copyright (C) 2015 Etnaviv Project
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms of the GNU General Public License version 2 as published by
|
|
+ * the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope that it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+
|
|
+#ifndef __ETNAVIV_MMU_H__
|
|
+#define __ETNAVIV_MMU_H__
|
|
+
|
|
+#include <linux/iommu.h>
|
|
+
|
|
+enum etnaviv_iommu_version {
|
|
+ ETNAVIV_IOMMU_V1 = 0,
|
|
+ ETNAVIV_IOMMU_V2,
|
|
+};
|
|
+
|
|
+struct etnaviv_gpu;
|
|
+struct etnaviv_vram_mapping;
|
|
+
|
|
+struct etnaviv_iommu_ops {
|
|
+ struct iommu_ops ops;
|
|
+ size_t (*dump_size)(struct iommu_domain *);
|
|
+ void (*dump)(struct iommu_domain *, void *);
|
|
+};
|
|
+
|
|
+struct etnaviv_iommu {
|
|
+ struct etnaviv_gpu *gpu;
|
|
+ struct iommu_domain *domain;
|
|
+
|
|
+ enum etnaviv_iommu_version version;
|
|
+
|
|
+ /* memory manager for GPU address area */
|
|
+ struct mutex lock;
|
|
+ struct list_head mappings;
|
|
+ struct drm_mm mm;
|
|
+ u32 last_iova;
|
|
+ bool need_flush;
|
|
+};
|
|
+
|
|
+struct etnaviv_gem_object;
|
|
+
|
|
+int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
|
|
+ int cnt);
|
|
+int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
|
|
+ struct sg_table *sgt, unsigned len, int prot);
|
|
+int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
|
|
+ struct sg_table *sgt, unsigned len);
|
|
+int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
|
|
+ struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
|
|
+ struct etnaviv_vram_mapping *mapping);
|
|
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
|
|
+ struct etnaviv_vram_mapping *mapping);
|
|
+void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
|
|
+
|
|
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
|
|
+void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
|
|
+
|
|
+struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu,
|
|
+ struct iommu_domain *domain, enum etnaviv_iommu_version version);
|
|
+
|
|
+#endif /* __ETNAVIV_MMU_H__ */
|
|
diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h
|
|
new file mode 100644
|
|
index 0000000..3682183
|
|
--- /dev/null
|
|
+++ b/drivers/gpu/drm/etnaviv/state.xml.h
|
|
@@ -0,0 +1,351 @@
|
|
+#ifndef STATE_XML
|
|
+#define STATE_XML
|
|
+
|
|
+/* Autogenerated file, DO NOT EDIT manually!
|
|
+
|
|
+This file was generated by the rules-ng-ng headergen tool in this git repository:
|
|
+http://0x04.net/cgit/index.cgi/rules-ng-ng
|
|
+git clone git://0x04.net/rules-ng-ng
|
|
+
|
|
+The rules-ng-ng source files this header was generated from are:
|
|
+- state.xml ( 18882 bytes, from 2015-03-25 11:42:32)
|
|
+- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
|
|
+- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21)
|
|
+- state_2d.xml ( 51549 bytes, from 2015-03-25 11:25:06)
|
|
+- state_3d.xml ( 54600 bytes, from 2015-03-25 11:25:19)
|
|
+- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01)
|
|
+
|
|
+Copyright (C) 2015
|
|
+*/
|
|
+
|
|
+
|
|
+#define VARYING_COMPONENT_USE_UNUSED 0x00000000
|
|
+#define VARYING_COMPONENT_USE_USED 0x00000001
|
|
+#define VARYING_COMPONENT_USE_POINTCOORD_X 0x00000002
|
|
+#define VARYING_COMPONENT_USE_POINTCOORD_Y 0x00000003
|
|
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK 0x000000ff
|
|
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT 0
|
|
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK)
|
|
+#define VIVS_FE 0x00000000
|
|
+
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0) (0x00000600 + 0x4*(i0))
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG__ESIZE 0x00000004
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN 0x00000010
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK 0x0000000f
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT 0
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_BYTE 0x00000000
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_BYTE 0x00000001
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_SHORT 0x00000002
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_SHORT 0x00000003
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT 0x00000004
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT 0x00000005
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FLOAT 0x00000008
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_HALF_FLOAT 0x00000009
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FIXED 0x0000000b
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT_10_10_10_2 0x0000000c
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK 0x00000030
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT 4
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK)
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE 0x00000080
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK 0x00000700
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT 8
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK)
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK 0x00003000
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT 12
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK)
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__MASK 0x0000c000
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__SHIFT 14
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_OFF 0x00000000
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_ON 0x00008000
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK 0x00ff0000
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT 16
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK)
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK 0xff000000
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT 24
|
|
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK)
|
|
+
|
|
+#define VIVS_FE_CMD_STREAM_BASE_ADDR 0x00000640
|
|
+
|
|
+#define VIVS_FE_INDEX_STREAM_BASE_ADDR 0x00000644
|
|
+
|
|
+#define VIVS_FE_INDEX_STREAM_CONTROL 0x00000648
|
|
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__MASK 0x00000003
|
|
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__SHIFT 0
|
|
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR 0x00000000
|
|
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT 0x00000001
|
|
+#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT 0x00000002
|
|
+
|
|
+#define VIVS_FE_VERTEX_STREAM_BASE_ADDR 0x0000064c
|
|
+
|
|
+#define VIVS_FE_VERTEX_STREAM_CONTROL 0x00000650
|
|
+
|
|
+#define VIVS_FE_COMMAND_ADDRESS 0x00000654
|
|
+
|
|
+#define VIVS_FE_COMMAND_CONTROL 0x00000658
|
|
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK 0x0000ffff
|
|
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT 0
|
|
+#define VIVS_FE_COMMAND_CONTROL_PREFETCH(x) (((x) << VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK)
|
|
+#define VIVS_FE_COMMAND_CONTROL_ENABLE 0x00010000
|
|
+
|
|
+#define VIVS_FE_DMA_STATUS 0x0000065c
|
|
+
|
|
+#define VIVS_FE_DMA_DEBUG_STATE 0x00000660
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__MASK 0x0000001f
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__SHIFT 0
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_IDLE 0x00000000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DEC 0x00000001
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR0 0x00000002
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD0 0x00000003
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR1 0x00000004
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD1 0x00000005
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DADR 0x00000006
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCMD 0x00000007
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCNTL 0x00000008
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DIDXCNTL 0x00000009
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_INITREQDMA 0x0000000a
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAWIDX 0x0000000b
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAW 0x0000000c
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT0 0x0000000d
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT1 0x0000000e
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA0 0x0000000f
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA1 0x00000010
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAITFIFO 0x00000011
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAIT 0x00000012
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LINK 0x00000013
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_END 0x00000014
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_STALL 0x00000015
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__MASK 0x00000300
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__SHIFT 8
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_IDLE 0x00000000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_START 0x00000100
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_REQ 0x00000200
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_END 0x00000300
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__MASK 0x00000c00
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__SHIFT 10
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_IDLE 0x00000000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_RAMVALID 0x00000400
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_VALID 0x00000800
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__MASK 0x00003000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__SHIFT 12
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_IDLE 0x00000000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_WAITIDX 0x00001000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_CAL 0x00002000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__MASK 0x0000c000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__SHIFT 14
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDLE 0x00000000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_LDADR 0x00004000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDXCALC 0x00008000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__MASK 0x00030000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__SHIFT 16
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_IDLE 0x00000000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_CKCACHE 0x00010000
|
|
+#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_MISS 0x00020000
|
|
+
|
|
+#define VIVS_FE_DMA_ADDRESS 0x00000664
|
|
+
|
|
+#define VIVS_FE_DMA_LOW 0x00000668
|
|
+
|
|
+#define VIVS_FE_DMA_HIGH 0x0000066c
|
|
+
|
|
+#define VIVS_FE_AUTO_FLUSH 0x00000670
|
|
+
|
|
+#define VIVS_FE_UNK00678 0x00000678
|
|
+
|
|
+#define VIVS_FE_UNK0067C 0x0000067c
|
|
+
|
|
+#define VIVS_FE_VERTEX_STREAMS(i0) (0x00000000 + 0x4*(i0))
|
|
+#define VIVS_FE_VERTEX_STREAMS__ESIZE 0x00000004
|
|
+#define VIVS_FE_VERTEX_STREAMS__LEN 0x00000008
|
|
+
|
|
+#define VIVS_FE_VERTEX_STREAMS_BASE_ADDR(i0) (0x00000680 + 0x4*(i0))
|
|
+
|
|
+#define VIVS_FE_VERTEX_STREAMS_CONTROL(i0) (0x000006a0 + 0x4*(i0))
|
|
+
|
|
+#define VIVS_FE_UNK00700(i0) (0x00000700 + 0x4*(i0))
|
|
+#define VIVS_FE_UNK00700__ESIZE 0x00000004
|
|
+#define VIVS_FE_UNK00700__LEN 0x00000010
|
|
+
|
|
+#define VIVS_FE_UNK00740(i0) (0x00000740 + 0x4*(i0))
|
|
+#define VIVS_FE_UNK00740__ESIZE 0x00000004
|
|
+#define VIVS_FE_UNK00740__LEN 0x00000010
|
|
+
|
|
+#define VIVS_FE_UNK00780(i0) (0x00000780 + 0x4*(i0))
|
|
+#define VIVS_FE_UNK00780__ESIZE 0x00000004
|
|
+#define VIVS_FE_UNK00780__LEN 0x00000010
|
|
+
|
|
+#define VIVS_GL 0x00000000
|
|
+
|
|
+#define VIVS_GL_PIPE_SELECT 0x00003800
|
|
+#define VIVS_GL_PIPE_SELECT_PIPE__MASK 0x00000001
|
|
+#define VIVS_GL_PIPE_SELECT_PIPE__SHIFT 0
|
|
+#define VIVS_GL_PIPE_SELECT_PIPE(x) (((x) << VIVS_GL_PIPE_SELECT_PIPE__SHIFT) & VIVS_GL_PIPE_SELECT_PIPE__MASK)
|
|
+
|
|
+#define VIVS_GL_EVENT 0x00003804
|
|
+#define VIVS_GL_EVENT_EVENT_ID__MASK 0x0000001f
|
|
+#define VIVS_GL_EVENT_EVENT_ID__SHIFT 0
|
|
+#define VIVS_GL_EVENT_EVENT_ID(x) (((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK)
|
|
+#define VIVS_GL_EVENT_FROM_FE 0x00000020
|
|
+#define VIVS_GL_EVENT_FROM_PE 0x00000040
|
|
+#define VIVS_GL_EVENT_SOURCE__MASK 0x00001f00
|
|
+#define VIVS_GL_EVENT_SOURCE__SHIFT 8
|
|
+#define VIVS_GL_EVENT_SOURCE(x) (((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK)
|
|
+
|
|
+#define VIVS_GL_SEMAPHORE_TOKEN 0x00003808
|
|
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK 0x0000001f
|
|
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT 0
|
|
+#define VIVS_GL_SEMAPHORE_TOKEN_FROM(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK)
|
|
+#define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK 0x00001f00
|
|
+#define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT 8
|
|
+#define VIVS_GL_SEMAPHORE_TOKEN_TO(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK)
|
|
+
|
|
+#define VIVS_GL_FLUSH_CACHE 0x0000380c
|
|
+#define VIVS_GL_FLUSH_CACHE_DEPTH 0x00000001
|
|
+#define VIVS_GL_FLUSH_CACHE_COLOR 0x00000002
|
|
+#define VIVS_GL_FLUSH_CACHE_TEXTURE 0x00000004
|
|
+#define VIVS_GL_FLUSH_CACHE_PE2D 0x00000008
|
|
+#define VIVS_GL_FLUSH_CACHE_TEXTUREVS 0x00000010
|
|
+#define VIVS_GL_FLUSH_CACHE_SHADER_L1 0x00000020
|
|
+#define VIVS_GL_FLUSH_CACHE_SHADER_L2 0x00000040
|
|
+
|
|
+#define VIVS_GL_FLUSH_MMU 0x00003810
|
|
+#define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU 0x00000001
|
|
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK1 0x00000002
|
|
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK2 0x00000004
|
|
+#define VIVS_GL_FLUSH_MMU_FLUSH_PEMMU 0x00000008
|
|
+#define VIVS_GL_FLUSH_MMU_FLUSH_UNK4 0x00000010
|
|
+
|
|
+#define VIVS_GL_VERTEX_ELEMENT_CONFIG 0x00003814
|
|
+
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG 0x00003818
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__MASK 0x00000003
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__SHIFT 0
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_NONE 0x00000000
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_2X 0x00000001
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_4X 0x00000002
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_MASK 0x00000008
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK 0x000000f0
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT 4
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK)
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES_MASK 0x00000100
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK 0x00007000
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT 12
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK)
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12_MASK 0x00008000
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK 0x00030000
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT 16
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16(x) (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK)
|
|
+#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16_MASK 0x00080000
|
|
+
|
|
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS 0x0000381c
|
|
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK 0x000000ff
|
|
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT 0
|
|
+#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x) (((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK)
|
|
+
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS 0x00003820
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK 0x00000007
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT 0
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK)
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK 0x00000070
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT 4
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK)
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK 0x00000700
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT 8
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK)
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK 0x00007000
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT 12
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK)
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK 0x00070000
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT 16
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK)
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK 0x00700000
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT 20
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK)
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK 0x07000000
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT 24
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK)
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK 0x70000000
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT 28
|
|
+#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK)
|
|
+
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE(i0) (0x00003828 + 0x4*(i0))
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE__ESIZE 0x00000004
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE__LEN 0x00000002
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK 0x00000003
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT 0
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP0(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK 0x0000000c
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT 2
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP1(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK 0x00000030
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT 4
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP2(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK 0x000000c0
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT 6
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP3(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK 0x00000300
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT 8
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP4(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK 0x00000c00
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT 10
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP5(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK 0x00003000
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT 12
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP6(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK 0x0000c000
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT 14
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP7(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK 0x00030000
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT 16
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP8(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK 0x000c0000
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT 18
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP9(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK 0x00300000
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT 20
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP10(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK 0x00c00000
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT 22
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP11(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK 0x03000000
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT 24
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP12(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK 0x0c000000
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT 26
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP13(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK 0x30000000
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT 28
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP14(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK)
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK 0xc0000000
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT 30
|
|
+#define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK)
|
|
+
|
|
+#define VIVS_GL_UNK03834 0x00003834
|
|
+
|
|
+#define VIVS_GL_UNK03838 0x00003838
|
|
+
|
|
+#define VIVS_GL_API_MODE 0x0000384c
|
|
+#define VIVS_GL_API_MODE_OPENGL 0x00000000
|
|
+#define VIVS_GL_API_MODE_OPENVG 0x00000001
|
|
+#define VIVS_GL_API_MODE_OPENCL 0x00000002
|
|
+
|
|
+#define VIVS_GL_CONTEXT_POINTER 0x00003850
|
|
+
|
|
+#define VIVS_GL_UNK03A00 0x00003a00
|
|
+
|
|
+#define VIVS_GL_STALL_TOKEN 0x00003c00
|
|
+#define VIVS_GL_STALL_TOKEN_FROM__MASK 0x0000001f
|
|
+#define VIVS_GL_STALL_TOKEN_FROM__SHIFT 0
|
|
+#define VIVS_GL_STALL_TOKEN_FROM(x) (((x) << VIVS_GL_STALL_TOKEN_FROM__SHIFT) & VIVS_GL_STALL_TOKEN_FROM__MASK)
|
|
+#define VIVS_GL_STALL_TOKEN_TO__MASK 0x00001f00
|
|
+#define VIVS_GL_STALL_TOKEN_TO__SHIFT 8
|
|
+#define VIVS_GL_STALL_TOKEN_TO(x) (((x) << VIVS_GL_STALL_TOKEN_TO__SHIFT) & VIVS_GL_STALL_TOKEN_TO__MASK)
|
|
+#define VIVS_GL_STALL_TOKEN_FLIP0 0x40000000
|
|
+#define VIVS_GL_STALL_TOKEN_FLIP1 0x80000000
|
|
+
|
|
+#define VIVS_DUMMY 0x00000000
|
|
+
|
|
+#define VIVS_DUMMY_DUMMY 0x0003fffc
|
|
+
|
|
+
|
|
+#endif /* STATE_XML */
|
|
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
new file mode 100644
index 0000000..0064f26
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -0,0 +1,407 @@
+#ifndef STATE_HI_XML
+#define STATE_HI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21)
+- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
+
+Copyright (C) 2015
+*/
+
+
+#define MMU_EXCEPTION_SLAVE_NOT_PRESENT 0x00000001
+#define MMU_EXCEPTION_PAGE_NOT_PRESENT 0x00000002
+#define MMU_EXCEPTION_WRITE_VIOLATION 0x00000003
+#define VIVS_HI 0x00000000
+
+#define VIVS_HI_CLOCK_CONTROL 0x00000000
+#define VIVS_HI_CLOCK_CONTROL_CLK3D_DIS 0x00000001
+#define VIVS_HI_CLOCK_CONTROL_CLK2D_DIS 0x00000002
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK 0x000001fc
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT 2
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(x) (((x) << VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT) & VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK)
+#define VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD 0x00000200
+#define VIVS_HI_CLOCK_CONTROL_DISABLE_RAM_CLK_GATING 0x00000400
+#define VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS 0x00000800
+#define VIVS_HI_CLOCK_CONTROL_SOFT_RESET 0x00001000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_3D 0x00010000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_2D 0x00020000
+#define VIVS_HI_CLOCK_CONTROL_IDLE_VG 0x00040000
+#define VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU 0x00080000
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK 0x00f00000
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT 20
+#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(x) (((x) << VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT) & VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK)
+
+#define VIVS_HI_IDLE_STATE 0x00000004
+#define VIVS_HI_IDLE_STATE_FE 0x00000001
+#define VIVS_HI_IDLE_STATE_DE 0x00000002
+#define VIVS_HI_IDLE_STATE_PE 0x00000004
+#define VIVS_HI_IDLE_STATE_SH 0x00000008
+#define VIVS_HI_IDLE_STATE_PA 0x00000010
+#define VIVS_HI_IDLE_STATE_SE 0x00000020
+#define VIVS_HI_IDLE_STATE_RA 0x00000040
+#define VIVS_HI_IDLE_STATE_TX 0x00000080
+#define VIVS_HI_IDLE_STATE_VG 0x00000100
+#define VIVS_HI_IDLE_STATE_IM 0x00000200
+#define VIVS_HI_IDLE_STATE_FP 0x00000400
+#define VIVS_HI_IDLE_STATE_TS 0x00000800
+#define VIVS_HI_IDLE_STATE_AXI_LP 0x80000000
+
+#define VIVS_HI_AXI_CONFIG 0x00000008
+#define VIVS_HI_AXI_CONFIG_AWID__MASK 0x0000000f
+#define VIVS_HI_AXI_CONFIG_AWID__SHIFT 0
+#define VIVS_HI_AXI_CONFIG_AWID(x) (((x) << VIVS_HI_AXI_CONFIG_AWID__SHIFT) & VIVS_HI_AXI_CONFIG_AWID__MASK)
+#define VIVS_HI_AXI_CONFIG_ARID__MASK 0x000000f0
+#define VIVS_HI_AXI_CONFIG_ARID__SHIFT 4
+#define VIVS_HI_AXI_CONFIG_ARID(x) (((x) << VIVS_HI_AXI_CONFIG_ARID__SHIFT) & VIVS_HI_AXI_CONFIG_ARID__MASK)
+#define VIVS_HI_AXI_CONFIG_AWCACHE__MASK 0x00000f00
+#define VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT 8
+#define VIVS_HI_AXI_CONFIG_AWCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_AWCACHE__MASK)
+#define VIVS_HI_AXI_CONFIG_ARCACHE__MASK 0x0000f000
+#define VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT 12
+#define VIVS_HI_AXI_CONFIG_ARCACHE(x) (((x) << VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_ARCACHE__MASK)
+
+#define VIVS_HI_AXI_STATUS 0x0000000c
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK 0x0000000f
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT 0
+#define VIVS_HI_AXI_STATUS_WR_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK)
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK 0x000000f0
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT 4
+#define VIVS_HI_AXI_STATUS_RD_ERR_ID(x) (((x) << VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK)
+#define VIVS_HI_AXI_STATUS_DET_WR_ERR 0x00000100
+#define VIVS_HI_AXI_STATUS_DET_RD_ERR 0x00000200
+
+#define VIVS_HI_INTR_ACKNOWLEDGE 0x00000010
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK 0x7fffffff
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT 0
+#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x) (((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK)
+#define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR 0x80000000
+
+#define VIVS_HI_INTR_ENBL 0x00000014
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK 0xffffffff
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT 0
+#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC(x) (((x) << VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT) & VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK)
+
+#define VIVS_HI_CHIP_IDENTITY 0x00000018
+#define VIVS_HI_CHIP_IDENTITY_FAMILY__MASK 0xff000000
+#define VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT 24
+#define VIVS_HI_CHIP_IDENTITY_FAMILY(x) (((x) << VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK 0x00ff0000
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT 16
+#define VIVS_HI_CHIP_IDENTITY_PRODUCT(x) (((x) << VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT) & VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK)
+#define VIVS_HI_CHIP_IDENTITY_REVISION__MASK 0x0000f000
+#define VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT 12
+#define VIVS_HI_CHIP_IDENTITY_REVISION(x) (((x) << VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT) & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
+
+#define VIVS_HI_CHIP_FEATURE 0x0000001c
+
+#define VIVS_HI_CHIP_MODEL 0x00000020
+
+#define VIVS_HI_CHIP_REV 0x00000024
+
+#define VIVS_HI_CHIP_DATE 0x00000028
+
+#define VIVS_HI_CHIP_TIME 0x0000002c
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_0 0x00000034
+
+#define VIVS_HI_CACHE_CONTROL 0x00000038
+
+#define VIVS_HI_MEMORY_COUNTER_RESET 0x0000003c
+
+#define VIVS_HI_PROFILE_READ_BYTES8 0x00000040
+
+#define VIVS_HI_PROFILE_WRITE_BYTES8 0x00000044
+
+#define VIVS_HI_CHIP_SPECS 0x00000048
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK 0x0000000f
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT 0
+#define VIVS_HI_CHIP_SPECS_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK 0x000000f0
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT 4
+#define VIVS_HI_CHIP_SPECS_REGISTER_MAX(x) (((x) << VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT) & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK 0x00000f00
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT 8
+#define VIVS_HI_CHIP_SPECS_THREAD_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK 0x0001f000
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT 12
+#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK 0x01f00000
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT 20
+#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK 0x0e000000
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT 25
+#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES(x) (((x) << VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT) & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK 0xf0000000
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT 28
+#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
+
+#define VIVS_HI_PROFILE_WRITE_BURSTS 0x0000004c
+
+#define VIVS_HI_PROFILE_WRITE_REQUESTS 0x00000050
+
+#define VIVS_HI_PROFILE_READ_BURSTS 0x00000058
+
+#define VIVS_HI_PROFILE_READ_REQUESTS 0x0000005c
+
+#define VIVS_HI_PROFILE_READ_LASTS 0x00000060
+
+#define VIVS_HI_GP_OUT0 0x00000064
+
+#define VIVS_HI_GP_OUT1 0x00000068
+
+#define VIVS_HI_GP_OUT2 0x0000006c
+
+#define VIVS_HI_AXI_CONTROL 0x00000070
+#define VIVS_HI_AXI_CONTROL_WR_FULL_BURST_MODE 0x00000001
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_1 0x00000074
+
+#define VIVS_HI_PROFILE_TOTAL_CYCLES 0x00000078
+
+#define VIVS_HI_PROFILE_IDLE_CYCLES 0x0000007c
+
+#define VIVS_HI_CHIP_SPECS_2 0x00000080
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK 0x000000ff
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT 0
+#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE(x) (((x) << VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK 0x0000ff00
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT 8
+#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK 0xffff0000
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT 16
+#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS(x) (((x) << VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT) & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_2 0x00000084
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094
+
+#define VIVS_PM 0x00000000
+
+#define VIVS_PM_POWER_CONTROLS 0x00000100
+#define VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING 0x00000001
+#define VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING 0x00000002
+#define VIVS_PM_POWER_CONTROLS_DISABLE_STARVE_MODULE_CLOCK_GATING 0x00000004
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK 0x000000f0
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT 4
+#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK)
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK 0xffff0000
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT 16
+#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER(x) (((x) << VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK)
+
+#define VIVS_PM_MODULE_CONTROLS 0x00000104
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004
+
+#define VIVS_PM_MODULE_STATUS 0x00000108
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004
+
+#define VIVS_PM_PULSE_EATER 0x0000010c
+
+#define VIVS_MMUv2 0x00000000
+
+#define VIVS_MMUv2_SAFE_ADDRESS 0x00000180
+
+#define VIVS_MMUv2_CONFIGURATION 0x00000184
+#define VIVS_MMUv2_CONFIGURATION_MODE__MASK 0x00000001
+#define VIVS_MMUv2_CONFIGURATION_MODE__SHIFT 0
+#define VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K 0x00000000
+#define VIVS_MMUv2_CONFIGURATION_MODE_MODE1_K 0x00000001
+#define VIVS_MMUv2_CONFIGURATION_MODE_MASK 0x00000008
+#define VIVS_MMUv2_CONFIGURATION_FLUSH__MASK 0x00000010
+#define VIVS_MMUv2_CONFIGURATION_FLUSH__SHIFT 4
+#define VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH 0x00000010
+#define VIVS_MMUv2_CONFIGURATION_FLUSH_MASK 0x00000080
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK 0x00000100
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK 0xfffffc00
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT 10
+#define VIVS_MMUv2_CONFIGURATION_ADDRESS(x) (((x) << VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT) & VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK)
+
+#define VIVS_MMUv2_STATUS 0x00000188
+#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK 0x00000003
+#define VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT 0
+#define VIVS_MMUv2_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK 0x00000030
+#define VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT 4
+#define VIVS_MMUv2_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION1__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK 0x00000300
+#define VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT 8
+#define VIVS_MMUv2_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION2__MASK)
+#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK 0x00003000
+#define VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT 12
+#define VIVS_MMUv2_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION3__MASK)
+
+#define VIVS_MMUv2_CONTROL 0x0000018c
+#define VIVS_MMUv2_CONTROL_ENABLE 0x00000001
+
+#define VIVS_MMUv2_EXCEPTION_ADDR(i0) (0x00000190 + 0x4*(i0))
+#define VIVS_MMUv2_EXCEPTION_ADDR__ESIZE 0x00000004
+#define VIVS_MMUv2_EXCEPTION_ADDR__LEN 0x00000004
+
+#define VIVS_MC 0x00000000
+
+#define VIVS_MC_MMU_FE_PAGE_TABLE 0x00000400
+
+#define VIVS_MC_MMU_TX_PAGE_TABLE 0x00000404
+
+#define VIVS_MC_MMU_PE_PAGE_TABLE 0x00000408
+
+#define VIVS_MC_MMU_PEZ_PAGE_TABLE 0x0000040c
+
+#define VIVS_MC_MMU_RA_PAGE_TABLE 0x00000410
+
+#define VIVS_MC_DEBUG_MEMORY 0x00000414
+#define VIVS_MC_DEBUG_MEMORY_SPECIAL_PATCH_GC320 0x00000008
+#define VIVS_MC_DEBUG_MEMORY_FAST_CLEAR_BYPASS 0x00100000
+#define VIVS_MC_DEBUG_MEMORY_COMPRESSION_BYPASS 0x00200000
+
+#define VIVS_MC_MEMORY_BASE_ADDR_RA 0x00000418
+
+#define VIVS_MC_MEMORY_BASE_ADDR_FE 0x0000041c
+
+#define VIVS_MC_MEMORY_BASE_ADDR_TX 0x00000420
+
+#define VIVS_MC_MEMORY_BASE_ADDR_PEZ 0x00000424
+
+#define VIVS_MC_MEMORY_BASE_ADDR_PE 0x00000428
+
+#define VIVS_MC_MEMORY_TIMING_CONTROL 0x0000042c
+
+#define VIVS_MC_MEMORY_FLUSH 0x00000430
+
+#define VIVS_MC_PROFILE_CYCLE_COUNTER 0x00000438
+
+#define VIVS_MC_DEBUG_READ0 0x0000043c
+
+#define VIVS_MC_DEBUG_READ1 0x00000440
+
+#define VIVS_MC_DEBUG_WRITE 0x00000444
+
+#define VIVS_MC_PROFILE_RA_READ 0x00000448
+
+#define VIVS_MC_PROFILE_TX_READ 0x0000044c
+
+#define VIVS_MC_PROFILE_FE_READ 0x00000450
+
+#define VIVS_MC_PROFILE_PE_READ 0x00000454
+
+#define VIVS_MC_PROFILE_DE_READ 0x00000458
+
+#define VIVS_MC_PROFILE_SH_READ 0x0000045c
+
+#define VIVS_MC_PROFILE_PA_READ 0x00000460
+
+#define VIVS_MC_PROFILE_SE_READ 0x00000464
+
+#define VIVS_MC_PROFILE_MC_READ 0x00000468
+
+#define VIVS_MC_PROFILE_HI_READ 0x0000046c
+
+#define VIVS_MC_PROFILE_CONFIG0 0x00000470
+#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG0_FE_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT 8
+#define VIVS_MC_PROFILE_CONFIG0_DE_RESET 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG0_PE__MASK 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG0_PE__SHIFT 16
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE 0x00000000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE 0x00010000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE 0x00020000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE 0x00030000
+#define VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D 0x000b0000
+#define VIVS_MC_PROFILE_CONFIG0_PE_RESET 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG0_SH__MASK 0x0f000000
+#define VIVS_MC_PROFILE_CONFIG0_SH__SHIFT 24
+#define VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES 0x04000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER 0x07000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER 0x08000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER 0x09000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER 0x0a000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER 0x0b000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER 0x0c000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER 0x0d000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER 0x0e000000
+#define VIVS_MC_PROFILE_CONFIG0_SH_RESET 0x0f000000
+
+#define VIVS_MC_PROFILE_CONFIG1 0x00000474
+#define VIVS_MC_PROFILE_CONFIG1_PA__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG1_PA__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER 0x00000003
+#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER 0x00000004
+#define VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER 0x00000005
+#define VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER 0x00000006
+#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER 0x00000007
+#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER 0x00000008
+#define VIVS_MC_PROFILE_CONFIG1_PA_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT 8
+#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT 0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT 0x00000100
+#define VIVS_MC_PROFILE_CONFIG1_SE_RESET 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT 16
+#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT 0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT 0x00010000
+#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z 0x00020000
+#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT 0x00030000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER 0x00090000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000
+#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT 0x000b0000
+#define VIVS_MC_PROFILE_CONFIG1_RA_RESET 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0x0f000000
+#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT 24
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS 0x00000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS 0x01000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS 0x02000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS 0x03000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_UNKNOWN 0x04000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT 0x05000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT 0x06000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT 0x07000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT 0x08000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT 0x09000000
+#define VIVS_MC_PROFILE_CONFIG1_TX_RESET 0x0f000000
+
+#define VIVS_MC_PROFILE_CONFIG2 0x00000478
+#define VIVS_MC_PROFILE_CONFIG2_MC__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_MC__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE 0x00000001
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP 0x00000002
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE 0x00000003
+#define VIVS_MC_PROFILE_CONFIG2_MC_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT 8
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED 0x00000000
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED 0x00000100
+#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED 0x00000200
+#define VIVS_MC_PROFILE_CONFIG2_HI_RESET 0x00000f00
+
+#define VIVS_MC_PROFILE_CONFIG3 0x0000047c
+
+#define VIVS_MC_BUS_CONFIG 0x00000480
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK 0x0000000f
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT 0
+#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK)
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK 0x000000f0
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT 4
+#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(x) (((x) << VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK)
+
+#define VIVS_MC_START_COMPOSITION 0x00000554
+
+#define VIVS_MC_128B_MERGE 0x00000558
+
+
+#endif /* STATE_HI_XML */
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
new file mode 100644
index 0000000..4cc989a
--- /dev/null
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2015 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_DRM_H__
+#define __ETNAVIV_DRM_H__
+
+#include "drm.h"
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints:
+ *  1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
+ *     user/kernel compatibility
+ *  2) Keep fields aligned to their size
+ *  3) Because of how drm_ioctl() works, we can add new fields at
+ *     the end of an ioctl if some care is taken: drm_ioctl() will
+ *     zero out the new fields at the tail of the ioctl, so a zero
+ *     value should have a backwards compatible meaning. And for
+ *     output params, userspace won't see the newly added output
+ *     fields.. so that has to be somehow ok.
+ */
+
+/* timeouts are specified in clock-monotonic absolute times (to simplify
+ * restarting interrupted ioctls). The following struct is logically the
+ * same as 'struct timespec' but 32/64b ABI safe.
+ */
+struct drm_etnaviv_timespec {
+ __s64 tv_sec; /* seconds */
+ __s64 tv_nsec; /* nanoseconds */
+};
+
+#define ETNAVIV_PARAM_GPU_MODEL 0x01
+#define ETNAVIV_PARAM_GPU_REVISION 0x02
+#define ETNAVIV_PARAM_GPU_FEATURES_0 0x03
+#define ETNAVIV_PARAM_GPU_FEATURES_1 0x04
+#define ETNAVIV_PARAM_GPU_FEATURES_2 0x05
+#define ETNAVIV_PARAM_GPU_FEATURES_3 0x06
+#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
+
+#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
+#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
+#define ETNAVIV_PARAM_GPU_THREAD_COUNT 0x12
+#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE 0x13
+#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT 0x14
+#define ETNAVIV_PARAM_GPU_PIXEL_PIPES 0x15
+#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
+#define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17
+#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
+#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
+
+#define ETNA_MAX_PIPES 4
+
+struct drm_etnaviv_param {
+ __u32 pipe; /* in */
+ __u32 param; /* in, ETNAVIV_PARAM_x */
+ __u64 value; /* out (get_param) or in (set_param) */
+};
+
+/*
+ * GEM buffers:
+ */
+
+#define ETNA_BO_CACHE_MASK 0x000f0000
+/* cache modes */
+#define ETNA_BO_CACHED 0x00010000
+#define ETNA_BO_WC 0x00020000
+#define ETNA_BO_UNCACHED 0x00040000
+/* map flags */
+#define ETNA_BO_FORCE_MMU 0x00100000
+
+struct drm_etnaviv_gem_new {
+ __u64 size; /* in */
+ __u32 flags; /* in, mask of ETNA_BO_x */
+ __u32 handle; /* out */
+};
+
+struct drm_etnaviv_gem_info {
+ __u32 handle; /* in */
+ __u32 pad;
+ __u64 offset; /* out, offset to pass to mmap() */
+};
+
+#define ETNA_PREP_READ 0x01
+#define ETNA_PREP_WRITE 0x02
+#define ETNA_PREP_NOSYNC 0x04
+
+struct drm_etnaviv_gem_cpu_prep {
+ __u32 handle; /* in */
+ __u32 op; /* in, mask of ETNA_PREP_x */
+ struct drm_etnaviv_timespec timeout; /* in */
+};
+
+struct drm_etnaviv_gem_cpu_fini {
+ __u32 handle; /* in */
+ __u32 flags; /* in, placeholder for now, no defined values */
+};
+
+/*
+ * Cmdstream Submission:
+ */
+
+/* The value written into the cmdstream is logically:
+ * relocbuf->gpuaddr + reloc_offset
+ *
+ * NOTE that reloc's must be sorted by order of increasing submit_offset,
+ * otherwise EINVAL.
+ */
+struct drm_etnaviv_gem_submit_reloc {
+ __u32 submit_offset; /* in, offset from submit_bo */
+ __u32 reloc_idx; /* in, index of reloc_bo buffer */
+ __u64 reloc_offset; /* in, offset from start of reloc_bo */
+ __u32 flags; /* in, placeholder for now, no defined values */
+};
+
+/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
+ * cmdstream buffer(s) themselves or reloc entries) has one (and only
+ * one) entry in the submit->bos[] table.
+ *
+ * As a optimization, the current buffer (gpu virtual address) can be
+ * passed back through the 'presumed' field. If on a subsequent reloc,
+ * userspace passes back a 'presumed' address that is still valid,
+ * then patching the cmdstream for this entry is skipped. This can
+ * avoid kernel needing to map/access the cmdstream bo in the common
+ * case.
+ */
+#define ETNA_SUBMIT_BO_READ 0x0001
+#define ETNA_SUBMIT_BO_WRITE 0x0002
+struct drm_etnaviv_gem_submit_bo {
+ __u32 flags; /* in, mask of ETNA_SUBMIT_BO_x */
+ __u32 handle; /* in, GEM handle */
+ __u64 presumed; /* in/out, presumed buffer address */
+};
+
+/* Each cmdstream submit consists of a table of buffers involved, and
+ * one or more cmdstream buffers. This allows for conditional execution
+ * (context-restore), and IB buffers needed for per tile/bin draw cmds.
+ */
+#define ETNA_PIPE_3D 0x00
+#define ETNA_PIPE_2D 0x01
+#define ETNA_PIPE_VG 0x02
+struct drm_etnaviv_gem_submit {
+ __u32 fence; /* out */
+ __u32 pipe; /* in */
+ __u32 exec_state; /* in, initial execution state (ETNA_PIPE_x) */
+ __u32 nr_bos; /* in, number of submit_bo's */
+ __u32 nr_relocs; /* in, number of submit_reloc's */
+ __u32 stream_size; /* in, cmdstream size */
+ __u64 bos; /* in, ptr to array of submit_bo's */
+ __u64 relocs; /* in, ptr to array of submit_reloc's */
+ __u64 stream; /* in, ptr to cmdstream */
+};
+
+/* The normal way to synchronize with the GPU is just to CPU_PREP on
+ * a buffer if you need to access it from the CPU (other cmdstream
+ * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
+ * handle the required synchronization under the hood). This ioctl
+ * mainly just exists as a way to implement the gallium pipe_fence
+ * APIs without requiring a dummy bo to synchronize on.
+ */
+#define ETNA_WAIT_NONBLOCK 0x01
+struct drm_etnaviv_wait_fence {
+ __u32 pipe; /* in */
+ __u32 fence; /* in */
+ __u32 flags; /* in, mask of ETNA_WAIT_x */
+ __u32 pad;
+ struct drm_etnaviv_timespec timeout; /* in */
+};
+
+#define ETNA_USERPTR_READ 0x01
+#define ETNA_USERPTR_WRITE 0x02
+struct drm_etnaviv_gem_userptr {
+ __u64 user_ptr; /* in, page aligned user pointer */
+ __u64 user_size; /* in, page aligned user size */
+ __u32 flags; /* in, flags */
+ __u32 handle; /* out, non-zero handle */
+};
+
+struct drm_etnaviv_gem_wait {
+ __u32 pipe; /* in */
+ __u32 handle; /* in, bo to be waited for */
+ __u32 flags; /* in, mask of ETNA_WAIT_x */
+ __u32 pad;
+ struct drm_etnaviv_timespec timeout; /* in */
+};
+
+#define DRM_ETNAVIV_GET_PARAM 0x00
+/* placeholder:
+#define DRM_ETNAVIV_SET_PARAM 0x01
+ */
+#define DRM_ETNAVIV_GEM_NEW 0x02
+#define DRM_ETNAVIV_GEM_INFO 0x03
+#define DRM_ETNAVIV_GEM_CPU_PREP 0x04
+#define DRM_ETNAVIV_GEM_CPU_FINI 0x05
+#define DRM_ETNAVIV_GEM_SUBMIT 0x06
+#define DRM_ETNAVIV_WAIT_FENCE 0x07
+#define DRM_ETNAVIV_GEM_USERPTR 0x08
+#define DRM_ETNAVIV_GEM_WAIT 0x09
+#define DRM_ETNAVIV_NUM_IOCTLS 0x0a
+
+#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
+#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
+#define DRM_IOCTL_ETNAVIV_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info)
+#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep)
+#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini)
+#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit)
+#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
+#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
+#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
+
+#endif /* __ETNAVIV_DRM_H__ */
--
2.7.0.rc3
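
A minimal userspace sketch of the simplest ioctls declared in the uapi header above (GET_PARAM and GEM_NEW). It is illustrative only, not part of the patch: it assumes the header is installed by libdrm as <drm/etnaviv_drm.h>, that the GPU is exposed as the first render node /dev/dri/renderD128 (both assumptions, not guaranteed by the patch), and it omits error handling beyond the ioctl return checks.

/* Illustrative sketch only -- not part of the etnaviv patch above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/etnaviv_drm.h>	/* assumed install path for the header above */

int main(void)
{
	/* Assumption: the etnaviv GPU is the first render node. */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0)
		return 1;

	/* Query the GPU model of pipe 0 via DRM_IOCTL_ETNAVIV_GET_PARAM. */
	struct drm_etnaviv_param req;
	memset(&req, 0, sizeof(req));
	req.pipe = 0;
	req.param = ETNAVIV_PARAM_GPU_MODEL;
	if (ioctl(fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req) == 0)
		printf("GPU model: 0x%llx\n", (unsigned long long)req.value);

	/* Allocate a 4 KiB write-combined buffer object with GEM_NEW. */
	struct drm_etnaviv_gem_new gem;
	memset(&gem, 0, sizeof(gem));
	gem.size = 4096;
	gem.flags = ETNA_BO_WC;
	if (ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &gem) == 0)
		printf("BO handle: %u\n", gem.handle);

	close(fd);
	return 0;
}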