Copy in non-KFD changes
These include amdgpu changes, as well as any changes we had to make to
the include files, radeon, etc.

Change-Id: Ic6291c17e4168c757ab172235342e3e407b285a1

Conflicts[4.14-rc1]:
	drivers/gpu/drm/amd/amdgpu/amdgpu.h
	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

Conflicts[4.15-rc2]:
	drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

Conflicts[4.15-rc4]:
	drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
	drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
	drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

Conflicts[4.16-rc1]:
	drivers/gpu/drm/amd/amdgpu/amdgpu.h
	drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
	drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
	include/linux/pci.h
	include/uapi/linux/pci_regs.h

Conflicts:
	drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
	drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
	drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
	drivers/iommu/amd_iommu.c
kentrussell authored and Kevin Wang committed Mar 19, 2018
1 parent 61986dd commit cb82748
Showing 27 changed files with 434 additions and 77 deletions.
3 changes: 3 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -189,6 +189,7 @@ struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct kfd_vm_fault_info;

enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -389,6 +390,8 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

extern const struct dma_buf_ops amdgpu_dmabuf_ops;

/* sub-allocation manager, it has to be protected by another lock.
* By conception this is a helper for other parts of the driver
* like the indirect buffer or semaphore, which both have their
5 changes: 3 additions & 2 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -560,7 +560,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->bo_list) {
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
if (p->bo_list->first_userptr != p->bo_list->num_entries)
p->mn = amdgpu_mn_get(p->adev);
p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
}

INIT_LIST_HEAD(&duplicates);
@@ -1695,7 +1695,8 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
addr /= AMDGPU_GPU_PAGE_SIZE;

mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo ||
amdgpu_ttm_adev(mapping->bo_va->base.bo->tbo.bdev) != parser->adev)
return -EINVAL;

*bo = mapping->bo_va->base.bo;
5 changes: 5 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2725,6 +2725,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
atomic_inc(&adev->gpu_reset_counter);
adev->in_gpu_reset = 1;

/* Block kfd */
amdgpu_amdkfd_pre_reset(adev);

/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

@@ -2786,6 +2789,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
/* unlock kfd after a successful recovery */
amdgpu_amdkfd_post_reset(adev);
}

amdgpu_vf_error_trans_all(adev);
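The two hunks above bracket GPU recovery with KFD quiesce/resume calls. A minimal sketch of the resulting flow (simplified; do_recovery() below is a hypothetical placeholder for the existing reset/re-init sequence, not a function in this tree):

	/* Sketch only: KFD is blocked before the reset and resumed only on success. */
	amdgpu_amdkfd_pre_reset(adev);		/* quiesce KFD before touching the GPU */
	r = do_recovery(adev);			/* placeholder for the reset sequence */
	if (!r)
		amdgpu_amdkfd_post_reset(adev);	/* let KFD resume after a successful recovery */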
6 changes: 6 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -310,6 +310,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
obj = new_amdgpu_fb->obj;
new_abo = gem_to_amdgpu_bo(obj);

if (amdgpu_ttm_adev(new_abo->tbo.bdev) != adev) {
DRM_ERROR("Foreign BOs not allowed in the display engine\n");
r = -EINVAL;
goto cleanup;
}

/* pin the new buffer */
r = amdgpu_bo_reserve(new_abo, false);
if (unlikely(r != 0)) {
11 changes: 10 additions & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -296,16 +296,25 @@ module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);

#ifdef CONFIG_DRM_AMDGPU_SI

#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
int amdgpu_si_support = 1;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");

#else
int amdgpu_si_support = 1;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
#endif
module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif

#ifdef CONFIG_DRM_AMDGPU_CIK

#if (0 && (defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)))
int amdgpu_cik_support = 0;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_cik_support = 1;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
#endif
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif

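These parameters are meant to be consulted at PCI probe time so that SI/CIK boards can be left to the radeon driver when amdgpu support for them is disabled. A hedged sketch of such a check (assumption: simplified and not copied from this tree; pdev comes from the probe path and asic_type stands in for however the probe identifies the chip):

	/* Sketch only: refuse an SI ASIC when si_support is disabled. */
	if (!amdgpu_si_support && asic_type == CHIP_TAHITI) {
		dev_info(&pdev->dev, "amdgpu SI support disabled, deferring to radeon\n");
		return -ENODEV;
	}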
2 changes: 2 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -105,6 +105,8 @@ struct amdgpu_gmc {
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
bool translate_further;
struct kfd_vm_fault_info *vm_fault_info;
atomic_t vm_fault_info_updated;

const struct amdgpu_gmc_funcs *gmc_funcs;
};
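The new vm_fault_info/vm_fault_info_updated pair acts as a one-slot mailbox between the GMC fault interrupt handler and KFD, which reports the fault to user mode. A hedged sketch of the producer side (illustrative only; the actual code lives in the GMC interrupt handlers, which are not part of this excerpt):

	/* Sketch only: record one fault at a time until KFD consumes it. */
	if (!atomic_read(&adev->gmc.vm_fault_info_updated)) {
		/* ... fill *adev->gmc.vm_fault_info from the fault status ... */
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}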
117 changes: 95 additions & 22 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -36,12 +36,14 @@
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"

struct amdgpu_mn {
/* constant after initialisation */
struct amdgpu_device *adev;
struct mm_struct *mm;
struct mmu_notifier mn;
enum amdgpu_mn_type type;

/* only used on destruction */
struct work_struct work;
@@ -193,7 +195,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
}

/**
* amdgpu_mn_invalidate_range_start - callback to notify about mm change
* amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
*
* @mn: our notifier
* @mm: the mm this callback is about
@@ -203,10 +205,10 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
* We block for all BOs between start and end to be idle and
* unmap them by moving them into the system domain again.
*/
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
@@ -228,7 +230,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
}

/**
* amdgpu_mn_invalidate_range_end - callback to notify about mm change
* amdgpu_mn_invalidate_range_end_gfx - callback to notify about mm change
*
* @mn: our notifier
* @mm: the mm this callback is about
@@ -237,33 +239,101 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
*
* Release the lock again to allow new command submissions.
*/
static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
static void amdgpu_mn_invalidate_range_end_gfx(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);

amdgpu_mn_read_unlock(rmn);
}

static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
.invalidate_range_end = amdgpu_mn_invalidate_range_end,
/**
* amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
*
* @mn: our notifier
* @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
* We temporarily evict all BOs between start and end. This
* necessitates evicting all user-mode queues of the process. The BOs
* are restored in amdgpu_mn_invalidate_range_end_hsa.
*/
static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;

/* notification is exclusive, but interval is inclusive */
end -= 1;

amdgpu_mn_read_lock(rmn);

it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;

node = container_of(it, struct amdgpu_mn_node, it);
it = interval_tree_iter_next(it, start, end);

list_for_each_entry(bo, &node->bos, mn_list) {
struct kgd_mem *mem = bo->kfd_bo;

if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
start, end))
amdgpu_amdkfd_evict_userptr(mem, mm);
}
}
}

static void amdgpu_mn_invalidate_range_end_hsa(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);

amdgpu_mn_read_unlock(rmn);
}

static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
[AMDGPU_MN_TYPE_GFX] = {
.release = amdgpu_mn_release,
.invalidate_range_start = amdgpu_mn_invalidate_range_start_gfx,
.invalidate_range_end = amdgpu_mn_invalidate_range_end_gfx,
},
[AMDGPU_MN_TYPE_HSA] = {
.release = amdgpu_mn_release,
.invalidate_range_start = amdgpu_mn_invalidate_range_start_hsa,
.invalidate_range_end = amdgpu_mn_invalidate_range_end_hsa,
},
};

/* Low bits of any reasonable mm pointer will be unused due to struct
* alignment. Use these bits to make a unique key from the mm pointer
* and notifier type. */
#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))

/**
* amdgpu_mn_get - create notifier context
*
* @adev: amdgpu device pointer
* @type: type of MMU notifier context
*
* Creates a notifier context for current->mm.
*/
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
enum amdgpu_mn_type type)
{
struct mm_struct *mm = current->mm;
struct amdgpu_mn *rmn;
unsigned long key = AMDGPU_MN_KEY(mm, type);
int r;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
struct hlist_node *node;
@@ -280,11 +350,11 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
hash_for_each_possible(adev->mn_hash, rmn, node, node, (unsigned long)mm)
hash_for_each_possible(adev->mn_hash, rmn, node, node, key)
#else
hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
hash_for_each_possible(adev->mn_hash, rmn, node, key)
#endif
if (rmn->mm == mm)
if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
goto release_locks;

rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
@@ -295,21 +365,22 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)

rmn->adev = adev;
rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops;
init_rwsem(&rmn->lock);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
rmn->objects = RB_ROOT;
#else
rmn->objects = RB_ROOT_CACHED;
#endif
rmn->type = type;
rmn->mn.ops = &amdgpu_mn_ops[type];
mutex_init(&rmn->read_lock);
atomic_set(&rmn->recursion, 0);

r = __mmu_notifier_register(&rmn->mn, mm);
if (r)
goto free_rmn;

hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);
hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));

release_locks:
up_write(&mm->mmap_sem);
@@ -338,12 +409,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
unsigned long end = addr + amdgpu_bo_size(bo) - 1;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
enum amdgpu_mn_type type =
bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
struct amdgpu_mn *rmn;
struct amdgpu_mn_node *node = NULL;
struct list_head bos;
struct interval_tree_node *it;

rmn = amdgpu_mn_get(adev);
rmn = amdgpu_mn_get(adev, type);
if (IS_ERR(rmn))
return PTR_ERR(rmn);

@@ -361,7 +434,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
}

if (!node) {
node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_NOIO);
if (!node) {
up_write(&rmn->lock);
return -ENOMEM;
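To illustrate the AMDGPU_MN_KEY scheme introduced above: an mm_struct pointer is aligned to well more than the number of notifier types, so its low bits are always zero and adding the enum value yields a distinct hash key per (mm, type) pair, letting one process own both a GFX and an HSA notifier. A minimal sketch (illustrative only, not part of the patch):

	struct mm_struct *mm = current->mm;
	unsigned long gfx_key = AMDGPU_MN_KEY(mm, AMDGPU_MN_TYPE_GFX);	/* == (unsigned long)mm + 0 */
	unsigned long hsa_key = AMDGPU_MN_KEY(mm, AMDGPU_MN_TYPE_HSA);	/* == (unsigned long)mm + 1 */
	/* gfx_key != hsa_key, so both contexts can coexist in adev->mn_hash. */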
11 changes: 9 additions & 2 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -29,16 +29,23 @@
*/
struct amdgpu_mn;

enum amdgpu_mn_type {
AMDGPU_MN_TYPE_GFX,
AMDGPU_MN_TYPE_HSA,
};

#if defined(CONFIG_MMU_NOTIFIER)
void amdgpu_mn_lock(struct amdgpu_mn *mn);
void amdgpu_mn_unlock(struct amdgpu_mn *mn);
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev);
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
enum amdgpu_mn_type type);
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
enum amdgpu_mn_type type)
{
return NULL;
}
5 changes: 5 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -36,6 +36,8 @@
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"


static bool amdgpu_need_backup(struct amdgpu_device *adev)
{
@@ -57,6 +59,9 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)

if (bo->tbo.mem.mem_type == AMDGPU_PL_DGMA_IMPORT)
kfree(tbo->mem.bus.addr);
if (bo->kfd_bo)
amdgpu_amdkfd_unreserve_system_memory_limit(bo);

amdgpu_bo_kunmap(bo);

drm_gem_object_release(&bo->gem_base);
1 change: 1 addition & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -92,6 +92,7 @@ struct amdgpu_bo {
struct list_head mn_list;
struct list_head shadow_list;
};
struct kgd_mem *kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
4 changes: 1 addition & 3 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -30,8 +30,6 @@
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

static const struct dma_buf_ops amdgpu_dmabuf_ops;

struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -227,7 +225,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}

static const struct dma_buf_ops amdgpu_dmabuf_ops = {
const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_gem_map_attach,
.detach = amdgpu_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
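Dropping the static qualifier here (together with the extern declaration added to amdgpu.h above) lets other parts of the driver recognize dma-bufs that amdgpu itself exported. A hedged sketch of the kind of check this enables (illustrative helper, not a function added by this commit):

	/* Sketch only: an amdgpu-exported dma_buf carries this ops table. */
	static bool example_is_amdgpu_dmabuf(struct dma_buf *dma_buf)
	{
		return dma_buf->ops == &amdgpu_dmabuf_ops;
	}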
[Diff truncated: remaining changed files not shown]
