Commit 550e3b2

Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next
Some more radeon and amdgpu stuff for drm-next. Mostly just bug fixes
for new features and cleanups.

* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: fix rb bitmap & cu bitmap calculation
  drm/amdgpu: trace the pd_addr in vm_grab_id as well
  drm/amdgpu: fix VM faults caused by vm_grab_id() v4
  drm/amdgpu: update radeon acpi header
  drm/radeon: update radeon acpi header
  drm/amd: cleanup get_mfd_cell_dev()
  drm/amdgpu: fix error handling in amdgpu_bo_list_set
  drm/amd/powerplay: fix code style warning.
  drm/amd: Do not make DRM_AMD_ACP default to y
  drm/amdgpu/gfx: fix off by one in rb rework (v2)
2 parents 984fee6 + 6157bd7 commit 550e3b2

File tree: 18 files changed (+131, -121 lines)

drivers/gpu/drm/amd/acp/Kconfig

Lines changed: 0 additions & 1 deletion
@@ -2,7 +2,6 @@ menu "ACP Configuration"
 
 config DRM_AMD_ACP
 	bool "Enable ACP IP support"
-	default y
 	select MFD_CORE
 	select PM_GENERIC_DOMAINS if PM
 	help

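With the default dropped, ACP support is now off unless selected explicitly; anyone who wants the ACP audio block has to turn the option on themselves, e.g. in .config:

	CONFIG_DRM_AMD_ACP=y
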
drivers/gpu/drm/amd/amdgpu/amdgpu.h

Lines changed: 9 additions & 7 deletions
@@ -769,8 +769,9 @@ struct amdgpu_ib {
 	uint32_t			*ptr;
 	struct amdgpu_fence		*fence;
 	struct amdgpu_user_fence	*user;
-	bool				grabbed_vmid;
 	struct amdgpu_vm		*vm;
+	unsigned			vm_id;
+	uint64_t			vm_pd_addr;
 	struct amdgpu_ctx		*ctx;
 	uint32_t			gds_base, gds_size;
 	uint32_t			gws_base, gws_size;

@@ -877,10 +878,10 @@ struct amdgpu_vm_pt {
 };
 
 struct amdgpu_vm_id {
-	unsigned		id;
-	uint64_t		pd_gpu_addr;
+	struct amdgpu_vm_manager_id	*mgr_id;
+	uint64_t			pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct fence		*flushed_updates;
+	struct fence			*flushed_updates;
 };
 
 struct amdgpu_vm {

@@ -954,10 +955,11 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence);
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      unsigned *vm_id, uint64_t *vm_pd_addr);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates);
+		     unsigned vmid,
+		     uint64_t pd_addr);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);

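The header rework replaces the per-IB grabbed_vmid flag with an explicit (vm_id, vm_pd_addr) pair, and amdgpu_vm_id now points at its backing manager slot instead of caching a raw index. A minimal sketch of the resulting calling convention (error paths and sync setup elided; the real call sites are in amdgpu_job.c and amdgpu_ib.c below):

	unsigned vm_id;
	uint64_t vm_pd_addr;
	int r;

	/* grab_id hands back the slot index plus either the new page
	 * directory address or AMDGPU_VM_NO_FLUSH when no flush is due */
	r = amdgpu_vm_grab_id(vm, ring, &sync, fence, &vm_id, &vm_pd_addr);
	if (!r)
		amdgpu_vm_flush(ring, vm_id, vm_pd_addr);

A vm_id of 0 can double as "not grabbed" in the checks further down because VMID 0 is reserved for the kernel's own system context and is never handed out to user VMs.
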
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c

Lines changed: 2 additions & 4 deletions
@@ -240,12 +240,10 @@ static int acp_poweron(struct generic_pm_domain *genpd)
 static struct device *get_mfd_cell_dev(const char *device_name, int r)
 {
 	char auto_dev_name[25];
-	char buf[8];
 	struct device *dev;
 
-	sprintf(buf, ".%d.auto", r);
-	strcpy(auto_dev_name, device_name);
-	strcat(auto_dev_name, buf);
+	snprintf(auto_dev_name, sizeof(auto_dev_name),
+		 "%s.%d.auto", device_name, r);
 	dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
 	dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
 

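The cleanup folds an unbounded sprintf/strcpy/strcat chain into one bounded snprintf: where the old three-call sequence would silently overflow auto_dev_name[25] on a long cell name, snprintf truncates instead and always NUL-terminates. A standalone userspace illustration (the device name here is invented for the example):

	#include <stdio.h>

	int main(void)
	{
		char name[25];	/* same size as auto_dev_name in the patch */

		/* one bounded call instead of sprintf+strcpy+strcat */
		snprintf(name, sizeof(name), "%s.%d.auto", "acp-dma", 0);
		printf("%s\n", name);	/* prints "acp-dma.0.auto" */
		return 0;
	}
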
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c

Lines changed: 3 additions & 0 deletions
@@ -118,6 +118,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 		usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm);
 		if (usermm) {
 			if (usermm != current->mm) {
+				amdgpu_bo_unref(&entry->robj);
 				r = -EPERM;
 				goto error_free;
 			}

@@ -151,6 +152,8 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 	return 0;
 
 error_free:
+	while (i--)
+		amdgpu_bo_unref(&array[i].robj);
 	drm_free_large(array);
 	return r;
 }

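Both hunks apply the usual unwind-on-error idiom: the reference taken for the entry that failed validation is dropped right at the failure site, and error_free then walks i back over the entries that were fully set up. A sketch of the pattern in isolation, under assumed names (struct item, get_ref and put_ref are hypothetical stand-ins for the buffer-object helpers):

	/* i counts fully-initialized entries when the loop exits early */
	static int acquire_all(struct item *array, unsigned n)
	{
		unsigned i;
		int r;

		for (i = 0; i < n; ++i) {
			r = get_ref(&array[i]);		/* hypothetical helper */
			if (r)
				goto error_free;	/* i refs held so far */
		}
		return 0;

	error_free:
		while (i--)				/* drop [0, i) in reverse */
			put_ref(&array[i]);		/* hypothetical helper */
		return r;
	}

Without the unwind, references taken on earlier iterations would leak whenever a later entry failed.
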
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

Lines changed: 4 additions & 3 deletions
@@ -75,6 +75,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	}
 
 	ib->vm = vm;
+	ib->vm_id = 0;
 
 	return 0;
 }

@@ -139,7 +140,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return -EINVAL;
 	}
 
-	if (vm && !ibs->grabbed_vmid) {
+	if (vm && !ibs->vm_id) {
 		dev_err(adev->dev, "VM IB without ID\n");
 		return -EINVAL;
 	}

@@ -152,10 +153,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 
 	if (vm) {
 		/* do context switch */
-		amdgpu_vm_flush(ring, vm, last_vm_update);
+		amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr);
 
 		if (ring->funcs->emit_gds_switch)
-			amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+			amdgpu_ring_emit_gds_switch(ring, ib->vm_id,
 						    ib->gds_base, ib->gds_size,
 						    ib->gws_base, ib->gws_size,
 						    ib->oa_base, ib->oa_size);

drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

Lines changed: 11 additions & 4 deletions
@@ -105,16 +105,23 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 
 	struct fence *fence = amdgpu_sync_get_fence(&job->sync);
 
-	if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
+	if (fence == NULL && vm && !job->ibs->vm_id) {
 		struct amdgpu_ring *ring = job->ring;
+		unsigned i, vm_id;
+		uint64_t vm_pd_addr;
 		int r;
 
 		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-				      &job->base.s_fence->base);
+				      &job->base.s_fence->base,
+				      &vm_id, &vm_pd_addr);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
-		else
-			job->ibs->grabbed_vmid = true;
+		else {
+			for (i = 0; i < job->num_ibs; ++i) {
+				job->ibs[i].vm_id = vm_id;
+				job->ibs[i].vm_pd_addr = vm_pd_addr;
+			}
+		}
 
 		fence = amdgpu_sync_get_fence(&job->sync);
 	}

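A note on why this lives in the dependency callback: any fence that amdgpu_vm_grab_id() adds to job->sync is picked up by the trailing amdgpu_sync_get_fence() call and so becomes just another dependency for the scheduler to wait on before the job runs. And since every IB of a job executes in the same VM, one grab is stamped into all of them rather than tracking a per-IB flag.
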
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h

Lines changed: 11 additions & 8 deletions
@@ -100,21 +100,24 @@ TRACE_EVENT(amdgpu_sched_run_job,
 
 
 TRACE_EVENT(amdgpu_vm_grab_id,
-	    TP_PROTO(struct amdgpu_vm *vm, unsigned vmid, int ring),
-	    TP_ARGS(vm, vmid, ring),
+	    TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
+		     uint64_t pd_addr),
+	    TP_ARGS(vm, ring, vmid, pd_addr),
 	    TP_STRUCT__entry(
 			     __field(struct amdgpu_vm *, vm)
-			     __field(u32, vmid)
 			     __field(u32, ring)
+			     __field(u32, vmid)
+			     __field(u64, pd_addr)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->vm = vm;
-			   __entry->vmid = vmid;
 			   __entry->ring = ring;
+			   __entry->vmid = vmid;
+			   __entry->pd_addr = pd_addr;
 			   ),
-	    TP_printk("vm=%p, id=%u, ring=%u", __entry->vm, __entry->vmid,
-		      __entry->ring)
+	    TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
+		      __entry->ring, __entry->vmid, __entry->pd_addr)
 );
 
 TRACE_EVENT(amdgpu_vm_bo_map,

@@ -231,8 +234,8 @@ TRACE_EVENT(amdgpu_vm_flush,
 			   __entry->ring = ring;
 			   __entry->id = id;
 			   ),
-	    TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
-		      __entry->pd_addr, __entry->ring, __entry->id)
+	    TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
+		      __entry->ring, __entry->id, __entry->pd_addr)
 );
 
 TRACE_EVENT(amdgpu_bo_list_set,

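With the reordered fields, both tracepoints now print ring, then id, then pd_addr, so grab and flush events line up when reading a trace buffer. Hypothetical sample output (pointer and address values invented for illustration):

	amdgpu_vm_grab_id: vm=ffff88021a334000, ring=0, id=4, pd_addr=0x0000200000
	amdgpu_vm_flush: ring=0, id=4, pd_addr=0x0000200000
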
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

Lines changed: 61 additions & 55 deletions
@@ -50,6 +50,9 @@
  * SI supports 16.
  */
 
+/* Special value that no flush is necessary */
+#define AMDGPU_VM_NO_FLUSH (~0ll)
+
 /**
  * amdgpu_vm_num_pde - return the number of page directory entries
  *

@@ -157,50 +160,70 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync, struct fence *fence)
+		      struct amdgpu_sync *sync, struct fence *fence,
+		      unsigned *vm_id, uint64_t *vm_pd_addr)
 {
-	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
+	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_vm_manager_id *id;
+	struct amdgpu_vm_id *id = &vm->ids[ring->idx];
+	struct fence *updates = sync->last_vm_update;
 	int r;
 
 	mutex_lock(&adev->vm_manager.lock);
 
 	/* check if the id is still valid */
-	if (vm_id->id) {
+	if (id->mgr_id) {
+		struct fence *flushed = id->flushed_updates;
+		bool is_later;
 		long owner;
 
-		id = &adev->vm_manager.ids[vm_id->id];
-		owner = atomic_long_read(&id->owner);
-		if (owner == (long)vm) {
-			list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-			trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+		if (!flushed)
+			is_later = true;
+		else if (!updates)
+			is_later = false;
+		else
+			is_later = fence_is_later(updates, flushed);
+
+		owner = atomic_long_read(&id->mgr_id->owner);
+		if (!is_later && owner == (long)id &&
+		    pd_addr == id->pd_gpu_addr) {
+
+			fence_put(id->mgr_id->active);
+			id->mgr_id->active = fence_get(fence);
+
+			list_move_tail(&id->mgr_id->list,
+				       &adev->vm_manager.ids_lru);
 
-			fence_put(id->active);
-			id->active = fence_get(fence);
+			*vm_id = id->mgr_id - adev->vm_manager.ids;
+			*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+			trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
						*vm_pd_addr);
 
 			mutex_unlock(&adev->vm_manager.lock);
 			return 0;
 		}
 	}
 
-	/* we definately need to flush */
-	vm_id->pd_gpu_addr = ~0ll;
+	id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
+				      struct amdgpu_vm_manager_id,
+				      list);
 
-	id = list_first_entry(&adev->vm_manager.ids_lru,
-			      struct amdgpu_vm_manager_id,
-			      list);
-	list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-	atomic_long_set(&id->owner, (long)vm);
+	r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
+	if (!r) {
+		fence_put(id->mgr_id->active);
+		id->mgr_id->active = fence_get(fence);
 
-	vm_id->id = id - adev->vm_manager.ids;
-	trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
+		fence_put(id->flushed_updates);
+		id->flushed_updates = fence_get(updates);
 
-	r = amdgpu_sync_fence(ring->adev, sync, id->active);
+		id->pd_gpu_addr = pd_addr;
 
-	if (!r) {
-		fence_put(id->active);
-		id->active = fence_get(fence);
+		list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
+		atomic_long_set(&id->mgr_id->owner, (long)id);
+
+		*vm_id = id->mgr_id - adev->vm_manager.ids;
+		*vm_pd_addr = pd_addr;
+		trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
 	}
 
 	mutex_unlock(&adev->vm_manager.lock);

@@ -211,35 +234,18 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
  * amdgpu_vm_flush - hardware flush the vm
  *
  * @ring: ring to use for flush
- * @vm: vm we want to flush
- * @updates: last vm update that we waited for
+ * @vmid: vmid number to use
+ * @pd_addr: address of the page directory
  *
- * Flush the vm.
+ * Emit a VM flush when it is necessary.
  */
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates)
+		     unsigned vmid,
+		     uint64_t pd_addr)
 {
-	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
-	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
-	struct fence *flushed_updates = vm_id->flushed_updates;
-	bool is_later;
-
-	if (!flushed_updates)
-		is_later = true;
-	else if (!updates)
-		is_later = false;
-	else
-		is_later = fence_is_later(updates, flushed_updates);
-
-	if (pd_addr != vm_id->pd_gpu_addr || is_later) {
-		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-		if (is_later) {
-			vm_id->flushed_updates = fence_get(updates);
-			fence_put(flushed_updates);
-		}
-		vm_id->pd_gpu_addr = pd_addr;
-		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
+	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+		trace_amdgpu_vm_flush(pd_addr, ring->idx, vmid);
+		amdgpu_ring_emit_vm_flush(ring, vmid, pd_addr);
 	}
 }
 

@@ -1284,7 +1290,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	int i, r;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		vm->ids[i].id = 0;
+		vm->ids[i].mgr_id = NULL;
 		vm->ids[i].flushed_updates = NULL;
 	}
 	vm->va = RB_ROOT;

@@ -1381,13 +1387,13 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		unsigned id = vm->ids[i].id;
+		struct amdgpu_vm_id *id = &vm->ids[i];
 
-		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
-				    (long)vm, 0);
-		fence_put(vm->ids[i].flushed_updates);
+		if (id->mgr_id)
+			atomic_long_cmpxchg(&id->mgr_id->owner,
+					    (long)id, 0);
+		fence_put(id->flushed_updates);
 	}
-
 }
 
 /**

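The rework moves the "is a flush needed?" decision from flush time into amdgpu_vm_grab_id(), which now reports AMDGPU_VM_NO_FLUSH when the old ID can be reused. Restated as a standalone predicate (a sketch mirroring the checks above; fence_is_later() is the kernel helper that orders two fences on the same context):

	/* May the previously grabbed ID be reused without a VM flush? */
	static bool vmid_reusable(struct fence *flushed, struct fence *updates,
				  uint64_t pd_addr, uint64_t pd_gpu_addr,
				  long owner, long self)
	{
		bool is_later;

		if (!flushed)			/* never flushed yet */
			is_later = true;
		else if (!updates)		/* no pending PD/PT update */
			is_later = false;
		else				/* pending update newer? */
			is_later = fence_is_later(updates, flushed);

		return !is_later &&		/* flushed state is current */
		       owner == self &&		/* slot not stolen meanwhile */
		       pd_addr == pd_gpu_addr;	/* page directory unmoved */
	}

All three conditions must hold; otherwise a fresh slot is taken from the LRU and the caller gets the real pd_addr back, which amdgpu_vm_flush() then emits.
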
drivers/gpu/drm/amd/amdgpu/cik_sdma.c

Lines changed: 1 addition & 1 deletion
@@ -212,7 +212,7 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
 				  struct amdgpu_ib *ib)
 {
-	u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
+	u32 extra_bits = ib->vm_id & 0xf;
 	u32 next_rptr = ring->wptr + 5;
 
 	while ((next_rptr & 7) != 4)

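The & 0xf mask survives the change because the VMID occupies a 4-bit field in the SDMA indirect-buffer packet (the hardware supports at most 16 VMIDs); reading it straight from ib->vm_id just drops the detour through ib->vm->ids[ring->idx], which is no longer kept up to date. The packet encoding in cikd.h is roughly the following (layout quoted from memory, treat as an assumption):

	/* extra_bits lands in the high half of the packet header */
	#define SDMA_PACKET(op, sub_op, e)	((((e) & 0xFFFF) << 16) |	\
						 (((sub_op) & 0xFF) << 8) |	\
						 (((op) & 0xFF) << 0))
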
drivers/gpu/drm/amd/amdgpu/cikd.h

Lines changed: 0 additions & 3 deletions
@@ -46,9 +46,6 @@
 #define BONAIRE_GB_ADDR_CONFIG_GOLDEN	0x12010001
 #define HAWAII_GB_ADDR_CONFIG_GOLDEN	0x12011003
 
-#define CIK_RB_BITMAP_WIDTH_PER_SH	2
-#define HAWAII_RB_BITMAP_WIDTH_PER_SH	4
-
 #define AMDGPU_NUM_OF_VMIDS	8
 
 #define		PIPEID(x)	((x) << 0)
