Skip to content

Commit db21854

Browse files
committed
Merge tag 'drm-fixes-for-v4.15-rc10-2' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie: "A fairly urgent nouveau regression fix for broken irqs across suspend/resume came in. This was broken before but a patch in 4.15 has made it much more obviously broken and now s/r fails a lot more often. The fix removes freeing the irq across s/r which never should have been done anyways. Also two vc4 fixes for a NULL pointer dereference and some misrendering / flickering on screen" * tag 'drm-fixes-for-v4.15-rc10-2' of git://people.freedesktop.org/~airlied/linux: drm/nouveau: Move irq setup/teardown to pci ctor/dtor drm/vc4: Fix NULL pointer dereference in vc4_save_hang_state() drm/vc4: Flush the caches before the bin jobs, as well.
2 parents 993ca20 + baa35cc commit db21854

File tree

2 files changed

+58
-21
lines changed

2 files changed

+58
-21
lines changed

drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c

Lines changed: 31 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
7171
struct nvkm_pci *pci = arg;
7272
struct nvkm_device *device = pci->subdev.device;
7373
bool handled = false;
74+
75+
if (pci->irq < 0)
76+
return IRQ_HANDLED;
77+
7478
nvkm_mc_intr_unarm(device);
7579
if (pci->msi)
7680
pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
8488
{
8589
struct nvkm_pci *pci = nvkm_pci(subdev);
8690

87-
if (pci->irq >= 0) {
88-
free_irq(pci->irq, pci);
89-
pci->irq = -1;
90-
}
91-
9291
if (pci->agp.bridge)
9392
nvkm_agp_fini(pci);
9493

@@ -108,16 +107,27 @@ static int
108107
nvkm_pci_oneinit(struct nvkm_subdev *subdev)
109108
{
110109
struct nvkm_pci *pci = nvkm_pci(subdev);
111-
if (pci_is_pcie(pci->pdev))
112-
return nvkm_pcie_oneinit(pci);
110+
struct pci_dev *pdev = pci->pdev;
111+
int ret;
112+
113+
if (pci_is_pcie(pci->pdev)) {
114+
ret = nvkm_pcie_oneinit(pci);
115+
if (ret)
116+
return ret;
117+
}
118+
119+
ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
120+
if (ret)
121+
return ret;
122+
123+
pci->irq = pdev->irq;
113124
return 0;
114125
}
115126

116127
static int
117128
nvkm_pci_init(struct nvkm_subdev *subdev)
118129
{
119130
struct nvkm_pci *pci = nvkm_pci(subdev);
120-
struct pci_dev *pdev = pci->pdev;
121131
int ret;
122132

123133
if (pci->agp.bridge) {
@@ -131,28 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
131141
if (pci->func->init)
132142
pci->func->init(pci);
133143

134-
ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
135-
if (ret)
136-
return ret;
137-
138-
pci->irq = pdev->irq;
139-
140144
/* Ensure MSI interrupts are armed, for the case where there are
141145
* already interrupts pending (for whatever reason) at load time.
142146
*/
143147
if (pci->msi)
144148
pci->func->msi_rearm(pci);
145149

146-
return ret;
150+
return 0;
147151
}
148152

149153
static void *
150154
nvkm_pci_dtor(struct nvkm_subdev *subdev)
151155
{
152156
struct nvkm_pci *pci = nvkm_pci(subdev);
157+
153158
nvkm_agp_dtor(pci);
159+
160+
if (pci->irq >= 0) {
161+
/* free_irq() will call the handler, we use pci->irq == -1
162+
* to signal that it's been torn down and should be a noop.
163+
*/
164+
int irq = pci->irq;
165+
pci->irq = -1;
166+
free_irq(irq, pci);
167+
}
168+
154169
if (pci->msi)
155170
pci_disable_msi(pci->pdev);
171+
156172
return nvkm_pci(subdev);
157173
}
158174

drivers/gpu/drm/vc4/vc4_gem.c

Lines changed: 27 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
146146
struct vc4_exec_info *exec[2];
147147
struct vc4_bo *bo;
148148
unsigned long irqflags;
149-
unsigned int i, j, unref_list_count, prev_idx;
149+
unsigned int i, j, k, unref_list_count;
150150

151151
kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
152152
if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
182182
return;
183183
}
184184

185-
prev_idx = 0;
185+
k = 0;
186186
for (i = 0; i < 2; i++) {
187187
if (!exec[i])
188188
continue;
@@ -197,20 +197,20 @@ vc4_save_hang_state(struct drm_device *dev)
197197
WARN_ON(!refcount_read(&bo->usecnt));
198198
refcount_inc(&bo->usecnt);
199199
drm_gem_object_get(&exec[i]->bo[j]->base);
200-
kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
200+
kernel_state->bo[k++] = &exec[i]->bo[j]->base;
201201
}
202202

203203
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
204204
/* No need to retain BOs coming from the ->unref_list
205205
* because they are naturally unpurgeable.
206206
*/
207207
drm_gem_object_get(&bo->base.base);
208-
kernel_state->bo[j + prev_idx] = &bo->base.base;
209-
j++;
208+
kernel_state->bo[k++] = &bo->base.base;
210209
}
211-
prev_idx = j + 1;
212210
}
213211

212+
WARN_ON_ONCE(k != state->bo_count);
213+
214214
if (exec[0])
215215
state->start_bin = exec[0]->ct0ca;
216216
if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
436436
VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
437437
}
438438

439+
static void
440+
vc4_flush_texture_caches(struct drm_device *dev)
441+
{
442+
struct vc4_dev *vc4 = to_vc4_dev(dev);
443+
444+
V3D_WRITE(V3D_L2CACTL,
445+
V3D_L2CACTL_L2CCLR);
446+
447+
V3D_WRITE(V3D_SLCACTL,
448+
VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
449+
VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
450+
}
451+
439452
/* Sets the registers for the next job to be actually be executed in
440453
* the hardware.
441454
*
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
474487
if (!exec)
475488
return;
476489

490+
/* A previous RCL may have written to one of our textures, and
491+
* our full cache flush at bin time may have occurred before
492+
* that RCL completed. Flush the texture cache now, but not
493+
* the instructions or uniforms (since we don't write those
494+
* from an RCL).
495+
*/
496+
vc4_flush_texture_caches(dev);
497+
477498
submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
478499
}
479500

0 commit comments

Comments
 (0)