Skip to content

Commit 41869c1

Browse files
committed
drm/amdgpu: fix dp link rate selection (v2)
Need to properly handle the max link rate in the DPCD. This prevents some cases where 5.4 GHz is selected when it shouldn't be.

v2: simplify logic, add array bounds check

Reviewed-by: Tom St Denis <tom.stdenis@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
1 parent b24c683 commit 41869c1

File tree

1 file changed

+36
-60
lines changed

1 file changed

+36
-60
lines changed

drivers/gpu/drm/amd/amdgpu/atombios_dp.c

Lines changed: 36 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -243,72 +243,40 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA
243243

244244
/* convert bits per color to bits per pixel */
/* get bpc from the EDID */
static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
{
	/* a bpc of 0 means the EDID did not report it; assume 24 bpp (8 bpc) */
	return (bpc == 0) ? 24 : bpc * 3;
}
253253

254-
/* get the max pix clock supported by the link rate and lane num */
255-
static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
256-
int lane_num,
257-
int bpp)
258-
{
259-
return (link_rate * lane_num * 8) / bpp;
260-
}
261-
262254
/***** amdgpu specific DP functions *****/
263255

264-
/* First get the min lane# when low rate is used according to pixel clock
265-
* (prefer low rate), second check max lane# supported by DP panel,
266-
* if the max lane# < low rate lane# then use max lane# instead.
267-
*/
268-
static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
256+
static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
269257
const u8 dpcd[DP_DPCD_SIZE],
270-
int pix_clock)
271-
{
272-
int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
273-
int max_link_rate = drm_dp_max_link_rate(dpcd);
274-
int max_lane_num = drm_dp_max_lane_count(dpcd);
275-
int lane_num;
276-
int max_dp_pix_clock;
277-
278-
for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
279-
max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
280-
if (pix_clock <= max_dp_pix_clock)
281-
break;
282-
}
283-
284-
return lane_num;
285-
}
286-
287-
static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
288-
const u8 dpcd[DP_DPCD_SIZE],
289-
int pix_clock)
258+
unsigned pix_clock,
259+
unsigned *dp_lanes, unsigned *dp_rate)
290260
{
291-
int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
292-
int lane_num, max_pix_clock;
293-
294-
if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
295-
ENCODER_OBJECT_ID_NUTMEG)
296-
return 270000;
297-
298-
lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock);
299-
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp);
300-
if (pix_clock <= max_pix_clock)
301-
return 162000;
302-
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp);
303-
if (pix_clock <= max_pix_clock)
304-
return 270000;
305-
if (amdgpu_connector_is_dp12_capable(connector)) {
306-
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp);
307-
if (pix_clock <= max_pix_clock)
308-
return 540000;
261+
unsigned bpp =
262+
amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
263+
static const unsigned link_rates[3] = { 162000, 270000, 540000 };
264+
unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
265+
unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
266+
unsigned lane_num, i, max_pix_clock;
267+
268+
for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
269+
for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
270+
max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
271+
if (max_pix_clock >= pix_clock) {
272+
*dp_lanes = lane_num;
273+
*dp_rate = link_rates[i];
274+
return 0;
275+
}
276+
}
309277
}
310278

311-
return drm_dp_max_link_rate(dpcd);
279+
return -EINVAL;
312280
}
313281

314282
static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
@@ -422,17 +390,22 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
422390
{
423391
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
424392
struct amdgpu_connector_atom_dig *dig_connector;
393+
int ret;
425394

426395
if (!amdgpu_connector->con_priv)
427396
return;
428397
dig_connector = amdgpu_connector->con_priv;
429398

430399
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
431400
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
432-
dig_connector->dp_clock =
433-
amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
434-
dig_connector->dp_lane_count =
435-
amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
401+
ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
402+
mode->clock,
403+
&dig_connector->dp_lane_count,
404+
&dig_connector->dp_clock);
405+
if (ret) {
406+
dig_connector->dp_clock = 0;
407+
dig_connector->dp_lane_count = 0;
408+
}
436409
}
437410
}
438411

@@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
441414
{
442415
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
443416
struct amdgpu_connector_atom_dig *dig_connector;
444-
int dp_clock;
417+
unsigned dp_lanes, dp_clock;
418+
int ret;
445419

446420
if (!amdgpu_connector->con_priv)
447421
return MODE_CLOCK_HIGH;
448422
dig_connector = amdgpu_connector->con_priv;
449423

450-
dp_clock =
451-
amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
424+
ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
425+
mode->clock, &dp_lanes, &dp_clock);
426+
if (ret)
427+
return MODE_CLOCK_HIGH;
452428

453429
if ((dp_clock == 540000) &&
454430
(!amdgpu_connector_is_dp12_capable(connector)))

0 commit comments

Comments
 (0)