@@ -56,6 +56,11 @@ drm_atomic_state_alloc(struct drm_device *dev)
56
56
if (!state )
57
57
return NULL ;
58
58
59
+ /* TODO legacy paths should maybe do a better job about
60
+ * setting this appropriately?
61
+ */
62
+ state -> allow_modeset = true;
63
+
59
64
state -> num_connector = ACCESS_ONCE (dev -> mode_config .num_connector );
60
65
61
66
state -> crtcs = kcalloc (dev -> mode_config .num_crtc ,
@@ -1003,6 +1008,22 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
1003
1008
if (config -> funcs -> atomic_check )
1004
1009
ret = config -> funcs -> atomic_check (state -> dev , state );
1005
1010
1011
+ if (!state -> allow_modeset ) {
1012
+ for (i = 0 ; i < ncrtcs ; i ++ ) {
1013
+ struct drm_crtc * crtc = state -> crtcs [i ];
1014
+ struct drm_crtc_state * crtc_state = state -> crtc_states [i ];
1015
+
1016
+ if (!crtc )
1017
+ continue ;
1018
+
1019
+ if (crtc_state -> mode_changed ) {
1020
+ DRM_DEBUG_KMS ("[CRTC:%d] requires full modeset\n" ,
1021
+ crtc -> base .id );
1022
+ return - EINVAL ;
1023
+ }
1024
+ }
1025
+ }
1026
+
1006
1027
return ret ;
1007
1028
}
1008
1029
EXPORT_SYMBOL (drm_atomic_check_only );
@@ -1068,3 +1089,313 @@ int drm_atomic_async_commit(struct drm_atomic_state *state)
1068
1089
return config -> funcs -> atomic_commit (state -> dev , state , true);
1069
1090
}
1070
1091
EXPORT_SYMBOL (drm_atomic_async_commit );
1092
+
1093
+ /*
1094
+ * The big monster ioctl
1095
+ */
1096
+
1097
+ static struct drm_pending_vblank_event * create_vblank_event (
1098
+ struct drm_device * dev , struct drm_file * file_priv , uint64_t user_data )
1099
+ {
1100
+ struct drm_pending_vblank_event * e = NULL ;
1101
+ unsigned long flags ;
1102
+
1103
+ spin_lock_irqsave (& dev -> event_lock , flags );
1104
+ if (file_priv -> event_space < sizeof e -> event ) {
1105
+ spin_unlock_irqrestore (& dev -> event_lock , flags );
1106
+ goto out ;
1107
+ }
1108
+ file_priv -> event_space -= sizeof e -> event ;
1109
+ spin_unlock_irqrestore (& dev -> event_lock , flags );
1110
+
1111
+ e = kzalloc (sizeof * e , GFP_KERNEL );
1112
+ if (e == NULL ) {
1113
+ spin_lock_irqsave (& dev -> event_lock , flags );
1114
+ file_priv -> event_space += sizeof e -> event ;
1115
+ spin_unlock_irqrestore (& dev -> event_lock , flags );
1116
+ goto out ;
1117
+ }
1118
+
1119
+ e -> event .base .type = DRM_EVENT_FLIP_COMPLETE ;
1120
+ e -> event .base .length = sizeof e -> event ;
1121
+ e -> event .user_data = user_data ;
1122
+ e -> base .event = & e -> event .base ;
1123
+ e -> base .file_priv = file_priv ;
1124
+ e -> base .destroy = (void (* ) (struct drm_pending_event * )) kfree ;
1125
+
1126
+ out :
1127
+ return e ;
1128
+ }
1129
+
1130
+ static void destroy_vblank_event (struct drm_device * dev ,
1131
+ struct drm_file * file_priv , struct drm_pending_vblank_event * e )
1132
+ {
1133
+ unsigned long flags ;
1134
+
1135
+ spin_lock_irqsave (& dev -> event_lock , flags );
1136
+ file_priv -> event_space += sizeof e -> event ;
1137
+ spin_unlock_irqrestore (& dev -> event_lock , flags );
1138
+ kfree (e );
1139
+ }
1140
+
1141
+ static int atomic_set_prop (struct drm_atomic_state * state ,
1142
+ struct drm_mode_object * obj , struct drm_property * prop ,
1143
+ uint64_t prop_value )
1144
+ {
1145
+ struct drm_mode_object * ref ;
1146
+ int ret ;
1147
+
1148
+ if (!drm_property_change_valid_get (prop , prop_value , & ref ))
1149
+ return - EINVAL ;
1150
+
1151
+ switch (obj -> type ) {
1152
+ case DRM_MODE_OBJECT_CONNECTOR : {
1153
+ struct drm_connector * connector = obj_to_connector (obj );
1154
+ struct drm_connector_state * connector_state ;
1155
+
1156
+ connector_state = drm_atomic_get_connector_state (state , connector );
1157
+ if (IS_ERR (connector_state )) {
1158
+ ret = PTR_ERR (connector_state );
1159
+ break ;
1160
+ }
1161
+
1162
+ ret = drm_atomic_connector_set_property (connector ,
1163
+ connector_state , prop , prop_value );
1164
+ break ;
1165
+ }
1166
+ case DRM_MODE_OBJECT_CRTC : {
1167
+ struct drm_crtc * crtc = obj_to_crtc (obj );
1168
+ struct drm_crtc_state * crtc_state ;
1169
+
1170
+ crtc_state = drm_atomic_get_crtc_state (state , crtc );
1171
+ if (IS_ERR (crtc_state )) {
1172
+ ret = PTR_ERR (crtc_state );
1173
+ break ;
1174
+ }
1175
+
1176
+ ret = drm_atomic_crtc_set_property (crtc ,
1177
+ crtc_state , prop , prop_value );
1178
+ break ;
1179
+ }
1180
+ case DRM_MODE_OBJECT_PLANE : {
1181
+ struct drm_plane * plane = obj_to_plane (obj );
1182
+ struct drm_plane_state * plane_state ;
1183
+
1184
+ plane_state = drm_atomic_get_plane_state (state , plane );
1185
+ if (IS_ERR (plane_state )) {
1186
+ ret = PTR_ERR (plane_state );
1187
+ break ;
1188
+ }
1189
+
1190
+ ret = drm_atomic_plane_set_property (plane ,
1191
+ plane_state , prop , prop_value );
1192
+ break ;
1193
+ }
1194
+ default :
1195
+ ret = - EINVAL ;
1196
+ break ;
1197
+ }
1198
+
1199
+ drm_property_change_valid_put (prop , ref );
1200
+ return ret ;
1201
+ }
1202
+
1203
+ int drm_mode_atomic_ioctl (struct drm_device * dev ,
1204
+ void * data , struct drm_file * file_priv )
1205
+ {
1206
+ struct drm_mode_atomic * arg = data ;
1207
+ uint32_t __user * objs_ptr = (uint32_t __user * )(unsigned long )(arg -> objs_ptr );
1208
+ uint32_t __user * count_props_ptr = (uint32_t __user * )(unsigned long )(arg -> count_props_ptr );
1209
+ uint32_t __user * props_ptr = (uint32_t __user * )(unsigned long )(arg -> props_ptr );
1210
+ uint64_t __user * prop_values_ptr = (uint64_t __user * )(unsigned long )(arg -> prop_values_ptr );
1211
+ unsigned int copied_objs , copied_props ;
1212
+ struct drm_atomic_state * state ;
1213
+ struct drm_modeset_acquire_ctx ctx ;
1214
+ struct drm_plane * plane ;
1215
+ unsigned plane_mask = 0 ;
1216
+ int ret = 0 ;
1217
+ unsigned int i , j ;
1218
+
1219
+ /* disallow for drivers not supporting atomic: */
1220
+ if (!drm_core_check_feature (dev , DRIVER_ATOMIC ))
1221
+ return - EINVAL ;
1222
+
1223
+ /* disallow for userspace that has not enabled atomic cap (even
1224
+ * though this may be a bit overkill, since legacy userspace
1225
+ * wouldn't know how to call this ioctl)
1226
+ */
1227
+ if (!file_priv -> atomic )
1228
+ return - EINVAL ;
1229
+
1230
+ if (arg -> flags & ~DRM_MODE_ATOMIC_FLAGS )
1231
+ return - EINVAL ;
1232
+
1233
+ if (arg -> reserved )
1234
+ return - EINVAL ;
1235
+
1236
+ if ((arg -> flags & DRM_MODE_PAGE_FLIP_ASYNC ) &&
1237
+ !dev -> mode_config .async_page_flip )
1238
+ return - EINVAL ;
1239
+
1240
+ /* can't test and expect an event at the same time. */
1241
+ if ((arg -> flags & DRM_MODE_ATOMIC_TEST_ONLY ) &&
1242
+ (arg -> flags & DRM_MODE_PAGE_FLIP_EVENT ))
1243
+ return - EINVAL ;
1244
+
1245
+ drm_modeset_acquire_init (& ctx , 0 );
1246
+
1247
+ state = drm_atomic_state_alloc (dev );
1248
+ if (!state )
1249
+ return - ENOMEM ;
1250
+
1251
+ state -> acquire_ctx = & ctx ;
1252
+ state -> allow_modeset = !!(arg -> flags & DRM_MODE_ATOMIC_ALLOW_MODESET );
1253
+
1254
+ retry :
1255
+ copied_objs = 0 ;
1256
+ copied_props = 0 ;
1257
+
1258
+ for (i = 0 ; i < arg -> count_objs ; i ++ ) {
1259
+ uint32_t obj_id , count_props ;
1260
+ struct drm_mode_object * obj ;
1261
+
1262
+ if (get_user (obj_id , objs_ptr + copied_objs )) {
1263
+ ret = - EFAULT ;
1264
+ goto fail ;
1265
+ }
1266
+
1267
+ obj = drm_mode_object_find (dev , obj_id , DRM_MODE_OBJECT_ANY );
1268
+ if (!obj || !obj -> properties ) {
1269
+ ret = - ENOENT ;
1270
+ goto fail ;
1271
+ }
1272
+
1273
+ if (obj -> type == DRM_MODE_OBJECT_PLANE ) {
1274
+ plane = obj_to_plane (obj );
1275
+ plane_mask |= (1 << drm_plane_index (plane ));
1276
+ plane -> old_fb = plane -> fb ;
1277
+ }
1278
+
1279
+ if (get_user (count_props , count_props_ptr + copied_objs )) {
1280
+ ret = - EFAULT ;
1281
+ goto fail ;
1282
+ }
1283
+
1284
+ copied_objs ++ ;
1285
+
1286
+ for (j = 0 ; j < count_props ; j ++ ) {
1287
+ uint32_t prop_id ;
1288
+ uint64_t prop_value ;
1289
+ struct drm_property * prop ;
1290
+
1291
+ if (get_user (prop_id , props_ptr + copied_props )) {
1292
+ ret = - EFAULT ;
1293
+ goto fail ;
1294
+ }
1295
+
1296
+ prop = drm_property_find (dev , prop_id );
1297
+ if (!prop ) {
1298
+ ret = - ENOENT ;
1299
+ goto fail ;
1300
+ }
1301
+
1302
+ if (get_user (prop_value , prop_values_ptr + copied_props )) {
1303
+ ret = - EFAULT ;
1304
+ goto fail ;
1305
+ }
1306
+
1307
+ ret = atomic_set_prop (state , obj , prop , prop_value );
1308
+ if (ret )
1309
+ goto fail ;
1310
+
1311
+ copied_props ++ ;
1312
+ }
1313
+ }
1314
+
1315
+ if (arg -> flags & DRM_MODE_PAGE_FLIP_EVENT ) {
1316
+ int ncrtcs = dev -> mode_config .num_crtc ;
1317
+
1318
+ for (i = 0 ; i < ncrtcs ; i ++ ) {
1319
+ struct drm_crtc_state * crtc_state = state -> crtc_states [i ];
1320
+ struct drm_pending_vblank_event * e ;
1321
+
1322
+ if (!crtc_state )
1323
+ continue ;
1324
+
1325
+ e = create_vblank_event (dev , file_priv , arg -> user_data );
1326
+ if (!e ) {
1327
+ ret = - ENOMEM ;
1328
+ goto fail ;
1329
+ }
1330
+
1331
+ crtc_state -> event = e ;
1332
+ }
1333
+ }
1334
+
1335
+ if (arg -> flags & DRM_MODE_ATOMIC_TEST_ONLY ) {
1336
+ ret = drm_atomic_check_only (state );
1337
+ /* _check_only() does not free state, unlike _commit() */
1338
+ drm_atomic_state_free (state );
1339
+ } else if (arg -> flags & DRM_MODE_ATOMIC_NONBLOCK ) {
1340
+ ret = drm_atomic_async_commit (state );
1341
+ } else {
1342
+ ret = drm_atomic_commit (state );
1343
+ }
1344
+
1345
+ /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
1346
+ * locks (ie. while it is still safe to deref plane->state). We
1347
+ * need to do this here because the driver entry points cannot
1348
+ * distinguish between legacy and atomic ioctls.
1349
+ */
1350
+ drm_for_each_plane_mask (plane , dev , plane_mask ) {
1351
+ if (ret == 0 ) {
1352
+ struct drm_framebuffer * new_fb = plane -> state -> fb ;
1353
+ if (new_fb )
1354
+ drm_framebuffer_reference (new_fb );
1355
+ plane -> fb = new_fb ;
1356
+ plane -> crtc = plane -> state -> crtc ;
1357
+ } else {
1358
+ plane -> old_fb = NULL ;
1359
+ }
1360
+ if (plane -> old_fb ) {
1361
+ drm_framebuffer_unreference (plane -> old_fb );
1362
+ plane -> old_fb = NULL ;
1363
+ }
1364
+ }
1365
+
1366
+ drm_modeset_drop_locks (& ctx );
1367
+ drm_modeset_acquire_fini (& ctx );
1368
+
1369
+ return ret ;
1370
+
1371
+ fail :
1372
+ if (ret == - EDEADLK )
1373
+ goto backoff ;
1374
+
1375
+ if (arg -> flags & DRM_MODE_PAGE_FLIP_EVENT ) {
1376
+ int ncrtcs = dev -> mode_config .num_crtc ;
1377
+
1378
+ for (i = 0 ; i < ncrtcs ; i ++ ) {
1379
+ struct drm_crtc_state * crtc_state = state -> crtc_states [i ];
1380
+
1381
+ if (!crtc_state )
1382
+ continue ;
1383
+
1384
+ destroy_vblank_event (dev , file_priv , crtc_state -> event );
1385
+ crtc_state -> event = NULL ;
1386
+ }
1387
+ }
1388
+
1389
+ drm_atomic_state_free (state );
1390
+
1391
+ drm_modeset_drop_locks (& ctx );
1392
+ drm_modeset_acquire_fini (& ctx );
1393
+
1394
+ return ret ;
1395
+
1396
+ backoff :
1397
+ drm_atomic_state_clear (state );
1398
+ drm_modeset_backoff (& ctx );
1399
+
1400
+ goto retry ;
1401
+ }
0 commit comments