@@ -2297,17 +2297,14 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct i915_address_space *vm;
 	struct i915_vma *vma;

 	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
 	BUG_ON(!obj->active);

-	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-		vma = i915_gem_obj_to_vma(obj, vm);
-		if (vma && !list_empty(&vma->mm_list))
-			list_move_tail(&vma->mm_list, &vm->inactive_list);
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (!list_empty(&vma->mm_list))
+			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
 	}

 	intel_fb_obj_flush(obj, true);
@@ -3062,10 +3059,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	 * cause memory corruption through use-after-free.
 	 */

-	/* Throw away the active reference before moving to the unbound list */
-	i915_gem_object_retire(obj);
-
-	if (i915_is_ggtt(vma->vm)) {
+	if (i915_is_ggtt(vma->vm) &&
+	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 		i915_gem_object_finish_gtt(obj);

 		/* release the fence reg _after_ flushing */
@@ -3079,15 +3074,26 @@ int i915_vma_unbind(struct i915_vma *vma)
 	vma->unbind_vma(vma);

 	list_del_init(&vma->mm_list);
-	if (i915_is_ggtt(vma->vm))
-		obj->map_and_fenceable = false;
+	if (i915_is_ggtt(vma->vm)) {
+		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+			obj->map_and_fenceable = false;
+		} else if (vma->ggtt_view.pages) {
+			sg_free_table(vma->ggtt_view.pages);
+			kfree(vma->ggtt_view.pages);
+			vma->ggtt_view.pages = NULL;
+		}
+	}

 	drm_mm_remove_node(&vma->node);
 	i915_gem_vma_destroy(vma);

 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
 	if (list_empty(&obj->vma_list)) {
+		/* Throw away the active reference before
+		 * moving to the unbound list. */
+		i915_gem_object_retire(obj);
+
 		i915_gem_gtt_finish_object(obj);
 		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 	}
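For context on the new ggtt_view.pages teardown above: a non-normal view is expected to carry its own scatter/gather table describing the remapped pages, which the unbind path now frees. A minimal sketch of how a view implementation might populate that table follows; the helper name and the remapping step are hypothetical, only the sg_table allocation/ownership pattern mirrors what this hunk frees.

/* Hypothetical sketch: how a non-normal GGTT view could populate the
 * ggtt_view.pages table that i915_vma_unbind() above now frees with
 * sg_free_table()/kfree(). Not actual driver code.
 */
static int example_view_get_pages(struct i915_vma *vma)
{
	struct sg_table *st;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	ret = sg_alloc_table(st, vma->obj->base.size >> PAGE_SHIFT, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}

	/* ...fill st with obj->pages entries in view-specific order... */

	vma->ggtt_view.pages = st;	/* ownership passes to the VMA */
	return 0;
}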
@@ -3498,7 +3504,8 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
			   struct i915_address_space *vm,
			   unsigned alignment,
-			   uint64_t flags)
+			   uint64_t flags,
+			   const struct i915_ggtt_view *view)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3548,7 +3555,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,

 	i915_gem_object_pin_pages(obj);

-	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+	vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
 	if (IS_ERR(vma))
 		goto err_unpin;

@@ -3578,15 +3585,19 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	if (ret)
 		goto err_remove_node;

+	trace_i915_vma_bind(vma, flags);
+	ret = i915_vma_bind(vma, obj->cache_level,
+			    flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
+	if (ret)
+		goto err_finish_gtt;
+
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->mm_list, &vm->inactive_list);

-	trace_i915_vma_bind(vma, flags);
-	vma->bind_vma(vma, obj->cache_level,
-		      flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
-
 	return vma;

+err_finish_gtt:
+	i915_gem_gtt_finish_object(obj);
 err_remove_node:
 	drm_mm_remove_node(&vma->node);
 err_free_vma:
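The hunk above replaces the unconditional vma->bind_vma() call with a fallible i915_vma_bind() and unwinds through a new err_finish_gtt label, so binding now happens before the object is moved onto the bound/inactive lists. The diff does not show i915_vma_bind() itself; one plausible shape for such a wrapper, assuming view pages may need to be built at bind time, is:

/* Sketch only, not the driver's actual implementation: a bind wrapper that
 * can fail when view-specific page setup fails, otherwise deferring to the
 * pre-existing vma->bind_vma() callback.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	if (i915_is_ggtt(vma->vm) &&
	    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL &&
	    vma->ggtt_view.pages == NULL) {
		/* e.g. the hypothetical example_view_get_pages() sketched earlier */
		int ret = example_view_get_pages(vma);
		if (ret)
			return ret;
	}

	vma->bind_vma(vma, cache_level, flags);
	return 0;
}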
@@ -3789,9 +3800,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		}

 		list_for_each_entry(vma, &obj->vma_list, vma_link)
-			if (drm_mm_node_allocated(&vma->node))
-				vma->bind_vma(vma, cache_level,
-					      vma->bound & GLOBAL_BIND);
+			if (drm_mm_node_allocated(&vma->node)) {
+				ret = i915_vma_bind(vma, cache_level,
+						    vma->bound & GLOBAL_BIND);
+				if (ret)
+					return ret;
+			}
 	}

 	list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -4144,10 +4158,11 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
 }

 int
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    uint32_t alignment,
-		    uint64_t flags)
+i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
+			 struct i915_address_space *vm,
+			 uint32_t alignment,
+			 uint64_t flags,
+			 const struct i915_ggtt_view *view)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct i915_vma *vma;
@@ -4163,7 +4178,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
 		return -EINVAL;

-	vma = i915_gem_obj_to_vma(obj, vm);
+	vma = i915_gem_obj_to_vma_view(obj, vm, view);
 	if (vma) {
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
@@ -4173,7 +4188,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 			     "bo is already pinned with incorrect alignment:"
 			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
-			     i915_gem_obj_offset(obj, vm), alignment,
+			     i915_gem_obj_offset_view(obj, vm, view->type),
+			     alignment,
 			     !!(flags & PIN_MAPPABLE),
 			     obj->map_and_fenceable);
 			ret = i915_vma_unbind(vma);
@@ -4186,13 +4202,17 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,

 	bound = vma ? vma->bound : 0;
 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-		vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
+		vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
+						 flags, view);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
 	}

-	if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
-		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+	if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
+		ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+		if (ret)
+			return ret;
+	}

 	if ((bound ^ vma->bound) & GLOBAL_BIND) {
 		bool mappable, fenceable;
@@ -4528,12 +4548,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	intel_runtime_pm_put(dev_priv);
 }

-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
+					  struct i915_address_space *vm,
+					  const struct i915_ggtt_view *view)
 {
 	struct i915_vma *vma;
 	list_for_each_entry(vma, &obj->vma_list, vma_link)
-		if (vma->vm == vm)
+		if (vma->vm == vm && vma->ggtt_view.type == view->type)
 			return vma;

 	return NULL;
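Since existing callers still use the un-suffixed names, the old entry points presumably remain as thin wrappers that forward a default "normal" view. A sketch, assuming a global i915_ggtt_view_normal descriptor of type I915_GGTT_VIEW_NORMAL (names illustrative, not taken from this diff):

/* Compatibility sketch: keep the old names by forwarding a default view. */
static inline struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm)
{
	return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
}

static inline int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
		    uint64_t flags)
{
	return i915_gem_object_pin_view(obj, vm, alignment, flags,
					&i915_ggtt_view_normal);
}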
@@ -5145,16 +5166,17 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 }

 /* All the new VM stuff */
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
-				  struct i915_address_space *vm)
+unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
+				       struct i915_address_space *vm,
+				       enum i915_ggtt_view_type view)
 {
 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
 	struct i915_vma *vma;

 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);

 	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (vma->vm == vm)
+		if (vma->vm == vm && vma->ggtt_view.type == view)
 			return vma->node.start;

 	}
@@ -5163,13 +5185,16 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
 	return -1;
 }

-bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-			struct i915_address_space *vm)
+bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
+			     struct i915_address_space *vm,
+			     enum i915_ggtt_view_type view)
 {
 	struct i915_vma *vma;

 	list_for_each_entry(vma, &o->vma_list, vma_link)
-		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+		if (vma->vm == vm &&
+		    vma->ggtt_view.type == view &&
+		    drm_mm_node_allocated(&vma->node))
 			return true;

 	return false;
@@ -5304,10 +5329,10 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
 	struct i915_vma *vma;

-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (vma->vm == ggtt)
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->vm == ggtt &&
+		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
 			return vma;
-	}

 	return NULL;
 }