@@ -135,11 +135,17 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 		goto out_unreserve;
 	}
 
-	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+	vmw_bo_placement_set(buf,
+			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+			     VMW_BO_DOMAIN_GMR);
+	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
 	if (likely(ret == 0) || ret == -ERESTARTSYS)
 		goto out_unreserve;
 
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+	vmw_bo_placement_set(buf,
+			     VMW_BO_DOMAIN_VRAM,
+			     VMW_BO_DOMAIN_VRAM);
+	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
 
 out_unreserve:
 	if (!ret)
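Note: the hunks in this diff rely on domain flags and per-BO placement storage introduced in the companion vmwgfx_bo.h change, which is not shown here. The following is only a sketch of the assumed definitions to make the new calls readable; the exact bit values and array sizes are assumptions, not part of this patch.

enum vmw_bo_domain {
	VMW_BO_DOMAIN_SYS          = BIT(0),
	VMW_BO_DOMAIN_WAITABLE_SYS = BIT(1),
	VMW_BO_DOMAIN_VRAM         = BIT(2),
	VMW_BO_DOMAIN_GMR          = BIT(3),
	VMW_BO_DOMAIN_MOB          = BIT(4),
};

struct vmw_bo {
	struct ttm_buffer_object base;
	/* Placement state filled in by vmw_bo_placement_set(). */
	struct ttm_placement placement;
	struct ttm_place places[5];
	struct ttm_place busy_places[5];
	/* other members elided */
};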
@@ -190,17 +196,8 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 {
 	struct ttm_operation_ctx ctx = {interruptible, false};
 	struct ttm_buffer_object *bo = &buf->base;
-	struct ttm_placement placement;
-	struct ttm_place place;
 	int ret = 0;
 
-	place = vmw_vram_placement.placement[0];
-	place.lpfn = bo->resource->num_pages;
-	placement.num_placement = 1;
-	placement.placement = &place;
-	placement.num_busy_placement = 1;
-	placement.busy_placement = &place;
-
 	vmw_execbuf_release_pinned_bo(dev_priv);
 	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 	if (unlikely(ret != 0))
@@ -216,14 +213,21 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 	    bo->resource->start > 0 &&
 	    buf->base.pin_count == 0) {
 		ctx.interruptible = false;
-		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+		vmw_bo_placement_set(buf,
+				     VMW_BO_DOMAIN_SYS,
+				     VMW_BO_DOMAIN_SYS);
+		(void)ttm_bo_validate(bo, &buf->placement, &ctx);
 	}
 
+	vmw_bo_placement_set(buf,
+			     VMW_BO_DOMAIN_VRAM,
+			     VMW_BO_DOMAIN_VRAM);
+	buf->places[0].lpfn = bo->resource->num_pages;
 	if (buf->base.pin_count > 0)
-		ret = ttm_resource_compat(bo->resource, &placement)
+		ret = ttm_resource_compat(bo->resource, &buf->placement)
 			? 0 : -EINVAL;
 	else
-		ret = ttm_bo_validate(bo, &placement, &ctx);
+		ret = ttm_bo_validate(bo, &buf->placement, &ctx);
 
 	/* For some reason we didn't end up at the start of vram */
 	WARN_ON(ret == 0 && bo->resource->start != 0);
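The start-of-VRAM pin path above depends on the VRAM-only domain producing a single placement entry, so that places[0] can then be clamped. A hedged illustration of that pattern follows; the helper name is hypothetical and not part of the patch.

/* Hypothetical helper: restrict a BO's placement to the first pages of VRAM. */
static void example_limit_to_start_of_vram(struct vmw_bo *buf)
{
	struct ttm_buffer_object *bo = &buf->base;

	vmw_bo_placement_set(buf, VMW_BO_DOMAIN_VRAM, VMW_BO_DOMAIN_VRAM);
	/* With a single VRAM place, lpfn caps the highest acceptable page frame. */
	buf->places[0].lpfn = bo->resource->num_pages;
}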
@@ -431,7 +435,7 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
 }
 
 int vmw_bo_create(struct vmw_private *vmw,
-		  size_t size, struct ttm_placement *placement,
+		  size_t size, u32 domain, u32 busy_domain,
 		  bool interruptible, bool pin,
 		  struct vmw_bo **p_bo)
 {
@@ -444,7 +448,8 @@ int vmw_bo_create(struct vmw_private *vmw,
 	}
 
 	ret = vmw_bo_init(vmw, *p_bo, size,
-			  placement, interruptible, pin);
+			  domain, busy_domain,
+			  interruptible, pin);
 	if (unlikely(ret != 0))
 		goto out_error;
 
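With the signature change above, callers pass domains instead of a struct ttm_placement. A sketch of how a caller might look after this change; the wrapper function below is illustrative only, and the argument choices mirror the pin path earlier in this diff.

/* Hypothetical caller: create an unpinned BO preferring GMR, accepting VRAM. */
static int example_create_gmr_or_vram_bo(struct vmw_private *vmw, size_t size,
					 struct vmw_bo **p_bo)
{
	return vmw_bo_create(vmw, size,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR,
			     true /* interruptible */, false /* pin */,
			     p_bo);
}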
@@ -461,7 +466,8 @@ int vmw_bo_create(struct vmw_private *vmw,
  * @dev_priv: Pointer to the device private struct
  * @vmw_bo: Pointer to the struct vmw_bo to initialize.
  * @size: Buffer object size in bytes.
- * @placement: Initial placement.
+ * @domain: Domain to put the bo in.
+ * @busy_domain: Domain to put the bo in if busy.
  * @interruptible: Whether waits should be performed interruptible.
  * @pin: If the BO should be created pinned at a fixed location.
  * Returns: Zero on success, negative error code on error.
@@ -470,7 +476,9 @@ int vmw_bo_create(struct vmw_private *vmw,
  */
 int vmw_bo_init(struct vmw_private *dev_priv,
 		struct vmw_bo *vmw_bo,
-		size_t size, struct ttm_placement *placement,
+		size_t size,
+		u32 domain,
+		u32 busy_domain,
 		bool interruptible, bool pin)
 {
 	struct ttm_operation_ctx ctx = {
@@ -489,9 +497,10 @@ int vmw_bo_init(struct vmw_private *dev_priv,
 	size = ALIGN(size, PAGE_SIZE);
 	drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
 
+	vmw_bo_placement_set(vmw_bo, domain, busy_domain);
 	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
 				   ttm_bo_type_device,
-				   placement,
+				   &vmw_bo->placement,
 				   0, &ctx, NULL, NULL, vmw_bo_free);
 	if (unlikely(ret)) {
 		return ret;
@@ -815,3 +824,101 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
 		vmw_resource_unbind_list(vbo);
 }
+
+static u32
+set_placement_list(struct ttm_place *pl, u32 domain)
+{
+	u32 n = 0;
+
+	/*
+	 * The placements are ordered according to our preferences
+	 */
+	if (domain & VMW_BO_DOMAIN_MOB) {
+		pl[n].mem_type = VMW_PL_MOB;
+		pl[n].flags = 0;
+		pl[n].fpfn = 0;
+		pl[n].lpfn = 0;
+		n++;
+	}
+	if (domain & VMW_BO_DOMAIN_GMR) {
+		pl[n].mem_type = VMW_PL_GMR;
+		pl[n].flags = 0;
+		pl[n].fpfn = 0;
+		pl[n].lpfn = 0;
+		n++;
+	}
+	if (domain & VMW_BO_DOMAIN_VRAM) {
+		pl[n].mem_type = TTM_PL_VRAM;
+		pl[n].flags = 0;
+		pl[n].fpfn = 0;
+		pl[n].lpfn = 0;
+		n++;
+	}
+	WARN_ON((domain & VMW_BO_DOMAIN_WAITABLE_SYS) != 0);
+	if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
+		pl[n].mem_type = VMW_PL_SYSTEM;
+		pl[n].flags = 0;
+		pl[n].fpfn = 0;
+		pl[n].lpfn = 0;
+		n++;
+	}
+	if (domain & VMW_BO_DOMAIN_SYS) {
+		pl[n].mem_type = TTM_PL_SYSTEM;
+		pl[n].flags = 0;
+		pl[n].fpfn = 0;
+		pl[n].lpfn = 0;
+		n++;
+	}
+
+	WARN_ON(!n);
+	if (!n) {
+		pl[n].mem_type = TTM_PL_SYSTEM;
+		pl[n].flags = 0;
+		pl[n].fpfn = 0;
+		pl[n].lpfn = 0;
+		n++;
+	}
+	return n;
+}
+
+void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
+{
+	struct ttm_device *bdev = bo->base.bdev;
+	struct vmw_private *vmw =
+		container_of(bdev, struct vmw_private, bdev);
+	struct ttm_placement *pl = &bo->placement;
+	bool mem_compatible = false;
+	u32 i;
+
+	pl->placement = bo->places;
+	pl->num_placement = set_placement_list(bo->places, domain);
+
+	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->base.resource) {
+		for (i = 0; i < pl->num_placement; ++i) {
+			if (bo->base.resource->mem_type == TTM_PL_SYSTEM ||
+			    bo->base.resource->mem_type == pl->placement[i].mem_type)
+				mem_compatible = true;
+		}
+		if (!mem_compatible)
+			drm_warn(&vmw->drm,
+				 "%s: Incompatible transition from "
+				 "bo->base.resource->mem_type = %u to domain = %u\n",
+				 __func__, bo->base.resource->mem_type, domain);
+	}
+
+	pl->busy_placement = bo->busy_places;
+	pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
+}
+
+void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
+{
+	struct ttm_device *bdev = bo->base.bdev;
+	struct vmw_private *vmw =
+		container_of(bdev, struct vmw_private, bdev);
+	u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;
+
+	if (vmw->has_mob)
+		domain = VMW_BO_DOMAIN_MOB;
+
+	vmw_bo_placement_set(bo, domain, domain);
+}
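For context, a hedged sketch of how the new helpers might be used together by a caller. The function below is illustrative only and not part of this patch; it simply pairs the default-accelerated placement with a TTM validate, mirroring the pattern used in the hunks above.

/* Hypothetical caller: pick the default accelerated placement and validate. */
static int example_validate_default(struct vmw_bo *vbo,
				    struct ttm_operation_ctx *ctx)
{
	vmw_bo_placement_set_default_accelerated(vbo);
	return ttm_bo_validate(&vbo->base, &vbo->placement, ctx);
}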