@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR MIT
 /**************************************************************************
  *
- * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,55 +26,41 @@
  *
  **************************************************************************/

-#include <drm/ttm/ttm_placement.h>
-
+#include "vmwgfx_bo.h"
 #include "vmwgfx_drv.h"
-#include "ttm_object.h"


-/**
- * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
- * vmw_buffer_object.
- *
- * @bo: Pointer to the TTM buffer object.
- * Return: Pointer to the struct vmw_buffer_object embedding the
- * TTM buffer object.
- */
-static struct vmw_buffer_object *
-vmw_buffer_object(struct ttm_buffer_object *bo)
-{
-	return container_of(bo, struct vmw_buffer_object, base);
-}
+#include <drm/ttm/ttm_placement.h>

 /**
- * vmw_bo_bo_free - vmw buffer object destructor
+ * vmw_bo_free - vmw_bo destructor
  *
  * @bo: Pointer to the embedded struct ttm_buffer_object
  */
-static void vmw_bo_bo_free(struct ttm_buffer_object *bo)
+static void vmw_bo_free(struct ttm_buffer_object *bo)
 {
-	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

-	WARN_ON(vmw_bo->dirty);
-	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
-	vmw_bo_unmap(vmw_bo);
+	WARN_ON(vbo->dirty);
+	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+	vmw_bo_unmap(vbo);
 	drm_gem_object_release(&bo->base);
-	kfree(vmw_bo);
+	kfree(vbo);
 }

 /**
- * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
+ * bo_is_vmw - check if the buffer object is a &vmw_bo
  * @bo: ttm buffer object to be checked
  *
  * Uses destroy function associated with the object to determine if this is
- * a &vmw_buffer_object.
+ * a &vmw_bo.
  *
  * Returns:
- * true if the object is of &vmw_buffer_object type, false if not.
+ * true if the object is of &vmw_bo type, false if not.
  */
 static bool bo_is_vmw(struct ttm_buffer_object *bo)
 {
-	return bo->destroy == &vmw_bo_bo_free;
+	return bo->destroy == &vmw_bo_free;
 }

 /**
@@ -88,7 +74,7 @@ static bool bo_is_vmw(struct ttm_buffer_object *bo)
  * -ERESTARTSYS if interrupted by a signal
  */
 int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
-			    struct vmw_buffer_object *buf,
+			    struct vmw_bo *buf,
 			    struct ttm_placement *placement,
 			    bool interruptible)
 {
@@ -130,7 +116,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
  * -ERESTARTSYS if interrupted by a signal
  */
 int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-			      struct vmw_buffer_object *buf,
+			      struct vmw_bo *buf,
 			      bool interruptible)
 {
 	struct ttm_operation_ctx ctx = {interruptible, false};
@@ -178,7 +164,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
  * -ERESTARTSYS if interrupted by a signal
  */
 int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
-		       struct vmw_buffer_object *buf,
+		       struct vmw_bo *buf,
 		       bool interruptible)
 {
 	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
@@ -199,7 +185,7 @@ int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
  * -ERESTARTSYS if interrupted by a signal
  */
 int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
-				struct vmw_buffer_object *buf,
+				struct vmw_bo *buf,
 				bool interruptible)
 {
 	struct ttm_operation_ctx ctx = {interruptible, false};
@@ -263,7 +249,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
  * -ERESTARTSYS if interrupted by a signal
  */
 int vmw_bo_unpin(struct vmw_private *dev_priv,
-		 struct vmw_buffer_object *buf,
+		 struct vmw_bo *buf,
 		 bool interruptible)
 {
 	struct ttm_buffer_object *bo = &buf->base;
@@ -308,7 +294,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
  * @pin: Whether to pin or unpin.
  *
  */
-void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
 {
 	struct ttm_operation_ctx ctx = { false, true };
 	struct ttm_place pl;
@@ -356,7 +342,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
  * 3) Buffer object destruction
  *
  */
-void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
+void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
 {
 	struct ttm_buffer_object *bo = &vbo->base;
 	bool not_used;
@@ -381,9 +367,9 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
  * @vbo: The buffer object whose map we are tearing down.
  *
  * This function tears down a cached map set up using
- * vmw_buffer_object_map_and_cache().
+ * vmw_bo_map_and_cache().
  */
-void vmw_bo_unmap(struct vmw_buffer_object *vbo)
+void vmw_bo_unmap(struct vmw_bo *vbo)
 {
 	if (vbo->map.bo == NULL)
 		return;
@@ -447,7 +433,7 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
 int vmw_bo_create(struct vmw_private *vmw,
 		  size_t size, struct ttm_placement *placement,
 		  bool interruptible, bool pin,
-		  struct vmw_buffer_object **p_bo)
+		  struct vmw_bo **p_bo)
 {
 	int ret;

@@ -473,7 +459,7 @@ int vmw_bo_create(struct vmw_private *vmw,
  * vmw_bo_init - Initialize a vmw buffer object
  *
  * @dev_priv: Pointer to the device private struct
- * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
+ * @vmw_bo: Pointer to the struct vmw_bo to initialize.
  * @size: Buffer object size in bytes.
  * @placement: Initial placement.
  * @interruptible: Whether waits should be performed interruptible.
@@ -483,7 +469,7 @@ int vmw_bo_create(struct vmw_private *vmw,
  * Note that on error, the code will free the buffer object.
  */
 int vmw_bo_init(struct vmw_private *dev_priv,
-		struct vmw_buffer_object *vmw_bo,
+		struct vmw_bo *vmw_bo,
 		size_t size, struct ttm_placement *placement,
 		bool interruptible, bool pin)
 {
@@ -506,7 +492,7 @@ int vmw_bo_init(struct vmw_private *dev_priv,
 	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
 				   ttm_bo_type_device,
 				   placement,
-				   0, &ctx, NULL, NULL, vmw_bo_bo_free);
+				   0, &ctx, NULL, NULL, vmw_bo_free);
 	if (unlikely(ret)) {
 		return ret;
 	}
@@ -519,7 +505,7 @@ int vmw_bo_init(struct vmw_private *dev_priv,
 }

 /**
- * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
  * access, idling previous GPU operations on the buffer and optionally
  * blocking it for further command submissions.
  *
@@ -532,7 +518,7 @@ int vmw_bo_init(struct vmw_private *dev_priv,
  *
  * A blocking grab will be automatically released when @tfile is closed.
  */
-static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
+static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
 				    uint32_t flags)
 {
 	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
@@ -579,7 +565,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
 				       uint32_t handle,
 				       uint32_t flags)
 {
-	struct vmw_buffer_object *vmw_bo;
+	struct vmw_bo *vmw_bo;
 	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

 	if (!ret) {
@@ -611,7 +597,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_vmw_synccpu_arg *arg =
 		(struct drm_vmw_synccpu_arg *) data;
-	struct vmw_buffer_object *vbo;
+	struct vmw_bo *vbo;
 	int ret;

 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -685,14 +671,14 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
  * @filp: The file the handle is registered with.
  * @handle: The user buffer object handle
  * @out: Pointer to a where a pointer to the embedded
- *	struct vmw_buffer_object should be placed.
+ *	struct vmw_bo should be placed.
  * Return: Zero on success, Negative error code on error.
  *
  * The vmw buffer object pointer will be refcounted (both ttm and gem)
  */
 int vmw_user_bo_lookup(struct drm_file *filp,
 		       uint32_t handle,
-		       struct vmw_buffer_object **out)
+		       struct vmw_bo **out)
 {
 	struct drm_gem_object *gobj;

@@ -703,7 +689,7 @@ int vmw_user_bo_lookup(struct drm_file *filp,
 		return -ESRCH;
 	}

-	*out = gem_to_vmw_bo(gobj);
+	*out = to_vmw_bo(gobj);
 	ttm_bo_get(&(*out)->base);

 	return 0;
@@ -762,7 +748,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 		    struct drm_mode_create_dumb *args)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_buffer_object *vbo;
+	struct vmw_bo *vbo;
 	int ret;

 	args->pitch = args->width * ((args->bpp + 7) / 8);
@@ -783,12 +769,12 @@ int vmw_dumb_create(struct drm_file *file_priv,
  */
 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
 {
-	/* Is @bo embedded in a struct vmw_buffer_object? */
+	/* Is @bo embedded in a struct vmw_bo? */
 	if (!bo_is_vmw(bo))
 		return;

 	/* Kill any cached kernel maps before swapout */
-	vmw_bo_unmap(vmw_buffer_object(bo));
+	vmw_bo_unmap(to_vmw_bo(&bo->base));
 }


@@ -805,13 +791,13 @@ void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
 void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 			struct ttm_resource *mem)
 {
-	struct vmw_buffer_object *vbo;
+	struct vmw_bo *vbo;

-	/* Make sure @bo is embedded in a struct vmw_buffer_object? */
+	/* Make sure @bo is embedded in a struct vmw_bo? */
 	if (!bo_is_vmw(bo))
 		return;

-	vbo = container_of(bo, struct vmw_buffer_object, base);
+	vbo = container_of(bo, struct vmw_bo, base);

 	/*
 	 * Kill any cached kernel maps before move to or from VRAM.
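
Note on the conversion helper: to_vmw_bo(), which replaces both the removed static vmw_buffer_object() upcast and the old gem_to_vmw_bo(), is not defined in this file; it comes from the newly added vmwgfx_bo.h header. Its definition is not shown in this diff, but judging from the call sites above (to_vmw_bo(&bo->base) on a struct ttm_buffer_object, and to_vmw_bo(gobj) on a struct drm_gem_object), a minimal sketch of what it presumably looks like:

/*
 * Hypothetical reconstruction of the helper from vmwgfx_bo.h (not part
 * of this diff). It upcasts from the GEM object that TTM embeds in every
 * buffer object (ttm_buffer_object.base) to the driver's vmw_bo wrapper,
 * whose own "base" member is the ttm_buffer_object, as the
 * container_of(bo, struct vmw_bo, base) call in vmw_bo_move_notify() shows.
 */
static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
	return container_of(gobj, struct vmw_bo, base.base);
}

Taking a struct drm_gem_object * rather than a struct ttm_buffer_object * lets GEM-side code such as vmw_user_bo_lookup() convert a looked-up handle directly, while TTM-side callbacks like vmw_bo_free() and vmw_bo_swap_notify() pass &bo->base, as the hunks above show.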