@@ -65,21 +65,26 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
 void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 {
         struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
-        struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
-        if (shmem->pages) {
-                if (shmem->mapped) {
-                        dma_unmap_sg(vgdev->vdev->dev.parent,
-                                     shmem->pages->sgl, shmem->mapped,
-                                     DMA_TO_DEVICE);
-                        shmem->mapped = 0;
+        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+        if (virtio_gpu_is_shmem(bo)) {
+                struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
+                if (shmem->pages) {
+                        if (shmem->mapped) {
+                                dma_unmap_sg(vgdev->vdev->dev.parent,
+                                             shmem->pages->sgl, shmem->mapped,
+                                             DMA_TO_DEVICE);
+                                shmem->mapped = 0;
+                        }
+
+                        sg_free_table(shmem->pages);
+                        shmem->pages = NULL;
+                        drm_gem_shmem_unpin(&bo->base.base);
                 }
-                sg_free_table(shmem->pages);
-                shmem->pages = NULL;
-                drm_gem_shmem_unpin(&bo->base.base);
+
+                drm_gem_shmem_free_object(&bo->base.base);
         }
-        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
-        drm_gem_shmem_free_object(&bo->base.base);
 }
 
 static void virtio_gpu_free_object(struct drm_gem_object *obj)
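For reference, this is how virtio_gpu_cleanup_object() reads once the hunk above is applied (reconstructed directly from the diff; the comments are added here for orientation and are not part of the patch):

void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

        /* The hardware resource id is released for every object type. */
        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);

        /* Backing-store teardown only applies to shmem-backed objects. */
        if (virtio_gpu_is_shmem(bo)) {
                struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

                if (shmem->pages) {
                        if (shmem->mapped) {
                                /* Undo the DMA mapping of the backing pages. */
                                dma_unmap_sg(vgdev->vdev->dev.parent,
                                             shmem->pages->sgl, shmem->mapped,
                                             DMA_TO_DEVICE);
                                shmem->mapped = 0;
                        }

                        sg_free_table(shmem->pages);
                        shmem->pages = NULL;
                        drm_gem_shmem_unpin(&bo->base.base);
                }

                /* Only shmem objects go through the shmem helper's free path;
                 * other object types are expected to be freed by their own
                 * drm_gem_object_funcs callbacks.
                 */
                drm_gem_shmem_free_object(&bo->base.base);
        }
}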
@@ -110,9 +115,9 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
         .mmap = drm_gem_shmem_mmap,
 };
 
-bool virtio_gpu_is_shmem(struct drm_gem_object *obj)
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
 {
-        return obj->funcs == &virtio_gpu_shmem_funcs;
+        return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
 }
 
 struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
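The reworked virtio_gpu_is_shmem() now takes the driver's own object type and identifies the backing store by comparing the embedded GEM object's funcs pointer against virtio_gpu_shmem_funcs; since each object flavour installs its own funcs table at creation time, the pointer comparison doubles as a type tag. A minimal sketch of the same pattern for a second, hypothetical object flavour (virtio_gpu_vram_funcs and virtio_gpu_is_vram below are illustrative names, not taken from this diff):

/* Sketch only: a hypothetical second funcs table and matching type check,
 * shown to illustrate the funcs-pointer comparison pattern used above.
 */
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
        /* .free, .mmap, ... would point at vram-specific callbacks */
};

bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
{
        /* Same idea as virtio_gpu_is_shmem(): the funcs table installed at
         * object creation time identifies the object flavour, so no extra
         * type field is needed in struct virtio_gpu_object.
         */
        return bo->base.base.funcs == &virtio_gpu_vram_funcs;
}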