@@ -293,8 +293,9 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
 
 static int
 pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
-                          struct pvr_vm_context *vm_ctx, u64 device_addr,
-                          u64 size)
+                          struct pvr_vm_context *vm_ctx,
+                          struct pvr_gem_object *pvr_obj,
+                          u64 device_addr, u64 size)
 {
         int err;
 
@@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
                 goto err_bind_op_fini;
         }
 
+        bind_op->pvr_obj = pvr_obj;
         bind_op->vm_ctx = vm_ctx;
         bind_op->device_addr = device_addr;
         bind_op->size = size;
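The new assignment above means an unmap bind op now records which GEM object backs the mapping, so later stages (the drm_exec locking callback and the reference handling further down) can reach it. An abridged sketch of the relevant bind-op state, under the assumption that the real struct in the driver carries additional members (operation type, offset, and so on) not shown here:

/* Abridged sketch of struct pvr_vm_bind_op; only the fields assigned in
 * the hunk above are shown, the real struct has more members. */
struct pvr_vm_bind_op {
        struct pvr_vm_context *vm_ctx;
        struct pvr_gem_object *pvr_obj; /* now populated for unmaps too */
        u64 device_addr;
        u64 size;
        /* ... */
};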
@@ -597,20 +599,6 @@ pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
         return ERR_PTR(err);
 }
 
-/**
- * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
- * @vm_ctx: Target VM context.
- *
- * This function ensures that no mappings are left dangling by unmapping them
- * all in order of ascending device-virtual address.
- */
-void
-pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
-{
-        WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
-                             vm_ctx->gpuvm_mgr.mm_range));
-}
-
 /**
  * pvr_vm_context_release() - Teardown a VM context.
  * @ref_count: Pointer to reference counter of the VM context.
@@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
         struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
         struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
 
-        /* Unmap operations don't have an object to lock. */
-        if (!pvr_obj)
-                return 0;
-
-        /* Acquire lock on the GEM being mapped. */
+        /* Acquire lock on the GEM object being mapped/unmapped. */
         return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
 }
 
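For context, pvr_vm_lock_extra() runs as the ->extra.fn callback of a drm_gpuvm_exec, which is why the NULL check can be dropped once every bind op carries a pvr_obj. A hedged sketch of the wiring, consistent with the partial initializer visible in a later hunk; the flags value is an assumption:

/* Sketch: how the callback above is registered. .extra.fn/.extra.priv
 * are the standard drm_gpuvm_exec hooks; the flags are an assumption. */
struct drm_gpuvm_exec vm_exec = {
        .vm = &vm_ctx->gpuvm_mgr,
        .flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
        .extra = {
                .fn = pvr_vm_lock_extra,
                .priv = &bind_op, /* bind_op->pvr_obj is never NULL now */
        },
};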
@@ -772,8 +756,10 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
 }
 
 /**
- * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
+ * memory.
  * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
  * @device_addr: Virtual device address at the start of the target mapping.
  * @size: Size of the target mapping.
  *
@@ -784,9 +770,13 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
  * * Any error encountered while performing internal operations required to
  *   destroy the mapping (returned from pvr_vm_gpuva_unmap or
  *   pvr_vm_gpuva_remap).
+ *
+ * The vm_ctx->lock must be held when calling this function.
  */
-int
-pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+static int
+pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
+                        struct pvr_gem_object *pvr_obj,
+                        u64 device_addr, u64 size)
 {
         struct pvr_vm_bind_op bind_op = {0};
         struct drm_gpuvm_exec vm_exec = {
@@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
                 },
         };
 
-        int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
-                                            size);
+        int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
+                                            device_addr, size);
         if (err)
                 return err;
 
+        pvr_gem_object_get(pvr_obj);
+
         err = drm_gpuvm_exec_lock(&vm_exec);
         if (err)
                 goto err_cleanup;
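The pvr_gem_object_get() added above pins the object for the lifetime of the bind op. Presumably the matching put happens in the bind-op teardown path reached via err_cleanup (pvr_vm_bind_op_fini() in this driver); the hunk does not show it, so the placement below is an assumption:

/* Reference lifetime in sketch form; where the put lands is an
 * assumption about the cleanup path, not shown in this hunk. */
pvr_gem_object_get(pvr_obj);    /* keep the object alive while unmapping */
/* ... drm_gpuvm_exec_lock(), GPUVM unmap, unlock ... */
pvr_gem_object_put(pvr_obj);    /* balanced in the bind-op teardown */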
@@ -818,6 +810,96 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
         return err;
 }
 
+/**
+ * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
+ * memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+                 u64 device_addr, u64 size)
+{
+        int err;
+
+        mutex_lock(&vm_ctx->lock);
+        err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
+        mutex_unlock(&vm_ctx->lock);
+
+        return err;
+}
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error encountered by drm_gpuva_find,
+ *  * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+        struct pvr_gem_object *pvr_obj;
+        struct drm_gpuva *va;
+        int err;
+
+        mutex_lock(&vm_ctx->lock);
+
+        va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
+        if (va) {
+                pvr_obj = gem_to_pvr_gem(va->gem.obj);
+                err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+                                              va->va.addr, va->va.range);
+        } else {
+                err = -ENOENT;
+        }
+
+        mutex_unlock(&vm_ctx->lock);
+
+        return err;
+}
+
+/**
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+        mutex_lock(&vm_ctx->lock);
+
+        for (;;) {
+                struct pvr_gem_object *pvr_obj;
+                struct drm_gpuva *va;
+
+                va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
+                                          vm_ctx->gpuvm_mgr.mm_start,
+                                          vm_ctx->gpuvm_mgr.mm_range);
+                if (!va)
+                        break;
+
+                pvr_obj = gem_to_pvr_gem(va->gem.obj);
+
+                WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+                                                va->va.addr, va->va.range));
+        }
+
+        mutex_unlock(&vm_ctx->lock);
+}
+
 /* Static data areas are determined by firmware. */
 static const struct drm_pvr_static_data_area static_data_areas[] = {
         {
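Taken together, the change leaves three unmap entry points. A usage sketch; the caller code is illustrative, not part of the commit:

/* Caller already holds the backing object: no VA lookup needed. */
err = pvr_vm_unmap_obj(vm_ctx, pvr_obj, device_addr, size);

/* Caller only knows the device-virtual range: the VA lookup under
 * vm_ctx->lock recovers the object before unmapping. */
err = pvr_vm_unmap(vm_ctx, device_addr, size);

/* Context teardown: unmap everything that is still mapped. */
pvr_vm_unmap_all(vm_ctx);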