@@ -46,7 +46,7 @@
 
 static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
 
-/**
+/*
  * Take the heavyweight lock.
  *
  * \param lock lock pointer.
@@ -93,7 +93,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
 	return 0;
 }
 
-/**
+/*
  * This takes a lock forcibly and hands it to context. Should ONLY be used
  * inside *_unlock to give lock to kernel before calling *_dma_schedule.
  *
@@ -150,7 +150,7 @@ static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
 	return 0;
 }
 
-/**
+/*
  * Lock ioctl.
  *
  * \param inode device inode.
@@ -243,7 +243,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
 	return 0;
 }
 
-/**
+/*
  * Unlock ioctl.
  *
  * \param inode device inode.
@@ -275,7 +275,7 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
 	return 0;
 }
 
-/**
+/*
  * This function returns immediately and takes the hw lock
  * with the kernel context if it is free, otherwise it gets the highest priority when and if
  * it is eventually released.
@@ -287,7 +287,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
  * This should be sufficient to wait for GPU idle without
  * having to worry about starvation.
  */
-
 void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
 {
 	int ret;
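Note: the functions touched by this comment cleanup all manipulate the legacy DRM hardware-lock word, a single 32-bit value in which the top two bits mark "held" and "contended" and the low bits carry the locking context (the _DRM_LOCK_HELD, _DRM_LOCK_CONT and _DRM_LOCKING_CONTEXT macros in include/uapi/drm/drm.h). The sketch below is a minimal userspace illustration of the compare-and-swap take loop that drm_lock_take() performs; the try_take_lock() helper and the shortened macro names are hypothetical stand-ins, not the kernel's code.

#include <stdatomic.h>
#include <stdio.h>

/* Simplified stand-ins for _DRM_LOCK_HELD / _DRM_LOCK_CONT /
 * _DRM_LOCKING_CONTEXT from include/uapi/drm/drm.h. */
#define LOCK_HELD 0x80000000U                       /* lock is held */
#define LOCK_CONT 0x40000000U                       /* lock is contended */
#define LOCKING_CONTEXT(l) ((l) & ~(LOCK_HELD | LOCK_CONT))

/* Hypothetical helper mirroring the cmpxchg loop in drm_lock_take():
 * mark the lock held by `context` if it is free; if it is already
 * held, only set the contended bit so the holder knows to hand it
 * off.  Returns 1 if the caller now holds the lock, 0 otherwise. */
static int try_take_lock(atomic_uint *lock, unsigned int context)
{
	unsigned int old, new;

	do {
		old = atomic_load(lock);
		if (old & LOCK_HELD)
			new = old | LOCK_CONT;      /* held: flag contention */
		else
			new = context | LOCK_HELD;  /* free: take it */
	} while (!atomic_compare_exchange_weak(lock, &old, new));

	return !(old & LOCK_HELD);
}

int main(void)
{
	atomic_uint lock = 0;

	/* First caller gets the lock, second only marks contention. */
	printf("ctx 5 take: %d\n", try_take_lock(&lock, 5));   /* 1 */
	printf("holder: %u\n", LOCKING_CONTEXT(atomic_load(&lock)));
	printf("ctx 7 take: %d\n", try_take_lock(&lock, 7));   /* 0 */
	printf("contended: %u\n", !!(atomic_load(&lock) & LOCK_CONT));
	return 0;
}

The kernel's drm_lock_take() additionally holds lock_data->spinlock around the loop and derives the contended bit from its user/kernel waiter counts, but the held/contended bit protocol is the same.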