// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
 */
+ #include <linux/anon_inodes.h>
#include <linux/device.h>
- #include <linux/dma-buf.h>
- #include <linux/fdtable.h>
#include <linux/idr.h>
+ #include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
- #include <linux/module.h>
#include "tee_private.h"

- MODULE_IMPORT_NS(DMA_BUF);
-
static void release_registered_pages(struct tee_shm *shm)
{
        if (shm->pages) {
@@ -31,16 +28,8 @@ static void release_registered_pages(struct tee_shm *shm)
        }
}

- static void tee_shm_release(struct tee_shm *shm)
+ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
-         struct tee_device *teedev = shm->ctx->teedev;
-
-         if (shm->flags & TEE_SHM_DMA_BUF) {
-                 mutex_lock(&teedev->mutex);
-                 idr_remove(&teedev->idr, shm->id);
-                 mutex_unlock(&teedev->mutex);
-         }
-
        if (shm->flags & TEE_SHM_POOL) {
                struct tee_shm_pool_mgr *poolm;

@@ -67,45 +56,6 @@ static void tee_shm_release(struct tee_shm *shm)
        tee_device_put(teedev);
}

- static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
-                         *attach, enum dma_data_direction dir)
- {
-         return NULL;
- }
-
- static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
-                                      struct sg_table *table,
-                                      enum dma_data_direction dir)
- {
- }
-
- static void tee_shm_op_release(struct dma_buf *dmabuf)
- {
-         struct tee_shm *shm = dmabuf->priv;
-
-         tee_shm_release(shm);
- }
-
- static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
- {
-         struct tee_shm *shm = dmabuf->priv;
-         size_t size = vma->vm_end - vma->vm_start;
-
-         /* Refuse sharing shared memory provided by application */
-         if (shm->flags & TEE_SHM_USER_MAPPED)
-                 return -EINVAL;
-
-         return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
-                                size, vma->vm_page_prot);
- }
-
- static const struct dma_buf_ops tee_shm_dma_buf_ops = {
-         .map_dma_buf = tee_shm_op_map_dma_buf,
-         .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
-         .release = tee_shm_op_release,
-         .mmap = tee_shm_op_mmap,
- };
-
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
        struct tee_device *teedev = ctx->teedev;
@@ -140,6 +90,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                goto err_dev_put;
        }

+         refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_POOL;
        shm->ctx = ctx;
        if (flags & TEE_SHM_DMA_BUF)
@@ -153,39 +104,19 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
                goto err_kfree;
        }

-
        if (flags & TEE_SHM_DMA_BUF) {
-                 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
                mutex_lock(&teedev->mutex);
                shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
                mutex_unlock(&teedev->mutex);
                if (shm->id < 0) {
                        ret = ERR_PTR(shm->id);
                        goto err_pool_free;
                }
-
-                 exp_info.ops = &tee_shm_dma_buf_ops;
-                 exp_info.size = shm->size;
-                 exp_info.flags = O_RDWR;
-                 exp_info.priv = shm;
-
-                 shm->dmabuf = dma_buf_export(&exp_info);
-                 if (IS_ERR(shm->dmabuf)) {
-                         ret = ERR_CAST(shm->dmabuf);
-                         goto err_rem;
-                 }
        }

        teedev_ctx_get(ctx);

        return shm;
- err_rem:
-         if (flags & TEE_SHM_DMA_BUF) {
-                 mutex_lock(&teedev->mutex);
-                 idr_remove(&teedev->idr, shm->id);
-                 mutex_unlock(&teedev->mutex);
-         }
err_pool_free:
        poolm->ops->free(poolm, shm);
err_kfree:
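
Editor's note: with dma_buf_export() gone from tee_shm_alloc(), every tee_shm now starts life with an explicit refcount of one owned by the caller, and the err_rem unwinding path is no longer needed. A minimal usage sketch of the resulting lifecycle (illustrative only; the flag combination and the use of shm->kaddr are assumptions, not part of this patch):

        struct tee_shm *shm;

        shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
        if (IS_ERR(shm))
                return PTR_ERR(shm);

        /* ... use the buffer, e.g. via shm->kaddr ... */

        tee_shm_free(shm);      /* drops the initial reference */
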
@@ -246,6 +177,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                goto err;
        }

+         refcount_set(&shm->refcount, 1);
        shm->flags = flags | TEE_SHM_REGISTER;
        shm->ctx = ctx;
        shm->id = -1;
@@ -306,22 +238,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
                goto err;
        }

-         if (flags & TEE_SHM_DMA_BUF) {
-                 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-                 exp_info.ops = &tee_shm_dma_buf_ops;
-                 exp_info.size = shm->size;
-                 exp_info.flags = O_RDWR;
-                 exp_info.priv = shm;
-
-                 shm->dmabuf = dma_buf_export(&exp_info);
-                 if (IS_ERR(shm->dmabuf)) {
-                         ret = ERR_CAST(shm->dmabuf);
-                         teedev->desc->ops->shm_unregister(ctx, shm);
-                         goto err;
-                 }
-         }
-
        return shm;
err:
        if (shm) {
@@ -339,6 +255,35 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
}
EXPORT_SYMBOL_GPL(tee_shm_register);

+ static int tee_shm_fop_release(struct inode *inode, struct file *filp)
+ {
+         tee_shm_put(filp->private_data);
+         return 0;
+ }
+
+ static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+         struct tee_shm *shm = filp->private_data;
+         size_t size = vma->vm_end - vma->vm_start;
+
+         /* Refuse sharing shared memory provided by application */
+         if (shm->flags & TEE_SHM_USER_MAPPED)
+                 return -EINVAL;
+
+         /* check for overflowing the buffer's size */
+         if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
+                 return -EINVAL;
+
+         return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+                                size, vma->vm_page_prot);
+ }
+
+ static const struct file_operations tee_shm_fops = {
+         .owner = THIS_MODULE,
+         .release = tee_shm_fop_release,
+         .mmap = tee_shm_fop_mmap,
+ };
+
/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm: Shared memory handle
@@ -351,10 +296,11 @@ int tee_shm_get_fd(struct tee_shm *shm)
        if (!(shm->flags & TEE_SHM_DMA_BUF))
                return -EINVAL;

-         get_dma_buf(shm->dmabuf);
-         fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+         /* matched by tee_shm_put() in tee_shm_fop_release() */
+         refcount_inc(&shm->refcount);
+         fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
        if (fd < 0)
-                 dma_buf_put(shm->dmabuf);
+                 tee_shm_put(shm);
        return fd;
}

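
Editor's note: the exported file descriptor is now a plain anonymous inode rather than a dma-buf, so pinning the object for the lifetime of the fd is done by hand: take a reference before anon_inode_getfd(), give it back if no file was created, and drop it in the release fop. A self-contained sketch of this idiom; struct obj and every name in it are hypothetical, not taken from the patch:

        #include <linux/anon_inodes.h>
        #include <linux/fs.h>
        #include <linux/module.h>
        #include <linux/refcount.h>
        #include <linux/slab.h>

        /* Hypothetical refcounted object, for illustration only. */
        struct obj {
                refcount_t refcount;
        };

        static int obj_fop_release(struct inode *inode, struct file *filp)
        {
                struct obj *o = filp->private_data;

                /* Drop the reference the file has owned since obj_get_fd(). */
                if (refcount_dec_and_test(&o->refcount))
                        kfree(o);
                return 0;
        }

        static const struct file_operations obj_fops = {
                .owner = THIS_MODULE,
                .release = obj_fop_release,
        };

        static int obj_get_fd(struct obj *o)
        {
                int fd;

                /* Take the reference the new file will own ... */
                refcount_inc(&o->refcount);
                fd = anon_inode_getfd("obj", &obj_fops, o, O_RDWR);
                /*
                 * ... and give it back if no file was created. The caller
                 * still holds its own reference, so the count cannot hit
                 * zero here.
                 */
                if (fd < 0)
                        refcount_dec(&o->refcount);
                return fd;
        }
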
@@ -364,17 +310,7 @@ int tee_shm_get_fd(struct tee_shm *shm)
 */
void tee_shm_free(struct tee_shm *shm)
{
-         /*
-          * dma_buf_put() decreases the dmabuf reference counter and will
-          * call tee_shm_release() when the last reference is gone.
-          *
-          * In the case of driver private memory we call tee_shm_release
-          * directly instead as it doesn't have a reference counter.
-          */
-         if (shm->flags & TEE_SHM_DMA_BUF)
-                 dma_buf_put(shm->dmabuf);
-         else
-                 tee_shm_release(shm);
+         tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

@@ -481,10 +417,15 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
        teedev = ctx->teedev;
        mutex_lock(&teedev->mutex);
        shm = idr_find(&teedev->idr, id);
+         /*
+          * If the tee_shm was found in the IDR it must have a refcount
+          * larger than 0 due to the guarantee in tee_shm_put() below. So
+          * it's safe to use refcount_inc().
+          */
        if (!shm || shm->ctx != ctx)
                shm = ERR_PTR(-EINVAL);
-         else if (shm->flags & TEE_SHM_DMA_BUF)
-                 get_dma_buf(shm->dmabuf);
+         else
+                 refcount_inc(&shm->refcount);
        mutex_unlock(&teedev->mutex);
        return shm;
}
@@ -496,7 +437,24 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
 */
void tee_shm_put(struct tee_shm *shm)
{
-         if (shm->flags & TEE_SHM_DMA_BUF)
-                 dma_buf_put(shm->dmabuf);
+         struct tee_device *teedev = shm->ctx->teedev;
+         bool do_release = false;
+
+         mutex_lock(&teedev->mutex);
+         if (refcount_dec_and_test(&shm->refcount)) {
+                 /*
+                  * refcount has reached 0, we must now remove it from the
+                  * IDR before releasing the mutex. This will guarantee that
+                  * the refcount_inc() in tee_shm_get_from_id() never starts
+                  * from 0.
+                  */
+                 if (shm->flags & TEE_SHM_DMA_BUF)
+                         idr_remove(&teedev->idr, shm->id);
+                 do_release = true;
+         }
+         mutex_unlock(&teedev->mutex);
+
+         if (do_release)
+                 tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
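
Editor's note: this put path is the heart of the change. The final decrement and the IDR removal happen under the same mutex that tee_shm_get_from_id() holds during lookup, so a concurrent lookup can never observe, and re-increment, a refcount that has already reached zero. A stripped-down sketch of the pattern; obj_get_from_id(), obj_put() and the global table are hypothetical names, not kernel APIs:

        #include <linux/idr.h>
        #include <linux/mutex.h>
        #include <linux/refcount.h>
        #include <linux/slab.h>

        struct obj {
                refcount_t refcount;
                int id;
        };

        static DEFINE_MUTEX(tbl_mutex);
        static DEFINE_IDR(tbl_idr);

        struct obj *obj_get_from_id(int id)
        {
                struct obj *o;

                mutex_lock(&tbl_mutex);
                o = idr_find(&tbl_idr, id);
                /*
                 * Anything still in the IDR holds at least one reference:
                 * obj_put() removes the entry under tbl_mutex before the
                 * count can be seen at zero, so this refcount_inc() never
                 * starts from 0 and cannot resurrect a dying object.
                 */
                if (o)
                        refcount_inc(&o->refcount);
                mutex_unlock(&tbl_mutex);
                return o;
        }

        void obj_put(struct obj *o)
        {
                bool do_release = false;

                mutex_lock(&tbl_mutex);
                if (refcount_dec_and_test(&o->refcount)) {
                        idr_remove(&tbl_idr, o->id);
                        do_release = true;
                }
                mutex_unlock(&tbl_mutex);

                /* Free outside the lock; nothing can find the object now. */
                if (do_release)
                        kfree(o);
        }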