@@ -33,7 +33,7 @@ struct system_heap_buffer {
33
33
34
34
/*
 * Per-attachment bookkeeping for a system-heap dma-buf.
 *
 * NOTE(review): reconstructed from a mangled diff view. The sg_table is
 * embedded by value (this commit changed it from a separately-allocated
 * pointer), so attach/detach no longer need a second kzalloc/kfree pair.
 */
struct dma_heap_attachment {
	struct device *dev;		/* device this attachment maps for */
	struct sg_table table;		/* private copy of the buffer's sg layout */
	struct list_head list;		/* links into the buffer's attachments list */
	bool mapped;			/* true once dma_map_sgtable() has succeeded */
};
@@ -52,49 +52,41 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
52
52
/*
 * Allocation orders tried from largest to smallest (2^8, 2^4, 2^0 pages),
 * paired elementwise with order_flags[] (defined above this view).
 */
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
54
54
55
/*
 * dup_sg_table - copy the page layout of @from into @to
 * @from: source table whose entries are duplicated
 * @to:   caller-provided table to populate (embedded in the attachment,
 *        so no separate allocation happens here)
 *
 * Allocates @to with the same number of entries as @from->orig_nents and
 * copies each entry's page, length and offset. No pages are duplicated —
 * only the scatterlist metadata.
 *
 * Returns 0 on success or the negative errno from sg_alloc_table().
 * On failure @to needs no cleanup.
 */
static int dup_sg_table(struct sg_table *from, struct sg_table *to)
{
	struct scatterlist *sg, *new_sg;
	int ret, i;

	ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL);
	if (ret)
		return ret;

	/* Walk source entries in order, mirroring each into @to. */
	new_sg = to->sgl;
	for_each_sgtable_sg(from, sg, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
		new_sg = sg_next(new_sg);
	}

	return 0;
}
79
72
80
73
static int system_heap_attach (struct dma_buf * dmabuf ,
81
74
struct dma_buf_attachment * attachment )
82
75
{
83
76
struct system_heap_buffer * buffer = dmabuf -> priv ;
84
77
struct dma_heap_attachment * a ;
85
- struct sg_table * table ;
78
+ int ret ;
86
79
87
80
a = kzalloc (sizeof (* a ), GFP_KERNEL );
88
81
if (!a )
89
82
return - ENOMEM ;
90
83
91
- table = dup_sg_table (& buffer -> sg_table );
92
- if (IS_ERR ( table ) ) {
84
+ ret = dup_sg_table (& buffer -> sg_table , & a -> table );
85
+ if (ret ) {
93
86
kfree (a );
94
- return - ENOMEM ;
87
+ return ret ;
95
88
}
96
89
97
- a -> table = table ;
98
90
a -> dev = attachment -> dev ;
99
91
INIT_LIST_HEAD (& a -> list );
100
92
a -> mapped = false;
@@ -118,16 +110,15 @@ static void system_heap_detach(struct dma_buf *dmabuf,
118
110
list_del (& a -> list );
119
111
mutex_unlock (& buffer -> lock );
120
112
121
- sg_free_table (a -> table );
122
- kfree (a -> table );
113
+ sg_free_table (& a -> table );
123
114
kfree (a );
124
115
}
125
116
126
117
static struct sg_table * system_heap_map_dma_buf (struct dma_buf_attachment * attachment ,
127
118
enum dma_data_direction direction )
128
119
{
129
120
struct dma_heap_attachment * a = attachment -> priv ;
130
- struct sg_table * table = a -> table ;
121
+ struct sg_table * table = & a -> table ;
131
122
int ret ;
132
123
133
124
ret = dma_map_sgtable (attachment -> dev , table , direction , 0 );
@@ -162,7 +153,7 @@ static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
162
153
list_for_each_entry (a , & buffer -> attachments , list ) {
163
154
if (!a -> mapped )
164
155
continue ;
165
- dma_sync_sgtable_for_cpu (a -> dev , a -> table , direction );
156
+ dma_sync_sgtable_for_cpu (a -> dev , & a -> table , direction );
166
157
}
167
158
mutex_unlock (& buffer -> lock );
168
159
@@ -183,7 +174,7 @@ static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
183
174
list_for_each_entry (a , & buffer -> attachments , list ) {
184
175
if (!a -> mapped )
185
176
continue ;
186
- dma_sync_sgtable_for_device (a -> dev , a -> table , direction );
177
+ dma_sync_sgtable_for_device (a -> dev , & a -> table , direction );
187
178
}
188
179
mutex_unlock (& buffer -> lock );
189
180
0 commit comments