#include <linux/pci.h>
#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>

#include "octep_vf_config.h"
#include "octep_vf_main.h"

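+/* Reset the host-maintained indices of an Rx queue; hardware state is untouched. */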
+static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
+{
+	oq->host_read_idx = 0;
+	oq->host_refill_idx = 0;
+	oq->refill_count = 0;
+	oq->last_pkt_count = 0;
+	oq->pkts_pending = 0;
+}
+
+/**
+ * octep_vf_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ *         -1, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq)
+{
+	struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+	struct page *page;
+	u32 i;
+
+	for (i = 0; i < oq->max_count; i++) {
+		page = dev_alloc_page();
+		if (unlikely(!page)) {
+			dev_err(oq->dev, "Rx buffer alloc failed\n");
+			goto rx_buf_alloc_err;
+		}
+		desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+						       PAGE_SIZE,
+						       DMA_FROM_DEVICE);
+		if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+			dev_err(oq->dev,
+				"OQ-%d buffer alloc: DMA mapping error!\n",
+				oq->q_no);
+			put_page(page);
+			goto dma_map_err;
+		}
+		oq->buff_info[i].page = page;
+	}
+
+	return 0;
+
+dma_map_err:
+rx_buf_alloc_err:
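+	/* Unwind: unmap and release every buffer attached before the failure. */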
+	while (i) {
+		i--;
+		dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+		put_page(oq->buff_info[i].page);
+		oq->buff_info[i].page = NULL;
+	}
+
+	return -1;
+}
+
+/**
+ * octep_vf_setup_oq() - Set up a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be set up.
+ *
+ * Allocate resources for a Rx queue.
+ *
+ * Return: 0 on success; -1 on failure.
+ */
+static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
+{
+	struct octep_vf_oq *oq;
+	u32 desc_ring_size;
+
+	oq = vzalloc(sizeof(*oq));
+	if (!oq)
+		goto create_oq_fail;
+	oct->oq[q_no] = oq;
+
+	oq->octep_vf_dev = oct;
+	oq->netdev = oct->netdev;
+	oq->dev = &oct->pdev->dev;
+	oq->q_no = q_no;
+	oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+	oq->ring_size_mask = oq->max_count - 1;
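+	/* max_count - 1 works as an index mask only when the configured
+	 * ring size is a power of two.
+	 */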
+	oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+	oq->max_single_buffer_size = oq->buffer_size - OCTEP_VF_OQ_RESP_HW_SIZE;
+
+	/* When the hardware/firmware supports additional capabilities,
+	 * an additional header is filled in by Octeon after the length
+	 * field in Rx packets. This header contains additional packet
+	 * information.
+	 */
+	if (oct->fw_info.rx_ol_flags)
+		oq->max_single_buffer_size -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+
+	oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+	desc_ring_size = oq->max_count * OCTEP_VF_OQ_DESC_SIZE;
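+	/* The descriptor ring is read by the device, so it must come from
+	 * coherent DMA memory rather than vmalloc'ed memory.
+	 */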
+	oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+					   &oq->desc_ring_dma, GFP_KERNEL);
+
+	if (unlikely(!oq->desc_ring)) {
+		dev_err(oq->dev,
+			"Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+		goto desc_dma_alloc_err;
+	}
+
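+	/* buff_info is a host-only shadow array tracking the page backing
+	 * each descriptor; the device never accesses it, so plain vzalloc
+	 * memory suffices.
+	 */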
+	oq->buff_info = (struct octep_vf_rx_buffer *)
+			vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE);
+	if (unlikely(!oq->buff_info)) {
+		dev_err(&oct->pdev->dev,
+			"Failed to allocate buffer info for OQ-%d\n", q_no);
+		goto buf_list_err;
+	}
+
+	if (octep_vf_oq_fill_ring_buffers(oq))
+		goto oq_fill_buff_err;
+
+	octep_vf_oq_reset_indices(oq);
+	oct->hw_ops.setup_oq_regs(oct, q_no);
+	oct->num_oqs++;
+
+	return 0;
+
+oq_fill_buff_err:
+	vfree(oq->buff_info);
+	oq->buff_info = NULL;
+buf_list_err:
+	dma_free_coherent(oq->dev, desc_ring_size,
+			  oq->desc_ring, oq->desc_ring_dma);
+	oq->desc_ring = NULL;
+desc_dma_alloc_err:
+	vfree(oq);
+	oct->oq[q_no] = NULL;
+create_oq_fail:
+	return -1;
+}
+
+/**
+ * octep_vf_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free receive buffers in unused Rx queue descriptors.
+ */
+static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq)
+{
+	struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+	int i;
+
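+	/* Nothing to release if the queue was never fully set up. */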
+	if (!oq->desc_ring || !oq->buff_info)
+		return;
+
+	for (i = 0; i < oq->max_count; i++) {
+		if (oq->buff_info[i].page) {
+			dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+				       PAGE_SIZE, DMA_FROM_DEVICE);
+			put_page(oq->buff_info[i].page);
+			oq->buff_info[i].page = NULL;
+			desc_ring[i].buffer_ptr = 0;
+		}
+	}
+	octep_vf_oq_reset_indices(oq);
+}
+
+/**
+ * octep_vf_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ *
+ * Return: 0, always.
+ */
+static int octep_vf_free_oq(struct octep_vf_oq *oq)
+{
+	struct octep_vf_device *oct = oq->octep_vf_dev;
+	int q_no = oq->q_no;
+
+	octep_vf_oq_free_ring_buffers(oq);
+
+	if (oq->buff_info)
+		vfree(oq->buff_info);
+
+	if (oq->desc_ring)
+		dma_free_coherent(oq->dev,
+				  oq->max_count * OCTEP_VF_OQ_DESC_SIZE,
+				  oq->desc_ring, oq->desc_ring_dma);
+
+	vfree(oq);
+	oct->oq[q_no] = NULL;
+	oct->num_oqs--;
+	return 0;
+}
+
/**
 * octep_vf_setup_oqs() - setup resources for all Rx queues.
 *
 * @oct: Octeon device private data structure.
 */
int octep_vf_setup_oqs(struct octep_vf_device *oct)
{
+	int i, retval = 0;
+
+	oct->num_oqs = 0;
+	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+		retval = octep_vf_setup_oq(oct, i);
+		if (retval) {
+			dev_err(&oct->pdev->dev,
+				"Failed to setup OQ(RxQ)-%d.\n", i);
+			goto oq_setup_err;
+		}
+		dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+	}
+
+	return 0;
+
+oq_setup_err:
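+	/* Tear down the queues that were set up before the failure. */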
+	while (i) {
+		i--;
+		octep_vf_free_oq(oct->oq[i]);
+	}
	return -1;
}
@@ -30,6 +239,10 @@ int octep_vf_setup_oqs(struct octep_vf_device *oct)
 */
void octep_vf_oq_dbell_init(struct octep_vf_device *oct)
{
+	int i;
+
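+	/* Credit the full ring to the hardware: every descriptor carries a
+	 * fresh receive buffer it may fill.
+	 */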
+	for (i = 0; i < oct->num_oqs; i++)
+		writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
}

/**
@@ -39,4 +252,13 @@ void octep_vf_oq_dbell_init(struct octep_vf_device *oct)
 */
void octep_vf_free_oqs(struct octep_vf_device *oct)
{
+	int i;
+
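+	/* Walk all possible rings; queues whose setup failed are NULL and
+	 * are skipped.
+	 */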
+	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+		if (!oct->oq[i])
+			continue;
+		octep_vf_free_oq(oct->oq[i]);
+		dev_dbg(&oct->pdev->dev,
+			"Successfully freed OQ(RxQ)-%d.\n", i);
+	}
}