blob: 69f8b46c90157fdd91f690b34e157cf6a1ac0129 [file] [log] [blame]
Thomas Gleixnerca47d342019-05-19 15:51:50 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Michael Buesch5100d5a2008-03-29 21:01:16 +01002/*
3
4 Broadcom B43 wireless driver
5
6 PIO data transfer
7
Michael Büscheb032b92011-07-04 20:50:05 +02008 Copyright (c) 2005-2008 Michael Buesch <m@bues.ch>
Michael Buesch5100d5a2008-03-29 21:01:16 +01009
Michael Buesch5100d5a2008-03-29 21:01:16 +010010
11*/
12
13#include "b43.h"
14#include "pio.h"
15#include "dma.h"
16#include "main.h"
17#include "xmit.h"
18
19#include <linux/delay.h>
Alexey Dobriyand43c36d2009-10-07 17:09:06 +040020#include <linux/sched.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090021#include <linux/slab.h>
Michael Buesch5100d5a2008-03-29 21:01:16 +010022
23
Michael Buesch5100d5a2008-03-29 21:01:16 +010024static u16 generate_cookie(struct b43_pio_txqueue *q,
25 struct b43_pio_txpacket *pack)
26{
27 u16 cookie;
28
29 /* Use the upper 4 bits of the cookie as
30 * PIO controller ID and store the packet index number
31 * in the lower 12 bits.
32 * Note that the cookie must never be 0, as this
33 * is a special value used in RX path.
34 * It can also not be 0xFFFF because that is special
35 * for multicast frames.
36 */
37 cookie = (((u16)q->index + 1) << 12);
38 cookie |= pack->index;
39
40 return cookie;
41}
42
43static
John Daiker99da1852009-02-24 02:16:42 -080044struct b43_pio_txqueue *parse_cookie(struct b43_wldev *dev,
45 u16 cookie,
Michael Buesch5100d5a2008-03-29 21:01:16 +010046 struct b43_pio_txpacket **pack)
47{
48 struct b43_pio *pio = &dev->pio;
49 struct b43_pio_txqueue *q = NULL;
50 unsigned int pack_index;
51
52 switch (cookie & 0xF000) {
53 case 0x1000:
54 q = pio->tx_queue_AC_BK;
55 break;
56 case 0x2000:
57 q = pio->tx_queue_AC_BE;
58 break;
59 case 0x3000:
60 q = pio->tx_queue_AC_VI;
61 break;
62 case 0x4000:
63 q = pio->tx_queue_AC_VO;
64 break;
65 case 0x5000:
66 q = pio->tx_queue_mcast;
67 break;
68 }
69 if (B43_WARN_ON(!q))
70 return NULL;
71 pack_index = (cookie & 0x0FFF);
72 if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
73 return NULL;
74 *pack = &q->packets[pack_index];
75
76 return q;
77}
78
79static u16 index_to_pioqueue_base(struct b43_wldev *dev,
80 unsigned int index)
81{
82 static const u16 bases[] = {
83 B43_MMIO_PIO_BASE0,
84 B43_MMIO_PIO_BASE1,
85 B43_MMIO_PIO_BASE2,
86 B43_MMIO_PIO_BASE3,
87 B43_MMIO_PIO_BASE4,
88 B43_MMIO_PIO_BASE5,
89 B43_MMIO_PIO_BASE6,
90 B43_MMIO_PIO_BASE7,
91 };
92 static const u16 bases_rev11[] = {
93 B43_MMIO_PIO11_BASE0,
94 B43_MMIO_PIO11_BASE1,
95 B43_MMIO_PIO11_BASE2,
96 B43_MMIO_PIO11_BASE3,
97 B43_MMIO_PIO11_BASE4,
98 B43_MMIO_PIO11_BASE5,
99 };
100
Rafał Miłecki21d889d2011-05-18 02:06:38 +0200101 if (dev->dev->core_rev >= 11) {
Michael Buesch5100d5a2008-03-29 21:01:16 +0100102 B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
103 return bases_rev11[index];
104 }
105 B43_WARN_ON(index >= ARRAY_SIZE(bases));
106 return bases[index];
107}
108
109static u16 pio_txqueue_offset(struct b43_wldev *dev)
110{
Rafał Miłecki21d889d2011-05-18 02:06:38 +0200111 if (dev->dev->core_rev >= 11)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100112 return 0x18;
113 return 0;
114}
115
116static u16 pio_rxqueue_offset(struct b43_wldev *dev)
117{
Rafał Miłecki21d889d2011-05-18 02:06:38 +0200118 if (dev->dev->core_rev >= 11)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100119 return 0x38;
120 return 8;
121}
122
John Daiker99da1852009-02-24 02:16:42 -0800123static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
124 unsigned int index)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100125{
126 struct b43_pio_txqueue *q;
127 struct b43_pio_txpacket *p;
128 unsigned int i;
129
130 q = kzalloc(sizeof(*q), GFP_KERNEL);
131 if (!q)
132 return NULL;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100133 q->dev = dev;
Rafał Miłecki21d889d2011-05-18 02:06:38 +0200134 q->rev = dev->dev->core_rev;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100135 q->mmio_base = index_to_pioqueue_base(dev, index) +
136 pio_txqueue_offset(dev);
137 q->index = index;
138
139 q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS;
140 if (q->rev >= 8) {
141 q->buffer_size = 1920; //FIXME this constant is wrong.
142 } else {
143 q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE);
144 q->buffer_size -= 80;
145 }
146
147 INIT_LIST_HEAD(&q->packets_list);
148 for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
149 p = &(q->packets[i]);
150 INIT_LIST_HEAD(&p->list);
151 p->index = i;
152 p->queue = q;
153 list_add(&p->list, &q->packets_list);
154 }
155
156 return q;
157}
158
John Daiker99da1852009-02-24 02:16:42 -0800159static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
160 unsigned int index)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100161{
162 struct b43_pio_rxqueue *q;
163
164 q = kzalloc(sizeof(*q), GFP_KERNEL);
165 if (!q)
166 return NULL;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100167 q->dev = dev;
Rafał Miłecki21d889d2011-05-18 02:06:38 +0200168 q->rev = dev->dev->core_rev;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100169 q->mmio_base = index_to_pioqueue_base(dev, index) +
170 pio_rxqueue_offset(dev);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100171
172 /* Enable Direct FIFO RX (PIO) on the engine. */
173 b43_dma_direct_fifo_rx(dev, index, 1);
174
175 return q;
176}
177
178static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
179{
180 struct b43_pio_txpacket *pack;
181 unsigned int i;
182
183 for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
184 pack = &(q->packets[i]);
185 if (pack->skb) {
Felix Fietkau78f18df2012-12-10 17:40:21 +0100186 ieee80211_free_txskb(q->dev->wl->hw, pack->skb);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100187 pack->skb = NULL;
188 }
189 }
190}
191
/* Free a TX queue, dropping any skbs still attached to its slots.
 * @name is currently unused (kept for the destroy_queue_tx() macro). */
static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
				    const char *name)
{
	if (!q)
		return;
	b43_pio_cancel_tx_packets(q);
	kfree(q);
}
200
/* Free an RX queue. @name is currently unused (kept for the
 * destroy_queue_rx() macro). */
static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
				    const char *name)
{
	if (!q)
		return;
	kfree(q);
}
208
/* Destroy a queue and NULL out the corresponding struct b43_pio member.
 * The field name doubles as the debug name via __stringify(). */
#define destroy_queue_tx(pio, queue) do {				\
	b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue));	\
	(pio)->queue = NULL;						\
  } while (0)

#define destroy_queue_rx(pio, queue) do {				\
	b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue));	\
	(pio)->queue = NULL;						\
  } while (0)
218
219void b43_pio_free(struct b43_wldev *dev)
220{
221 struct b43_pio *pio;
222
223 if (!b43_using_pio_transfers(dev))
224 return;
225 pio = &dev->pio;
226
227 destroy_queue_rx(pio, rx_queue);
228 destroy_queue_tx(pio, tx_queue_mcast);
229 destroy_queue_tx(pio, tx_queue_AC_VO);
230 destroy_queue_tx(pio, tx_queue_AC_VI);
231 destroy_queue_tx(pio, tx_queue_AC_BE);
232 destroy_queue_tx(pio, tx_queue_AC_BK);
233}
234
Michael Buesch5100d5a2008-03-29 21:01:16 +0100235int b43_pio_init(struct b43_wldev *dev)
236{
237 struct b43_pio *pio = &dev->pio;
238 int err = -ENOMEM;
239
240 b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
241 & ~B43_MACCTL_BE);
242 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0);
243
244 pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0);
245 if (!pio->tx_queue_AC_BK)
246 goto out;
247
248 pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1);
249 if (!pio->tx_queue_AC_BE)
250 goto err_destroy_bk;
251
252 pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2);
253 if (!pio->tx_queue_AC_VI)
254 goto err_destroy_be;
255
256 pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3);
257 if (!pio->tx_queue_AC_VO)
258 goto err_destroy_vi;
259
260 pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4);
261 if (!pio->tx_queue_mcast)
262 goto err_destroy_vo;
263
264 pio->rx_queue = b43_setup_pioqueue_rx(dev, 0);
265 if (!pio->rx_queue)
266 goto err_destroy_mcast;
267
268 b43dbg(dev->wl, "PIO initialized\n");
269 err = 0;
270out:
271 return err;
272
273err_destroy_mcast:
274 destroy_queue_tx(pio, tx_queue_mcast);
275err_destroy_vo:
276 destroy_queue_tx(pio, tx_queue_AC_VO);
277err_destroy_vi:
278 destroy_queue_tx(pio, tx_queue_AC_VI);
279err_destroy_be:
280 destroy_queue_tx(pio, tx_queue_AC_BE);
281err_destroy_bk:
282 destroy_queue_tx(pio, tx_queue_AC_BK);
283 return err;
284}
285
286/* Static mapping of mac80211's queues (priorities) to b43 PIO queues. */
John Daiker99da1852009-02-24 02:16:42 -0800287static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
288 u8 queue_prio)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100289{
290 struct b43_pio_txqueue *q;
291
Michael Buesch403a3a12009-06-08 21:04:57 +0200292 if (dev->qos_enabled) {
Michael Buesch5100d5a2008-03-29 21:01:16 +0100293 /* 0 = highest priority */
294 switch (queue_prio) {
295 default:
296 B43_WARN_ON(1);
297 /* fallthrough */
298 case 0:
299 q = dev->pio.tx_queue_AC_VO;
300 break;
301 case 1:
302 q = dev->pio.tx_queue_AC_VI;
303 break;
304 case 2:
305 q = dev->pio.tx_queue_AC_BE;
306 break;
307 case 3:
308 q = dev->pio.tx_queue_AC_BK;
309 break;
310 }
311 } else
312 q = dev->pio.tx_queue_AC_BE;
313
314 return q;
315}
316
Michael Bueschd8c17e12008-04-02 19:58:20 +0200317static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
318 u16 ctl,
319 const void *_data,
320 unsigned int data_len)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100321{
Michael Bueschd8c17e12008-04-02 19:58:20 +0200322 struct b43_wldev *dev = q->dev;
Albert Herranz7e937c62009-10-07 00:07:44 +0200323 struct b43_wl *wl = dev->wl;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100324 const u8 *data = _data;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100325
Michael Bueschd8c17e12008-04-02 19:58:20 +0200326 ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
327 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
328
Rafał Miłecki620d7852011-05-17 14:00:00 +0200329 b43_block_write(dev, data, (data_len & ~1),
Michael Bueschd8c17e12008-04-02 19:58:20 +0200330 q->mmio_base + B43_PIO_TXDATA,
331 sizeof(u16));
332 if (data_len & 1) {
Michael Buesch88499ab2009-10-09 20:33:32 +0200333 u8 *tail = wl->pio_tailspace;
334 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
335
Michael Bueschd8c17e12008-04-02 19:58:20 +0200336 /* Write the last byte. */
337 ctl &= ~B43_PIO_TXCTL_WRITEHI;
338 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
Michael Buesch88499ab2009-10-09 20:33:32 +0200339 tail[0] = data[data_len - 1];
340 tail[1] = 0;
Rafał Miłecki620d7852011-05-17 14:00:00 +0200341 b43_block_write(dev, tail, 2,
Michael Bueschb96ab542009-09-23 18:51:21 +0200342 q->mmio_base + B43_PIO_TXDATA,
343 sizeof(u16));
Michael Buesch5100d5a2008-03-29 21:01:16 +0100344 }
Michael Bueschd8c17e12008-04-02 19:58:20 +0200345
346 return ctl;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100347}
348
349static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack,
350 const u8 *hdr, unsigned int hdrlen)
351{
352 struct b43_pio_txqueue *q = pack->queue;
353 const char *frame = pack->skb->data;
354 unsigned int frame_len = pack->skb->len;
355 u16 ctl;
356
357 ctl = b43_piotx_read16(q, B43_PIO_TXCTL);
358 ctl |= B43_PIO_TXCTL_FREADY;
359 ctl &= ~B43_PIO_TXCTL_EOF;
360
361 /* Transfer the header data. */
Michael Bueschd8c17e12008-04-02 19:58:20 +0200362 ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100363 /* Transfer the frame data. */
Michael Bueschd8c17e12008-04-02 19:58:20 +0200364 ctl = tx_write_2byte_queue(q, ctl, frame, frame_len);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100365
366 ctl |= B43_PIO_TXCTL_EOF;
367 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
368}
369
Michael Bueschd8c17e12008-04-02 19:58:20 +0200370static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
371 u32 ctl,
372 const void *_data,
373 unsigned int data_len)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100374{
Michael Bueschd8c17e12008-04-02 19:58:20 +0200375 struct b43_wldev *dev = q->dev;
Albert Herranz7e937c62009-10-07 00:07:44 +0200376 struct b43_wl *wl = dev->wl;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100377 const u8 *data = _data;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100378
Michael Bueschd8c17e12008-04-02 19:58:20 +0200379 ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
380 B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31;
381 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
382
Rafał Miłecki620d7852011-05-17 14:00:00 +0200383 b43_block_write(dev, data, (data_len & ~3),
Michael Bueschd8c17e12008-04-02 19:58:20 +0200384 q->mmio_base + B43_PIO8_TXDATA,
385 sizeof(u32));
386 if (data_len & 3) {
Michael Buesch88499ab2009-10-09 20:33:32 +0200387 u8 *tail = wl->pio_tailspace;
388 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
389
390 memset(tail, 0, 4);
Michael Bueschd8c17e12008-04-02 19:58:20 +0200391 /* Write the last few bytes. */
392 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
393 B43_PIO8_TXCTL_24_31);
Michael Bueschd8c17e12008-04-02 19:58:20 +0200394 switch (data_len & 3) {
395 case 3:
Michael Bueschb96ab542009-09-23 18:51:21 +0200396 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
Michael Buesch88499ab2009-10-09 20:33:32 +0200397 tail[0] = data[data_len - 3];
398 tail[1] = data[data_len - 2];
399 tail[2] = data[data_len - 1];
Michael Bueschb96ab542009-09-23 18:51:21 +0200400 break;
Michael Bueschd8c17e12008-04-02 19:58:20 +0200401 case 2:
402 ctl |= B43_PIO8_TXCTL_8_15;
Michael Buesch88499ab2009-10-09 20:33:32 +0200403 tail[0] = data[data_len - 2];
404 tail[1] = data[data_len - 1];
Michael Bueschb96ab542009-09-23 18:51:21 +0200405 break;
Michael Bueschd8c17e12008-04-02 19:58:20 +0200406 case 1:
Michael Buesch88499ab2009-10-09 20:33:32 +0200407 tail[0] = data[data_len - 1];
Michael Bueschb96ab542009-09-23 18:51:21 +0200408 break;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100409 }
Michael Bueschd8c17e12008-04-02 19:58:20 +0200410 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
Rafał Miłecki620d7852011-05-17 14:00:00 +0200411 b43_block_write(dev, tail, 4,
Michael Bueschb96ab542009-09-23 18:51:21 +0200412 q->mmio_base + B43_PIO8_TXDATA,
413 sizeof(u32));
Michael Buesch5100d5a2008-03-29 21:01:16 +0100414 }
Michael Bueschd8c17e12008-04-02 19:58:20 +0200415
416 return ctl;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100417}
418
419static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
420 const u8 *hdr, unsigned int hdrlen)
421{
422 struct b43_pio_txqueue *q = pack->queue;
423 const char *frame = pack->skb->data;
424 unsigned int frame_len = pack->skb->len;
425 u32 ctl;
426
427 ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
428 ctl |= B43_PIO8_TXCTL_FREADY;
429 ctl &= ~B43_PIO8_TXCTL_EOF;
430
431 /* Transfer the header data. */
Michael Bueschd8c17e12008-04-02 19:58:20 +0200432 ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100433 /* Transfer the frame data. */
Michael Bueschd8c17e12008-04-02 19:58:20 +0200434 ctl = tx_write_4byte_queue(q, ctl, frame, frame_len);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100435
436 ctl |= B43_PIO8_TXCTL_EOF;
437 b43_piotx_write32(q, B43_PIO_TXCTL, ctl);
438}
439
440static int pio_tx_frame(struct b43_pio_txqueue *q,
Johannes Berge039fa42008-05-15 12:55:29 +0200441 struct sk_buff *skb)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100442{
Albert Herranz7e937c62009-10-07 00:07:44 +0200443 struct b43_wldev *dev = q->dev;
444 struct b43_wl *wl = dev->wl;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100445 struct b43_pio_txpacket *pack;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100446 u16 cookie;
447 int err;
448 unsigned int hdrlen;
Johannes Berge039fa42008-05-15 12:55:29 +0200449 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Michael Buesch88499ab2009-10-09 20:33:32 +0200450 struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100451
452 B43_WARN_ON(list_empty(&q->packets_list));
453 pack = list_entry(q->packets_list.next,
454 struct b43_pio_txpacket, list);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100455
456 cookie = generate_cookie(q, pack);
Albert Herranz7e937c62009-10-07 00:07:44 +0200457 hdrlen = b43_txhdr_size(dev);
Michael Buesch88499ab2009-10-09 20:33:32 +0200458 BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
459 B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
460 err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
gregor kowski035d0242009-08-19 22:35:45 +0200461 info, cookie);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100462 if (err)
463 return err;
464
Johannes Berge039fa42008-05-15 12:55:29 +0200465 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
Michael Buesch5100d5a2008-03-29 21:01:16 +0100466 /* Tell the firmware about the cookie of the last
467 * mcast frame, so it can clear the more-data bit in it. */
Albert Herranz7e937c62009-10-07 00:07:44 +0200468 b43_shm_write16(dev, B43_SHM_SHARED,
Michael Buesch5100d5a2008-03-29 21:01:16 +0100469 B43_SHM_SH_MCASTCOOKIE, cookie);
470 }
471
472 pack->skb = skb;
473 if (q->rev >= 8)
Michael Buesch88499ab2009-10-09 20:33:32 +0200474 pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100475 else
Michael Buesch88499ab2009-10-09 20:33:32 +0200476 pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100477
478 /* Remove it from the list of available packet slots.
479 * It will be put back when we receive the status report. */
480 list_del(&pack->list);
481
482 /* Update the queue statistics. */
483 q->buffer_used += roundup(skb->len + hdrlen, 4);
484 q->free_packet_slots -= 1;
485
486 return 0;
487}
488
Johannes Berge039fa42008-05-15 12:55:29 +0200489int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
Michael Buesch5100d5a2008-03-29 21:01:16 +0100490{
491 struct b43_pio_txqueue *q;
492 struct ieee80211_hdr *hdr;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100493 unsigned int hdrlen, total_len;
494 int err = 0;
Johannes Berge039fa42008-05-15 12:55:29 +0200495 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100496
497 hdr = (struct ieee80211_hdr *)skb->data;
Johannes Berge039fa42008-05-15 12:55:29 +0200498
499 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
Michael Buesch5100d5a2008-03-29 21:01:16 +0100500 /* The multicast queue will be sent after the DTIM. */
501 q = dev->pio.tx_queue_mcast;
502 /* Set the frame More-Data bit. Ucode will clear it
503 * for us on the last frame. */
504 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
505 } else {
506 /* Decide by priority where to put this frame. */
Johannes Berge2530082008-05-17 00:57:14 +0200507 q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
Michael Buesch5100d5a2008-03-29 21:01:16 +0100508 }
509
Michael Buesch5100d5a2008-03-29 21:01:16 +0100510 hdrlen = b43_txhdr_size(dev);
511 total_len = roundup(skb->len + hdrlen, 4);
512
513 if (unlikely(total_len > q->buffer_size)) {
514 err = -ENOBUFS;
515 b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
Michael Buesch637dae32009-09-04 22:55:00 +0200516 goto out;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100517 }
518 if (unlikely(q->free_packet_slots == 0)) {
519 err = -ENOBUFS;
520 b43warn(dev->wl, "PIO: TX packet overflow.\n");
Michael Buesch637dae32009-09-04 22:55:00 +0200521 goto out;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100522 }
523 B43_WARN_ON(q->buffer_used > q->buffer_size);
524
525 if (total_len > (q->buffer_size - q->buffer_used)) {
526 /* Not enough memory on the queue. */
527 err = -EBUSY;
Johannes Berge2530082008-05-17 00:57:14 +0200528 ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
Rusty Russell3db1cd52011-12-19 13:56:45 +0000529 q->stopped = true;
Michael Buesch637dae32009-09-04 22:55:00 +0200530 goto out;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100531 }
532
533 /* Assign the queue number to the ring (if not already done before)
534 * so TX status handling can use it. The mac80211-queue to b43-queue
535 * mapping is static, so we don't need to store it per frame. */
Johannes Berge2530082008-05-17 00:57:14 +0200536 q->queue_prio = skb_get_queue_mapping(skb);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100537
Johannes Berge039fa42008-05-15 12:55:29 +0200538 err = pio_tx_frame(q, skb);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100539 if (unlikely(err == -ENOKEY)) {
540 /* Drop this packet, as we don't have the encryption key
541 * anymore and must not transmit it unencrypted. */
Felix Fietkau78f18df2012-12-10 17:40:21 +0100542 ieee80211_free_txskb(dev->wl->hw, skb);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100543 err = 0;
Michael Buesch637dae32009-09-04 22:55:00 +0200544 goto out;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100545 }
546 if (unlikely(err)) {
547 b43err(dev->wl, "PIO transmission failure\n");
Michael Buesch637dae32009-09-04 22:55:00 +0200548 goto out;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100549 }
Michael Buesch5100d5a2008-03-29 21:01:16 +0100550
551 B43_WARN_ON(q->buffer_used > q->buffer_size);
552 if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
553 (q->free_packet_slots == 0)) {
554 /* The queue is full. */
Johannes Berge2530082008-05-17 00:57:14 +0200555 ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
Rusty Russell3db1cd52011-12-19 13:56:45 +0000556 q->stopped = true;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100557 }
558
Michael Buesch637dae32009-09-04 22:55:00 +0200559out:
Michael Buesch5100d5a2008-03-29 21:01:16 +0100560 return err;
561}
562
Michael Buesch5100d5a2008-03-29 21:01:16 +0100563void b43_pio_handle_txstatus(struct b43_wldev *dev,
564 const struct b43_txstatus *status)
565{
566 struct b43_pio_txqueue *q;
567 struct b43_pio_txpacket *pack = NULL;
568 unsigned int total_len;
Johannes Berge039fa42008-05-15 12:55:29 +0200569 struct ieee80211_tx_info *info;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100570
571 q = parse_cookie(dev, status->cookie, &pack);
572 if (unlikely(!q))
573 return;
574 B43_WARN_ON(!pack);
575
Michael Buesch14a7dd62008-06-24 12:22:05 +0200576 info = IEEE80211_SKB_CB(pack->skb);
Johannes Berge039fa42008-05-15 12:55:29 +0200577
Johannes Berge6a98542008-10-21 12:40:02 +0200578 b43_fill_txstatus_report(dev, info, status);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100579
580 total_len = pack->skb->len + b43_txhdr_size(dev);
581 total_len = roundup(total_len, 4);
582 q->buffer_used -= total_len;
583 q->free_packet_slots += 1;
584
Michael Bueschce6c4a12009-09-10 20:22:02 +0200585 ieee80211_tx_status(dev->wl->hw, pack->skb);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100586 pack->skb = NULL;
587 list_add(&pack->list, &q->packets_list);
588
589 if (q->stopped) {
590 ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
Rusty Russell3db1cd52011-12-19 13:56:45 +0000591 q->stopped = false;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100592 }
Michael Buesch5100d5a2008-03-29 21:01:16 +0100593}
594
Michael Buesch5100d5a2008-03-29 21:01:16 +0100595/* Returns whether we should fetch another frame. */
596static bool pio_rx_frame(struct b43_pio_rxqueue *q)
597{
Michael Bueschd8c17e12008-04-02 19:58:20 +0200598 struct b43_wldev *dev = q->dev;
Albert Herranz7e937c62009-10-07 00:07:44 +0200599 struct b43_wl *wl = dev->wl;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100600 u16 len;
John W. Linvillec3e5fac2011-08-24 15:05:14 -0400601 u32 macstat = 0;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100602 unsigned int i, padding;
603 struct sk_buff *skb;
604 const char *err_msg = NULL;
Michael Buesch88499ab2009-10-09 20:33:32 +0200605 struct b43_rxhdr_fw4 *rxhdr =
606 (struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
Guennadi Liakhovetski09009512011-12-26 18:28:08 +0100607 size_t rxhdr_size = sizeof(*rxhdr);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100608
Michael Buesch88499ab2009-10-09 20:33:32 +0200609 BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
Guennadi Liakhovetski09009512011-12-26 18:28:08 +0100610 switch (dev->fw.hdr_format) {
611 case B43_FW_HDR_410:
612 case B43_FW_HDR_351:
613 rxhdr_size -= sizeof(rxhdr->format_598) -
614 sizeof(rxhdr->format_351);
615 break;
616 case B43_FW_HDR_598:
617 break;
618 }
619 memset(rxhdr, 0, rxhdr_size);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100620
621 /* Check if we have data and wait for it to get ready. */
622 if (q->rev >= 8) {
623 u32 ctl;
624
625 ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
626 if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
Zhao, Gang1a2b2502014-02-16 22:31:38 +0800627 return false;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100628 b43_piorx_write32(q, B43_PIO8_RXCTL,
629 B43_PIO8_RXCTL_FRAMERDY);
630 for (i = 0; i < 10; i++) {
631 ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
632 if (ctl & B43_PIO8_RXCTL_DATARDY)
633 goto data_ready;
634 udelay(10);
635 }
636 } else {
637 u16 ctl;
638
639 ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
640 if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
Zhao, Gang1a2b2502014-02-16 22:31:38 +0800641 return false;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100642 b43_piorx_write16(q, B43_PIO_RXCTL,
643 B43_PIO_RXCTL_FRAMERDY);
644 for (i = 0; i < 10; i++) {
645 ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
646 if (ctl & B43_PIO_RXCTL_DATARDY)
647 goto data_ready;
648 udelay(10);
649 }
650 }
651 b43dbg(q->dev->wl, "PIO RX timed out\n");
Zhao, Gang1a2b2502014-02-16 22:31:38 +0800652 return true;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100653data_ready:
654
655 /* Get the preamble (RX header) */
656 if (q->rev >= 8) {
Guennadi Liakhovetski09009512011-12-26 18:28:08 +0100657 b43_block_read(dev, rxhdr, rxhdr_size,
Michael Bueschd8c17e12008-04-02 19:58:20 +0200658 q->mmio_base + B43_PIO8_RXDATA,
659 sizeof(u32));
Michael Buesch5100d5a2008-03-29 21:01:16 +0100660 } else {
Guennadi Liakhovetski09009512011-12-26 18:28:08 +0100661 b43_block_read(dev, rxhdr, rxhdr_size,
Michael Bueschd8c17e12008-04-02 19:58:20 +0200662 q->mmio_base + B43_PIO_RXDATA,
663 sizeof(u16));
Michael Buesch5100d5a2008-03-29 21:01:16 +0100664 }
665 /* Sanity checks. */
Michael Buesch88499ab2009-10-09 20:33:32 +0200666 len = le16_to_cpu(rxhdr->frame_len);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100667 if (unlikely(len > 0x700)) {
668 err_msg = "len > 0x700";
669 goto rx_error;
670 }
671 if (unlikely(len == 0)) {
672 err_msg = "len == 0";
673 goto rx_error;
674 }
675
Rafał Miłecki17030f42011-08-11 17:16:27 +0200676 switch (dev->fw.hdr_format) {
677 case B43_FW_HDR_598:
678 macstat = le32_to_cpu(rxhdr->format_598.mac_status);
679 break;
680 case B43_FW_HDR_410:
681 case B43_FW_HDR_351:
682 macstat = le32_to_cpu(rxhdr->format_351.mac_status);
683 break;
684 }
Michael Buesch5100d5a2008-03-29 21:01:16 +0100685 if (macstat & B43_RX_MAC_FCSERR) {
686 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
687 /* Drop frames with failed FCS. */
688 err_msg = "Frame FCS error";
689 goto rx_error;
690 }
691 }
692
693 /* We always pad 2 bytes, as that's what upstream code expects
694 * due to the RX-header being 30 bytes. In case the frame is
695 * unaligned, we pad another 2 bytes. */
696 padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
697 skb = dev_alloc_skb(len + padding + 2);
698 if (unlikely(!skb)) {
699 err_msg = "Out of memory";
700 goto rx_error;
701 }
702 skb_reserve(skb, 2);
703 skb_put(skb, len + padding);
704 if (q->rev >= 8) {
Rafał Miłecki620d7852011-05-17 14:00:00 +0200705 b43_block_read(dev, skb->data + padding, (len & ~3),
Michael Bueschd8c17e12008-04-02 19:58:20 +0200706 q->mmio_base + B43_PIO8_RXDATA,
707 sizeof(u32));
708 if (len & 3) {
Michael Buesch88499ab2009-10-09 20:33:32 +0200709 u8 *tail = wl->pio_tailspace;
710 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
711
Michael Bueschd8c17e12008-04-02 19:58:20 +0200712 /* Read the last few bytes. */
Rafał Miłecki620d7852011-05-17 14:00:00 +0200713 b43_block_read(dev, tail, 4,
Michael Bueschb96ab542009-09-23 18:51:21 +0200714 q->mmio_base + B43_PIO8_RXDATA,
715 sizeof(u32));
Michael Bueschd8c17e12008-04-02 19:58:20 +0200716 switch (len & 3) {
717 case 3:
Michael Buesch88499ab2009-10-09 20:33:32 +0200718 skb->data[len + padding - 3] = tail[0];
719 skb->data[len + padding - 2] = tail[1];
720 skb->data[len + padding - 1] = tail[2];
Michael Bueschb96ab542009-09-23 18:51:21 +0200721 break;
Michael Bueschd8c17e12008-04-02 19:58:20 +0200722 case 2:
Michael Buesch88499ab2009-10-09 20:33:32 +0200723 skb->data[len + padding - 2] = tail[0];
724 skb->data[len + padding - 1] = tail[1];
Michael Bueschb96ab542009-09-23 18:51:21 +0200725 break;
Michael Bueschd8c17e12008-04-02 19:58:20 +0200726 case 1:
Michael Buesch88499ab2009-10-09 20:33:32 +0200727 skb->data[len + padding - 1] = tail[0];
Michael Bueschb96ab542009-09-23 18:51:21 +0200728 break;
Michael Bueschd8c17e12008-04-02 19:58:20 +0200729 }
Michael Buesch5100d5a2008-03-29 21:01:16 +0100730 }
731 } else {
Rafał Miłecki620d7852011-05-17 14:00:00 +0200732 b43_block_read(dev, skb->data + padding, (len & ~1),
Michael Bueschd8c17e12008-04-02 19:58:20 +0200733 q->mmio_base + B43_PIO_RXDATA,
734 sizeof(u16));
735 if (len & 1) {
Michael Buesch88499ab2009-10-09 20:33:32 +0200736 u8 *tail = wl->pio_tailspace;
737 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
738
Michael Bueschd8c17e12008-04-02 19:58:20 +0200739 /* Read the last byte. */
Rafał Miłecki620d7852011-05-17 14:00:00 +0200740 b43_block_read(dev, tail, 2,
Michael Bueschb96ab542009-09-23 18:51:21 +0200741 q->mmio_base + B43_PIO_RXDATA,
742 sizeof(u16));
Michael Buesch88499ab2009-10-09 20:33:32 +0200743 skb->data[len + padding - 1] = tail[0];
Michael Buesch5100d5a2008-03-29 21:01:16 +0100744 }
745 }
746
Michael Buesch88499ab2009-10-09 20:33:32 +0200747 b43_rx(q->dev, skb, rxhdr);
Michael Buesch5100d5a2008-03-29 21:01:16 +0100748
Zhao, Gang1a2b2502014-02-16 22:31:38 +0800749 return true;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100750
751rx_error:
752 if (err_msg)
753 b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
Michael Bueschc2861812009-11-07 18:54:22 +0100754 if (q->rev >= 8)
755 b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
756 else
757 b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
758
Zhao, Gang1a2b2502014-02-16 22:31:38 +0800759 return true;
Michael Buesch5100d5a2008-03-29 21:01:16 +0100760}
761
/* Drain the RX FIFO until the hardware reports no more frames.
 * The 10000-iteration cap guards against a wedged device. */
void b43_pio_rx(struct b43_pio_rxqueue *q)
{
	unsigned int count = 0;

	while (pio_rx_frame(q)) {
		cond_resched();
		if (WARN_ON_ONCE(++count > 10000))
			break;
	}
}
776
777static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
778{
Michael Buesch5100d5a2008-03-29 21:01:16 +0100779 if (q->rev >= 8) {
780 b43_piotx_write32(q, B43_PIO8_TXCTL,
781 b43_piotx_read32(q, B43_PIO8_TXCTL)
782 | B43_PIO8_TXCTL_SUSPREQ);
783 } else {
784 b43_piotx_write16(q, B43_PIO_TXCTL,
785 b43_piotx_read16(q, B43_PIO_TXCTL)
786 | B43_PIO_TXCTL_SUSPREQ);
787 }
Michael Buesch5100d5a2008-03-29 21:01:16 +0100788}
789
790static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
791{
Michael Buesch5100d5a2008-03-29 21:01:16 +0100792 if (q->rev >= 8) {
793 b43_piotx_write32(q, B43_PIO8_TXCTL,
794 b43_piotx_read32(q, B43_PIO8_TXCTL)
795 & ~B43_PIO8_TXCTL_SUSPREQ);
796 } else {
797 b43_piotx_write16(q, B43_PIO_TXCTL,
798 b43_piotx_read16(q, B43_PIO_TXCTL)
799 & ~B43_PIO_TXCTL_SUSPREQ);
800 }
Michael Buesch5100d5a2008-03-29 21:01:16 +0100801}
802
803void b43_pio_tx_suspend(struct b43_wldev *dev)
804{
805 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
806 b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK);
807 b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE);
808 b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI);
809 b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO);
810 b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast);
811}
812
813void b43_pio_tx_resume(struct b43_wldev *dev)
814{
815 b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast);
816 b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO);
817 b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI);
818 b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE);
819 b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK);
820 b43_power_saving_ctl_bits(dev, 0);
821}