/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"

/* private */ struct cw1200_queue_item {
        struct list_head head;
        struct sk_buff *skb;
        u32 packet_id;
        unsigned long queue_timestamp;
        unsigned long xmit_timestamp;
        struct cw1200_txpriv txpriv;
        u8 generation;
};

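/* Flow control helpers. tx_locked_cnt makes lock/unlock nestable:
 * only the first lock stops the corresponding mac80211 queue, and only
 * the matching last unlock wakes it again.
 */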
static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
        struct cw1200_queue_stats *stats = queue->stats;
        if (queue->tx_locked_cnt++ == 0) {
                pr_debug("[TX] Queue %d is locked.\n",
                         queue->queue_id);
                ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
        }
}

static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
        struct cw1200_queue_stats *stats = queue->stats;
        BUG_ON(!queue->tx_locked_cnt);
        if (--queue->tx_locked_cnt == 0) {
                pr_debug("[TX] Queue %d is unlocked.\n",
                         queue->queue_id);
                ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
        }
}

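/* A packet ID packs four bytes: the item's index within the pool,
 * the per-item generation, the queue ID and the per-queue generation.
 * The generation fields let stale IDs from an earlier incarnation of
 * an item or queue be detected and rejected.
 */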
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
                                         u8 *queue_id, u8 *item_generation,
                                         u8 *item_id)
{
        *item_id = (packet_id >> 0) & 0xFF;
        *item_generation = (packet_id >> 8) & 0xFF;
        *queue_id = (packet_id >> 16) & 0xFF;
        *queue_generation = (packet_id >> 24) & 0xFF;
}

static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
                                            u8 item_generation, u8 item_id)
{
        return ((u32)item_id << 0) |
                ((u32)item_generation << 8) |
                ((u32)queue_id << 16) |
                ((u32)queue_generation << 24);
}

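/* GC is split in two phases: expired items are copied onto a private
 * gc_list while the queue lock is held, and their skbs are destroyed
 * later by cw1200_queue_post_gc() without the lock.
 */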
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
                                 struct list_head *gc_list)
{
        struct cw1200_queue_item *item, *tmp;

        list_for_each_entry_safe(item, tmp, gc_list, head) {
                list_del(&item->head);
                stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
                kfree(item);
        }
}

static void cw1200_queue_register_post_gc(struct list_head *gc_list,
                                          struct cw1200_queue_item *item)
{
        struct cw1200_queue_item *gc_item;
        gc_item = kmalloc(sizeof(struct cw1200_queue_item),
                          GFP_ATOMIC);
        BUG_ON(!gc_item);
        memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
        list_add_tail(&gc_item->head, gc_list);
}

static void __cw1200_queue_gc(struct cw1200_queue *queue,
                              struct list_head *head,
                              bool unlock)
{
        struct cw1200_queue_stats *stats = queue->stats;
        struct cw1200_queue_item *item = NULL, *tmp;
        bool wakeup_stats = false;

        list_for_each_entry_safe(item, tmp, &queue->queue, head) {
                if (jiffies - item->queue_timestamp < queue->ttl)
                        break;
                --queue->num_queued;
                --queue->link_map_cache[item->txpriv.link_id];
                spin_lock_bh(&stats->lock);
                --stats->num_queued;
                if (!--stats->link_map_cache[item->txpriv.link_id])
                        wakeup_stats = true;
                spin_unlock_bh(&stats->lock);
                cw1200_debug_tx_ttl(stats->priv);
                cw1200_queue_register_post_gc(head, item);
                item->skb = NULL;
                list_move_tail(&item->head, &queue->free_pool);
        }

        if (wakeup_stats)
                wake_up(&stats->wait_link_id_empty);

        if (queue->overfull) {
                if (queue->num_queued <= (queue->capacity >> 1)) {
                        queue->overfull = false;
                        if (unlock)
                                __cw1200_queue_unlock(queue);
                } else if (item) {
                        unsigned long tmo = item->queue_timestamp + queue->ttl;
                        mod_timer(&queue->gc, tmo);
                        cw1200_pm_stay_awake(&stats->priv->pm_state,
                                             tmo - jiffies);
                }
        }
}

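/* Timer callback: drop frames that have exceeded the queue TTL. */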
static void cw1200_queue_gc(struct timer_list *t)
{
        LIST_HEAD(list);
        struct cw1200_queue *queue = from_timer(queue, t, gc);

        spin_lock_bh(&queue->lock);
        __cw1200_queue_gc(queue, &list, true);
        spin_unlock_bh(&queue->lock);
        cw1200_queue_post_gc(queue->stats, &list);
}

int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
                            size_t map_capacity,
                            cw1200_queue_skb_dtor_t skb_dtor,
                            struct cw1200_common *priv)
{
        memset(stats, 0, sizeof(*stats));
        stats->map_capacity = map_capacity;
        stats->skb_dtor = skb_dtor;
        stats->priv = priv;
        spin_lock_init(&stats->lock);
        init_waitqueue_head(&stats->wait_link_id_empty);

        stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
                                        GFP_KERNEL);
        if (!stats->link_map_cache)
                return -ENOMEM;

        return 0;
}

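/* Initialize one TX queue: a fixed pool of 'capacity' items is
 * allocated up front and threaded onto free_pool, so later queue
 * operations are O(1) list moves with no allocation on the TX path.
 */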
int cw1200_queue_init(struct cw1200_queue *queue,
                      struct cw1200_queue_stats *stats,
                      u8 queue_id,
                      size_t capacity,
                      unsigned long ttl)
{
        size_t i;

        memset(queue, 0, sizeof(*queue));
        queue->stats = stats;
        queue->capacity = capacity;
        queue->queue_id = queue_id;
        queue->ttl = ttl;
        INIT_LIST_HEAD(&queue->queue);
        INIT_LIST_HEAD(&queue->pending);
        INIT_LIST_HEAD(&queue->free_pool);
        spin_lock_init(&queue->lock);
        timer_setup(&queue->gc, cw1200_queue_gc, 0);

        queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
                              GFP_KERNEL);
        if (!queue->pool)
                return -ENOMEM;

        queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
                                        GFP_KERNEL);
        if (!queue->link_map_cache) {
                kfree(queue->pool);
                queue->pool = NULL;
                return -ENOMEM;
        }

        for (i = 0; i < capacity; ++i)
                list_add_tail(&queue->pool[i].head, &queue->free_pool);

        return 0;
}

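/* Flush a queue: every queued and pending frame is destroyed and the
 * queue generation is bumped, invalidating all outstanding packet IDs.
 */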
int cw1200_queue_clear(struct cw1200_queue *queue)
{
        int i;
        LIST_HEAD(gc_list);
        struct cw1200_queue_stats *stats = queue->stats;
        struct cw1200_queue_item *item, *tmp;

        spin_lock_bh(&queue->lock);
        queue->generation++;
        list_splice_tail_init(&queue->queue, &queue->pending);
        list_for_each_entry_safe(item, tmp, &queue->pending, head) {
                WARN_ON(!item->skb);
                cw1200_queue_register_post_gc(&gc_list, item);
                item->skb = NULL;
                list_move_tail(&item->head, &queue->free_pool);
        }
        queue->num_queued = 0;
        queue->num_pending = 0;

        spin_lock_bh(&stats->lock);
        for (i = 0; i < stats->map_capacity; ++i) {
                stats->num_queued -= queue->link_map_cache[i];
                stats->link_map_cache[i] -= queue->link_map_cache[i];
                queue->link_map_cache[i] = 0;
        }
        spin_unlock_bh(&stats->lock);
        if (queue->overfull) {
                queue->overfull = false;
                __cw1200_queue_unlock(queue);
        }
        spin_unlock_bh(&queue->lock);
        wake_up(&stats->wait_link_id_empty);
        cw1200_queue_post_gc(stats, &gc_list);
        return 0;
}

void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
        kfree(stats->link_map_cache);
        stats->link_map_cache = NULL;
}

void cw1200_queue_deinit(struct cw1200_queue *queue)
{
        cw1200_queue_clear(queue);
        del_timer_sync(&queue->gc);
        INIT_LIST_HEAD(&queue->free_pool);
        kfree(queue->pool);
        kfree(queue->link_map_cache);
        queue->pool = NULL;
        queue->link_map_cache = NULL;
        queue->capacity = 0;
}

size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
                                   u32 link_id_map)
{
        size_t ret;
        int i, bit;
        size_t map_capacity = queue->stats->map_capacity;

        if (!link_id_map)
                return 0;

        spin_lock_bh(&queue->lock);
        if (link_id_map == (u32)-1) {
                ret = queue->num_queued - queue->num_pending;
        } else {
                ret = 0;
                for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
                        if (link_id_map & bit)
                                ret += queue->link_map_cache[i];
                }
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}

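/* Enqueue an skb: claim a slot from free_pool, stamp it with a fresh
 * packet ID and account for it in the per-queue and global link maps.
 */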
int cw1200_queue_put(struct cw1200_queue *queue,
                     struct sk_buff *skb,
                     struct cw1200_txpriv *txpriv)
{
        int ret = 0;
        struct cw1200_queue_stats *stats = queue->stats;

        if (txpriv->link_id >= queue->stats->map_capacity)
                return -EINVAL;

        spin_lock_bh(&queue->lock);
        if (!WARN_ON(list_empty(&queue->free_pool))) {
                struct cw1200_queue_item *item = list_first_entry(
                        &queue->free_pool, struct cw1200_queue_item, head);
                BUG_ON(item->skb);

                list_move_tail(&item->head, &queue->queue);
                item->skb = skb;
                item->txpriv = *txpriv;
                item->generation = 0;
                item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
                                                            queue->queue_id,
                                                            item->generation,
                                                            item - queue->pool);
                item->queue_timestamp = jiffies;

                ++queue->num_queued;
                ++queue->link_map_cache[txpriv->link_id];

                spin_lock_bh(&stats->lock);
                ++stats->num_queued;
                ++stats->link_map_cache[txpriv->link_id];
                spin_unlock_bh(&stats->lock);

                /* TX may happen in parallel sometimes.
                 * Leave extra queue slots so we don't overflow.
                 */
                if (!queue->overfull &&
                    queue->num_queued >=
                    (queue->capacity - (num_present_cpus() - 1))) {
                        queue->overfull = true;
                        __cw1200_queue_lock(queue);
                        mod_timer(&queue->gc, jiffies);
                }
        } else {
                ret = -ENOENT;
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}

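/* Fetch the first queued frame whose link ID is in link_id_map and
 * move it to the pending list. The caller is expected to hand the
 * frame to the hardware and later resolve it via cw1200_queue_remove()
 * or cw1200_queue_requeue() using the returned packet ID.
 */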
int cw1200_queue_get(struct cw1200_queue *queue,
                     u32 link_id_map,
                     struct wsm_tx **tx,
                     struct ieee80211_tx_info **tx_info,
                     const struct cw1200_txpriv **txpriv)
{
        int ret = -ENOENT;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;
        bool wakeup_stats = false;

        spin_lock_bh(&queue->lock);
        list_for_each_entry(item, &queue->queue, head) {
                if (link_id_map & BIT(item->txpriv.link_id)) {
                        ret = 0;
                        break;
                }
        }

        if (!WARN_ON(ret)) {
                *tx = (struct wsm_tx *)item->skb->data;
                *tx_info = IEEE80211_SKB_CB(item->skb);
                *txpriv = &item->txpriv;
                (*tx)->packet_id = item->packet_id;
                list_move_tail(&item->head, &queue->pending);
                ++queue->num_pending;
                --queue->link_map_cache[item->txpriv.link_id];
                item->xmit_timestamp = jiffies;

                spin_lock_bh(&stats->lock);
                --stats->num_queued;
                if (!--stats->link_map_cache[item->txpriv.link_id])
                        wakeup_stats = true;
                spin_unlock_bh(&stats->lock);
        }
        spin_unlock_bh(&queue->lock);
        if (wakeup_stats)
                wake_up(&stats->wait_link_id_empty);
        return ret;
}

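/* Return a pending item to the TX queue, e.g. when the transmit
 * attempt must be retried. The item's generation is bumped so the
 * previous packet ID can no longer match it.
 */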
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);

        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation) {
                ret = -ENOENT;
        } else if (item_id >= (unsigned) queue->capacity) {
                WARN_ON(1);
                ret = -EINVAL;
        } else if (item->generation != item_generation) {
                WARN_ON(1);
                ret = -ENOENT;
        } else {
                --queue->num_pending;
                ++queue->link_map_cache[item->txpriv.link_id];

                spin_lock_bh(&stats->lock);
                ++stats->num_queued;
                ++stats->link_map_cache[item->txpriv.link_id];
                spin_unlock_bh(&stats->lock);

                item->generation = ++item_generation;
                item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
                                                            queue_id,
                                                            item_generation,
                                                            item_id);
                list_move(&item->head, &queue->queue);
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}

int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
        struct cw1200_queue_item *item, *tmp;
        struct cw1200_queue_stats *stats = queue->stats;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
                --queue->num_pending;
                ++queue->link_map_cache[item->txpriv.link_id];

                spin_lock_bh(&stats->lock);
                ++stats->num_queued;
                ++stats->link_map_cache[item->txpriv.link_id];
                spin_unlock_bh(&stats->lock);

                ++item->generation;
                item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
                                                            queue->queue_id,
                                                            item->generation,
                                                            item - queue->pool);
                list_move(&item->head, &queue->queue);
        }
        spin_unlock_bh(&queue->lock);

        return 0;
}

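/* Retire a pending item once its transmit status is final: the slot
 * goes back to free_pool and the skb is destroyed after the queue
 * lock is dropped.
 */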
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;
        struct sk_buff *gc_skb = NULL;
        struct cw1200_txpriv gc_txpriv;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);

        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation) {
                ret = -ENOENT;
        } else if (item_id >= (unsigned) queue->capacity) {
                WARN_ON(1);
                ret = -EINVAL;
        } else if (item->generation != item_generation) {
                WARN_ON(1);
                ret = -ENOENT;
        } else {
                gc_txpriv = item->txpriv;
                gc_skb = item->skb;
                item->skb = NULL;
                --queue->num_pending;
                --queue->num_queued;
                ++queue->num_sent;
                ++item->generation;
                /* Use list_move rather than list_move_tail: the entry is
                 * likely still cache-hot, so hand it out again first.
                 */
                list_move(&item->head, &queue->free_pool);

                if (queue->overfull &&
                    (queue->num_queued <= (queue->capacity >> 1))) {
                        queue->overfull = false;
                        __cw1200_queue_unlock(queue);
                }
        }
        spin_unlock_bh(&queue->lock);

        if (gc_skb)
                stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

        return ret;
}

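/* Look up the skb and txpriv for a packet ID without changing queue
 * state; the ID's generation fields are validated first.
 */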
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
                         struct sk_buff **skb,
                         const struct cw1200_txpriv **txpriv)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);

        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation) {
                ret = -ENOENT;
        } else if (item_id >= (unsigned) queue->capacity) {
                WARN_ON(1);
                ret = -EINVAL;
        } else if (item->generation != item_generation) {
                WARN_ON(1);
                ret = -ENOENT;
        } else {
                *skb = item->skb;
                *txpriv = &item->txpriv;
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}

void cw1200_queue_lock(struct cw1200_queue *queue)
{
        spin_lock_bh(&queue->lock);
        __cw1200_queue_lock(queue);
        spin_unlock_bh(&queue->lock);
}

void cw1200_queue_unlock(struct cw1200_queue *queue)
{
        spin_lock_bh(&queue->lock);
        __cw1200_queue_unlock(queue);
        spin_unlock_bh(&queue->lock);
}

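/* Report the oldest xmit timestamp among pending frames, skipping
 * pending_frame_id; returns false if nothing is pending. Callers can
 * compare the timestamp against the current time to spot stuck frames.
 */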
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
                                     unsigned long *timestamp,
                                     u32 pending_frame_id)
{
        struct cw1200_queue_item *item;
        bool ret;

        spin_lock_bh(&queue->lock);
        ret = !list_empty(&queue->pending);
        if (ret) {
                list_for_each_entry(item, &queue->pending, head) {
                        if (item->packet_id != pending_frame_id &&
                            time_before(item->xmit_timestamp, *timestamp))
                                *timestamp = item->xmit_timestamp;
                }
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}

bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
                                 u32 link_id_map)
{
        bool empty = true;

        spin_lock_bh(&stats->lock);
        if (link_id_map == (u32)-1) {
                empty = stats->num_queued == 0;
        } else {
                int i;
                for (i = 0; i < stats->map_capacity; ++i) {
                        if (link_id_map & BIT(i)) {
                                if (stats->link_map_cache[i]) {
                                        empty = false;
                                        break;
                                }
                        }
                }
        }
        spin_unlock_bh(&stats->lock);

        return empty;
}