Diffstat (limited to 'tx.c')
-rw-r--r--  tx.c  53
1 file changed, 51 insertions, 2 deletions
@@ -77,7 +77,9 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
 		}
 
 		hw = mt76_tx_status_get_hw(dev, skb);
+		spin_lock_bh(&dev->rx_lock);
 		ieee80211_tx_status_ext(hw, &status);
+		spin_unlock_bh(&dev->rx_lock);
 	}
 	rcu_read_unlock();
 }
@@ -119,6 +121,7 @@ int
 mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
 		       struct sk_buff *skb)
 {
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
 	int pid;
@@ -132,8 +135,14 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
 		return MT_PACKET_ID_NO_ACK;
 
 	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
-			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
+			     IEEE80211_TX_CTL_RATE_CTRL_PROBE))) {
+		if (mtk_wed_device_active(&dev->mmio.wed) &&
+		    ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) ||
+		     ieee80211_is_data(hdr->frame_control)))
+			return MT_PACKET_ID_WED;
+
 		return MT_PACKET_ID_NO_SKB;
+	}
 
 	spin_lock_bh(&dev->status_lock);
 
@@ -261,9 +270,18 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
 #endif
 
 	if (cb->pktid < MT_PACKET_ID_FIRST) {
+		struct ieee80211_rate_status rs = {};
+
 		hw = mt76_tx_status_get_hw(dev, skb);
 		status.sta = wcid_to_sta(wcid);
+		if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
+			rs.rate_idx = wcid->rate;
+			status.rates = &rs;
+			status.n_rates = 1;
+		}
+
+		spin_lock_bh(&dev->rx_lock);
 		ieee80211_tx_status_ext(hw, &status);
+		spin_unlock_bh(&dev->rx_lock);
 		goto out;
 	}
 
@@ -330,7 +348,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
 	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
 	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
 	    !ieee80211_is_data(hdr->frame_control) &&
-	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
+	    !ieee80211_is_bufferable_mmpdu(skb)) {
 		qid = MT_TXQ_PSD;
 	}
 
@@ -756,6 +774,24 @@ int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
 }
 EXPORT_SYMBOL_GPL(mt76_token_consume);
 
+int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
+			  struct mt76_txwi_cache *t, dma_addr_t phys)
+{
+	int token;
+
+	spin_lock_bh(&dev->rx_token_lock);
+	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
+			  GFP_ATOMIC);
+	if (token >= 0) {
+		t->ptr = ptr;
+		t->dma_addr = phys;
+	}
+	spin_unlock_bh(&dev->rx_token_lock);
+
+	return token;
+}
+EXPORT_SYMBOL_GPL(mt76_rx_token_consume);
+
 struct mt76_txwi_cache *
 mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 {
@@ -784,3 +820,16 @@ mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
 	return txwi;
 }
 EXPORT_SYMBOL_GPL(mt76_token_release);
+
+struct mt76_txwi_cache *
+mt76_rx_token_release(struct mt76_dev *dev, int token)
+{
+	struct mt76_txwi_cache *t;
+
+	spin_lock_bh(&dev->rx_token_lock);
+	t = idr_remove(&dev->rx_token, token);
+	spin_unlock_bh(&dev->rx_token_lock);
+
+	return t;
+}
+EXPORT_SYMBOL_GPL(mt76_rx_token_release);
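For context, a minimal usage sketch of the two new rx-token helpers added above. Only mt76_rx_token_consume(), mt76_rx_token_release() and the t->ptr / t->dma_addr fields come from the patch; the example_* wrappers and the rx-fill/rx-completion call sites are hypothetical and only illustrate how the allocate/release pair is expected to be used.

/* Hypothetical sketch, not part of the patch: pair a DMA rx buffer with a
 * token id on the fill path, then look the buffer back up by token on the
 * completion path.
 */
static int example_rx_fill(struct mt76_dev *dev, struct mt76_txwi_cache *t,
			   void *buf, dma_addr_t phys)
{
	/* idr_alloc() under rx_token_lock; returns a token id or a negative
	 * error code
	 */
	int token = mt76_rx_token_consume(dev, buf, t, phys);

	if (token < 0)
		return token;	/* allocation failed, caller keeps the buffer */

	/* the token id would then be programmed into the hardware rx
	 * descriptor (hardware specific, omitted here)
	 */
	return token;
}

static void *example_rx_complete(struct mt76_dev *dev, int token)
{
	/* idr_remove() returns the cache entry stored at fill time,
	 * or NULL for an unknown token
	 */
	struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

	return t ? t->ptr : NULL;
}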
