// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/kernel.h>

#include "mt76x02.h"
#include "mt76x02_phy.h"
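
/*
 * Select the RX path according to the number of RX chains (low nibble of
 * the chain mask). The AGC register is read back after the barrier,
 * presumably to flush the posted register write.
 */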
void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
{
        u32 val;

        val = mt76_rr(dev, MT_BBP(AGC, 0));
        val &= ~BIT(4);

        switch (dev->mphy.chainmask & 0xf) {
        case 2:
                val |= BIT(3);
                break;
        default:
                val &= ~BIT(3);
                break;
        }

        mt76_wr(dev, MT_BBP(AGC, 0), val);
        mb();
        val = mt76_rr(dev, MT_BBP(AGC, 0));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath);
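
/*
 * Enable the second TX DAC path when two TX chains are configured
 * (upper byte of the chain mask), otherwise fall back to a single DAC.
 */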
void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
{
        int txpath;

        txpath = (dev->mphy.chainmask >> 8) & 0xf;
        switch (txpath) {
        case 2:
                mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
                break;
        default:
                mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
                break;
        }
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_txdac);
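
/*
 * Pack four 6-bit per-rate power values into a single 32-bit TX power
 * register, one value per byte lane.
 */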
static u32
mt76x02_tx_power_mask(u8 v1, u8 v2, u8 v3, u8 v4)
{
        u32 val = 0;

        val |= (v1 & (BIT(6) - 1)) << 0;
        val |= (v2 & (BIT(6) - 1)) << 8;
        val |= (v3 & (BIT(6) - 1)) << 16;
        val |= (v4 & (BIT(6) - 1)) << 24;
        return val;
}
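
/* Return the highest entry in the per-rate power table. */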
int mt76x02_get_max_rate_power(struct mt76_rate_power *r)
{
        s8 ret = 0;
        int i;

        for (i = 0; i < sizeof(r->all); i++)
                ret = max(ret, r->all[i]);
        return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_get_max_rate_power);
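
/* Clamp every entry in the per-rate power table to @limit. */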
void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit)
{
        int i;

        for (i = 0; i < sizeof(r->all); i++)
                if (r->all[i] > limit)
                        r->all[i] = limit;
}
EXPORT_SYMBOL_GPL(mt76x02_limit_rate_power);
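
/* Apply a signed offset to every entry in the per-rate power table. */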
void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
{
        int i;

        for (i = 0; i < sizeof(r->all); i++)
                r->all[i] += offset;
}
EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
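
/*
 * Program the per-chain ALC init values and spread the per-rate power
 * table (CCK, OFDM, HT, VHT and STBC rates) across the TX_PWR_CFG
 * registers.
 */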
void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
{
        struct mt76_rate_power *t = &dev->mt76.rate_power;

        mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
        mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);

        mt76_wr(dev, MT_TX_PWR_CFG_0,
                mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
                                      t->ofdm[2]));
        mt76_wr(dev, MT_TX_PWR_CFG_1,
                mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
                                      t->ht[2]));
        mt76_wr(dev, MT_TX_PWR_CFG_2,
                mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
                                      t->ht[10]));
        mt76_wr(dev, MT_TX_PWR_CFG_3,
                mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
                                      t->stbc[2]));
        mt76_wr(dev, MT_TX_PWR_CFG_4,
                mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
        mt76_wr(dev, MT_TX_PWR_CFG_7,
                mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
                                      t->vht[9]));
        mt76_wr(dev, MT_TX_PWR_CFG_8,
                mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
        mt76_wr(dev, MT_TX_PWR_CFG_9,
                mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
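
/*
 * Configure the baseband core/AGC bandwidth and the control channel
 * position for 20/40/80 MHz operation.
 */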
void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
{
        int core_val, agc_val;

        switch (width) {
        case NL80211_CHAN_WIDTH_80:
                core_val = 3;
                agc_val = 7;
                break;
        case NL80211_CHAN_WIDTH_40:
                core_val = 2;
                agc_val = 3;
                break;
        default:
                core_val = 0;
                agc_val = 1;
                break;
        }

        mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
        mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
        mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
        mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_bw);
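
/*
 * Select the 2.4 or 5 GHz TX band and flag whether the primary channel
 * is the upper half of a 40 MHz pair.
 */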
void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
                          bool primary_upper)
{
        switch (band) {
        case NL80211_BAND_2GHZ:
                mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
                mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
                break;
        case NL80211_BAND_5GHZ:
                mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
                mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
                break;
        }

        mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
                       primary_upper);
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_band);
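
/*
 * Dynamic VGA gain adjustment driven by the false CCA counter: raise the
 * gain adjustment while false CCA events stay high, back it off once they
 * drop. Returns true when the gain was changed.
 */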
bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
{
        u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
        bool ret = false;
        u32 false_cca;

        false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
                              mt76_rr(dev, MT_RX_STAT_1));
        dev->cal.false_cca = false_cca;
        if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) {
                dev->cal.agc_gain_adjust += 2;
                ret = true;
        } else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
                   (dev->cal.agc_gain_adjust >= limit && false_cca < 500)) {
                dev->cal.agc_gain_adjust -= 2;
                ret = true;
        }
        dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;

        return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
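
/*
 * Record the initial AGC gain of both RX chains from BBP AGC registers
 * 8 and 9; this is used as the baseline for later gain adjustments.
 */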
void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
{
        dev->cal.agc_gain_init[0] = mt76_get_field(dev, MT_BBP(AGC, 8),
                                                   MT_BBP_AGC_GAIN);
        dev->cal.agc_gain_init[1] = mt76_get_field(dev, MT_BBP(AGC, 9),
                                                   MT_BBP_AGC_GAIN);
        memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
               sizeof(dev->cal.agc_gain_cur));
        dev->cal.low_gain = -1;
        dev->cal.gain_init_done = true;
}
EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);