	{ 0x5e4322, 0x5e4322, 0x5e4322, 0x604322, 0xa44f, 0x5ea44f};
	{ 0x5e4322, 0xa44f, 0x5e4322, 0x604322, 0x5ea44f, 0x5ea44f};

#define RTK_UL_EDCA 0xa44f
#define RTK_DL_EDCA 0x5e4322
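/*
 * RTK_UL_EDCA and RTK_DL_EDCA above are the EDCA parameter words used by the
 * EDCA turbo logic further down; which value favors uplink versus downlink
 * traffic is inferred from the UL/DL naming rather than from hardware
 * documentation.
 */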
static void dm_init_bandwidth_autoswitch(struct net_device *dev);
static void dm_check_txpower_tracking(struct net_device *dev);
static void dm_bb_initialgain_restore(struct net_device *dev);
static void dm_bb_initialgain_backup(struct net_device *dev);
static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev);
static void dm_ctrl_initgain_byrssi_by_driverrssi(struct net_device *dev);
static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(struct net_device *dev);
static void dm_check_rx_path_selection(struct net_device *dev);
static void dm_init_rxpath_selection(struct net_device *dev);
static void dm_check_txrateandretrycount(struct net_device *dev);
	dm_init_dynamic_txpower(dev);
	dm_init_bandwidth_autoswitch(dev);
	dm_init_rxpath_selection(dev);
	dm_init_ctstoself(dev);
	dm_deInit_fsync(dev);
#ifdef USB_RX_AGGREGATION_SUPPORT
	static unsigned long lastTxOkCnt = 0;
	static unsigned long lastRxOkCnt = 0;
	unsigned long curTxOkCnt = 0;
	unsigned long curRxOkCnt = 0;

	curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
	curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
	if ((curTxOkCnt + curRxOkCnt) < 15000000) {
	if (curTxOkCnt > 4 * curRxOkCnt) {
	ulValue = (pHTInfo->UsbRxFwAggrEn << 24) | (pHTInfo->UsbRxFwAggrPageNum << 16) |
		  (pHTInfo->UsbRxFwAggrPacketNum << 8) | (pHTInfo->UsbRxFwAggrTimeout);
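	/*
	 * The four USB RX firmware aggregation parameters are packed above
	 * into one 32-bit word: enable flag in bits 31-24, page number in
	 * bits 23-16, packet number in bits 15-8 and timeout in bits 7-0.
	 */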
	lastTxOkCnt = priv->stats.txbytesunicast;
	lastRxOkCnt = priv->stats.rxbytesunicast;
	dm_check_rate_adaptive(dev);
	dm_dynamic_txpower(dev);
	dm_check_txrateandretrycount(dev);
	dm_check_txpower_tracking(dev);
	dm_ctrl_initgain_byrssi(dev);
	dm_check_edca_turbo(dev);
	dm_bandwidth_autoswitch(dev);
	dm_check_rfctrl_gpio(dev);
	dm_check_rx_path_selection(dev);
	dm_check_pbc_gpio(dev);
	dm_send_rssi_tofw(dev);
#ifdef USB_RX_AGGREGATION_SUPPORT
	dm_CheckRxAggregation(dev);

	struct r8192_priv *priv = ieee80211_priv(dev);
static void dm_check_rate_adaptive(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 currentRATR, targetRATR = 0;
	u32 LowRSSIThreshForRA = 0, HighRSSIThreshForRA = 0;
	bool bshort_gi_enabled = false;
	static u8 ping_rssi_state = 0;
	RT_TRACE(COMP_RATE, "<---- dm_check_rate_adaptive(): driver is going to unload\n");
	if (priv->ieee80211->GetHalfNmodeSupportByAPsHandler(dev))
		targetRATR &= 0xf00fffff;
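	/*
	 * 0xf00fffff clears bits 20-27 of the rate bitmap; on this chip those
	 * bits appear to correspond to the two-spatial-stream MCS rates, so
	 * they are dropped when the AP only supports "half N" operation.
	 */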
	if (targetRATR != currentRATR)
	ratr_value = targetRATR;
	RT_TRACE(COMP_RATE, "currentRATR = %x, targetRATR = %x\n", currentRATR, targetRATR);
static void dm_init_bandwidth_autoswitch(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

	priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
	priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable = false;
static void dm_bandwidth_autoswitch(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

	if (priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz == false) {
	priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = true;
	priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = false;
	{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
	{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
	{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
	{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
	{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
	{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
	{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
	{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
	{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
	{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
	{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
	{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}
	{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
	{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
	{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
	{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
	{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
	{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
	{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
	{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
	{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
	{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
	{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
	{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}
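/*
 * The two tables above are the CCK TX swing tables referenced by the power
 * tracking and CCK power adjust code below (channels 1-13 and channel 14);
 * each 8-byte row is one attenuation step. The zeroed tail of the channel 14
 * rows presumably reflects the tighter channel 14 spectral mask.
 */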
static void dm_TXPowerTrackingCallback_TSSI(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	bool bHighpowerstate, viviflag = FALSE;
	u8 powerlevelOFDM24G;
	int i = 0, j = 0, k = 0;
	u8 RF_Type, tmp_report[5] = {0, 0, 0, 0, 0};
	bool rtStatus = true;

	priv->ieee80211->bdynamic_txpower_enable = false;
	Value = (RF_Type << 8) | powerlevelOFDM24G;
	for (j = 0; j <= 30; j++)
	tx_cmd.Value = Value;
	for (i = 0; i <= 30; i++)
	if (Avg_TSSI_Meas == 0)
	for (k = 0; k < 5; k++)
	for (k = 0; k < 5; k++)
	if (tmp_report[k] <= 20)
	for (k = 0; k < 5; k++)
	for (k = 0; k < 5; k++)
		Avg_TSSI_Meas_from_driver += tmp_report[k];
	Avg_TSSI_Meas_from_driver = Avg_TSSI_Meas_from_driver * 100 / 5;
	if (Avg_TSSI_Meas_from_driver > TSSI_13dBm)
		delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm;
	else
		delta = TSSI_13dBm - Avg_TSSI_Meas_from_driver;
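	/*
	 * The five firmware TSSI reports are summed and scaled by 100/5, i.e.
	 * averaged with two extra decimal digits; delta is the absolute
	 * distance from the TSSI_13dBm reference that presumably drives the
	 * subsequent power-index adjustment.
	 */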
	Avg_TSSI_Meas_from_driver = 0;
	for (k = 0; k < 5; k++)
static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
#define ThermalMeterVal 9
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 tmpRegA, TempCCk;
	u8 tmpOFDMindex, tmpCCKindex, tmpCCK20Mindex, tmpCCK40Mindex, tmpval;
	int i = 0, CCKSwingNeedUpdate = 0;

	if (tmpRegA == OFDMSwingTable[i])
	if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0])
	if (tmpRegA < 3 || tmpRegA > 13)
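	/*
	 * At this point tmpRegA is expected to hold the RF thermal meter
	 * reading (hence the 3..13 sanity check above); the index math below
	 * offsets the swing tables by the drift from the calibrated value in
	 * priv->ThermalMeter[0] and clamps the result to the table bounds.
	 */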
	tmpOFDMindex = tmpCCK20Mindex = 6 + (priv->ThermalMeter[0] - (u8)tmpRegA);
	tmpCCK40Mindex = tmpCCK20Mindex - 6;
	if (tmpOFDMindex >= OFDM_Table_Length)
		tmpOFDMindex = OFDM_Table_Length - 1;
	if (tmpCCK20Mindex >= CCK_Table_length)
		tmpCCK20Mindex = CCK_Table_length - 1;
	if (tmpCCK40Mindex >= CCK_Table_length)
		tmpCCK40Mindex = CCK_Table_length - 1;
	tmpOFDMindex = tmpCCK20Mindex = 0;
	tmpOFDMindex = tmpCCK20Mindex = 6 - tmpval;
	tmpCCKindex = tmpCCK40Mindex;
	tmpCCKindex = tmpCCK20Mindex;
	CCKSwingNeedUpdate = 1;
	CCKSwingNeedUpdate = 1;
	CCKSwingNeedUpdate = 1;
	if (CCKSwingNeedUpdate)
	dm_TXPowerTrackingCallback_TSSI(dev);
	dm_TXPowerTrackingCallback_TSSI(dev);
	dm_TXPowerTrackingCallback_ThermalMeter(dev);
static void dm_InitializeTXPowerTracking_TSSI(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

static void dm_InitializeTXPowerTracking_ThermalMeter(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

	struct r8192_priv *priv = ieee80211_priv(dev);
	dm_InitializeTXPowerTracking_TSSI(dev);
	dm_InitializeTXPowerTracking_TSSI(dev);
	dm_InitializeTXPowerTracking_ThermalMeter(dev);
static void dm_CheckTXPowerTracking_TSSI(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u32 tx_power_track_counter = 0;
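	/*
	 * TSSI tracking is rate limited: the check below only fires on every
	 * 30th invocation of this routine and never on the very first call.
	 */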
	if ((tx_power_track_counter % 30 == 0) && (tx_power_track_counter != 0))
	tx_power_track_counter++;
static void dm_CheckTXPowerTracking_ThermalMeter(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u8 TM_Trigger = 0;

static void dm_check_txpower_tracking(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

	dm_CheckTXPowerTracking_TSSI(dev);
	dm_CheckTXPowerTracking_TSSI(dev);
	dm_CheckTXPowerTracking_ThermalMeter(dev);
static void dm_CCKTxPowerAdjust_TSSI(struct net_device *dev, bool bInCH14)
	struct r8192_priv *priv = ieee80211_priv(dev);

static void dm_CCKTxPowerAdjust_ThermalMeter(struct net_device *dev, bool bInCH14)
	struct r8192_priv *priv = ieee80211_priv(dev);
	TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
		  (CCKSwingTable_Ch1_Ch13[priv->CCK_index][1] << 8);

	TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
		  (CCKSwingTable_Ch1_Ch13[priv->CCK_index][3] << 8) +
		  (CCKSwingTable_Ch1_Ch13[priv->CCK_index][4] << 16) +
		  (CCKSwingTable_Ch1_Ch13[priv->CCK_index][5] << 24);

	TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
		  (CCKSwingTable_Ch1_Ch13[priv->CCK_index][7] << 8);

	TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
		  (CCKSwingTable_Ch14[priv->CCK_index][1] << 8);

	TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
		  (CCKSwingTable_Ch14[priv->CCK_index][3] << 8) +
		  (CCKSwingTable_Ch14[priv->CCK_index][4] << 16) +
		  (CCKSwingTable_Ch14[priv->CCK_index][5] << 24);

	TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
		  (CCKSwingTable_Ch14[priv->CCK_index][7] << 8);
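	/*
	 * Each 8-byte swing table row is split into three little-endian
	 * chunks above (bytes 0-1, 2-5 and 6-7); the channel 14 table is used
	 * when bInCH14 is set.
	 */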
	struct r8192_priv *priv = ieee80211_priv(dev);

	dm_CCKTxPowerAdjust_TSSI(dev, binch14);
	dm_CCKTxPowerAdjust_TSSI(dev, binch14);
	dm_CCKTxPowerAdjust_ThermalMeter(dev, binch14);
static void dm_txpower_reset_recovery(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

	struct r8192_priv *priv = ieee80211_priv(dev);
	RT_TRACE(COMP_RATE, "<---- dm_restore_dynamic_mechanism_state(): driver is going to unload\n");
	ratr_value = reg_ratr;
	dm_txpower_reset_recovery(dev);
	dm_bb_initialgain_restore(dev);
static void dm_bb_initialgain_restore(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 bit_mask = 0x7f;

	struct r8192_priv *priv = ieee80211_priv(dev);
	dm_bb_initialgain_backup(dev);

static void dm_bb_initialgain_backup(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

	struct r8192_priv *priv = ieee80211_priv(dev);
	struct r8192_priv *priv = ieee80211_priv(dev);
	DM_RxPathSelTable.Enable = (u8)DM_Value;
	else if (DM_Type == 1)
	DM_RxPathSelTable.DbgMode = (u8)DM_Value;
	else if (DM_Type == 2)
	else if (DM_Type == 3)
	DM_RxPathSelTable.diff_TH = (u8)DM_Value;
	else if (DM_Type == 4)
	else if (DM_Type == 10)
	DM_RxPathSelTable.rf_rssi[0] = (u8)DM_Value;
	else if (DM_Type == 11)
	DM_RxPathSelTable.rf_rssi[1] = (u8)DM_Value;
	else if (DM_Type == 12)
	DM_RxPathSelTable.rf_rssi[2] = (u8)DM_Value;
	else if (DM_Type == 13)
	DM_RxPathSelTable.rf_rssi[3] = (u8)DM_Value;
	else if (DM_Type == 20)
	else if (DM_Type == 21)
static void dm_dig_init(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

static void dm_ctrl_initgain_byrssi(struct net_device *dev)
	dm_ctrl_initgain_byrssi_by_fwfalse_alarm(dev);
	dm_ctrl_initgain_byrssi_by_driverrssi(dev);
static void dm_ctrl_initgain_byrssi_by_driverrssi(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	dm_initial_gain(dev);
	dm_digtable.pre_connect_state = dm_digtable.cur_connect_state;
static void dm_ctrl_initgain_byrssi_by_fwfalse_alarm(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u32 reset_cnt = 0;
	dm_ctrl_initgain_byrssi_highpwr(dev);
	if (reset_flag == 1)
	dm_ctrl_initgain_byrssi_highpwr(dev);
static void dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u32 reset_cnt_highpwr = 0;
static void dm_initial_gain(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u32 reset_cnt = 0;
	if (dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
	    || !initialized || force_write)
static void dm_pd_th(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u8 initialized = 0, force_write = 0;
	static u32 reset_cnt = 0;
	if (dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
	    (initialized <= 3) || force_write)
	if (initialized <= 3)
static void dm_cs_ratio(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u8 initialized = 0, force_write = 0;
	static u32 reset_cnt = 0;
	if (dm_digtable.pre_connect_state == dm_digtable.cur_connect_state)
	    !initialized || force_write)

	struct r8192_priv *priv = ieee80211_priv(dev);
	priv->ieee80211->bis_any_nonbepkts = false;
static void dm_check_edca_turbo(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	static unsigned long lastTxOkCnt = 0;
	static unsigned long lastRxOkCnt = 0;
	unsigned long curTxOkCnt = 0;
	unsigned long curRxOkCnt = 0;

	goto dm_CheckEdcaTurbo_EXIT;
	goto dm_CheckEdcaTurbo_EXIT;

	curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
	curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
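	/*
	 * Traffic-direction heuristic: if unicast RX bytes in the last
	 * interval exceed four times the TX bytes, the link is treated as
	 * downlink-heavy, which presumably selects the DL EDCA parameter set
	 * defined near the top of the file; otherwise the UL set is used.
	 */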
	if (curRxOkCnt > 4 * curTxOkCnt)
	if (pAciAifsn->f.ACM)
	RT_TRACE(COMP_QOS, "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl);
dm_CheckEdcaTurbo_EXIT:
	priv->ieee80211->bis_any_nonbepkts = false;
	lastTxOkCnt = priv->stats.txbytesunicast;
	lastRxOkCnt = priv->stats.rxbytesunicast;
	else if (DM_Type == 1)

static void dm_init_ctstoself(struct net_device *dev)

static void dm_ctstoself(struct net_device *dev)
	static unsigned long lastTxOkCnt = 0;
	static unsigned long lastRxOkCnt = 0;
	unsigned long curTxOkCnt = 0;
	unsigned long curRxOkCnt = 0;
	curTxOkCnt = priv->stats.txbytesunicast - lastTxOkCnt;
	curRxOkCnt = priv->stats.rxbytesunicast - lastRxOkCnt;
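	/*
	 * Same 4:1 RX-to-TX byte ratio as the EDCA turbo check; the result
	 * decides whether forcing CTS-to-self protection is worthwhile for
	 * the next interval.
	 */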
	if (curRxOkCnt > 4 * curTxOkCnt)
	lastTxOkCnt = priv->stats.txbytesunicast;
	lastRxOkCnt = priv->stats.rxbytesunicast;
static void dm_check_rfctrl_gpio(struct net_device *dev)

static void dm_check_pbc_gpio(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	if (tmp1byte == 0xff)
	if (tmp1byte & BIT6 || tmp1byte & BIT0)
	bool bActuallySet = false;
	bActuallySet = true;
	bActuallySet = true;
	if (rfpath & (0x01 << i))
	if (!DM_RxPathSelTable.Enable)
	dm_rxpath_sel_byrssi(dev);
static void dm_init_rxpath_selection(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	DM_RxPathSelTable.Enable = 1;
static void dm_rxpath_sel_byrssi(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	u8 i, max_rssi_index = 0, min_rssi_index = 0, sec_rssi_index = 0, rf_num = 0;
	u8 tmp_max_rssi = 0, tmp_min_rssi = 0, tmp_sec_rssi = 0;
	u8 cck_default_Rx = 0x2;
	u8 cck_optional_Rx = 0x3;
	long tmp_cck_max_pwdb = 0, tmp_cck_min_pwdb = 0, tmp_cck_sec_pwdb = 0;
	u8 cck_rx_ver2_max_index = 0, cck_rx_ver2_min_index = 0, cck_rx_ver2_sec_index = 0;
	static u8 disabled_rf_cnt = 0, cck_Rx_Path_initialized = 0;
	u8 update_cck_rx_path;
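	/*
	 * The loops below rank the receive paths twice, once by per-RF RSSI
	 * (max / second / min) and once by CCK PWDB; the weakest path can then
	 * be disabled when it lags the strongest by at least diff_TH, and the
	 * CCK RX path is rebuilt from the two best candidates.
	 */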
	if (!cck_Rx_Path_initialized)
	cck_Rx_Path_initialized = 1;
	if (!DM_RxPathSelTable.DbgMode)
		DM_RxPathSelTable.rf_rssi[i] = priv->stats.rx_rssi_percentage[i];
	cur_rf_rssi = DM_RxPathSelTable.rf_rssi[i];
	max_rssi_index = min_rssi_index = sec_rssi_index = i;
	tmp_max_rssi = tmp_min_rssi = tmp_sec_rssi = cur_rf_rssi;
	else if (rf_num == 2)
	if (cur_rf_rssi >= tmp_max_rssi)
	tmp_max_rssi = cur_rf_rssi;
	tmp_sec_rssi = tmp_min_rssi = cur_rf_rssi;
	sec_rssi_index = min_rssi_index = i;
	if (cur_rf_rssi > tmp_max_rssi)
	tmp_sec_rssi = tmp_max_rssi;
	sec_rssi_index = max_rssi_index;
	tmp_max_rssi = cur_rf_rssi;
	else if (cur_rf_rssi == tmp_max_rssi)
	tmp_sec_rssi = cur_rf_rssi;
	else if ((cur_rf_rssi < tmp_max_rssi) && (cur_rf_rssi > tmp_sec_rssi))
	tmp_sec_rssi = cur_rf_rssi;
	else if (cur_rf_rssi == tmp_sec_rssi)
	if (tmp_sec_rssi == tmp_min_rssi)
	tmp_sec_rssi = cur_rf_rssi;
	else if ((cur_rf_rssi < tmp_sec_rssi) && (cur_rf_rssi > tmp_min_rssi))
	else if (cur_rf_rssi == tmp_min_rssi)
	if (tmp_sec_rssi == tmp_min_rssi)
	tmp_min_rssi = cur_rf_rssi;
	else if (cur_rf_rssi < tmp_min_rssi)
	tmp_min_rssi = cur_rf_rssi;
	cck_rx_ver2_max_index = cck_rx_ver2_min_index = cck_rx_ver2_sec_index = i;
	tmp_cck_max_pwdb = tmp_cck_min_pwdb = tmp_cck_sec_pwdb = cur_cck_pwdb;
	else if (rf_num == 2)
	if (cur_cck_pwdb >= tmp_cck_max_pwdb)
	tmp_cck_max_pwdb = cur_cck_pwdb;
	cck_rx_ver2_max_index = i;
	tmp_cck_sec_pwdb = tmp_cck_min_pwdb = cur_cck_pwdb;
	cck_rx_ver2_sec_index = cck_rx_ver2_min_index = i;
	if (cur_cck_pwdb > tmp_cck_max_pwdb)
	tmp_cck_sec_pwdb = tmp_cck_max_pwdb;
	cck_rx_ver2_sec_index = cck_rx_ver2_max_index;
	tmp_cck_max_pwdb = cur_cck_pwdb;
	cck_rx_ver2_max_index = i;
	else if (cur_cck_pwdb == tmp_cck_max_pwdb)
	tmp_cck_sec_pwdb = cur_cck_pwdb;
	cck_rx_ver2_sec_index = i;
	else if ((cur_cck_pwdb < tmp_cck_max_pwdb) && (cur_cck_pwdb > tmp_cck_sec_pwdb))
	tmp_cck_sec_pwdb = cur_cck_pwdb;
	cck_rx_ver2_sec_index = i;
	else if (cur_cck_pwdb == tmp_cck_sec_pwdb)
	if (tmp_cck_sec_pwdb == tmp_cck_min_pwdb)
	tmp_cck_sec_pwdb = cur_cck_pwdb;
	cck_rx_ver2_sec_index = i;
	else if ((cur_cck_pwdb < tmp_cck_sec_pwdb) && (cur_cck_pwdb > tmp_cck_min_pwdb))
	else if (cur_cck_pwdb == tmp_cck_min_pwdb)
	if (tmp_cck_sec_pwdb == tmp_cck_min_pwdb)
	tmp_cck_min_pwdb = cur_cck_pwdb;
	cck_rx_ver2_min_index = i;
	else if (cur_cck_pwdb < tmp_cck_min_pwdb)
	tmp_cck_min_pwdb = cur_cck_pwdb;
	cck_rx_ver2_min_index = i;
	update_cck_rx_path = 0;
	cck_default_Rx = cck_rx_ver2_max_index;
	cck_optional_Rx = cck_rx_ver2_sec_index;
	if (tmp_cck_max_pwdb != -64)
		update_cck_rx_path = 1;
	if (tmp_min_rssi < DM_RxPathSelTable.SS_TH_low && disabled_rf_cnt < 2)
	if ((tmp_max_rssi - tmp_min_rssi) >= DM_RxPathSelTable.diff_TH)
	cck_default_Rx = max_rssi_index;
	cck_optional_Rx = sec_rssi_index;
	update_cck_rx_path = 1;

	if (update_cck_rx_path)
	DM_RxPathSelTable.cck_Rx_path = (cck_default_Rx << 2) | (cck_optional_Rx);
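	/*
	 * The CCK RX configuration is a nibble: default path index in bits
	 * 3-2 and the optional (second-best) path index in bits 1-0, matching
	 * the (cck_default_Rx << 2) | cck_optional_Rx packing above.
	 */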
static void dm_check_rx_path_selection(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
static void dm_init_fsync(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

	priv->ieee80211->fsync_time_interval = 500;
	priv->ieee80211->fsync_rate_bitmap = 0x0f000800;
	priv->ieee80211->fsync_rssi_threshold = 30;
	priv->ieee80211->fsync_multiple_timeinterval = 3;
	priv->ieee80211->fsync_firstdiff_ratethreshold = 100;
	priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
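	/*
	 * Default software fsync tuning: fsync_rate_bitmap selects which
	 * entries of the 28-entry received-rate histogram are counted by the
	 * monitoring loop further below, and the two diff thresholds bound
	 * the acceptable per-interval change in that count.
	 */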
static void dm_deInit_fsync(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

	u32 rate_index, rate_count = 0, rate_count_diff = 0;
	bool bSwitchFromCountDiff = false;
	bool bDoubleTimeInterval = false;
	for (rate_index = 0; rate_index <= 27; rate_index++)
	rate_bitmap = 1 << rate_index;
	if (priv->ieee80211->fsync_rate_bitmap & rate_bitmap)
		rate_count += priv->stats.received_rate_histogram[1][rate_index];
	rate_count_diff = 0xffffffff - rate_count + priv->rate_record;
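	/*
	 * priv->rate_record is the count sampled on the previous run; the
	 * expression above appears to be the wrap-around case of the
	 * difference against that snapshot.
	 */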
	if (DiffNum >= priv->ieee80211->fsync_seconddiff_ratethreshold)
		bSwitchFromCountDiff = true;
	if (rate_count_diff <= priv->ieee80211->fsync_firstdiff_ratethreshold)
		bSwitchFromCountDiff = true;
	bDoubleTimeInterval = true;
	if (bDoubleTimeInterval) {

static void dm_StartHWFsync(struct net_device *dev)

static void dm_EndSWFsync(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

static void dm_StartSWFsync(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	priv->ieee80211->fsync_firstdiff_ratethreshold = 600;
	priv->ieee80211->fsync_seconddiff_ratethreshold = 0xffff;
	priv->ieee80211->fsync_firstdiff_ratethreshold = 200;
	priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
	for (rateIndex = 0; rateIndex <= 27; rateIndex++)
	rateBitmap = 1 << rateIndex;
	if (priv->ieee80211->fsync_rate_bitmap & rateBitmap)
static void dm_EndHWFsync(struct net_device *dev)

#define RegC38_Default			0
#define RegC38_NonFsync_Other_AP	1
#define RegC38_Fsync_AP_BCM		2
	struct r8192_priv *priv = ieee80211_priv(dev);
	static u32 reset_cnt = 0;
	RT_TRACE(COMP_HALDM, "RateBitmap 0x%x FirstDiffRateThreshold %d SecondDiffRateThreshold %d\n",
		 priv->ieee80211->fsync_rate_bitmap,
		 priv->ieee80211->fsync_firstdiff_ratethreshold,
		 priv->ieee80211->fsync_seconddiff_ratethreshold);
	dm_StartHWFsync(dev);
	dm_StartHWFsync(dev);
	dm_StartSWFsync(dev);
	dm_StartSWFsync(dev);
	if (reg_c38_State != RegC38_Fsync_AP_BCM)
	if (reg_c38_State != RegC38_NonFsync_Other_AP)
	for (page = 0; page < 5; page++)
		for (offset = 0; offset < 256; offset++)
	for (page = 8; page < 11; page++)
		for (offset = 0; offset < 256; offset++)
	for (page = 12; page < 15; page++)
		for (offset = 0; offset < 256; offset++)
static void dm_init_dynamic_txpower(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

	priv->ieee80211->bdynamic_txpower_enable = true;
static void dm_dynamic_txpower(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);
	unsigned int txhipower_threshhold = 0;
	unsigned int txlowpower_threshold = 0;

	if (priv->ieee80211->bdynamic_txpower_enable != true)
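	/*
	 * txhipower_threshhold and txlowpower_threshold are RSSI-style limits;
	 * the rest of the routine presumably compares the smoothed link
	 * quality against them to switch between normal and reduced TX power.
	 */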
#if defined(RTL8190P) || defined(RTL8192E)
	SetTxPowerLevel8190(Adapter, pHalData->CurrentChannel);
static void dm_check_txrateandretrycount(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);

static void dm_send_rssi_tofw(struct net_device *dev)
	struct r8192_priv *priv = ieee80211_priv(dev);