Atheros TX DMA analysis
Main data structures
struct ath_softc {
struct ath_hal *sc_ah; /* Atheros HAL */
struct ath_descdma sc_txdma; /* TX descriptors */
ath_bufhead sc_txbuf; /* transmit buffer */
struct ath_descdma sc_txsdma;
struct ath_buf *sc_rxbufptr;
struct ath_rx_status *sc_rxsptr;
u_int sc_txqsetup; /* h/w queues setup */
struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
HAL_TXQ_INFO sc_beacon_qi; /* adhoc only: beacon queue parameters */
u_int32_t sc_txdesclen; /* Tx descriptor length */
struct ath_descdma sc_bdma; /* beacon descriptors */
ath_bufhead sc_bbuf; /* beacon buffers */
u_int sc_bhalq; /* HAL q for outgoing beacons */
}
/* Receive FIFO management */
struct ath_rx_edma {
wbuf_t *rxfifo;
u_int8_t rxfifoheadindex;
u_int8_t rxfifotailindex;
u_int8_t rxfifodepth; /* count of RXBPs pushed into fifo */
u_int32_t rxfifohwsize; /* Rx FIFO size from HAL */
ath_bufhead rxqueue;
spinlock_t rxqlock;
};
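As a minimal, self-contained sketch of the bookkeeping these fields imply (assumed FIFO size and simplified types, not the driver's code): buffers are pushed to the hardware at the head index and reaped at the tail index, with the depth counter tracking how many are outstanding.

#include <stdio.h>
#include <stdint.h>

#define RXFIFO_SIZE 8                       /* assumed depth, stand-in for rxfifohwsize */

struct rx_edma_model {
    void    *rxfifo[RXFIFO_SIZE];           /* wbuf pointers handed to the hardware */
    uint8_t  headindex;                     /* next slot to push */
    uint8_t  tailindex;                     /* oldest slot still owned by the hardware */
    uint8_t  depth;                         /* buffers currently in the FIFO */
};

static int rxfifo_push(struct rx_edma_model *re, void *wbuf)
{
    if (re->depth == RXFIFO_SIZE)
        return -1;                          /* FIFO full */
    re->rxfifo[re->headindex] = wbuf;
    re->headindex = (uint8_t)((re->headindex + 1) % RXFIFO_SIZE);
    re->depth++;
    return 0;
}

static void *rxfifo_pop(struct rx_edma_model *re)
{
    void *wbuf;

    if (re->depth == 0)
        return NULL;                        /* nothing completed yet */
    wbuf = re->rxfifo[re->tailindex];
    re->rxfifo[re->tailindex] = NULL;
    re->tailindex = (uint8_t)((re->tailindex + 1) % RXFIFO_SIZE);
    re->depth--;
    return wbuf;
}

int main(void)
{
    struct rx_edma_model re = { { 0 }, 0, 0, 0 };
    int dummy[3];

    for (int i = 0; i < 3; i++)
        rxfifo_push(&re, &dummy[i]);
    rxfifo_pop(&re);
    printf("depth=%u head=%u tail=%u\n",
           (unsigned)re.depth, (unsigned)re.headindex, (unsigned)re.tailindex);
    return 0;
}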
/* Transmit Control Descriptor */
struct ar9300_txc {
u_int32_t ds_info; /* descriptor information */
u_int32_t ds_link; /* link pointer */
u_int32_t ds_data0; /* data pointer to 1st buffer */
u_int32_t ds_ctl3; /* DMA control 3 */
u_int32_t ds_data1; /* data pointer to 2nd buffer */
u_int32_t ds_ctl5; /* DMA control 5 */
u_int32_t ds_data2; /* data pointer to 3rd buffer */
u_int32_t ds_ctl7; /* DMA control 7 */
u_int32_t ds_data3; /* data pointer to 4th buffer */
u_int32_t ds_ctl9; /* DMA control 9 */
u_int32_t ds_ctl10; /* DMA control 10 */
u_int32_t ds_ctl11; /* DMA control 11 */
u_int32_t ds_ctl12; /* DMA control 12 */
u_int32_t ds_ctl13; /* DMA control 13 */
u_int32_t ds_ctl14; /* DMA control 14 */
u_int32_t ds_ctl15; /* DMA control 15 */
u_int32_t ds_ctl16; /* DMA control 16 */
u_int32_t ds_ctl17; /* DMA control 17 */
u_int32_t ds_ctl18; /* DMA control 18 */
u_int32_t ds_ctl19; /* DMA control 19 */
u_int32_t ds_ctl20; /* DMA control 20 */
u_int32_t ds_ctl21; /* DMA control 21 */
u_int32_t ds_ctl22; /* DMA control 22 */
u_int32_t ds_pad[9]; /* pad to cache line (128 bytes/32 dwords) */
};
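The layout above pairs four buffer pointers with control words and chains descriptors through ds_link. A hedged sketch of how a multi-segment frame might populate such a descriptor, using a trimmed stand-in struct: a plain length array stands in for the paired ctl words (an assumption about their role), and all addresses are fake.

#include <stdio.h>
#include <stdint.h>

struct txc_model {
    uint32_t ds_link;                       /* phys addr of the next descriptor, 0 = end */
    uint32_t ds_data[4];                    /* phys addrs of up to four data segments */
    uint32_t ds_seglen[4];                  /* per-segment byte counts (stand-in) */
};

static void txc_fill(struct txc_model *txc, const uint32_t *seg_paddr,
                     const uint32_t *seg_len, int nseg)
{
    for (int i = 0; i < 4; i++) {
        txc->ds_data[i]   = (i < nseg) ? seg_paddr[i] : 0;
        txc->ds_seglen[i] = (i < nseg) ? seg_len[i]   : 0;
    }
    txc->ds_link = 0;                       /* terminated; patched if another frame follows */
}

int main(void)
{
    struct txc_model d0 = { 0 }, d1 = { 0 };
    uint32_t paddr[2] = { 0x10000000, 0x10001000 };   /* fake DMA addresses */
    uint32_t len[2]   = { 1500, 42 };

    txc_fill(&d0, paddr, len, 2);           /* frame split across two segments */
    txc_fill(&d1, &paddr[1], &len[1], 1);   /* single-segment frame */
    d0.ds_link = 0x20000040;                /* pretend this is d1's physical address */

    printf("d0: link=0x%08x data0=0x%08x len0=%u\n",
           (unsigned)d0.ds_link, (unsigned)d0.ds_data[0], (unsigned)d0.ds_seglen[0]);
    return 0;
}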
struct ath_desc {
/*
* The following definitions are passed directly
 * to the hardware and managed by the HAL. Drivers
* should not touch those elements marked opaque.
*/
u_int32_t ds_link; /* phys address of next descriptor */
u_int32_t ds_data; /* phys address of data buffer */
u_int32_t ds_ctl0; /* opaque DMA control 0 */
u_int32_t ds_ctl1; /* opaque DMA control 1 */
u_int32_t ds_hw[20]; /* opaque h/w region */
/*
* The remaining definitions are managed by software;
* these are valid only after the rx/tx process descriptor
* methods return a non-EINPROGRESS code.
*/
union {
struct ath_tx_status tx;/* xmit status */
struct ath_rx_status rx;/* recv status */
void *stats;
} ds_us;
void *ds_vdata; /* virtual addr of data buffer */
} __packed;
struct ath_txq {
u_int axq_qnum; /* hardware q number */
u_int32_t *axq_link; /* link ptr in last TX desc */
TAILQ_HEAD(, ath_buf) axq_q; /* transmit queue */
spinlock_t axq_lock; /* lock on q and link */
unsigned long axq_lockflags; /* intr state when must cli */
u_int axq_depth; /* queue depth */
#if ATH_TX_BUF_FLOW_CNTL
u_int axq_minfree; /* Number of free tx_bufs required in common buf pool */
u_int axq_num_buf_used; /* Number of used tx_buf for this q */
#endif
u_int8_t axq_aggr_depth; /* aggregates queued */
u_int32_t axq_totalqueued;/* total ever queued */
u_int axq_intrcnt; /* count to determine if descriptor
* should generate int on this txq.
*/
/*
* State for patching up CTS when bursting.
*/
struct ath_buf *axq_linkbuf; /* virtual addr of last buffer*/
struct ath_desc *axq_lastdsWithCTS; /* first desc of the last descriptor
* that contains CTS
*/
struct ath_desc *axq_gatingds; /* final desc of the gating desc
* that determines whether lastdsWithCTS has
* been DMA'ed or not
*/
/*
* Staging queue for frames awaiting a fast-frame pairing.
*/
TAILQ_HEAD(axq_headtype, ath_buf) axq_stageq;
TAILQ_HEAD(,ath_atx_ac) axq_acq;
#ifdef ATH_SUPERG_COMP
/* scratch compression buffer */
char *axq_compbuf; /* scratch comp buffer */
dma_addr_t axq_compbufp; /* scratch comp buffer (phys)*/
u_int axq_compbufsz; /* scratch comp buffer size */
OS_DMA_MEM_CONTEXT(axq_dmacontext)
#endif
#ifdef ATH_SWRETRY
u_int axq_destmask:1; /*Destination mask for this queue*/
#endif
#if ATH_TX_POLL
int axq_lastq_tick; /* ticks when last packet queued */
#endif
TAILQ_HEAD(, ath_buf) axq_fifo[HAL_TXFIFO_DEPTH];
u_int8_t axq_headindex;
u_int8_t axq_tailindex;
u_int irq_shared:1; /* This queue is shared by the irq_handler() */
/* BEGIN: added by SGR_TEAM 2011/11/29 for: tx */
#if ATH_HW_TXQ_STUCK_WAR
u_int8_t tx_done_stuck_count;
#endif
/* END: added by SGR_TEAM 2011/11/29 for: tx */
};
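Two queueing mechanisms coexist in this struct: the legacy axq_q/axq_link chain and the EDMA axq_fifo ring indexed by axq_headindex/axq_tailindex (which behaves like the RX ring sketched earlier). Below is a minimal model of the pattern the axq_link comment suggests: append a new descriptor chain by patching the previous tail's link word, or by programming the TXDP register (simulated here by a variable) when the queue is empty. Simplified types, not the driver's code.

#include <stdio.h>
#include <stdint.h>

struct desc_model {
    uint32_t ds_link;                       /* phys addr of the next descriptor, 0 = end */
    uint32_t paddr;                         /* this descriptor's own phys addr (bf_daddr) */
};

struct txq_model {
    uint32_t  txdp;                         /* stand-in for the hardware TXDP register */
    uint32_t *axq_link;                     /* &ds_link of the last descriptor queued */
};

/* Append a descriptor chain (first..last) to a running hardware queue. */
static void txq_append(struct txq_model *txq, struct desc_model *first,
                       struct desc_model *last)
{
    if (txq->axq_link == NULL)
        txq->txdp = first->paddr;           /* queue was empty: program TXDP */
    else
        *txq->axq_link = first->paddr;      /* patch the previous chain's tail link */
    txq->axq_link = &last->ds_link;         /* remember where to patch next time */
}

int main(void)
{
    struct txq_model txq = { 0, NULL };
    struct desc_model d0 = { 0, 0x1000 }, d1 = { 0, 0x2000 };

    txq_append(&txq, &d0, &d0);
    txq_append(&txq, &d1, &d1);
    printf("TXDP=0x%x d0.ds_link=0x%x\n", (unsigned)txq.txdp, (unsigned)d0.ds_link);
    return 0;
}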
/*
* per access-category aggregate tx state for a destination*/
typedef struct ath_atx_ac {
int sched; /* dest-ac is scheduled */
int qnum; /* H/W queue number associated with this AC */
int hwqcnt; /* count of pkts on hw queue */
#if ATH_SUPPORT_VOWEXT
uint16_t max_sch_penality; /* Max scheduling penalty for this AC */
uint16_t sch_penality_cnt; /* Current pending scheduling penalty count */
#endif
TAILQ_ENTRY(ath_atx_ac) ac_qelem; /* round-robin txq entry */
TAILQ_HEAD(,ath_atx_tid)tid_q; /* queue of TIDs with buffers */
int filtered; /* ac is filtered */
TAILQ_ENTRY(ath_atx_ac) fltr_qelem; /* handle hwq filtering */
TAILQ_HEAD(,ath_atx_tid)fltr_q; /* queue of TIDs being filtered */
} ath_atx_ac_t;
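A small model of the round-robin scheduling these fields suggest: the txq keeps scheduled ACs on axq_acq via ac_qelem, and each scheduling pass services the first AC and requeues it at the tail while it still has traffic. The pending counter and the one-frame-per-pass policy are assumptions for illustration, not the driver's scheduler.

#include <stdio.h>
#include <sys/queue.h>

struct ac_model {
    int pending;                            /* frames still queued on this AC's TIDs */
    int sched;                              /* mirrors ath_atx_ac.sched */
    TAILQ_ENTRY(ac_model) ac_qelem;
};

TAILQ_HEAD(ac_list, ac_model);

/* Service one AC per call: take the first scheduled AC, let it send one
 * frame, and requeue it at the tail if it still has work (round robin). */
static void txq_schedule(struct ac_list *axq_acq)
{
    struct ac_model *ac = TAILQ_FIRST(axq_acq);

    if (ac == NULL)
        return;
    TAILQ_REMOVE(axq_acq, ac, ac_qelem);
    if (ac->pending > 0)
        ac->pending--;                      /* "send" one frame */
    if (ac->pending > 0)
        TAILQ_INSERT_TAIL(axq_acq, ac, ac_qelem);
    else
        ac->sched = 0;                      /* nothing left: drop out of the rotation */
}

int main(void)
{
    struct ac_list axq_acq = TAILQ_HEAD_INITIALIZER(axq_acq);
    struct ac_model be = { .pending = 2, .sched = 1 };
    struct ac_model vo = { .pending = 1, .sched = 1 };

    TAILQ_INSERT_TAIL(&axq_acq, &be, ac_qelem);
    TAILQ_INSERT_TAIL(&axq_acq, &vo, ac_qelem);
    while (!TAILQ_EMPTY(&axq_acq))
        txq_schedule(&axq_acq);
    printf("be.pending=%d vo.pending=%d\n", be.pending, vo.pending);   /* 0 0 */
    return 0;
}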
/*
* per TID aggregate tx state for a destination
*/
typedef struct ath_atx_tid {
int tidno; /* TID number */
u_int16_t seq_start; /* starting seq of BA window */
u_int16_t seq_next; /* next seq to be used */
u_int16_t baw_size; /* BA window size */
#ifdef ATH_HTC_TX_SCHED
u_int8_t tid_buf_cnt;
u_int8_t pad0;
#endif
int baw_head; /* first un-acked tx buffer */
int baw_tail; /* next unused tx buffer slot */
u_int16_t sched:1, /* TID is scheduled */
filtered:1, /* TID has filtered pkts */
min_depth:2;/* num pkts that can be queued to h/w */
int paused; /* TID is paused */
int cleanup_inprogress; /* this TID's aggr being torn down */
TAILQ_HEAD(ath_tid_bq,ath_buf) buf_q; /* pending buffers */
TAILQ_ENTRY(ath_atx_tid) tid_qelem; /* round-robin tid entry */
TAILQ_HEAD(,ath_buf) fltr_q; /* filtered buffers */
TAILQ_ENTRY(ath_atx_tid) fltr_qelem; /* handle hwq filtering */
struct ath_node *an; /* parent node structure */
struct ath_atx_ac *ac; /* parent access category */
u_int32_t tx_buf_bitmap[TX_BUF_BITMAP_WORDS]; /* active tx frames */
/*
* ADDBA state
*/
u_int32_t addba_exchangecomplete:1,
addba_amsdusupported:1;
int32_t addba_exchangeinprogress;
struct ath_timer addba_requesttimer;
int addba_exchangeattempts;
u_int16_t addba_exchangestatuscode;
#ifdef VOW_TIDSCHED
int qw;
TAILQ_ENTRY(ath_atx_tid) wrr_tid_qelem; /* round-robin tid entry */
#endif
} ath_atx_tid_t;
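A minimal model of the block-ack window these fields track: seq_start is the left edge, baw_size the width, and a per-slot completion flag (standing in for tx_buf_bitmap/baw_head) lets the window slide forward when the left-edge frame completes. The shifting array below is deliberately simpler than the driver's circular bitmap; only the sliding rule is the point.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define WME_MAX_BA 64                       /* assumed maximum BA window */
#define SEQ_MODULO 4096

struct tid_model {
    uint16_t seq_start;                     /* left edge of the window */
    uint16_t baw_size;
    uint8_t  done[WME_MAX_BA];              /* stand-in for tx_buf_bitmap */
};

static int baw_index(const struct tid_model *t, uint16_t seqno)
{
    return (seqno - t->seq_start + SEQ_MODULO) % SEQ_MODULO;
}

static void baw_complete(struct tid_model *t, uint16_t seqno)
{
    int idx = baw_index(t, seqno);

    if (idx >= t->baw_size)
        return;                             /* outside the window: ignore */
    t->done[idx] = 1;
    while (t->done[0]) {                    /* left edge done: slide forward */
        memmove(t->done, t->done + 1, t->baw_size - 1);
        t->done[t->baw_size - 1] = 0;
        t->seq_start = (uint16_t)((t->seq_start + 1) % SEQ_MODULO);
    }
}

int main(void)
{
    struct tid_model t = { .seq_start = 100, .baw_size = 8 };

    baw_complete(&t, 101);                  /* out of order: window stays at 100 */
    baw_complete(&t, 100);                  /* left edge done: slides past 101 */
    printf("seq_start is now %u\n", (unsigned)t.seq_start);   /* 102 */
    return 0;
}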
struct ath_buf {
TAILQ_ENTRY(ath_buf) bf_list;
struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or an aggregate) */
struct ath_buf *bf_lastfrm; /* last buf of this frame */
struct ath_buf *bf_next; /* next subframe in the aggregate */
struct ath_buf *bf_rifslast; /* last buf for RIFS burst */
void *bf_mpdu; /* enclosing frame structure */
void *bf_vdata; /* virtual addr of data buffer */
void *bf_node; /* pointer to the node */
void *bf_desc; /* virtual addr of desc */
dma_addr_t bf_daddr; /* physical addr of desc */
dma_addr_t bf_buf_addr[ATH_MAX_MAPS]; /* physical addr of data buffer */
u_int32_t bf_buf_len[ATH_MAX_MAPS]; /* len of data */
u_int32_t bf_status;
u_int32_t bf_flags; /* tx descriptor flags */
#if ATH_SUPPORT_IQUE && ATH_SUPPORT_IQUE_EXT
u_int32_t bf_txduration;/* Tx duration of this buf */
#endif
u_int16_t bf_avail_buf;
u_int16_t bf_reftxpower; /* reference tx power */
struct ath_buf_state bf_state; /* buffer state */
OS_DMA_MEM_CONTEXT(bf_dmacontext) /* OS Specific DMA context */
#if 1//BR_RATECTRL_STAT_SUPPORT
int bf_rate_set_type; // yangzhicong 2012-10-06 AP bridge
#endif
};
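A short sketch of how the chaining pointers above relate: bf_next links the subframes of an aggregate, bf_lastfrm marks the last buffer of an individual (possibly fragmented) frame, and bf_lastbf marks the last buffer of the whole unit. Trimmed struct, illustrative only.

#include <stdio.h>
#include <stddef.h>

struct buf_model {
    struct buf_model *bf_next;              /* next subframe in the aggregate */
    struct buf_model *bf_lastbf;            /* last buf of the whole unit */
    struct buf_model *bf_lastfrm;           /* last buf of this individual frame */
    int seqno;
};

/* Walk every subframe of an aggregate whose first buffer is `first`. */
static void walk_aggr(struct buf_model *first)
{
    for (struct buf_model *bf = first; bf != NULL; bf = bf->bf_next)
        printf("subframe seq %d%s\n", bf->seqno,
               (bf == first->bf_lastbf) ? " (last in aggregate)" : "");
}

int main(void)
{
    /* Three single-buffer frames chained into one aggregate. */
    struct buf_model a = { .seqno = 10 }, b = { .seqno = 11 }, c = { .seqno = 12 };

    a.bf_lastfrm = &a; b.bf_lastfrm = &b; c.bf_lastfrm = &c;   /* unfragmented frames */
    a.bf_next = &b; b.bf_next = &c; c.bf_next = NULL;          /* subframe chain */
    a.bf_lastbf = &c;                                          /* aggregate bookkeeping */
    walk_aggr(&a);
    return 0;
}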
struct ath_buf_state {
int bfs_nframes; /* # frames in aggregate */
u_int16_t bfs_al; /* length of aggregate */
u_int16_t bfs_frmlen; /* length of frame */
int bfs_seqno; /* sequence number */
int bfs_tidno; /* tid of this frame */
int bfs_retries; /* current retries */
struct ath_rc_series bfs_rcs[4]; /* rate series */
int bfs_useminrate: 1; /* use minrate */
int bfs_ismcast: 1; /* is mcast packet */
int bfs_isdata: 1; /* is a data frame/aggregate */
int bfs_isaggr: 1; /* is an aggregate */
int bfs_isampdu: 1; /* is an a-mpdu, aggregate or not */
int bfs_ht: 1; /* is an HT frame */
int bfs_isretried: 1; /* is retried */
int bfs_isxretried: 1; /* is excessive retried */
int bfs_shpreamble: 1; /* is short preamble */
int bfs_isbar: 1; /* is a BAR */
int bfs_ispspoll: 1; /* is a PS-Poll */
int bfs_aggrburst: 1; /* is an aggr burst */
int bfs_calcairtime:1; /* requests airtime be calculated when set for tx frame */
#ifdef ATH_SUPPORT_UAPSD
int bfs_qosnulleosp:1; /* is QoS null EOSP frame */
#endif
int bfs_ispaprd:1; /* is PAPRD frame */
int bfs_isswaborted: 1; /* is the frame marked as sw aborted*/
#if ATH_SUPPORT_CFEND
int bfs_iscfend:1; /* is a CF-END frame */
#endif
#ifdef ATH_SWRETRY
int bfs_isswretry: 1; /* is the frame marked for swretry*/
int bfs_swretries; /* number of swretries made*/
int bfs_totaltries; /* total tries including hw retries*/
#endif
int bfs_qnum; /* h/w queue number */
int bfs_rifsburst_elem; /* RIFS burst/bar */
int bfs_nrifsubframes; /* # of elements in burst */
HAL_KEY_TYPE bfs_keytype; /* key type use to encrypt this frame */
u_int8_t bfs_txbfstatus; /* for TxBF , txbf status from TXS*/
};
struct ath_rc_series {
u_int8_t rix;
u_int8_t tries;
u_int16_t flags;
u_int32_t max4msframelen;
};
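A hedged sketch of filling a 4-entry multi-rate retry series: each entry carries a rate-table index and a try count, with fallback entries stepping down to lower rates. The step-down policy and values are assumptions for illustration, not the rate-control module's algorithm.

#include <stdio.h>
#include <stdint.h>

/* Trimmed copy of the structure above. */
struct rc_series_model {
    uint8_t  rix;                           /* index into the rate table */
    uint8_t  tries;                         /* transmit attempts at this rate */
    uint16_t flags;                         /* e.g. RTS/CTS or HT hints */
    uint32_t max4msframelen;                /* longest frame that fits in 4 ms */
};

/* Illustrative fill: a few tries at the chosen rate, then one index lower
 * per fallback series. */
static void fill_series(struct rc_series_model rcs[4], uint8_t start_rix,
                        uint8_t tries_per_step)
{
    for (int i = 0; i < 4; i++) {
        rcs[i].rix = (uint8_t)((start_rix > i) ? (start_rix - i) : 0);
        rcs[i].tries = tries_per_step;
        rcs[i].flags = 0;
        rcs[i].max4msframelen = 0;          /* would come from the rate table */
    }
}

int main(void)
{
    struct rc_series_model rcs[4];

    fill_series(rcs, 7, 2);
    for (int i = 0; i < 4; i++)
        printf("series %d: rix=%u tries=%u\n",
               i, (unsigned)rcs[i].rix, (unsigned)rcs[i].tries);
    return 0;
}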
typedef struct {
ath_node_t an; /* destination to sent to */
int if_id; /* only valid for cab traffic */
int qnum; /* h/w queue number */
u_int ismcast:1; /* if it's a multicast frame */
u_int istxfrag:1; /* if it's a tx fragment */
u_int ismgmt:1; /* if it's a management frame */
u_int isdata:1; /* if it's a data frame */
u_int isqosdata:1; /* if it's a qos data frame */
u_int ps:1; /* if one or more stations are in PS mode */
u_int shortPreamble:1; /* use short preamble */
u_int ht:1; /* if it can be transmitted using HT */
u_int use_minrate:1; /* if this frame should be transmitted using the specified
* minimum rate */
u_int isbar:1; /* if it is a block ack request */
u_int ispspoll:1; /* if it is a PS-Poll frame */
u_int calcairtime:1; /* requests airtime be calculated when set for tx frame */
u_int iseap:1; /* Is this an EAP packet? */
#ifdef ATH_SUPPORT_UAPSD
u_int isuapsd:1; /* if this frame needs uapsd handling */
#endif
#ifdef ATH_SUPPORT_TxBF
u_int isdelayrpt:1;
#endif
HAL_PKT_TYPE atype; /* Atheros packet type */
u_int32_t flags; /* HAL flags */
u_int32_t keyix; /* key index */
HAL_KEY_TYPE keytype; /* key type */
u_int16_t txpower; /* transmit power */
u_int16_t seqno; /* sequence number */
u_int16_t tidno; /* tid number */
u_int16_t frmlen; /* frame length */
#ifdef USE_LEGACY_HAL
u_int16_t hdrlen; /* header length of this frame */
int compression; /* compression scheme */
u_int8_t ivlen; /* iv length for compression */
u_int8_t icvlen; /* icv length for compression */
u_int8_t antenna; /* antenna control */
#endif
int min_rate; /* minimum rate */
int mcast_rate; /* multicast rate */
u_int16_t nextfraglen; /* next fragment length */
/* below is set only by ath_dev */
ath_dev_t dev; /* device handle */
u_int8_t priv[64]; /* private rate control info */
OS_DMA_MEM_CONTEXT(dmacontext) /* OS specific DMA context */
#ifdef ATH_SUPPORT_HTC
u_int8_t nodeindex;
u_int8_t vapindex;
#ifdef ENCAP_OFFLOAD
u_int8_t keyid;
u_int8_t key_mapping_key;
#endif
#endif
} ieee80211_tx_control_t;
typedef struct {
u_int Tries;
u_int Rate;
u_int PktDuration;
u_int ChSel;
u_int RateFlags;
u_int RateIndex;
u_int TxPowerCap; /* in 1/2 dBm units */
} HAL_11N_RATE_SERIES;
typedef struct {
int rateCount; /* NB: for proper padding */
u_int8_t rateCodeToIndex[256]; /* back mapping */
struct {
u_int8_t valid; /* valid for rate control use */
u_int8_t phy; /* CCK/OFDM/XR */
u_int32_t rateKbps; /* transfer rate in kbs */
u_int8_t rateCode; /* rate for h/w descriptors */
u_int8_t shortPreamble; /* mask for enabling short
* preamble in CCK rate code */
u_int8_t dot11Rate; /* value for supported rates
* info element of MLME */
u_int8_t controlRate; /* index of next lower basic
* rate; used for dur. calcs */
u_int16_t lpAckDuration; /* long preamble ACK duration */
u_int16_t spAckDuration; /* short preamble ACK duration*/
} info[36];
} HAL_RATE_TABLE;
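Before ath_hal_set11n_ratescenario() can be called (as in the flow analysis later in this article), the rate-control series has to be translated into HAL_11N_RATE_SERIES entries using the rate table. A rough, trimmed sketch of that translation follows; the duration math and flag handling are omitted, and the rate codes are made up.

#include <stdio.h>
#include <stdint.h>

/* Trimmed copies of the three structures involved. */
struct rc_series_m  { uint8_t rix; uint8_t tries; uint16_t flags; };
struct rate_info_m  { uint8_t rateCode; uint32_t rateKbps; };
struct hal_series_m { unsigned Tries; unsigned Rate; unsigned PktDuration;
                      unsigned RateFlags; unsigned RateIndex; };

/* Translate each rate-control entry (table index + try count) into the HAL's
 * per-series form by looking up the hardware rate code in the rate table. */
static void build_hal_series(const struct rc_series_m rcs[4],
                             const struct rate_info_m *info,
                             struct hal_series_m series[4])
{
    for (int i = 0; i < 4; i++) {
        series[i].Tries       = rcs[i].tries;
        series[i].RateIndex   = rcs[i].rix;
        series[i].Rate        = info[rcs[i].rix].rateCode;  /* h/w rate code */
        series[i].RateFlags   = rcs[i].flags;
        series[i].PktDuration = 0;          /* real code computes airtime here */
    }
}

int main(void)
{
    struct rate_info_m info[8] = { [6] = { 0x0b, 54000 }, [7] = { 0x0f, 6000 } };
    struct rc_series_m rcs[4]  = { {6, 4, 0}, {6, 2, 0}, {7, 2, 0}, {7, 2, 0} };
    struct hal_series_m series[4];

    build_hal_series(rcs, info, series);
    printf("series0: tries=%u ratecode=0x%x\n", series[0].Tries, series[0].Rate);
    return 0;
}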
typedef struct {
int rateCount;
A_UINT8 rateCodeToIndex[RATE_TABLE_SIZE]; /* backward mapping */
struct {
A_UINT8 valid: 1, /* Valid for use in rate control */
validUAPSD : 1; /* Valid for use in rate control for UAPSD operation */
WLAN_PHY phy; /* CCK/OFDM/TURBO/XR */
A_UINT16 rateKbps; /* Rate in Kbits per second */
A_UINT16 userRateKbps; /* User rate in KBits per second */
A_UINT8 rateCode; /* rate that goes into hw descriptors */
A_UINT8 shortPreamble; /* Mask for enabling short preamble in rate code for CCK */
A_UINT8 dot11Rate; /* Value that goes into supported rates info element of MLME */
A_UINT8 controlRate; /* Index of next lower basic rate, used for duration computation */
A_RSSI rssiAckValidMin; /* Rate control related information */
A_RSSI rssiAckDeltaMin; /* Rate control related information */
A_UINT16 lpAckDuration; /* long preamble ACK duration */
A_UINT16 spAckDuration; /* short preamble ACK duration*/
A_UINT32 max4msFrameLen; /* Maximum frame length(bytes) for 4ms tx duration */
struct {
A_UINT8 Retries[4];
A_UINT8 Rates[4];
} normalSched;
struct {
A_UINT8 Retries[4];
A_UINT8 Rates[4];
} shortSched;
struct {
A_UINT8 Retries[4];
A_UINT8 Rates[4];
} probeSched;
struct {
A_UINT8 Retries[4];
A_UINT8 Rates[4];
} probeShortSched;
struct {
A_UINT8 Retries[4];
A_UINT8 Rates[4];
} uapsd_normalSched;
struct {
A_UINT8 Retries[4];
A_UINT8 Rates[4];
} uapsd_shortSched;
} info[32];
A_UINT32 probeInterval; /* interval for ratectrl to probe for
other rates */
A_UINT32 rssiReduceInterval; /* interval for ratectrl to reduce RSSI */
A_UINT8 regularToTurboThresh; /* upperbound on regular (11a or 11g)
mode's rate before switching to turbo*/
A_UINT8 turboToRegularThresh; /* lowerbound on turbo mode's rate before
switching to regular */
A_UINT8 pktCountThresh; /* mode switch recommendation criterion:
number of consecutive packets sent at
rate beyond the rate threshold */
A_UINT8 initialRateMax; /* the initial rateMax value used in
rcSibUpdate() */
A_UINT8 numTurboRates; /* number of Turbo rates in the rateTable */
A_UINT8 xrToRegularThresh; /* threshold to switch to Normal mode */
} RATE_TABLE;
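rateCodeToIndex is the backward map from a hardware rate code (for example one reported in TX status) to a rate-table index. A small sketch of building and using that map; the table size and the rate codes are made up for the example.

#include <stdio.h>
#include <stdint.h>

#define TABLE_SIZE 16                       /* assumed size, for the sketch only */

/* Trimmed rate table: forward info[] plus the rateCodeToIndex[] back map. */
struct rate_table_m {
    int     rateCount;
    uint8_t rateCodeToIndex[256];
    struct {
        uint8_t  rateCode;
        uint16_t rateKbps;
    } info[TABLE_SIZE];
};

/* Fill the back map so a hardware rate code can be turned back into a table
 * index in O(1). */
static void build_back_map(struct rate_table_m *rt)
{
    for (int i = 0; i < rt->rateCount; i++)
        rt->rateCodeToIndex[rt->info[i].rateCode] = (uint8_t)i;
}

int main(void)
{
    struct rate_table_m rt = { .rateCount = 2 };

    rt.info[0].rateCode = 0x1b; rt.info[0].rateKbps = 1000;    /* made-up entries */
    rt.info[1].rateCode = 0x0c; rt.info[1].rateKbps = 6000;
    build_back_map(&rt);

    uint8_t code_from_txstatus = 0x0c;      /* pretend the hardware reported this */
    int idx = rt.rateCodeToIndex[code_from_txstatus];
    printf("rate code 0x%x -> index %d -> %u kbps\n",
           (unsigned)code_from_txstatus, idx, (unsigned)rt.info[idx].rateKbps);
    return 0;
}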
Structure relationship analysis
(1) TAILQ_FOREACH(bf, &txq->axq_q, bf_list)
(2) ds = (struct ath_desc *)bf->bf_desc;
(3) TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list)
(4) bf = TAILQ_FIRST(&txq->axq_fifo[txq->axq_tailindex]); bf = TAILQ_FIRST(&txq->axq_q);
(5) ac = TAILQ_FIRST(&txq->axq_acq);
(6) TAILQ_FOREACH_SAFE(tid, &ac->tid_q, tid_qelem, next_tid)
(7) bf = TAILQ_FIRST(&tid->buf_q);
(8) struct ar9300_txc *ads = AR9300TXC(ds);
(9) struct ath_atx_ac *ac = tid->ac;
(10)
(11) struct ath_node *an = (struct ath_node *)bf->bf_node;
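The relationships listed above can be read as a three-level queue hierarchy: txq->axq_acq holds access categories, each AC's tid_q holds TIDs, and each TID's buf_q holds pending ath_bufs, while axq_q holds frames already handed to the hardware. A compact, self-contained traversal sketch with trimmed structs (allocation and locking omitted; the _m types are stand-ins, not the driver's):

#include <stdio.h>
#include <sys/queue.h>

struct ath_buf_m {
    TAILQ_ENTRY(ath_buf_m) bf_list;
    void *bf_desc;
};
struct ath_atx_tid_m {
    int tidno;
    TAILQ_ENTRY(ath_atx_tid_m) tid_qelem;
    TAILQ_HEAD(, ath_buf_m) buf_q;
};
struct ath_atx_ac_m {
    TAILQ_ENTRY(ath_atx_ac_m) ac_qelem;
    TAILQ_HEAD(, ath_atx_tid_m) tid_q;
};
struct ath_txq_m {
    TAILQ_HEAD(, ath_buf_m) axq_q;          /* frames already handed to h/w */
    TAILQ_HEAD(, ath_atx_ac_m) axq_acq;     /* ACs waiting to be scheduled */
};

static void walk_txq(struct ath_txq_m *txq)
{
    struct ath_atx_ac_m *ac;
    struct ath_atx_tid_m *tid;
    struct ath_buf_m *bf;

    TAILQ_FOREACH(bf, &txq->axq_q, bf_list)           /* (1)(2) */
        printf("hw-queued bf, desc %p\n", bf->bf_desc);

    TAILQ_FOREACH(ac, &txq->axq_acq, ac_qelem)        /* (5) */
        TAILQ_FOREACH(tid, &ac->tid_q, tid_qelem)     /* (6) */
            TAILQ_FOREACH(bf, &tid->buf_q, bf_list)   /* (7) */
                printf("tid %d pending bf %p\n", tid->tidno, (void *)bf);
}

int main(void)
{
    struct ath_txq_m txq;
    struct ath_atx_ac_m ac;
    struct ath_atx_tid_m tid = { .tidno = 5 };
    struct ath_buf_m bf = { .bf_desc = NULL };

    TAILQ_INIT(&txq.axq_q);
    TAILQ_INIT(&txq.axq_acq);
    TAILQ_INIT(&ac.tid_q);
    TAILQ_INIT(&tid.buf_q);

    TAILQ_INSERT_TAIL(&txq.axq_acq, &ac, ac_qelem);
    TAILQ_INSERT_TAIL(&ac.tid_q, &tid, tid_qelem);
    TAILQ_INSERT_TAIL(&tid.buf_q, &bf, bf_list);

    walk_txq(&txq);                         /* prints the one pending buffer on TID 5 */
    return 0;
}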
Analysis of the main flow
ath_reset_start(ath_dev_t dev, HAL_BOOL no_flush, int tx_timeout, int rx_timeout)
(1)ath_reset_draintxq(struct ath_softc *sc, HAL_BOOL retry_tx, int timeout)--->
1) ath_tx_abortalldma(struct ath_softc *sc)--->
#define ath_hal_aborttxdma(_ah) \
((*(_ah)->ah_abortTxDma)((_ah)))--->
HAL_BOOL ar9300AbortTxDma(struct ath_hal *ah);
2) ath_reset_drain_txdataq(sc, retry_tx, timeout);
ath_reset_draintxq(struct ath_softc *sc, HAL_BOOL retry_tx, int timeout)--->
1) ath_hal_stoptxdma(sc->sc_ah, sc->sc_bhalq, timeout);
ar9300StopTxDma(struct ath_hal *ah, u_int q, u_int timeout)
2) ath_reset_drain_txdataq(sc, retry_tx, timeout);
(1)
ath_tx_stopdma(sc, &sc->sc_txq[i], timeout);
ar9300StopTxDma(struct ath_hal *ah, u_int q, u_int timeout)
(2)
ath_dump_descriptors(sc);
dump_txq_desc(sc, &sc->sc_txq[0]);
dump_txq_desc(sc, &sc->sc_txq[1]);
dump_txq_desc(sc, &sc->sc_txq[2]);
dump_txq_desc(sc, &sc->sc_txq[3]);
dump_txq_desc(sc, sc->sc_cabq);---->
TAILQ_FOREACH(bf, &txq->axq_q, bf_list)
ds0 = (struct ath_desc *)bf->bf_desc;
status = ath_hal_txprocdesc(ah, ds);
ar9300ProcTxDesc(struct ath_hal *ah, void *txstatus)
----> ads = &ahp->ts_ring[ahp->ts_tail];
check whether bf->bf_daddr == ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum)
dump_desc(sc, bf, txq->axq_qnum);
with ds = (struct ath_desc *)bf->bf_desc;
ath_dump_rx_desc(dev); ---->
ath_dump_rx_edma_desc(dev); ---->
(a)ath_dump_rx_que_desc( struct ath_softc *sc, HAL_RX_QUEUE qtype) ---->
DPRINTF(sc, ATH_DEBUG_ANY, "%s[%d]: RX[%d]rxedma->rxfifo[%d] %p
",__func__,__LINE__,qtype, i, rxedma->rxfifo[i]);
(b)dump_rx_desc(struct ath_softc *sc, int quenum)
TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list)
DPRINTF(sc, ATH_DEBUG_ANY, "%s[%d]: [%d] RX bf 0x%p bf->bf_mpdu %p
",__func__,__LINE__, i++, bf, bf->bf_mpdu);
(3)
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq, HAL_BOOL retry_tx)
(a) bf = TAILQ_FIRST(&txq->axq_fifo[txq->axq_tailindex]);
(b) ATH_TXQ_MOVE_HEAD_UNTIL(txq, &bf_head, lastbf, bf_list);
(c) ath_txq_drain_pending_buffers(sc, txq);
(i) ac = TAILQ_FIRST(&txq->axq_acq)
(ii) TAILQ_FOREACH_SAFE(tid, &ac->tid_q, tid_qelem, next_tid)
(iii) ath_tid_drain(sc, txq, tid);
i. ath_tid_swq_cleanup(sc, txq, tid, &drained_bf_head);
ii. tid->seq_next = tid->seq_start;
iii. tid->baw_tail = tid->baw_head;
iv. ath_complete_drained_tid_buf(sc, &drained_bf_head);
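The drain steps (a)-(c) above pivot on moving buffers off a queue in frame-sized chunks before completing them back to the stack. Below is a minimal, self-contained sketch (simplified types, not the driver's macro) of what an ATH_TXQ_MOVE_HEAD_UNTIL-style step does: pull buffers from the front of the queue onto a local list, up to and including the frame's last buffer.

#include <stdio.h>
#include <sys/queue.h>

struct buf_m {
    int id;
    TAILQ_ENTRY(buf_m) bf_list;
};
TAILQ_HEAD(bufhead, buf_m);

static void move_head_until(struct bufhead *txq_q, struct bufhead *bf_head,
                            struct buf_m *lastbf)
{
    struct buf_m *bf;

    while ((bf = TAILQ_FIRST(txq_q)) != NULL) {
        TAILQ_REMOVE(txq_q, bf, bf_list);
        TAILQ_INSERT_TAIL(bf_head, bf, bf_list);
        if (bf == lastbf)
            break;                          /* whole frame (or aggregate) moved */
    }
}

int main(void)
{
    struct bufhead txq_q   = TAILQ_HEAD_INITIALIZER(txq_q);
    struct bufhead bf_head = TAILQ_HEAD_INITIALIZER(bf_head);
    struct buf_m b[3] = { {1}, {2}, {3} };
    struct buf_m *bf;

    for (int i = 0; i < 3; i++)
        TAILQ_INSERT_TAIL(&txq_q, &b[i], bf_list);

    move_head_until(&txq_q, &bf_head, &b[1]);   /* drain the first frame: b[0]..b[1] */

    TAILQ_FOREACH(bf, &bf_head, bf_list)
        printf("drained buf %d\n", bf->id);     /* 1, 2 */
    return 0;
}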
Analysis of the ath_tx_start / TX send flow
(1) __ath_tx_prepare(sc, wbuf, txctl); // fill in txctl before the DMA mapping
(a)an = txctl->an;
(b)txctl->tidno = wbuf_get_tid(wbuf);
……
(2) wbuf_map_sg(sc->sc_osdev, wbuf, OS_GET_DMA_MEM_CONTEXT(txctl, dmacontext), txctl); --->
__wbuf_map_sg(os_handle, wbuf, context, arg); --->
(a) wbuf_start_dma(nbf, &sg, 1, arg)
(b) ath_tx_start_dma(nbf, sg, n_sg, arg);
(i) ath_tx_get_buf(sc, sg, &bf, &bf_head,txctl->qnum, buf_used);
(ii) bf->bf_frmlen = txctl->frmlen;
(iii) bf->bf_isdata = txctl->isdata;
(iv) bf->bf_ismcast = txctl->ismcast;
(v) bf->bf_useminrate = txctl->use_minrate;
(vi) bf->bf_isbar = txctl->isbar;
(vii) bf->bf_ispspoll = txctl->ispspoll;
(viii) bf->bf_calcairtime = txctl->calcairtime;
(ix) bf->bf_flags = txctl->flags;
(x) bf->bf_shpreamble = txctl->shortPreamble;
(xi) bf->bf_keytype = txctl->keytype;
(xii) bf->bf_tidno = txctl->tidno;
(xiii) bf->bf_node = an;
(xiv) bf->bf_mpdu = wbuf;
(xv) ds = bf->bf_desc;
(xvi) OS_COPY_DMA_MEM_CONTEXT(OS_GET_DMA_MEM_CONTEXT(bf, bf_dmacontext), OS_GET_DMA_MEM_CONTEXT(txctl, dmacontext));
(xvii)
ath_hal_set11n_txdesc(ah, ds
, bf->bf_frmlen /* frame length */
, txctl->atype /* Atheros packet type */
, MIN(txctl->txpower, 60) /* txpower */
, txctl->keyix /* key cache index */
, txctl->keytype /* key type */
, txctl->flags /* flags */
);
(xviii) ath_hal_filltxdesc(ah, ds
, (bf->bf_buf_addr) /* buffer address */
, bf->bf_buf_len /* buffer length */
, 0 /* descriptor id */
, bf->bf_qnum /* QCU number */
, txctl->keytype /* key type */
, AH_TRUE /* first segment */
/* last segment */
, (n_sg <= sc->sc_num_txmaps) ? AH_TRUE : AH_FALSE
, ds /* first descriptor */
);
(xix) if (firstbf) {
(xx) firstbf->bf_lastfrm = bf;
(xxi) ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl);
// ath_tx_send_normal(sc, txq, tid, &bf_head, txctl);
ath_tx_addto_baw(sc, tid, bf);
ath_buf_set_rate(sc, bf); --->
ath_hal_set11n_ratescenario(ah, ds, lastds,
!bf->bf_ispspoll,
ctsrate,
ctsduration,
series, 4, flags, smartAntenna);
Analysis of the ath_tx_send_normal(sc, txq, tid, &bf_head, txctl) flow
(1) ath_rate_findrate(sc, tid->an, bf->bf_shpreamble, bf->bf_frmlen, ATH_11N_TXMAXTRY, ATH_PROBE, ac, bf->bf_rcs, &isProbe, AH_FALSE, ...);
    (a) ath_rate_findrate_11n(sc, an, frameLen, numTries, 4, rflags, ac, rcs, isProbe, isretry);
(2) bf->bf_nframes = 1;
(3) bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
Analysis of the ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl) flow
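As a rough, hedged sketch of the decision this path revolves around (only ath_tx_addto_baw, tid->buf_q, and the BAW fields appear in the outline above; the helper names and checks below are illustrative stand-ins, not the driver function): a subframe whose sequence number fits inside the block-ack window is tracked in the BAW and pushed toward the hardware, otherwise it is parked on the TID's software queue until the window slides or the scheduler revisits the TID.

#include <stdio.h>
#include <sys/queue.h>

#define SEQ_MODULO 4096

struct bf_m {
    int seqno;
    TAILQ_ENTRY(bf_m) bf_list;
};
TAILQ_HEAD(bfhead, bf_m);

struct tid_m {
    int seq_start;                          /* left edge of the BA window */
    int baw_size;
    int paused;
    struct bfhead buf_q;                    /* software queue of pending subframes */
};

static int within_baw(const struct tid_m *tid, int seqno)
{
    return ((seqno - tid->seq_start + SEQ_MODULO) % SEQ_MODULO) < tid->baw_size;
}

/* Illustrative stand-in, NOT the driver function: decide whether a subframe
 * can go to the hardware now or must wait on the TID's software queue. */
static void send_ampdu_sketch(struct tid_m *tid, struct bf_m *bf)
{
    if (tid->paused || !within_baw(tid, bf->seqno)) {
        TAILQ_INSERT_TAIL(&tid->buf_q, bf, bf_list);   /* park it for later */
        return;
    }
    /* ath_tx_addto_baw()-style bookkeeping would mark bf->seqno as in flight
     * here, then the frame would be handed to the hardware queue. */
    printf("seq %d handed to hardware\n", bf->seqno);
}

int main(void)
{
    struct tid_m tid = { .seq_start = 0, .baw_size = 64, .paused = 0 };
    struct bf_m in_window = { .seqno = 3 }, out_of_window = { .seqno = 200 };

    TAILQ_INIT(&tid.buf_q);
    send_ampdu_sketch(&tid, &in_window);        /* sent */
    send_ampdu_sketch(&tid, &out_of_window);    /* queued on tid->buf_q */
    printf("software queue empty: %s\n", TAILQ_EMPTY(&tid.buf_q) ? "yes" : "no");
    return 0;
}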