#include <linux/etherdevice.h>
#include <asm/byteorder.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>
#include <linux/slab.h>

#include "gdm_wimax.h"
#include "hci.h"
#include "gdm_qos.h"
#define B2H(x)		__be16_to_cpu(x)

#define dprintk(fmt, args ...) printk(KERN_DEBUG "[QoS] " fmt, ## args)
#define wprintk(fmt, args ...) \
	printk(KERN_WARNING "[QoS WARNING] " fmt, ## args)
#define eprintk(fmt, args ...) printk(KERN_ERR "[QoS ERROR] " fmt, ## args)

#define MAX_FREE_LIST_CNT	32
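/*
 * Shared cache of qos_entry_s objects so the TX hot path can usually
 * skip kmalloc()/kfree(). This declaration is a minimal sketch: the
 * field set (head/cnt/lock) is inferred from the uses below.
 */
static struct {
	struct list_head head;
	int cnt;
	spinlock_t lock;
} qos_free_list;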
static void init_qos_entry_list(void)
{
	qos_free_list.cnt = 0;
	INIT_LIST_HEAD(&qos_free_list.head);
	spin_lock_init(&qos_free_list.lock);
}
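/*
 * Take a cached entry from qos_free_list when available; otherwise fall
 * back to kmalloc(GFP_ATOMIC), since callers may run in atomic context.
 */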
static void *alloc_qos_entry(void)
{
	struct qos_entry_s *entry;
	unsigned long flags;

	spin_lock_irqsave(&qos_free_list.lock, flags);
	if (qos_free_list.cnt) {
		entry = list_entry(qos_free_list.head.prev,
				   struct qos_entry_s, list);
		list_del(&entry->list);
		qos_free_list.cnt--;
		spin_unlock_irqrestore(&qos_free_list.lock, flags);
		return entry;
	}
	spin_unlock_irqrestore(&qos_free_list.lock, flags);

	return kmalloc(sizeof(*entry), GFP_ATOMIC);
}
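/*
 * Return an entry to the cache, or kfree() it once the cache already
 * holds MAX_FREE_LIST_CNT entries.
 */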
static void free_qos_entry(void *entry)
{
	struct qos_entry_s *qentry = entry;
	unsigned long flags;

	spin_lock_irqsave(&qos_free_list.lock, flags);
	if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
		list_add(&qentry->list, &qos_free_list.head);
		qos_free_list.cnt++;
		spin_unlock_irqrestore(&qos_free_list.lock, flags);
		return;
	}
	spin_unlock_irqrestore(&qos_free_list.lock, flags);

	kfree(entry);
}
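/* Free every entry on a private list and log how many were released. */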
static void free_qos_entry_list(struct list_head *free_list)
{
	struct qos_entry_s *entry, *n;
	int total_free = 0;

	list_for_each_entry_safe(entry, n, free_list, list) {
		list_del(&entry->list);
		kfree(entry);
		total_free++;
	}

	dprintk("%s: total_free_cnt=%d\n", __func__, total_free);
}
void gdm_qos_init(void *nic_ptr)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	int i;

	for (i = 0; i < QOS_MAX; i++) {
		INIT_LIST_HEAD(&qcb->qos_list[i]);
		qcb->csr[i].qos_buf_count = 0;
		qcb->csr[i].enabled = 0;
	}

	qcb->qos_list_cnt = 0;
	qcb->qos_null_idx = QOS_MAX - 1;

	spin_lock_init(&qcb->qos_lock);

	init_qos_entry_list();
}
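/*
 * Teardown: disable every CSR slot and move any still-queued entries to
 * a private list so they can be freed after qos_lock is dropped.
 */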
void gdm_qos_release_list(void *nic_ptr)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry, *n;
	struct list_head free_list;
	unsigned long flags;
	int i;

	INIT_LIST_HEAD(&free_list);

	spin_lock_irqsave(&qcb->qos_lock, flags);

	for (i = 0; i < QOS_MAX; i++) {
		qcb->csr[i].qos_buf_count = 0;
		qcb->csr[i].enabled = 0;
	}

	qcb->qos_list_cnt = 0;
	qcb->qos_null_idx = QOS_MAX - 1;

	for (i = 0; i < QOS_MAX; i++) {
		list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list)
			list_move_tail(&entry->list, &free_list);
	}

	spin_unlock_irqrestore(&qcb->qos_lock, flags);
	free_qos_entry_list(&free_list);
}
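/*
 * Match one classifier rule against an IPv4 header (Stream) and the
 * start of the transport header (port). Returns 0 on match, 1 when any
 * enabled sub-rule rejects the packet. The sub-rule enable bits
 * (IPMASKEDSRCADDRESS and friends) are assumed to come from the driver
 * headers; ports are compared after explicit big-endian assembly.
 */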
static u32 chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *Stream, u8 *port)
{
	int i;

	if (csr->classifier_rule_en & IPMASKEDSRCADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((Stream[12 + i] & csr->ipsrc_addrmask[i]) !=
			    (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & IPMASKEDDSTADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((Stream[16 + i] & csr->ipdst_addrmask[i]) !=
			    (csr->ipdst_addr[i] & csr->ipdst_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & PROTOCOLSRCPORTRANGE) {
		i = ((port[0] << 8) & 0xff00) + port[1];
		if ((i < csr->srcport_lo) || (i > csr->srcport_hi))
			return 1;
	}

	if (csr->classifier_rule_en & PROTOCOLDSTPORTRANGE) {
		i = ((port[2] << 8) & 0xff00) + port[3];
		if ((i < csr->dstport_lo) || (i > csr->dstport_hi))
			return 1;
	}

	return 0;
}
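/*
 * Return the index of the first enabled CSR whose classifier accepts
 * this IPv4 packet, or -1 (stored in a u32, so UINT_MAX) when none do.
 */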
static u32 get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph)
{
	u32 IP_Ver, Header_Len, i;
	struct qos_cb_s *qcb = &nic->qos;

	if (iph == NULL || tcpudph == NULL)
		return -1;

	IP_Ver = (iph[0] >> 4) & 0xf;
	Header_Len = iph[0] & 0xf;	/* in 32-bit words; currently unused */

	if (IP_Ver == 4) {
		for (i = 0; i < QOS_MAX; i++) {
			if (qcb->csr[i].enabled &&
			    qcb->csr[i].classifier_rule_en &&
			    chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0)
				return i;
		}
	}

	return -1;
}
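/*
 * Collect at most one pending entry per enabled CSR onto `head` so the
 * caller can transmit them after dropping qos_lock; warns when a queue
 * still has packets piled up behind the one taken.
 */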
static u32 extract_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry;
	int i;

	INIT_LIST_HEAD(head);

	for (i = 0; i < QOS_MAX; i++) {
		if (!qcb->csr[i].enabled)
			continue;
		if (list_empty(&qcb->qos_list[i]))
			continue;

		/* Move one pending entry for this SF to the send list. */
		entry = list_entry(qcb->qos_list[i].prev,
				   struct qos_entry_s, list);
		list_move_tail(&entry->list, head);
		qcb->csr[i].qos_buf_count++;

		if (!list_empty(&qcb->qos_list[i]))
			wprintk("QoS Index(%d) is piled!!\n", i);
	}

	return 0;
}
static void send_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_entry_s *entry, *n;

	list_for_each_entry_safe(entry, n, head, list) {
		list_del(&entry->list);
		/* Send first, then recycle: freeing the entry before the
		 * send would use entry->skb after it was returned. */
		gdm_wimax_send_tx(entry->skb, entry->dev);
		free_qos_entry(entry);
	}
}
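/*
 * TX entry point: IPv4 frames are queued on the CSR list chosen by
 * get_qos_index() (or the null-classifier queue) and drained fairly;
 * anything else bypasses QoS and goes straight to gdm_wimax_send_tx().
 */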
int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
{
	struct nic *nic = netdev_priv(dev);
	struct qos_cb_s *qcb = &nic->qos;
	struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE);
	struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN);
	struct tcphdr *tcph;
	struct qos_entry_s *entry = NULL;
	struct list_head send_list;
	unsigned long flags;
	u32 index;

	/* Step over the IP header in bytes; the cast through char * avoids
	 * advancing in sizeof(struct tcphdr) units. */
	tcph = (struct tcphdr *)((char *)iph + iph->ihl * 4);

	if (B2H(ethh->h_proto) == ETH_P_IP) {
		/* Pre-allocate outside the spinlock when the cache is empty,
		 * so kmalloc(GFP_ATOMIC) is not done under qos_lock. */
		if (qcb->qos_list_cnt && !qos_free_list.cnt) {
			entry = alloc_qos_entry();
			entry->skb = skb;
			entry->dev = dev;
		}

		spin_lock_irqsave(&qcb->qos_lock, flags);
		if (qcb->qos_list_cnt) {
			index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph);
			if (index == -1)
				index = qcb->qos_null_idx;

			if (!entry) {
				entry = alloc_qos_entry();
				entry->skb = skb;
				entry->dev = dev;
			}

			list_add_tail(&entry->list, &qcb->qos_list[index]);
			extract_qos_list(nic, &send_list);
			spin_unlock_irqrestore(&qcb->qos_lock, flags);
			send_qos_list(nic, &send_list);
			return 0;
		}
		spin_unlock_irqrestore(&qcb->qos_lock, flags);

		if (entry)
			free_qos_entry(entry);
	}

	return gdm_wimax_send_tx(skb, dev);
}
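/*
 * Find the CSR slot for SFID; when mode is non-zero and the SFID is
 * unknown, claim the first disabled slot. Returns -1 (as u32) if no
 * slot is found.
 */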
static u32 get_csr(struct qos_cb_s *qcb, u32 SFID, int mode)
{
	int i;

	for (i = 0; i < qcb->qos_list_cnt; i++) {
		if (qcb->csr[i].SFID == SFID)
			return i;
	}

	if (mode) {
		for (i = 0; i < QOS_MAX; i++) {
			if (qcb->csr[i].enabled == 0) {
				qcb->csr[i].enabled = 1;
				qcb->qos_list_cnt++;
				return i;
			}
		}
	}

	return -1;
}
#define QOS_CHANGE_DEL	0xFC
#define QOS_ADD		0xFD
#define QOS_REPORT	0xFE
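/*
 * Handle a QoS HCI event from the device. buf[4] carries the
 * sub-command; multi-byte payload fields arrive big-endian.
 */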
void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry, *n;
	struct list_head send_list;
	struct list_head free_list;
	unsigned long flags;
	u32 i, SFID, index, pos;
	u8 subCmdEvt;
	u8 len;
	subCmdEvt = (u8)buf[4];
	if (subCmdEvt == QOS_REPORT) {
		spin_lock_irqsave(&qcb->qos_lock, flags);
		for (i = 0; i < qcb->qos_list_cnt; i++) {
			/* Each report record is 5 bytes: a big-endian SFID
			 * followed by the device-side buffer count. */
			SFID = ((buf[(i*5)+6]<<24)&0xff000000);
			SFID += ((buf[(i*5)+7]<<16)&0xff0000);
			SFID += ((buf[(i*5)+8]<<8)&0xff00);
			SFID += (buf[(i*5)+9]);

			index = get_csr(qcb, SFID, 0);
			if (index == -1) {
				spin_unlock_irqrestore(&qcb->qos_lock, flags);
				eprintk("QoS ERROR: No SF\n");
				return;
			}
			qcb->csr[index].qos_buf_count = buf[(i*5)+10];
		}

		extract_qos_list(nic, &send_list);
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		send_qos_list(nic, &send_list);
		return;
	} else if (subCmdEvt == QOS_ADD) {
		pos = 6;
		len = (u8)buf[pos++];	/* TLV length byte, currently unused */
		SFID = ((buf[pos++]<<24)&0xff000000);
		SFID += ((buf[pos++]<<16)&0xff0000);
		SFID += ((buf[pos++]<<8)&0xff00);
		SFID += (buf[pos++]);
		index = get_csr(qcb, SFID, 1);
		if (index == -1) {
			eprintk("QoS ERROR: csr Update Error\n");
			return;
		}

		dprintk("QOS_ADD SFID = 0x%x, index=%d\n", SFID, index);
		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].SFID = SFID;
		qcb->csr[index].classifier_rule_en = ((buf[pos++]<<8)&0xff00);
		qcb->csr[index].classifier_rule_en += buf[pos++];
		if (qcb->csr[index].classifier_rule_en == 0)
			qcb->qos_null_idx = index;
		qcb->csr[index].ip2s_mask = buf[pos++];
		qcb->csr[index].ip2s_lo = buf[pos++];
		qcb->csr[index].ip2s_hi = buf[pos++];
		qcb->csr[index].protocol = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[0] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[1] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[2] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[3] = buf[pos++];
		qcb->csr[index].ipsrc_addr[0] = buf[pos++];
		qcb->csr[index].ipsrc_addr[1] = buf[pos++];
		qcb->csr[index].ipsrc_addr[2] = buf[pos++];
		qcb->csr[index].ipsrc_addr[3] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[0] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[1] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[2] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[3] = buf[pos++];
		qcb->csr[index].ipdst_addr[0] = buf[pos++];
		qcb->csr[index].ipdst_addr[1] = buf[pos++];
		qcb->csr[index].ipdst_addr[2] = buf[pos++];
		qcb->csr[index].ipdst_addr[3] = buf[pos++];
		/* 16-bit port bounds arrive big-endian. */
		qcb->csr[index].srcport_lo = ((buf[pos++]<<8)&0xff00);
		qcb->csr[index].srcport_lo += buf[pos++];
		qcb->csr[index].srcport_hi = ((buf[pos++]<<8)&0xff00);
		qcb->csr[index].srcport_hi += buf[pos++];
		qcb->csr[index].dstport_lo = ((buf[pos++]<<8)&0xff00);
		qcb->csr[index].dstport_lo += buf[pos++];
		qcb->csr[index].dstport_hi = ((buf[pos++]<<8)&0xff00);
		qcb->csr[index].dstport_hi += buf[pos++];
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
	} else if (subCmdEvt == QOS_CHANGE_DEL) {
		pos = 6;
		len = (u8)buf[pos++];
		SFID = ((buf[pos++]<<24)&0xff000000);
		SFID += ((buf[pos++]<<16)&0xff0000);
		SFID += ((buf[pos++]<<8)&0xff00);
		SFID += (buf[pos++]);
		index = get_csr(qcb, SFID, 1);
		if (index == -1) {
			eprintk("QoS ERROR: Wrong index(%d)\n", index);
			return;
		}

		dprintk("QOS_CHANGE_DEL SFID = 0x%x, index=%d\n", SFID, index);
		INIT_LIST_HEAD(&free_list);

		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].enabled = 0;
		qcb->qos_list_cnt--;

		/* Strip any still-queued packets for this SF and free them
		 * after the lock is dropped. */
		list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
					 list)
			list_move_tail(&entry->list, &free_list);

		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		free_qos_entry_list(&free_list);
	}
}