First, the module is loaded with insmod ixgbe.ko, which runs the driver's init routine:

    module_init(ixgbe_init_module);

    static int __init ixgbe_init_module(void)
    {
        int ret;

        pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
        pr_info("%s\n", ixgbe_copyright);

        ixgbe_dbg_init();

        ret = pci_register_driver(&ixgbe_driver);
        if (ret) {
            ixgbe_dbg_exit();
            return ret;
        }

    #ifdef CONFIG_IXGBE_DCA
        dca_register_notify(&dca_notifier);
    #endif

        return 0;
    }
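For symmetry, the unload path undoes the same steps in reverse. A simplified sketch based on the driver's ixgbe_exit_module (minor details such as the final rcu_barrier() are omitted):

    module_exit(ixgbe_exit_module);

    static void __exit ixgbe_exit_module(void)
    {
    #ifdef CONFIG_IXGBE_DCA
        dca_unregister_notify(&dca_notifier);
    #endif
        /* unregister from the PCI core, then tear down debugfs */
        pci_unregister_driver(&ixgbe_driver);
        ixgbe_dbg_exit();
    }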

Next, look at the driver's core struct pci_driver:

    static struct pci_driver ixgbe_driver = {
        .name            = ixgbe_driver_name,
        .id_table        = ixgbe_pci_tbl,
        .probe           = ixgbe_probe,
        .remove          = ixgbe_remove,
    #ifdef CONFIG_PM
        .suspend         = ixgbe_suspend,
        .resume          = ixgbe_resume,
    #endif
        .shutdown        = ixgbe_shutdown,
        .sriov_configure = ixgbe_pci_sriov_configure,
        .err_handler     = &ixgbe_err_handler
    };
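The id_table field points at the list of PCI vendor/device IDs this driver claims; the PCI core matches devices against it to decide when to call probe. A heavily abridged sketch (the entry names come from the driver, but the real table lists many more 82598/82599/X540 variants):

    static const struct pci_device_id ixgbe_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),     board_82598 },
        { PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
        /* ... many more device IDs elided ... */
        { 0, }  /* required last entry */
    };
    MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);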

When a matching device is found and bound, the ixgbe_probe function is executed:

    static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
        /* allocate the struct net_device *netdev (multi-queue) */
        netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
        if (!netdev) {
            err = -ENOMEM;
            goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        /* the struct ixgbe_adapter lives in the netdev's private area */
        adapter = netdev_priv(netdev);

        /* hook up the net_device_ops function pointers */
        netdev->netdev_ops = &ixgbe_netdev_ops;

        err = ixgbe_sw_init(adapter);

        err = ixgbe_init_interrupt_scheme(adapter);

        /* register the network device */
        err = register_netdev(netdev);
    }
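ixgbe_netdev_ops is the driver's net_device_ops table; a few of its entries (an abridged sketch, the real table has many more callbacks) show which driver functions the stack will call later for open/close/transmit:

    static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open            = ixgbe_open,
        .ndo_stop            = ixgbe_close,
        .ndo_start_xmit      = ixgbe_xmit_frame,
        .ndo_set_mac_address = ixgbe_set_mac,
        /* ... many more callbacks elided ... */
    };

This is why bringing the interface up later lands in ixgbe_open.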

The key function here is ixgbe_init_interrupt_scheme(adapter), which initializes the adapter's queue vectors and the NAPI-related state:

    int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
    {
        ...
        err = ixgbe_alloc_q_vectors(adapter);
        ...
    }

    static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
    {
        ...
        if (q_vectors >= (rxr_remaining + txr_remaining)) {
            for (; rxr_remaining; v_idx++) {
                err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
                                           0, 0, 1, rxr_idx);
                if (err)
                    goto err_out;

                /* update counts and index */
                rxr_remaining--;
                rxr_idx++;
            }
        }
        ...
    }

    static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                                    int v_count, int v_idx,
                                    int txr_count, int txr_idx,
                                    int rxr_count, int rxr_idx)
    {
        ...
        /* setup affinity mask and node */
        if (cpu != -1)
            cpumask_set_cpu(cpu, &q_vector->affinity_mask);
        q_vector->numa_node = node;

    #ifdef CONFIG_IXGBE_DCA
        /* initialize CPU for DCA */
        q_vector->cpu = -1;
    #endif

        /* initialize NAPI */
        netif_napi_add(adapter->netdev, &q_vector->napi,
                       ixgbe_poll, 64);
        napi_hash_add(&q_vector->napi);
        ...
    }
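netif_napi_add is where the driver's poll callback (ixgbe_poll) and its weight (64, the default NAPI budget) get recorded in the napi_struct. A simplified sketch of what it does in kernels of this era, assuming the core fields shown below:

    void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
                        int (*poll)(struct napi_struct *, int), int weight)
    {
        INIT_LIST_HEAD(&napi->poll_list);
        napi->gro_count = 0;
        napi->gro_list  = NULL;
        napi->skb       = NULL;
        napi->poll      = poll;      /* ixgbe_poll in our case */
        napi->weight    = weight;    /* 64 */
        napi->dev       = dev;
        list_add(&napi->dev_list, &dev->napi_list);
        set_bit(NAPI_STATE_SCHED, &napi->state);
    }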

At this point, the NIC's initialization during probe is complete.

The following structures are involved:

    /* board specific private data structure */
    struct ixgbe_adapter {
        ...
        /* TX rings */
        struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

        /* RX rings */
        struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];

        /* each q_vector contains a napi_struct; the q_vectors correspond
         * one-to-one with the msix_entries below, one per interrupt vector */
        struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

        /* MSI-X entries, one per vector */
        struct msix_entry *msix_entries;
        ...
    };

    struct ixgbe_q_vector {
        struct ixgbe_adapter *adapter;
    #ifdef CONFIG_IXGBE_DCA
        int cpu;        /* CPU for DCA */
    #endif
        u16 v_idx;      /* index of q_vector within array, also used for
                         * finding the bit in EICR and friends that
                         * represents the vector for this ring */
        u16 itr;        /* Interrupt throttle rate written to EITR */
        struct ixgbe_ring_container rx, tx;

        struct napi_struct napi;    /* the NAPI structure */
        cpumask_t affinity_mask;
        int numa_node;
        struct rcu_head rcu;        /* to avoid race with update stats on free */
        char name[IFNAMSIZ + 9];

        /* for dynamic allocation of rings associated with this q_vector */
        struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
    };

    struct napi_struct {
        /* The poll_list must only be managed by the entity which
         * changes the state of the NAPI_STATE_SCHED bit. This means
         * whoever atomically sets that bit can add this napi_struct
         * to the per-cpu poll_list, and whoever clears that bit
         * can remove from the list right before clearing the bit.
         */
        struct list_head poll_list;

        unsigned long state;
        int weight;
        unsigned int gro_count;
        int (*poll)(struct napi_struct *, int);   /* the driver's poll callback */
    #ifdef CONFIG_NETPOLL
        spinlock_t poll_lock;
        int poll_owner;
    #endif
        struct net_device *dev;
        struct sk_buff *gro_list;
        struct sk_buff *skb;
        struct list_head dev_list;
    };

Then, when we bring the interface up with ifconfig dev up, the netdev_ops->ndo_open callback, ixgbe_open, is executed:

    static int ixgbe_open(struct net_device *netdev)
    {
        /* allocate transmit descriptors */
        err = ixgbe_setup_all_tx_resources(adapter);
        if (err)
            goto err_setup_tx;

        /* allocate receive descriptors */
        err = ixgbe_setup_all_rx_resources(adapter);

        /* register the interrupt handlers */
        err = ixgbe_request_irq(adapter);
    }

    static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
    {
        struct net_device *netdev = adapter->netdev;
        int err;

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
            err = ixgbe_request_msix_irqs(adapter);
        else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
            err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
                              netdev->name, adapter);
        else
            err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
                              netdev->name, adapter);

        if (err)
            e_err(probe, "request_irq failed, Error %d\n", err);

        return err;
    }

    static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
    {
        for (vector = 0; vector < adapter->num_q_vectors; vector++) {
            struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
            struct msix_entry *entry = &adapter->msix_entries[vector];

            err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
                              q_vector->name, q_vector);
        }
    }
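For the legacy-INTx/MSI branches, the handler registered is ixgbe_intr instead, but it ends up doing the same thing: scheduling NAPI on the first queue vector. A simplified sketch (link-state and error-cause handling elided):

    static irqreturn_t ixgbe_intr(int irq, void *data)
    {
        struct ixgbe_adapter *adapter = data;
        struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
        u32 eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

        if (!eicr)
            return IRQ_NONE;    /* shared interrupt, not for us */

        /* ... link-state and error-cause handling elided ... */

        napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
    }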

From the flow above, in the MSI-X case the interrupt handler registered for each queue vector is ixgbe_msix_clean_rings:

    static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
    {
        struct ixgbe_q_vector *q_vector = data;

        /* EIAM disabled interrupts (on this vector) for us */
        if (q_vector->rx.ring || q_vector->tx.ring)
            napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
    }

As the code shows, this interrupt handler does nothing more than schedule NAPI.

When a packet arrives, the hardware interrupt fires and runs ixgbe_msix_clean_rings; napi_schedule ultimately calls __raise_softirq_irqoff to raise the NET_RX_SOFTIRQ softirq, and the corresponding softirq handler then carries the packet up into the protocol stack.

Now let's see what the NAPI scheduling functions actually do:

    static inline void napi_schedule(struct napi_struct *n)
    {
        if (napi_schedule_prep(n))
            __napi_schedule(n);
    }

    void __napi_schedule(struct napi_struct *n)
    {
        unsigned long flags;

        local_irq_save(flags);
        ____napi_schedule(this_cpu_ptr(&softnet_data), n);
        local_irq_restore(flags);
    }
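____napi_schedule is where the two halves meet: it queues the napi_struct onto the per-CPU softnet_data poll list and raises NET_RX_SOFTIRQ. Its core in net/core/dev.c of this era is essentially:

    /* Called with irqs disabled */
    static inline void ____napi_schedule(struct softnet_data *sd,
                                         struct napi_struct *napi)
    {
        list_add_tail(&napi->poll_list, &sd->poll_list);
        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
    }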
So the NAPI scheduling path hangs the napi_struct onto the per-CPU private structure softnet_data:
    struct softnet_data {
        struct Qdisc *output_queue;
        struct Qdisc **output_queue_tailp;
        struct list_head poll_list;        /* napi_structs queued for polling */
        struct sk_buff *completion_queue;
        struct sk_buff_head process_queue;

        /* stats */
        unsigned int processed;
        unsigned int time_squeeze;
        unsigned int cpu_collision;
        unsigned int received_rps;

    #ifdef CONFIG_RPS
        struct softnet_data *rps_ipi_list;

        /* Elements below can be accessed between CPUs for RPS */
        struct call_single_data csd ____cacheline_aligned_in_smp;
        struct softnet_data *rps_ipi_next;
        unsigned int cpu;
        unsigned int input_queue_head;
        unsigned int input_queue_tail;
    #endif
        unsigned int dropped;
        struct sk_buff_head input_pkt_queue;
        struct napi_struct backlog;        /* NAPI instance used for non-NAPI
                                            * drivers and RPS backlog processing */
    };
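softnet_data is defined as a per-CPU variable in net/core/dev.c, which is why ____napi_schedule can grab the current CPU's instance with this_cpu_ptr(); roughly:

    DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
    EXPORT_PER_CPU_SYMBOL(softnet_data);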

  

NET_RX_SOFTIRQ is the softirq raised for received packets; its handler is net_rx_action.
NET_TX_SOFTIRQ is the softirq raised after packet transmission; its handler is net_tx_action.
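These two softirq handlers are registered once at boot time in net_dev_init() (net/core/dev.c); a minimal excerpt:

    static int __init net_dev_init(void)
    {
        /* ... per-CPU softnet_data and backlog initialization elided ... */
        open_softirq(NET_TX_SOFTIRQ, net_tx_action);
        open_softirq(NET_RX_SOFTIRQ, net_rx_action);
        /* ... */
    }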

    static void net_rx_action(struct softirq_action *h)
    {
        /* get this CPU's softnet_data */
        struct softnet_data *sd = this_cpu_ptr(&softnet_data);

        while (!list_empty(&sd->poll_list)) {
            struct napi_struct *n;

            n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

            if (test_bit(NAPI_STATE_SCHED, &n->state)) {
                work = n->poll(n, weight);
                trace_napi_poll(n);
            }
        }
    }
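The real net_rx_action also enforces a global budget and a time limit, and re-queues a NAPI instance that consumed its whole weight. Roughly, inside the loop (a simplified sketch; error paths and the RPS IPI handling are elided):

    budget -= work;

    /* If the poll callback used its entire weight there may be more work
     * pending: move it to the tail so other instances get a turn. */
    if (unlikely(work == weight))
        list_move_tail(&n->poll_list, &sd->poll_list);

    /* Out of budget or time: stop here and re-raise the softirq. */
    if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) {
        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
        return;
    }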

This invokes the poll callback installed in the napi_struct during initialization, which for ixgbe is ixgbe_poll:

    int ixgbe_poll(struct napi_struct *napi, int budget)
    {
        struct ixgbe_q_vector *q_vector =
                    container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *ring;
        int per_ring_budget;
        bool clean_complete = true;

    #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
            ixgbe_update_dca(q_vector);
    #endif

        ixgbe_for_each_ring(ring, q_vector->tx)
            clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);

        if (!ixgbe_qv_lock_napi(q_vector))
            return budget;

        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
        if (q_vector->rx.count > 1)
            per_ring_budget = max(budget/q_vector->rx.count, 1);
        else
            per_ring_budget = budget;

        ixgbe_for_each_ring(ring, q_vector->rx)
            clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
                                                  per_ring_budget) < per_ring_budget);

        ixgbe_qv_unlock_napi(q_vector);

        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
            return budget;

        /* all work done, exit the polling mode */
        napi_complete(napi);
        if (adapter->rx_itr_setting & 1)
            ixgbe_set_itr(q_vector);
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
            ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));

        return 0;
    }

    static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                  struct ixgbe_ring *rx_ring,
                                  const int budget)
    {
        ...
        ixgbe_rx_skb(q_vector, skb);
        ...
    }

    static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
                             struct sk_buff *skb)
    {
        if (ixgbe_qv_busy_polling(q_vector))
            netif_receive_skb(skb);
        else
            napi_gro_receive(&q_vector->napi, skb);
    }

    int netif_receive_skb(struct sk_buff *skb)
    {
        int ret;

        net_timestamp_check(netdev_tstamp_prequeue, skb);

        if (skb_defer_rx_timestamp(skb))
            return NET_RX_SUCCESS;

        rcu_read_lock();

    #ifdef CONFIG_RPS
        if (static_key_false(&rps_needed)) {
            struct rps_dev_flow voidflow, *rflow = &voidflow;
            int cpu = get_rps_cpu(skb->dev, skb, &rflow);

            if (cpu >= 0) {
                ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
                rcu_read_unlock();
                return ret;
            }
        }
    #endif
        /* finally, hand the packet to the protocol stack */
        ret = __netif_receive_skb(skb);
        rcu_read_unlock();
        return ret;
    }
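__netif_receive_skb eventually reaches __netif_receive_skb_core, which walks the registered packet_type handlers and delivers the skb to the matching L3 protocol (ip_rcv for IPv4, registered via dev_add_pack). A simplified sketch of that final delivery step, with VLAN handling, rx_handlers (bridging/bonding) and the ptype_all taps elided:

    type = skb->protocol;
    list_for_each_entry_rcu(ptype,
            &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
        if (ptype->type == type &&
            (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
             ptype->dev == orig_dev)) {
            if (pt_prev)
                ret = deliver_skb(skb, pt_prev, orig_dev);
            pt_prev = ptype;
        }
    }

    if (pt_prev)
        /* for IPv4 this ends up calling ip_rcv(skb, skb->dev, pt_prev, orig_dev) */
        ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);

From here on the packet is in the hands of the protocol stack proper, which is where the next part of this series (Netfilter and ip_local_deliver) picks up.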

  

  
