Android socket creation and bind flow analysis (Part 3)

Monday, August 1, 2011, 22:11

net_dev_init registers the softirq handler with open_softirq(NET_RX_SOFTIRQ, net_rx_action);. Raising that softirq from the NAPI scheduling path is what eventually invokes net_rx_action.
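Before walking through net_rx_action itself, it helps to see how the softirq gets raised. The sketch below condenses the 2.6-era non-NAPI receive path (netif_rx and __napi_schedule; timestamping, netpoll, and the congestion/drop accounting are omitted): the driver's interrupt handler queues the skb on the per-CPU input_pkt_queue, links the backlog napi_struct onto poll_list, and raises NET_RX_SOFTIRQ.

int netif_rx(struct sk_buff *skb)
{
       struct softnet_data *queue;
       unsigned long flags;

       local_irq_save(flags);
       queue = &__get_cpu_var(softnet_data);
       if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
              if (!queue->input_pkt_queue.qlen)
                     /* queue was empty: put the backlog napi on poll_list */
                     napi_schedule(&queue->backlog);
              __skb_queue_tail(&queue->input_pkt_queue, skb);
              local_irq_restore(flags);
              return NET_RX_SUCCESS;
       }
       /* ... congestion/drop path omitted ... */
       local_irq_restore(flags);
       kfree_skb(skb);
       return NET_RX_DROP;
}

void __napi_schedule(struct napi_struct *n)
{
       unsigned long flags;

       local_irq_save(flags);
       list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
       __raise_softirq_irqoff(NET_RX_SOFTIRQ);   /* this is what triggers net_rx_action */
       local_irq_restore(flags);
}

Once the softirq runs, net_rx_action picks the pending napi_struct entries off poll_list: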

static void net_rx_action(struct softirq_action *h)
{
       struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
       unsigned long start_time = jiffies;
       int budget = netdev_budget;
       void *have;

       local_irq_disable();

       while (!list_empty(list)) {
              struct napi_struct *n;
              int work, weight;

              /* If softirq window is exhausted then punt.
               *
               * Note that this is a slight policy change from the
               * previous NAPI code, which would allow up to 2
               * jiffies to pass before breaking out.  The test
               * used to be "jiffies - start_time > 1".
               */
              if (unlikely(budget <= 0 || jiffies != start_time))
                     goto softnet_break;

              local_irq_enable();

              /* Even though interrupts have been re-enabled, this
               * access is safe because interrupts can only add new
               * entries to the tail of this list, and only ->poll()
               * calls can remove this head entry from the list.
               */
              n = list_entry(list->next, struct napi_struct, poll_list);

              have = netpoll_poll_lock(n);
              /* weight: upper bound on packets processed per poll */
              weight = n->weight;

              /* This NAPI_STATE_SCHED test is for avoiding a race
               * with netpoll's poll_napi().  Only the entity which
               * obtains the lock and sees NAPI_STATE_SCHED set will
               * actually make the ->poll() call.  Therefore we avoid
               * accidentally calling ->poll() when NAPI is not scheduled.
               */
              work = 0;
              if (test_bit(NAPI_STATE_SCHED, &n->state))  /* only poll while scheduled */
                     /* run the poll function; net_dev_init installs the
                      * default: queue->backlog.poll = process_backlog; */
                     work = n->poll(n, weight);

              WARN_ON_ONCE(work > weight);

              budget -= work;

              local_irq_disable();

              /* Drivers must not modify the NAPI state if they
               * consume the entire weight.  In such cases this code
               * still "owns" the NAPI instance and therefore can
               * move the instance around on the list at-will.
               */
              if (unlikely(work == weight)) {
                     if (unlikely(napi_disable_pending(n)))
                            __napi_complete(n);
                     else
                            list_move_tail(&n->poll_list, list);
              }

              netpoll_poll_unlock(have);
       }
out:
       local_irq_enable();
       ......
}

For a non-NAPI driver, the poll function that runs here is the default installed by net_dev_init, process_backlog, which drains the per-CPU input_pkt_queue and hands each packet to netif_receive_skb:

 

static int process_backlog(struct napi_struct *napi, int quota)
{
       int work = 0;
       struct softnet_data *queue = &__get_cpu_var(softnet_data);
       unsigned long start_time = jiffies;

       napi->weight = weight_p;
       do {
              struct sk_buff *skb;

              local_irq_disable();
              skb = __skb_dequeue(&queue->input_pkt_queue);
              if (!skb) {
                     __napi_complete(napi);  /* queue drained: remove this napi from the poll list */
                     local_irq_enable();
                     break;
              }
              local_irq_enable();

              netif_receive_skb(skb);
       } while (++work < quota && jiffies == start_time);

       return work;
}

netif_receive_skb then walks the ptype_all list to find the packet_type whose protocol matches the one registered in packet_bind, and invokes that packet_type's receive handler via deliver_skb (the loop is sketched below, followed by deliver_skb itself).
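A rough sketch of that delivery loop, condensed from the 2.6-era netif_receive_skb (bridging, VLAN handling, and the per-protocol ptype_base pass are omitted here):

       /* Walk every tap registered on ptype_all; pt_prev defers each
        * deliver_skb() call by one iteration so that the last handler
        * could consume the skb without taking an extra reference. */
       list_for_each_entry_rcu(ptype, &ptype_all, list) {
              if (!ptype->dev || ptype->dev == skb->dev) {
                     if (pt_prev)
                            ret = deliver_skb(skb, pt_prev, orig_dev);
                     pt_prev = ptype;
              }
       }

Each matching entry is handed to deliver_skb: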

static inline int deliver_skb(struct sk_buff *skb,
                           struct packet_type *pt_prev,
                           struct net_device *orig_dev)
{
       atomic_inc(&skb->users);
       /* for a PF_PACKET socket, packet_create pointed func at packet_rcv */
       return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
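
For context, this is roughly how such a receive hook gets onto those lists in the first place. The fragment below is only an illustration (my_rcv, my_hook, and my_hook_init are hypothetical names, not from the kernel): PF_PACKET does the equivalent in packet_create/packet_do_bind, pointing .func at packet_rcv and registering the hook with dev_add_pack.

#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

/* Hypothetical handler; the signature is the one deliver_skb expects. */
static int my_rcv(struct sk_buff *skb, struct net_device *dev,
                  struct packet_type *pt, struct net_device *orig_dev)
{
       /* ... inspect the frame ... */
       kfree_skb(skb);             /* drop the reference taken by deliver_skb */
       return 0;
}

static struct packet_type my_hook = {
       .type = __constant_htons(ETH_P_ALL),   /* ETH_P_ALL hooks onto ptype_all */
       .func = my_rcv,
};

static int __init my_hook_init(void)
{
       /* dev_add_pack() links the hook into ptype_all (for ETH_P_ALL)
        * or into the ptype_base hash for a specific protocol. */
       dev_add_pack(&my_hook);
       return 0;
}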