How packet headers change as they move through the Linux network stack
On receive, the stack repeatedly strips each layer's protocol header with skb_pull(); on transmit, it repeatedly prepends each layer's header with skb_push(). Both helpers only move the skb->data pointer and adjust skb->len; the packet payload itself is never copied.
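To make that concrete, here is a minimal user-space sketch of what the two helpers do to the data pointer. This is not the kernel implementation (which also checks bounds and handles nonlinear buffers); struct fake_skb below is a simplified stand-in for struct sk_buff:

struct fake_skb {
    unsigned char *head;  /* start of the allocated buffer */
    unsigned char *data;  /* current start of valid data */
    unsigned int   len;   /* bytes of valid data from data onward */
};

/* skb_pull(): strip hdr_len bytes from the front (receive path) */
static unsigned char *fake_skb_pull(struct fake_skb *skb, unsigned int hdr_len)
{
    skb->len  -= hdr_len;
    skb->data += hdr_len;   /* data now points just past the stripped header */
    return skb->data;
}

/* skb_push(): prepend hdr_len bytes of headroom (transmit path) */
static unsigned char *fake_skb_push(struct fake_skb *skb, unsigned int hdr_len)
{
    skb->data -= hdr_len;   /* data now points at the new header */
    skb->len  += hdr_len;
    return skb->data;
}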
First, the receive path:
/**
 * eth_type_trans - determine the packet's protocol ID.
 * @skb: received socket data
 * @dev: receiving network device
 *
 * The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 */
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
{
    struct ethhdr *eth;
    unsigned char *rawp;

    skb->dev = dev;
    skb_reset_mac_header(skb);
    skb_pull(skb, ETH_HLEN);
    eth = eth_hdr(skb);

    if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
        if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
            skb->pkt_type = PACKET_BROADCAST;
        else
            skb->pkt_type = PACKET_MULTICAST;
    }

    /*
     * This ALLMULTI check should be redundant by 1.4
     * so don't forget to remove it.
     *
     * Seems, you forgot to remove it. All silly devices
     * seems to set IFF_PROMISC.
     */
    else if (1 /*dev->flags&IFF_PROMISC */ ) {
        if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr)))
            skb->pkt_type = PACKET_OTHERHOST;
    }

    /*
     * Some variants of DSA tagging don't have an ethertype field
     * at all, so we check here whether one of those tagging
     * variants has been configured on the receiving interface,
     * and if so, set skb->protocol without looking at the packet.
     */
    if (netdev_uses_dsa_tags(dev))
        return htons(ETH_P_DSA);
    if (netdev_uses_trailer_tags(dev))
        return htons(ETH_P_TRAILER);

    if (ntohs(eth->h_proto) >= 1536)
        return eth->h_proto;

    rawp = skb->data;

    /*
     * This is a magic hack to spot IPX packets. Older Novell breaks
     * the protocol design and runs IPX over 802.3 without an 802.2 LLC
     * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
     * won't work for fault tolerant netware but does for the rest.
     */
    if (*(unsigned short *)rawp == 0xFFFF)
        return htons(ETH_P_802_3);

    /*
     * Real 802.2 LLC
     */
    return htons(ETH_P_802_2);
}
EXPORT_SYMBOL(eth_type_trans);
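The ntohs(eth->h_proto) >= 1536 test is the classic DIX-versus-802.3 disambiguation: a type/length value of 1536 (0x0600) or above is an EtherType, anything smaller is an 802.3 length field. A stand-alone sketch of the same test (user-space C; newer kernels name the constant ETH_P_802_3_MIN, but I define it locally here):

#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_802_3_MIN 0x0600  /* 1536: smallest valid EtherType */

/* h_proto_be is the 16-bit type/length field exactly as it appears on the wire */
static int is_ethertype(uint16_t h_proto_be)
{
    return ntohs(h_proto_be) >= ETH_P_802_3_MIN;
}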
static int ip_local_deliver_finish(struct sk_buff *skb)
{
    struct net *net = dev_net(skb->dev);

    __skb_pull(skb, ip_hdrlen(skb));

    /* Point into the IP datagram, just past the header. */
    skb_reset_transport_header(skb);

    rcu_read_lock();
    {
        int protocol = ip_hdr(skb)->protocol;
        int hash, raw;
        const struct net_protocol *ipprot;

    resubmit:
        raw = raw_local_deliver(skb, protocol);

        hash = protocol & (MAX_INET_PROTOS - 1);
        ipprot = rcu_dereference(inet_protos[hash]);
        if (ipprot != NULL) {
            int ret;

            if (!net_eq(net, &init_net) && !ipprot->netns_ok) {
                if (net_ratelimit())
                    printk("%s: proto %d isn't netns-ready\n",
                           __func__, protocol);
                kfree_skb(skb);
                goto out;
            }

            if (!ipprot->no_policy) {
                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                    kfree_skb(skb);
                    goto out;
                }
                nf_reset(skb);
            }
            ret = ipprot->handler(skb);
            if (ret < 0) {
                protocol = -ret;
                goto resubmit;
            }
            IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
        } else {
            if (!raw) {
                if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                    IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
                    icmp_send(skb, ICMP_DEST_UNREACH,
                              ICMP_PROT_UNREACH, 0);
                }
            } else
                IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
            kfree_skb(skb);
        }
    }
 out:
    rcu_read_unlock();
    return 0;
}
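The inet_protos[] table indexed above is populated at boot by each transport protocol. A sketch of the 2.6.32-era TCP registration, abbreviated from net/ipv4/af_inet.c (the gso/gro hooks and err_handler are elided here):

static const struct net_protocol tcp_protocol = {
    .handler   = tcp_v4_rcv,   /* invoked above as ipprot->handler(skb) */
    .no_policy = 1,            /* skips the xfrm4_policy_check() above */
    .netns_ok  = 1,
    /* ... err_handler, gso/gro hooks elided ... */
};

/* at AF_INET initialization time: */
inet_add_protocol(&tcp_protocol, IPPROTO_TCP);  /* lands in slot IPPROTO_TCP & (MAX_INET_PROTOS - 1) */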
int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                        struct tcphdr *th, unsigned len)
{
    struct tcp_sock *tp = tcp_sk(sk);
    int res;

    /*
     * Header prediction.
     * The code loosely follows the one in the famous
     * "30 instruction TCP receive" Van Jacobson mail.
     *
     * Van's trick is to deposit buffers into socket queue
     * on a device interrupt, to call tcp_recv function
     * on the receive process context and checksum and copy
     * the buffer to user space. smart...
     *
     * Our current scheme is not silly either but we take the
     * extra cost of the net_bh soft interrupt processing...
     * We do checksum and copy also but from device to kernel.
     */

    tp->rx_opt.saw_tstamp = 0;

    /* pred_flags is 0xS?10 << 16 + snd_wnd
     * if header_prediction is to be made
     * 'S' will always be tp->tcp_header_len >> 2
     * '?' will be 0 for the fast path, otherwise pred_flags is 0 to
     * turn it off (when there are holes in the receive
     * space for instance)
     * PSH flag is ignored.
     */

    if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
        TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
        !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
        int tcp_header_len = tp->tcp_header_len;

        /* Timestamp header prediction: tcp_header_len
         * is automatically equal to th->doff*4 due to pred_flags
         * match.
         */

        /* Check timestamp */
        if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
            /* No? Slow path! */
            if (!tcp_parse_aligned_timestamp(tp, th))
                goto slow_path;

            /* If PAWS failed, check it more carefully in slow path */
            if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
                goto slow_path;

            /* DO NOT update ts_recent here, if checksum fails
             * and timestamp was corrupted part, it will result
             * in a hung connection since we will drop all
             * future packets due to the PAWS test.
             */
        }

        if (len <= tcp_header_len) {
            /* Bulk data transfer: sender */
            if (len == tcp_header_len) {
                /* Predicted packet is in window by definition.
                 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
                 * Hence, check seq<=rcv_wup reduces to:
                 */
                if (tcp_header_len ==
                    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
                    tp->rcv_nxt == tp->rcv_wup)
                    tcp_store_ts_recent(tp);

                /* We know that such packets are checksummed
                 * on entry.
                 */
                tcp_ack(sk, skb, 0);
                __kfree_skb(skb);
                tcp_data_snd_check(sk);
                return 0;
            } else { /* Header too small */
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
                goto discard;
            }
        } else {
            int eaten = 0;
            int copied_early = 0;

            if (tp->copied_seq == tp->rcv_nxt &&
                len - tcp_header_len <= tp->ucopy.len) {
#ifdef CONFIG_NET_DMA
                if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
                    copied_early = 1;
                    eaten = 1;
                }
#endif
                if (tp->ucopy.task == current &&
                    sock_owned_by_user(sk) && !copied_early) {
                    __set_current_state(TASK_RUNNING);

                    if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
                        eaten = 1;
                }
                if (eaten) {
                    /* Predicted packet is in window by definition.
                     * seq == rcv_nxt and rcv_wup <= rcv_nxt.
                     * Hence, check seq<=rcv_wup reduces to:
                     */
                    if (tcp_header_len ==
                        (sizeof(struct tcphdr) +
                         TCPOLEN_TSTAMP_ALIGNED) &&
                        tp->rcv_nxt == tp->rcv_wup)
                        tcp_store_ts_recent(tp);

                    tcp_rcv_rtt_measure_ts(sk, skb);

                    __skb_pull(skb, tcp_header_len);
                    tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                    NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
                }
                if (copied_early)
                    tcp_cleanup_rbuf(sk, skb->len);
            }
            if (!eaten) {
                if (tcp_checksum_complete_user(sk, skb))
                    goto csum_error;

                /* Predicted packet is in window by definition.
                 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
                 * Hence, check seq<=rcv_wup reduces to:
                 */
                if (tcp_header_len ==
                    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
                    tp->rcv_nxt == tp->rcv_wup)
                    tcp_store_ts_recent(tp);

                tcp_rcv_rtt_measure_ts(sk, skb);

                if ((int)skb->truesize > sk->sk_forward_alloc)
                    goto step5;

                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);

                /* Bulk data transfer: receiver */
                __skb_pull(skb, tcp_header_len);
                __skb_queue_tail(&sk->sk_receive_queue, skb);
                skb_set_owner_r(skb, sk);
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
            }

            tcp_event_data_recv(sk, skb);

            if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
                /* Well, only one small jumplet in fast path... */
                tcp_ack(sk, skb, FLAG_DATA);
                tcp_data_snd_check(sk);
                if (!inet_csk_ack_scheduled(sk))
                    goto no_ack;
            }

            if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
                __tcp_ack_snd_check(sk, 0);
no_ack:
#ifdef CONFIG_NET_DMA
            if (copied_early)
                __skb_queue_tail(&sk->sk_async_wait_queue, skb);
            else
#endif
            if (eaten)
                __kfree_skb(skb);
            else
                sk->sk_data_ready(sk, 0);
            return 0;
        }
    }

slow_path:
    if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
        goto csum_error;

    /*
     * Standard slow path.
     */

    res = tcp_validate_incoming(sk, skb, th, 1);
    if (res <= 0)
        return -res;

step5:
    if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
        goto discard;

    tcp_rcv_rtt_measure_ts(sk, skb);

    /* Process urgent data. */
    tcp_urg(sk, skb, th);

    /* step 7: process the segment text */
    tcp_data_queue(sk, skb);

    tcp_data_snd_check(sk);
    tcp_ack_snd_check(sk);
    return 0;

csum_error:
    TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);

discard:
    __kfree_skb(skb);
    return 0;
}
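The pred_flags value compared against the incoming segment's flag word is precomputed whenever the fast path is (re)enabled. In 2.6.32-era kernels the helper in include/net/tcp.h packs the expected header length, the ACK bit, and the send window into one 32-bit word:

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
    tp->pred_flags = htonl((tp->tcp_header_len << 26) |
                           ntohl(TCP_FLAG_ACK) |
                           snd_wnd);
}

A segment takes the fast path only if its doff/flags/window word equals this precomputed value, its sequence number is exactly rcv_nxt, and its ACK does not acknowledge data beyond snd_nxt.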
Now for the transmit path, where each layer pushes its header on:
/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg(). This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless. It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                            gfp_t gfp_mask)
{
    const struct inet_connection_sock *icsk = inet_csk(sk);
    struct inet_sock *inet;
    struct tcp_sock *tp;
    struct tcp_skb_cb *tcb;
    struct tcp_out_options opts;
    unsigned tcp_options_size, tcp_header_size;
    struct tcp_md5sig_key *md5;
    struct tcphdr *th;
    int err;

    BUG_ON(!skb || !tcp_skb_pcount(skb));

    /* If congestion control is doing timestamping, we must
     * take such a timestamp before we potentially clone/copy.
     */
    if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
        __net_timestamp(skb);

    if (likely(clone_it)) {
        if (unlikely(skb_cloned(skb)))
            skb = pskb_copy(skb, gfp_mask);
        else
            skb = skb_clone(skb, gfp_mask);
        if (unlikely(!skb))
            return -ENOBUFS;
    }

    inet = inet_sk(sk);
    tp = tcp_sk(sk);
    tcb = TCP_SKB_CB(skb);
    memset(&opts, 0, sizeof(opts));

    if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
        tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
    else
        tcp_options_size = tcp_established_options(sk, skb, &opts,
                                                   &md5);
    tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

    if (tcp_packets_in_flight(tp) == 0)
        tcp_ca_event(sk, CA_EVENT_TX_START);

    skb_push(skb, tcp_header_size);
    skb_reset_transport_header(skb);
    skb_set_owner_w(skb, sk);

    /* Build TCP header and checksum it. */
    th = tcp_hdr(skb);
    th->source  = inet->inet_sport;
    th->dest    = inet->inet_dport;
    th->seq     = htonl(tcb->seq);
    th->ack_seq = htonl(tp->rcv_nxt);
    *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
                                  tcb->flags);

    if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
        /* RFC1323: The window in SYN & SYN/ACK segments
         * is never scaled.
         */
        th->window = htons(min(tp->rcv_wnd, 65535U));
    } else {
        th->window = htons(tcp_select_window(sk));
    }
    th->check   = 0;
    th->urg_ptr = 0;

    /* The urg_mode check is necessary during a below snd_una win probe */
    if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
        if (before(tp->snd_up, tcb->seq + 0x10000)) {
            th->urg_ptr = htons(tp->snd_up - tcb->seq);
            th->urg = 1;
        } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
            th->urg_ptr = 0xFFFF;
            th->urg = 1;
        }
    }

    tcp_options_write((__be32 *)(th + 1), tp, &opts);
    if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
        TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
    /* Calculate the MD5 hash, as we have all we need now */
    if (md5) {
        sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
        tp->af_specific->calc_md5_hash(opts.hash_location,
                                       md5, sk, NULL, skb);
    }
#endif

    icsk->icsk_af_ops->send_check(sk, skb->len, skb);

    if (likely(tcb->flags & TCPCB_FLAG_ACK))
        tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

    if (skb->len != tcp_header_size)
        tcp_event_data_sent(tp, skb, sk);

    if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
        TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);

    err = icsk->icsk_af_ops->queue_xmit(skb, 0);
    if (likely(err <= 0))
        return err;

    tcp_enter_cwr(sk, 1);

    return net_xmit_eval(err);
}
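The one non-obvious store above is *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->flags). Offset 6 in 16-bit units is byte 12 of the TCP header, i.e. the doff/reserved/flags word. As a worked example, assume a 32-byte header (20 fixed bytes plus 12 bytes of options) with ACK and PSH set: tcp_header_size >> 2 == 8 is doff in 32-bit words; 8 << 12 == 0x8000 places it in the top 4 bits; OR-ing in the flag byte (TCPCB_FLAG_ACK | TCPCB_FLAG_PSH == 0x10 | 0x08 == 0x18) yields 0x8018 for the word on the wire.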
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
    struct sock *sk = skb->sk;
    struct inet_sock *inet = inet_sk(sk);
    struct ip_options *opt = inet->opt;
    struct rtable *rt;
    struct iphdr *iph;

    /* Skip all of this if the packet is already routed,
     * f.e. by something like SCTP.
     */
    rt = skb_rtable(skb);
    if (rt != NULL)
        goto packet_routed;

    /* Make sure we can route this packet. */
    rt = (struct rtable *)__sk_dst_check(sk, 0);
    if (rt == NULL) {
        __be32 daddr;

        /* Use correct destination address if we have options. */
        daddr = inet->inet_daddr;
        if (opt && opt->srr)
            daddr = opt->faddr;

        {
            struct flowi fl = { .oif = sk->sk_bound_dev_if,
                                .mark = sk->sk_mark,
                                .nl_u = { .ip4_u =
                                          { .daddr = daddr,
                                            .saddr = inet->inet_saddr,
                                            .tos = RT_CONN_FLAGS(sk) } },
                                .proto = sk->sk_protocol,
                                .flags = inet_sk_flowi_flags(sk),
                                .uli_u = { .ports =
                                           { .sport = inet->inet_sport,
                                             .dport = inet->inet_dport } } };

            /* If this fails, retransmit mechanism of transport layer will
             * keep trying until route appears or the connection times
             * itself out.
             */
            security_sk_classify_flow(sk, &fl);
            if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
                goto no_route;
        }
        sk_setup_caps(sk, &rt->u.dst);
    }
    skb_dst_set(skb, dst_clone(&rt->u.dst));

packet_routed:
    if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
        goto no_route;

    /* OK, we know where to send it, allocate and build IP header. */
    skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
    skb_reset_network_header(skb);
    iph = ip_hdr(skb);
    *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
    if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
        iph->frag_off = htons(IP_DF);
    else
        iph->frag_off = 0;
    iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
    iph->protocol = sk->sk_protocol;
    iph->saddr    = rt->rt_src;
    iph->daddr    = rt->rt_dst;
    /* Transport layer set skb->h.foo itself. */

    if (opt && opt->optlen) {
        iph->ihl += opt->optlen >> 2;
        ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
    }

    ip_select_ident_more(iph, &rt->u.dst, sk,
                         (skb_shinfo(skb)->gso_segs ?: 1) - 1);

    skb->priority = sk->sk_priority;
    skb->mark = sk->sk_mark;

    return ip_local_out(skb);

no_route:
    IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
    kfree_skb(skb);
    return -EHOSTUNREACH;
}
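A similarly packed store appears here: *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)) writes the first two bytes of the IPv4 header in one go. Working it through: version = 4 goes in the top nibble (4 << 12 == 0x4000), IHL = 5 (a 20-byte header, in 32-bit words) in the next nibble (5 << 8 == 0x0500), and the TOS byte fills the low 8 bits, giving the familiar 0x45 first byte. When IP options are present, iph->ihl is then bumped by opt->optlen >> 2 just afterwards.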
static inline int ip_finish_output2(struct sk_buff *skb)
{
    struct dst_entry *dst = skb_dst(skb);
    struct rtable *rt = (struct rtable *)dst;
    struct net_device *dev = dst->dev;
    unsigned int hh_len = LL_RESERVED_SPACE(dev);

    if (rt->rt_type == RTN_MULTICAST) {
        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
    } else if (rt->rt_type == RTN_BROADCAST)
        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

    /* Be paranoid, rather than too clever. */
    if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
        struct sk_buff *skb2;

        skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
        if (skb2 == NULL) {
            kfree_skb(skb);
            return -ENOMEM;
        }
        if (skb->sk)
            skb_set_owner_w(skb2, skb->sk);
        kfree_skb(skb);
        skb = skb2;
    }

    if (dst->hh)
        return neigh_hh_output(dst->hh, skb);   // fills in the Ethernet header
    else if (dst->neighbour)
        return dst->neighbour->output(skb);     // fills in the Ethernet header

    if (net_ratelimit())
        printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
    kfree_skb(skb);
    return -EINVAL;
}
// Ethernet header fill path #1: copy the cached link-layer header
static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
{
    unsigned seq;
    int hh_len;

    do {
        int hh_alen;

        seq = read_seqbegin(&hh->hh_lock);
        hh_len = hh->hh_len;
        hh_alen = HH_DATA_ALIGN(hh_len);
        memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
    } while (read_seqretry(&hh->hh_lock, seq));

    skb_push(skb, hh_len);
    return hh->hh_output(skb);
}
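The read_seqbegin()/read_seqretry() pair is the standard lockless-reader side of a seqlock: the cached header is copied again if a writer updated hh_data concurrently. A generic sketch of the pattern, with hypothetical shared data rather than the hh cache:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_lock);
static int shared_a, shared_b;   /* hypothetical data protected by the seqlock */

static void demo_write(int a, int b)
{
    write_seqlock(&demo_lock);   /* writers serialize and bump the counter */
    shared_a = a;
    shared_b = b;
    write_sequnlock(&demo_lock);
}

static void demo_read(int *a, int *b)
{
    unsigned seq;

    do {
        seq = read_seqbegin(&demo_lock);      /* snapshot the sequence counter */
        *a = shared_a;                        /* lock-free read of the data */
        *b = shared_b;
    } while (read_seqretry(&demo_lock, seq)); /* retry if a writer interleaved */
}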
// Ethernet header fill path #2: resolve the neighbour, then build the header
int neigh_resolve_output(struct sk_buff *skb)
{
    struct dst_entry *dst = skb_dst(skb);
    struct neighbour *neigh;
    int rc = 0;

    if (!dst || !(neigh = dst->neighbour))
        goto discard;

    __skb_pull(skb, skb_network_offset(skb));

    if (!neigh_event_send(neigh, skb)) {
        int err;
        struct net_device *dev = neigh->dev;

        if (dev->header_ops->cache && !dst->hh) {
            write_lock_bh(&neigh->lock);
            if (!dst->hh)
                neigh_hh_init(neigh, dst, dst->ops->protocol);
            err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                  neigh->ha, NULL, skb->len);
            write_unlock_bh(&neigh->lock);
        } else {
            read_lock_bh(&neigh->lock);
            err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                  neigh->ha, NULL, skb->len);
            read_unlock_bh(&neigh->lock);
        }
        if (err >= 0)
            rc = neigh->ops->queue_xmit(skb);
        else
            goto out_kfree_skb;
    }
out:
    return rc;
discard:
    NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
                  dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
    rc = -EINVAL;
    kfree_skb(skb);
    goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
                                  unsigned len)
{
    if (!dev->header_ops || !dev->header_ops->create)
        return 0;

    return dev->header_ops->create(skb, dev, type, daddr, saddr, len); // calls eth_header() on Ethernet devices
}
int eth_header(struct sk_buff *skb, struct net_device *dev,
               unsigned short type,
               const void *daddr, const void *saddr, unsigned len)
{
    struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);

    if (type != ETH_P_802_3 && type != ETH_P_802_2)
        eth->h_proto = htons(type);
    else
        eth->h_proto = htons(len);

    /*
     * Set the source hardware address.
     */

    if (!saddr)
        saddr = dev->dev_addr;
    memcpy(eth->h_source, saddr, ETH_ALEN);

    if (daddr) {
        memcpy(eth->h_dest, daddr, ETH_ALEN);
        return ETH_HLEN;
    }

    /*
     * Anyway, the loopback-device should never use this function...
     */

    if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
        memset(eth->h_dest, 0, ETH_ALEN);
        return ETH_HLEN;
    }

    return -ETH_HLEN;
}
EXPORT_SYMBOL(eth_header);
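Note the return convention: ETH_HLEN reports the number of bytes pushed, while -ETH_HLEN signals that the 14 header bytes were pushed but the destination address could not be filled in (no daddr given and the device is neither loopback nor NOARP). Callers such as neigh_resolve_output() above treat a negative return as failure via the err >= 0 check; in that path daddr is always neigh->ha, so the header completes once neighbour resolution has succeeded.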