HashPayloadPcapReader

package net.ripe.hadoop.pcap;

import java.io.DataInputStream;
import java.io.IOException;

import com.google.common.hash.Hashing;

import net.ripe.hadoop.pcap.packet.HashPayloadPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class HashPayloadPcapReader extends PcapReader {
    public HashPayloadPcapReader(DataInputStream is) throws IOException {
        super(is);
    }

    @Override
    protected Packet createPacket() {
        return new HashPayloadPacket();
    }

    @Override
    protected boolean isReassemble() {
        return true;
    }

    @Override
    protected boolean isPush() {
        return false;
    }

    @Override
    protected void processPacketPayload(Packet packet, byte[] payload) {
        if (payload.length > 0) {
            packet.put(HashPayloadPacket.PAYLOAD_SHA1_HASH, Hashing.sha1().hashBytes(payload).toString());
            packet.put(HashPayloadPacket.PAYLOAD_SHA256_HASH, Hashing.sha256().hashBytes(payload).toString());
            packet.put(HashPayloadPacket.PAYLOAD_SHA512_HASH, Hashing.sha512().hashBytes(payload).toString());
            packet.put(HashPayloadPacket.PAYLOAD_MD5_HASH, Hashing.md5().hashBytes(payload).toString());
        }
    }
}
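The reader hashes each non-empty payload with Guava's Hashing utilities and stores the digests on the packet as hex strings. Below is a minimal, illustrative sketch of consuming those fields; the class name HashDump and the file name are placeholders, and it assumes PcapReader exposes parsed packets as an Iterable<Packet>, as the RIPE hadoop-pcap PcapReader does.

import java.io.DataInputStream;
import java.io.FileInputStream;

import net.ripe.hadoop.pcap.HashPayloadPcapReader;
import net.ripe.hadoop.pcap.packet.HashPayloadPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class HashDump {
    public static void main(String[] args) throws Exception {
        // "capture.pcap" is a placeholder path
        DataInputStream is = new DataInputStream(new FileInputStream("capture.pcap"));
        HashPayloadPcapReader reader = new HashPayloadPcapReader(is);
        // Iterating the reader assumes PcapReader implements Iterable<Packet> (as in RIPE hadoop-pcap)
        for (Packet packet : reader) {
            Object sha256 = packet.get(HashPayloadPacket.PAYLOAD_SHA256_HASH);
            if (sha256 != null)
                System.out.println(packet.get(Packet.SRC_PORT) + " -> " + packet.get(Packet.DST_PORT) + ": " + sha256);
        }
    }
}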

  

HttpPcapReader

package net.ripe.hadoop.pcap;

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.LinkedList;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.Header;
import org.apache.http.HttpClientConnection;
import org.apache.http.HttpException;
import org.apache.http.HttpRequest;
import org.apache.http.HttpRequestFactory;
import org.apache.http.HttpResponse;
import org.apache.http.HttpResponseFactory;
import org.apache.http.impl.DefaultHttpRequestFactory;
import org.apache.http.impl.DefaultHttpResponseFactory;
import org.apache.http.impl.conn.DefaultClientConnection;
import org.apache.http.impl.io.AbstractSessionInputBuffer;
import org.apache.http.impl.io.AbstractSessionOutputBuffer;
import org.apache.http.impl.io.DefaultHttpRequestParser;
import org.apache.http.impl.io.DefaultHttpResponseParser;
import org.apache.http.io.HttpMessageParser;
import org.apache.http.io.SessionInputBuffer;
import org.apache.http.io.SessionOutputBuffer;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.HttpParams;

import com.google.common.base.Joiner;

import net.ripe.hadoop.pcap.packet.HttpPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class HttpPcapReader extends PcapReader {
    public static final Log LOG = LogFactory.getLog(HttpPcapReader.class);

    public static final int HTTP_PORT = 80;
    public static final String HEADER_PREFIX = "header_";

    private HttpParams params = new BasicHttpParams();
    private HttpRequestFactory reqFactory = new DefaultHttpRequestFactory();
    private HttpResponseFactory respFactory = new DefaultHttpResponseFactory();

    public HttpPcapReader(DataInputStream is) throws IOException {
        super(is);
    }

    @Override
    protected Packet createPacket() {
        return new HttpPacket();
    }

    @Override
    protected boolean isReassemble() {
        return true;
    }

    @Override
    protected boolean isPush() {
        return false;
    }

    @Override
    protected void processPacketPayload(Packet packet, final byte[] payload) {
        HttpPacket httpPacket = (HttpPacket)packet;
        Integer srcPort = (Integer)packet.get(Packet.SRC_PORT);
        Integer dstPort = (Integer)packet.get(Packet.DST_PORT);
        if ((HTTP_PORT == srcPort || HTTP_PORT == dstPort) &&
            packet.containsKey(Packet.REASSEMBLED_FRAGMENTS) &&
            PROTOCOL_TCP.equals(packet.get(Packet.PROTOCOL))) {
            final SessionInputBuffer inBuf = new AbstractSessionInputBuffer() {
                {
                    init(new ByteArrayInputStream(payload), 1024, params);
                }

                @Override
                public boolean isDataAvailable(int timeout) throws IOException {
                    return true;
                }
            };
            final SessionOutputBuffer outBuf = new AbstractSessionOutputBuffer() {};

            if (HTTP_PORT == srcPort) {
                HttpMessageParser<HttpResponse> parser = new DefaultHttpResponseParser(inBuf, null, respFactory, params);
                HttpClientConnection conn = new DefaultClientConnection() {
                    {
                        init(inBuf, outBuf, params);
                    }

                    @Override
                    protected void assertNotOpen() {}

                    @Override
                    protected void assertOpen() {}
                };

                try {
                    HttpResponse response = parser.parse();
                    conn.receiveResponseEntity(response);
                    propagateHeaders(httpPacket, response.getAllHeaders());
                } catch (IOException e) {
                    LOG.error("IOException when decoding HTTP response", e);
                } catch (HttpException e) {
                    LOG.error("HttpException when decoding HTTP response", e);
                }
            } else if (HTTP_PORT == dstPort) {
                HttpMessageParser<HttpRequest> parser = new DefaultHttpRequestParser(inBuf, null, reqFactory, params);
                try {
                    HttpRequest request = parser.parse();
                    propagateHeaders(httpPacket, request.getAllHeaders());
                } catch (IOException e) {
                    LOG.error("IOException when decoding HTTP request", e);
                } catch (HttpException e) {
                    LOG.error("HttpException when decoding HTTP request", e);
                }
            }
        }
    }

    private void propagateHeaders(HttpPacket packet, Header[] headers) {
        LinkedList<String> headerKeys = new LinkedList<String>();
        for (Header header : headers) {
            String headerKey = HEADER_PREFIX + header.getName().toLowerCase();
            headerKeys.add(headerKey);
            packet.put(headerKey, header.getValue());
        }
        packet.put(HttpPacket.HTTP_HEADERS, Joiner.on(',').join(headerKeys));
    }
}
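Each parsed request or response header ends up on the packet under a lower-cased key prefixed with header_, and the list of header keys is joined into the HTTP_HEADERS field. A minimal, illustrative sketch of reading those fields follows; the class name HttpHeaderDump and the file name are placeholders, and iterating the reader assumes PcapReader implements Iterable<Packet>, as in the RIPE hadoop-pcap library.

import java.io.DataInputStream;
import java.io.FileInputStream;

import net.ripe.hadoop.pcap.HttpPcapReader;
import net.ripe.hadoop.pcap.packet.Packet;

public class HttpHeaderDump {
    public static void main(String[] args) throws Exception {
        // "http.pcap" is a placeholder path
        DataInputStream is = new DataInputStream(new FileInputStream("http.pcap"));
        HttpPcapReader reader = new HttpPcapReader(is);
        for (Packet packet : reader) {
            // Keys look like "header_user-agent", "header_content-type", ...
            Object userAgent = packet.get(HttpPcapReader.HEADER_PREFIX + "user-agent");
            if (userAgent != null)
                System.out.println(packet.get(Packet.SRC_PORT) + " -> " + packet.get(Packet.DST_PORT) + ": " + userAgent);
        }
    }
}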

DnsPcapReader

package net.ripe.hadoop.pcap;

import java.io.DataInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import net.ripe.hadoop.pcap.packet.DnsPacket;
import net.ripe.hadoop.pcap.packet.Packet;

import org.xbill.DNS.Flags;
import org.xbill.DNS.Header;
import org.xbill.DNS.Message;
import org.xbill.DNS.Opcode;
import org.xbill.DNS.Rcode;
import org.xbill.DNS.Record;
import org.xbill.DNS.Section;

public class DnsPcapReader extends PcapReader {
    public static final int DNS_PORT = 53;

    public DnsPcapReader(DataInputStream is) throws IOException {
        super(is);
    }

    @Override
    protected Packet createPacket() {
        return new DnsPacket();
    }

    @Override
    protected boolean isReassemble() {
        return true;
    }

    @Override
    protected boolean isPush() {
        return false;
    }

    @Override
    protected void processPacketPayload(Packet packet, byte[] payload) {
        DnsPacket dnsPacket = (DnsPacket)packet;

        if (DNS_PORT == (Integer)packet.get(Packet.SRC_PORT) || DNS_PORT == (Integer)packet.get(Packet.DST_PORT)) {
            // TODO Support DNS responses with multiple messages (as used for XFRs)
            if (PROTOCOL_TCP.equals(packet.get(Packet.PROTOCOL)) && payload.length > 2) {
                // First two bytes denote the size of the DNS message, ignore them
                payload = Arrays.copyOfRange(payload, 2, payload.length);
            }

            try {
                Message msg = new Message(payload);
                Header header = msg.getHeader();
                dnsPacket.put(DnsPacket.QUERYID, header.getID());
                dnsPacket.put(DnsPacket.FLAGS, header.printFlags());
                dnsPacket.put(DnsPacket.QR, header.getFlag(Flags.QR));
                dnsPacket.put(DnsPacket.OPCODE, Opcode.string(header.getOpcode()));
                dnsPacket.put(DnsPacket.RCODE, Rcode.string(header.getRcode()));
                dnsPacket.put(DnsPacket.QUESTION, convertRecordToString(msg.getQuestion()));
                dnsPacket.put(DnsPacket.QNAME, convertRecordOwnerToString(msg.getQuestion()));
                dnsPacket.put(DnsPacket.QTYPE, convertRecordTypeToInt(msg.getQuestion()));
                dnsPacket.put(DnsPacket.ANSWER, convertRecordsToStrings(msg.getSectionArray(Section.ANSWER)));
                dnsPacket.put(DnsPacket.AUTHORITY, convertRecordsToStrings(msg.getSectionArray(Section.AUTHORITY)));
                dnsPacket.put(DnsPacket.ADDITIONAL, convertRecordsToStrings(msg.getSectionArray(Section.ADDITIONAL)));
            } catch (Exception e) {
                // If we cannot decode a DNS packet we ignore it
            }
        }
    }

    private String convertRecordToString(Record record) {
        if (record == null)
            return null;
        String recordString = record.toString();
        recordString = normalizeRecordString(recordString);
        return recordString;
    }

    private String convertRecordOwnerToString(Record record) {
        if (record == null)
            return null;
        String ownerString = record.getName().toString();
        ownerString = ownerString.toLowerCase();
        return ownerString;
    }

    private int convertRecordTypeToInt(Record record) {
        if (record == null)
            return -1;
        return record.getType();
    }

    private List<String> convertRecordsToStrings(Record[] records) {
        if (records == null)
            return null;
        ArrayList<String> retVal = new ArrayList<String>(records.length);
        for (Record record : records)
            retVal.add(convertRecordToString(record));
        return retVal;
    }

    protected String normalizeRecordString(String recordString) {
        if (recordString == null)
            return null;
        // Reduce everything that is more than one whitespace to a single whitespace
        recordString = recordString.replaceAll("\\s{2,}", " ");
        // Replace tabs with a single whitespace
        recordString = recordString.replaceAll("\\t{1,}", " ");
        return recordString;
    }
}
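The resulting DnsPacket carries one field per decoded attribute: query id, flags, opcode, rcode, the question (plus its name and type), and the answer, authority, and additional sections as lists of normalized record strings. A minimal, illustrative sketch of reading those fields; the class name DnsDump and the file name are placeholders, and iterating the reader assumes PcapReader implements Iterable<Packet>, as in the RIPE hadoop-pcap library.

import java.io.DataInputStream;
import java.io.FileInputStream;

import net.ripe.hadoop.pcap.DnsPcapReader;
import net.ripe.hadoop.pcap.packet.DnsPacket;
import net.ripe.hadoop.pcap.packet.Packet;

public class DnsDump {
    public static void main(String[] args) throws Exception {
        // "dns.pcap" is a placeholder path
        DataInputStream is = new DataInputStream(new FileInputStream("dns.pcap"));
        DnsPcapReader reader = new DnsPcapReader(is);
        for (Packet packet : reader) {
            Object qname = packet.get(DnsPacket.QNAME);
            if (qname != null)
                // ANSWER holds a List<String>, one normalized string per answer record
                System.out.println(qname + " " + packet.get(DnsPacket.RCODE) + " " + packet.get(DnsPacket.ANSWER));
        }
    }
}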

  
