This post describes how to read data from Kafka with Flume, using a custom Flume source.

Code:

/*******************************************************************************
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *  
 * http://www.apache.org/licenses/LICENSE-2.0
 *  
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *******************************************************************************/
package org.apache.flume.source.kafka;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

import org.apache.flume.*;
import org.apache.flume.conf.Configurable;
import org.apache.flume.conf.ConfigurationException;
import org.apache.flume.event.SimpleEvent;
import org.apache.flume.source.AbstractSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A Flume Source which reads messages from Kafka. The original author reports using it in
 * production, where a single source can read over 100k messages per second from Kafka.<p>
 * <tt>zookeeper.connect: </tt> the ZooKeeper connection string used by Kafka.<p>
 * <tt>topic: </tt> the Kafka topic to read from.<p>
 * <tt>group.id: </tt> the consumer group id.<p>
 */
public class KafkaSource extends AbstractSource implements Configurable, PollableSource {
    private static final Logger log = LoggerFactory.getLogger(KafkaSource.class);
    private ConsumerConnector consumer;
    private ConsumerIterator<byte[], byte[]> it;
    private String topic;
    
    public Status process() throws EventDeliveryException {
        List<Event> eventList = new ArrayList<Event>();
        MessageAndMetadata<byte[], byte[]> message;
        Event event;
        Map<String, String> headers;
        String strMessage;
        try {
            // hasNext() blocks until a message arrives unless consumer.timeout.ms
            // is set in the Kafka consumer properties.
            if (it.hasNext()) {
                message = it.next();
                event = new SimpleEvent();
                headers = new HashMap<String, String>();
                headers.put("timestamp", String.valueOf(System.currentTimeMillis()));

                // Prepend the current timestamp to the raw Kafka payload.
                strMessage = String.valueOf(System.currentTimeMillis()) + "|" + new String(message.message());
                log.debug("Message: {}", strMessage);

                event.setBody(strMessage.getBytes());
                //event.setBody(message.message());   // use this instead to forward the payload unchanged
                event.setHeaders(headers);
                eventList.add(event);
            }
            getChannelProcessor().processEventBatch(eventList);
            return Status.READY;
        } catch (Exception e) {
            log.error("KafkaSource EXCEPTION, {}", e.getMessage());
            return Status.BACKOFF;
        }
    }

    public void configure(Context context) {
        topic = context.getString("topic");
        if (topic == null) {
            throw new ConfigurationException("Kafka topic must be specified.");
        }
        try {
            this.consumer = KafkaSourceUtil.getConsumer(context);
        } catch (IOException e) {
            // Fail fast: continuing with a null consumer would NPE below.
            throw new ConfigurationException("Unable to create Kafka consumer: " + e.getMessage(), e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new ConfigurationException("Interrupted while creating Kafka consumer.", e);
        }
        // Request a single stream for the topic; its iterator is what process() consumes.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, Integer.valueOf(1));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        if (consumerMap == null) {
            throw new ConfigurationException("topicCountMap is null");
        }
        List<KafkaStream<byte[], byte[]>> topicList = consumerMap.get(topic);
        if (topicList == null || topicList.isEmpty()) {
            throw new ConfigurationException("topicList is null or empty");
        }
        KafkaStream<byte[], byte[]> stream = topicList.get(0);
        it = stream.iterator();
    }

    @Override
    public synchronized void stop() {
        consumer.shutdown();
        super.stop();
    }

}
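
To sanity-check the source without deploying a full agent, you can wire it to a MemoryChannel by hand. The harness below is a minimal sketch, not part of the original post: the class name KafkaSourceSmokeTest is invented for illustration, it assumes the ZooKeeper address and topic from the configuration file shown later are reachable, and it uses only standard Flume 1.4 APIs (MemoryChannel, ReplicatingChannelSelector, ChannelProcessor, Configurables).

package org.apache.flume.source.kafka;

import java.util.Collections;

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.channel.ChannelProcessor;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.channel.ReplicatingChannelSelector;
import org.apache.flume.conf.Configurables;

// Hypothetical smoke test, not from the original post.
public class KafkaSourceSmokeTest {
    public static void main(String[] args) throws Exception {
        // A memory channel with default capacity for the source to write into.
        Channel channel = new MemoryChannel();
        Configurables.configure(channel, new Context());

        // Route all events from the source to that single channel.
        ReplicatingChannelSelector selector = new ReplicatingChannelSelector();
        selector.setChannels(Collections.singletonList(channel));

        KafkaSource source = new KafkaSource();
        source.setChannelProcessor(new ChannelProcessor(selector));

        // Same settings as in flume-kafka-file.properties below (assumed reachable).
        Context context = new Context();
        context.put("zookeeper.connect", "node3:2181,node4:2181,node5:2181");
        context.put("topic", "kkt-test-topic");
        context.put("group.id", "test");
        source.configure(context);
        source.start();

        // Pull one batch, then read the event back out of the channel.
        source.process();
        Transaction tx = channel.getTransaction();
        tx.begin();
        Event event = channel.take();
        if (event != null) {
            System.out.println(new String(event.getBody()));
        }
        tx.commit();
        tx.close();
        source.stop();
    }
}

Since it.hasNext() blocks until a message arrives, process() can wait indefinitely on an idle topic; setting consumer.timeout.ms in the source properties makes the iterator throw a ConsumerTimeoutException instead, which process() catches and turns into Status.BACKOFF.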

/*******************************************************************************
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *  
 * http://www.apache.org/licenses/LICENSE-2.0
 *  
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *******************************************************************************/
package org.apache.flume.source.kafka;

import java.io.IOException;
import java.util.Map;
import java.util.Properties;

import com.google.common.collect.ImmutableMap;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.javaapi.consumer.ConsumerConnector;

import org.apache.flume.Context;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class KafkaSourceUtil {
    private static final Logger log = LoggerFactory.getLogger(KafkaSourceUtil.class);

    /**
     * Copies every parameter of the source definition into a Properties object
     * for the Kafka consumer, skipping Flume's own "type" and "channel" keys.
     */
    public static Properties getKafkaConfigProperties(Context context) {
        log.info("context={}", context.toString());
        Properties props = new Properties();
        ImmutableMap<String, String> contextMap = context.getParameters();
        for (Map.Entry<String, String> entry : contextMap.entrySet()) {
            String key = entry.getKey();
            if (!key.equals("type") && !key.equals("channel")) {
                props.setProperty(key, entry.getValue());
                log.info("key={},value={}", key, entry.getValue());
            }
        }
        return props;
    }

    public static ConsumerConnector getConsumer(Context context) throws IOException, InterruptedException {
        ConsumerConfig consumerConfig = new ConsumerConfig(getKafkaConfigProperties(context));
        return Consumer.createJavaConsumerConnector(consumerConfig);
    }
}
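
For reference, everything configure() and KafkaSourceUtil set up boils down to the standalone high-level consumer sketched below. It uses the same kafka.consumer API calls as the source; zookeeper.connect and group.id are the two properties the Kafka 0.8 high-level consumer requires, and the host and topic values are copied from the configuration file that follows (the class name PlainHighLevelConsumer is invented here for illustration).

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

// Illustrative standalone consumer; values taken from the config file below.
public class PlainHighLevelConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("zookeeper.connect", "node3:2181,node4:2181,node5:2181");
        props.setProperty("group.id", "test");

        ConsumerConnector consumer =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // One stream for the topic, exactly as KafkaSource.configure() requests.
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                consumer.createMessageStreams(Collections.singletonMap("kkt-test-topic", 1));

        ConsumerIterator<byte[], byte[]> it = streams.get("kkt-test-topic").get(0).iterator();
        while (it.hasNext()) {
            System.out.println(new String(it.next().message()));
        }
    }
}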

Configuration file (/etc/flume/conf/flume-kafka-file.properties):

agent_log.sources = kafka0
agent_log.channels = ch0
agent_log.sinks = sink0

agent_log.sources.kafka0.channels = ch0
agent_log.sinks.sink0.channel = ch0

agent_log.sources.kafka0.type = org.apache.flume.source.kafka.KafkaSource
agent_log.sources.kafka0.zookeeper.connect = node3:2181,node4:2181,node5:2181
agent_log.sources.kafka0.topic = kkt-test-topic
agent_log.sources.kafka0.group.id = test

agent_log.channels.ch0.type = memory
agent_log.channels.ch0.capacity = 2048
agent_log.channels.ch0.transactionCapacity = 1000

agent_log.sinks.sink0.type=file_roll
agent_log.sinks.sink0.sink.directory=/data/flumeng/data/test
agent_log.sinks.sink0.sink.rollInterval=300

Startup script:

sudo su -l -s /bin/bash flume -c '/usr/lib/flume/bin/flume-ng agent --conf /etc/flume/conf --conf-file /etc/flume/conf/flume-kafka-file.properties -name agent_log -Dflume.root.logger=INFO,console'

Note: the timestamp-prepending code in process() (the lines that build strMessage) adds a timestamp in front of the original message payload.

Versions: flume-1.4.0.2.1.1.0 + kafka_2.8.0-0.8.0

Reference: https://github.com/baniuyao/flume-kafka

Libraries used to compile:

flume-ng-configuration-1.4.0.2.1.1.0-385
flume-ng-core-1.4.0.2.1.1.0-385
flume-ng-sdk-1.4.0.2.1.1.0-385
flume-tools-1.4.0.2.1.1.0-385
guava-11.0.2
kafka_2.8.0-0.8.0
log4j-1.2.15
scala-compiler
scala-library
slf4j-api-1.6.1
slf4j-log4j12-1.6.1
zkclient-0.3
zookeeper-3.3.4
