Kafka Java Client Code Examples

Introduction: http://kafka.apache.org

Kafka is a high-throughput distributed publish-subscribe messaging system.

Kafka is the distributed message queue LinkedIn built for log processing. LinkedIn's log data is high-volume but does not require high reliability; it mainly consists of user activity (logins, page views, clicks, shares, likes) and system operational logs (CPU, memory, disk, network, and system/process state).

Many message queue services today guarantee reliable delivery and default to immediate consumption, which makes them unsuitable for offline processing.

Highly reliable delivery is not essential for LinkedIn's logs, so performance can be improved by relaxing reliability guarantees. At the same time, running as a distributed cluster and allowing messages to accumulate in the system lets Kafka support both offline and online log processing.
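In the 0.8 producer API this trade-off is controlled by a single setting, request.required.acks, which the producer example below sets to 1. A minimal sketch of the three levels (the class name and broker address are illustrative, not from the original article):

    import java.util.Properties;

    // Minimal sketch of the 0.8 producer durability knob.
    public class AcksSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("metadata.broker.list", "localhost:9092"); // placeholder address
            //  "0" -> fire-and-forget: no broker acknowledgement (fastest, may lose messages)
            //  "1" -> the partition leader acknowledges the write (used in the example below)
            // "-1" -> the leader waits for all in-sync replicas (slowest, most durable)
            props.put("request.required.acks", "0"); // log-style data can tolerate loss
            System.out.println("acks = " + props.getProperty("request.required.acks"));
        }
    }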

Test Environment

kafka_2.10-0.8.1.1, a cluster of 3 nodes

zookeeper-3.4.5, a single-node instance

Code Examples

Producer Code Example

import java.util.Date;
import java.util.Properties;
import java.util.Random;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

/**
 * For details see: https://cwiki.apache.org/confluence/display/KAFKA/0.8.0+Producer+Example
 * @author Fung
 */
public class ProducerDemo {
    public static void main(String[] args) {
        Random rnd = new Random();
        int events = 100;

        // Set configuration properties
        Properties props = new Properties();
        props.put("metadata.broker.list", "172.168.63.221:9092,172.168.63.233:9092,172.168.63.234:9092");
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // key.serializer.class defaults to serializer.class
        props.put("key.serializer.class", "kafka.serializer.StringEncoder");
        // Optional; if not set, the default partitioner is used
        props.put("partitioner.class", "com.catt.kafka.demo.PartitionerDemo");
        // Enables the acknowledgement mechanism; otherwise sends are
        // fire-and-forget and data may be lost. Valid values are 0, 1, -1; see
        // http://kafka.apache.org/08/configuration.html
        props.put("request.required.acks", "1");
        ProducerConfig config = new ProducerConfig(props);

        // Create the producer
        Producer<String, String> producer = new Producer<String, String>(config);
        // Generate and send messages
        long start = System.currentTimeMillis();
        for (long i = 0; i < events; i++) {
            long runtime = new Date().getTime();
            String ip = "192.168.2." + i; // or: rnd.nextInt(255)
            String msg = runtime + ",www.example.com," + ip;
            // If the topic does not exist it is created automatically,
            // with a default replication-factor of 1 and 1 partition
            KeyedMessage<String, String> data = new KeyedMessage<String, String>(
                    "page_visits", ip, msg);
            producer.send(data);
        }
        System.out.println("Elapsed time: " + (System.currentTimeMillis() - start));
        // Close the producer
        producer.close();
    }
}
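For workloads where throughput matters more than per-message delivery, the 0.8 producer also offers an asynchronous mode that buffers and batches sends. A hedged variant of the producer above (class name, broker address, and batch values are illustrative, not from the original article):

    import java.util.Properties;

    import kafka.javaapi.producer.Producer;
    import kafka.producer.KeyedMessage;
    import kafka.producer.ProducerConfig;

    // Sketch of the 0.8 producer in async mode; values are illustrative.
    public class AsyncProducerDemo {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("metadata.broker.list", "172.168.63.221:9092");
            props.put("serializer.class", "kafka.serializer.StringEncoder");
            props.put("producer.type", "async");        // buffer sends in a background thread
            props.put("batch.num.messages", "200");     // flush after this many messages...
            props.put("queue.buffering.max.ms", "500"); // ...or after this much time

            Producer<String, String> producer =
                    new Producer<String, String>(new ProducerConfig(props));
            for (int i = 0; i < 1000; i++) {
                producer.send(new KeyedMessage<String, String>(
                        "page_visits", "key-" + i, "message-" + i));
            }
            producer.close(); // flushes any buffered messages before exiting
        }
    }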

Consumer Code Example

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

/**
 * For details see: https://cwiki.apache.org/confluence/display/KAFKA/Consumer+Group+Example
 *
 * @author Fung
 */
public class ConsumerDemo {
    private final ConsumerConnector consumer;
    private final String topic;
    private ExecutorService executor;

    public ConsumerDemo(String a_zookeeper, String a_groupId, String a_topic) {
        consumer = Consumer.createJavaConsumerConnector(createConsumerConfig(a_zookeeper, a_groupId));
        this.topic = a_topic;
    }

    public void shutdown() {
        if (consumer != null)
            consumer.shutdown();
        if (executor != null)
            executor.shutdown();
    }

    public void run(int numThreads) {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, new Integer(numThreads));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer
                .createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

        // Launch a thread per stream
        executor = Executors.newFixedThreadPool(numThreads);

        // Create an object to consume the messages
        int threadNumber = 0;
        for (final KafkaStream stream : streams) {
            executor.submit(new ConsumerMsgTask(stream, threadNumber));
            threadNumber++;
        }
    }

    private static ConsumerConfig createConsumerConfig(String a_zookeeper,
            String a_groupId) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);
        props.put("group.id", a_groupId);
        props.put("zookeeper.session.timeout.ms", "400");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");

        return new ConsumerConfig(props);
    }

    public static void main(String[] arg) {
        String[] args = { "172.168.63.221:2188", "group-1", "page_visits", "12" };
        String zooKeeper = args[0];
        String groupId = args[1];
        String topic = args[2];
        int threads = Integer.parseInt(args[3]);

        ConsumerDemo demo = new ConsumerDemo(zooKeeper, groupId, topic);
        demo.run(threads);

        try {
            Thread.sleep(10000);
        } catch (InterruptedException ie) {

        }
        demo.shutdown();
    }
}
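One caveat: shutdown() above stops the connector and the thread pool but does not wait for in-flight message handlers to finish. The Consumer Group Example referenced in the Javadoc adds an awaitTermination step; a sketch of that refinement:

    public void shutdown() {
        if (consumer != null)
            consumer.shutdown();
        if (executor != null) {
            executor.shutdown();
            try {
                // Wait briefly for worker threads to drain before giving up
                if (!executor.awaitTermination(5000, java.util.concurrent.TimeUnit.MILLISECONDS)) {
                    System.out.println("Timed out waiting for consumer threads to shut down");
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }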

Message Handler Class

import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;

public class ConsumerMsgTask implements Runnable {
    private KafkaStream m_stream;
    private int m_threadNumber;

    public ConsumerMsgTask(KafkaStream stream, int threadNumber) {
        m_threadNumber = threadNumber;
        m_stream = stream;
    }

    public void run() {
        // Iterate over the stream; hasNext() blocks until a message arrives
        ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
        while (it.hasNext())
            System.out.println("Thread " + m_threadNumber + ": "
                    + new String(it.next().message()));
        System.out.println("Shutting down Thread: " + m_threadNumber);
    }
}
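The handler decodes each payload by hand with new String(...). Alternatively, the 0.8 high-level consumer can decode for you if you pass decoders when creating the streams; a sketch of the drop-in change to ConsumerDemo.run, after which the streams carry String keys and messages:

    // Requires: import kafka.serializer.StringDecoder;
    // StringDecoder(null) decodes bytes as UTF-8.
    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap,
                    new StringDecoder(null), new StringDecoder(null));
    List<KafkaStream<String, String>> streams = consumerMap.get(topic);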

Partitioner Class Example

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

public class PartitionerDemo implements Partitioner {
    public PartitionerDemo(VerifiableProperties props) {

    }

    @Override
    public int partition(Object obj, int numPartitions) {
        int partition = 0;
        if (obj instanceof String) {
            // For IP-style keys, partition by the last octet
            String key = (String) obj;
            int offset = key.lastIndexOf('.');
            if (offset > 0) {
                partition = Integer.parseInt(key.substring(offset + 1)) % numPartitions;
            }
        } else {
            // Otherwise fall back to the key's string length
            partition = obj.toString().length() % numPartitions;
        }

        return partition;
    }
}
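Taken together with the producer above, this partitioner makes message placement deterministic: IP-style keys land on the partition given by their last octet modulo the partition count. A quick check (the PartitionerCheck class is hypothetical, for illustration only):

    // Hypothetical quick check of PartitionerDemo's key-to-partition mapping.
    public class PartitionerCheck {
        public static void main(String[] args) {
            // The constructor ignores its properties argument, so null is safe here.
            PartitionerDemo p = new PartitionerDemo(null);
            // IP-style keys: the last octet modulo the partition count decides placement.
            System.out.println(p.partition("192.168.2.5", 3)); // 5 % 3 = 2
            System.out.println(p.partition("192.168.2.9", 3)); // 9 % 3 = 0
            // Other keys fall back to string length modulo partition count.
            System.out.println(p.partition("somekey", 3));     // 7 % 3 = 1
        }
    }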

References

https://cwiki.apache.org/confluence/display/KAFKA/Index

https://kafka.apache.org/

Original article: http://my.oschina.net/cloudcoder/blog/299215
