Flume multi-channel configuration

# Configuration file
a1.sources = r1
a1.sinks = k1 k2
a1.channels = c1 c2
   
# Describe/configure the source


a1.sources.r1.type = avro
a1.sources.r1.bind = slave3
a1.sources.r1.port = 50001
a1.sources.r1.host = slave3
a1.sources.r1.selector.type = replicating
a1.sources.r1.channels = c1 c2 
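The avro source listens on slave3:50001, so some upstream agent (or an avro client) has to deliver events to that address. A minimal sketch of such an upstream agent is shown below; the agent name a0, the exec source, and the log path /var/log/app.log are assumptions for illustration, not part of the original setup.

# Hypothetical upstream agent a0 forwarding events to a1's avro source
a0.sources = r1
a0.sinks = k1
a0.channels = c1
# Assumed example source: tail a local log file
a0.sources.r1.type = exec
a0.sources.r1.command = tail -F /var/log/app.log
a0.sources.r1.channels = c1
# Avro sink pointing at a1's avro source (must match bind/port above)
a0.sinks.k1.type = avro
a0.sinks.k1.hostname = slave3
a0.sinks.k1.port = 50001
a0.sinks.k1.channel = c1
a0.channels.c1.type = memory
a0.channels.c1.capacity = 1000
a0.channels.c1.transactionCapacity = 100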

   
# First sink: write to HDFS
a1.sinks.k1.type = hdfs
a1.sinks.k1.channel = c1
a1.sinks.k1.hdfs.path = /maats5/%{logtype}/logdate=%{date}/
a1.sinks.k1.hdfs.filePrefix = %{logtype}-%{date}
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 1
a1.sinks.k1.hdfs.roundUnit = hour
a1.sinks.k1.hdfs.rollInterval = 180000
a1.sinks.k1.hdfs.rollSize = 134217700


a1.sinks.k1.hdfs.useLocalTimeStamp = true
a1.sinks.k1.hdfs.fileType = DataStream
a1.sinks.k1.hdfs.writeFormat = Text
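The HDFS path and file prefix rely on the header substitutions %{logtype} and %{date}, so every event must carry logtype and date headers when it reaches the sink. If the upstream sender does not already set them, a sketch using Flume's static interceptor on the source could look like the following; the values weblog and 20170904 are placeholders, not values from the original post.

# Only needed when the headers are not set by the upstream sender
a1.sources.r1.interceptors = i1 i2
a1.sources.r1.interceptors.i1.type = static
a1.sources.r1.interceptors.i1.key = logtype
a1.sources.r1.interceptors.i1.value = weblog
a1.sources.r1.interceptors.i2.type = static
a1.sources.r1.interceptors.i2.key = date
a1.sources.r1.interceptors.i2.value = 20170904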
# Second sink: write to Kafka

a1.sinks.k2.channel = c2
a1.sinks.k2.type = my.bigdata.KafkaSink2
a1.sinks.k2.kafka.topic = maats1
a1.sinks.k2.kafka.bootstrap.servers = slave3:9092
a1.sinks.k2.kafka.flumeBatchSize = 20
a1.sinks.k2.kafka.producer.acks = 1
a1.sinks.k2.kafka.producer.linger.ms = 1
#a1.sinks.k2.kafka.producer.compression.type = snappy
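my.bigdata.KafkaSink2 is a custom sink class, but the kafka.* properties used here are the same ones read by the Kafka sink bundled with newer Flume releases (1.7+). If a custom class is not required, an equivalent sketch with the stock sink would be:

a1.sinks.k2.channel = c2
a1.sinks.k2.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k2.kafka.topic = maats1
a1.sinks.k2.kafka.bootstrap.servers = slave3:9092
a1.sinks.k2.kafka.flumeBatchSize = 20
a1.sinks.k2.kafka.producer.acks = 1
a1.sinks.k2.kafka.producer.linger.ms = 1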




# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
   
a1.channels.c2.type = memory
a1.channels.c2.capacity = 1000
a1.channels.c2.transactionCapacity = 100
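To run the agent, save the full configuration to a file and start flume-ng with the agent name a1 used throughout the file; the file name multi-channel.conf below is just a placeholder.

bin/flume-ng agent \
  --conf conf \
  --conf-file conf/multi-channel.conf \
  --name a1 \
  -Dflume.root.logger=INFO,console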
Original article: https://www.cnblogs.com/rocky-AGE-24/p/7469297.html