SpringCloud实践分享-日志收集Kafka-ELK
Once microservice applications are containerized, querying their logs becomes a real problem. Container management tools such as Portainer make it easy enough to view the logs of any single container, but once the number of containers grows, and especially when an application runs multiple instances, finding the right log becomes a headache. So we adopted a Kafka-Logstash-Elasticsearch-Kibana pipeline to handle logs.
First, an outline of my log collection approach:
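Each application instance writes its log events straight to Kafka through a log4j appender; Logstash consumes them and ships them to Elasticsearch, where Kibana queries them:

application (log4j KafkaLog4jAppender) -> Kafka -> Logstash -> Elasticsearch -> Kibana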
The benefits of this approach:
1) Almost no application changes are needed; a bit of configuration work completes the log collection.
2) Writing log output into Kafka has practically no bottleneck, and Kafka scales well and easily.
3) Collected logs arrive in near real time.
4) The pipeline as a whole scales well, and bottlenecks are easy to remove; for example, Elasticsearch sharding and scaling are straightforward (see the sketch after this list).
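To illustrate point 4: the per-day indices that Logstash creates (api-admin-YYYY.MM.dd) can be pre-sharded with an index template, so each new day's index spreads across the cluster automatically. A minimal sketch using the legacy _template API of that era's Elasticsearch (newer versions use index_patterns / composable templates); the shard and replica counts here are illustrative, not from the original setup:

curl -XPUT 'http://elasticsearch1:9200/_template/api-admin' -H 'Content-Type: application/json' -d '
{
  "template": "api-admin-*",
  "settings": {
    "number_of_shards": 3,
    "number_of_replicas": 1
  }
}'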
Application-side configuration
In the application, we only need to configure log4j to send its log output to Kafka. The example below logs both to the console and to Kafka. Note that the Kafka output requires an appender class; kafka.producer.KafkaLog4jAppender is generally not on the classpath, so I wrote the class locally (KafkaLog4jAppender.java) and pointed the configuration at its package path.
log4j.rootLogger=INFO,console,kafka

# output to Kafka
# (the stock class name would be kafka.producer.KafkaLog4jAppender)
log4j.appender.kafka=com.yang.config.KafkaLog4jAppender
log4j.appender.kafka.topic=api-admin
# Kafka broker addresses go here
log4j.appender.kafka.brokerList=192.0.0.2:9092,192.0.0.3:9092,192.0.0.4:9092
log4j.appender.kafka.compressionType=none
log4j.appender.kafka.syncSend=true
log4j.appender.kafka.layout=org.apache.log4j.PatternLayout
log4j.appender.kafka.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L %% - %m%n

# output to console
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n
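As an aside: if you would rather not maintain this class yourself, Kafka ships an equivalent appender, org.apache.kafka.log4jappender.KafkaLog4jAppender, in the kafka-log4j-appender Maven artifact (groupId org.apache.kafka); the hand-written version below follows the same structure.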
KafkaLog4jAppender.java:

package com.yang.config;

import java.util.Date;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.helpers.LogLog;
import org.apache.log4j.spi.LoggingEvent;

public class KafkaLog4jAppender extends AppenderSkeleton {

    private static final String BOOTSTRAP_SERVERS_CONFIG = ProducerConfig.BOOTSTRAP_SERVERS_CONFIG;
    private static final String COMPRESSION_TYPE_CONFIG = ProducerConfig.COMPRESSION_TYPE_CONFIG;
    private static final String ACKS_CONFIG = ProducerConfig.ACKS_CONFIG;
    private static final String RETRIES_CONFIG = ProducerConfig.RETRIES_CONFIG;
    private static final String KEY_SERIALIZER_CLASS_CONFIG = ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG;
    private static final String VALUE_SERIALIZER_CLASS_CONFIG = ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG;
    private static final String SECURITY_PROTOCOL = CommonClientConfigs.SECURITY_PROTOCOL_CONFIG;
    private static final String SSL_TRUSTSTORE_LOCATION = SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG;
    private static final String SSL_TRUSTSTORE_PASSWORD = SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG;
    private static final String SSL_KEYSTORE_TYPE = SslConfigs.SSL_KEYSTORE_TYPE_CONFIG;
    private static final String SSL_KEYSTORE_LOCATION = SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG;
    private static final String SSL_KEYSTORE_PASSWORD = SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG;

    // Values injected by log4j from the appender configuration
    private String brokerList = null;
    private String topic = null;
    private String compressionType = null;
    private String securityProtocol = null;
    private String sslTruststoreLocation = null;
    private String sslTruststorePassword = null;
    private String sslKeystoreType = null;
    private String sslKeystoreLocation = null;
    private String sslKeystorePassword = null;
    private int retries = 0;
    private int requiredNumAcks = Integer.MAX_VALUE;
    private boolean syncSend = false;
    private Producer<byte[], byte[]> producer = null;

    public Producer<byte[], byte[]> getProducer() { return producer; }
    public String getBrokerList() { return brokerList; }
    public void setBrokerList(String brokerList) { this.brokerList = brokerList; }
    public int getRequiredNumAcks() { return requiredNumAcks; }
    public void setRequiredNumAcks(int requiredNumAcks) { this.requiredNumAcks = requiredNumAcks; }
    public int getRetries() { return retries; }
    public void setRetries(int retries) { this.retries = retries; }
    public String getCompressionType() { return compressionType; }
    public void setCompressionType(String compressionType) { this.compressionType = compressionType; }
    public String getTopic() { return topic; }
    public void setTopic(String topic) { this.topic = topic; }
    public boolean getSyncSend() { return syncSend; }
    public void setSyncSend(boolean syncSend) { this.syncSend = syncSend; }
    public String getSslTruststorePassword() { return sslTruststorePassword; }
    public String getSslTruststoreLocation() { return sslTruststoreLocation; }
    public String getSecurityProtocol() { return securityProtocol; }
    public void setSecurityProtocol(String securityProtocol) { this.securityProtocol = securityProtocol; }
    public void setSslTruststoreLocation(String sslTruststoreLocation) { this.sslTruststoreLocation = sslTruststoreLocation; }
    public void setSslTruststorePassword(String sslTruststorePassword) { this.sslTruststorePassword = sslTruststorePassword; }
    public void setSslKeystorePassword(String sslKeystorePassword) { this.sslKeystorePassword = sslKeystorePassword; }
    public void setSslKeystoreType(String sslKeystoreType) { this.sslKeystoreType = sslKeystoreType; }
    public void setSslKeystoreLocation(String sslKeystoreLocation) { this.sslKeystoreLocation = sslKeystoreLocation; }
    public String getSslKeystoreLocation() { return sslKeystoreLocation; }
    public String getSslKeystoreType() { return sslKeystoreType; }
    public String getSslKeystorePassword() { return sslKeystorePassword; }

    // Called by log4j after all setters have run; validates the config and builds the producer
    @Override
    public void activateOptions() {
        // check for config parameter validity
        Properties props = new Properties();
        if (brokerList != null)
            props.put(BOOTSTRAP_SERVERS_CONFIG, brokerList);
        if (props.isEmpty())
            throw new ConfigException("The bootstrap servers property should be specified");
        if (topic == null)
            throw new ConfigException("Topic must be specified by the Kafka log4j appender");
        if (compressionType != null)
            props.put(COMPRESSION_TYPE_CONFIG, compressionType);
        if (requiredNumAcks != Integer.MAX_VALUE)
            props.put(ACKS_CONFIG, Integer.toString(requiredNumAcks));
        if (retries > 0)
            props.put(RETRIES_CONFIG, retries);
        if (securityProtocol != null && sslTruststoreLocation != null && sslTruststorePassword != null) {
            props.put(SECURITY_PROTOCOL, securityProtocol);
            props.put(SSL_TRUSTSTORE_LOCATION, sslTruststoreLocation);
            props.put(SSL_TRUSTSTORE_PASSWORD, sslTruststorePassword);
            if (sslKeystoreType != null && sslKeystoreLocation != null && sslKeystorePassword != null) {
                props.put(SSL_KEYSTORE_TYPE, sslKeystoreType);
                props.put(SSL_KEYSTORE_LOCATION, sslKeystoreLocation);
                props.put(SSL_KEYSTORE_PASSWORD, sslKeystorePassword);
            }
        }
        props.put(KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put(VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
        this.producer = getKafkaProducer(props);
        LogLog.debug("Kafka producer connected to " + brokerList);
        LogLog.debug("Logging for topic: " + topic);
    }

    protected Producer<byte[], byte[]> getKafkaProducer(Properties props) {
        return new KafkaProducer<byte[], byte[]>(props);
    }

    // One Kafka record per logging event; optionally blocks until the broker acks
    @Override
    protected void append(LoggingEvent event) {
        String message = subAppend(event);
        LogLog.debug("[" + new Date(event.getTimeStamp()) + "]" + message);
        Future<RecordMetadata> response = producer.send(
                new ProducerRecord<byte[], byte[]>(topic, message.getBytes()));
        if (syncSend) {
            try {
                response.get();
            } catch (InterruptedException ex) {
                throw new RuntimeException(ex);
            } catch (ExecutionException ex) {
                throw new RuntimeException(ex);
            }
        }
    }

    private String subAppend(LoggingEvent event) {
        return (this.layout == null) ? event.getRenderedMessage() : this.layout.format(event);
    }

    public void close() {
        if (!this.closed) {
            this.closed = true;
            producer.close();
        }
    }

    public boolean requiresLayout() {
        return true;
    }
}
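Before wiring up Logstash, it is worth confirming that log lines actually reach the topic. The console consumer that ships with Kafka works as a quick check (broker address and topic here match the example configuration above):

bin/kafka-console-consumer.sh --bootstrap-server 192.0.0.2:9092 --topic api-admin --from-beginning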
Logstash configuration

We may need to spin up Logstash instances quickly, so we deploy it with Docker, which gives us fast startup and easy scaling.
The image is simply the official Logstash Docker image. We demonstrate the simplest way to start it here; the image documentation covers several other Docker deployment methods.
In the docker start command below, the input block specifies the Kafka cluster addresses and the topic; the output block specifies the Elasticsearch cluster addresses, the index name, and a few Elasticsearch parameters.
# start command
docker run -it -d logstash -e 'input {
  kafka {
    bootstrap_servers => "kafkaIp1:9092,kafkaIp2:9092"
    topics => ["api-admin"]
  }
}
output {
  elasticsearch {
    hosts => ["elasticsearch1:9200","elasticsearch2:9200","elasticsearch3:9200"]
    index => "api-admin-%{+YYYY.MM.dd}"
    flush_size => 20000
    idle_flush_time => 10
    template_overwrite => true
  }
}'

To start it in Marathon, just put logstash -e 'input {} output {}' (with the same pipeline definition) in the Command field. One more note: container orchestration platforms (Mesos/Marathon, Kubernetes) may require a health check; Logstash listens for that on port 9600, as shown below.
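For such a health check, the Logstash monitoring API on port 9600 can be probed directly. A minimal sketch, assuming the container maps port 9600 to the host:

curl -s http://localhost:9600/?pretty

A healthy instance answers with a small JSON document describing the node (host, version, http_address, and so on); a non-200 response or no answer means the instance is not ready.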
Reposted from: https://my.oschina.net/yangzijing/blog/1827927