golang logrus custom hooks: a log rotation hook, an email alert hook, and a Kafka hook.

logrus Hook Analysis

  • The logrus Hook interface definition is very simple, as shown below:
package logrus

// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers, you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
	Levels() []Level
	Fire(*Entry) error
}

// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook

// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
	for _, level := range hook.Levels() {
		hooks[level] = append(hooks[level], hook)
	}
}

// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
	for _, hook := range hooks[level] {
		if err := hook.Fire(entry); err != nil {
			return err
		}
	}

	return nil
}

You only need to implement this interface:

type Hook interface {
	Levels() []Level
	Fire(*Entry) error
}

and the logrus framework will walk the registered hooks and call each one's Fire method.
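
For example, a minimal hand-rolled hook that echoes warning-and-above entries to stderr could look like this (a sketch; the hook name and output target are my own, not from the article):

// stderr_hook.go (illustrative sketch)
package logger

import (
	"fmt"
	"os"

	"github.com/sirupsen/logrus"
)

type StderrHook struct{}

//Levels tells logrus which levels this hook fires on
func (h *StderrHook) Levels() []logrus.Level {
	return []logrus.Level{logrus.WarnLevel, logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

//Fire is invoked once per matching entry
func (h *StderrHook) Fire(entry *logrus.Entry) error {
	line, err := entry.String() //serialize with the logger's formatter
	if err != nil {
		return err
	}
	_, err = fmt.Fprint(os.Stderr, line)
	return err
}

Register it with logger.AddHook(&StderrHook{}) and every Warn-or-above entry passes through Fire.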

Getting a Logger Instance

// log_hook.go
package logger

import (
	"fmt"
	"github.com/sirupsen/logrus"
	"library/util/constant"
	"os"
)


//builds a logrus logger for the given module, with our custom hook attached
func getLogger(module string) *logrus.Logger {
	//instantiate
	logger := logrus.New()
	//set output
	logger.Out = os.Stdout
	//set log level
	logger.SetLevel(logrus.DebugLevel)
	//our custom writer handles the files; the hook wiring is delegated to lfshook
	logger.AddHook(newLogrusHook(constant.GetLogPath(), module))

	//set log format
	logger.SetFormatter(&logrus.JSONFormatter{
		TimestampFormat: "2006-01-02 15:04:05",
	})
	return logger
}

//ensure each call gets its own logger, so the underlying files are unique
func GetNewFieldLoggerContext(module, appField string) *logrus.Entry {
	logger := getLogger(module)
	return logger.WithFields(logrus.Fields{
		"app": appField,
	})
}

//subscribe to alert logs
func SubscribeLog(entry *logrus.Entry, subMap SubscribeMap) {
	logger := entry.Logger
	logger.AddHook(newSubScribeHook(subMap))
	fmt.Println("log subscription registered")
}

constant.GetLogPath() can be replaced with your own log output directory; on my Mac, for example, it is /usr/local/log.
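
For reference, a minimal sketch of what that constant package might contain (the values below are placeholders, not the original project's):

// constant.go (hypothetical sketch)
package constant

//GetLogPath returns the root directory for log files (placeholder path)
func GetLogPath() string {
	return "/usr/local/log"
}

//KafkaLogElkTopic is the topic the Kafka hook below publishes to (placeholder value)
const KafkaLogElkTopic = "log-elk"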

Log Rotation Hook

  • Code
// writer.go
package logger

import (
	"fmt"
	"github.com/pkg/errors"
	"io"
	"os"
	"path/filepath"
	"sync"
	"time"
)

type LogWriter struct {
	logDir           string        //root directory for log files
	module           string        //module name
	curFileName      string        //the file name currently assigned
	curBaseFileName  string        //the file currently in use
	truncateDuration time.Duration //rotation interval
	mutex            sync.RWMutex
	outFh            *os.File
}

func (w *LogWriter) Write(p []byte) (n int, err error) {
	w.mutex.Lock()
	defer w.mutex.Unlock()
	out, err := w.getWriter()
	if err != nil {
		return 0, errors.Wrap(err, "failed to fetch target io.Writer")
	}
	return out.Write(p)
}

func (w *LogWriter) getFileName() string {
	base := time.Now().Truncate(w.truncateDuration)
	return fmt.Sprintf("%s/%s/%s_%s", w.logDir, base.Format("2006-01-02"), w.module, base.Format("15"))
}

func (w *LogWriter) getWriter() (io.Writer, error) {
	//compute the file name for the current time slice
	fileName := w.getFileName()
	//if the slice has not rolled over yet, keep using the open handle
	if fileName == w.curBaseFileName && w.outFh != nil {
		return w.outFh, nil
	}

	dirname := filepath.Dir(fileName)
	if err := os.MkdirAll(dirname, 0755); err != nil {
		return nil, errors.Wrapf(err, "failed to create directory %s", dirname)
	}

	fileHandler, err := os.OpenFile(fileName, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to open file %s", fileName)
	}
	//swap in the new handle and close the old one
	if w.outFh != nil {
		w.outFh.Close()
	}
	w.outFh = fileHandler
	w.curBaseFileName = fileName
	w.curFileName = fileName

	return fileHandler, nil
}

func New(logPath, module string, duration time.Duration) *LogWriter {
	return &LogWriter{
		logDir:           logPath,
		module:           module,
		truncateDuration: duration,
		curFileName:      "",
		curBaseFileName:  "",
	}
}
// hook.go
package logger

import (
	"github.com/rifflock/lfshook"
	"github.com/sirupsen/logrus"
	"time"
)
func newLogrusHook(logPath, module string) logrus.Hook {
	writer := New(logPath, module, time.Hour*2)

	lfsHook := lfshook.NewHook(lfshook.WriterMap{
		logrus.DebugLevel: writer,
		logrus.InfoLevel:  writer,
		logrus.WarnLevel:  writer,
		logrus.ErrorLevel: writer,
		logrus.FatalLevel: writer,
		logrus.PanicLevel: writer,
	}, &logrus.TextFormatter{DisableColors: true})

	// writer is our io.Writer implementation that rolls log files over time;
	// lfshook calls its Write method whenever a hooked entry fires
	return lfsHook
}
  • Test code
func TestGetLogger(t *testing.T) {
	lg := GetNewFieldLoggerContext("test","d")
	lg.Logger.Info("????")
}

Analysis

The logger instance holds our custom io.Writer struct. When a hook fires, the Write method is invoked, and the time.Truncate-based slicing logic decides which file to write to, creating a new one when the time slice rolls over.
Note: the code in this article splits directories by day, and within each directory splits module logs every 2 hours. You can adapt it to split by module instead.
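
To see why time.Truncate yields stable 2-hour buckets, here is a small standalone demo (illustrative, not part of the article's code):

// truncate_demo.go (illustrative)
package main

import (
	"fmt"
	"time"
)

func main() {
	d := 2 * time.Hour
	//every instant inside the same 2-hour window truncates to the same base time,
	//so getFileName keeps returning the same path until the window rolls over
	t1 := time.Date(2023, 1, 1, 10, 5, 0, 0, time.UTC)
	t2 := time.Date(2023, 1, 1, 11, 59, 0, 0, time.UTC)
	t3 := time.Date(2023, 1, 1, 12, 1, 0, 0, time.UTC)
	fmt.Println(t1.Truncate(d)) // 2023-01-01 10:00:00 +0000 UTC
	fmt.Println(t2.Truncate(d)) // 2023-01-01 10:00:00 +0000 UTC
	fmt.Println(t3.Truncate(d)) // 2023-01-01 12:00:00 +0000 UTC
}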

Email Alert Hook

  • Code
// subscribeHook.go
package logger

import (
	"fmt"
	"github.com/sirupsen/logrus"
	"library/email"
	"strings"
)

type SubscribeMap map[logrus.Level][]*email.Receiver

type SubscribeHook struct {
	subMap SubscribeMap
}

//a hand-rolled hook this time (the rotation hook above delegated to the third-party lfshook)
func (h *SubscribeHook) Levels() []logrus.Level {
	return logrus.AllLevels
}

func (h *SubscribeHook) Fire(entry *logrus.Entry) error {
	for level, receivers := range h.subMap {
		//level matched: consume the entry
		if level == entry.Level {
			if len(receivers) > 0 {
				email.SendEmail(receivers, fmt.Sprintf("%s:[system log alert]", entry.Level.String()),
					fmt.Sprintf("error content: %s", entry.Message))
			}
		}
	}
	return nil
}
func NewSubscribeMap(level logrus.Level, receiverStr string) SubscribeMap {
	subMap := SubscribeMap{}
	addressList := strings.Split(receiverStr, ";")
	var receivers []*email.Receiver
	for _, address := range addressList {
		receivers = append(receivers, &email.Receiver{Email: address})
	}
	subMap[level] = receivers
	return subMap
}

func newSubScribeHook(subMap SubscribeMap) *SubscribeHook {
	return &SubscribeHook{subMap}
}
// email.go
package email

import (
	"fmt"
	"gopkg.in/gomail.v2"
	"regexp"
)

type Sender struct {
	User      string
	Password  string
	Host      string
	Port      int
	MailTo    []string
	Subject   string
	Content   string
}

type Receiver struct {
	Email    string
}

func (r *Receiver) Check() bool {
	pattern := `\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*` //matches an email address
	reg := regexp.MustCompile(pattern)
	return reg.MatchString(r.Email)
}

//validate the address and register it as the sole recipient
func (s *Sender) NewReceiver(email string) *Receiver {
	rec := &Receiver{Email: email}
	if rec.Check() {
		s.MailTo = []string{email}
		return rec
	}
	fmt.Printf("email check fail [%s]\n", email)
	return nil
}

//validate each address and append the valid ones to the recipient list
func (s *Sender) NewReceivers(receivers []*Receiver) {
	for _, rec := range receivers {
		if rec.Check() {
			s.MailTo = append(s.MailTo, rec.Email)
		} else {
			fmt.Printf("email check fail [%s]\n", rec.Email)
		}
	}
}
// For a 163 mailbox, the password is the authorization code issued when SMTP is enabled
var m  = Sender{User:"[email protected]", Password:"666666666", Host: "smtp.163.com", Port: 465}

func SendEmail(receivers []*Receiver, subject, content string) {
	m.MailTo = nil //reset the recipient list so repeated sends don't accumulate
	m.NewReceivers(receivers)
	m.Subject = subject
	m.Content = content

	e := gomail.NewMessage()
	e.SetHeader("From", e.FormatAddress(m.User, "hengsheng"))
	e.SetHeader("To", m.MailTo...)    //send to multiple recipients
	e.SetHeader("Subject", m.Subject) //set the subject
	e.SetBody("text/html", m.Content) //set the body
	d := gomail.NewDialer(m.Host, m.Port, m.User, m.Password)
	if err := d.DialAndSend(e); err != nil {
		fmt.Printf("error sending email! %s \n", err.Error())
	}
}
Usage

In the same way, at write time, if the entry is an error-level log an email is sent.

o.Logger = logger.GetNewFieldLoggerContext("test", "666")
if subscribeSocket {
	logger.SubscribeLog(o.Logger, logger.NewSubscribeMap(logrus.ErrorLevel, "[email protected];[email protected]"))
}
// o is the actual struct instance
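
NewSubscribeMap wires receivers to a single level; if you want alerts on several levels, a variant along these lines would work (hypothetical helper, not in the original code):

//a hypothetical multi-level variant of NewSubscribeMap
func NewMultiLevelSubscribeMap(receiverStr string, levels ...logrus.Level) SubscribeMap {
	subMap := SubscribeMap{}
	for _, level := range levels {
		for _, address := range strings.Split(receiverStr, ";") {
			subMap[level] = append(subMap[level], &email.Receiver{Email: address})
		}
	}
	return subMap
}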

Kafka Hook

// kafka hook
package logger

import (
	"github.com/sirupsen/logrus"
	"library/kafka"
	"library/util/constant"
)

type KafKaHook struct {
	kafkaProducer *kafka.KafkaProducer
}

func (h *KafKaHook) Levels() []logrus.Level {
	return logrus.AllLevels
}

func (h *KafKaHook) Fire(entry *logrus.Entry) error {
	h.kafkaProducer.SendMsgSync(entry.Message)
	return nil
}

func newKafkaHook() *KafKaHook {
	producer := kafka.NewKafkaProducer(constant.KafkaLogElkTopic, true)
	return &KafKaHook{kafkaProducer: producer}
}

To use it, simply call logger.AddHook(newKafkaHook()).
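
For example, extending getLogger from earlier (a sketch; whether every module's logger should ship to Kafka is your call):

//sketch: a logger that also ships entries to Kafka
func getLoggerWithKafka(module string) *logrus.Logger {
	l := getLogger(module)
	l.AddHook(newKafkaHook())
	return l
}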

Kafka Module

  • Producer
// kafkaProducer.go
package kafka

import (
	"fmt"
	"github.com/Shopify/sarama"
	"log"
	"time"
)

func GetKafkaAddress() []string {
	return []string{"127.0.0.1:9092"}
}


//synchronous message mode
func SyncProducer(topic, message string) error {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	config.Producer.Timeout = 5 * time.Second
	p, err := sarama.NewSyncProducer(GetKafkaAddress(), config)
	if err != nil {
		return fmt.Errorf("sarama.NewSyncProducer err, message=%s \n", err)
	}
	defer p.Close()
	msg := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.ByteEncoder(message),
	}
	part, offset, err := p.SendMessage(msg)
	if err != nil {
		return fmt.Errorf("send message err=%s \n", err)
	}
	fmt.Printf("send succeeded, partition=%d, offset=%d \n", part, offset)
	return nil
}


//a producer that can run in sync or async mode
type KafkaProducer struct {
	topic         string
	asyncProducer sarama.AsyncProducer
	syncProducer  sarama.SyncProducer
	sync          bool
}

func NewKafkaProducer(topic string, sync bool) *KafkaProducer {
	k := &KafkaProducer{
		topic: topic,
		sync:  sync,
	}
	if sync {
		k.initSync()
	} else {
		k.initAsync()
	}
	return k
}

func (k *KafkaProducer) initAsync() bool {
	if k.sync {
		fmt.Printf("sync producer cant call async func !\n")
		return false
	}
	config := sarama.NewConfig()
	//wait until all in-sync replicas have acknowledged the message
	config.Producer.RequiredAcks = sarama.WaitForAll
	//send messages to partitions at random
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	//whether to report successes and failures; Successes only matters when RequiredAcks is not NoResponse
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true
	//set the kafka version in use; below V0_10_0_0 the message timestamp has no effect,
	//and it must be configured on both producer and consumer.
	//note: with a wrong version kafka returns very odd errors and messages fail to send
	config.Version = sarama.V0_10_0_1

	producer, e := sarama.NewAsyncProducer(GetKafkaAddress(), config)
	if e != nil {
		fmt.Println(e)
		return false
	}
	k.asyncProducer = producer
	//NOTE: the original code deferred producer.AsyncClose() here, which would have
	//closed the producer as soon as initAsync returned; the producer must stay open
	//for SendMsgAsync, so closing is left to the caller on shutdown
	go func() {
		for {
			select {
			case <-producer.Successes():
				//fmt.Println("offset: ", suc.Offset, "timestamp: ", suc.Timestamp.String(), "partitions: ", suc.Partition)
			case fail := <-producer.Errors():
				fmt.Printf("err: %s  \n", fail.Err.Error())
			}
		}
	}()

	return true
}

func (k *KafkaProducer) initSync() bool {
	if !k.sync {
		fmt.Println("async producer cant call sync func !")
		return false
	}

	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	config.Producer.Timeout = 5 * time.Second
	p, err := sarama.NewSyncProducer(GetKafkaAddress(), config)
	if err != nil {
		log.Printf("sarama.NewSyncProducer err, message=%s \n", err)
		return false
	}
	k.syncProducer = p
	return true
}

func (k *KafkaProducer) SendMsgAsync(sendStr string) {
	msg := &sarama.ProducerMessage{
		Topic: k.topic,
	}

	//convert the string to a byte array
	msg.Value = sarama.ByteEncoder(sendStr)

	//send via the input channel
	k.asyncProducer.Input() <- msg
}

func (k *KafkaProducer) SendMsgSync(sendStr string) bool {
	msg := &sarama.ProducerMessage{
		Topic: k.topic,
		Value: sarama.ByteEncoder(sendStr),
	}
	part, offset, err := k.syncProducer.SendMessage(msg)
	if err != nil {
		fmt.Printf("send failed: message(%s) err=%s \n", sendStr, err)
		return false
	}
	fmt.Printf("send succeeded: partition=%d, offset=%d \n", part, offset)
	return true
}

Call SendMsgSync or SendMsgAsync to produce messages, and make sure the sync flag you passed at construction matches the method you call!
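
A quick usage sketch (the topic name is a placeholder):

//producer usage sketch
syncPd := kafka.NewKafkaProducer("demo-topic", true) //sync mode
syncPd.SendMsgSync("hello kafka")

asyncPd := kafka.NewKafkaProducer("demo-topic", false) //async mode
asyncPd.SendMsgAsync("hello kafka")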

  • Consumer group
// kafkaConsumerGroup.go

package kafka

import (
	"context"
	"fmt"
	"github.com/Shopify/sarama"
	"log"
	"sync"
)

func NewKafkaConsumerGroup(topics []string, group string, businessCall func(message *sarama.ConsumerMessage) bool) *KafkaConsumerGroup {
	//the caller is expected to invoke Init() and keep the returned cleanup func;
	//calling Init() here as well (as the original code did) would start two consumers
	return &KafkaConsumerGroup{
		brokers:           GetKafkaAddress(),
		topics:            topics,
		group:             group,
		channelBufferSize: 2,
		ready:             make(chan bool),
		version:           "1.1.1",
		handler:           businessCall,
	}
}

// consumer group: consumers with the same group.id are treated as one group.
// Every consumer must set a group id. Each message is consumed by exactly one
// consumer within a group, but may be consumed by multiple groups.
type KafkaConsumerGroup struct {
	//broker: a single kafka server is called a broker
	brokers []string
	//topic: a logical grouping of messages; messages of the same topic share one queue
	topics            []string
	version           string
	ready             chan bool
	group             string
	channelBufferSize int
	//business callback
	handler func(message *sarama.ConsumerMessage) bool
}

func (k *KafkaConsumerGroup) Init() func() {
	version, err := sarama.ParseKafkaVersion(k.version)
	if err != nil {
		fmt.Printf("Error parsing Kafka version: %v", err)
	}
	cfg := sarama.NewConfig()
	cfg.Version = version
	// partition assignment strategy
	cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
	// where to start when no committed offset is found (-2 == sarama.OffsetOldest)
	cfg.Consumer.Offsets.Initial = sarama.OffsetOldest
	// channel buffer size
	cfg.ChannelBufferSize = k.channelBufferSize
	ctx, cancel := context.WithCancel(context.Background())
	client, err := sarama.NewConsumerGroup(k.brokers, k.group, cfg)
	if err != nil {
		fmt.Printf("Error creating consumer group client: %v", err)
	}

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer func() {
			wg.Done()
			//util.HandlePanic("client.Consume panic", log.StandardLogger())
		}()
		for {
			if err := client.Consume(ctx, k.topics, k); err != nil {
				log.Printf("Error from consumer: %v", err)
			}
			// check if context was cancelled, signaling that the consumer should stop
			if ctx.Err() != nil {
				log.Println(ctx.Err())
				return
			}
			k.ready = make(chan bool)
		}
	}()

	<-k.ready
	fmt.Printf("Sarama consumer up and running!... \n")
	// make sure in-flight messages are drained on shutdown
	return func() {
		cancel()
		wg.Wait()
		if err = client.Close(); err != nil {
			fmt.Printf("Error closing client: %v  \n", err)
		}
	}
}



// Setup is run at the beginning of a new session, before ConsumeClaim
func (k *KafkaConsumerGroup) Setup(sarama.ConsumerGroupSession) error {
	// Mark the consumer as ready
	close(k.ready)
	return nil
}

// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
func (k *KafkaConsumerGroup) Cleanup(sarama.ConsumerGroupSession) error {
	return nil
}

// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
func (k *KafkaConsumerGroup) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {

	// NOTE:
	// Do not move the code below to a goroutine.
	// The `ConsumeClaim` itself is called within a goroutine, see:
	// https://github.com/Shopify/sarama/blob/master/consumer_group.go#L27-L29
	// the actual consumption loop
	for message := range claim.Messages() {
		//msg := string(message.Value)
		//k.logger.Infof("kafka: %s", msg)

		if ok := k.handler(message); ok {
			// commit the offset
			session.MarkMessage(message, "")
		}
		//run.Run(msg)
	}
	return nil
}

Test Code

func TestKafkaConsumerGroup_Init(t *testing.T) {
	//pd := NewKafkaProducer("test-fail", true)
	k := NewKafkaConsumerGroup([]string{constant.KafkaALiSdkTopic}, "group-2", func(message *sarama.ConsumerMessage) bool {
		fmt.Println(string(message.Value))
		//failure handling, e.g. republish for retry:
		//if ok := pd.SendMsgSync("666666"); ok {
		//	return true
		//}
		return false
	})
	consumerDone := k.Init()

	sigterm := make(chan os.Signal, 1)
	signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM)
	<-sigterm
	fmt.Println("terminating: via signal")
	consumerDone()
}

There is some compensation (retry) logic sketched in the commented-out parts here.
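
A hedged sketch of that compensation idea: on handler failure, republish the payload to a retry topic so the offset can still be committed (handleBusiness and the topic names are hypothetical):

//failPd republishes failed messages; "test-fail" mirrors the commented-out test code
failPd := kafka.NewKafkaProducer("test-fail", true)
k := kafka.NewKafkaConsumerGroup([]string{"demo-topic"}, "group-2", func(message *sarama.ConsumerMessage) bool {
	if err := handleBusiness(message); err != nil { //handleBusiness is a hypothetical callback
		//return true only if the republish succeeded, so the offset is committed
		//exactly when the message has been parked for retry
		return failPd.SendMsgSync(string(message.Value))
	}
	return true
})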

That wraps up the logrus-related hooks.
