goim源碼解讀-全局概覽

架構圖

arch

對這個架構做一下簡單說明:

1. logic啓動http服務器, 接受http請求, 用於將數據推送到kafka、獲取在線用戶信息, 以及進行websocket身份校驗

2. comet組件啓動websocket/tcp服務, 管理連接, 並負責將數據推送至指定連接

3. job組件訂閱kafka指定頻道的消息, 開啓管道監聽(將獲得的數據推送到comet當中某個連接上)

  從discovery當中找到comet組件

4. discovery負責監控以上組件的活動狀態

核心依賴庫

    //配置文件操作
    github.com/BurntSushi/toml v0.3.1
    //kafka相關
	github.com/Shopify/sarama v1.19.0 // indirect
    //discovery依賴
	github.com/bilibili/discovery v1.0.1
    //kafka相關
	github.com/bsm/sarama-cluster v2.1.15+incompatible
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/eapache/go-resiliency v1.1.0 // indirect
	github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
	github.com/eapache/queue v1.1.0 // indirect
    //http請求處理庫
	github.com/gin-gonic/gin v1.3.0
    //grpc數據序列化庫
	github.com/gogo/protobuf v1.1.1
	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
	github.com/golang/protobuf v1.2.0
	github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
    //redis操作
	github.com/gomodule/redigo v2.0.0+incompatible
	github.com/google/uuid v1.0.0
	github.com/issue9/assert v1.0.0
	github.com/pierrec/lz4 v2.0.5+incompatible // indirect
	github.com/pkg/errors v0.8.0
	github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a // indirect
	github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect
	github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a
	github.com/stretchr/testify v1.3.0
	github.com/thinkboy/log4go v0.0.0-20160303045050-f91a411e4a18
	github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43
	github.com/zhenjl/cityhash v0.0.0-20131128155616-cdd6a94144ab
	golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1
    //遠程服務調用相關rpc庫
	google.golang.org/grpc v1.16.0
    //kafka相關庫
	gopkg.in/Shopify/sarama.v1 v1.19.0
	gopkg.in/yaml.v2 v2.2.2 // indirect

 在配置中做了註釋, 這些庫很重要,特別是grpc, redis, kafka的一些操作, 對於goim框架的理解很重要

分組件代碼分析

discovery組件的實現,暫時不考慮

1. comet處理websocket/tcp連接(cmd/comet/main.go代碼中可以看到, 啓用tcp與websocket監聽服務)

// main boots the comet component: it loads configuration, registers
// itself with service discovery, then starts the TCP, WebSocket and
// gRPC listeners that accept and manage client connections.
func main() {
	flag.Parse()
	// Load and validate configuration before anything else.
	if err := conf.Init(); err != nil {
		panic(err)
	}
	rand.Seed(time.Now().UTC().UnixNano())
	runtime.GOMAXPROCS(runtime.NumCPU())
	log.Infof("goim-comet [version: %s env: %+v] start", ver, conf.Conf.Env)
	// Register this comet instance with discovery so job/logic can find it.
	dis := naming.New(conf.Conf.Discovery)
	resolver.Register(dis)
	// Create the comet server core that owns all connections.
	srv := comet.NewServer(conf.Conf)
	if err := comet.InitWhitelist(conf.Conf.Whitelist); err != nil {
		panic(err)
	}
	// Start the raw-TCP listener; one accept loop per CPU.
	if err := comet.InitTCP(srv, conf.Conf.TCP.Bind, runtime.NumCPU()); err != nil {
		panic(err)
	}
	// Start the WebSocket listener, also one accept loop per CPU.
	if err := comet.InitWebsocket(srv, conf.Conf.Websocket.Bind, runtime.NumCPU()); err != nil {
		panic(err)
	}
	// Optionally start a TLS (wss) listener when enabled in config.
	if conf.Conf.Websocket.TLSOpen {
		if err := comet.InitWebsocketWithTLS(srv, conf.Conf.Websocket.TLSBind, conf.Conf.Websocket.CertFile, conf.Conf.Websocket.PrivateFile, runtime.NumCPU()); err != nil {
			panic(err)
		}
	}
	// Start the gRPC server other components use to push into this comet.
	rpcSrv := grpc.New(conf.Conf.RPCServer, srv)
	// Remaining code (signal handling / graceful shutdown) is omitted by the article.
}

2. logic處理http請求(啓用http服務,rpc服務,供其他組件進行調用)

cmd/logic/main.go

// main boots the logic component: it starts the HTTP server that
// clients call and the gRPC server that other components call.
func main() {
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	log.Infof("goim-logic [version: %s env: %+v] start", ver, conf.Conf.Env)
	// Register with discovery so comet/job can locate this logic node.
	dis := naming.New(conf.Conf.Discovery)
	resolver.Register(dis)
	// Business-logic core shared by both servers below.
	srv := logic.New(conf.Conf)
	// Start the HTTP server handling client-facing requests.
	httpSrv := http.New(conf.Conf.HTTPServer, srv)
	// Start the gRPC server handling RPC calls from other components.
	rpcSrv := grpc.New(conf.Conf.RPCServer, srv)
	// ... (rest of the function elided by the article)
	...
}

 internal/logic/http/server.go

// New builds the logic HTTP server: it creates a gin engine with
// logging/recovery middleware, registers all /goim routes, and only
// then starts serving on c.Addr in a background goroutine.
//
// Note: the original code launched engine.Run BEFORE initRouter was
// called, so a request arriving during startup could hit an engine
// with no routes registered. Registering routes first removes that
// startup race without changing the function's interface.
func New(c *conf.HTTPServer, l *logic.Logic) *Server {
	engine := gin.New()
	engine.Use(loggerHandler, recoverHandler)
	s := &Server{
		engine: engine,
		logic:  l,
	}
	// Register all routes before the listener opens.
	s.initRouter()
	go func() {
		// engine.Run blocks; a failure to bind is fatal.
		if err := engine.Run(c.Addr); err != nil {
			panic(err)
		}
	}()
	return s
}
...
//初始化http路由
// initRouter registers every /goim HTTP endpoint on the gin engine.
// Push endpoints are POSTs carrying message payloads; online/nodes
// endpoints are read-only GETs.
func (s *Server) initRouter() {
	group := s.engine.Group("/goim")
	routes := []struct {
		method  string
		path    string
		handler gin.HandlerFunc
	}{
		{"POST", "/push/keys", s.pushKeys},
		{"POST", "/push/mids", s.pushMids},
		{"POST", "/push/room", s.pushRoom},
		{"POST", "/push/all", s.pushAll},
		{"GET", "/online/top", s.onlineTop},
		{"GET", "/online/room", s.onlineRoom},
		{"GET", "/online/total", s.onlineTotal},
		{"GET", "/nodes/weighted", s.nodesWeighted},
		{"GET", "/nodes/instances", s.nodesInstances},
	}
	for _, r := range routes {
		group.Handle(r.method, r.path, r.handler)
	}
}

 

internal/logic/grpc/server.go

// New creates, registers and starts the logic gRPC server.
// It panics if the listener cannot be opened, and serves in a
// background goroutine so construction does not block the caller.
func New(c *conf.RPCServer, l *logic.Logic) *grpc.Server {
	// Keepalive settings are taken verbatim from the RPCServer config.
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionIdle:     time.Duration(c.IdleTimeout),
		MaxConnectionAge:      time.Duration(c.MaxLifeTime),
		MaxConnectionAgeGrace: time.Duration(c.ForceCloseWait),
		Time:                  time.Duration(c.KeepAliveInterval),
		Timeout:               time.Duration(c.KeepAliveTimeout),
	}))
	// Expose the Logic service implementation over this server.
	pb.RegisterLogicServer(srv, &server{l})
	listener, err := net.Listen(c.Network, c.Addr)
	if err != nil {
		panic(err)
	}
	go func() {
		if err := srv.Serve(listener); err != nil {
			panic(err)
		}
	}()
	return srv
}

3. job組件(創建kafka訂閱服務, 對comet組件進行監聽)

// main boots the job component: it subscribes to Kafka and pushes the
// consumed messages to comet instances found via discovery.
func main() {
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	log.Infof("goim-job [version: %s env: %+v] start", ver, conf.Conf.Env)
	// Register with discovery (also used below to watch comet nodes).
	dis := naming.New(conf.Conf.Discovery)
	resolver.Register(dis)
	// Build the job (Kafka consumer + comet watcher), then run the
	// consume loop in its own goroutine.
	j := job.New(conf.Conf)
	go j.Consume()
	// ... (rest of the function elided by the article)
	...
}

internal/job/job.go 具體實現

// New constructs a Job: it wires the Kafka consumer from config,
// prepares the per-room state map, and starts watching comet nodes
// through discovery so consumed messages can be routed to them.
func New(c *conf.Config) *Job {
	job := &Job{
		c:        c,
		consumer: newKafkaSub(c.Kafka),
		rooms:    map[string]*Room{},
	}
	// Keep the set of live comet instances up to date.
	job.watchComet(c.Discovery)
	return job
}

 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章