Merge pull request #1 from bilibili/master

test  merge
pull/169/head
godsoul 6 years ago committed by GitHub
commit 60c1b68039
  1. 31
      .gitignore
  2. 16
      .travis.yml
  3. 21
      LICENSE
  4. 50
      README.md
  5. BIN
      doc/img/kratos-log.jpg
  6. BIN
      doc/img/kratos.png
  7. BIN
      doc/img/kratos2.png
  8. BIN
      doc/img/kratos3.png
  9. BIN
      doc/img/kratosinit.gif
  10. BIN
      doc/img/ratelimit-benchmark-up-1.png
  11. BIN
      doc/img/ratelimit-rolling-window.png
  12. 40
      doc/wiki-cn/README.md
  13. 33
      doc/wiki-cn/blademaster-mid.md
  14. 14
      doc/wiki-cn/blademaster-mod.md
  15. 86
      doc/wiki-cn/blademaster-pb.md
  16. 71
      doc/wiki-cn/blademaster-quickstart.md
  17. 20
      doc/wiki-cn/blademaster.md
  18. 239
      doc/wiki-cn/cache-mc.md
  19. 226
      doc/wiki-cn/cache-redis.md
  20. 23
      doc/wiki-cn/cache.md
  21. 111
      doc/wiki-cn/config-paladin.md
  22. 51
      doc/wiki-cn/config.md
  23. 54
      doc/wiki-cn/database-hbase.md
  24. 234
      doc/wiki-cn/database-mysql.md
  25. 21
      doc/wiki-cn/database.md
  26. 30
      doc/wiki-cn/kratos-genbts.md
  27. 71
      doc/wiki-cn/kratos-genmc.md
  28. 38
      doc/wiki-cn/kratos-protoc.md
  29. 11
      doc/wiki-cn/kratos-swagger.md
  30. 102
      doc/wiki-cn/kratos-tool.md
  31. 37
      doc/wiki-cn/logger.md
  32. 29
      doc/wiki-cn/protoc.md
  33. 12
      doc/wiki-cn/quickstart.md
  34. 60
      doc/wiki-cn/ratelimit.md
  35. 15
      doc/wiki-cn/summary.md
  36. 43
      doc/wiki-cn/warden-balancer.md
  37. 376
      doc/wiki-cn/warden-mid.md
  38. 47
      doc/wiki-cn/warden-pb.md
  39. 171
      doc/wiki-cn/warden-quickstart.md
  40. 204
      doc/wiki-cn/warden-resolver.md
  41. 40
      doc/wiki-cn/warden.md
  42. 27
      go.mod
  43. 106
      go.sum
  44. 25
      pkg/cache/memcache/README.md
  45. 261
      pkg/cache/memcache/ascii_conn.go
  46. 569
      pkg/cache/memcache/ascii_conn_test.go
  47. 187
      pkg/cache/memcache/client.go
  48. 650
      pkg/cache/memcache/conn.go
  49. 185
      pkg/cache/memcache/conn_test.go
  50. 162
      pkg/cache/memcache/encoding.go
  51. 220
      pkg/cache/memcache/encoding_test.go
  52. 177
      pkg/cache/memcache/example_test.go
  53. 85
      pkg/cache/memcache/main_test.go
  54. 267
      pkg/cache/memcache/memcache.go
  55. 300
      pkg/cache/memcache/memcache_test.go
  56. 59
      pkg/cache/memcache/mock.go
  57. 197
      pkg/cache/memcache/pool.go
  58. 204
      pkg/cache/memcache/pool_conn.go
  59. 545
      pkg/cache/memcache/pool_conn_test.go
  60. 48
      pkg/cache/memcache/test/BUILD.bazel
  61. 375
      pkg/cache/memcache/test/test.pb.go
  62. 12
      pkg/cache/memcache/test/test.proto
  63. 109
      pkg/cache/memcache/trace.go
  64. 103
      pkg/cache/memcache/trace_conn.go
  65. 57
      pkg/cache/memcache/util.go
  66. 75
      pkg/cache/memcache/util_test.go
  67. 3
      pkg/conf/env/env.go
  68. 27
      pkg/conf/paladin/default.go
  69. 226
      pkg/conf/paladin/file.go
  70. 15
      pkg/conf/paladin/file_test.go
  71. 7
      pkg/conf/paladin/map.go
  72. 12
      pkg/container/group/README.md
  73. 46
      pkg/container/group/example_test.go
  74. 55
      pkg/container/group/group.go
  75. 69
      pkg/container/group/group_test.go
  76. 10
      pkg/database/tidb/discovery.go
  77. 24
      pkg/ecode/status_test.go
  78. 34
      pkg/log/doc.go
  79. 50
      pkg/log/dsn.go
  80. 13
      pkg/log/handler.go
  81. 20
      pkg/log/internal/core/buffer_test.go
  82. 22
      pkg/log/internal/core/bufferpool.go
  83. 20
      pkg/log/internal/core/pool.go
  84. 20
      pkg/log/internal/core/pool_test.go
  85. 123
      pkg/log/log.go
  86. 22
      pkg/log/log_test.go
  87. 61
      pkg/log/logrus.go
  88. 7
      pkg/log/pattern.go
  89. 19
      pkg/log/stdout.go
  90. 30
      pkg/log/util.go
  91. 54
      pkg/log/util_test.go
  92. 29
      pkg/log/verbose.go
  93. 479
      pkg/naming/discovery/discovery.go
  94. 93
      pkg/naming/naming.go
  95. 57
      pkg/net/criticality/criticality.go
  96. 8
      pkg/net/http/blademaster/client.go
  97. 6
      pkg/net/http/blademaster/context.go
  98. 21
      pkg/net/http/blademaster/criticality.go
  99. 83
      pkg/net/http/blademaster/metadata.go
  100. 62
      pkg/net/http/blademaster/ratelimit.go
Some files were not shown because too many files have changed in this diff.

31
.gitignore vendored

@ -1,4 +1,31 @@
go.sum
BUILD
# idea ignore
.idea/
*.ipr
*.iml
*.iws
.vscode/
# temp ignore
*.log
*.cache
*.diff
*.exe
*.exe~
*.patch
*.swp
*.tmp
# system ignore
.DS_Store
Thumbs.db
# project
*.cert
*.key
tool/kratos/kratos
tool/kratos-protoc/kratos-protoc
tool/kratos-gen-bts/kratos-gen-bts
tool/kratos-gen-mc/kratos-gen-mc
tool/kratos/kratos-protoc/kratos-protoc
tool/kratos/protobuf/protoc-gen-bm/protoc-gen-bm
tool/kratos/protobuf/protoc-gen-bswagger/protoc-gen-bswagger

@ -1,7 +1,7 @@
language: go
go:
- 1.11.x
- 1.12.x
# Only clone the most recent commit.
git:
@ -9,7 +9,12 @@ git:
# Force-enable Go modules. This will be unnecessary when Go 1.12 lands.
env:
- GO111MODULE=on
global:
- GO111MODULE=on
- REGION=sh
- ZONE=sh001
- DEPLOY_ENV=dev
- DISCOVERY_NODES=127.0.0.1:7171
# Skip the install step. Don't `go get` dependencies. Only build with the code
# in vendor/
@ -19,11 +24,14 @@ install: true
# build and immediately stop. It's sorta like having set -e enabled in bash.
# Make sure golangci-lint is vendored.
before_script:
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $GOPATH/bin
- curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $GOPATH/bin
- curl -sfL https://raw.githubusercontent.com/bilibili/discovery/master/install.sh | sh -s -- -b $GOPATH/bin
- curl -sfL https://raw.githubusercontent.com/bilibili/discovery/master/cmd/discovery/discovery-example.toml -o $GOPATH/bin/discovery.toml
- nohup bash -c "$GOPATH/bin/discovery -conf $GOPATH/bin/discovery.toml &"
script:
- go test ./...
- go build ./...
- go test ./...
after_success:
- golangci-lint run # run a bunch of code checkers/linters in parallel

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2018 bilibili
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@ -1,25 +1,46 @@
![kratos](doc/img/kratos3.png)
[![Language](https://img.shields.io/badge/Language-Go-blue.svg)](https://golang.org/)
[![Build Status](https://travis-ci.org/bilibili/kratos.svg?branch=master)](https://travis-ci.org/bilibili/kratos)
[![GoDoc](https://godoc.org/github.com/bilibili/kratos?status.svg)](https://godoc.org/github.com/bilibili/kratos)
[![Go Report Card](https://goreportcard.com/badge/github.com/bilibili/kratos)](https://goreportcard.com/report/github.com/bilibili/kratos)
# Kratos
Kratos is a Go microservice framework open-sourced by [bilibili](https://www.bilibili.com), bundling a large set of microservice-related frameworks and tools. Its main components are:
Kratos is a Go microservice framework open-sourced by [bilibili](https://www.bilibili.com), bundling a large set of microservice-related frameworks and tools.
* [HTTP framework blademaster (bm)](doc/wiki-cn/blademaster.md): built on [gin](https://github.com/gin-gonic/gin), fast and flexible; middleware for common or custom logic is easy to write, and log & trace are wired in by default.
* [gRPC framework warden](doc/wiki-cn/warden.md): wraps official gRPC, uses [discovery](https://github.com/bilibili/discovery) for service registration and discovery by default, with wrr and p2c (default) load balancing.
* [dapper trace](doc/wiki-cn/dapper.md): full-link tracing based on opentracing; a dapper implementation is also provided, see [dapper, coming soon]().
* [log](doc/wiki-cn/logger.md): a high-performance, field-based log library in the style of [zap](https://github.com/uber-go/zap), integrated with our [log-agent, coming soon]() log collection solution.
* [database](doc/wiki-cn/database.md): SDKs for MySQL & HBase & TiDB; the TiDB SDK uses service discovery.
* [cache](doc/wiki-cn/cache.md): SDKs for memcache & redis; note there is no redis-cluster implementation, the [overlord](https://github.com/bilibili/overlord) proxy is recommended instead.
* [kratos tool](doc/wiki-cn/kratos-tool.md): the kratos tool set, covering project scaffolding, code generation from pb files, swagger doc generation and more.
> The name comes from God of War: set in Greek mythology, the game follows Kratos, a mortal who becomes the God of War and embarks on a god-slaying rampage.
## Goals
We aim to provide a complete microservice development experience. Once the frameworks and tools are integrated, the service-governance side of the development cycle becomes invisible to day-to-day business work, letting teams focus on delivery. For every developer, the Kratos codebase is also a good place to study [bilibili](https://www.bilibili.com)'s accumulated microservice practices and experience.
# Quick start
## Features
* HTTP Blademaster: a modular design with a [gin](https://github.com/gin-gonic/gin)-based core, simple to use and lightweight;
* GRPC Warden: built on official gRPC, with [discovery](https://github.com/bilibili/discovery) for service discovery and P2C load balancing;
* Cache: clean interface-based design and painless cache serialization; best used with the [overlord](https://github.com/bilibili/overlord) proxy;
* Database: MySQL/HBase/TiDB integration with circuit breaking and statistics, so data-layer pressure is easy to spot;
* Config: the easy-to-use [paladin sdk](doc/wiki-cn/config.md), which can pair with a remote config center for config versioning and updates;
* Log: a zap-style ([zap](https://github.com/uber-go/zap)) field-based, high-performance log library, combined with log-agent for remote log management;
* Trace: full-link tracing based on opentracing (gRPC/HTTP/MySQL/Redis/Memcached);
* Kratos Tool: a tool chain for scaffolding standard projects and generating code from Protobuf, making gRPC, HTTP and swagger docs easy to produce;
## Quick start
### Requirements
Go version>=1.12 and GO111MODULE=on
### Installation
```shell
go get -u github.com/bilibili/kratos/tool/kratos
kratos init
cd $GOPATH/src
kratos new kratos-demo
```
`kratos init` quickly generates scaffolding based on the kratos library, e.g. [kratos-demo](https://github.com/bilibili/kratos-demo)
`kratos new` quickly generates scaffolding based on the kratos library, e.g. [kratos-demo](https://github.com/bilibili/kratos-demo)
### Build & Run
```shell
cd kratos-demo/cmd
@ -29,12 +50,15 @@ go build
Open [http://localhost:8000/kratos-demo/start](http://localhost:8000/kratos-demo/start) in a browser and you will see `Golang 大法好 !!!` printed.
[Quick start](doc/wiki-cn/quickstart.md)
[Quick start](doc/wiki-cn/quickstart.md) [kratos tool](doc/wiki-cn/kratos-tool.md)
# Document
## Documentation
[Simplified Chinese](doc/wiki-cn/summary.md)
## License
Kratos is under the MIT license. See the [LICENSE](./LICENSE) file for details.
-------------
*Please report bugs, concerns, suggestions by issues, or join QQ-group 716486124 to discuss problems around source code.*

Binary file not shown. (added; 34 KiB)

Binary file not shown. (added; 7.3 KiB)

Binary file not shown. (added; 7.8 KiB)

Binary file not shown. (added; 5.8 KiB)

Binary file not shown. (before: 2.1 MiB)

Binary file not shown. (added; 661 KiB)

Binary file not shown. (added; 19 KiB)
@ -0,0 +1,40 @@
# Kratos
Kratos is a Go microservice framework open-sourced by bilibili, bundling a large set of microservice-related frameworks and tools.
### Goals
We aim to provide a complete microservice development experience. Once the frameworks and tools are integrated, the service-governance side of the development cycle becomes invisible to day-to-day business work, letting teams focus on delivery. For every developer, the Kratos codebase is also a good place to study bilibili's accumulated microservice practices and experience.
### Principles
* Simple: no over-engineering; plain, straightforward code
* General: the base-library features that everyday business development needs
* Efficient: speeds up business iteration
* Stable: highly testable base libraries with high coverage, proven safe and reliable in production
* Robust: well-designed base libraries that are hard to misuse
* High-performance: fast, but without performance hacks for their own sake or resorting to unsafe
* Extensible: clean interfaces that are easy to extend, or add new base-library packages to extend functionality
* Fault-tolerant: designed for failure, with heavy use of SRE insight for robustness
* Tool chain: a rich tool chain, e.g. cache code generation, lint tools and more
### Features
* HTTP Blademaster: a modular design with a [gin](https://github.com/gin-gonic/gin)-based core, simple to use and lightweight;
* GRPC Warden: built on official gRPC, with [discovery](https://github.com/bilibili/discovery) for service discovery and P2C load balancing;
* Cache: clean interface-based design and painless cache serialization; best used with the [overlord](https://github.com/bilibili/overlord) proxy;
* Database: MySQL/HBase/TiDB integration with circuit breaking and statistics, so data-layer pressure is easy to spot;
* Config: the easy-to-use [paladin sdk](config-paladin.md), which can pair with a remote config center for config versioning and updates;
* Log: a zap-style ([zap](https://github.com/uber-go/zap)) field-based, high-performance log library, combined with log-agent for remote log management;
* Trace: full-link tracing based on opentracing (gRPC/HTTP/MySQL/Redis/Memcached);
* Kratos Tool: a tool chain for scaffolding standard projects and generating code from Protobuf, making gRPC, HTTP and swagger docs easy to produce;
<br>
-------------
> The name comes from God of War: set in Greek mythology, the game follows Kratos, a mortal who becomes the God of War and embarks on a god-slaying rampage.
-------------
[Documentation index](summary.md)

@ -20,7 +20,7 @@ func (f HandlerFunc) ServeHTTP(c *Context) {
}
```
1. It implements the `Handler` interface and can be used as a global middleware on the engine: `engine.User(YourHandler)`
1. It implements the `Handler` interface and can be used as a global middleware on the engine: `engine.Use(YourHandler)`
2. Declared as a `HandlerFunc`, it can be used as a per-route middleware: `e.GET("/path", YourHandlerFunc)`
A minimal example:
@ -103,6 +103,37 @@ func Example() {
}
```
# Built-in middleware
## Adaptive rate limiting
For more on adaptive rate limiting, see [kratos adaptive rate limiting](/doc/wiki-cn/ratelimit.md)
```go
func Example() {
myHandler := func(ctx *bm.Context) {
mid := metadata.Int64(ctx, metadata.Mid)
ctx.JSON(fmt.Sprintf("%d", mid), nil)
}
e := bm.DefaultServer(nil)
// mount the adaptive rate-limit middleware on the bm engine with the default config
limiter := bm.NewRateLimiter(nil)
e.Use(limiter.Limit())
e.GET("/user", myHandler)
e.Start()
}
```
# Further reading
[bm quick start](blademaster-quickstart.md)
[bm modules](blademaster-mod.md)
[bm generation from pb](blademaster-pb.md)
-------------

@ -73,11 +73,17 @@ func (c *Context) Protobuf(data proto.Message, err error)
![handler](/doc/img/bm-handlers.png)
Users new to `blademaster` are often puzzled by how its `Handler`s flow; in fact `bm`'s handling of `Handler`s is very simple:
* the `middleware` pre-registered in the `Router` module is merged with the other `Handler`s and stored in the `Context`'s `handlers` field, with the `index` field set to `0`
* they then run one by one via `Next()`; a `middleware` that wants to abort the whole flow midway can call `Abort()` to finish early
* a `middleware` that wants to run extra logic after all `Handler`s have finished can call `Next()` explicitly in its own `Handler` and place that logic after the `Next()` call (see the sketch below)
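A minimal sketch of such a middleware, assuming the `blademaster` import path used elsewhere in this repository; the token check and the timing log are purely illustrative:
```go
package middleware

import (
	"time"

	"github.com/bilibili/kratos/pkg/log"
	bm "github.com/bilibili/kratos/pkg/net/http/blademaster"
)

// Example returns a middleware that aborts unauthenticated requests and,
// otherwise, logs how long the downstream handlers took.
func Example() bm.HandlerFunc {
	return func(c *bm.Context) {
		if c.Request.Header.Get("X-Token") == "" {
			c.AbortWithStatus(401) // stop the chain before the business handler runs
			return
		}
		start := time.Now()
		c.Next() // run the remaining handlers first
		// this runs after all downstream handlers have returned
		log.Info("handled %s in %v", c.Request.URL.Path, time.Since(start))
	}
}
```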
# Further reading
[bm quick start](blademaster-quickstart.md)
[bm middleware](blademaster-mid.md)
[bm generation from pb](blademaster-pb.md)
-------------

@ -0,0 +1,86 @@
# Introduction
The `bm`-side code can be generated quickly from a proto file. Prepare the following first:
* install the `kratos tool protoc` tool, see [kratos tool](kratos-tool.md)
* write the `proto` file, see the [proto file in kratos-demo](https://github.com/bilibili/kratos-demo/blob/master/api/api.proto) for an example
### The kratos tool
`kratos tool protoc` can generate the code and docs for `warden`, `bm` and `swagger`; to generate only the `bm` code, add `--bm`:
```shell
# generate BM HTTP
kratos tool protoc --bm api.proto
```
### The proto file
Note that to generate `bm` code you must set the `google.api.http` option inside the `service` of the `proto` file, like this:
```go
service Demo {
rpc SayHello (HelloReq) returns (.google.protobuf.Empty);
rpc SayHelloURL(HelloReq) returns (HelloResp) {
option (google.api.http) = { // this option maps the SayHelloURL method to a URL
get:"/kratos-demo/say_hello" // the URL, with GET as the request method
};
};
}
```
# Usage
We recommend keeping the `proto` files and the generated code in the project's `api` directory; see [the api directory in kratos-demo](https://github.com/bilibili/kratos-demo/tree/master/api).
In the generated `api.bm.go`, note `type DemoBMServer interface` and `RegisterDemoBMServer`:
* the `DemoBMServer` interface contains every method in the `proto` file that sets the `google.api.http` option
* `RegisterDemoBMServer` takes an implementation of the `DemoBMServer` interface plus the `bm` `Engine` used to register the routes
* the implementation of `DemoBMServer` is usually the business logic in `internal/service`, which must implement the interface
For an example of using `RegisterDemoBMServer`, see the following code in [kratos-demo's http server](https://github.com/bilibili/kratos-demo/blob/master/internal/server/http/server.go):
```go
engine = bm.DefaultServer(hc.Server)
pb.RegisterDemoBMServer(engine, svc)
initRouter(engine)
```
The `Service` struct in `internal/service` implements the `DemoBMServer` interface; see the following code in [kratos-demo's service](https://github.com/bilibili/kratos-demo/blob/master/internal/service/service.go):
```go
// SayHelloURL bm demo func.
func (s *Service) SayHelloURL(ctx context.Context, req *pb.HelloReq) (reply *pb.HelloResp, err error) {
reply = &pb.HelloResp{
Content: "hello " + req.Name,
}
fmt.Printf("hello url %s", req.Name)
return
}
```
# Documentation
The same `proto` file can also produce the corresponding `swagger` documentation:
```shell
# generate swagger
kratos tool protoc --swagger api.proto
```
This produces a `swagger.json` file that the `swagger` tool can serve through a web UI:
```shell
kratos tool swagger serve api/api.swagger.json
```
# Further reading
[bm quick start](blademaster-quickstart.md)
[bm modules](blademaster-mod.md)
[bm middleware](blademaster-mid.md)
-------------
[Documentation index](summary.md)

@ -1,8 +1,7 @@
# Getting ready
We recommend using the [kratos tool](kratos-tool.md) to scaffold a project quickly, e.g. one named `kratos-demo`. The directory layout is:
```
├── CHANGELOG.md
├── CONTRIBUTORS.md
@ -36,6 +35,7 @@
# Routing
After the project is created, go to `internal/server/http` and open `http.go`; it contains the default generated `blademaster` template, in which:
```go
engine = bm.DefaultServer(hc.Server)
initRouter(engine)
@ -43,7 +43,9 @@ if err := engine.Start(); err != nil {
panic(err)
}
```
is the default `engine` creation and startup code. Looking at the `initRouter` route-initialization method, the default implementation is:
```go
func initRouter(e *bm.Engine) {
e.Ping(ping) // the engine's built-in "/ping" endpoint, used by the load balancer to check service health
@ -55,14 +57,75 @@ func initRouter(e *bm.Engine) {
```
A bm handler has the following shape:
```go
func howToStart(c *bm.Context) // a handler receives bm's Context by default
```
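For instance, the quickstart's `howToStart` handler could look like the sketch below; it assumes the `Context` exposes a `String` render for plain-text responses, as used in the generated kratos-demo:
```go
func howToStart(c *bm.Context) {
	c.String(0, "Golang 大法好 !!!")
}
```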
# Further reading
### Ping
The engine ships a Ping method that sets the handler for the `/ping` route, which load balancers use for health checks. You decide what "healthy" means by customizing the `ping handler`, e.g. checking that the DB is reachable.
```go
func ping(c *bm.Context) {
	if err := pingDB(c); err != nil { // pingDB is a placeholder for a real dependency check, e.g. a DB ping
		c.AbortWithStatus(503)
	}
}
```
# Default routes
The default routes are:
* /metrics for prometheus scraping
* /metadata to list all registered routes
[bm modules](blademaster-mod.md) [bm middleware](blademaster-mid.md) [bm generation from pb](blademaster-pb.md)
To list all loaded routes:
```shell
curl 'http://127.0.0.1:8000/metadata'
```
Output:
```json
{
"code": 0,
"message": "0",
"ttl": 1,
"data": {
"/kratos-demo/start": {
"method": "GET"
},
"/metadata": {
"method": "GET"
},
"/metrics": {
"method": "GET"
},
"/ping": {
"method": "GET"
}
}
}
```
# Profiling
At startup, port `2333` is listened on by default for `pprof` data, e.g.:
```shell
go tool pprof http://127.0.0.1:8000/debug/pprof/profile
```
The port can be changed with a flag, e.g. `-http.perf=tcp://0.0.0.0:12333`
# Further reading
[bm modules](blademaster-mod.md)
[bm middleware](blademaster-mid.md)
[bm generation from pb](blademaster-pb.md)
-------------

@ -1,6 +1,6 @@
# Background
In a distributed architecture such as microservices, you often need to call several services while also ensuring security, normalizing request logs, tracking complete user behaviour, and so on. To do this you might set the same attributes in every service. That can be described in onboarding docs or enforced by entry rules, but problems remain:
1. It is hard to get every service to implement all of it. Developers rightly focus on features, and in day-to-day work these details are often missed: someone forgets a log line or skips recording the call chain. For high-traffic internet services, even a short outage has a large blast radius, and if a key path lacks logging, troubleshooting becomes very expensive and the impact grows further.
2. Implementing the features above is itself costly. Take identification (Identify): implementing it service by service is expensive, and leaving the responsibility to each developer again raises the chance of it being forgotten or skipped.
@ -23,15 +23,23 @@
![bm-arch](/doc/img/bm-arch-2-2.png)
`blademaster` is made of a few very small internal modules: the `Router` dispatches requests by path, the `Context` holds the complete request information, a `Handler` processes an incoming `Context`, and `Handlers` is a list executed one after another.
All `middleware` exists as `Handler`s, which keeps `blademaster` itself lean while staying highly extensible.
![bm-arch](/doc/img/bm-arch-2-3.png)
`blademaster` handles requests in a very simple way: most of the logic lives in the various `Handler`s, and the business logic is usually the last `Handler`.
Normally every `Handler` runs in order, one after another, but a `Handler` can also abort the whole flow and write the `Response` directly. This pattern is common in login-checking `middleware`: once a request is found invalid, it is rejected immediately.
During request handling, a `Render` can help produce the `Response`; for example, when different requests need different formats (`JSON`, `XML`), different `Render`s keep the logic simple.
# Further reading
[bm quick start](blademaster-quickstart.md)
[bm modules](blademaster-mod.md)
[bm middleware](blademaster-mid.md)
[bm generation from pb](blademaster-pb.md)
-------------

@ -0,0 +1,239 @@
# Getting ready
We recommend using the [kratos tool](kratos-tool.md) to scaffold a project quickly, e.g. one named `kratos-demo`. The directory layout is:
```
├── CHANGELOG.md
├── CONTRIBUTORS.md
├── LICENSE
├── README.md
├── cmd
   ├── cmd
   └── main.go
├── configs
   ├── application.toml
   ├── grpc.toml
   ├── http.toml
   ├── log.toml
   ├── memcache.toml
   ├── mysql.toml
   └── redis.toml
├── go.mod
├── go.sum
└── internal
├── dao
   └── dao.go
├── model
   └── model.go
├── server
   └── http
   └── http.go
└── service
└── service.go
```
# Getting started
## Configuration
After the project is created, enter its configs directory and open memcache.toml:
```toml
demoExpire = "24h"
[demo]
name = "kratos-demo"
proto = "tcp"
addr = "127.0.0.1:11211"
active = 50
idle = 10
dialTimeout = "100ms"
readTimeout = "200ms"
writeTimeout = "300ms"
idleTimeout = "80s"
```
In this file we configure the memcache connection protocol (proto), address (addr), the pool's idle connection count (idle), maximum connections (active) and the various timeouts.
An optional mc expiration setting can be added here as well.
## Initialization
Enter the project's internal/dao directory and open dao.go:
```go
var (
mc struct {
Demo *memcache.Config
DemoExpire xtime.Duration
}
)
checkErr(paladin.Get("memcache.toml").UnmarshalTOML(&mc))
```
The paladin config manager parses the memcache.toml shown above into the config we need.
```go
// Dao dao.
type Dao struct {
mc *memcache.Memcache
mcExpire int32
}
```
The dao struct holds the memcache connection pool object and the expiration time.
```go
dao = &Dao{
// memcache
mc: memcache.New(mc.Demo),
mcExpire: int32(time.Duration(mc.DemoExpire) / time.Second),
}
```
The New function of the kratos/pkg/cache/memcache package initializes the connection pool object from the parsed config.
## Ping
```go
// Ping ping the resource.
func (d *Dao) Ping(ctx context.Context) (err error) {
return d.pingMC(ctx)
}
func (d *Dao) pingMC(ctx context.Context) (err error) {
if err = d.mc.Set(ctx, &memcache.Item{Key: "ping", Value: []byte("pong"), Expiration: 0}); err != nil {
log.Error("conn.Set(PING) error(%v)", err)
}
return
}
```
The generated dao template ships with a memcache ping method, used as a health signal for the load balancer; see [blademaster](blademaster-quickstart.md).
## Close
```go
// Close close the resource.
func (d *Dao) Close() {
d.mc.Close()
}
```
When the dao layer shuts down, calling Close on the memcache pool closes the pool and releases its resources.
# Common operations
We recommend the [memcache code generator](kratos-genmc.md) for generating the memcache access code.
Below we walk through the common methods provided by the kratos/pkg/cache/memcache package.
## Single get
```go
// CacheDemo get data from mc
func (d *Dao) CacheDemo(c context.Context, id int64) (res *Demo, err error) {
key := demoKey(id)
res = &Demo{}
if err = d.mc.Get(c, key).Scan(res); err != nil {
res = nil
if err == memcache.ErrNotFound {
err = nil
}
}
if err != nil {
prom.BusinessErrCount.Incr("mc:CacheDemo")
log.Errorv(c, log.KV("CacheDemo", fmt.Sprintf("%+v", err)), log.KV("key", key))
return
}
return
}
```
The code above, produced by the generator, performs a single get: mc.Get(c, key) fetches the reply and Scan converts the memcache value into a Go type (string, bool, struct, and so on).
## Batch get
```go
replies, err := d.mc.GetMulti(c, keys)
for _, key := range replies.Keys() {
v := &Demo{}
err = replies.Scan(key, v)
}
```
The snippet above, produced by the generator, performs a batch get: mc.GetMulti(c, keys) fetches the replies and, as with a single get, Scan converts each memcache value into our struct.
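A fuller sketch of a batch read built on the same calls; it assumes the demoKey helper and the Demo struct from the surrounding examples, and the generated dao.bts.go code differs in detail:
```go
// CacheDemos reads a batch of Demo values from mc, keyed by id.
func (d *Dao) CacheDemos(c context.Context, ids []int64) (res map[int64]*Demo, err error) {
	keys := make([]string, 0, len(ids))
	keyIDMap := make(map[string]int64, len(ids))
	for _, id := range ids {
		key := demoKey(id)
		keys = append(keys, key)
		keyIDMap[key] = id
	}
	replies, err := d.mc.GetMulti(c, keys)
	if err != nil {
		log.Errorv(c, log.KV("CacheDemos", fmt.Sprintf("%+v", err)))
		return
	}
	res = make(map[int64]*Demo, len(ids))
	for _, key := range replies.Keys() {
		v := &Demo{}
		if err = replies.Scan(key, v); err != nil {
			log.Errorv(c, log.KV("CacheDemos", fmt.Sprintf("%+v", err)))
			return
		}
		res[keyIDMap[key]] = v
	}
	return
}
```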
## Set a key
```go
// AddCacheDemo Set data to mc
func (d *Dao) AddCacheDemo(c context.Context, id int64, val *Demo) (err error) {
if val == nil {
return
}
key := demoKey(id)
item := &memcache.Item{Key: key, Object: val, Expiration: d.demoExpire, Flags: memcache.FlagJSON | memcache.FlagGzip}
if err = d.mc.Set(c, item); err != nil {
prom.BusinessErrCount.Incr("mc:AddCacheDemo")
log.Errorv(c, log.KV("AddCacheDemo", fmt.Sprintf("%+v", err)), log.KV("key", key))
return
}
return
}
```
The code above, produced by the generator, stores a struct in memcache via mc.Set.
The item is a memcache.Item struct containing the key, value, expiration (in seconds) and Flags.
### Flags
The flags used above, memcache.FlagJSON | memcache.FlagGzip, mean: encode as JSON and compress with gzip.
The flag constants are defined in the kratos/pkg/cache/memcache package and cover encodings such as gob, json and protobuf, plus gzip compression.
```go
const(
// Flag, 15(encoding) bit+ 17(compress) bit
// FlagRAW default flag.
FlagRAW = uint32(0)
// FlagGOB gob encoding.
FlagGOB = uint32(1) << 0
// FlagJSON json encoding.
FlagJSON = uint32(1) << 1
// FlagProtobuf protobuf
FlagProtobuf = uint32(1) << 2
// FlagGzip gzip compress.
FlagGzip = uint32(1) << 15
)
```
## Delete a key
```go
// DelCacheDemo delete data from mc
func (d *Dao) DelCacheDemo(c context.Context, id int64) (err error) {
key := demoKey(id)
if err = d.mc.Delete(c, key); err != nil {
if err == memcache.ErrNotFound {
err = nil
return
}
prom.BusinessErrCount.Incr("mc:DelCacheDemo")
log.Errorv(c, log.KV("DelCacheDemo", fmt.Sprintf("%+v", err)), log.KV("key", key))
return
}
return
}
```
The code above, produced by the generator, deletes a key from memcache via mc.Delete.
As with reads, when the key does not exist memcache returns memcache.ErrNotFound; if you do not need to handle it, set the returned error to nil as shown above.
# Further reading
[memcache code generator](kratos-genmc.md)
[redis module](cache-redis.md)
-------------
[Documentation index](summary.md)

@ -0,0 +1,226 @@
# Getting ready
We recommend using the [kratos tool](kratos-tool.md) to scaffold a project quickly, e.g. one named `kratos-demo`. The directory layout is:
```
├── CHANGELOG.md
├── CONTRIBUTORS.md
├── LICENSE
├── README.md
├── cmd
   ├── cmd
   └── main.go
├── configs
   ├── application.toml
   ├── grpc.toml
   ├── http.toml
   ├── log.toml
   ├── memcache.toml
   ├── mysql.toml
   └── redis.toml
├── go.mod
├── go.sum
└── internal
├── dao
   └── dao.go
├── model
   └── model.go
├── server
   └── http
   └── http.go
└── service
└── service.go
```
# Getting started
## Configuration
After the project is created, enter its configs directory and open redis.toml:
```toml
demoExpire = "24h"
[demo]
name = "kratos-demo"
proto = "tcp"
addr = "127.0.0.1:6389"
idle = 10
active = 10
dialTimeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
idleTimeout = "10s"
```
In this file we configure the redis connection protocol (proto), address (addr), the pool's idle connection count (idle), maximum connections (active) and the various timeouts.
An optional redis expiration setting can be added here as well.
## Initialization
Enter the project's internal/dao directory and open dao.go:
```go
var (
rc struct {
Demo *redis.Config
DemoExpire xtime.Duration
}
)
checkErr(paladin.Get("redis.toml").UnmarshalTOML(&rc))
```
The paladin config manager parses the redis.toml shown above into the config we need.
```go
// Dao dao.
type Dao struct {
redis *redis.Pool
redisExpire int32
}
```
The dao struct holds the redis connection pool object and the expiration time.
```go
dao = &Dao{
// redis
redis: redis.NewPool(rc.Demo),
redisExpire: int32(time.Duration(rc.DemoExpire) / time.Second),
}
```
The NewPool function of the kratos/pkg/cache/redis package initializes the connection pool object from the parsed config.
## Ping
```go
// Ping ping the resource.
func (d *Dao) Ping(ctx context.Context) (err error) {
return d.pingRedis(ctx)
}
func (d *Dao) pingRedis(ctx context.Context) (err error) {
conn := d.redis.Get(ctx)
defer conn.Close()
if _, err = conn.Do("SET", "ping", "pong"); err != nil {
log.Error("conn.Set(PING) error(%v)", err)
}
return
}
```
The generated dao template ships with a redis ping method, used as a health signal for the load balancer; see [blademaster](blademaster-quickstart.md).
## Close
```go
// Close close the resource.
func (d *Dao) Close() {
d.redis.Close()
}
```
When the dao layer shuts down, calling Close on the redis pool closes the pool and releases its resources.
# Common operations
## Sending a single command: Do
```go
// DemoIncrby .
func (d *Dao) DemoIncrby(c context.Context, pid int) (err error) {
cacheKey := keyDemo(pid)
conn := d.redis.Get(c)
defer conn.Close()
if _, err = conn.Do("INCRBY", cacheKey, 1); err != nil {
log.Error("DemoIncrby conn.Do(INCRBY) key(%s) error(%v)", cacheKey, err)
}
return
}
```
The code above sends a single command to the redis server: get a connection from the pool with Get, then send one command with conn.Do.
Note that the connection must be closed with conn.Close once you are done with it.
## Sending commands in batch: Pipeline
Besides single commands, the kratos/pkg/cache/redis package also supports batching commands (redis pipeline), for example:
```go
// DemoIncrbys .
func (d *Dao) DemoIncrbys(c context.Context, pid int) (err error) {
cacheKey := keyDemo(pid)
conn := d.redis.Get(c)
defer conn.Close()
if err = conn.Send("INCRBY", cacheKey, 1); err != nil {
return
}
if err = conn.Send("EXPIRE", cacheKey, d.redisExpire); err != nil {
return
}
if err = conn.Flush(); err != nil {
log.Error("conn.Flush error(%v)", err)
return
}
for i := 0; i < 2; i++ {
if _, err = conn.Receive(); err != nil {
log.Error("conn.Receive error(%v)", err)
return
}
}
return
}
```
As with a single command, get a connection from the pool with Get and close it with conn.Close when done.
conn.Send writes commands into the client buffer, conn.Flush packs the buffered commands and sends them to the redis server, and the replies, returned in order, are read and handled with conn.Receive.
## Converting replies
Like the [memcache package](cache-mc.md), kratos/pkg/cache/redis provides Scan to convert redis replies into Go types.
It also provides many conversion shortcuts:
### Single get
For single reads, redis.Uint64/Int64/Float64/Int/String/Bool/Bytes convert the reply, for example:
```go
// GetDemo get
func (d *Dao) GetDemo(ctx context.Context, key string) (string, error) {
conn := d.redis.Get(ctx)
defer conn.Close()
return redis.String(conn.Do("GET", key))
}
```
### Batch get
For batch reads, redis.Int64s, Ints, Strings and ByteSlices convert the replies of commands such as MGET, HMGET, ZRANGE and SMEMBERS.
StringMap, IntMap and Int64Map convert the reply of HGETALL, for example:
```go
// HGETALLDemo get
func (d *Dao) HGETALLDemo(c context.Context, pid int64) (res map[string]int64, err error) {
var (
key = keyDemo(pid)
conn = d.redis.Get(c)
)
defer conn.Close()
if res, err = redis.Int64Map(conn.Do("HGETALL", key)); err != nil {
log.Error("HGETALL %v failed error(%v)", key, err)
}
return
}
```
# Further reading
[memcache module](cache-mc.md)
-------------
[Documentation index](summary.md)

@ -0,0 +1,23 @@
# Background
We need a unified cache package for all kinds of cache operations.
# Overview
* All cache operations go through connection pools, which keeps reads and writes fast and improves safety and reliability.
# Memcache
A memcache interface with protobuf, gob and json serialization plus gzip compression.
[memcache module](cache-mc.md)
# Redis
Interfaces for redis operations plus shortcuts that convert redis server replies into Go types.
[redis module](cache-redis.md)
-------------
[Documentation index](summary.md)

@ -0,0 +1,111 @@
# Paladin SDK
## Modular configuration
Splitting configuration into modules makes it easier to manage and reduces the mistakes that config changes cause.
Among the config categories, environment config and application config are already managed by the platform.
A business project normally only needs business config and runtime config. Most projects used to keep one big config file; to manage it better, split the files by type.
For example:
| Name | Description |
|:------|:------|
| application.toml | runtime (online) config |
| mysql.toml | business db config |
| hbase.toml | business hbase config |
| memcache.toml | business mc config |
| redis.toml | business redis config |
| http.toml | business http client/server/auth config |
| grpc.toml | business grpc client/server config |
## Usage
paladin is a config SDK client covering remote, file and mock sources, so local files or a remote config center can be used, with automatic reload of watched objects built in.
### Remote config center
Environment variables such as APP_ID/DEPLOY_ENV/ZONE/HOSTNAME can be injected and used together with a remote-config-center SDK built on paladin.
### Pointing at local files
```shell
./cmd -conf=/data/conf/app/demo.toml
# or multi file
./cmd -conf=/data/conf/app/
```
### Mock config files
```go
func TestMain(t *testing.M) {
mock := make(map[string]string)
mock["application.toml"] = `
demoSwitch = false
demoNum = 100
demoAPI = "xxx"
`
paladin.DefaultClient = paladin.NewMock(mock)
}
```
### example main
```go
// main.go
func main() {
	// initialize paladin
	if err := paladin.Init(); err != nil {
		panic(err)
	}
	log.Init(nil) // debug flag: log.dir={path}
	defer log.Close()
}
```
### example HTTP/gRPC
```go
// http.toml:
//   [server]
//   addr = "0.0.0.0:9000"
//   timeout = "1s"

// server.go
func NewServer() {
	// Passing nil uses the HTTP/gRPC framework's flags or environment variables
	// (possibly injected by docker; default ports 8000/9000).
	engine := bm.DefaultServer(nil)

	// To replace the defaults with http.toml:
	var bc struct {
		Server *bm.ServerConfig
	}
	if err := paladin.Get("http.toml").UnmarshalTOML(&bc); err != nil {
		// when the file is missing, bc.Server stays nil and defaults apply
		if err != paladin.ErrNotExist {
			panic(err)
		}
	}
	engine = bm.DefaultServer(bc.Server)
	_ = engine
}
```
### example Service (hot-reloading runtime config)
```go
// service.go
type Service struct {
	ac *paladin.Map
}

func New() *Service {
	// paladin.Map supports automatic hot reloading via atomic.Value
	var ac = new(paladin.TOML)
	if err := paladin.Watch("application.toml", ac); err != nil {
		panic(err)
	}
	s := &Service{
		ac: ac,
	}
	return s
}

func (s *Service) Test() {
	sw, err := s.ac.Bool("switch")
	if err != nil {
		// TODO: handle the error
	}
	// or fall back to a default value
	sw = paladin.Bool(s.ac.Value("switch"), false)
	_ = sw
}
```

@ -0,0 +1,51 @@
# config
## Introduction
Configuration management looks simple at first, but it is in fact a major source of instability,
namely failures caused by change management. With our current config-center (config-service) setup, binary releases and config changes happen asynchronously, and every config change requires a rebuild and redeploy.
We therefore reorganized the config files, split configuration into modules, and built the easy-to-use paladin config sdk.
## Environment config
| flag | env | remark |
|:----------|:----------|:------|
| region | REGION | deployment region, e.g. sh-Shanghai, gz-Guangzhou, bj-Beijing |
| zone | ZONE | availability zone, e.g. sh001-Shanghai core, sh004-Shanghai Jiading |
| deploy.env | DEPLOY_ENV | dev-development, fat1-feature, uat-integration, pre-staging, prod-production |
| deploy.color | DEPLOY_COLOR | service color, e.g. blue (test-feature tinted requests) |
| - | HOSTNAME | host name, xxx-hostname |
These global environment variables describe the deployment environment and are injected by the system, the release system or supervisor, so no extra configuration is needed; during development they can be injected via flags for local testing.
## Application config
| flag | env | default | remark |
|:----------|:----------|:-------------|:------|
| appid | APP_ID | - | application ID |
| http | HTTP | tcp://0.0.0.0:8000/?timeout=1s | http listen address |
| http.perf | HTTP_PERF | tcp://0.0.0.0:2233/?timeout=1s | http perf (pprof) listen address |
| grpc | GRPC | tcp://0.0.0.0:9000/?timeout=1s&idle_timeout=60s | grpc listen address |
| grpc.target | - | - | pin a service to fixed nodes:<br>-grpc.target=demo.service=127.0.0.1:9000 <br>-grpc.target=demo.service=127.0.0.2:9000 |
| discovery.nodes | DISCOVERY_NODES | - | discovery nodes: 127.0.0.1:7171,127.0.0.2:7171 |
| log.v | LOG_V | 0 | log level:<br>DEBUG:0 INFO:1 WARN:2 ERROR:3 FATAL:4 |
| log.stdout | LOG_STDOUT | false | log to stdout: true, false |
| log.dir | LOG_DIR | - | log file directory; if set, logs are written to files, otherwise no log files are produced |
| log.agent | LOG_AGENT | - | log collection agent:<br>unixpacket:///var/run/lancer/collector_tcp.sock?timeout=100ms&chan=1024 |
| log.module | LOG_MODULE | - | per-file verbosity, format: file=1,file2=2. |
| log.filter | LOG_FILTER | - | filter sensitive fields, format: field1,field2. |
These are application-level settings; the release system and supervisor normally inject them per deployment environment, so no extra configuration is needed. During development they can be injected via flags for local testing.
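For local testing, the flag/env-variable convention above can be reproduced with a small fallback helper; this is only an illustrative sketch, not part of kratos, which wires these flags internally:
```go
package main

import (
	"flag"
	"fmt"
	"os"
)

// envOr returns the environment variable if set, otherwise the default.
func envOr(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

func main() {
	appID := flag.String("appid", envOr("APP_ID", ""), "application ID")
	httpAddr := flag.String("http", envOr("HTTP", "tcp://0.0.0.0:8000/?timeout=1s"), "http listen address")
	flag.Parse()
	fmt.Println(*appID, *httpAddr)
}
```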
## Business config
Business components such as Redis and MySQL can be initialized from static config files, configured per business cluster.
## Runtime config
Settings that must be read and changed online, such as a business switch, can be updated in real time through config reload.
## Further reading
[paladin config sdk](config-paladin.md)
-------------
[Documentation index](summary.md)

@ -0,0 +1,54 @@
# database/hbase
## Overview
An HBase client wrapper with tracing and statistics added.
## Configuration
Specify the zookeeper address of the hbase cluster.
```
config := &hbase.Config{Zookeeper: &hbase.ZKConfig{Addrs: []string{"localhost"}}}
client := hbase.NewClient(config)
```
## Usage
```
package main
import (
"context"
"fmt"
"github.com/bilibili/kratos/pkg/database/hbase"
)
func main() {
config := &hbase.Config{Zookeeper: &hbase.ZKConfig{Addrs: []string{"localhost"}}}
client := hbase.NewClient(config)
//
values := map[string]map[string][]byte{"name": {"firstname": []byte("hello"), "lastname": []byte("world")}}
ctx := context.Background()
// write a row
// table: user
// rowkey: user1
// values["family"] = columns
_, err := client.PutStr(ctx, "user", "user1", values)
if err != nil {
panic(err)
}
// read a row
// table: user
// rowkey: user1
result, err := client.GetStr(ctx, "user", "user1")
if err != nil {
panic(err)
}
fmt.Printf("%v", result)
}
```
-------------
[Documentation index](summary.md)

@ -0,0 +1,234 @@
# Getting ready
We recommend using the [kratos tool](kratos-tool.md) to scaffold a project quickly, e.g. one named `kratos-demo`. The directory layout is:
```
├── CHANGELOG.md
├── CONTRIBUTORS.md
├── LICENSE
├── README.md
├── cmd
   ├── cmd
   └── main.go
├── configs
   ├── application.toml
   ├── grpc.toml
   ├── http.toml
   ├── log.toml
   ├── memcache.toml
   ├── mysql.toml
   └── redis.toml
├── go.mod
├── go.sum
└── internal
├── dao
   └── dao.go
├── model
   └── model.go
├── server
   └── http
   └── http.go
└── service
└── service.go
```
# Getting started
## Configuration
After the project is created, enter its configs directory and open mysql.toml:
```toml
[demo]
addr = "127.0.0.1:3306"
dsn = "{user}:{password}@tcp(127.0.0.1:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8"
readDSN = ["{user}:{password}@tcp(127.0.0.2:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8mb4,utf8","{user}:{password}@tcp(127.0.0.3:3306)/{database}?timeout=1s&readTimeout=1s&writeTimeout=1s&parseTime=true&loc=Local&charset=utf8,utf8mb4"]
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "200ms"
execTimeout = "300ms"
tranTimeout = "400ms"
```
In this file we configure the read and write DSNs, the address (addr), the pool's idle connection count (idle), maximum connections (active) and the various timeouts.
If readDSN is configured, read operations prefer the readDSN connections.
## Initialization
Enter the project's internal/dao directory and open dao.go:
```go
var (
dc struct {
Demo *sql.Config
}
)
checkErr(paladin.Get("mysql.toml").UnmarshalTOML(&dc))
```
The paladin config manager parses the mysql.toml shown above into the mysql config we need.
```go
// Dao dao.
type Dao struct {
db *sql.DB
}
```
The dao struct holds the mysql connection pool object.
```go
dao = &Dao{
db: sql.NewMySQL(dc.Demo),
}
```
The NewMySQL function of the kratos/pkg/database/sql package initializes the connection pool object from the parsed config.
## Ping
```go
// Ping ping the resource.
func (d *Dao) Ping(ctx context.Context) (err error) {
return d.db.Ping(ctx)
}
```
The generated dao template ships with a mysql ping method, used as a health signal for the load balancer; see [blademaster](blademaster-quickstart.md).
## Close
```go
// Close close the resource.
func (d *Dao) Close() {
d.db.Close()
}
```
When the dao layer shuts down, calling Close on the mysql pool closes the pool and releases its resources.
# Common operations
## Single-row query
```go
// GetDemo returns a user role.
func (d *Dao) GetDemo(c context.Context, did int64) (demo int8, err error) {
err = d.db.QueryRow(c, _getDemoSQL, did).Scan(&demo)
if err != nil && err != sql.ErrNoRows {
log.Error("d.GetDemo.Query error(%v)", err)
return
}
return demo, nil
}
```
db.QueryRow is for queries that return at most one row; chain Scan after it to convert the mysql value into Go types.
When no row matches, mysql returns sql.ErrNoRows; if you do not need to handle it, ignore it as shown above.
## Multi-row query
```go
// ResourceLogs ResourceLogs.
func (d *Dao) GetDemos(c context.Context, dids []int64) (demos []int8, err error) {
rows, err := d.db.Query(c, _getDemosSQL, dids)
if err != nil {
log.Error("query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
var tmpD int8
if err = rows.Scan(&tmpD); err != nil {
log.Error("scan demo log error(%v)", err)
return
}
demos = append(demos, tmpD)
}
return
}
```
db.Query is normally used for multi-row queries and returns *sql.Rows and an error.
Use rows.Next() to advance to the next row and rows.Scan() to convert it into Go types; Next returns false when there are no more rows, ending the loop.
Note: once you are done with the rows object, call rows.Close to release the connection and its resources.
## Executing statements
```go
// DemoExec exec
func (d *Dao) DemoExec(c context.Context, id int64) (rows int64, err error) {
res, err := d.db.Exec(c, _demoUpdateSQL, id)
if err != nil {
log.Error("db.DemoExec.Exec(%s) error(%v)", _demoUpdateSQL, err)
return
}
return res.RowsAffected()
}
```
Use db.Exec for UPDATE/DELETE/INSERT statements; it returns *sql.Result and an error:
```go
// A Result summarizes an executed SQL command.
type Result interface {
LastInsertId() (int64, error)
RowsAffected() (int64, error)
}
```
The Result interface exposes the number of affected rows and LastInsertId (usually the auto-increment primary key of an INSERT).
## Transactions
The kratos/pkg/database/sql package supports transactions, used as follows:
Start a transaction:
```go
tx, err := d.db.Begin(c)
if err != nil {
	log.Error("db begin transaction failed, err=%+v", err)
return
}
```
在事务中执行语句:
```go
res, err := tx.Exec(_demoSQL, did)
if err != nil {
return
}
rows, _ := res.RowsAffected()
```
Commit the transaction:
```go
if err = tx.Commit(); err != nil {
	log.Error("db commit transaction failed, err=%+v", err)
}
```
Roll back the transaction:
```go
if err = tx.Rollback(); err != nil {
	log.Error("db rollback failed, err=%+v", err)
}
```
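Putting the snippets together, a complete transactional method could look like the sketch below; `_demoUpdateSQL` and `did` come from the earlier examples, and error handling is kept minimal:
```go
// DemoTx runs the demo update inside a transaction.
func (d *Dao) DemoTx(c context.Context, did int64) (err error) {
	tx, err := d.db.Begin(c)
	if err != nil {
		log.Error("db begin transaction failed, err=%+v", err)
		return
	}
	defer func() {
		if err != nil {
			// any failure rolls the whole transaction back
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				log.Error("db rollback failed, err=%+v", rollbackErr)
			}
			return
		}
		if err = tx.Commit(); err != nil {
			log.Error("db commit transaction failed, err=%+v", err)
		}
	}()
	if _, err = tx.Exec(_demoUpdateSQL, did); err != nil {
		return
	}
	return
}
```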
# Further reading
[tidb module](database-tidb.md)
[hbase module](database-hbase.md)
-------------
[Documentation index](summary.md)

@ -0,0 +1,21 @@
# database/sql
## Background
Database drivers wrapped with circuit breaking, tracing, statistics and per-call timeouts.
Data-access code usually lives in the `internal/dao` directory and exposes the corresponding data-access interfaces.
## MySQL
A MySQL driver with read/write splitting, context, timeout, trace and statistics support, plus error-based circuit breaking to prevent database avalanches.
[mysql client](database-mysql.md)
## HBase
An HBase client with trace, slowlog and statistics support.
[hbase client](database-hbase.md)
## TiDB
A TiDB client with service discovery and circuit breaking.
[tidb client](database-tidb.md)
-------------
[Documentation index](summary.md)

@ -0,0 +1,30 @@
### kratos tool genbts
> Cache back-to-source code generation
Add an mc cache interface definition in internal/dao/dao.go, optionally with the corresponding [annotation parameters](../../tool/kratos-gen-mc/README.md);
put `go:generate kratos tool genbts` above the interface;
then run `go generate` in that directory and the generated dao.bts.go code appears.
### Back-to-source template
```go
//go:generate kratos tool genbts
type _bts interface {
// bts: -batch=2 -max_group=20 -batch_err=break -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1
Demos(c context.Context, keys []int64) (map[int64]*Demo, error)
// bts: -sync=true -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1
Demo(c context.Context, key int64) (*Demo, error)
// bts: -paging=true
Demo1(c context.Context, key int64, pn int, ps int) (*Demo, error)
// bts: -nullcache=&Demo{ID:-1} -check_null_code=$.ID==-1
None(c context.Context) (*Demo, error)
}
```
### Reference
See the complete testdata example: kratos/tool/kratos-gen-bts/testdata
-------------
[Documentation index](summary.md)

@ -0,0 +1,71 @@
### kratos tool genmc
> Cache code generation
Add an mc cache interface definition in internal/dao/dao.go, optionally with the corresponding [annotation parameters](../../tool/kratos-gen-mc/README.md);
put `go:generate kratos tool genmc` above the interface;
then run `go generate` in that directory and the generated mc.cache.go code appears.
### Cache template
```go
//go:generate kratos tool genmc
type _mc interface {
// mc: -key=demoKey
CacheDemos(c context.Context, keys []int64) (map[int64]*Demo, error)
// mc: -key=demoKey
CacheDemo(c context.Context, key int64) (*Demo, error)
// mc: -key=keyMid
CacheDemo1(c context.Context, key int64, mid int64) (*Demo, error)
// mc: -key=noneKey
CacheNone(c context.Context) (*Demo, error)
// mc: -key=demoKey
CacheString(c context.Context, key int64) (string, error)
// mc: -key=demoKey -expire=d.demoExpire -encode=json
AddCacheDemos(c context.Context, values map[int64]*Demo) error
// mc: -key=demo2Key -expire=d.demoExpire -encode=json
AddCacheDemos2(c context.Context, values map[int64]*Demo, tp int64) error
// custom comments are also supported here and replace the default ones
// mc: -key=demoKey -expire=d.demoExpire -encode=json|gzip
AddCacheDemo(c context.Context, key int64, value *Demo) error
// mc: -key=keyMid -expire=d.demoExpire -encode=gob
AddCacheDemo1(c context.Context, key int64, value *Demo, mid int64) error
// mc: -key=noneKey
AddCacheNone(c context.Context, value *Demo) error
// mc: -key=demoKey -expire=d.demoExpire
AddCacheString(c context.Context, key int64, value string) error
// mc: -key=demoKey
DelCacheDemos(c context.Context, keys []int64) error
// mc: -key=demoKey
DelCacheDemo(c context.Context, key int64) error
// mc: -key=keyMid
DelCacheDemo1(c context.Context, key int64, mid int64) error
// mc: -key=noneKey
DelCacheNone(c context.Context) error
}
func demoKey(id int64) string {
return fmt.Sprintf("art_%d", id)
}
func demo2Key(id, tp int64) string {
return fmt.Sprintf("art_%d_%d", id, tp)
}
func keyMid(id, mid int64) string {
return fmt.Sprintf("art_%d_%d", id, mid)
}
func noneKey() string {
return "none"
}
```
### Reference
See the complete testdata example: kratos/tool/kratos-gen-mc/testdata
-------------
[Documentation index](summary.md)

@ -0,0 +1,38 @@
### kratos tool protoc
```
// generate all
kratos tool protoc api.proto
// generate gRPC
kratos tool protoc --grpc api.proto
// generate BM HTTP
kratos tool protoc --bm api.proto
// generate swagger
kratos tool protoc --swagger api.proto
```
This generates the corresponding `api.pb.go/api.bm.go/api.swagger.json` files.
> To run the tool on Windows/Linux, install the protobuf compiler first.
The tool is really a `shell` script that assembles the `protoc` command line, picks up the needed `*.proto` files and the `proto` files in the current directory, and finally runs commands like the following:
```shell
export KRATOS_HOME=<path to kratos>
export KRATOS_DEMO=<path to the project>
# generate api.pb.go
protoc -I$GOPATH/src:$KRATOS_HOME/third_party:$KRATOS_DEMO/api --gofast_out=plugins=grpc:$KRATOS_DEMO/api $KRATOS_DEMO/api/api.proto
# generate api.bm.go
protoc -I$GOPATH/src:$KRATOS_HOME/third_party:$KRATOS_DEMO/api --bm_out=$KRATOS_DEMO/api $KRATOS_DEMO/api/api.proto
# generate api.swagger.json
protoc -I$GOPATH/src:$KRATOS_HOME/third_party:$KRATOS_DEMO/api --bswagger_out=$KRATOS_DEMO/api $KRATOS_DEMO/api/api.proto
```
You can run these commands yourself to generate from `proto`; see the official [protobuf](https://github.com/google/protobuf) options as well.
-------------
[Documentation index](summary.md)

@ -0,0 +1,11 @@
### kratos tool swagger
```shell
kratos tool swagger serve api/api.swagger.json
```
After running the command, the browser opens the swagger documentation automatically.
See the official [go-swagger](https://github.com/go-swagger/go-swagger) options for more.
-------------
[Documentation index](summary.md)

@ -0,0 +1,102 @@
# Introduction
kratos ships a set of handy tools: one-command project scaffolding, http & grpc code generation from proto, cache back-to-source code generation, memcache access code generation, swagger doc generation and more.
# Getting the tools
Run the following to install the `kratos` tool quickly:
```shell
go get -u github.com/bilibili/kratos/tool/kratos
```
Now let's get familiar with the tools.
# The kratos binary
`kratos` is the entry point of the whole tool set; like `go`, it can run sub-tools, e.g. `go build` and `go tool`. Let's look at the `-h` output first:
```
NAME:
kratos - kratos tool
USAGE:
kratos [global options] command [command options] [arguments...]
VERSION:
0.0.1
COMMANDS:
new, n create new project
build, b kratos build
run, r kratos run
tool, t kratos tool
version, v kratos version
self-upgrade kratos self-upgrade
help, h Shows a list of commands or help for one command
GLOBAL OPTIONS:
--help, -h show help
--version, -v print the version
```
As you can see, `kratos` has COMMANDS such as `new`, `build`, `run` and `tool`; let's walk through them one by one.
# kratos new
`kratos new` creates a project quickly:
```shell
kratos new kratos-demo
```
This generates a project named `kratos-demo` in the current directory. The owner and path can also be specified:
```shell
kratos new kratos-demo -o YourName -d YourPath
```
Note that by default `kratos new` does not generate the protobuf-defined `grpc` and `bm` sample code; add `--proto` if you want it:
```shell
kratos new kratos-demo -o YourName -d YourPath --proto
```
> Note: on systems other than macOS you must install protoc yourself; otherwise the `.pb.go` and `.bm.go` files for the `proto` file under the generated project's `api` directory are not generated automatically.
> You can also generate them by following the [protoc notes](protoc.md).
# kratos build & run
`kratos build` and `kratos run` are thin wrappers around `go build` and `go run`; they let you build and run from any directory of the project for quick debugging, nothing more.
# kratos tool
`kratos tool` covers http & grpc code generation from proto, cache back-to-source code generation, memcache access code generation, swagger doc generation and more. Its output looks like:
```
kratos tool
swagger(已安装): swagger api文档 Author(goswagger.io) [2019/05/05]
protoc(已安装): 快速方便生成pb.go和bm.go的protoc封装,windows、Linux请先安装protoc工具 Author(kratos) [2019/05/04]
kratos(已安装): Kratos工具集本体 Author(kratos) [2019/04/02]
安装工具: kratos tool install demo
执行工具: kratos tool demo
安装全部工具: kratos tool install all
详细文档: https://github.com/bilibili/kratos/blob/master/doc/wiki-cn/kratos-tool.md
```
> Note: if a tool is not installed, it is installed automatically on first run; no explicit install is needed.
Tools integrated so far:
* [kratos](kratos-tool.md): the tool itself, used only for install and update;
* [protoc](kratos-protoc.md): quickly generates gRPC, HTTP and Swagger files; Windows and Linux users must install protobuf manually;
* [swagger](kratos-swagger.md): serves the generated HTTP API docs, e.g. via `kratos tool swagger serve api/api.swagger.json`;
* [genmc](kratos-genmc.md): generates memcached cache code;
* [genbts](kratos-genbts.md): generates cache back-to-source code; on a cache miss the fallback function fetches from the data source and fills the cache;
-------------
[Documentation index](summary.md)

@ -0,0 +1,37 @@
# Logging library
## Overview
A high-performance, field-based log library in the style of [zap](https://github.com/uber-go/zap), offering the Info, Warn and Error levels;
it supports context, making it easy to attach environment information and trace the log chain. Inside the framework everything uses fields, avoiding the cost of format-style logging.
## Configuration options
| flag | env | type | remark |
|:----------|:----------|:-------------:|:------|
| log.v | LOG_V | int | log level: DEBUG:0 INFO:1 WARN:2 ERROR:3 FATAL:4 |
| log.stdout | LOG_STDOUT | bool | log to stdout: true, false |
| log.dir | LOG_DIR | string | log file directory; if set, logs are written to files, otherwise no log files are produced |
| log.agent | LOG_AGENT | string | log collection agent: unixpacket:///var/run/lancer/collector_tcp.sock?timeout=100ms&chan=1024 |
| log.module | LOG_MODULE | string | per-file verbosity, format: file=1,file2=2. |
| log.filter | LOG_FILTER | string | filter sensitive fields, format: field1,field2. |
## Usage
```go
func main() {
	// parse flags
	flag.Parse()
	// initialize the log module
	log.Init(nil)
	// write logs
	log.Info("hi:%s", "kratos")
	log.Infoc(context.TODO(), "hi:%s", "kratos")
	log.Infov(context.TODO(), log.KVInt("key1", 100), log.KVString("key2", "test value"))
}
```
## Further reading
* [log-agent](log-agent.md)
-------------
[Documentation index](summary.md)

@ -0,0 +1,29 @@
# protoc
`protobuf` is Google's compact and efficient format for serializing structured data. It suits data storage and RPC data exchange, and works as a language-neutral, platform-neutral, extensible serialization format for protocols, storage and more.
To use `protobuf`, write a `.proto` file and then compile it. Compiling `proto` files requires the official `protoc` compiler; see the installation docs: [official protoc compiler](https://github.com/protocolbuffers/protobuf#protocol-compiler-installation).
Note: `protoc` compiles `proto` files but cannot generate language-specific code by itself, so it is normally paired with a per-language code generator, e.g. Go's [gogo protobuf](https://github.com/gogo/protobuf); install it first as documented.
With the tools installed, enter the `api` directory and run:
```shell
export KRATOS_HOME=<path to kratos>
export KRATOS_DEMO=<path to the project>
# generate api.pb.go
protoc -I$GOPATH/src:$KRATOS_HOME/tool/protobuf/pkg/extensions:$KRATOS_DEMO/api --gogofast_out=plugins=grpc:$KRATOS_DEMO/api $KRATOS_DEMO/api/api.proto
# generate api.bm.go
protoc -I$GOPATH/src:$KRATOS_HOME/tool/protobuf/pkg/extensions:$KRATOS_DEMO/api --bm_out=$KRATOS_DEMO/api $KRATOS_DEMO/api/api.proto
# generate api.swagger.json
protoc -I$GOPATH/src:$KRATOS_HOME/tool/protobuf/pkg/extensions:$KRATOS_DEMO/api --bswagger_out=$KRATOS_DEMO/api $KRATOS_DEMO/api/api.proto
```
Remember to replace the `/Users/felix/work/go/src` directory with your own local GOPATH; `--gogofast_out` tells `protoc` to generate code with the `gogo protobuf` generator.
-------------
[Documentation index](summary.md)

@ -1,18 +1,20 @@
# Quick start
To use a kratos project quickly, use `kratos tool` as follows:
To use a kratos project quickly, use the `kratos` tool as follows:
```shell
go get -u github.com/bilibili/kratos/tool/kratos
kratos init
cd $GOPATH/src
kratos new kratos-demo
```
Follow the prompts to create the project quickly; [kratos-demo](https://github.com/bilibili/kratos-demo), for example, was generated this way. The directory layout is:
```
├── CHANGELOG.md # CHANGELOG
├── CONTRIBUTORS.md # CONTRIBUTORS
├── README.md # README
├── api # the api directory holds the externally exposed proto files and the generated pb.go files
├── api # the api directory holds the externally exposed proto files and the generated pb.go files; note: requires the "--grpc" flag
   ├── api.proto
   ├── api.pb.go # pb.go file generated via go generate
   └── generate.go
@ -51,9 +53,7 @@ go build
Open [http://localhost:8000/kratos-demo/start](http://localhost:8000/kratos-demo/start) in a browser and you will see `Golang 大法好 !!!` printed.
# Animated demo
![kratos init](/doc/img/kratosinit.gif)
[kratos tool](kratos-tool.md)
-------------

@ -0,0 +1,60 @@
# Adaptive rate-limiting protection
kratos borrows the adaptive rate-limiting design of the Sentinel project: it combines the service's CPU usage, the qps of successful requests and the rt of successful requests to protect the service adaptively.
## Core goals
* Sense load and qps automatically, reducing manual tuning
* Shave the peak: keep the system from collapsing under overload and keep it running at a high-watermark qps
## Limiting rules
1. Metrics
|Metric|Meaning|
|---|---|
|cpu|mean CPU usage over the last 1s, computed with a moving average sampled every 250ms|
|inflight|number of requests currently being processed|
|pass|number of successfully processed requests|
|rt|response time of successful requests|
2. Sliding window
In adaptive rate limiting the collected metrics are extremely time-sensitive: the system only needs the qps and rt of the most recent short interval, and older data is discarded automatically. To achieve this, kratos keeps the samples in a sliding window.
![ratelimit-rolling-window](/doc/img/ratelimit-rolling-window.png)
The figure above shows a rolling window with two buckets. The whole window holds the last 1s of samples; each small bucket holds 500ms of samples.
As time moves on, expired buckets are overwritten by new data: in the figure, at 1000-1500ms the data of bucket 1 is dropped as expired, and bucket 3's data then fills the head of the window.
3. The limiting formula
The rule for deciding whether to drop the current request is:
`cpu > 800 AND (Now - PrevDrop) < 1s AND (MaxPass * MinRt * windows / 1000) < InFlight`
MaxPass is the largest request count of any single sampling window within the last 5s.
MinRt is the smallest response time of any single sampling window within the last 5s.
windows is the number of sampling windows per second; with the default of 50 samples over 5s, windows is 10.
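A direct transliteration of the rule above in Go; the cpu, maxPass, minRT, windows and inflight readings are assumed to come from the sliding-window counters just described:
```go
// shouldDrop reports whether the current request should be rejected.
func shouldDrop(cpu int64, prevDrop time.Time, maxPass, minRT, windows, inflight int64) bool {
	// cpu > 800 AND (Now - PrevDrop) < 1s AND (MaxPass * MinRt * windows / 1000) < InFlight
	return cpu > 800 &&
		time.Since(prevDrop) < time.Second &&
		maxPass*minRT*windows/1000 < inflight
}
```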
## Load-test report
Scenario 1: the request rate keeps rising by one request per second; the results are:
![ratelimit-benchmark-up-1](/doc/img/ratelimit-benchmark-up-1.png)
The left side is without rate limiting, the right side with it.
Without rate limiting, the system starts to jitter at 700 qps and collapses at 1k qps, with almost no new requests let through; with rate limiting, the system holds steady around 600 qps, rt does not explode and the service stays up. Rate limiting clearly protects the service.
References:
[Sentinel adaptive system protection](https://github.com/alibaba/Sentinel/wiki/%E7%B3%BB%E7%BB%9F%E8%87%AA%E9%80%82%E5%BA%94%E9%99%90%E6%B5%81)
-------------
[Documentation index](summary.md)

@ -9,8 +9,13 @@
* [bm middleware](blademaster-mid.md)
* [bm protobuf](blademaster-pb.md)
* [grpc warden](warden.md)
* [middleware](warden-mid.md)
* [protobuf generation](warden-pb.md)
* [warden quickstart](warden-quickstart.md)
* [warden interceptor](warden-mid.md)
* [warden resolver](warden-resolver.md)
* [warden balancer](warden-balancer.md)
* [warden protobuf](warden-pb.md)
* [config](config.md)
* [paladin](config-paladin.md)
* [dapper trace](dapper.md)
* [log](logger.md)
* [log-agent](log-agent.md)
@ -21,4 +26,8 @@
* [cache](cache.md)
* [memcache](cache-mc.md)
* [redis](cache-redis.md)
* [kratos tool](kratos-tool.md)
* [kratos tool](kratos-tool.md)
* [protoc](kratos-protoc.md)
* [swagger](kratos-swagger.md)
* [genmc](kratos-genmc.md)
* [genbts](kratos-genbts.md)

@ -0,0 +1,43 @@
# Warden Balancer
## Introduction
grpc-go ships a round-robin balancer, but it supports neither weights nor needs such as color-based filtering, so a new load-balancing algorithm is needed.
## WRR (Weighted Round Robin)
This algorithm adds dynamic weight adjustment on top of weighted round robin: each node gets an initial weight, the algorithm then scores nodes dynamically from cpu, latency, server-side error rate and client-side error rate, and multiplies the score by the user-defined initial weight to get the final weight.
## P2C (Pick of two choices)
This algorithm avoids herd behaviour by picking the winner out of two randomly chosen nodes, and uses an ewma to track the server's near-real-time state.
Server side:
The server measures its CPU usage over the last 500ms (taking cgroup limits into account and dividing by the number of CPU cores), multiplies it by 1000 and piggybacks it back in the Trailer of every grpc response:
cpu_usage
uint64 encoded with string
cpu_usage : 1000
Client side:
Main inputs:
* server_cpu: the server's CPU usage over the last 500ms, taken from the cpu_usage trailer of each response
* inflight: the number of requests this client has sent and is still waiting on (pending requests)
* latency: request latency computed with an exponentially weighted moving average
* client_success: request success rate computed with an exponentially weighted moving average (only gRPC-internal errors such as context deadline are counted); a sketch of how these signals can combine follows below
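An illustrative load score combining the signals above; warden's real p2c implementation differs in detail, this only shows the pick-of-two idea:
```go
type node struct {
	serverCPU float64 // from the cpu_usage trailer
	inflight  int64   // pending requests on this client
	latency   float64 // EWMA latency in milliseconds
	success   float64 // EWMA client-side success rate
}

// load is lower for healthier nodes: busy, slow or failing nodes score higher.
func load(n *node) float64 {
	return n.serverCPU * float64(n.inflight+1) * n.latency / (n.success + 0.01)
}

// pick returns the better of two randomly chosen nodes.
func pick(a, b *node) *node {
	if load(a) <= load(b) {
		return a
	}
	return b
}
```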
The client now uses the p2c balancer by default, via `grpc.WithBalancerName(p2c.Name)`:
```go
// NewClient returns a new blank Client instance with a default client interceptor.
// opt can be used to add grpc dial options.
func NewClient(conf *ClientConfig, opt ...grpc.DialOption) *Client {
c := new(Client)
if err := c.SetConfig(conf); err != nil {
panic(err)
}
c.UseOpt(grpc.WithBalancerName(p2c.Name))
c.UseOpt(opt...)
c.Use(c.recovery(), clientLogging(), c.handle())
return c
}
```
-------------
[Documentation index](summary.md)

@ -0,0 +1,376 @@
# Overview
gRPC exposes two interceptor interfaces:
* `grpc.UnaryServerInterceptor`, the server-side interceptor
* `grpc.UnaryClientInterceptor`, the client-side interceptor
On top of these two interceptors, shared modules can be wrapped as needed; `warden/logging.go`, for example, is the shared logging logic.
# Analysis
## Server-side interceptor
Let's first look at the declaration of `grpc.UnaryServerInterceptor` ([official source](https://github.com/grpc/grpc-go/blob/master/interceptor.go)):
```go
// UnaryServerInfo consists of various information about a unary RPC on
// server side. All per-rpc information may be mutated by the interceptor.
type UnaryServerInfo struct {
// Server is the service implementation the user provides. This is read-only.
Server interface{}
// FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string
}
// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
// the status message of the RPC.
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
// of the service method implementation. It is the responsibility of the interceptor to invoke handler
// to complete the RPC.
type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
```
It is fairly simple:
* a `UnaryServerInfo` struct carrying the `Server` and `FullMethod` fields; `Server` is the `gRPC server` instance and `FullMethod` is the full name of the called method
* a `UnaryHandler` that carries the `Handler`, i.e. the method generated from the `service` declared in the `proto` file
* a `UnaryServerInterceptor` that wraps the `Handler` and can run interception code before and after it
To make the execution flow concrete, look at the following `proto`-generated code ([source](https://github.com/bilibili/kratos-demo/blob/master/api/api.pb.go)):
```go
func _Demo_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(HelloReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DemoServer).SayHello(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/demo.service.v1.Demo/SayHello",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DemoServer).SayHello(ctx, req.(*HelloReq))
}
return interceptor(ctx, in, info, handler)
}
```
`_Demo_SayHello_Handler` is the key method: it is wrapped into a `grpc.ServiceDesc` and registered inside gRPC; look for `s.RegisterService(&_Demo_serviceDesc, srv)` in the generated `pb.go`.
* When the `gRPC server` receives a request, it first looks up the `Handler` for the requested method (e.g. `_Demo_SayHello_Handler`) in the registered `grpc.ServiceDesc` and executes it
* As the code above shows, when `interceptor` is not `nil`, `_Demo_SayHello_Handler` wraps `SayHello` into a `grpc.UnaryHandler` and passes it to the `interceptor`
That is the execution path of a `UnaryServerInterceptor`. How the `interceptor` inside `_Demo_SayHello_Handler` is injected into the `gRPC server` is shown below ([official source](https://github.com/grpc/grpc-go/blob/master/server.go)):
```go
// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
// server. Only one unary interceptor can be installed. The construction of multiple
// interceptors (e.g., chaining) can be implemented at the caller.
func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
return func(o *options) {
if o.unaryInt != nil {
panic("The unary server interceptor was already set and may not be reset.")
}
o.unaryInt = i
}
}
```
Pay close attention to this method's comment!!!
> Only one unary interceptor can be installed. The construction of multiple interceptors (e.g., chaining) can be implemented at the caller.
`gRPC` itself supports only one `interceptor`; to get several you must build the chaining yourself, so `warden` implements an `interceptor chain` on top of `grpc.UnaryServerInterceptor`, shown below ([source](https://github.com/bilibili/kratos/blob/master/pkg/net/rpc/warden/server.go)):
```go
// Use attachs a global inteceptor to the server.
// For example, this is the right place for a rate limiter or error management inteceptor.
func (s *Server) Use(handlers ...grpc.UnaryServerInterceptor) *Server {
finalSize := len(s.handlers) + len(handlers)
if finalSize >= int(_abortIndex) {
panic("warden: server use too many handlers")
}
mergedHandlers := make([]grpc.UnaryServerInterceptor, finalSize)
copy(mergedHandlers, s.handlers)
copy(mergedHandlers[len(s.handlers):], handlers)
s.handlers = mergedHandlers
return s
}
// interceptor is a single interceptor out of a chain of many interceptors.
// Execution is done in left-to-right order, including passing of context.
// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three
// will see context changes of one and two.
func (s *Server) interceptor(ctx context.Context, req interface{}, args *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
var (
i int
chain grpc.UnaryHandler
)
n := len(s.handlers)
if n == 0 {
return handler(ctx, req)
}
chain = func(ic context.Context, ir interface{}) (interface{}, error) {
if i == n-1 {
return handler(ic, ir)
}
i++
return s.handlers[i](ic, ir, args, chain)
}
return s.handlers[0](ctx, req, args, chain)
}
```
The logic is simple:
* the `warden server` injects `grpc.UnaryServerInterceptor`s through the `Use` method, and `func (s *Server) interceptor` itself implements `grpc.UnaryServerInterceptor`
* `func (s *Server) interceptor` runs the registered `grpc.UnaryServerInterceptor`s in order, front to back
`warden` registers this method itself with the `gRPC server` at initialization; inside `NewServer` you can see:
```go
opt = append(opt, keepParam, grpc.UnaryInterceptor(s.interceptor))
s.server = grpc.NewServer(opt...)
```
With that, the complete server-side interceptor chain is wired up.
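A sketch of plugging a custom interceptor into this chain via `Use`, assuming the warden import path `github.com/bilibili/kratos/pkg/net/rpc/warden`; the timing interceptor is illustrative and not part of warden itself:
```go
// timing logs how long each unary RPC takes.
func timing() grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		start := time.Now()
		resp, err := handler(ctx, req)
		log.Info("method: %s cost: %v", info.FullMethod, time.Since(start))
		return resp, err
	}
}

func NewGRPCServer() *warden.Server {
	s := warden.NewServer(nil)
	s.Use(timing()) // appended after warden's built-in interceptors
	return s
}
```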
## Client-side interceptor
Let's first look at the declaration of `grpc.UnaryClientInterceptor` ([official source](https://github.com/grpc/grpc-go/blob/master/interceptor.go)):
```go
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
// and it is the responsibility of the interceptor to call it.
// This is an EXPERIMENTAL API.
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
```
看起来和服务端拦截器并没有太大的区别,比较简单,包括:
* 一个`UnaryInvoker`表示客户端具体要发出的执行方法
* 一个`UnaryClientInterceptor`用于拦截`Invoker`方法,可在`Invoker`执行前后插入拦截代码
具体执行过程,请看基于`proto`生成的下面代码[代码位置](https://github.com/bilibili/kratos-demo/blob/master/api/api.pb.go):
```go
func (c *demoClient) SayHello(ctx context.Context, in *HelloReq, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
out := new(google_protobuf1.Empty)
err := grpc.Invoke(ctx, "/demo.service.v1.Demo/SayHello", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
```
当客户端调用`SayHello`时可以看到执行了`grpc.Invoke`方法,并且将`fullMethod`和其他参数传入,最终会执行下面代码[官方代码位置](https://github.com/grpc/grpc-go/blob/master/call.go):
```go
// Invoke sends the RPC request on the wire and returns after response is
// received. This is typically called by generated code.
//
// All errors returned by Invoke are compatible with the status package.
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
// allow interceptor to see all applicable call options, which means those
// configured as defaults from dial option as well as per-call options
opts = combine(cc.dopts.callOptions, opts)
if cc.dopts.unaryInt != nil {
return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
}
return invoke(ctx, method, args, reply, cc, opts...)
}
```
其中的`unaryInt`即为客户端连接创建时注册的拦截器,使用下面代码注册[官方代码位置](https://github.com/grpc/grpc-go/blob/master/dialoptions.go):
```go
// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
// unary RPCs.
func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.unaryInt = f
})
}
```
需要注意的是,客户端拦截器在官方`gRPC`内同样只能注册一个。与服务端拦截器`interceptor chain`的逻辑类似,`warden`对客户端拦截器也做了相同处理,并在客户端连接时进行注册,请看下面代码[代码位置](https://github.com/bilibili/kratos/blob/master/pkg/net/rpc/warden/client.go):
```go
// Use attaches a global interceptor to the Client.
// For example, this is the right place for a circuit breaker or error management interceptor.
func (c *Client) Use(handlers ...grpc.UnaryClientInterceptor) *Client {
finalSize := len(c.handlers) + len(handlers)
if finalSize >= int(_abortIndex) {
panic("warden: client use too many handlers")
}
mergedHandlers := make([]grpc.UnaryClientInterceptor, finalSize)
copy(mergedHandlers, c.handlers)
copy(mergedHandlers[len(c.handlers):], handlers)
c.handlers = mergedHandlers
return c
}
// chainUnaryClient creates a single interceptor out of a chain of many interceptors.
//
// Execution is done in left-to-right order, including passing of context.
// For example ChainUnaryClient(one, two, three) will execute one before two before three.
func (c *Client) chainUnaryClient() grpc.UnaryClientInterceptor {
n := len(c.handlers)
if n == 0 {
return func(ctx context.Context, method string, req, reply interface{},
cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
return invoker(ctx, method, req, reply, cc, opts...)
}
}
return func(ctx context.Context, method string, req, reply interface{},
cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
var (
i int
chainHandler grpc.UnaryInvoker
)
chainHandler = func(ictx context.Context, imethod string, ireq, ireply interface{}, ic *grpc.ClientConn, iopts ...grpc.CallOption) error {
if i == n-1 {
return invoker(ictx, imethod, ireq, ireply, ic, iopts...)
}
i++
return c.handlers[i](ictx, imethod, ireq, ireply, ic, chainHandler, iopts...)
}
return c.handlers[0](ctx, method, req, reply, cc, chainHandler, opts...)
}
}
```
如此完整的客户端拦截器逻辑就串联完成。
# 实现自己的拦截器
以服务端拦截器`logging`为例:
```go
// serverLogging warden grpc logging
func serverLogging() grpc.UnaryServerInterceptor {
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
// NOTE: handler执行之前的拦截代码:主要获取一些关键参数,如耗时计时、ip等
// 如果自定义的拦截器只需要在handler执行后做处理,那么这里可以直接执行handler
startTime := time.Now()
caller := metadata.String(ctx, metadata.Caller)
if caller == "" {
caller = "no_user"
}
var remoteIP string
if peerInfo, ok := peer.FromContext(ctx); ok {
remoteIP = peerInfo.Addr.String()
}
var quota float64
if deadline, ok := ctx.Deadline(); ok {
quota = time.Until(deadline).Seconds()
}
// call server handler
resp, err := handler(ctx, req) // NOTE: 以具体执行的handler为分界线!!!
// NOTE: handler执行之后的拦截代码:主要进行耗时计算、日志记录
// 如果自定义的拦截器在handler执行后不需要额外逻辑,则可直接返回
// after server response
code := ecode.Cause(err).Code()
duration := time.Since(startTime)
// monitor
statsServer.Timing(caller, int64(duration/time.Millisecond), info.FullMethod)
statsServer.Incr(caller, info.FullMethod, strconv.Itoa(code))
logFields := []log.D{
log.KVString("user", caller),
log.KVString("ip", remoteIP),
log.KVString("path", info.FullMethod),
log.KVInt("ret", code),
// TODO: it will panic if someone removes the String method from the protobuf message struct auto-generated by protoc.
log.KVString("args", req.(fmt.Stringer).String()),
log.KVFloat64("ts", duration.Seconds()),
log.KVFloat64("timeout_quota", quota),
log.KVString("source", "grpc-access-log"),
}
if err != nil {
logFields = append(logFields, log.KV("error", err.Error()), log.KV("stack", fmt.Sprintf("%+v", err)))
}
logFn(code, duration)(ctx, logFields...)
return resp, err
}
}
```
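再给出一个客户端拦截器的最小示意,在`invoker`前后统计一次RPC的耗时。其中`clientTiming`为假设的函数名,日志输出方式仅为演示,可替换为项目自身的日志组件:
```go
package dao
import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
)
// clientTiming 返回一个简单的客户端拦截器,在 invoker 前后统计 RPC 耗时(仅为示意)
func clientTiming() grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		start := time.Now()
		// invoker 即实际发出 RPC 的调用,拦截器负责在合适的时机调用它
		err := invoker(ctx, method, req, reply, cc, opts...)
		fmt.Printf("grpc client method=%s cost=%v err=%v\n", method, time.Since(start), err)
		return err
	}
}
```
使用时通过`client.Use(clientTiming())`即可像服务端一样挂载到`warden` client上。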
# 内置拦截器
## 自适应限流拦截器
更多关于自适应限流的信息,请参考:[kratos 自适应限流](/doc/wiki-cn/ratelimit.md)
```go
package grpc
import (
pb "kratos-demo/api"
"kratos-demo/internal/service"
"github.com/bilibili/kratos/pkg/conf/paladin"
"github.com/bilibili/kratos/pkg/net/rpc/warden"
"github.com/bilibili/kratos/pkg/net/rpc/warden/ratelimiter"
)
// New new a grpc server.
func New(svc *service.Service) *warden.Server {
var rc struct {
Server *warden.ServerConfig
}
if err := paladin.Get("grpc.toml").UnmarshalTOML(&rc); err != nil {
if err != paladin.ErrNotExist {
panic(err)
}
}
ws := warden.NewServer(rc.Server)
// 挂载自适应限流拦截器到 warden server,使用默认配置
limiter := ratelimiter.New(nil)
ws.Use(limiter.Limit())
// 注意替换这里:
// RegisterDemoServer方法是在"api"目录下代码生成的
// 对应proto文件内自定义的service名字,请使用正确方法名替换
pb.RegisterDemoServer(ws.Server(), svc)
ws, err := ws.Start()
if err != nil {
panic(err)
}
return ws
}
```
# 扩展阅读
[warden快速开始](warden-quickstart.md) [warden基于pb生成](warden-pb.md) [warden负载均衡](warden-balancer.md) [warden服务发现](warden-resolver.md)
-------------
[文档目录树](summary.md)

@ -0,0 +1,47 @@
# 介绍
基于proto文件可以快速生成`warden`框架对应的代码,需要提前准备以下工作:
* 安装`kratos tool protoc`工具,请看[kratos工具](kratos-tool.md)
* 编写`proto`文件,示例可参考[kratos-demo内proto文件](https://github.com/bilibili/kratos-demo/blob/master/api/api.proto)
### kratos工具说明
`kratos tool protoc`工具可以生成`warden` `bm` `swagger`对应的代码和文档,想要单独生成`warden`代码只需加上`--grpc`如:
```shell
# generate gRPC
kratos tool protoc --grpc api.proto
```
# 使用
建议在项目`api`目录下编写`proto`文件及生成对应的代码,可参考[kratos-demo内的api目录](https://github.com/bilibili/kratos-demo/tree/master/api)。
执行命令后会生成`api.pb.go`代码,注意其中的`DemoClient`和`DemoServer`:
* `DemoClient`接口为客户端调用接口,相对应的有`demoClient`结构体为其实现
* `DemoServer`接口为服务端接口声明,需要业务自己实现该接口的所有方法,`kratos`建议在`internal/service`目录下使用`Service`结构体实现
`internal/service`内的`Service`结构实现了`DemoServer`接口可参考[kratos-demo内的service](https://github.com/bilibili/kratos-demo/blob/master/internal/service/service.go)内的如下代码:
```go
// SayHelloURL bm demo func.
func (s *Service) SayHelloURL(ctx context.Context, req *pb.HelloReq) (reply *pb.HelloResp, err error) {
reply = &pb.HelloResp{
Content: "hello " + req.Name,
}
fmt.Printf("hello url %s", req.Name)
return
}
```
更详细的客户端和服务端使用请看[warden快速开始](warden-quickstart.md)
# 扩展阅读
[warden快速开始](warden-quickstart.md) [warden拦截器](warden-mid.md) [warden负载均衡](warden-balancer.md) [warden服务发现](warden-resolver.md)
-------------
[文档目录树](summary.md)

@ -0,0 +1,171 @@
# 准备工作
推荐使用[kratos工具](kratos-tool.md)快速生成带`--grpc`的项目,如我们生成一个叫`kratos-demo`的项目。
# pb文件
创建项目成功后,进入`api`目录下可以看到`api.proto`和`api.pb.go`和`generate.go`文件,其中:
* `api.proto`是gRPC server的描述文件
* `api.pb.go`是基于`api.proto`生成的代码文件
* `generate.go`是用于`kratos tool protoc`执行`go generate`进行代码生成的临时文件
接下来可以将以上三个文件全部删除或者保留`generate.go`,之后编写自己的proto文件,确认proto无误后,进行代码生成:
* 可直接执行`kratos tool protoc`,该命令会调用protoc工具生成`.pb.go`文件
* 如`generate.go`没删除,也可以执行`go generate`命令,将调用`kratos tool protoc`工具进行代码生成
关于kratos工具请看[kratos工具](kratos-tool.md)
### 如未看kratos tool protoc文档,请看下面这段话
`kratos tool protoc`用于快速生成`pb.go`文件,但目前Windows和Linux需要先自行安装`protoc`工具,具体请看[protoc说明](protoc.md)。
# 注册server
进入`internal/server/grpc`目录打开`server.go`文件,可以看到以下代码,只需要替换以下注释内容就可以启动一个gRPC服务。
```go
package grpc
import (
pb "kratos-demo/api"
"kratos-demo/internal/service"
"github.com/bilibili/kratos/pkg/conf/paladin"
"github.com/bilibili/kratos/pkg/net/rpc/warden"
)
// New new a grpc server.
func New(svc *service.Service) *warden.Server {
var rc struct {
Server *warden.ServerConfig
}
if err := paladin.Get("grpc.toml").UnmarshalTOML(&rc); err != nil {
if err != paladin.ErrNotExist {
panic(err)
}
}
ws := warden.NewServer(rc.Server)
// 注意替换这里:
// RegisterDemoServer方法是在"api"目录下代码生成的
// 对应proto文件内自定义的service名字,请使用正确方法名替换
pb.RegisterDemoServer(ws.Server(), svc)
ws, err := ws.Start()
if err != nil {
panic(err)
}
return ws
}
```
### 注册注意
```go
// SayHello grpc demo func.
func (s *Service) SayHello(ctx context.Context, req *pb.HelloReq) (reply *empty.Empty, err error) {
reply = new(empty.Empty)
fmt.Printf("hello %s", req.Name)
return
}
```
请进入`internal/service`内找到`SayHello`方法,注意方法的入参和出参,都是按照gRPC的方法声明对应的:
* 第一个参数必须是`context.Context`,第二个必须是proto内定义的`message`对应生成的结构体
* 第一个返回值必须是proto内定义的`message`对应生成的结构体,第二个参数必须是`error`
* 在http框架bm中,如果共用proto文件生成bm代码,那么也可以直接使用该service方法
建议service严格按照此格式声明方法使其能够在bm和warden内共用。
# client调用
请进入`internal/dao`目录内,一般对资源的处理都会在这一层封装。
对于`client`端,前提必须有对应`proto`文件生成的代码,那么有两种选择:
* 拷贝proto文件到自己项目下并且执行代码生成
* 直接import服务端的api package
> 这也是我们在业务代码中加了一层`internal`的原因,服务对外暴露的只有接口
不管哪一种方式,以下初始化gRPC client的代码建议伴随生成的代码存放在统一目录下:
```go
package dao
import (
"context"
"github.com/bilibili/kratos/pkg/net/rpc/warden"
"google.golang.org/grpc"
)
// target server addrs.
const target = "direct://default/127.0.0.1:9000,127.0.0.1:9091" // NOTE: example
// NewClient new member grpc client
func NewClient(cfg *warden.ClientConfig, opts ...grpc.DialOption) (DemoClient, error) {
client := warden.NewClient(cfg, opts...)
conn, err := client.Dial(context.Background(), target)
if err != nil {
return nil, err
}
// 注意替换这里:
// NewDemoClient方法是在"api"目录下代码生成的
// 对应proto文件内自定义的service名字,请使用正确方法名替换
return NewDemoClient(conn), nil
}
```
其中,`target`为gRPC用于服务发现的目标,使用标准url资源格式提供给resolver用于服务发现。`warden`默认使用`direct`直连方式,直接与`server`端进行连接。如果在使用其他服务发现组件请看[warden服务发现](warden-resolver.md)。
有了初始化`Client`的代码,我们的`Dao`对象即可进行初始化和使用,以下以直接import服务端api包为例:
```go
package dao
import (
"context"
demoapi "kratos-demo/api"
grpcempty "github.com/golang/protobuf/ptypes/empty"
"github.com/bilibili/kratos/pkg/conf/paladin"
"github.com/bilibili/kratos/pkg/net/rpc/warden"
"github.com/pkg/errors"
)
type Dao struct{
demoClient demoapi.DemoClient
}
// New account dao.
func New() (d *Dao) {
cfg := &warden.ClientConfig{}
paladin.Get("grpc.toml").UnmarshalTOML(cfg)
d = &Dao{}
var err error
if d.demoClient, err = demoapi.NewClient(cfg); err != nil {
panic(err)
}
return
}
// SayHello say hello.
func (d *Dao) SayHello(c context.Context, req *demoapi.HelloReq) (resp *grpcempty.Empty, err error) {
if resp, err = d.demoClient.SayHello(c, req); err != nil {
err = errors.Wrapf(err, "%v", req)
}
return
}
```
如此在`internal/service`层就可以进行资源的方法调用。
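例如在`internal/service`层可以这样调用。以下`Service`结构与字段名仅为示意,假设其持有上文初始化的`Dao`对象,`kratos-demo/internal/dao`为假设的包路径:
```go
package service
import (
	"context"

	pb "kratos-demo/api"
	"kratos-demo/internal/dao"

	grpcempty "github.com/golang/protobuf/ptypes/empty"
)
// Service 假设持有 dao 对象(仅为示意)
type Service struct {
	dao *dao.Dao
}
// SayHello 调用 dao 层封装的 gRPC client(仅为示意)
func (s *Service) SayHello(ctx context.Context, req *pb.HelloReq) (*grpcempty.Empty, error) {
	return s.dao.SayHello(ctx, req)
}
```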
# 扩展阅读
[warden拦截器](warden-mid.md)
[warden基于pb生成](warden-pb.md)
[warden服务发现](warden-resolver.md)
[warden负载均衡](warden-balancer.md)
-------------
[文档目录树](summary.md)

@ -0,0 +1,204 @@
# 前提
服务注册与发现最简单的方式就是`direct`固定服务端地址的直连方式,也就是服务端正常监听端口启动、不进行额外操作,客户端使用如下`target`:
```url
direct://default/127.0.0.1:9000,127.0.0.1:9091
```
> `target`就是标准的`URL`资源定位符[查看WIKI](https://zh.wikipedia.org/wiki/%E7%BB%9F%E4%B8%80%E8%B5%84%E6%BA%90%E5%AE%9A%E4%BD%8D%E7%AC%A6)
其中`direct`为协议类型,此处表示直接使用该`URL`内提供的地址`127.0.0.1:9000,127.0.0.1:9091`进行连接,而`default`在此处无实际意义,仅当作占位符。
# gRPC Resolver
gRPC暴露了服务发现的接口`resolver.Builder`和`resolver.ClientConn`和`resolver.Resolver`,[官方代码位置](https://github.com/grpc/grpc-go/blob/master/resolver/resolver.go):
```go
// Builder creates a resolver that will be used to watch name resolution updates.
type Builder interface {
// Build creates a new resolver for the given target.
//
// gRPC dial calls Build synchronously, and fails if the returned error is
// not nil.
Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
// Scheme returns the scheme supported by this resolver.
// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
Scheme() string
}
// ClientConn contains the callbacks for resolver to notify any updates
// to the gRPC ClientConn.
//
// This interface is to be implemented by gRPC. Users should not need a
// brand new implementation of this interface. For the situations like
// testing, the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
type ClientConn interface {
// UpdateState updates the state of the ClientConn appropriately.
UpdateState(State)
// NewAddress is called by resolver to notify ClientConn a new list
// of resolved addresses.
// The address list should be the complete list of resolved addresses.
//
// Deprecated: Use UpdateState instead.
NewAddress(addresses []Address)
// NewServiceConfig is called by resolver to notify ClientConn a new
// service config. The service config should be provided as a json string.
//
// Deprecated: Use UpdateState instead.
NewServiceConfig(serviceConfig string)
}
// Resolver watches for the updates on the specified target.
// Updates include address updates and service config updates.
type Resolver interface {
// ResolveNow will be called by gRPC to try to resolve the target name
// again. It's just a hint, resolver can ignore this if it's not necessary.
//
// It could be called multiple times concurrently.
ResolveNow(ResolveNowOption)
// Close closes the resolver.
Close()
}
```
下面依次分析这三个接口的作用:
* `Builder`用于gRPC内部创建`Resolver`接口的实现,但注意声明的`Build`方法将接口`ClientConn`作为参数传入了
* `ClientConn`接口有两个已废弃的方法可以忽略,重点看`UpdateState`方法:它需要传入`State`结构,从代码可以发现其中包含了`Addresses []Address // Resolved addresses for the target`,也就是需要将服务发现得到的`Address`对象列表告知`ClientConn`
* `Resolver`提供了`ResolveNow`用于被gRPC尝试重新进行服务发现
看完这三个接口就可以明白gRPC的服务发现实现逻辑:通过`Builder`进行`Resolver`的创建,在`Build`的过程中将服务发现到的地址信息交给`ClientConn`用于内部连接创建等逻辑。主要逻辑可以按下面顺序来看源码理解:
* 当`client`在`Dial`时会根据`target`解析的`scheme`获取对应的`Builder`,[官方代码位置](https://github.com/grpc/grpc-go/blob/master/clientconn.go#L242)
* 当`Dial`成功会创建出结构体`ClientConn`的对象[官方代码位置](https://github.com/grpc/grpc-go/blob/master/clientconn.go#L447)(注意不是上面的`ClientConn`接口),可以看到结构体`ClientConn`内的成员`resolverWrapper`又实现了接口`ClientConn`的方法[官方代码位置](https://github.com/grpc/grpc-go/blob/master/resolver_conn_wrapper.go)
* 当`resolverWrapper`被初始化时就会调用`Build`方法[官方代码位置](https://github.com/grpc/grpc-go/blob/master/resolver_conn_wrapper.go#L89),其中参数为接口`ClientConn`传入的是`ccResolverWrapper`
* 当用户基于`Builder`的实现进行`UpdateState`调用时,则会触发结构体`ClientConn`的`updateResolverState`方法[官方代码位置](https://github.com/grpc/grpc-go/blob/master/resolver_conn_wrapper.go#L109),`updateResolverState`则会对传入的`Address`进行初始化等逻辑[官方代码位置](https://github.com/grpc/grpc-go/blob/master/clientconn.go#L553)
如此整个服务发现过程就结束了。从中也可以看出gRPC官方提供的三个接口还是很灵活的,但也正因为灵活要实现稍微麻烦一些,而`Address`[官方代码位置](https://github.com/grpc/grpc-go/blob/master/resolver/resolver.go#L79)如果直接被业务拿来用于服务节点信息的描述结构则显得有些过于简单。
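为便于理解上述三个接口的调用关系,下面给出一个使用固定地址列表实现`resolver.Builder`/`resolver.Resolver`的最小示意(非warden实现,`staticBuilder`等名称为假设,接口签名以上文引用的gRPC版本为准):
```go
package static
import (
	"google.golang.org/grpc/resolver"
)
// staticBuilder 持有固定地址列表,实现 resolver.Builder
type staticBuilder struct {
	addrs []string
}
// Build 创建 Resolver,并立即把地址列表告知 ClientConn
func (b *staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc, addrs: b.addrs}
	r.ResolveNow(resolver.ResolveNowOption{})
	return r, nil
}
// Scheme 返回该 Builder 支持的协议名,与 target 中的 scheme 对应
func (b *staticBuilder) Scheme() string { return "static" }
type staticResolver struct {
	cc    resolver.ClientConn
	addrs []string
}
// ResolveNow 将地址列表通过 UpdateState 通知 gRPC 内部的 ClientConn
func (r *staticResolver) ResolveNow(resolver.ResolveNowOption) {
	addresses := make([]resolver.Address, 0, len(r.addrs))
	for _, a := range r.addrs {
		addresses = append(addresses, resolver.Address{Addr: a})
	}
	r.cc.UpdateState(resolver.State{Addresses: addresses})
}
// Close 释放资源,本示例无需处理
func (r *staticResolver) Close() {}
```
使用时先调用`resolver.Register(&staticBuilder{addrs: []string{"127.0.0.1:9000"}})`,之后以`static:///demo`这样的`target`进行`Dial`即可命中该`Builder`(仅为示意)。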
所以`warden`包装了gRPC的整个服务发现实现逻辑,代码分别位于`pkg/naming/naming.go`和`warden/resolver/resolver.go`,其中:
* `naming.go`内定义了用于描述业务实例的`Instance`结构、用于服务注册的`Registry`接口、用于服务发现的`Resolver`接口
* `resolver.go`内实现了gRPC官方的`resolver.Builder`和`resolver.Resolver`接口,但也暴露了`naming.go`内的`naming.Builder`和`naming.Resolver`接口
# warden Resolver
接下来看`naming`内的接口如下:
```go
// Resolver resolve naming service
type Resolver interface {
Fetch(context.Context) (*InstancesInfo, bool)
Watch() <-chan struct{}
Close() error
}
// Builder resolver builder.
type Builder interface {
Build(id string) Resolver
Scheme() string
}
```
可以看到封装方式与gRPC官方的方法一样,通过`Builder`进行`Resolver`的初始化。不同的是通过封装将参数进行了简化:
* `Build`只需要传对应的服务`id`即可:`warden/resolver/resolver.go`在gRPC进行调用后,会根据`Scheme`方法查询对应的`naming.Builder`实现并调用`Build`将`id`传入,而`naming.Resolver`的实现即可通过`id`去对应的服务发现中间件进行实例信息的查询
* 而`Resolver`则对方法进行了扩展,除了简单进行`Fetch`操作外还多了`Watch`方法,用于监听服务发现中间件的节点变化情况,从而能够实时的进行服务实例信息的更新
在`naming/discovery`内实现了以[discovery](https://github.com/bilibili/discovery)为中间件的服务注册与发现逻辑。如果要实现其他中间件如`etcd`、`zookeeper`等的逻辑,可参考`naming/discovery/discovery.go`内的逻辑,将与`discovery`的交互逻辑替换掉即可(后续会默认提供etcd/zk等实现,敬请期待)。
# 使用discovery
因为`warden`内默认使用`direct`的方式,所以要使用[discovery](https://github.com/bilibili/discovery)需要在业务的`NewClient`前进行注册,代码如下:
```go
package dao
import (
"context"
"github.com/bilibili/kratos/pkg/naming/discovery"
"github.com/bilibili/kratos/pkg/net/rpc/warden"
"github.com/bilibili/kratos/pkg/net/rpc/warden/resolver"
"google.golang.org/grpc"
)
// AppID your appid, ensure unique.
const AppID = "demo.service" // NOTE: example
func init(){
// NOTE: 注意这段代码,表示要使用discovery进行服务发现
// NOTE: 还需注意的是,resolver.Register是全局生效的,所以建议该代码放在进程初始化的时候执行
// NOTE: !!!切记不要在一个进程内进行多个不同中间件的Register!!!
// NOTE: 在启动应用时,可以通过flag(-discovery.nodes) 或者 环境配置(DISCOVERY_NODES)指定discovery节点
resolver.Register(discovery.Builder())
}
// NewClient new member grpc client
func NewClient(cfg *warden.ClientConfig, opts ...grpc.DialOption) (DemoClient, error) {
client := warden.NewClient(cfg, opts...)
conn, err := client.Dial(context.Background(), "discovery://default/"+AppID)
if err != nil {
return nil, err
}
// 注意替换这里:
// NewDemoClient方法是在"api"目录下代码生成的
// 对应proto文件内自定义的service名字,请使用正确方法名替换
return NewDemoClient(conn), nil
}
```
> 注意:`resolver.Register`是全局行为,建议放在包加载阶段或main方法开始时执行,该方法执行后会在gRPC内注册构造方法
`target`是`discovery://default/${appid}`,当gRPC内进行解析后会得到`scheme`=`discovery`和`appid`,然后进行以下逻辑:
1. `warden/resolver.Builder`会通过`scheme`获取到`naming/discovery.Builder`对象(靠`resolver.Register`注册过的)
2. 拿到`naming/discovery.Builder`后执行`Build(appid)`构造`naming/discovery.Discovery`
3. `naming/discovery.Discovery`对象基于`appid`就知道要获取哪个服务的实例信息
# 服务注册
客户端既然使用了[discovery](https://github.com/bilibili/discovery)进行服务发现,也就意味着服务端启动后必须将自己注册到[discovery](https://github.com/bilibili/discovery)。
相对服务发现来讲,服务注册则简单很多,看`naming/discovery/discovery.go`内的代码实现了`naming/naming.go`内的`Registry`接口,服务端启动时可以参考下面代码进行注册:
```go
// 该代码可放在main.go,当warden server进行初始化之后
// 省略...
ip := "" // NOTE: 必须拿到您实例节点的真实IP,
port := "" // NOTE: 必须拿到您实例grpc监听的真实端口,warden默认监听9000
hn, _ := os.Hostname()
dis := discovery.New(nil)
ins := &naming.Instance{
Zone: env.Zone,
Env: env.DeployEnv,
AppID: "your app id",
Hostname: hn,
Addrs: []string{
"grpc://" + ip + ":" + port,
},
}
cancel, err := dis.Register(context.Background(), ins)
if err != nil {
panic(err)
}
// 省略...
// 特别注意!!!
// cancel必须在进程退出时执行!!!
cancel()
```
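常见做法是在`main`中监听退出信号,收到信号后再执行`cancel`,示意如下(需要`os`、`os/signal`、`syscall`、`time`包,信号集与等待时长请按项目实际情况调整):
```go
// 承接上文注册后拿到的 cancel
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGTERM, syscall.SIGINT)
<-ch        // 阻塞直到收到退出信号
cancel()    // 通知 discovery 注销该实例
// 预留少量时间让注销请求发出、流量摘除后再退出
time.Sleep(time.Second)
```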
# 扩展阅读
[warden快速开始](warden-quickstart.md) [warden拦截器](warden-mid.md) [warden基于pb生成](warden-pb.md) [warden负载均衡](warden-balancer.md)
-------------
[文档目录树](summary.md)

@ -0,0 +1,40 @@
# 背景
我们需要统一的rpc服务,经过选型讨论决定直接使用成熟的跨语言的gRPC。
# 概览
* 不改gRPC源码,基于接口进行包装集成trace、log、prom等组件
* 打通自有服务注册发现系统[discovery](https://github.com/bilibili/discovery)
* 实现更平滑可靠的负载均衡算法
# 拦截器
gRPC暴露了两个拦截器接口,分别是:
* `grpc.UnaryServerInterceptor`服务端拦截器
* `grpc.UnaryClientInterceptor`客户端拦截器
基于两个拦截器可以针对性的定制公共模块的封装代码,比如`warden/logging.go`是通用日志逻辑。
[warden拦截器](warden-mid.md)
# 服务发现
`warden`默认使用`direct`方式直连,正常线上都会使用第三方服务注册与发现中间件,`warden`内包含了[discovery](https://github.com/bilibili/discovery)的逻辑实现,想使用如`etcd`、`zookeeper`等也可以,都请看下面文档。
[warden服务发现](warden-resolver.md)
# 负载均衡
实现了`wrr`和`p2c`两种算法,默认使用`p2c`。
[warden负载均衡](warden-balancer.md)
# 扩展阅读
[warden快速开始](warden-quickstart.md) [warden拦截器](warden-mid.md) [warden负载均衡](warden-balancer.md) [warden基于pb生成](warden-pb.md) [warden服务发现](warden-resolver.md)
-------------
[文档目录树](summary.md)

@ -16,22 +16,39 @@ require (
github.com/go-sql-driver/mysql v1.4.1
github.com/gogo/protobuf v1.2.0
github.com/golang/protobuf v1.2.0
github.com/kr/pty v1.1.4
github.com/leodido/go-urn v1.1.0 // indirect
github.com/mattn/go-colorable v0.0.9 // indirect
github.com/mattn/go-isatty v0.0.4 // indirect
github.com/montanaflynn/stats v0.5.0
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v0.9.2
github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect
github.com/sirupsen/logrus v1.4.1 // indirect
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726
github.com/sirupsen/logrus v1.4.1
github.com/stretchr/testify v1.3.0
github.com/tsuna/gohbase v0.0.0-20190201102810-d3184c1526df
github.com/urfave/cli v1.20.0
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
google.golang.org/grpc v1.18.0
gopkg.in/AlecAivazis/survey.v1 v1.8.2
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
google.golang.org/grpc v1.20.1
gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
gopkg.in/go-playground/validator.v9 v9.26.0
)
replace (
cloud.google.com/go => github.com/googleapis/google-cloud-go v0.26.0
golang.org/x/crypto => github.com/golang/crypto v0.0.0-20190123085648-057139ce5d2b
golang.org/x/lint => github.com/golang/lint v0.0.0-20181026193005-c67002cb31c3
golang.org/x/net => github.com/golang/net v0.0.0-20190311183353-d8887717615a
golang.org/x/oauth2 => github.com/golang/oauth2 v0.0.0-20180821212333-d2e6202438be
golang.org/x/sync => github.com/golang/sync v0.0.0-20181108010431-42b317875d0f
golang.org/x/sys => github.com/golang/sys v0.0.0-20180905080454-ebe1bf3edb33
golang.org/x/text => github.com/golang/text v0.3.0
golang.org/x/time => github.com/golang/time v0.0.0-20190308202827-9d24e82272b4
golang.org/x/tools => github.com/golang/tools v0.0.0-20190328211700-ab21143f2384
google.golang.org/appengine => github.com/golang/appengine v1.1.0
google.golang.org/genproto => github.com/google/go-genproto v0.0.0-20180817151627-c66870c02cf8
google.golang.org/grpc => github.com/grpc/grpc-go v1.20.1
)


@ -0,0 +1,106 @@
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/aristanetworks/goarista v0.0.0-20190409234242-46f4bc7b73ef h1:ajsnF5qTstiBlP+V/mgh91zZfoKP477KfSmRoCoyYGU=
github.com/aristanetworks/goarista v0.0.0-20190409234242-46f4bc7b73ef/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8=
github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 h1:MZRmHqDBd0vxNwenEbKSQqRVT24d3C05ft8kduSwlqM=
github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-farm v0.0.0-20190323231341-8198c7b169ec h1:sElGDs3V8VdCxH5tWi0ycWJzteOPLJ3HtItSSKI95PY=
github.com/dgryski/go-farm v0.0.0-20190323231341-8198c7b169ec/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotfhkmOqEc=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/appengine v1.1.0 h1:i5Me8ymxZ0UcvrKKqVyk7iemYgabJFc19gXizKBaTa0=
github.com/golang/appengine v1.1.0/go.mod h1:C7k13PpLjU5SHb7WgwfAwCmVFgi4cpy3kl2zlpoBck8=
github.com/golang/crypto v0.0.0-20190123085648-057139ce5d2b h1:9dkUhGlF9C+jJBMDKqq91ycBLyQMvFjTdBhOqchi7lU=
github.com/golang/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:uZvAcrsnNaCxlh1HorK5dUQHGmEKPh2H/Rl1kehswPo=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/net v0.0.0-20190311183353-d8887717615a h1:4V+LPwzBFLRg7XSXZw133Jsur1mTVMY73hIv/FTdrbg=
github.com/golang/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
github.com/golang/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:ovBFgdmJqyggKzXS0i5+osE+RsPEbEsUfp2sVCgys1Q=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/sync v0.0.0-20181108010431-42b317875d0f h1:vuwODIDRvDgwjIl6VTMf0c1Z9uVMUUxiu6UPUjiGhD4=
github.com/golang/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:YCHYtYb9c8Q7XgYVYjmJBPtFPKx5QvOcPxHZWjldabE=
github.com/golang/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:GJexUf2QgFNvMR9sjJ1iqs+2TxZqJko+Muhnu04tPuU=
github.com/golang/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:5JyrLPvD/ZdaYkT7IqKhsP5xt7aLjA99KXRtk4EIYDk=
github.com/golang/text v0.3.0 h1:uI5zIUA9cg047ctlTptnVc0Ghjfurf2eZMFrod8R7v8=
github.com/golang/text v0.3.0/go.mod h1:GUiq9pdJKRKKAZXiVgWFEvocYuREvC14NhI4OPgEjeE=
github.com/golang/time v0.0.0-20190308202827-9d24e82272b4 h1:F9e5QAps6/3zc8881JhdfJBCj+KjFaahs4YNEzAPc/Q=
github.com/golang/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
github.com/golang/tools v0.0.0-20190328211700-ab21143f2384 h1:8J6Yq2enLsHiOXruypwvT3wf8eAvi7wRmS5KCt7RbHo=
github.com/golang/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
github.com/google/go-genproto v0.0.0-20180817151627-c66870c02cf8 h1:I9PuChzQA31gMw88WmVPJaAwE0nZNHpMrLDUnTyzFAI=
github.com/google/go-genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:3Rcd9jSoLVkV/osPrt5CogLvLiarfI8U9/x78NwhuDU=
github.com/googleapis/google-cloud-go v0.26.0/go.mod h1:yJoOdPPE9UpqbamBhJvp7Ur6OUPPV4rUY3RnssPGNBA=
github.com/grpc/grpc-go v1.20.1 h1:pk72GtSPpOdZDTkPneppDMGW10HYPC7RqNJT/JvUpV0=
github.com/grpc/grpc-go v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/montanaflynn/stats v0.5.0 h1:2EkzeTSqBB4V4bJwWrt5gIIrZmpJBcoIRGS2kWLgzmk=
github.com/montanaflynn/stats v0.5.0/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 h1:YDeskXpkNDhPdWN3REluVa46HQOVuVkjkd2sWnrABNQ=
github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY=
github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/tsuna/gohbase v0.0.0-20190201102810-d3184c1526df h1:jYiwqXfoRWU6pJMzClEpLn9Jofi3U/8qS+w3iRNJ/hw=
github.com/tsuna/gohbase v0.0.0-20190201102810-d3184c1526df/go.mod h1:3HfLQly3YNLGxNv/2YOfmz30vcjG9hbuME1GpxoLlGs=
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v9 v9.26.0 h1:2NPPsBpD0ZoxshmLWewQru8rWmbT5JqSzz9D1ZrAjYQ=
gopkg.in/go-playground/validator.v9 v9.26.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

@ -1,25 +0,0 @@
# cache/memcache
##### 项目简介
1. 提供protobuf,gob,json序列化方式,gzip的memcache接口
#### 使用方式
```golang
// 初始化 注意这里只是示例 展示用法 不能每次都New 只需要初始化一次
mc := memcache.New(&memcache.Config{})
// 程序关闭的时候调用close方法
defer mc.Close()
// 增加 key
err = mc.Set(c, &memcache.Item{})
// 删除key
err := mc.Delete(c,key)
// 获得某个key的内容
err := mc.Get(c,key).Scan(&v)
// 获取多个key的内容
replies, err := mc.GetMulti(c, keys)
for _, key := range replies.Keys() {
if err = replies.Scan(key, &v); err != nil {
return
}
}
```

@ -0,0 +1,261 @@
package memcache
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net"
"strconv"
"strings"
"time"
pkgerr "github.com/pkg/errors"
)
var (
crlf = []byte("\r\n")
space = []byte(" ")
replyOK = []byte("OK\r\n")
replyStored = []byte("STORED\r\n")
replyNotStored = []byte("NOT_STORED\r\n")
replyExists = []byte("EXISTS\r\n")
replyNotFound = []byte("NOT_FOUND\r\n")
replyDeleted = []byte("DELETED\r\n")
replyEnd = []byte("END\r\n")
replyTouched = []byte("TOUCHED\r\n")
replyClientErrorPrefix = []byte("CLIENT_ERROR ")
replyServerErrorPrefix = []byte("SERVER_ERROR ")
)
var _ protocolConn = &asiiConn{}
// asiiConn is the low-level implementation of Conn
type asiiConn struct {
err error
conn net.Conn
// Read & Write
readTimeout time.Duration
writeTimeout time.Duration
rw *bufio.ReadWriter
}
func replyToError(line []byte) error {
switch {
case bytes.Equal(line, replyStored):
return nil
case bytes.Equal(line, replyOK):
return nil
case bytes.Equal(line, replyDeleted):
return nil
case bytes.Equal(line, replyTouched):
return nil
case bytes.Equal(line, replyNotStored):
return ErrNotStored
case bytes.Equal(line, replyExists):
return ErrCASConflict
case bytes.Equal(line, replyNotFound):
return ErrNotFound
}
return pkgerr.WithStack(protocolError(string(line)))
}
func (c *asiiConn) Populate(ctx context.Context, cmd string, key string, flags uint32, expiration int32, cas uint64, data []byte) error {
c.conn.SetWriteDeadline(shrinkDeadline(ctx, c.writeTimeout))
// <command name> <key> <flags> <exptime> <bytes> [noreply]\r\n
var err error
if cmd == "cas" {
_, err = fmt.Fprintf(c.rw, "%s %s %d %d %d %d\r\n", cmd, key, flags, expiration, len(data), cas)
} else {
_, err = fmt.Fprintf(c.rw, "%s %s %d %d %d\r\n", cmd, key, flags, expiration, len(data))
}
if err != nil {
return c.fatal(err)
}
c.rw.Write(data)
c.rw.Write(crlf)
if err = c.rw.Flush(); err != nil {
return c.fatal(err)
}
c.conn.SetReadDeadline(shrinkDeadline(ctx, c.readTimeout))
line, err := c.rw.ReadSlice('\n')
if err != nil {
return c.fatal(err)
}
return replyToError(line)
}
// newConn returns a new memcache connection for the given net connection.
func newASCIIConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) (protocolConn, error) {
if writeTimeout <= 0 || readTimeout <= 0 {
return nil, pkgerr.Errorf("readTimeout writeTimeout can't be zero")
}
c := &asiiConn{
conn: netConn,
rw: bufio.NewReadWriter(bufio.NewReader(netConn),
bufio.NewWriter(netConn)),
readTimeout: readTimeout,
writeTimeout: writeTimeout,
}
return c, nil
}
func (c *asiiConn) Close() error {
if c.err == nil {
c.err = pkgerr.New("memcache: closed")
}
return c.conn.Close()
}
func (c *asiiConn) fatal(err error) error {
if c.err == nil {
c.err = pkgerr.WithStack(err)
// Close connection to force errors on subsequent calls and to unblock
// other reader or writer.
c.conn.Close()
}
return c.err
}
func (c *asiiConn) Err() error {
return c.err
}
func (c *asiiConn) Get(ctx context.Context, key string) (result *Item, err error) {
c.conn.SetWriteDeadline(shrinkDeadline(ctx, c.writeTimeout))
if _, err = fmt.Fprintf(c.rw, "gets %s\r\n", key); err != nil {
return nil, c.fatal(err)
}
if err = c.rw.Flush(); err != nil {
return nil, c.fatal(err)
}
if err = c.parseGetReply(func(it *Item) {
result = it
}); err != nil {
return
}
if result == nil {
return nil, ErrNotFound
}
return
}
func (c *asiiConn) GetMulti(ctx context.Context, keys ...string) (map[string]*Item, error) {
var err error
c.conn.SetWriteDeadline(shrinkDeadline(ctx, c.writeTimeout))
if _, err = fmt.Fprintf(c.rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
return nil, c.fatal(err)
}
if err = c.rw.Flush(); err != nil {
return nil, c.fatal(err)
}
results := make(map[string]*Item, len(keys))
if err = c.parseGetReply(func(it *Item) {
results[it.Key] = it
}); err != nil {
return nil, err
}
return results, nil
}
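// parseGetReply reads the get/gets response line by line and invokes f for every parsed item until the END line is reached.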
func (c *asiiConn) parseGetReply(f func(*Item)) error {
c.conn.SetReadDeadline(shrinkDeadline(context.TODO(), c.readTimeout))
for {
line, err := c.rw.ReadSlice('\n')
if err != nil {
return c.fatal(err)
}
if bytes.Equal(line, replyEnd) {
return nil
}
if bytes.HasPrefix(line, replyServerErrorPrefix) {
errMsg := line[len(replyServerErrorPrefix):]
return c.fatal(protocolError(errMsg))
}
it := new(Item)
size, err := scanGetReply(line, it)
if err != nil {
return c.fatal(err)
}
it.Value = make([]byte, size+2)
if _, err = io.ReadFull(c.rw, it.Value); err != nil {
return c.fatal(err)
}
if !bytes.HasSuffix(it.Value, crlf) {
return c.fatal(protocolError("corrupt get reply: missing CRLF"))
}
it.Value = it.Value[:size]
f(it)
}
}
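// scanGetReply parses a "VALUE <key> <flags> <bytes> [<cas unique>]" header line into item and returns the declared value size.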
func scanGetReply(line []byte, item *Item) (size int, err error) {
pattern := "VALUE %s %d %d %d\r\n"
dest := []interface{}{&item.Key, &item.Flags, &size, &item.cas}
if bytes.Count(line, space) == 3 {
pattern = "VALUE %s %d %d\r\n"
dest = dest[:3]
}
n, err := fmt.Sscanf(string(line), pattern, dest...)
if err != nil || n != len(dest) {
return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
}
return size, nil
}
func (c *asiiConn) Touch(ctx context.Context, key string, expire int32) error {
line, err := c.writeReadLine("touch %s %d\r\n", key, expire)
if err != nil {
return err
}
return replyToError(line)
}
func (c *asiiConn) IncrDecr(ctx context.Context, cmd, key string, delta uint64) (uint64, error) {
line, err := c.writeReadLine("%s %s %d\r\n", cmd, key, delta)
if err != nil {
return 0, err
}
switch {
case bytes.Equal(line, replyNotFound):
return 0, ErrNotFound
case bytes.HasPrefix(line, replyClientErrorPrefix):
errMsg := line[len(replyClientErrorPrefix):]
return 0, pkgerr.WithStack(protocolError(errMsg))
}
val, err := strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
if err != nil {
return 0, err
}
return val, nil
}
func (c *asiiConn) Delete(ctx context.Context, key string) error {
line, err := c.writeReadLine("delete %s\r\n", key)
if err != nil {
return err
}
return replyToError(line)
}
func (c *asiiConn) writeReadLine(format string, args ...interface{}) ([]byte, error) {
c.conn.SetWriteDeadline(shrinkDeadline(context.TODO(), c.writeTimeout))
_, err := fmt.Fprintf(c.rw, format, args...)
if err != nil {
return nil, c.fatal(pkgerr.WithStack(err))
}
if err = c.rw.Flush(); err != nil {
return nil, c.fatal(pkgerr.WithStack(err))
}
c.conn.SetReadDeadline(shrinkDeadline(context.TODO(), c.readTimeout))
line, err := c.rw.ReadSlice('\n')
if err != nil {
return line, c.fatal(pkgerr.WithStack(err))
}
return line, nil
}

@ -0,0 +1,569 @@
package memcache
import (
"bytes"
"strconv"
"strings"
"testing"
)
func TestASCIIConnAdd(t *testing.T) {
tests := []struct {
name string
a *Item
e error
}{
{
"Add",
&Item{
Key: "test_add",
Value: []byte("0"),
Flags: 0,
Expiration: 60,
},
nil,
},
{
"Add_Large",
&Item{
Key: "test_add_large",
Value: bytes.Repeat(space, _largeValue+1),
Flags: 0,
Expiration: 60,
},
nil,
},
{
"Add_Exist",
&Item{
Key: "test_add",
Value: []byte("0"),
Flags: 0,
Expiration: 60,
},
ErrNotStored,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if err := testConnASCII.Add(test.a); err != test.e {
t.Fatal(err)
}
if b, err := testConnASCII.Get(test.a.Key); err != nil {
t.Fatal(err)
} else {
compareItem(t, test.a, b)
}
})
}
}
func TestASCIIConnGet(t *testing.T) {
tests := []struct {
name string
a *Item
k string
e error
}{
{
"Get",
&Item{
Key: "test_get",
Value: []byte("0"),
Flags: 0,
Expiration: 60,
},
"test_get",
nil,
},
{
"Get_NotExist",
&Item{
Key: "test_get_not_exist",
Value: []byte("0"),
Flags: 0,
Expiration: 60,
},
"test_get_not_exist!",
ErrNotFound,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if err := testConnASCII.Add(test.a); err != nil {
t.Fatal(err)
}
if b, err := testConnASCII.Get(test.a.Key); err != nil {
t.Fatal(err)
} else {
compareItem(t, test.a, b)
}
})
}
}
//func TestGetHasErr(t *testing.T) {
// prepareEnv(t)
//
// st := &TestItem{Name: "json", Age: 10}
// itemx := &Item{Key: "test", Object: st, Flags: FlagJSON}
// c.Set(itemx)
//
// expected := errors.New("some error")
// monkey.Patch(scanGetReply, func(line []byte, item *Item) (size int, err error) {
// return 0, expected
// })
//
// if _, err := c.Get("test"); err.Error() != expected.Error() {
// t.Errorf("conn.Get() unexpected error(%v)", err)
// }
// if err := c.(*asciiConn).err; err.Error() != expected.Error() {
// t.Errorf("unexpected error(%v)", err)
// }
//}
func TestASCIIConnGetMulti(t *testing.T) {
tests := []struct {
name string
a []*Item
k []string
e error
}{
{"getMulti_Add",
[]*Item{
{
Key: "get_multi_1",
Value: []byte("test"),
Flags: FlagRAW,
Expiration: 60,
cas: 0,
},
{
Key: "get_multi_2",
Value: []byte("test2"),
Flags: FlagRAW,
Expiration: 60,
cas: 0,
},
},
[]string{"get_multi_1", "get_multi_2"},
nil,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
for _, i := range test.a {
if err := testConnASCII.Set(i); err != nil {
t.Fatal(err)
}
}
if r, err := testConnASCII.GetMulti(test.k); err != nil {
t.Fatal(err)
} else {
reply := r["get_multi_1"]
compareItem(t, reply, test.a[0])
reply = r["get_multi_2"]
compareItem(t, reply, test.a[1])
}
})
}
}
func TestASCIIConnSet(t *testing.T) {
tests := []struct {
name string
a *Item
e error
}{
{
"SetLowerBound",
&Item{
Key: strings.Repeat("a", 1),
Value: []byte("4"),
Flags: 0,
Expiration: 60,
},
nil,
},
{
"SetUpperBound",
&Item{
Key: strings.Repeat("a", 250),
Value: []byte("3"),
Flags: 0,
Expiration: 60,
},
nil,
},
{
"SetIllegalKeyZeroLength",
&Item{
Key: "",
Value: []byte("2"),
Flags: 0,
Expiration: 60,
},
ErrMalformedKey,
},
{
"SetIllegalKeyLengthExceededLimit",
&Item{
Key: " ",
Value: []byte("1"),
Flags: 0,
Expiration: 60,
},
ErrMalformedKey,
},
{
"SeJsonItem",
&Item{
Key: "set_obj",
Object: &struct {
Name string
Age int
}{"json", 10},
Expiration: 60,
Flags: FlagJSON,
},
nil,
},
{
"SeErrItemJSONGzip",
&Item{
Key: "set_err_item",
Expiration: 60,
Flags: FlagJSON | FlagGzip,
},
ErrItem,
},
{
"SeErrItemBytesValueWrongFlag",
&Item{
Key: "set_err_item",
Value: []byte("2"),
Expiration: 60,
Flags: FlagJSON,
},
ErrItem,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if err := testConnASCII.Set(test.a); err != test.e {
t.Fatal(err)
}
})
}
}
func TestASCIIConnCompareAndSwap(t *testing.T) {
tests := []struct {
name string
a *Item
b *Item
c *Item
k string
e error
}{
{
"CompareAndSwap",
&Item{
Key: "test_cas",
Value: []byte("2"),
Flags: 0,
Expiration: 60,
},
nil,
&Item{
Key: "test_cas",
Value: []byte("3"),
Flags: 0,
Expiration: 60,
},
"test_cas",
nil,
},
{
"CompareAndSwapErrCASConflict",
&Item{
Key: "test_cas_conflict",
Value: []byte("2"),
Flags: 0,
Expiration: 60,
},
&Item{
Key: "test_cas_conflict",
Value: []byte("1"),
Flags: 0,
Expiration: 60,
},
&Item{
Key: "test_cas_conflict",
Value: []byte("3"),
Flags: 0,
Expiration: 60,
},
"test_cas_conflict",
ErrCASConflict,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if err := testConnASCII.Set(test.a); err != nil {
t.Fatal(err)
}
r, err := testConnASCII.Get(test.k)
if err != nil {
t.Fatal(err)
}
if test.b != nil {
if err := testConnASCII.Set(test.b); err != nil {
t.Fatal(err)
}
}
r.Value = test.c.Value
if err := testConnASCII.CompareAndSwap(r); err != nil {
if err != test.e {
t.Fatal(err)
}
} else {
if fr, err := testConnASCII.Get(test.k); err != nil {
t.Fatal(err)
} else {
compareItem(t, fr, test.c)
}
}
})
}
t.Run("TestCompareAndSwapErrNotFound", func(t *testing.T) {
ti := &Item{
Key: "test_cas_notfound",
Value: []byte("2"),
Flags: 0,
Expiration: 60,
}
if err := testConnASCII.Set(ti); err != nil {
t.Fatal(err)
}
r, err := testConnASCII.Get(ti.Key)
if err != nil {
t.Fatal(err)
}
r.Key = "test_cas_notfound_boom"
r.Value = []byte("3")
if err := testConnASCII.CompareAndSwap(r); err != nil {
if err != ErrNotFound {
t.Fatal(err)
}
}
})
}
func TestASCIIConnReplace(t *testing.T) {
tests := []struct {
name string
a *Item
b *Item
e error
}{
{
"TestReplace",
&Item{
Key: "test_replace",
Value: []byte("2"),
Flags: 0,
Expiration: 60,
},
&Item{
Key: "test_replace",
Value: []byte("3"),
Flags: 0,
Expiration: 60,
},
nil,
},
{
"TestReplaceErrNotStored",
&Item{
Key: "test_replace_not_stored",
Value: []byte("2"),
Flags: 0,
Expiration: 60,
},
&Item{
Key: "test_replace_not_stored_boom",
Value: []byte("3"),
Flags: 0,
Expiration: 60,
},
ErrNotStored,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if err := testConnASCII.Set(test.a); err != nil {
t.Fatal(err)
}
if err := testConnASCII.Replace(test.b); err != nil {
if err == test.e {
return
}
t.Fatal(err)
}
if r, err := testConnASCII.Get(test.b.Key); err != nil {
t.Fatal(err)
} else {
compareItem(t, r, test.b)
}
})
}
}
func TestASCIIConnIncrDecr(t *testing.T) {
tests := []struct {
fn func(key string, delta uint64) (uint64, error)
name string
k string
v uint64
w uint64
}{
{
testConnASCII.Increment,
"Incr_10",
"test_incr",
10,
10,
},
{
testConnASCII.Increment,
"Incr_10(2)",
"test_incr",
10,
20,
},
{
testConnASCII.Decrement,
"Decr_10",
"test_incr",
10,
10,
},
}
if err := testConnASCII.Add(&Item{
Key: "test_incr",
Value: []byte("0"),
}); err != nil {
t.Fatal(err)
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if a, err := test.fn(test.k, test.v); err != nil {
t.Fatal(err)
} else {
if a != test.w {
t.Fatalf("want %d, got %d", test.w, a)
}
}
if b, err := testConnASCII.Get(test.k); err != nil {
t.Fatal(err)
} else {
if string(b.Value) != strconv.FormatUint(test.w, 10) {
t.Fatalf("want %s, got %d", b.Value, test.w)
}
}
})
}
}
func TestASCIIConnTouch(t *testing.T) {
tests := []struct {
name string
k string
a *Item
e error
}{
{
"Touch",
"test_touch",
&Item{
Key: "test_touch",
Value: []byte("0"),
Expiration: 60,
},
nil,
},
{
"Touch_NotExist",
"test_touch_not_exist",
nil,
ErrNotFound,
},
}
for _, test := range tests {
if test.a != nil {
if err := testConnASCII.Add(test.a); err != nil {
t.Fatal(err)
}
if err := testConnASCII.Touch(test.k, 1); err != test.e {
t.Fatal(err)
}
}
}
}
func TestASCIIConnDelete(t *testing.T) {
tests := []struct {
name string
k string
a *Item
e error
}{
{
"Delete",
"test_delete",
&Item{
Key: "test_delete",
Value: []byte("0"),
Expiration: 60,
},
nil,
},
{
"Delete_NotExist",
"test_delete_not_exist",
nil,
ErrNotFound,
},
}
for _, test := range tests {
if test.a != nil {
if err := testConnASCII.Add(test.a); err != nil {
t.Fatal(err)
}
if err := testConnASCII.Delete(test.k); err != test.e {
t.Fatal(err)
}
if _, err := testConnASCII.Get(test.k); err != ErrNotFound {
t.Fatal(err)
}
}
}
}
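// compareItem fails the test when two items differ in key, value or flags.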
func compareItem(t *testing.T, a, b *Item) {
if a.Key != b.Key || !bytes.Equal(a.Value, b.Value) || a.Flags != b.Flags {
t.Fatalf("compareItem: a(%s, %d, %d) : b(%s, %d, %d)", a.Key, len(a.Value), a.Flags, b.Key, len(b.Value), b.Flags)
}
}

@ -1,187 +0,0 @@
package memcache
import (
"context"
)
// Memcache memcache client
type Memcache struct {
pool *Pool
}
// Reply is the result of Get
type Reply struct {
err error
item *Item
conn Conn
closed bool
}
// Replies is the result of GetMulti
type Replies struct {
err error
items map[string]*Item
usedItems map[string]struct{}
conn Conn
closed bool
}
// New get a memcache client
func New(c *Config) *Memcache {
return &Memcache{pool: NewPool(c)}
}
// Close close connection pool
func (mc *Memcache) Close() error {
return mc.pool.Close()
}
// Conn direct get a connection
func (mc *Memcache) Conn(c context.Context) Conn {
return mc.pool.Get(c)
}
// Set writes the given item, unconditionally.
func (mc *Memcache) Set(c context.Context, item *Item) (err error) {
conn := mc.pool.Get(c)
err = conn.Set(item)
conn.Close()
return
}
// Add writes the given item, if no value already exists for its key.
// ErrNotStored is returned if that condition is not met.
func (mc *Memcache) Add(c context.Context, item *Item) (err error) {
conn := mc.pool.Get(c)
err = conn.Add(item)
conn.Close()
return
}
// Replace writes the given item, but only if the server *does* already hold data for this key.
func (mc *Memcache) Replace(c context.Context, item *Item) (err error) {
conn := mc.pool.Get(c)
err = conn.Replace(item)
conn.Close()
return
}
// CompareAndSwap writes the given item that was previously returned by Get
func (mc *Memcache) CompareAndSwap(c context.Context, item *Item) (err error) {
conn := mc.pool.Get(c)
err = conn.CompareAndSwap(item)
conn.Close()
return
}
// Get sends a command to the server for gets data.
func (mc *Memcache) Get(c context.Context, key string) *Reply {
conn := mc.pool.Get(c)
item, err := conn.Get(key)
if err != nil {
conn.Close()
}
return &Reply{err: err, item: item, conn: conn}
}
// Item get raw Item
func (r *Reply) Item() *Item {
return r.item
}
// Scan converts value, read from the memcache
func (r *Reply) Scan(v interface{}) (err error) {
if r.err != nil {
return r.err
}
err = r.conn.Scan(r.item, v)
if !r.closed {
r.conn.Close()
r.closed = true
}
return
}
// GetMulti is a batch version of Get
func (mc *Memcache) GetMulti(c context.Context, keys []string) (*Replies, error) {
conn := mc.pool.Get(c)
items, err := conn.GetMulti(keys)
rs := &Replies{err: err, items: items, conn: conn, usedItems: make(map[string]struct{}, len(keys))}
if (err != nil) || (len(items) == 0) {
rs.Close()
}
return rs, err
}
// Close close rows.
func (rs *Replies) Close() (err error) {
if !rs.closed {
err = rs.conn.Close()
rs.closed = true
}
return
}
// Item get Item from rows
func (rs *Replies) Item(key string) *Item {
return rs.items[key]
}
// Scan converts value, read from key in rows
func (rs *Replies) Scan(key string, v interface{}) (err error) {
if rs.err != nil {
return rs.err
}
item, ok := rs.items[key]
if !ok {
rs.Close()
return ErrNotFound
}
rs.usedItems[key] = struct{}{}
err = rs.conn.Scan(item, v)
if (err != nil) || (len(rs.items) == len(rs.usedItems)) {
rs.Close()
}
return
}
// Keys keys of result
func (rs *Replies) Keys() (keys []string) {
keys = make([]string, 0, len(rs.items))
for key := range rs.items {
keys = append(keys, key)
}
return
}
// Touch updates the expiry for the given key.
func (mc *Memcache) Touch(c context.Context, key string, timeout int32) (err error) {
conn := mc.pool.Get(c)
err = conn.Touch(key, timeout)
conn.Close()
return
}
// Delete deletes the item with the provided key.
func (mc *Memcache) Delete(c context.Context, key string) (err error) {
conn := mc.pool.Get(c)
err = conn.Delete(key)
conn.Close()
return
}
// Increment atomically increments key by delta.
func (mc *Memcache) Increment(c context.Context, key string, delta uint64) (newValue uint64, err error) {
conn := mc.pool.Get(c)
newValue, err = conn.Increment(key, delta)
conn.Close()
return
}
// Decrement atomically decrements key by delta.
func (mc *Memcache) Decrement(c context.Context, key string, delta uint64) (newValue uint64, err error) {
conn := mc.pool.Get(c)
newValue, err = conn.Decrement(key, delta)
conn.Close()
return
}

@ -1,78 +1,30 @@
package memcache
import (
"bufio"
"bytes"
"compress/gzip"
"context"
"encoding/gob"
"encoding/json"
"fmt"
"io"
"net"
"strconv"
"strings"
"sync"
"time"
"github.com/gogo/protobuf/proto"
pkgerr "github.com/pkg/errors"
)
var (
crlf = []byte("\r\n")
spaceStr = string(" ")
replyOK = []byte("OK\r\n")
replyStored = []byte("STORED\r\n")
replyNotStored = []byte("NOT_STORED\r\n")
replyExists = []byte("EXISTS\r\n")
replyNotFound = []byte("NOT_FOUND\r\n")
replyDeleted = []byte("DELETED\r\n")
replyEnd = []byte("END\r\n")
replyTouched = []byte("TOUCHED\r\n")
replyValueStr = "VALUE"
replyClientErrorPrefix = []byte("CLIENT_ERROR ")
replyServerErrorPrefix = []byte("SERVER_ERROR ")
)
const (
_encodeBuf = 4096 // 4kb
// 1024*1024 - 1, set error???
_largeValue = 1000 * 1000 // 1MB
)
type reader struct {
io.Reader
}
func (r *reader) Reset(rd io.Reader) {
r.Reader = rd
}
// conn is the low-level implementation of Conn
type conn struct {
// Shared
mu sync.Mutex
err error
conn net.Conn
// Read & Write
readTimeout time.Duration
writeTimeout time.Duration
rw *bufio.ReadWriter
// Item Reader
ir bytes.Reader
// Compress
gr gzip.Reader
gw *gzip.Writer
cb bytes.Buffer
// Encoding
edb bytes.Buffer
// json
jr reader
jd *json.Decoder
je *json.Encoder
// protobuffer
ped *proto.Buffer
// low level connection that implement memcache protocol provide basic operation.
type protocolConn interface {
Populate(ctx context.Context, cmd string, key string, flags uint32, expiration int32, cas uint64, data []byte) error
Get(ctx context.Context, key string) (*Item, error)
GetMulti(ctx context.Context, keys ...string) (map[string]*Item, error)
Touch(ctx context.Context, key string, expire int32) error
IncrDecr(ctx context.Context, cmd, key string, delta uint64) (uint64, error)
Delete(ctx context.Context, key string) error
Close() error
Err() error
}
// DialOption specifies an option for dialing a Memcache server.
@ -83,6 +35,7 @@ type DialOption struct {
type dialOptions struct {
readTimeout time.Duration
writeTimeout time.Duration
protocol string
dial func(network, addr string) (net.Conn, error)
}
@ -130,556 +83,205 @@ func Dial(network, address string, options ...DialOption) (Conn, error) {
if err != nil {
return nil, pkgerr.WithStack(err)
}
return NewConn(netConn, do.readTimeout, do.writeTimeout), nil
pconn, err := newASCIIConn(netConn, do.readTimeout, do.writeTimeout)
return &conn{pconn: pconn, ed: newEncodeDecoder()}, nil
}
// NewConn returns a new memcache connection for the given net connection.
func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn {
if writeTimeout <= 0 || readTimeout <= 0 {
panic("must config memcache timeout")
}
c := &conn{
conn: netConn,
rw: bufio.NewReadWriter(bufio.NewReader(netConn),
bufio.NewWriter(netConn)),
readTimeout: readTimeout,
writeTimeout: writeTimeout,
}
c.jd = json.NewDecoder(&c.jr)
c.je = json.NewEncoder(&c.edb)
c.gw = gzip.NewWriter(&c.cb)
c.edb.Grow(_encodeBuf)
// NOTE reuse bytes.Buffer internal buf
// DON'T concurrency call Scan
c.ped = proto.NewBuffer(c.edb.Bytes())
return c
type conn struct {
// low level connection.
pconn protocolConn
ed *encodeDecode
}
func (c *conn) Close() error {
c.mu.Lock()
err := c.err
if c.err == nil {
c.err = pkgerr.New("memcache: closed")
err = c.conn.Close()
}
c.mu.Unlock()
return err
}
func (c *conn) fatal(err error) error {
c.mu.Lock()
if c.err == nil {
c.err = pkgerr.WithStack(err)
// Close connection to force errors on subsequent calls and to unblock
// other reader or writer.
c.conn.Close()
}
c.mu.Unlock()
return c.err
return c.pconn.Close()
}
func (c *conn) Err() error {
c.mu.Lock()
err := c.err
c.mu.Unlock()
return err
return c.pconn.Err()
}
func (c *conn) Add(item *Item) error {
return c.populate("add", item)
func (c *conn) AddContext(ctx context.Context, item *Item) error {
return c.populate(ctx, "add", item)
}
func (c *conn) Set(item *Item) error {
return c.populate("set", item)
func (c *conn) SetContext(ctx context.Context, item *Item) error {
return c.populate(ctx, "set", item)
}
func (c *conn) Replace(item *Item) error {
return c.populate("replace", item)
func (c *conn) ReplaceContext(ctx context.Context, item *Item) error {
return c.populate(ctx, "replace", item)
}
func (c *conn) CompareAndSwap(item *Item) error {
return c.populate("cas", item)
func (c *conn) CompareAndSwapContext(ctx context.Context, item *Item) error {
return c.populate(ctx, "cas", item)
}
func (c *conn) populate(cmd string, item *Item) (err error) {
func (c *conn) populate(ctx context.Context, cmd string, item *Item) error {
if !legalKey(item.Key) {
return pkgerr.WithStack(ErrMalformedKey)
return ErrMalformedKey
}
var res []byte
if res, err = c.encode(item); err != nil {
return
}
l := len(res)
count := l/(_largeValue) + 1
if count == 1 {
item.Value = res
return c.populateOne(cmd, item)
data, err := c.ed.encode(item)
if err != nil {
return err
}
nItem := &Item{
Key: item.Key,
Value: []byte(strconv.Itoa(l)),
Expiration: item.Expiration,
Flags: item.Flags | flagLargeValue,
length := len(data)
if length < _largeValue {
return c.pconn.Populate(ctx, cmd, item.Key, item.Flags, item.Expiration, item.cas, data)
}
err = c.populateOne(cmd, nItem)
if err != nil {
return
count := length/_largeValue + 1
if err = c.pconn.Populate(ctx, cmd, item.Key, item.Flags|flagLargeValue, item.Expiration, item.cas, []byte(strconv.Itoa(length))); err != nil {
return err
}
k := item.Key
nItem.Flags = item.Flags
var chunk []byte
for i := 1; i <= count; i++ {
if i == count {
nItem.Value = res[_largeValue*(count-1):]
chunk = data[_largeValue*(count-1):]
} else {
nItem.Value = res[_largeValue*(i-1) : _largeValue*i]
chunk = data[_largeValue*(i-1) : _largeValue*i]
}
nItem.Key = fmt.Sprintf("%s%d", k, i)
if err = c.populateOne(cmd, nItem); err != nil {
return
key := fmt.Sprintf("%s%d", item.Key, i)
if err = c.pconn.Populate(ctx, cmd, key, item.Flags, item.Expiration, item.cas, chunk); err != nil {
return err
}
}
return
return nil
}
func (c *conn) populateOne(cmd string, item *Item) (err error) {
if c.writeTimeout != 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
}
// <command name> <key> <flags> <exptime> <bytes> [noreply]\r\n
if cmd == "cas" {
_, err = fmt.Fprintf(c.rw, "%s %s %d %d %d %d\r\n",
cmd, item.Key, item.Flags, item.Expiration, len(item.Value), item.cas)
} else {
_, err = fmt.Fprintf(c.rw, "%s %s %d %d %d\r\n",
cmd, item.Key, item.Flags, item.Expiration, len(item.Value))
}
if err != nil {
return c.fatal(err)
}
c.rw.Write(item.Value)
c.rw.Write(crlf)
if err = c.rw.Flush(); err != nil {
return c.fatal(err)
}
if c.readTimeout != 0 {
c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
func (c *conn) GetContext(ctx context.Context, key string) (*Item, error) {
if !legalKey(key) {
return nil, ErrMalformedKey
}
line, err := c.rw.ReadSlice('\n')
result, err := c.pconn.Get(ctx, key)
if err != nil {
return c.fatal(err)
return nil, err
}
switch {
case bytes.Equal(line, replyStored):
return nil
case bytes.Equal(line, replyNotStored):
return ErrNotStored
case bytes.Equal(line, replyExists):
return ErrCASConflict
case bytes.Equal(line, replyNotFound):
return ErrNotFound
if result.Flags&flagLargeValue != flagLargeValue {
return result, err
}
return pkgerr.WithStack(protocolError(string(line)))
return c.getLargeItem(ctx, result)
}
func (c *conn) Get(key string) (r *Item, err error) {
if !legalKey(key) {
return nil, pkgerr.WithStack(ErrMalformedKey)
}
if c.writeTimeout != 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
}
if _, err = fmt.Fprintf(c.rw, "gets %s\r\n", key); err != nil {
return nil, c.fatal(err)
}
if err = c.rw.Flush(); err != nil {
return nil, c.fatal(err)
func (c *conn) getLargeItem(ctx context.Context, result *Item) (*Item, error) {
length, err := strconv.Atoi(string(result.Value))
if err != nil {
return nil, err
}
if err = c.parseGetReply(func(it *Item) {
r = it
}); err != nil {
return
count := length/_largeValue + 1
keys := make([]string, 0, count)
for i := 1; i <= count; i++ {
keys = append(keys, fmt.Sprintf("%s%d", result.Key, i))
}
if r == nil {
err = ErrNotFound
return
var results map[string]*Item
if results, err = c.pconn.GetMulti(ctx, keys...); err != nil {
return nil, err
}
if r.Flags&flagLargeValue != flagLargeValue {
return
if len(results) < count {
return nil, ErrNotFound
}
if r, err = c.getLargeValue(r); err != nil {
return
result.Value = make([]byte, 0, length)
for _, k := range keys {
ti := results[k]
if ti == nil || ti.Value == nil {
return nil, ErrNotFound
}
result.Value = append(result.Value, ti.Value...)
}
return
result.Flags = result.Flags ^ flagLargeValue
return result, nil
}
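// populate and getLargeItem above form the two halves of the large-value
// scheme: payloads of at least _largeValue bytes are split into chunks stored
// under derived keys ("<key>1", "<key>2", ...), while the parent key keeps only
// the total length plus the flagLargeValue marker. An illustrative walk-through
// (the real chunk size is whatever _largeValue is defined as in this package):
//
//	// Assume _largeValue = 1000 and a 2500-byte encoded value for key "big".
//	// populate stores:
//	//	"big"  -> "2500"           (Flags | flagLargeValue)
//	//	"big1" -> bytes [0:1000)
//	//	"big2" -> bytes [1000:2000)
//	//	"big3" -> bytes [2000:2500)
//	// getLargeItem reads "big", parses 2500, fetches "big1".."big3" via GetMulti,
//	// concatenates the chunks and clears flagLargeValue before returning the item.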
func (c *conn) GetMulti(keys []string) (res map[string]*Item, err error) {
func (c *conn) GetMultiContext(ctx context.Context, keys []string) (map[string]*Item, error) {
// TODO: move to protocolConn?
for _, key := range keys {
if !legalKey(key) {
return nil, pkgerr.WithStack(ErrMalformedKey)
return nil, ErrMalformedKey
}
}
if c.writeTimeout != 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
}
if _, err = fmt.Fprintf(c.rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
return nil, c.fatal(err)
}
if err = c.rw.Flush(); err != nil {
return nil, c.fatal(err)
}
res = make(map[string]*Item, len(keys))
if err = c.parseGetReply(func(it *Item) {
res[it.Key] = it
}); err != nil {
return
results, err := c.pconn.GetMulti(ctx, keys...)
if err != nil {
return results, err
}
for k, v := range res {
for k, v := range results {
if v.Flags&flagLargeValue != flagLargeValue {
continue
}
r, err := c.getLargeValue(v)
if err != nil {
return res, err
if v, err = c.getLargeItem(ctx, v); err != nil {
return results, err
}
res[k] = r
results[k] = v
}
return
return results, nil
}
func (c *conn) getMulti(keys []string) (res map[string]*Item, err error) {
for _, key := range keys {
if !legalKey(key) {
return nil, pkgerr.WithStack(ErrMalformedKey)
}
}
if c.writeTimeout != 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
}
if _, err = fmt.Fprintf(c.rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
return nil, c.fatal(err)
}
if err = c.rw.Flush(); err != nil {
return nil, c.fatal(err)
func (c *conn) DeleteContext(ctx context.Context, key string) error {
if !legalKey(key) {
return ErrMalformedKey
}
res = make(map[string]*Item, len(keys))
err = c.parseGetReply(func(it *Item) {
res[it.Key] = it
})
return
return c.pconn.Delete(ctx, key)
}
func (c *conn) getLargeValue(it *Item) (r *Item, err error) {
l, err := strconv.Atoi(string(it.Value))
if err != nil {
return
}
count := l/_largeValue + 1
keys := make([]string, 0, count)
for i := 1; i <= count; i++ {
keys = append(keys, fmt.Sprintf("%s%d", it.Key, i))
}
items, err := c.getMulti(keys)
if err != nil {
return
}
if len(items) < count {
err = ErrNotFound
return
}
v := make([]byte, 0, l)
for _, k := range keys {
if items[k] == nil || items[k].Value == nil {
err = ErrNotFound
return
}
v = append(v, items[k].Value...)
func (c *conn) IncrementContext(ctx context.Context, key string, delta uint64) (uint64, error) {
if !legalKey(key) {
return 0, ErrMalformedKey
}
it.Value = v
it.Flags = it.Flags ^ flagLargeValue
r = it
return
return c.pconn.IncrDecr(ctx, "incr", key, delta)
}
func (c *conn) parseGetReply(f func(*Item)) error {
if c.readTimeout != 0 {
c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
}
for {
line, err := c.rw.ReadSlice('\n')
if err != nil {
return c.fatal(err)
}
if bytes.Equal(line, replyEnd) {
return nil
}
if bytes.HasPrefix(line, replyServerErrorPrefix) {
errMsg := line[len(replyServerErrorPrefix):]
return c.fatal(protocolError(errMsg))
}
it := new(Item)
size, err := scanGetReply(line, it)
if err != nil {
return c.fatal(err)
}
it.Value = make([]byte, size+2)
if _, err = io.ReadFull(c.rw, it.Value); err != nil {
return c.fatal(err)
}
if !bytes.HasSuffix(it.Value, crlf) {
return c.fatal(protocolError("corrupt get reply, no except CRLF"))
}
it.Value = it.Value[:size]
f(it)
func (c *conn) DecrementContext(ctx context.Context, key string, delta uint64) (uint64, error) {
if !legalKey(key) {
return 0, ErrMalformedKey
}
return c.pconn.IncrDecr(ctx, "decr", key, delta)
}
func scanGetReply(line []byte, item *Item) (size int, err error) {
if !bytes.HasSuffix(line, crlf) {
return 0, protocolError("corrupt get reply, no except CRLF")
}
// VALUE <key> <flags> <bytes> [<cas unique>]
chunks := strings.Split(string(line[:len(line)-2]), spaceStr)
if len(chunks) < 4 {
return 0, protocolError("corrupt get reply")
}
if chunks[0] != replyValueStr {
return 0, protocolError("corrupt get reply, no except VALUE")
}
item.Key = chunks[1]
flags64, err := strconv.ParseUint(chunks[2], 10, 32)
if err != nil {
return 0, err
}
item.Flags = uint32(flags64)
if size, err = strconv.Atoi(chunks[3]); err != nil {
return
}
if len(chunks) > 4 {
item.cas, err = strconv.ParseUint(chunks[4], 10, 64)
func (c *conn) TouchContext(ctx context.Context, key string, seconds int32) error {
if !legalKey(key) {
return ErrMalformedKey
}
return
return c.pconn.Touch(ctx, key, seconds)
}
func (c *conn) Touch(key string, expire int32) (err error) {
if !legalKey(key) {
return pkgerr.WithStack(ErrMalformedKey)
}
line, err := c.writeReadLine("touch %s %d\r\n", key, expire)
if err != nil {
return err
}
switch {
case bytes.Equal(line, replyTouched):
return nil
case bytes.Equal(line, replyNotFound):
return ErrNotFound
default:
return pkgerr.WithStack(protocolError(string(line)))
}
func (c *conn) Add(item *Item) error {
return c.AddContext(context.TODO(), item)
}
func (c *conn) Increment(key string, delta uint64) (uint64, error) {
return c.incrDecr("incr", key, delta)
func (c *conn) Set(item *Item) error {
return c.SetContext(context.TODO(), item)
}
func (c *conn) Decrement(key string, delta uint64) (newValue uint64, err error) {
return c.incrDecr("decr", key, delta)
func (c *conn) Replace(item *Item) error {
return c.ReplaceContext(context.TODO(), item)
}
func (c *conn) incrDecr(cmd, key string, delta uint64) (uint64, error) {
if !legalKey(key) {
return 0, pkgerr.WithStack(ErrMalformedKey)
}
line, err := c.writeReadLine("%s %s %d\r\n", cmd, key, delta)
if err != nil {
return 0, err
}
switch {
case bytes.Equal(line, replyNotFound):
return 0, ErrNotFound
case bytes.HasPrefix(line, replyClientErrorPrefix):
errMsg := line[len(replyClientErrorPrefix):]
return 0, pkgerr.WithStack(protocolError(errMsg))
}
val, err := strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
if err != nil {
return 0, err
}
return val, nil
func (c *conn) Get(key string) (*Item, error) {
return c.GetContext(context.TODO(), key)
}
func (c *conn) Delete(key string) (err error) {
if !legalKey(key) {
return pkgerr.WithStack(ErrMalformedKey)
}
line, err := c.writeReadLine("delete %s\r\n", key)
if err != nil {
return err
}
switch {
case bytes.Equal(line, replyOK):
return nil
case bytes.Equal(line, replyDeleted):
return nil
case bytes.Equal(line, replyNotStored):
return ErrNotStored
case bytes.Equal(line, replyExists):
return ErrCASConflict
case bytes.Equal(line, replyNotFound):
return ErrNotFound
}
return pkgerr.WithStack(protocolError(string(line)))
func (c *conn) GetMulti(keys []string) (map[string]*Item, error) {
return c.GetMultiContext(context.TODO(), keys)
}
func (c *conn) writeReadLine(format string, args ...interface{}) ([]byte, error) {
if c.writeTimeout != 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
}
_, err := fmt.Fprintf(c.rw, format, args...)
if err != nil {
return nil, c.fatal(pkgerr.WithStack(err))
}
if err = c.rw.Flush(); err != nil {
return nil, c.fatal(pkgerr.WithStack(err))
}
if c.readTimeout != 0 {
c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
}
line, err := c.rw.ReadSlice('\n')
if err != nil {
return line, c.fatal(pkgerr.WithStack(err))
}
return line, nil
func (c *conn) Delete(key string) error {
return c.DeleteContext(context.TODO(), key)
}
func (c *conn) Scan(item *Item, v interface{}) (err error) {
c.ir.Reset(item.Value)
if item.Flags&FlagGzip == FlagGzip {
if err = c.gr.Reset(&c.ir); err != nil {
return
}
if err = c.decode(&c.gr, item, v); err != nil {
err = pkgerr.WithStack(err)
return
}
err = c.gr.Close()
} else {
err = c.decode(&c.ir, item, v)
}
err = pkgerr.WithStack(err)
return
func (c *conn) Increment(key string, delta uint64) (newValue uint64, err error) {
return c.IncrementContext(context.TODO(), key, delta)
}
func (c *conn) WithContext(ctx context.Context) Conn {
// FIXME: implement WithContext
return c
func (c *conn) Decrement(key string, delta uint64) (newValue uint64, err error) {
return c.DecrementContext(context.TODO(), key, delta)
}
func (c *conn) encode(item *Item) (data []byte, err error) {
if (item.Flags | _flagEncoding) == _flagEncoding {
if item.Value == nil {
return nil, ErrItem
}
} else if item.Object == nil {
return nil, ErrItem
}
// encoding
switch {
case item.Flags&FlagGOB == FlagGOB:
c.edb.Reset()
if err = gob.NewEncoder(&c.edb).Encode(item.Object); err != nil {
return
}
data = c.edb.Bytes()
case item.Flags&FlagProtobuf == FlagProtobuf:
c.edb.Reset()
c.ped.SetBuf(c.edb.Bytes())
pb, ok := item.Object.(proto.Message)
if !ok {
err = ErrItemObject
return
}
if err = c.ped.Marshal(pb); err != nil {
return
}
data = c.ped.Bytes()
case item.Flags&FlagJSON == FlagJSON:
c.edb.Reset()
if err = c.je.Encode(item.Object); err != nil {
return
}
data = c.edb.Bytes()
default:
data = item.Value
}
// compress
if item.Flags&FlagGzip == FlagGzip {
c.cb.Reset()
c.gw.Reset(&c.cb)
if _, err = c.gw.Write(data); err != nil {
return
}
if err = c.gw.Close(); err != nil {
return
}
data = c.cb.Bytes()
}
if len(data) > 8000000 {
err = ErrValueSize
}
return
}
func (c *conn) decode(rd io.Reader, item *Item, v interface{}) (err error) {
var data []byte
switch {
case item.Flags&FlagGOB == FlagGOB:
err = gob.NewDecoder(rd).Decode(v)
case item.Flags&FlagJSON == FlagJSON:
c.jr.Reset(rd)
err = c.jd.Decode(v)
default:
data = item.Value
if item.Flags&FlagGzip == FlagGzip {
c.edb.Reset()
if _, err = io.Copy(&c.edb, rd); err != nil {
return
}
data = c.edb.Bytes()
}
if item.Flags&FlagProtobuf == FlagProtobuf {
m, ok := v.(proto.Message)
if !ok {
err = ErrItemObject
return
}
c.ped.SetBuf(data)
err = c.ped.Unmarshal(m)
} else {
switch v.(type) {
case *[]byte:
d := v.(*[]byte)
*d = data
case *string:
d := v.(*string)
*d = string(data)
case interface{}:
err = json.Unmarshal(data, v)
}
}
}
return
func (c *conn) CompareAndSwap(item *Item) error {
return c.CompareAndSwapContext(context.TODO(), item)
}
func legalKey(key string) bool {
if len(key) > 250 || len(key) == 0 {
return false
}
for i := 0; i < len(key); i++ {
if key[i] <= ' ' || key[i] == 0x7f {
return false
}
}
return true
func (c *conn) Touch(key string, seconds int32) (err error) {
return c.TouchContext(context.TODO(), key, seconds)
}
func (c *conn) Scan(item *Item, v interface{}) (err error) {
return pkgerr.WithStack(c.ed.decode(item, v))
}

@ -0,0 +1,185 @@
package memcache
import (
"bytes"
"encoding/json"
"testing"
test "github.com/bilibili/kratos/pkg/cache/memcache/test"
"github.com/gogo/protobuf/proto"
)
func TestConnRaw(t *testing.T) {
item := &Item{
Key: "test",
Value: []byte("test"),
Flags: FlagRAW,
Expiration: 60,
cas: 0,
}
if err := testConnASCII.Set(item); err != nil {
t.Errorf("conn.Store() error(%v)", err)
}
}
func TestConnSerialization(t *testing.T) {
type TestObj struct {
Name string
Age int32
}
tests := []struct {
name string
a *Item
e error
}{
{
"JSON",
&Item{
Key: "test_json",
Object: &TestObj{"json", 1},
Expiration: 60,
Flags: FlagJSON,
},
nil,
},
{
"JSONGzip",
&Item{
Key: "test_json_gzip",
Object: &TestObj{"jsongzip", 2},
Expiration: 60,
Flags: FlagJSON | FlagGzip,
},
nil,
},
{
"GOB",
&Item{
Key: "test_gob",
Object: &TestObj{"gob", 3},
Expiration: 60,
Flags: FlagGOB,
},
nil,
},
{
"GOBGzip",
&Item{
Key: "test_gob_gzip",
Object: &TestObj{"gobgzip", 4},
Expiration: 60,
Flags: FlagGOB | FlagGzip,
},
nil,
},
{
"Protobuf",
&Item{
Key: "test_protobuf",
Object: &test.TestItem{Name: "protobuf", Age: 6},
Expiration: 60,
Flags: FlagProtobuf,
},
nil,
},
{
"ProtobufGzip",
&Item{
Key: "test_protobuf_gzip",
Object: &test.TestItem{Name: "protobufgzip", Age: 7},
Expiration: 60,
Flags: FlagProtobuf | FlagGzip,
},
nil,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
if err := testConnASCII.Set(tc.a); err != nil {
t.Fatal(err)
}
if r, err := testConnASCII.Get(tc.a.Key); err != tc.e {
t.Fatal(err)
} else {
if (tc.a.Flags & FlagProtobuf) > 0 {
var no test.TestItem
if err := testConnASCII.Scan(r, &no); err != nil {
t.Fatal(err)
}
if (tc.a.Object.(*test.TestItem).Name != no.Name) || (tc.a.Object.(*test.TestItem).Age != no.Age) {
t.Fatalf("compare failed error, %v %v", tc.a.Object.(*test.TestItem), no)
}
} else {
var no TestObj
if err := testConnASCII.Scan(r, &no); err != nil {
t.Fatal(err)
}
if (tc.a.Object.(*TestObj).Name != no.Name) || (tc.a.Object.(*TestObj).Age != no.Age) {
t.Fatalf("compare failed error, %v %v", tc.a.Object.(*TestObj), no)
}
}
}
})
}
}
func BenchmarkConnJSON(b *testing.B) {
st := &struct {
Name string
Age int
}{"json", 10}
itemx := &Item{Key: "json", Object: st, Flags: FlagJSON}
var (
eb bytes.Buffer
je *json.Encoder
ir bytes.Reader
jd *json.Decoder
jr reader
nst test.TestItem
)
jd = json.NewDecoder(&jr)
je = json.NewEncoder(&eb)
eb.Grow(_encodeBuf)
// NOTE: reuses the bytes.Buffer internal buffer
// DON'T call Scan concurrently
b.ResetTimer()
for i := 0; i < b.N; i++ {
eb.Reset()
if err := je.Encode(itemx.Object); err != nil {
return
}
data := eb.Bytes()
ir.Reset(data)
jr.Reset(&ir)
jd.Decode(&nst)
}
}
func BenchmarkConnProtobuf(b *testing.B) {
st := &test.TestItem{Name: "protobuf", Age: 10}
itemx := &Item{Key: "protobuf", Object: st, Flags: FlagJSON}
var (
eb bytes.Buffer
nst test.TestItem
ped *proto.Buffer
)
ped = proto.NewBuffer(eb.Bytes())
eb.Grow(_encodeBuf)
b.ResetTimer()
for i := 0; i < b.N; i++ {
ped.Reset()
pb, ok := itemx.Object.(proto.Message)
if !ok {
return
}
if err := ped.Marshal(pb); err != nil {
return
}
data := ped.Bytes()
ped.SetBuf(data)
ped.Unmarshal(&nst)
}
}

@ -0,0 +1,162 @@
package memcache
import (
"bytes"
"compress/gzip"
"encoding/gob"
"encoding/json"
"io"
"github.com/gogo/protobuf/proto"
)
type reader struct {
io.Reader
}
func (r *reader) Reset(rd io.Reader) {
r.Reader = rd
}
const _encodeBuf = 4096 // 4kb
type encodeDecode struct {
// Item Reader
ir bytes.Reader
// Compress
gr gzip.Reader
gw *gzip.Writer
cb bytes.Buffer
// Encoding
edb bytes.Buffer
// json
jr reader
jd *json.Decoder
je *json.Encoder
// protobuffer
ped *proto.Buffer
}
func newEncodeDecoder() *encodeDecode {
ed := &encodeDecode{}
ed.jd = json.NewDecoder(&ed.jr)
ed.je = json.NewEncoder(&ed.edb)
ed.gw = gzip.NewWriter(&ed.cb)
ed.edb.Grow(_encodeBuf)
// NOTE: reuses the bytes.Buffer internal buffer
// DON'T call Scan concurrently
ed.ped = proto.NewBuffer(ed.edb.Bytes())
return ed
}
func (ed *encodeDecode) encode(item *Item) (data []byte, err error) {
if (item.Flags | _flagEncoding) == _flagEncoding {
if item.Value == nil {
return nil, ErrItem
}
} else if item.Object == nil {
return nil, ErrItem
}
// encoding
switch {
case item.Flags&FlagGOB == FlagGOB:
ed.edb.Reset()
if err = gob.NewEncoder(&ed.edb).Encode(item.Object); err != nil {
return
}
data = ed.edb.Bytes()
case item.Flags&FlagProtobuf == FlagProtobuf:
ed.edb.Reset()
ed.ped.SetBuf(ed.edb.Bytes())
pb, ok := item.Object.(proto.Message)
if !ok {
err = ErrItemObject
return
}
if err = ed.ped.Marshal(pb); err != nil {
return
}
data = ed.ped.Bytes()
case item.Flags&FlagJSON == FlagJSON:
ed.edb.Reset()
if err = ed.je.Encode(item.Object); err != nil {
return
}
data = ed.edb.Bytes()
default:
data = item.Value
}
// compress
if item.Flags&FlagGzip == FlagGzip {
ed.cb.Reset()
ed.gw.Reset(&ed.cb)
if _, err = ed.gw.Write(data); err != nil {
return
}
if err = ed.gw.Close(); err != nil {
return
}
data = ed.cb.Bytes()
}
if len(data) > 8000000 {
err = ErrValueSize
}
return
}
func (ed *encodeDecode) decode(item *Item, v interface{}) (err error) {
var (
data []byte
rd io.Reader
)
ed.ir.Reset(item.Value)
rd = &ed.ir
if item.Flags&FlagGzip == FlagGzip {
rd = &ed.gr
if err = ed.gr.Reset(&ed.ir); err != nil {
return
}
defer func() {
if e := ed.gr.Close(); e != nil {
err = e
}
}()
}
switch {
case item.Flags&FlagGOB == FlagGOB:
err = gob.NewDecoder(rd).Decode(v)
case item.Flags&FlagJSON == FlagJSON:
ed.jr.Reset(rd)
err = ed.jd.Decode(v)
default:
data = item.Value
if item.Flags&FlagGzip == FlagGzip {
ed.edb.Reset()
if _, err = io.Copy(&ed.edb, rd); err != nil {
return
}
data = ed.edb.Bytes()
}
if item.Flags&FlagProtobuf == FlagProtobuf {
m, ok := v.(proto.Message)
if !ok {
err = ErrItemObject
return
}
ed.ped.SetBuf(data)
err = ed.ped.Unmarshal(m)
} else {
switch v.(type) {
case *[]byte:
d := v.(*[]byte)
*d = data
case *string:
d := v.(*string)
*d = string(data)
case interface{}:
err = json.Unmarshal(data, v)
}
}
}
return
}
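// A round-trip sketch for the helpers above (they are internal to this package
// and, per the note in newEncodeDecoder, not safe for concurrent use); the
// struct and flag choice are illustrative:
//
//	ed := newEncodeDecoder()
//	type demo struct {
//		Name string
//		Age  int32
//	}
//	item := &Item{Key: "k", Object: &demo{Name: "abc", Age: 1}, Flags: FlagJSON | FlagGzip, Expiration: 60}
//	data, err := ed.encode(item) // JSON-encode, then gzip-compress
//	if err != nil {
//		// ErrItem, ErrItemObject or ErrValueSize
//	}
//	item.Value = data
//	var got demo
//	if err = ed.decode(item, &got); err != nil { // gunzip, then JSON-decode
//		// handle decode error
//	}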

@ -0,0 +1,220 @@
package memcache
import (
"bytes"
"testing"
mt "github.com/bilibili/kratos/pkg/cache/memcache/test"
)
func TestEncode(t *testing.T) {
type TestObj struct {
Name string
Age int32
}
testObj := TestObj{"abc", 1}
ed := newEncodeDecoder()
tests := []struct {
name string
a *Item
r []byte
e error
}{
{
"EncodeRawFlagErrItem",
&Item{
Object: &TestObj{"abc", 1},
Flags: FlagRAW,
},
[]byte{},
ErrItem,
},
{
"EncodeEncodeFlagErrItem",
&Item{
Value: []byte("test"),
Flags: FlagJSON,
},
[]byte{},
ErrItem,
},
{
"EncodeEmpty",
&Item{
Value: []byte(""),
Flags: FlagRAW,
},
[]byte(""),
nil,
},
{
"EncodeMaxSize",
&Item{
Value: bytes.Repeat([]byte("A"), 8000000),
Flags: FlagRAW,
},
bytes.Repeat([]byte("A"), 8000000),
nil,
},
{
"EncodeExceededMaxSize",
&Item{
Value: bytes.Repeat([]byte("A"), 8000000+1),
Flags: FlagRAW,
},
nil,
ErrValueSize,
},
{
"EncodeGOB",
&Item{
Object: testObj,
Flags: FlagGOB,
},
[]byte{38, 255, 131, 3, 1, 1, 7, 84, 101, 115, 116, 79, 98, 106, 1, 255, 132, 0, 1, 2, 1, 4, 78, 97, 109, 101, 1, 12, 0, 1, 3, 65, 103, 101, 1, 4, 0, 0, 0, 10, 255, 132, 1, 3, 97, 98, 99, 1, 2, 0},
nil,
},
{
"EncodeJSON",
&Item{
Object: testObj,
Flags: FlagJSON,
},
[]byte{123, 34, 78, 97, 109, 101, 34, 58, 34, 97, 98, 99, 34, 44, 34, 65, 103, 101, 34, 58, 49, 125, 10},
nil,
},
{
"EncodeProtobuf",
&Item{
Object: &mt.TestItem{Name: "abc", Age: 1},
Flags: FlagProtobuf,
},
[]byte{10, 3, 97, 98, 99, 16, 1},
nil,
},
{
"EncodeGzip",
&Item{
Value: bytes.Repeat([]byte("B"), 50),
Flags: FlagGzip,
},
[]byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 114, 34, 25, 0, 2, 0, 0, 255, 255, 252, 253, 67, 209, 50, 0, 0, 0},
nil,
},
{
"EncodeGOBGzip",
&Item{
Object: testObj,
Flags: FlagGOB | FlagGzip,
},
[]byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 82, 251, 223, 204, 204, 200, 200, 30, 146, 90, 92, 226, 159, 148, 197, 248, 191, 133, 129, 145, 137, 145, 197, 47, 49, 55, 149, 145, 135, 129, 145, 217, 49, 61, 149, 145, 133, 129, 129, 129, 235, 127, 11, 35, 115, 98, 82, 50, 35, 19, 3, 32, 0, 0, 255, 255, 211, 249, 1, 154, 50, 0, 0, 0},
nil,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if r, err := ed.encode(test.a); err != test.e {
t.Fatal(err)
} else {
if err == nil {
if !bytes.Equal(r, test.r) {
t.Fatalf("not equal, expect %v\n got %v", test.r, r)
}
}
}
})
}
}
func TestDecode(t *testing.T) {
type TestObj struct {
Name string
Age int32
}
testObj := &TestObj{"abc", 1}
ed := newEncodeDecoder()
tests := []struct {
name string
a *Item
r interface{}
e error
}{
{
"DecodeGOB",
&Item{
Flags: FlagGOB,
Value: []byte{38, 255, 131, 3, 1, 1, 7, 84, 101, 115, 116, 79, 98, 106, 1, 255, 132, 0, 1, 2, 1, 4, 78, 97, 109, 101, 1, 12, 0, 1, 3, 65, 103, 101, 1, 4, 0, 0, 0, 10, 255, 132, 1, 3, 97, 98, 99, 1, 2, 0},
},
testObj,
nil,
},
{
"DecodeJSON",
&Item{
Value: []byte{123, 34, 78, 97, 109, 101, 34, 58, 34, 97, 98, 99, 34, 44, 34, 65, 103, 101, 34, 58, 49, 125, 10},
Flags: FlagJSON,
},
testObj,
nil,
},
{
"DecodeProtobuf",
&Item{
Value: []byte{10, 3, 97, 98, 99, 16, 1},
Flags: FlagProtobuf,
},
&mt.TestItem{Name: "abc", Age: 1},
nil,
},
{
"DecodeGzip",
&Item{
Value: []byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 114, 34, 25, 0, 2, 0, 0, 255, 255, 252, 253, 67, 209, 50, 0, 0, 0},
Flags: FlagGzip,
},
bytes.Repeat([]byte("B"), 50),
nil,
},
{
"DecodeGOBGzip",
&Item{
Value: []byte{31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 82, 251, 223, 204, 204, 200, 200, 30, 146, 90, 92, 226, 159, 148, 197, 248, 191, 133, 129, 145, 137, 145, 197, 47, 49, 55, 149, 145, 135, 129, 145, 217, 49, 61, 149, 145, 133, 129, 129, 129, 235, 127, 11, 35, 115, 98, 82, 50, 35, 19, 3, 32, 0, 0, 255, 255, 211, 249, 1, 154, 50, 0, 0, 0},
Flags: FlagGOB | FlagGzip,
},
testObj,
nil,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if (test.a.Flags & FlagProtobuf) > 0 {
var dd mt.TestItem
if err := ed.decode(test.a, &dd); err != nil {
t.Fatal(err)
}
if (test.r.(*mt.TestItem).Name != dd.Name) || (test.r.(*mt.TestItem).Age != dd.Age) {
t.Fatalf("compare failed error, expect %v\n got %v", test.r.(*mt.TestItem), dd)
}
} else if test.a.Flags == FlagGzip {
var dd []byte
if err := ed.decode(test.a, &dd); err != nil {
t.Fatal(err)
}
if !bytes.Equal(dd, test.r.([]byte)) {
t.Fatalf("compare failed error, expect %v\n got %v", test.r, dd)
}
} else {
var dd TestObj
if err := ed.decode(test.a, &dd); err != nil {
t.Fatal(err)
}
if (test.r.(*TestObj).Name != dd.Name) || (test.r.(*TestObj).Age != dd.Age) {
t.Fatalf("compare failed error, expect %v\n got %v", test.r.(*TestObj), dd)
}
}
})
}
}

@ -0,0 +1,177 @@
package memcache
import (
"encoding/json"
"fmt"
"time"
)
var testExampleAddr string
func ExampleConn_set() {
var (
err error
value []byte
conn Conn
expire int32 = 100
p = struct {
Name string
Age int64
}{"golang", 10}
)
cnop := DialConnectTimeout(time.Duration(time.Second))
rdop := DialReadTimeout(time.Duration(time.Second))
wrop := DialWriteTimeout(time.Duration(time.Second))
if value, err = json.Marshal(p); err != nil {
fmt.Println(err)
return
}
if conn, err = Dial("tcp", testExampleAddr, cnop, rdop, wrop); err != nil {
fmt.Println(err)
return
}
// FlagRAW test
itemRaw := &Item{
Key: "test_raw",
Value: value,
Expiration: expire,
}
if err = conn.Set(itemRaw); err != nil {
fmt.Println(err)
return
}
// FlagGzip
itemGZip := &Item{
Key: "test_gzip",
Value: value,
Flags: FlagGzip,
Expiration: expire,
}
if err = conn.Set(itemGZip); err != nil {
fmt.Println(err)
return
}
// FlagGOB
itemGOB := &Item{
Key: "test_gob",
Object: p,
Flags: FlagGOB,
Expiration: expire,
}
if err = conn.Set(itemGOB); err != nil {
fmt.Println(err)
return
}
// FlagJSON
itemJSON := &Item{
Key: "test_json",
Object: p,
Flags: FlagJSON,
Expiration: expire,
}
if err = conn.Set(itemJSON); err != nil {
fmt.Println(err)
return
}
// FlagJSON | FlagGzip
itemJSONGzip := &Item{
Key: "test_jsonGzip",
Object: p,
Flags: FlagJSON | FlagGzip,
Expiration: expire,
}
if err = conn.Set(itemJSONGzip); err != nil {
fmt.Println(err)
return
}
// Output:
}
func ExampleConn_get() {
var (
err error
item2 *Item
conn Conn
p struct {
Name string
Age int64
}
)
cnop := DialConnectTimeout(time.Duration(time.Second))
rdop := DialReadTimeout(time.Duration(time.Second))
wrop := DialWriteTimeout(time.Duration(time.Second))
if conn, err = Dial("tcp", testExampleAddr, cnop, rdop, wrop); err != nil {
fmt.Println(err)
return
}
if item2, err = conn.Get("test_raw"); err != nil {
fmt.Println(err)
} else {
if err = conn.Scan(item2, &p); err != nil {
fmt.Printf("FlagRAW conn.Scan error(%v)\n", err)
return
}
}
// FlagGZip
if item2, err = conn.Get("test_gzip"); err != nil {
fmt.Println(err)
} else {
if err = conn.Scan(item2, &p); err != nil {
fmt.Printf("FlagGZip conn.Scan error(%v)\n", err)
return
}
}
// FlagGOB
if item2, err = conn.Get("test_gob"); err != nil {
fmt.Println(err)
} else {
if err = conn.Scan(item2, &p); err != nil {
fmt.Printf("FlagGOB conn.Scan error(%v)\n", err)
return
}
}
// FlagJSON
if item2, err = conn.Get("test_json"); err != nil {
fmt.Println(err)
} else {
if err = conn.Scan(item2, &p); err != nil {
fmt.Printf("FlagJSON conn.Scan error(%v)\n", err)
return
}
}
// Output:
}
func ExampleConn_getMulti() {
var (
err error
conn Conn
res map[string]*Item
keys = []string{"test_raw", "test_gzip"}
p struct {
Name string
Age int64
}
)
cnop := DialConnectTimeout(time.Duration(time.Second))
rdop := DialReadTimeout(time.Duration(time.Second))
wrop := DialWriteTimeout(time.Duration(time.Second))
if conn, err = Dial("tcp", testExampleAddr, cnop, rdop, wrop); err != nil {
fmt.Println(err)
return
}
if res, err = conn.GetMulti(keys); err != nil {
fmt.Printf("conn.GetMulti(%v) error(%v)", keys, err)
return
}
for _, v := range res {
if err = conn.Scan(v, &p); err != nil {
fmt.Printf("conn.Scan error(%v)\n", err)
return
}
fmt.Println(p)
}
// Output:
//{golang 10}
//{golang 10}
}

@ -0,0 +1,85 @@
package memcache
import (
"log"
"os"
"testing"
"time"
"github.com/bilibili/kratos/pkg/container/pool"
xtime "github.com/bilibili/kratos/pkg/time"
)
var testConnASCII Conn
var testMemcache *Memcache
var testPool *Pool
var testMemcacheAddr string
func setupTestConnASCII(addr string) {
var err error
cnop := DialConnectTimeout(time.Duration(2 * time.Second))
rdop := DialReadTimeout(time.Duration(2 * time.Second))
wrop := DialWriteTimeout(time.Duration(2 * time.Second))
testConnASCII, err = Dial("tcp", addr, cnop, rdop, wrop)
if err != nil {
log.Fatal(err)
}
testConnASCII.Delete("test")
testConnASCII.Delete("test1")
testConnASCII.Delete("test2")
if err != nil {
log.Fatal(err)
}
}
func setupTestMemcache(addr string) {
testConfig := &Config{
Config: &pool.Config{
Active: 10,
Idle: 10,
IdleTimeout: xtime.Duration(time.Second),
WaitTimeout: xtime.Duration(time.Second),
Wait: false,
},
Addr: addr,
Proto: "tcp",
DialTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
}
testMemcache = New(testConfig)
}
func setupTestPool(addr string) {
config := &Config{
Name: "test",
Proto: "tcp",
Addr: addr,
DialTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
}
config.Config = &pool.Config{
Active: 10,
Idle: 5,
IdleTimeout: xtime.Duration(90 * time.Second),
}
testPool = NewPool(config)
}
func TestMain(m *testing.M) {
testMemcacheAddr = os.Getenv("TEST_MEMCACHE_ADDR")
if testExampleAddr == "" {
log.Print("TEST_MEMCACHE_ADDR not provide skip test.")
// ignored test.
os.Exit(0)
}
setupTestConnASCII(testMemcacheAddr)
setupTestMemcache(testMemcacheAddr)
setupTestPool(testMemcacheAddr)
// TODO: add setupexample?
testExampleAddr = testMemcacheAddr
ret := m.Run()
os.Exit(ret)
}
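// To run these tests against a live memcached one would export the address
// first, e.g. (address illustrative):
//
//	TEST_MEMCACHE_ADDR=127.0.0.1:11211 go test ./pkg/cache/memcache/...
//
// When the variable is unset, TestMain skips the whole package as shown above.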

@ -2,12 +2,10 @@ package memcache
import (
"context"
)
// Error represents an error returned in a command reply.
type Error string
func (err Error) Error() string { return string(err) }
"github.com/bilibili/kratos/pkg/container/pool"
xtime "github.com/bilibili/kratos/pkg/time"
)
const (
// Flag, 15(encoding) bit+ 17(compress) bit
@ -87,20 +85,20 @@ type Conn interface {
GetMulti(keys []string) (map[string]*Item, error)
// Delete deletes the item with the provided key.
// The error ErrCacheMiss is returned if the item didn't already exist in
// The error ErrNotFound is returned if the item didn't already exist in
// the cache.
Delete(key string) error
// Increment atomically increments key by delta. The return value is the
// new value after being incremented or an error. If the value didn't exist
// in memcached the error is ErrCacheMiss. The value in memcached must be
// in memcached the error is ErrNotFound. The value in memcached must be
// a decimal number, or an error will be returned.
// On 64-bit overflow, the new value wraps around.
Increment(key string, delta uint64) (newValue uint64, err error)
// Decrement atomically decrements key by delta. The return value is the
// new value after being decremented or an error. If the value didn't exist
// in memcached the error is ErrCacheMiss. The value in memcached must be
// in memcached the error is ErrNotFound. The value in memcached must be
// a decimal number, or an error will be returned. On underflow, the new
// value is capped at zero and does not wrap around.
Decrement(key string, delta uint64) (newValue uint64, err error)
@ -116,7 +114,7 @@ type Conn interface {
// Touch updates the expiry for the given key. The seconds parameter is
// either a Unix timestamp or, if seconds is less than 1 month, the number
// of seconds into the future at which time the item will expire.
//ErrCacheMiss is returned if the key is not in the cache. The key must be
// ErrNotFound is returned if the key is not in the cache. The key must be
// at most 250 bytes in length.
Touch(key string, seconds int32) (err error)
@ -129,8 +127,251 @@ type Conn interface {
//
Scan(item *Item, v interface{}) (err error)
// WithContext return a Conn with its context changed to ctx
// the context controls the entire lifetime of Conn before you change it
// NOTE: this method is not thread-safe
WithContext(ctx context.Context) Conn
// Add writes the given item, if no value already exists for its key.
// ErrNotStored is returned if that condition is not met.
AddContext(ctx context.Context, item *Item) error
// Set writes the given item, unconditionally.
SetContext(ctx context.Context, item *Item) error
// Replace writes the given item, but only if the server *does* already
// hold data for this key.
ReplaceContext(ctx context.Context, item *Item) error
// Get sends a command to the server to retrieve the data for a single key.
GetContext(ctx context.Context, key string) (*Item, error)
// GetMulti is a batch version of Get. The returned map from keys to items
// may have fewer elements than the input slice, due to memcache cache
// misses. Each key must be at most 250 bytes in length.
// If no error is returned, the returned map will also be non-nil.
GetMultiContext(ctx context.Context, keys []string) (map[string]*Item, error)
// Delete deletes the item with the provided key.
// The error ErrNotFound is returned if the item didn't already exist in
// the cache.
DeleteContext(ctx context.Context, key string) error
// Increment atomically increments key by delta. The return value is the
// new value after being incremented or an error. If the value didn't exist
// in memcached the error is ErrNotFound. The value in memcached must be
// a decimal number, or an error will be returned.
// On 64-bit overflow, the new value wraps around.
IncrementContext(ctx context.Context, key string, delta uint64) (newValue uint64, err error)
// Decrement atomically decrements key by delta. The return value is the
// new value after being decremented or an error. If the value didn't exist
// in memcached the error is ErrNotFound. The value in memcached must be
// a decimal number, or an error will be returned. On underflow, the new
// value is capped at zero and does not wrap around.
DecrementContext(ctx context.Context, key string, delta uint64) (newValue uint64, err error)
// CompareAndSwap writes the given item that was previously returned by
// Get, if the value was neither modified nor evicted between the Get and
// the CompareAndSwap calls. The item's Key should not change between calls
// but all other item fields may differ. ErrCASConflict is returned if the
// value was modified in between the calls.
// ErrNotStored is returned if the value was evicted in between the calls.
CompareAndSwapContext(ctx context.Context, item *Item) error
// Touch updates the expiry for the given key. The seconds parameter is
// either a Unix timestamp or, if seconds is less than 1 month, the number
// of seconds into the future at which time the item will expire.
// ErrNotFound is returned if the key is not in the cache. The key must be
// at most 250 bytes in length.
TouchContext(ctx context.Context, key string, seconds int32) (err error)
}
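// A small sketch of the context-aware calls on an existing Conn (for instance
// one obtained from Dial or from a pool); the key, value and timeout are
// illustrative assumptions:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
//	defer cancel()
//	if err := conn.SetContext(ctx, &Item{Key: "k", Value: []byte("v"), Expiration: 60}); err != nil {
//		// handle set error
//	}
//	item, err := conn.GetContext(ctx, "k")
//	if err != nil {
//		// a cache miss is typically reported as ErrNotFound
//	}
//	var raw []byte
//	if err = conn.Scan(item, &raw); err != nil {
//		// handle scan error
//	}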
// Config memcache config.
type Config struct {
*pool.Config
Name string // memcache name, for trace
Proto string
Addr string
DialTimeout xtime.Duration
ReadTimeout xtime.Duration
WriteTimeout xtime.Duration
}
// Memcache is a memcache client backed by a connection pool.
type Memcache struct {
pool *Pool
}
// Reply is the result of Get
type Reply struct {
err error
item *Item
conn Conn
closed bool
}
// Replies is the result of GetMulti
type Replies struct {
err error
items map[string]*Item
usedItems map[string]struct{}
conn Conn
closed bool
}
// New creates a memcache client.
func New(cfg *Config) *Memcache {
return &Memcache{pool: NewPool(cfg)}
}
// Close closes the connection pool.
func (mc *Memcache) Close() error {
return mc.pool.Close()
}
// Conn gets a connection directly from the pool; the caller must close it.
func (mc *Memcache) Conn(ctx context.Context) Conn {
return mc.pool.Get(ctx)
}
// Set writes the given item, unconditionally.
func (mc *Memcache) Set(ctx context.Context, item *Item) (err error) {
conn := mc.pool.Get(ctx)
err = conn.SetContext(ctx, item)
conn.Close()
return
}
// Add writes the given item, if no value already exists for its key.
// ErrNotStored is returned if that condition is not met.
func (mc *Memcache) Add(ctx context.Context, item *Item) (err error) {
conn := mc.pool.Get(ctx)
err = conn.AddContext(ctx, item)
conn.Close()
return
}
// Replace writes the given item, but only if the server *does* already hold data for this key.
func (mc *Memcache) Replace(ctx context.Context, item *Item) (err error) {
conn := mc.pool.Get(ctx)
err = conn.ReplaceContext(ctx, item)
conn.Close()
return
}
// CompareAndSwap writes the given item that was previously returned by Get
func (mc *Memcache) CompareAndSwap(ctx context.Context, item *Item) (err error) {
conn := mc.pool.Get(ctx)
err = conn.CompareAndSwapContext(ctx, item)
conn.Close()
return
}
// Get sends a command to the server to retrieve the data for a single key.
func (mc *Memcache) Get(ctx context.Context, key string) *Reply {
conn := mc.pool.Get(ctx)
item, err := conn.GetContext(ctx, key)
if err != nil {
conn.Close()
}
return &Reply{err: err, item: item, conn: conn}
}
// Item returns the raw Item.
func (r *Reply) Item() *Item {
return r.item
}
// Scan converts the value read from memcache into v.
func (r *Reply) Scan(v interface{}) (err error) {
if r.err != nil {
return r.err
}
err = r.conn.Scan(r.item, v)
if !r.closed {
r.conn.Close()
r.closed = true
}
return
}
// GetMulti is a batch version of Get
func (mc *Memcache) GetMulti(ctx context.Context, keys []string) (*Replies, error) {
conn := mc.pool.Get(ctx)
items, err := conn.GetMultiContext(ctx, keys)
rs := &Replies{err: err, items: items, conn: conn, usedItems: make(map[string]struct{}, len(keys))}
if (err != nil) || (len(items) == 0) {
rs.Close()
}
return rs, err
}
// Close closes the replies and releases the underlying connection.
func (rs *Replies) Close() (err error) {
if !rs.closed {
err = rs.conn.Close()
rs.closed = true
}
return
}
// Item returns the Item for key from the replies.
func (rs *Replies) Item(key string) *Item {
return rs.items[key]
}
// Scan converts the value for key from the replies into v.
func (rs *Replies) Scan(key string, v interface{}) (err error) {
if rs.err != nil {
return rs.err
}
item, ok := rs.items[key]
if !ok {
rs.Close()
return ErrNotFound
}
rs.usedItems[key] = struct{}{}
err = rs.conn.Scan(item, v)
if (err != nil) || (len(rs.items) == len(rs.usedItems)) {
rs.Close()
}
return
}
// Keys returns the keys of the result.
func (rs *Replies) Keys() (keys []string) {
keys = make([]string, 0, len(rs.items))
for key := range rs.items {
keys = append(keys, key)
}
return
}
// Touch updates the expiry for the given key.
func (mc *Memcache) Touch(ctx context.Context, key string, timeout int32) (err error) {
conn := mc.pool.Get(ctx)
err = conn.TouchContext(ctx, key, timeout)
conn.Close()
return
}
// Delete deletes the item with the provided key.
func (mc *Memcache) Delete(ctx context.Context, key string) (err error) {
conn := mc.pool.Get(ctx)
err = conn.DeleteContext(ctx, key)
conn.Close()
return
}
// Increment atomically increments key by delta.
func (mc *Memcache) Increment(ctx context.Context, key string, delta uint64) (newValue uint64, err error) {
conn := mc.pool.Get(ctx)
newValue, err = conn.IncrementContext(ctx, key, delta)
conn.Close()
return
}
// Decrement atomically decrements key by delta.
func (mc *Memcache) Decrement(ctx context.Context, key string, delta uint64) (newValue uint64, err error) {
conn := mc.pool.Get(ctx)
newValue, err = conn.DecrementContext(ctx, key, delta)
conn.Close()
return
}
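// A condensed usage sketch for the Memcache client defined above; the address,
// pool sizes and payload are illustrative assumptions, not part of this change:
//
//	mc := New(&Config{
//		Config:       &pool.Config{Active: 10, Idle: 5, IdleTimeout: xtime.Duration(time.Minute)},
//		Name:         "demo",
//		Proto:        "tcp",
//		Addr:         "127.0.0.1:11211",
//		DialTimeout:  xtime.Duration(time.Second),
//		ReadTimeout:  xtime.Duration(time.Second),
//		WriteTimeout: xtime.Duration(time.Second),
//	})
//	defer mc.Close()
//	ctx := context.Background()
//	if err := mc.Set(ctx, &Item{Key: "k", Object: map[string]int64{"a": 1}, Flags: FlagJSON, Expiration: 60}); err != nil {
//		// handle set error
//	}
//	var got map[string]int64
//	if err := mc.Get(ctx, "k").Scan(&got); err != nil {
//		// ErrNotFound on a miss
//	}
//	if rs, err := mc.GetMulti(ctx, []string{"k", "k2"}); err == nil {
//		defer rs.Close()
//		for _, key := range rs.Keys() {
//			var v map[string]int64
//			_ = rs.Scan(key, &v)
//		}
//	}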

@ -0,0 +1,300 @@
package memcache
import (
"context"
"fmt"
"reflect"
"testing"
"time"
)
func Test_client_Set(t *testing.T) {
type args struct {
c context.Context
item *Item
}
tests := []struct {
name string
args args
wantErr bool
}{
{name: "set value", args: args{c: context.Background(), item: &Item{Key: "Test_client_Set", Value: []byte("abc")}}, wantErr: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := testMemcache.Set(tt.args.c, tt.args.item); (err != nil) != tt.wantErr {
t.Errorf("client.Set() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_client_Add(t *testing.T) {
type args struct {
c context.Context
item *Item
}
key := fmt.Sprintf("Test_client_Add_%d", time.Now().Unix())
tests := []struct {
name string
args args
wantErr bool
}{
{name: "add not exist value", args: args{c: context.Background(), item: &Item{Key: key, Value: []byte("abc")}}, wantErr: false},
{name: "add exist value", args: args{c: context.Background(), item: &Item{Key: key, Value: []byte("abc")}}, wantErr: true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := testMemcache.Add(tt.args.c, tt.args.item); (err != nil) != tt.wantErr {
t.Errorf("client.Add() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_client_Replace(t *testing.T) {
key := fmt.Sprintf("Test_client_Replace_%d", time.Now().Unix())
ekey := "Test_client_Replace_exist"
testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("ok")})
type args struct {
c context.Context
item *Item
}
tests := []struct {
name string
args args
wantErr bool
}{
{name: "not exist value", args: args{c: context.Background(), item: &Item{Key: key, Value: []byte("abc")}}, wantErr: true},
{name: "exist value", args: args{c: context.Background(), item: &Item{Key: ekey, Value: []byte("abc")}}, wantErr: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := testMemcache.Replace(tt.args.c, tt.args.item); (err != nil) != tt.wantErr {
t.Errorf("client.Replace() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_client_CompareAndSwap(t *testing.T) {
key := fmt.Sprintf("Test_client_CompareAndSwap_%d", time.Now().Unix())
ekey := "Test_client_CompareAndSwap_k"
testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("old")})
cas := testMemcache.Get(context.Background(), ekey).Item().cas
type args struct {
c context.Context
item *Item
}
tests := []struct {
name string
args args
wantErr bool
}{
{name: "not exist value", args: args{c: context.Background(), item: &Item{Key: key, Value: []byte("abc")}}, wantErr: true},
{name: "exist value", args: args{c: context.Background(), item: &Item{Key: ekey, cas: cas, Value: []byte("abc")}}, wantErr: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := testMemcache.CompareAndSwap(tt.args.c, tt.args.item); (err != nil) != tt.wantErr {
t.Errorf("client.CompareAndSwap() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_client_Get(t *testing.T) {
key := fmt.Sprintf("Test_client_Get_%d", time.Now().Unix())
ekey := "Test_client_Get_k"
testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("old")})
type args struct {
c context.Context
key string
}
tests := []struct {
name string
args args
want string
wantErr bool
}{
{name: "not exist value", args: args{c: context.Background(), key: key}, wantErr: true},
{name: "exist value", args: args{c: context.Background(), key: ekey}, wantErr: false, want: "old"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var res string
if err := testMemcache.Get(tt.args.c, tt.args.key).Scan(&res); (err != nil) != tt.wantErr || res != tt.want {
t.Errorf("client.Get() = %v, want %v, got err: %v, want err: %v", err, tt.want, err, tt.wantErr)
}
})
}
}
func Test_client_Touch(t *testing.T) {
key := fmt.Sprintf("Test_client_Touch_%d", time.Now().Unix())
ekey := "Test_client_Touch_k"
testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("old")})
type args struct {
c context.Context
key string
timeout int32
}
tests := []struct {
name string
args args
wantErr bool
}{
{name: "not exist value", args: args{c: context.Background(), key: key, timeout: 100000}, wantErr: true},
{name: "exist value", args: args{c: context.Background(), key: ekey, timeout: 100000}, wantErr: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := testMemcache.Touch(tt.args.c, tt.args.key, tt.args.timeout); (err != nil) != tt.wantErr {
t.Errorf("client.Touch() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_client_Delete(t *testing.T) {
key := fmt.Sprintf("Test_client_Delete_%d", time.Now().Unix())
ekey := "Test_client_Delete_k"
testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("old")})
type args struct {
c context.Context
key string
}
tests := []struct {
name string
args args
wantErr bool
}{
{name: "not exist value", args: args{c: context.Background(), key: key}, wantErr: true},
{name: "exist value", args: args{c: context.Background(), key: ekey}, wantErr: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if err := testMemcache.Delete(tt.args.c, tt.args.key); (err != nil) != tt.wantErr {
t.Errorf("client.Delete() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_client_Increment(t *testing.T) {
key := fmt.Sprintf("Test_client_Increment_%d", time.Now().Unix())
ekey := "Test_client_Increment_k"
testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("1")})
type args struct {
c context.Context
key string
delta uint64
}
tests := []struct {
name string
args args
wantNewValue uint64
wantErr bool
}{
{name: "not exist value", args: args{c: context.Background(), key: key, delta: 10}, wantErr: true},
{name: "exist value", args: args{c: context.Background(), key: ekey, delta: 10}, wantErr: false, wantNewValue: 11},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotNewValue, err := testMemcache.Increment(tt.args.c, tt.args.key, tt.args.delta)
if (err != nil) != tt.wantErr {
t.Errorf("client.Increment() error = %v, wantErr %v", err, tt.wantErr)
return
}
if gotNewValue != tt.wantNewValue {
t.Errorf("client.Increment() = %v, want %v", gotNewValue, tt.wantNewValue)
}
})
}
}
func Test_client_Decrement(t *testing.T) {
key := fmt.Sprintf("Test_client_Decrement_%d", time.Now().Unix())
ekey := "Test_client_Decrement_k"
testMemcache.Set(context.Background(), &Item{Key: ekey, Value: []byte("100")})
type args struct {
c context.Context
key string
delta uint64
}
tests := []struct {
name string
args args
wantNewValue uint64
wantErr bool
}{
{name: "not exist value", args: args{c: context.Background(), key: key, delta: 10}, wantErr: true},
{name: "exist value", args: args{c: context.Background(), key: ekey, delta: 10}, wantErr: false, wantNewValue: 90},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotNewValue, err := testMemcache.Decrement(tt.args.c, tt.args.key, tt.args.delta)
if (err != nil) != tt.wantErr {
t.Errorf("client.Decrement() error = %v, wantErr %v", err, tt.wantErr)
return
}
if gotNewValue != tt.wantNewValue {
t.Errorf("client.Decrement() = %v, want %v", gotNewValue, tt.wantNewValue)
}
})
}
}
func Test_client_GetMulti(t *testing.T) {
key := fmt.Sprintf("Test_client_GetMulti_%d", time.Now().Unix())
ekey1 := "Test_client_GetMulti_k1"
ekey2 := "Test_client_GetMulti_k2"
testMemcache.Set(context.Background(), &Item{Key: ekey1, Value: []byte("1")})
testMemcache.Set(context.Background(), &Item{Key: ekey2, Value: []byte("2")})
keys := []string{key, ekey1, ekey2}
rows, err := testMemcache.GetMulti(context.Background(), keys)
if err != nil {
t.Errorf("client.GetMulti() error = %v, wantErr %v", err, nil)
}
tests := []struct {
key string
wantNewValue string
wantErr bool
nilItem bool
}{
{key: ekey1, wantErr: false, wantNewValue: "1", nilItem: false},
{key: ekey2, wantErr: false, wantNewValue: "2", nilItem: false},
{key: key, wantErr: true, nilItem: true},
}
if reflect.DeepEqual(keys, rows.Keys()) {
t.Errorf("got %v, expect: %v", rows.Keys(), keys)
}
for _, tt := range tests {
t.Run(tt.key, func(t *testing.T) {
var gotNewValue string
err = rows.Scan(tt.key, &gotNewValue)
if (err != nil) != tt.wantErr {
t.Errorf("rows.Scan() error = %v, wantErr %v", err, tt.wantErr)
return
}
if gotNewValue != tt.wantNewValue {
t.Errorf("rows.Value() = %v, want %v", gotNewValue, tt.wantNewValue)
}
if (rows.Item(tt.key) == nil) != tt.nilItem {
t.Errorf("rows.Item() = %v, want %v", rows.Item(tt.key) == nil, tt.nilItem)
}
})
}
err = rows.Close()
if err != nil {
t.Errorf("client.Replies.Close() error = %v, wantErr %v", err, nil)
}
}
func Test_client_Conn(t *testing.T) {
conn := testMemcache.Conn(context.Background())
if conn == nil {
t.Fatal("expected a connection, got nil")
}
defer conn.Close()
}

@ -1,59 +0,0 @@
package memcache
import (
"context"
)
// MockErr for unit test.
type MockErr struct {
Error error
}
var _ Conn = MockErr{}
// MockWith return a mock conn.
func MockWith(err error) MockErr {
return MockErr{Error: err}
}
// Err .
func (m MockErr) Err() error { return m.Error }
// Close .
func (m MockErr) Close() error { return m.Error }
// Add .
func (m MockErr) Add(item *Item) error { return m.Error }
// Set .
func (m MockErr) Set(item *Item) error { return m.Error }
// Replace .
func (m MockErr) Replace(item *Item) error { return m.Error }
// CompareAndSwap .
func (m MockErr) CompareAndSwap(item *Item) error { return m.Error }
// Get .
func (m MockErr) Get(key string) (*Item, error) { return nil, m.Error }
// GetMulti .
func (m MockErr) GetMulti(keys []string) (map[string]*Item, error) { return nil, m.Error }
// Touch .
func (m MockErr) Touch(key string, timeout int32) error { return m.Error }
// Delete .
func (m MockErr) Delete(key string) error { return m.Error }
// Increment .
func (m MockErr) Increment(key string, delta uint64) (uint64, error) { return 0, m.Error }
// Decrement .
func (m MockErr) Decrement(key string, delta uint64) (uint64, error) { return 0, m.Error }
// Scan .
func (m MockErr) Scan(item *Item, v interface{}) error { return m.Error }
// WithContext .
func (m MockErr) WithContext(ctx context.Context) Conn { return m }

@ -1,197 +0,0 @@
package memcache
import (
"context"
"io"
"time"
"github.com/bilibili/kratos/pkg/container/pool"
"github.com/bilibili/kratos/pkg/stat"
xtime "github.com/bilibili/kratos/pkg/time"
)
var stats = stat.Cache
// Config memcache config.
type Config struct {
*pool.Config
Name string // memcache name, for trace
Proto string
Addr string
DialTimeout xtime.Duration
ReadTimeout xtime.Duration
WriteTimeout xtime.Duration
}
// Pool memcache connection pool struct.
type Pool struct {
p pool.Pool
c *Config
}
// NewPool new a memcache conn pool.
func NewPool(c *Config) (p *Pool) {
if c.DialTimeout <= 0 || c.ReadTimeout <= 0 || c.WriteTimeout <= 0 {
panic("must config memcache timeout")
}
p1 := pool.NewList(c.Config)
cnop := DialConnectTimeout(time.Duration(c.DialTimeout))
rdop := DialReadTimeout(time.Duration(c.ReadTimeout))
wrop := DialWriteTimeout(time.Duration(c.WriteTimeout))
p1.New = func(ctx context.Context) (io.Closer, error) {
conn, err := Dial(c.Proto, c.Addr, cnop, rdop, wrop)
return &traceConn{Conn: conn, address: c.Addr}, err
}
p = &Pool{p: p1, c: c}
return
}
// Get gets a connection. The application must close the returned connection.
// This method always returns a valid connection so that applications can defer
// error handling to the first use of the connection. If there is an error
// getting an underlying connection, then the connection Err, Do, Send, Flush
// and Receive methods return that error.
func (p *Pool) Get(ctx context.Context) Conn {
c, err := p.p.Get(ctx)
if err != nil {
return errorConnection{err}
}
c1, _ := c.(Conn)
return &pooledConnection{p: p, c: c1.WithContext(ctx), ctx: ctx}
}
// Close release the resources used by the pool.
func (p *Pool) Close() error {
return p.p.Close()
}
type pooledConnection struct {
p *Pool
c Conn
ctx context.Context
}
func pstat(key string, t time.Time, err error) {
stats.Timing(key, int64(time.Since(t)/time.Millisecond))
if err != nil {
if msg := formatErr(err); msg != "" {
stats.Incr("memcache", msg)
}
}
}
func (pc *pooledConnection) Close() error {
c := pc.c
if _, ok := c.(errorConnection); ok {
return nil
}
pc.c = errorConnection{ErrConnClosed}
pc.p.p.Put(context.Background(), c, c.Err() != nil)
return nil
}
func (pc *pooledConnection) Err() error {
return pc.c.Err()
}
func (pc *pooledConnection) Set(item *Item) (err error) {
now := time.Now()
err = pc.c.Set(item)
pstat("memcache:set", now, err)
return
}
func (pc *pooledConnection) Add(item *Item) (err error) {
now := time.Now()
err = pc.c.Add(item)
pstat("memcache:add", now, err)
return
}
func (pc *pooledConnection) Replace(item *Item) (err error) {
now := time.Now()
err = pc.c.Replace(item)
pstat("memcache:replace", now, err)
return
}
func (pc *pooledConnection) CompareAndSwap(item *Item) (err error) {
now := time.Now()
err = pc.c.CompareAndSwap(item)
pstat("memcache:cas", now, err)
return
}
func (pc *pooledConnection) Get(key string) (r *Item, err error) {
now := time.Now()
r, err = pc.c.Get(key)
pstat("memcache:get", now, err)
return
}
func (pc *pooledConnection) GetMulti(keys []string) (res map[string]*Item, err error) {
// if keys is empty slice returns empty map direct
if len(keys) == 0 {
return make(map[string]*Item), nil
}
now := time.Now()
res, err = pc.c.GetMulti(keys)
pstat("memcache:gets", now, err)
return
}
func (pc *pooledConnection) Touch(key string, timeout int32) (err error) {
now := time.Now()
err = pc.c.Touch(key, timeout)
pstat("memcache:touch", now, err)
return
}
func (pc *pooledConnection) Scan(item *Item, v interface{}) error {
return pc.c.Scan(item, v)
}
func (pc *pooledConnection) WithContext(ctx context.Context) Conn {
// TODO: set context
pc.ctx = ctx
return pc
}
func (pc *pooledConnection) Delete(key string) (err error) {
now := time.Now()
err = pc.c.Delete(key)
pstat("memcache:delete", now, err)
return
}
func (pc *pooledConnection) Increment(key string, delta uint64) (newValue uint64, err error) {
now := time.Now()
newValue, err = pc.c.Increment(key, delta)
pstat("memcache:increment", now, err)
return
}
func (pc *pooledConnection) Decrement(key string, delta uint64) (newValue uint64, err error) {
now := time.Now()
newValue, err = pc.c.Decrement(key, delta)
pstat("memcache:decrement", now, err)
return
}
type errorConnection struct{ err error }
func (ec errorConnection) Err() error { return ec.err }
func (ec errorConnection) Close() error { return ec.err }
func (ec errorConnection) Add(item *Item) error { return ec.err }
func (ec errorConnection) Set(item *Item) error { return ec.err }
func (ec errorConnection) Replace(item *Item) error { return ec.err }
func (ec errorConnection) CompareAndSwap(item *Item) error { return ec.err }
func (ec errorConnection) Get(key string) (*Item, error) { return nil, ec.err }
func (ec errorConnection) GetMulti(keys []string) (map[string]*Item, error) { return nil, ec.err }
func (ec errorConnection) Touch(key string, timeout int32) error { return ec.err }
func (ec errorConnection) Delete(key string) error { return ec.err }
func (ec errorConnection) Increment(key string, delta uint64) (uint64, error) { return 0, ec.err }
func (ec errorConnection) Decrement(key string, delta uint64) (uint64, error) { return 0, ec.err }
func (ec errorConnection) Scan(item *Item, v interface{}) error { return ec.err }
func (ec errorConnection) WithContext(ctx context.Context) Conn { return ec }

@ -0,0 +1,204 @@
package memcache
import (
"context"
"fmt"
"io"
"time"
"github.com/bilibili/kratos/pkg/container/pool"
"github.com/bilibili/kratos/pkg/stat"
)
var stats = stat.Cache
// Pool memcache connection pool struct.
// Deprecated: Use Memcache instead
type Pool struct {
p pool.Pool
c *Config
}
// NewPool new a memcache conn pool.
// Deprecated: Use New instead
func NewPool(cfg *Config) (p *Pool) {
if cfg.DialTimeout <= 0 || cfg.ReadTimeout <= 0 || cfg.WriteTimeout <= 0 {
panic("must config memcache timeout")
}
p1 := pool.NewList(cfg.Config)
cnop := DialConnectTimeout(time.Duration(cfg.DialTimeout))
rdop := DialReadTimeout(time.Duration(cfg.ReadTimeout))
wrop := DialWriteTimeout(time.Duration(cfg.WriteTimeout))
p1.New = func(ctx context.Context) (io.Closer, error) {
conn, err := Dial(cfg.Proto, cfg.Addr, cnop, rdop, wrop)
return newTraceConn(conn, fmt.Sprintf("%s://%s", cfg.Proto, cfg.Addr)), err
}
p = &Pool{p: p1, c: cfg}
return
}
// Get gets a connection. The application must close the returned connection.
// This method always returns a valid connection so that applications can defer
// error handling to the first use of the connection. If there is an error
// getting an underlying connection, then Err and every other method on the
// returned connection report that error.
func (p *Pool) Get(ctx context.Context) Conn {
c, err := p.p.Get(ctx)
if err != nil {
return errConn{err}
}
c1, _ := c.(Conn)
return &poolConn{p: p, c: c1, ctx: ctx}
}
// Close release the resources used by the pool.
func (p *Pool) Close() error {
return p.p.Close()
}
type poolConn struct {
c Conn
p *Pool
ctx context.Context
}
func pstat(key string, t time.Time, err error) {
stats.Timing(key, int64(time.Since(t)/time.Millisecond))
if err != nil {
if msg := formatErr(err); msg != "" {
stats.Incr("memcache", msg)
}
}
}
func (pc *poolConn) Close() error {
c := pc.c
if _, ok := c.(errConn); ok {
return nil
}
pc.c = errConn{ErrConnClosed}
pc.p.p.Put(context.Background(), c, c.Err() != nil)
return nil
}
func (pc *poolConn) Err() error {
return pc.c.Err()
}
func (pc *poolConn) Set(item *Item) (err error) {
return pc.SetContext(pc.ctx, item)
}
func (pc *poolConn) Add(item *Item) (err error) {
return pc.AddContext(pc.ctx, item)
}
func (pc *poolConn) Replace(item *Item) (err error) {
return pc.ReplaceContext(pc.ctx, item)
}
func (pc *poolConn) CompareAndSwap(item *Item) (err error) {
return pc.CompareAndSwapContext(pc.ctx, item)
}
func (pc *poolConn) Get(key string) (r *Item, err error) {
return pc.c.GetContext(pc.ctx, key)
}
func (pc *poolConn) GetMulti(keys []string) (res map[string]*Item, err error) {
return pc.c.GetMultiContext(pc.ctx, keys)
}
func (pc *poolConn) Touch(key string, timeout int32) (err error) {
return pc.c.TouchContext(pc.ctx, key, timeout)
}
func (pc *poolConn) Scan(item *Item, v interface{}) error {
return pc.c.Scan(item, v)
}
func (pc *poolConn) Delete(key string) (err error) {
return pc.c.DeleteContext(pc.ctx, key)
}
func (pc *poolConn) Increment(key string, delta uint64) (newValue uint64, err error) {
return pc.c.IncrementContext(pc.ctx, key, delta)
}
func (pc *poolConn) Decrement(key string, delta uint64) (newValue uint64, err error) {
return pc.c.DecrementContext(pc.ctx, key, delta)
}
func (pc *poolConn) AddContext(ctx context.Context, item *Item) error {
now := time.Now()
err := pc.c.AddContext(ctx, item)
pstat("memcache:add", now, err)
return err
}
func (pc *poolConn) SetContext(ctx context.Context, item *Item) error {
now := time.Now()
err := pc.c.SetContext(ctx, item)
pstat("memcache:set", now, err)
return err
}
func (pc *poolConn) ReplaceContext(ctx context.Context, item *Item) error {
now := time.Now()
err := pc.c.ReplaceContext(ctx, item)
pstat("memcache:replace", now, err)
return err
}
func (pc *poolConn) GetContext(ctx context.Context, key string) (*Item, error) {
now := time.Now()
item, err := pc.c.Get(key)
pstat("memcache:get", now, err)
return item, err
}
func (pc *poolConn) GetMultiContext(ctx context.Context, keys []string) (map[string]*Item, error) {
// if keys is an empty slice, return an empty map directly
if len(keys) == 0 {
return make(map[string]*Item), nil
}
now := time.Now()
items, err := pc.c.GetMulti(keys)
pstat("memcache:gets", now, err)
return items, err
}
func (pc *poolConn) DeleteContext(ctx context.Context, key string) error {
now := time.Now()
err := pc.c.Delete(key)
pstat("memcache:delete", now, err)
return err
}
func (pc *poolConn) IncrementContext(ctx context.Context, key string, delta uint64) (uint64, error) {
now := time.Now()
newValue, err := pc.c.IncrementContext(ctx, key, delta)
pstat("memcache:increment", now, err)
return newValue, err
}
func (pc *poolConn) DecrementContext(ctx context.Context, key string, delta uint64) (uint64, error) {
now := time.Now()
newValue, err := pc.c.DecrementContext(ctx, key, delta)
pstat("memcache:decrement", now, err)
return newValue, err
}
func (pc *poolConn) CompareAndSwapContext(ctx context.Context, item *Item) error {
now := time.Now()
err := pc.c.CompareAndSwap(item)
pstat("memcache:cas", now, err)
return err
}
func (pc *poolConn) TouchContext(ctx context.Context, key string, seconds int32) error {
now := time.Now()
err := pc.c.Touch(key, seconds)
pstat("memcache:touch", now, err)
return err
}

@ -0,0 +1,545 @@
package memcache
import (
"bytes"
"context"
"reflect"
"testing"
"time"
"github.com/bilibili/kratos/pkg/container/pool"
xtime "github.com/bilibili/kratos/pkg/time"
)
var itempool = &Item{
Key: "testpool",
Value: []byte("testpool"),
Flags: 0,
Expiration: 60,
cas: 0,
}
var itempool2 = &Item{
Key: "test_count",
Value: []byte("0"),
Flags: 0,
Expiration: 1000,
cas: 0,
}
type testObject struct {
Mid int64
Value []byte
}
var largeValue = &Item{
Key: "large_value",
Flags: FlagGOB | FlagGzip,
Expiration: 1000,
cas: 0,
}
var largeValueBoundary = &Item{
Key: "large_value",
Flags: FlagGOB | FlagGzip,
Expiration: 1000,
cas: 0,
}
func TestPoolSet(t *testing.T) {
conn := testPool.Get(context.Background())
defer conn.Close()
// set
if err := conn.Set(itempool); err != nil {
t.Errorf("memcache: set error(%v)", err)
} else {
t.Logf("memcache: set value: %s", itempool.Value)
}
if err := conn.Close(); err != nil {
t.Errorf("memcache: close error(%v)", err)
}
}
func TestPoolGet(t *testing.T) {
key := "testpool"
conn := testPool.Get(context.Background())
defer conn.Close()
// get
if res, err := conn.Get(key); err != nil {
t.Errorf("memcache: get error(%v)", err)
} else {
t.Logf("memcache: get value: %s", res.Value)
}
if _, err := conn.Get("not_found"); err != ErrNotFound {
t.Errorf("memcache: expceted err is not found but got: %v", err)
}
if err := conn.Close(); err != nil {
t.Errorf("memcache: close error(%v)", err)
}
}
func TestPoolGetMulti(t *testing.T) {
conn := testPool.Get(context.Background())
defer conn.Close()
s := []string{"testpool", "test1"}
// get
if res, err := conn.GetMulti(s); err != nil {
t.Errorf("memcache: gets error(%v)", err)
} else {
t.Logf("memcache: gets value: %d", len(res))
}
if err := conn.Close(); err != nil {
t.Errorf("memcache: close error(%v)", err)
}
}
func TestPoolTouch(t *testing.T) {
key := "testpool"
conn := testPool.Get(context.Background())
defer conn.Close()
// touch
if err := conn.Touch(key, 10); err != nil {
t.Errorf("memcache: touch error(%v)", err)
}
if err := conn.Close(); err != nil {
t.Errorf("memcache: close error(%v)", err)
}
}
func TestPoolIncrement(t *testing.T) {
key := "test_count"
conn := testPool.Get(context.Background())
defer conn.Close()
// set
if err := conn.Set(itempool2); err != nil {
t.Errorf("memcache: set error(%v)", err)
} else {
t.Logf("memcache: set value: 0")
}
// incr
if res, err := conn.Increment(key, 1); err != nil {
t.Errorf("memcache: incr error(%v)", err)
} else {
t.Logf("memcache: incr n: %d", res)
if res != 1 {
t.Errorf("memcache: expected res=1 but got %d", res)
}
}
// decr
if res, err := conn.Decrement(key, 1); err != nil {
t.Errorf("memcache: decr error(%v)", err)
} else {
t.Logf("memcache: decr n: %d", res)
if res != 0 {
t.Errorf("memcache: expected res=0 but got %d", res)
}
}
if err := conn.Close(); err != nil {
t.Errorf("memcache: close error(%v)", err)
}
}
func TestPoolErr(t *testing.T) {
conn := testPool.Get(context.Background())
defer conn.Close()
if err := conn.Close(); err != nil {
t.Errorf("memcache: close error(%v)", err)
}
if err := conn.Err(); err == nil {
t.Errorf("memcache: err not nil")
} else {
t.Logf("memcache: err: %v", err)
}
}
func TestPoolCompareAndSwap(t *testing.T) {
conn := testPool.Get(context.Background())
defer conn.Close()
key := "testpool"
//cas
if r, err := conn.Get(key); err != nil {
t.Errorf("conn.Get() error(%v)", err)
} else {
r.Value = []byte("shit")
if err := conn.CompareAndSwap(r); err != nil {
t.Errorf("conn.Get() error(%v)", err)
}
r, _ := conn.Get("testpool")
if r.Key != "testpool" || !bytes.Equal(r.Value, []byte("shit")) || r.Flags != 0 {
t.Error("conn.Get() error, value")
}
if err := conn.Close(); err != nil {
t.Errorf("memcache: close error(%v)", err)
}
}
}
func TestPoolDel(t *testing.T) {
key := "testpool"
conn := testPool.Get(context.Background())
defer conn.Close()
// delete
if err := conn.Delete(key); err != nil {
t.Errorf("memcache: delete error(%v)", err)
} else {
t.Logf("memcache: delete key: %s", key)
}
if err := conn.Close(); err != nil {
t.Errorf("memcache: close error(%v)", err)
}
}
func BenchmarkMemcache(b *testing.B) {
c := &Config{
Name: "test",
Proto: "tcp",
Addr: testMemcacheAddr,
DialTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
}
c.Config = &pool.Config{
Active: 10,
Idle: 5,
IdleTimeout: xtime.Duration(90 * time.Second),
}
testPool = NewPool(c)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
conn := testPool.Get(context.Background())
if err := conn.Close(); err != nil {
b.Errorf("memcache: close error(%v)", err)
}
}
})
if err := testPool.Close(); err != nil {
b.Errorf("memcache: close error(%v)", err)
}
}
func TestPoolSetLargeValue(t *testing.T) {
var b bytes.Buffer
for i := 0; i < 4000000; i++ {
b.WriteByte(1)
}
obj := &testObject{}
obj.Mid = 1000
obj.Value = b.Bytes()
largeValue.Object = obj
conn := testPool.Get(context.Background())
defer conn.Close()
// set
if err := conn.Set(largeValue); err != nil {
t.Errorf("memcache: set error(%v)", err)
}
if err := conn.Close(); err != nil {
t.Errorf("memcache: close error(%v)", err)
}
}
func TestPoolGetLargeValue(t *testing.T) {
key := largeValue.Key
conn := testPool.Get(context.Background())
defer conn.Close()
// get
var err error
if _, err = conn.Get(key); err != nil {
t.Errorf("memcache: large get error(%+v)", err)
}
}
func TestPoolGetMultiLargeValue(t *testing.T) {
conn := testPool.Get(context.Background())
defer conn.Close()
s := []string{largeValue.Key, largeValue.Key}
// get
if res, err := conn.GetMulti(s); err != nil {
t.Errorf("memcache: gets error(%v)", err)
} else {
t.Logf("memcache: gets value: %d", len(res))
}
if err := conn.Close(); err != nil {
t.Errorf("memcache: close error(%v)", err)
}
}
func TestPoolSetLargeValueBoundary(t *testing.T) {
var b bytes.Buffer
for i := 0; i < _largeValue; i++ {
b.WriteByte(1)
}
obj := &testObject{}
obj.Mid = 1000
obj.Value = b.Bytes()
largeValueBoundary.Object = obj
conn := testPool.Get(context.Background())
defer conn.Close()
// set
if err := conn.Set(largeValueBoundary); err != nil {
t.Errorf("memcache: set error(%v)", err)
}
if err := conn.Close(); err != nil {
t.Errorf("memcache: close error(%v)", err)
}
}
func TestPoolGetLargeValueBoundary(t *testing.T) {
key := largeValueBoundary.Key
conn := testPool.Get(context.Background())
defer conn.Close()
// get
var err error
if _, err = conn.Get(key); err != nil {
t.Errorf("memcache: large get error(%v)", err)
}
}
func TestPoolAdd(t *testing.T) {
var (
key = "test_add"
item = &Item{
Key: key,
Value: []byte("0"),
Flags: 0,
Expiration: 60,
cas: 0,
}
conn = testPool.Get(context.Background())
)
defer conn.Close()
conn.Delete(key)
if err := conn.Add(item); err != nil {
t.Errorf("memcache: add error(%v)", err)
}
if err := conn.Add(item); err != ErrNotStored {
t.Errorf("memcache: add error(%v)", err)
}
}
func TestNewPool(t *testing.T) {
type args struct {
cfg *Config
}
tests := []struct {
name string
args args
wantErr error
wantPanic bool
}{
{
"NewPoolIllegalDialTimeout",
args{
&Config{
Name: "test_illegal_dial_timeout",
Proto: "tcp",
Addr: testMemcacheAddr,
DialTimeout: xtime.Duration(-time.Second),
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
},
},
nil,
true,
},
{
"NewPoolIllegalReadTimeout",
args{
&Config{
Name: "test_illegal_read_timeout",
Proto: "tcp",
Addr: testMemcacheAddr,
DialTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(-time.Second),
WriteTimeout: xtime.Duration(time.Second),
},
},
nil,
true,
},
{
"NewPoolIllegalWriteTimeout",
args{
&Config{
Name: "test_illegal_write_timeout",
Proto: "tcp",
Addr: testMemcacheAddr,
DialTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(-time.Second),
},
},
nil,
true,
},
{
"NewPool",
args{
&Config{
Name: "test_new",
Proto: "tcp",
Addr: testMemcacheAddr,
DialTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
},
},
nil,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
defer func() {
r := recover()
if (r != nil) != tt.wantPanic {
t.Errorf("wantPanic recover = %v, wantPanic = %v", r, tt.wantPanic)
}
}()
if gotP := NewPool(tt.args.cfg); gotP == nil {
t.Error("NewPool() failed, got nil")
}
})
}
}
func TestPool_Get(t *testing.T) {
type args struct {
ctx context.Context
}
tests := []struct {
name string
p *Pool
args args
wantErr bool
n int
}{
{
"Get",
NewPool(&Config{
Config: &pool.Config{
Active: 3,
Idle: 2,
},
Name: "test_get",
Proto: "tcp",
Addr: testMemcacheAddr,
DialTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
}),
args{context.TODO()},
false,
3,
},
{
"GetExceededPoolSize",
NewPool(&Config{
Config: &pool.Config{
Active: 3,
Idle: 2,
},
Name: "test_get_out",
Proto: "tcp",
Addr: testMemcacheAddr,
DialTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
}),
args{context.TODO()},
true,
6,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
for i := 1; i <= tt.n; i++ {
got := tt.p.Get(tt.args.ctx)
if reflect.TypeOf(got) == reflect.TypeOf(errConn{}) {
if !tt.wantErr {
t.Errorf("got errConn, export Conn")
}
return
} else {
if tt.wantErr {
if i > tt.p.c.Active {
t.Errorf("got Conn, export errConn")
}
}
}
}
})
}
}
func TestPool_Close(t *testing.T) {
type args struct {
ctx context.Context
}
tests := []struct {
name string
p *Pool
args args
wantErr bool
g int
c int
}{
{
"Close",
NewPool(&Config{
Config: &pool.Config{
Active: 1,
Idle: 1,
},
Name: "test_get",
Proto: "tcp",
Addr: testMemcacheAddr,
DialTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
}),
args{context.TODO()},
false,
3,
3,
},
{
"CloseExceededPoolSize",
NewPool(&Config{
Config: &pool.Config{
Active: 1,
Idle: 1,
},
Name: "test_get_out",
Proto: "tcp",
Addr: testMemcacheAddr,
DialTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
}),
args{context.TODO()},
true,
5,
3,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
for i := 1; i <= tt.g; i++ {
got := tt.p.Get(tt.args.ctx)
if err := got.Close(); err != nil {
if !tt.wantErr {
t.Error(err)
}
}
if i <= tt.c {
if err := got.Close(); err != nil {
t.Error(err)
}
}
}
})
}
}

@ -0,0 +1,48 @@
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
load(
"@io_bazel_rules_go//proto:def.bzl",
"go_proto_library",
)
go_library(
name = "go_default_library",
srcs = [],
embed = [":proto_go_proto"],
importpath = "go-common/library/cache/memcache/test",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = ["@com_github_golang_protobuf//proto:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
proto_library(
name = "test_proto",
srcs = ["test.proto"],
import_prefix = "go-common/library/cache/memcache/test",
strip_import_prefix = "",
tags = ["automanaged"],
)
go_proto_library(
name = "proto_go_proto",
compilers = ["@io_bazel_rules_go//proto:go_proto"],
importpath = "go-common/library/cache/memcache/test",
proto = ":test_proto",
tags = ["automanaged"],
)

@ -0,0 +1,375 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: test.proto
/*
Package proto is a generated protocol buffer package.
It is generated from these files:
test.proto
It has these top-level messages:
TestItem
*/
package proto
import proto1 "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto1.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto1.ProtoPackageIsVersion2 // please upgrade the proto package
type FOO int32
const (
FOO_X FOO = 0
)
var FOO_name = map[int32]string{
0: "X",
}
var FOO_value = map[string]int32{
"X": 0,
}
func (x FOO) String() string {
return proto1.EnumName(FOO_name, int32(x))
}
func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptorTest, []int{0} }
type TestItem struct {
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
Age int32 `protobuf:"varint,2,opt,name=Age,proto3" json:"Age,omitempty"`
}
func (m *TestItem) Reset() { *m = TestItem{} }
func (m *TestItem) String() string { return proto1.CompactTextString(m) }
func (*TestItem) ProtoMessage() {}
func (*TestItem) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{0} }
func (m *TestItem) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *TestItem) GetAge() int32 {
if m != nil {
return m.Age
}
return 0
}
func init() {
proto1.RegisterType((*TestItem)(nil), "proto.TestItem")
proto1.RegisterEnum("proto.FOO", FOO_name, FOO_value)
}
func (m *TestItem) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *TestItem) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintTest(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if m.Age != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintTest(dAtA, i, uint64(m.Age))
}
return i, nil
}
func encodeFixed64Test(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Test(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintTest(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *TestItem) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovTest(uint64(l))
}
if m.Age != 0 {
n += 1 + sovTest(uint64(m.Age))
}
return n
}
func sovTest(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozTest(x uint64) (n int) {
return sovTest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *TestItem) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTest
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: TestItem: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: TestItem: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTest
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTest
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Age", wireType)
}
m.Age = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTest
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Age |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTest(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTest
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTest(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTest
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTest
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTest
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthTest
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTest
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipTest(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthTest = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTest = fmt.Errorf("proto: integer overflow")
)
func init() { proto1.RegisterFile("test.proto", fileDescriptorTest) }
var fileDescriptorTest = []byte{
// 122 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e,
0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, 0x53, 0x4a, 0x06, 0x5c, 0x1c, 0x21, 0xa9,
0xc5, 0x25, 0x9e, 0x25, 0xa9, 0xb9, 0x42, 0x42, 0x5c, 0x2c, 0x7e, 0x89, 0xb9, 0xa9, 0x12, 0x8c,
0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x90, 0x00, 0x17, 0xb3, 0x63, 0x7a, 0xaa, 0x04, 0x93,
0x02, 0xa3, 0x06, 0x6b, 0x10, 0x88, 0xa9, 0xc5, 0xc3, 0xc5, 0xec, 0xe6, 0xef, 0x2f, 0xc4, 0xca,
0xc5, 0x18, 0x21, 0xc0, 0xe0, 0x24, 0x70, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f,
0x1e, 0xc9, 0x31, 0xce, 0x78, 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0x36, 0xd8, 0x18, 0x10, 0x00, 0x00,
0xff, 0xff, 0x16, 0x80, 0x60, 0x15, 0x6d, 0x00, 0x00, 0x00,
}

@ -0,0 +1,12 @@
syntax = "proto3";
package proto;
enum FOO
{
X = 0;
};
message TestItem{
string Name = 1;
int32 Age = 2;
}

@ -1,109 +0,0 @@
package memcache
import (
"context"
"strconv"
"strings"
"time"
"github.com/bilibili/kratos/pkg/log"
"github.com/bilibili/kratos/pkg/net/trace"
)
const (
_traceFamily = "memcache"
_traceSpanKind = "client"
_traceComponentName = "library/cache/memcache"
_tracePeerService = "memcache"
_slowLogDuration = time.Millisecond * 250
)
type traceConn struct {
Conn
ctx context.Context
address string
}
func (t *traceConn) setTrace(action, statement string) func(error) error {
now := time.Now()
parent, ok := trace.FromContext(t.ctx)
if !ok {
return func(err error) error { return err }
}
span := parent.Fork(_traceFamily, "Memcache:"+action)
span.SetTag(
trace.String(trace.TagSpanKind, _traceSpanKind),
trace.String(trace.TagComponent, _traceComponentName),
trace.String(trace.TagPeerService, _tracePeerService),
trace.String(trace.TagPeerAddress, t.address),
trace.String(trace.TagDBStatement, action+" "+statement),
)
return func(err error) error {
span.Finish(&err)
t := time.Since(now)
if t > _slowLogDuration {
log.Warn("%s slow log action: %s key: %s time: %v", _traceFamily, action, statement, t)
}
return err
}
}
func (t *traceConn) WithContext(ctx context.Context) Conn {
t.ctx = ctx
t.Conn = t.Conn.WithContext(ctx)
return t
}
func (t *traceConn) Add(item *Item) error {
finishFn := t.setTrace("Add", item.Key)
return finishFn(t.Conn.Add(item))
}
func (t *traceConn) Set(item *Item) error {
finishFn := t.setTrace("Set", item.Key)
return finishFn(t.Conn.Set(item))
}
func (t *traceConn) Replace(item *Item) error {
finishFn := t.setTrace("Replace", item.Key)
return finishFn(t.Conn.Replace(item))
}
func (t *traceConn) Get(key string) (*Item, error) {
finishFn := t.setTrace("Get", key)
item, err := t.Conn.Get(key)
return item, finishFn(err)
}
func (t *traceConn) GetMulti(keys []string) (map[string]*Item, error) {
finishFn := t.setTrace("GetMulti", strings.Join(keys, " "))
items, err := t.Conn.GetMulti(keys)
return items, finishFn(err)
}
func (t *traceConn) Delete(key string) error {
finishFn := t.setTrace("Delete", key)
return finishFn(t.Conn.Delete(key))
}
func (t *traceConn) Increment(key string, delta uint64) (newValue uint64, err error) {
finishFn := t.setTrace("Increment", key+" "+strconv.FormatUint(delta, 10))
newValue, err = t.Conn.Increment(key, delta)
return newValue, finishFn(err)
}
func (t *traceConn) Decrement(key string, delta uint64) (newValue uint64, err error) {
finishFn := t.setTrace("Decrement", key+" "+strconv.FormatUint(delta, 10))
newValue, err = t.Conn.Decrement(key, delta)
return newValue, finishFn(err)
}
func (t *traceConn) CompareAndSwap(item *Item) error {
finishFn := t.setTrace("CompareAndSwap", item.Key)
return finishFn(t.Conn.CompareAndSwap(item))
}
func (t *traceConn) Touch(key string, seconds int32) (err error) {
finishFn := t.setTrace("Touch", key+" "+strconv.Itoa(int(seconds)))
return finishFn(t.Conn.Touch(key, seconds))
}

@ -0,0 +1,103 @@
package memcache
import (
"context"
"strconv"
"strings"
"time"
"github.com/bilibili/kratos/pkg/log"
"github.com/bilibili/kratos/pkg/net/trace"
)
const (
_slowLogDuration = time.Millisecond * 250
)
func newTraceConn(conn Conn, address string) Conn {
tags := []trace.Tag{
trace.String(trace.TagSpanKind, "client"),
trace.String(trace.TagComponent, "cache/memcache"),
trace.String(trace.TagPeerService, "memcache"),
trace.String(trace.TagPeerAddress, address),
}
return &traceConn{Conn: conn, tags: tags}
}
type traceConn struct {
Conn
tags []trace.Tag
}
func (t *traceConn) setTrace(ctx context.Context, action, statement string) func(error) error {
now := time.Now()
parent, ok := trace.FromContext(ctx)
if !ok {
return func(err error) error { return err }
}
span := parent.Fork("", "Memcache:"+action)
span.SetTag(t.tags...)
span.SetTag(trace.String(trace.TagDBStatement, action+" "+statement))
return func(err error) error {
span.Finish(&err)
t := time.Since(now)
if t > _slowLogDuration {
log.Warn("memcache slow log action: %s key: %s time: %v", action, statement, t)
}
return err
}
}
func (t *traceConn) AddContext(ctx context.Context, item *Item) error {
finishFn := t.setTrace(ctx, "Add", item.Key)
return finishFn(t.Conn.Add(item))
}
func (t *traceConn) SetContext(ctx context.Context, item *Item) error {
finishFn := t.setTrace(ctx, "Set", item.Key)
return finishFn(t.Conn.Set(item))
}
func (t *traceConn) ReplaceContext(ctx context.Context, item *Item) error {
finishFn := t.setTrace(ctx, "Replace", item.Key)
return finishFn(t.Conn.Replace(item))
}
func (t *traceConn) GetContext(ctx context.Context, key string) (*Item, error) {
finishFn := t.setTrace(ctx, "Get", key)
item, err := t.Conn.Get(key)
return item, finishFn(err)
}
func (t *traceConn) GetMultiContext(ctx context.Context, keys []string) (map[string]*Item, error) {
finishFn := t.setTrace(ctx, "GetMulti", strings.Join(keys, " "))
items, err := t.Conn.GetMulti(keys)
return items, finishFn(err)
}
func (t *traceConn) DeleteContext(ctx context.Context, key string) error {
finishFn := t.setTrace(ctx, "Delete", key)
return finishFn(t.Conn.Delete(key))
}
func (t *traceConn) IncrementContext(ctx context.Context, key string, delta uint64) (newValue uint64, err error) {
finishFn := t.setTrace(ctx, "Increment", key+" "+strconv.FormatUint(delta, 10))
newValue, err = t.Conn.Increment(key, delta)
return newValue, finishFn(err)
}
func (t *traceConn) DecrementContext(ctx context.Context, key string, delta uint64) (newValue uint64, err error) {
finishFn := t.setTrace(ctx, "Decrement", key+" "+strconv.FormatUint(delta, 10))
newValue, err = t.Conn.Decrement(key, delta)
return newValue, finishFn(err)
}
func (t *traceConn) CompareAndSwapContext(ctx context.Context, item *Item) error {
finishFn := t.setTrace(ctx, "CompareAndSwap", item.Key)
return finishFn(t.Conn.CompareAndSwap(item))
}
func (t *traceConn) TouchContext(ctx context.Context, key string, seconds int32) (err error) {
finishFn := t.setTrace(ctx, "Touch", key+" "+strconv.Itoa(int(seconds)))
return finishFn(t.Conn.Touch(key, seconds))
}

@ -1,9 +1,57 @@
package memcache
import (
"context"
"time"
"github.com/gogo/protobuf/proto"
)
func legalKey(key string) bool {
if len(key) > 250 || len(key) == 0 {
return false
}
for i := 0; i < len(key); i++ {
if key[i] <= ' ' || key[i] == 0x7f {
return false
}
}
return true
}
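// For reference (illustrative values): legalKey("article:42") is true, while
// legalKey("") and legalKey("has space") are false, as is any key longer than
// 250 bytes.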
// MockWith error
func MockWith(err error) Conn {
return errConn{err}
}
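// Illustrative sketch: MockWith can stub out a memcache dependency in unit
// tests with a canned error (ErrNotFound here is assumed to be the sentinel
// defined by this package).
//
//	conn := MockWith(ErrNotFound)
//	if _, err := conn.Get("any-key"); err != ErrNotFound {
//		// every operation on the mocked conn returns the injected error
//	}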
type errConn struct{ err error }
func (c errConn) Err() error { return c.err }
func (c errConn) Close() error { return c.err }
func (c errConn) Add(*Item) error { return c.err }
func (c errConn) Set(*Item) error { return c.err }
func (c errConn) Replace(*Item) error { return c.err }
func (c errConn) CompareAndSwap(*Item) error { return c.err }
func (c errConn) Get(string) (*Item, error) { return nil, c.err }
func (c errConn) GetMulti([]string) (map[string]*Item, error) { return nil, c.err }
func (c errConn) Touch(string, int32) error { return c.err }
func (c errConn) Delete(string) error { return c.err }
func (c errConn) Increment(string, uint64) (uint64, error) { return 0, c.err }
func (c errConn) Decrement(string, uint64) (uint64, error) { return 0, c.err }
func (c errConn) Scan(*Item, interface{}) error { return c.err }
func (c errConn) AddContext(context.Context, *Item) error { return c.err }
func (c errConn) SetContext(context.Context, *Item) error { return c.err }
func (c errConn) ReplaceContext(context.Context, *Item) error { return c.err }
func (c errConn) GetContext(context.Context, string) (*Item, error) { return nil, c.err }
func (c errConn) DecrementContext(context.Context, string, uint64) (uint64, error) { return 0, c.err }
func (c errConn) CompareAndSwapContext(context.Context, *Item) error { return c.err }
func (c errConn) TouchContext(context.Context, string, int32) error { return c.err }
func (c errConn) DeleteContext(context.Context, string) error { return c.err }
func (c errConn) IncrementContext(context.Context, string, uint64) (uint64, error) { return 0, c.err }
func (c errConn) GetMultiContext(context.Context, []string) (map[string]*Item, error) {
return nil, c.err
}
// RawItem item with FlagRAW flag.
//
// Expiration is the cache expiration time, in seconds: either a relative
@ -30,3 +78,12 @@ func JSONItem(key string, v interface{}, flags uint32, expiration int32) *Item {
func ProtobufItem(key string, message proto.Message, flags uint32, expiration int32) *Item {
return &Item{Key: key, Flags: flags | FlagProtobuf, Object: message, Expiration: expiration}
}
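// Illustrative sketch of building items with the helpers above (a zero flag
// mask and a 60-second relative expiration are assumed values; pb refers to
// the test proto package used by this module's tests).
//
//	raw := RawItem("raw:key", []byte("value"), 0, 60)
//	js := JSONItem("json:key", map[string]int64{"mid": 1000}, 0, 60)
//	pbItem := ProtobufItem("pb:key", &pb.TestItem{Name: "kratos", Age: 1}, 0, 60)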
func shrinkDeadline(ctx context.Context, timeout time.Duration) time.Time {
// TODO: the context deadline is ignored to stay compatible with old behaviour.
//deadline, ok := ctx.Deadline()
//if ok {
// return deadline
//}
return time.Now().Add(timeout)
}

@ -0,0 +1,75 @@
package memcache
import (
"testing"
pb "github.com/bilibili/kratos/pkg/cache/memcache/test"
"github.com/stretchr/testify/assert"
)
func TestItemUtil(t *testing.T) {
item1 := RawItem("test", []byte("hh"), 0, 0)
assert.Equal(t, "test", item1.Key)
assert.Equal(t, []byte("hh"), item1.Value)
assert.Equal(t, FlagRAW, FlagRAW&item1.Flags)
item1 = JSONItem("test", &Item{}, 0, 0)
assert.Equal(t, "test", item1.Key)
assert.NotNil(t, item1.Object)
assert.Equal(t, FlagJSON, FlagJSON&item1.Flags)
item1 = ProtobufItem("test", &pb.TestItem{}, 0, 0)
assert.Equal(t, "test", item1.Key)
assert.NotNil(t, item1.Object)
assert.Equal(t, FlagProtobuf, FlagProtobuf&item1.Flags)
}
func TestLegalKey(t *testing.T) {
type args struct {
key string
}
tests := []struct {
name string
args args
want bool
}{
{
name: "test empty key",
want: false,
},
{
name: "test too large key",
args: args{func() string {
var data []byte
for i := 0; i < 255; i++ {
data = append(data, 'k')
}
return string(data)
}()},
want: false,
},
{
name: "test invalid char",
args: args{"hello world"},
want: false,
},
{
name: "test invalid char",
args: args{string([]byte{0x7f})},
want: false,
},
{
name: "test normal key",
args: args{"hello"},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := legalKey(tt.args.key); got != tt.want {
t.Errorf("legalKey() = %v, want %v", got, tt.want)
}
})
}
}

@ -39,6 +39,8 @@ var (
AppID string
// Color is the identification of different experimental group in one caster cluster.
Color string
// DiscoveryNodes is seed nodes.
DiscoveryNodes string
)
func init() {
@ -57,6 +59,7 @@ func addFlag(fs *flag.FlagSet) {
fs.StringVar(&AppID, "appid", os.Getenv("APP_ID"), "appid is global unique application id, register by service tree. or use APP_ID env variable.")
fs.StringVar(&DeployEnv, "deploy.env", defaultString("DEPLOY_ENV", _deployEnv), "deploy env. or use DEPLOY_ENV env variable, value: dev/fat1/uat/pre/prod etc.")
fs.StringVar(&Color, "deploy.color", os.Getenv("DEPLOY_COLOR"), "deploy.color is the identification of different experimental group.")
fs.StringVar(&DiscoveryNodes, "discovery.nodes", os.Getenv("DISCOVERY_NODES"), "discovery.nodes is seed nodes. value: 127.0.0.1:7171,127.0.0.2:7171 etc.")
}
func defaultString(env, value string) string {

@ -3,15 +3,12 @@ package paladin
import (
"context"
"flag"
"github.com/bilibili/kratos/pkg/log"
)
var (
// DefaultClient default client.
DefaultClient Client
confPath string
vars = make(map[string][]Setter) // NOTE: no thread safe
)
func init() {
@ -23,26 +20,12 @@ func Init() (err error) {
if confPath != "" {
DefaultClient, err = NewFile(confPath)
} else {
// TODO: config service
return
// TODO: Get the configuration from the remote service
panic("Please specify a file or dir name by -conf flag.")
}
if err != nil {
return
}
go func() {
for event := range DefaultClient.WatchEvent(context.Background()) {
if event.Event != EventUpdate && event.Event != EventAdd {
continue
}
if sets, ok := vars[event.Key]; ok {
for _, s := range sets {
if err := s.Set(event.Value); err != nil {
log.Error("paladin: vars:%v event:%v error(%v)", s, event, err)
}
}
}
}
}()
return
}
@ -56,7 +39,11 @@ func Watch(key string, s Setter) error {
if err := s.Set(str); err != nil {
return err
}
vars[key] = append(vars[key], s)
go func() {
for event := range WatchEvent(context.Background(), key) {
s.Set(event.Value)
}
}()
return nil
}

@ -2,6 +2,7 @@ package paladin
import (
"context"
"errors"
"fmt"
"io/ioutil"
"log"
@ -14,104 +15,67 @@ import (
"github.com/fsnotify/fsnotify"
)
const (
defaultChSize = 10
)
var _ Client = &file{}
// file is file config client.
type file struct {
values *Map
rawVal map[string]*Value
watchChs map[string][]chan Event
mx sync.Mutex
wg sync.WaitGroup
type watcher struct {
keys []string
C chan Event
}
base string
done chan struct{}
func newWatcher(keys []string) *watcher {
return &watcher{keys: keys, C: make(chan Event, 5)}
}
func readAllPaths(base string) ([]string, error) {
fi, err := os.Stat(base)
if err != nil {
return nil, fmt.Errorf("check local config file fail! error: %s", err)
func (w *watcher) HasKey(key string) bool {
if len(w.keys) == 0 {
return true
}
// dirs or file to paths
var paths []string
if fi.IsDir() {
files, err := ioutil.ReadDir(base)
if err != nil {
return nil, fmt.Errorf("read dir %s error: %s", base, err)
}
for _, file := range files {
if !file.IsDir() {
paths = append(paths, path.Join(base, file.Name()))
}
for _, k := range w.keys {
if keyNamed(k) == key {
return true
}
} else {
paths = append(paths, base)
}
return paths, nil
return false
}
func loadValuesFromPaths(paths []string) (map[string]*Value, error) {
// load config files into values
var err error
values := make(map[string]*Value, len(paths))
for _, fpath := range paths {
if values[path.Base(fpath)], err = loadValue(fpath); err != nil {
return nil, err
}
func (w *watcher) Handle(event Event) {
select {
case w.C <- event:
default:
log.Printf("paladin: event channel full discard file %s update event", event.Key)
}
return values, nil
}
func loadValue(fpath string) (*Value, error) {
data, err := ioutil.ReadFile(fpath)
if err != nil {
return nil, err
}
content := string(data)
return &Value{val: content, raw: content}, nil
// file is file config client.
type file struct {
values *Map
wmu sync.RWMutex
notify *fsnotify.Watcher
watchers map[*watcher]struct{}
}
// NewFile new a config file client.
// conf = /data/conf/app/
// conf = /data/conf/app/xxx.toml
func NewFile(base string) (Client, error) {
// platform slash
base = filepath.FromSlash(base)
paths, err := readAllPaths(base)
raws, err := loadValues(base)
if err != nil {
return nil, err
}
if len(paths) == 0 {
return nil, fmt.Errorf("empty config path")
}
rawVal, err := loadValuesFromPaths(paths)
notify, err := fsnotify.NewWatcher()
if err != nil {
return nil, err
}
valMap := &Map{}
valMap.Store(rawVal)
fc := &file{
values: valMap,
rawVal: rawVal,
watchChs: make(map[string][]chan Event),
base: base,
done: make(chan struct{}, 1),
values := new(Map)
values.Store(raws)
f := &file{
values: values,
notify: notify,
watchers: make(map[*watcher]struct{}),
}
fc.wg.Add(1)
go fc.daemon()
return fc, nil
go f.watchproc(base)
return f, nil
}
// Get return value by key.
@ -124,71 +88,109 @@ func (f *file) GetAll() *Map {
return f.values
}
// WatchEvent watch multi key.
// WatchEvent watch with the specified keys.
func (f *file) WatchEvent(ctx context.Context, keys ...string) <-chan Event {
f.mx.Lock()
defer f.mx.Unlock()
ch := make(chan Event, defaultChSize)
for _, key := range keys {
f.watchChs[key] = append(f.watchChs[key], ch)
}
return ch
w := newWatcher(keys)
f.wmu.Lock()
f.watchers[w] = struct{}{}
f.wmu.Unlock()
return w.C
}
// Close close watcher.
func (f *file) Close() error {
f.done <- struct{}{}
f.wg.Wait()
if err := f.notify.Close(); err != nil {
return err
}
f.wmu.RLock()
for w := range f.watchers {
close(w.C)
}
f.wmu.RUnlock()
return nil
}
// file config daemon to watch file modification
func (f *file) daemon() {
defer f.wg.Done()
fswatcher, err := fsnotify.NewWatcher()
if err != nil {
log.Printf("create file watcher fail! reload function will lose efficacy error: %s", err)
func (f *file) watchproc(base string) {
if err := f.notify.Add(base); err != nil {
log.Printf("paladin: create fsnotify for base path %s fail %s, reload function will lose efficacy", base, err)
return
}
if err = fswatcher.Add(f.base); err != nil {
log.Printf("create fsnotify for base path %s fail %s, reload function will lose efficacy", f.base, err)
return
}
log.Printf("start watch filepath: %s", f.base)
for event := range fswatcher.Events {
switch event.Op {
log.Printf("paladin: start watch config: %s", base)
for event := range f.notify.Events {
// editing the config with vim triggers a rename event
case fsnotify.Write, fsnotify.Create:
f.reloadFile(event.Name)
case fsnotify.Chmod:
switch {
case event.Op&fsnotify.Write == fsnotify.Write, event.Op&fsnotify.Create == fsnotify.Create:
if err := f.reloadFile(event.Name); err != nil {
log.Printf("paladin: load file: %s error: %s, skipped", event.Name, err)
}
default:
log.Printf("unsupport event %s ingored", event)
log.Printf("paladin: unsupport event %s ingored", event)
}
}
}
func (f *file) reloadFile(name string) {
func (f *file) reloadFile(fpath string) (err error) {
// NOTE: in some cases reading the file immediately after receiving the event
// returns stale content; sleep 100ms to make sure the updated content is read.
time.Sleep(100 * time.Millisecond)
key := filepath.Base(name)
val, err := loadValue(name)
value, err := loadValue(fpath)
if err != nil {
log.Printf("load file %s error: %s, skipped", name, err)
return
}
f.rawVal[key] = val
f.values.Store(f.rawVal)
f.mx.Lock()
chs := f.watchChs[key]
f.mx.Unlock()
key := keyNamed(path.Base(fpath))
raws := f.values.Load()
raws[key] = value
f.values.Store(raws)
f.wmu.RLock()
n := 0
for w := range f.watchers {
if w.HasKey(key) {
n++
w.Handle(Event{Event: EventUpdate, Key: key, Value: value.raw})
}
}
f.wmu.RUnlock()
log.Printf("paladin: reload config: %s events: %d\n", key, n)
return
}
for _, ch := range chs {
select {
case ch <- Event{Event: EventUpdate, Value: val.raw}:
default:
log.Printf("event channel full discard file %s update event", name)
func loadValues(base string) (map[string]*Value, error) {
fi, err := os.Stat(base)
if err != nil {
return nil, fmt.Errorf("paladin: check local config file fail! error: %s", err)
}
var paths []string
if fi.IsDir() {
files, err := ioutil.ReadDir(base)
if err != nil {
return nil, fmt.Errorf("paladin: read dir %s error: %s", base, err)
}
for _, file := range files {
if !file.IsDir() {
paths = append(paths, path.Join(base, file.Name()))
}
}
} else {
paths = append(paths, base)
}
if len(paths) == 0 {
return nil, errors.New("empty config path")
}
values := make(map[string]*Value, len(paths))
for _, fpath := range paths {
if values[path.Base(fpath)], err = loadValue(fpath); err != nil {
return nil, err
}
}
return values, nil
}
func loadValue(name string) (*Value, error) {
data, err := ioutil.ReadFile(name)
if err != nil {
return nil, err
}
content := string(data)
return &Value{val: content, raw: content}, nil
}

@ -82,8 +82,9 @@ func TestFileEvent(t *testing.T) {
cli, err := NewFile(path)
assert.Nil(t, err)
assert.NotNil(t, cli)
time.Sleep(time.Millisecond * 100)
ch := cli.WatchEvent(context.Background(), "test.toml", "abc.toml")
time.Sleep(time.Millisecond)
time.Sleep(time.Millisecond * 100)
ioutil.WriteFile(path+"test.toml", []byte(`hello`), 0644)
timeout := time.NewTimer(time.Second)
select {
@ -93,16 +94,4 @@ func TestFileEvent(t *testing.T) {
assert.Equal(t, EventUpdate, ev.Event)
assert.Equal(t, "hello", ev.Value)
}
ioutil.WriteFile(path+"abc.toml", []byte(`test`), 0644)
select {
case <-timeout.C:
t.Fatalf("run test timeout")
case ev := <-ch:
assert.Equal(t, EventUpdate, ev.Event)
assert.Equal(t, "test", ev.Value)
}
content1, _ := cli.Get("test.toml").String()
assert.Equal(t, "hello", content1)
content2, _ := cli.Get("abc.toml").String()
assert.Equal(t, "test", content2)
}

@ -26,7 +26,12 @@ func (m *Map) Store(values map[string]*Value) {
// Load returns the value set by the most recent Store.
func (m *Map) Load() map[string]*Value {
return m.values.Load().(map[string]*Value)
src := m.values.Load().(map[string]*Value)
dst := make(map[string]*Value, len(src))
for k, v := range src {
dst[k] = v
}
return dst
}
// Exist check if values map exist a key.

@ -0,0 +1,12 @@
#### group
##### Overview
A lazy-load object container.
##### Build environment
- **Go v1.12.1 or later is recommended for building and running**
##### Dependencies
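##### Example

A minimal usage sketch (illustrative only; it assumes the `NewGroup`/`Get` API exported by this package and uses `bytes.Buffer` as a stand-in for any lazily created object):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/bilibili/kratos/pkg/container/group"
)

func main() {
	// The constructor runs lazily: the object for a key is created on first Get
	// and cached for subsequent calls with the same key.
	g := group.NewGroup(func() interface{} { return &bytes.Buffer{} })
	buf := g.Get("user").(*bytes.Buffer)
	buf.WriteString("hello")
	// The second Get returns the same cached buffer, so this prints "hello".
	fmt.Println(g.Get("user").(*bytes.Buffer).String())
}
```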

@ -0,0 +1,46 @@
package group
import "fmt"
type Counter struct {
Value int
}
func (c *Counter) Incr() {
c.Value++
}
func ExampleGroup_Get() {
new := func() interface{} {
fmt.Println("Only Once")
return &Counter{}
}
group := NewGroup(new)
// Create a new Counter
group.Get("pass").(*Counter).Incr()
// Get the created Counter again.
group.Get("pass").(*Counter).Incr()
// Output:
// Only Once
}
func ExampleGroup_Reset() {
new := func() interface{} {
return &Counter{}
}
group := NewGroup(new)
newV2 := func() interface{} {
fmt.Println("New V2")
return &Counter{}
}
// Reset the new function and clear all created objects.
group.Reset(newV2)
// Create a new Counter
group.Get("pass").(*Counter).Incr()
// Output:
// New V2
}

@ -0,0 +1,55 @@
// Package group provides a sample lazy load container.
// The group creates a new object only when it is first needed by the user,
// and caches all created objects to avoid repeated construction.
package group
import "sync"
// Group is a lazy load container.
type Group struct {
new func() interface{}
objs sync.Map
sync.RWMutex
}
// NewGroup news a group container.
func NewGroup(new func() interface{}) *Group {
if new == nil {
panic("container.group: can't assign a nil to the new function")
}
return &Group{
new: new,
}
}
// Get gets the object by the given key.
func (g *Group) Get(key string) interface{} {
g.RLock()
new := g.new
g.RUnlock()
obj, ok := g.objs.Load(key)
if !ok {
obj = new()
g.objs.Store(key, obj)
}
return obj
}
// Reset resets the new function and deletes all existing objects.
func (g *Group) Reset(new func() interface{}) {
if new == nil {
panic("container.group: can't assign a nil to the new function")
}
g.Lock()
g.new = new
g.Unlock()
g.Clear()
}
// Clear deletes all objects.
func (g *Group) Clear() {
g.objs.Range(func(key, value interface{}) bool {
g.objs.Delete(key)
return true
})
}

@ -0,0 +1,69 @@
package group
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestGroupGet(t *testing.T) {
count := 0
g := NewGroup(func() interface{} {
count++
return count
})
v := g.Get("/x/internal/dummy/user")
assert.Equal(t, 1, v.(int))
v = g.Get("/x/internal/dummy/avatar")
assert.Equal(t, 2, v.(int))
v = g.Get("/x/internal/dummy/user")
assert.Equal(t, 1, v.(int))
assert.Equal(t, 2, count)
}
func TestGroupReset(t *testing.T) {
g := NewGroup(func() interface{} {
return 1
})
g.Get("/x/internal/dummy/user")
call := false
g.Reset(func() interface{} {
call = true
return 1
})
length := 0
g.objs.Range(func(_, _ interface{}) bool {
length++
return true
})
assert.Equal(t, 0, length)
g.Get("/x/internal/dummy/user")
assert.Equal(t, true, call)
}
func TestGroupClear(t *testing.T) {
g := NewGroup(func() interface{} {
return 1
})
g.Get("/x/internal/dummy/user")
length := 0
g.objs.Range(func(_, _ interface{}) bool {
length++
return true
})
assert.Equal(t, 1, length)
g.Clear()
length = 0
g.objs.Range(func(_, _ interface{}) bool {
length++
return true
})
assert.Equal(t, 0, length)
}

@ -16,14 +16,14 @@ var _schema = "tidb://"
func (db *DB) nodeList() (nodes []string) {
var (
insMap map[string][]*naming.Instance
ins []*naming.Instance
ok bool
insZone *naming.InstancesInfo
ins []*naming.Instance
ok bool
)
if insMap, ok = db.dis.Fetch(context.Background()); !ok {
if insZone, ok = db.dis.Fetch(context.Background()); !ok {
return
}
if ins, ok = insMap[env.Zone]; !ok || len(ins) == 0 {
if ins, ok = insZone.Instances[env.Zone]; !ok || len(ins) == 0 {
return
}
for _, in := range ins {

@ -5,27 +5,21 @@ import (
"time"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"github.com/bilibili/kratos/pkg/ecode/types"
)
func TestEqual(t *testing.T) {
convey.Convey("Equal", t, func(ctx convey.C) {
ctx.Convey("When err1=Error(RequestErr, 'test') and err2=Errorf(RequestErr, 'test')", func(ctx convey.C) {
err1 := Error(RequestErr, "test")
err2 := Errorf(RequestErr, "test")
ctx.Convey("Then err1=err2, err1 != nil", func(ctx convey.C) {
ctx.So(err1, convey.ShouldResemble, err2)
ctx.So(err1, convey.ShouldNotBeNil)
})
})
})
// assert.True(t, OK.Equal(nil))
// assert.True(t, err1.Equal(err2))
// assert.False(t, err1.Equal(nil))
// assert.True(t, Equal(nil, nil))
var (
err1 = Error(RequestErr, "test")
err2 = Errorf(RequestErr, "test")
)
assert.Equal(t, err1, err2)
assert.True(t, OK.Equal(nil))
assert.True(t, err1.Equal(err2))
assert.False(t, err1.Equal(nil))
assert.True(t, Equal(nil, nil))
}
func TestDetail(t *testing.T) {

@ -2,21 +2,23 @@
Main features
1. Write logs to a local file
2. Write logs to stdout
3. Verbose logging, modeled on the glog implementation; different verbose levels can be configured, disabled by default
1. Write logs to ELK
2. Write logs to a local file (internally uses log4go)
3. Write logs to stdout
4. Verbose logging, modeled on the glog implementation; different verbose levels can be configured, disabled by default
Log configuration
1. Default configuration
1. Default agent configuration
The logger ships with a default configuration and can be used directly as follows
The logger ships with a default configuration, switches to remote logging automatically based on env, and can be used directly as follows
log.Init(nil)
2. Command-line flags or environment variables
Flag Environment variable Description
log.stdout LOG_STDOUT whether to enable stdout output
log.agent LOG_AGENT remote log agent address, e.g. unixpacket:///var/run/lancer/collector_tcp.sock?timeout=100ms&chan=1024
log.dir LOG_DIR file log directory
log.v LOG_V verbose log level
log.module LOG_MODULE per-file verbose levels, e.g. file=1,file2=2
@ -30,9 +32,14 @@
stdout = true
vLevel = 3
filter = ["fileld1", "field2"]
[log.module]
"dao_user" = 2
"servic*" = 1
[log.module]
"dao_user" = 2
"servic*" = 1
[log.agent]
taskID = "00000x"
proto = "unixpacket"
addr = "/var/run/lancer/collector_tcp.sock"
chanSize = 10240
Configuration notes
@ -47,5 +54,16 @@
2. log.module
configures the verbose level per file individually
3. log.agent
remote log agent options
taskID the taskID assigned by lancer
proto network protocol, commonly tcp, udp, unixgram
addr network address, commonly ip:port or a sock path
chanSize length of the log queue
Best practices
1. KVString: use KVString instead of KV to reduce object allocation and avoid pressure on the Go GC.
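For example (an illustrative sketch, assuming the Infov/KVString helpers of this package):
	log.Infov(ctx, log.KVString("action", "login"))   // preferred
	log.Infov(ctx, log.KV("action", "login"))         // allocates an extra interface value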
*/
package log

@ -0,0 +1,50 @@
package log
import (
"bytes"
"fmt"
"strconv"
"strings"
)
type verboseModule map[string]int32
type logFilter []string
func (f *logFilter) String() string {
return fmt.Sprint(*f)
}
// Set sets the value of the named command-line flag.
// format: -log.filter key1,key2
func (f *logFilter) Set(value string) error {
for _, i := range strings.Split(value, ",") {
*f = append(*f, strings.TrimSpace(i))
}
return nil
}
func (m verboseModule) String() string {
// FIXME strings.Builder
var buf bytes.Buffer
for k, v := range m {
buf.WriteString(k)
buf.WriteString(strconv.FormatInt(int64(v), 10))
buf.WriteString(",")
}
return buf.String()
}
// Set sets the value of the named command-line flag.
// format: -log.module file=1,file2=2
func (m verboseModule) Set(value string) error {
for _, i := range strings.Split(value, ",") {
kv := strings.Split(i, "=")
if len(kv) == 2 {
if v, err := strconv.ParseInt(kv[1], 10, 64); err == nil {
m[strings.TrimSpace(kv[0])] = int32(v)
}
}
}
return nil
}
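// Illustrative parsing results (hypothetical flag values):
//   -log.filter password,token       -> logFilter{"password", "token"}
//   -log.module dao_user=2,service=1 -> verboseModule{"dao_user": 2, "service": 1}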

@ -16,6 +16,8 @@ const (
_level = "level"
// log time.
_time = "time"
// request path.
// _title = "title"
// log file.
_source = "source"
// common log filed.
@ -27,7 +29,7 @@ const (
// uniq ID from trace.
_tid = "traceid"
// request time.
_ts = "ts"
// _ts = "ts"
// requester.
_caller = "caller"
// container environment: prod, pre, uat, fat.
@ -38,6 +40,8 @@ const (
_mirror = "mirror"
// color.
_color = "color"
// env_color
_envColor = "env_color"
// cluster.
_cluster = "cluster"
)
@ -75,8 +79,8 @@ type Handlers struct {
}
// Log handlers logging.
func (hs Handlers) Log(c context.Context, lv Level, d ...D) {
var hasSource bool
func (hs Handlers) Log(ctx context.Context, lv Level, d ...D) {
hasSource := false
for i := range d {
if _, ok := hs.filters[d[i].Key]; ok {
d[i].Value = "***"
@ -87,11 +91,12 @@ func (hs Handlers) Log(c context.Context, lv Level, d ...D) {
}
if !hasSource {
fn := funcName(3)
errIncr(lv, fn)
d = append(d, KVString(_source, fn))
}
d = append(d, KV(_time, time.Now()), KVInt64(_levelValue, int64(lv)), KVString(_level, lv.String()))
for _, h := range hs.handlers {
h.Log(c, lv, d...)
h.Log(ctx, lv, d...)
}
}

@ -1,3 +1,23 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package core
import (

@ -1,3 +1,25 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package core houses zap's shared internal buffer pool. Third-party
// packages can recreate the same functionality with buffers.NewPool.
package core
var (

@ -1,3 +1,23 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package core
import "sync"

@ -1,3 +1,23 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package core
import (

@ -4,17 +4,18 @@ import (
"context"
"flag"
"fmt"
"io"
"os"
"strconv"
"strings"
"github.com/bilibili/kratos/pkg/conf/env"
"github.com/bilibili/kratos/pkg/stat/prom"
)
// Config log config.
type Config struct {
AppID string
Host string
Family string
Host string
// stdout
Stdout bool
@ -43,21 +44,39 @@ type Config struct {
Filter []string
}
// errProm prometheus error counter.
var errProm = prom.BusinessErrCount
// Render render log output
type Render interface {
Render(io.Writer, map[string]interface{}) error
RenderString(map[string]interface{}) string
}
var (
h Handler
c *Config
)
func init() {
host, _ := os.Hostname()
c = &Config{
Family: env.AppID,
Host: host,
}
h = newHandlers([]string{}, NewStdout())
addFlag(flag.CommandLine)
}
var (
_v int
_stdout bool
_dir string
_filter logFilter
_module = verboseModule{}
_v int
_stdout bool
_dir string
_agentDSN string
_filter logFilter
_module = verboseModule{}
_noagent bool
)
// addFlag init log from dsn.
@ -73,12 +92,15 @@ func addFlag(fs *flag.FlagSet) {
if tf := os.Getenv("LOG_FILTER"); len(tf) > 0 {
_filter.Set(tf)
}
_noagent, _ = strconv.ParseBool(os.Getenv("LOG_NO_AGENT"))
// get val from flag
fs.IntVar(&_v, "log.v", _v, "log verbose level, or use LOG_V env variable.")
fs.BoolVar(&_stdout, "log.stdout", _stdout, "log enable stdout or not, or use LOG_STDOUT env variable.")
fs.StringVar(&_dir, "log.dir", _dir, "log file `path, or use LOG_DIR env variable.")
fs.StringVar(&_agentDSN, "log.agent", _agentDSN, "log agent dsn, or use LOG_AGENT env variable.")
fs.Var(&_module, "log.module", "log verbose for specified module, or use LOG_MODULE env variable, format: file=1,file2=2.")
fs.Var(&_filter, "log.filter", "log field for sensitive message, or use LOG_FILTER env variable, format: field1,field2.")
fs.BoolVar(&_noagent, "log.noagent", _noagent, "force disable log agent print log to stderr, or use LOG_NO_AGENT")
}
// Init create logger with context.
@ -94,8 +116,8 @@ func Init(conf *Config) {
Filter: _filter,
}
}
if conf.AppID == "" && len(env.AppID) != 0 {
conf.AppID = env.AppID
if len(env.AppID) != 0 {
conf.Family = env.AppID // for caster
}
conf.Host = env.Hostname
if len(conf.Host) == 0 {
@ -104,7 +126,7 @@ func Init(conf *Config) {
}
var hs []Handler
// when env is dev
if isNil || conf.Stdout {
if conf.Stdout || (isNil && (env.DeployEnv == "" || env.DeployEnv == env.DeployEnvDev)) || _noagent {
hs = append(hs, NewStdout())
}
if conf.Dir != "" {
@ -114,21 +136,6 @@ func Init(conf *Config) {
c = conf
}
type logFilter []string
func (f *logFilter) String() string {
return fmt.Sprint(*f)
}
// Set sets the value of the named command-line flag.
// format: -log.filter key1,key2
func (f *logFilter) Set(value string) error {
for _, i := range strings.Split(value, ",") {
*f = append(*f, strings.TrimSpace(i))
}
return nil
}
// Info logs a message at the info log level.
func Info(format string, args ...interface{}) {
h.Log(context.Background(), _infoLevel, KVString(_log, fmt.Sprintf(format, args...)))
@ -174,22 +181,19 @@ func Errorv(ctx context.Context, args ...D) {
h.Log(ctx, _errorLevel, args...)
}
// SetFormat only effective on stdout and file handler
// %T time format at "15:04:05.999" on stdout handler, "15:04:05 MST" on file handler
// %t time format at "15:04:05" on stdout handler, "15:04" on file handler
// %D date format at "2006/01/02"
// %d date format at "01/02"
// %L log level e.g. INFO WARN ERROR
// %M log message and additional fields: key=value this is log message
// NOTE: the patterns below are not supported on the file handler
// %f function name and line number e.g. model.Get:121
// %i instance id
// %e deploy env e.g. dev uat fat prod
// %z zone
// %S full file name and line number: /a/b/c/d.go:23
// %s final file name element and line number: d.go:23
func SetFormat(format string) {
h.SetFormat(format)
func logw(args []interface{}) []D {
if len(args)%2 != 0 {
Warn("log: the variadic must be plural, the last one will ignored")
}
ds := make([]D, 0, len(args)/2)
for i := 0; i < len(args)-1; i = i + 2 {
if key, ok := args[i].(string); ok {
ds = append(ds, KV(key, args[i+1]))
} else {
Warn("log: key must be string, get %T, ignored", args[i])
}
}
return ds
}
// Infow logs a message with some additional context. The variadic key-value pairs are treated as they are in With.
@ -207,19 +211,22 @@ func Errorw(ctx context.Context, args ...interface{}) {
h.Log(ctx, _errorLevel, logw(args)...)
}
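A hedged sketch of how the *w helpers are meant to be called (field names invented): logw pairs the variadic arguments as string keys and values, warning about and dropping anything it cannot pair.

package main

import (
	"context"

	"github.com/bilibili/kratos/pkg/log"
)

func main() {
	ctx := context.Background()
	log.Infow(ctx, "uid", 123, "action", "login") // becomes KV("uid", 123), KV("action", "login")
	log.Errorw(ctx, 42, "value")                  // non-string key: warned about and skipped
}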
func logw(args []interface{}) []D {
if len(args)%2 != 0 {
Warn("log: the variadic must be plural, the last one will ignored")
}
ds := make([]D, 0, len(args)/2)
for i := 0; i < len(args)-1; i = i + 2 {
if key, ok := args[i].(string); ok {
ds = append(ds, KV(key, args[i+1]))
} else {
Warn("log: key must be string, get %T, ignored", args[i])
}
}
return ds
// SetFormat only effective on stdout and file handler
// %T time format at "15:04:05.999" on stdout handler, "15:04:05 MST" on file handler
// %t time format at "15:04:05" on stdout handler, "15:04" on file on file handler
// %D data format at "2006/01/02"
// %d data format at "01/02"
// %L log level e.g. INFO WARN ERROR
// %M log message and additional fields: key=value this is log message
// NOTE below pattern not support on file handler
// %f function name and line number e.g. model.Get:121
// %i instance id
// %e deploy env e.g. dev uat fat prod
// %z zone
// %S full file name and line number: /a/b/c/d.go:23
// %s final file name element and line number: d.go:23
func SetFormat(format string) {
h.SetFormat(format)
}
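A short sketch (assumed usage) of SetFormat with the verbs documented above; the pattern below is an illustration, not a recommended default.

package main

import "github.com/bilibili/kratos/pkg/log"

func main() {
	log.Init(nil)
	defer log.Close()
	log.SetFormat("%L %d-%T %f %M") // level, date, time, caller, then message and fields
	log.Info("format changed")
}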
// Close close resource.
@ -228,3 +235,9 @@ func Close() (err error) {
h = _defaultStdout
return
}
func errIncr(lv Level, source string) {
if lv == _errorLevel {
errProm.Incr(source)
}
}

@ -37,19 +37,16 @@ func testLog(t *testing.T) {
Error("hello %s", "world")
Errorv(context.Background(), KV("key", 2222222), KV("test2", "test"))
Errorc(context.Background(), "keys: %s %s...", "key1", "key2")
Errorw(context.Background(), "key1", "value1", "key2", "value2")
})
t.Run("Warn", func(t *testing.T) {
Warn("hello %s", "world")
Warnv(context.Background(), KV("key", 2222222), KV("test2", "test"))
Warnc(context.Background(), "keys: %s %s...", "key1", "key2")
Warnw(context.Background(), "key1", "value1", "key2", "value2")
})
t.Run("Info", func(t *testing.T) {
Info("hello %s", "world")
Infov(context.Background(), KV("key", 2222222), KV("test2", "test"))
Infoc(context.Background(), "keys: %s %s...", "key1", "key2")
Infow(context.Background(), "key1", "value1", "key2", "value2")
})
}
@ -87,3 +84,22 @@ func TestLogWithMirror(t *testing.T) {
Infov(context.Background(), KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content"))
}
func TestOverwriteSource(t *testing.T) {
ctx := context.Background()
t.Run("test source kv string", func(t *testing.T) {
Infov(ctx, KVString("source", "test"))
})
t.Run("test source kv string", func(t *testing.T) {
Infov(ctx, KV("source", "test"))
})
}
func BenchmarkLog(b *testing.B) {
ctx := context.Background()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
Infov(ctx, KVString("test", "hello"), KV("int", 34), KV("hhh", "hhhh"))
}
})
}

@ -0,0 +1,61 @@
package log
import (
"context"
"io/ioutil"
"os"
"github.com/sirupsen/logrus"
)
func init() {
redirectLogrus()
}
func redirectLogrus() {
// FIXME: because of the different stack depth, runtime.Caller reports the wrong function name.
logrus.AddHook(redirectHook{})
if os.Getenv("LOGRUS_STDOUT") == "" {
logrus.SetOutput(ioutil.Discard)
}
}
type redirectHook struct{}
func (redirectHook) Levels() []logrus.Level {
return logrus.AllLevels
}
func (redirectHook) Fire(entry *logrus.Entry) error {
lv := _infoLevel
var logrusLv string
var verbose int32
switch entry.Level {
case logrus.FatalLevel, logrus.PanicLevel:
logrusLv = entry.Level.String()
fallthrough
case logrus.ErrorLevel:
lv = _errorLevel
case logrus.WarnLevel:
lv = _warnLevel
case logrus.InfoLevel:
lv = _infoLevel
case logrus.DebugLevel:
// use verbose log instead of debug level
verbose = 10
}
args := make([]D, 0, len(entry.Data)+1)
args = append(args, D{Key: _log, Value: entry.Message})
for k, v := range entry.Data {
args = append(args, D{Key: k, Value: v})
}
if logrusLv != "" {
args = append(args, D{Key: "logrus_lv", Value: logrusLv})
}
if verbose != 0 {
V(verbose).Infov(context.Background(), args...)
} else {
h.Log(context.Background(), lv, args...)
}
return nil
}
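A hedged sketch (import path assumed): importing the log package runs init, which registers redirectHook, so plain logrus calls are re-emitted through the kratos log handlers.

package main

import (
	_ "github.com/bilibili/kratos/pkg/log" // init registers the logrus redirect hook

	"github.com/sirupsen/logrus"
)

func main() {
	logrus.WithField("uid", 123).Info("routed through kratos log")
	logrus.Debug("mapped to verbose level 10 rather than a separate debug level")
}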

@ -11,12 +11,6 @@ import (
"time"
)
// Render renders log output
type Render interface {
Render(io.Writer, map[string]interface{}) error
RenderString(map[string]interface{}) string
}
var patternMap = map[string]func(map[string]interface{}) string{
"T": longTime,
"t": shortTime,
@ -80,6 +74,7 @@ func (p *pattern) Render(w io.Writer, d map[string]interface{}) error {
for _, f := range p.funcs {
buf.WriteString(f(d))
}
_, err := buf.WriteTo(w)
return err
}

@ -2,27 +2,22 @@ package log
import (
"context"
"io"
"os"
"time"
)
const defaultPattern = "%L %d-%T %f %M"
var _defaultStdout = NewStdout()
// StdoutHandler stdout log handler
type StdoutHandler struct {
out io.Writer
err io.Writer
render Render
}
// NewStdout create a stdout log handler
func NewStdout() *StdoutHandler {
return &StdoutHandler{
out: os.Stdout,
err: os.Stderr,
render: newPatternRender("[%D %T] [%s] %M"),
}
return &StdoutHandler{render: newPatternRender(defaultPattern)}
}
// Log stdout logging, only for development env.
@ -31,12 +26,8 @@ func (h *StdoutHandler) Log(ctx context.Context, lv Level, args ...D) {
// add extra fields
addExtraField(ctx, d)
d[_time] = time.Now().Format(_timeFormat)
if lv <= _infoLevel {
h.render.Render(h.out, d)
} else {
h.render.Render(h.err, d)
}
h.out.Write([]byte("\n"))
h.render.Render(os.Stderr, d)
os.Stderr.Write([]byte("\n"))
}
// Close stdout logging

@ -2,7 +2,6 @@ package log
import (
"context"
"fmt"
"math"
"runtime"
"strconv"
@ -16,11 +15,7 @@ import (
func addExtraField(ctx context.Context, fields map[string]interface{}) {
if t, ok := trace.FromContext(ctx); ok {
if s, ok := t.(fmt.Stringer); ok {
fields[_tid] = s.String()
} else {
fields[_tid] = fmt.Sprintf("%s", t)
}
fields[_tid] = t.TraceID()
}
if caller := metadata.String(ctx, metadata.Caller); caller != "" {
fields[_caller] = caller
@ -28,18 +23,29 @@ func addExtraField(ctx context.Context, fields map[string]interface{}) {
if color := metadata.String(ctx, metadata.Color); color != "" {
fields[_color] = color
}
if env.Color != "" {
fields[_envColor] = env.Color
}
if cluster := metadata.String(ctx, metadata.Cluster); cluster != "" {
fields[_cluster] = cluster
}
fields[_deplyEnv] = env.DeployEnv
fields[_zone] = env.Zone
fields[_appID] = c.AppID
fields[_appID] = c.Family
fields[_instanceID] = c.Host
if metadata.Bool(ctx, metadata.Mirror) {
if metadata.String(ctx, metadata.Mirror) != "" {
fields[_mirror] = true
}
}
// funcName get func name.
func funcName(skip int) (name string) {
if _, file, lineNo, ok := runtime.Caller(skip); ok {
return file + ":" + strconv.Itoa(lineNo)
}
return "unknown:0"
}
// toMap convert D slice to map[string]interface{} for legacy file and stdout.
func toMap(args ...D) map[string]interface{} {
d := make(map[string]interface{}, 10+len(args))
@ -61,11 +67,3 @@ func toMap(args ...D) map[string]interface{} {
}
return d
}
// funcName get func name.
func funcName(skip int) (name string) {
if _, file, lineNo, ok := runtime.Caller(skip); ok {
return file + ":" + strconv.Itoa(lineNo)
}
return "unknown:0"
}

@ -0,0 +1,54 @@
package log
import (
"reflect"
"strings"
"testing"
"time"
)
func TestFuncName(t *testing.T) {
name := funcName(1)
if !strings.Contains(name, "util_test.go:11") {
t.Errorf("expect contains util_test.go:11 got %s", name)
}
}
func Test_toMap(t *testing.T) {
type args struct {
args []D
}
tests := []struct {
name string
args args
want map[string]interface{}
}{
{
args: args{[]D{KVString("test", "hello")}},
want: map[string]interface{}{"test": "hello"},
},
{
args: args{[]D{KVInt64("test", 123)}},
want: map[string]interface{}{"test": int64(123)},
},
{
args: args{[]D{KVFloat32("test", float32(1.01))}},
want: map[string]interface{}{"test": float32(1.01)},
},
{
args: args{[]D{KVFloat32("test", float32(1.01))}},
want: map[string]interface{}{"test": float32(1.01)},
},
{
args: args{[]D{KVDuration("test", time.Second)}},
want: map[string]interface{}{"test": time.Second},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := toMap(tt.args.args...); !reflect.DeepEqual(got, tt.want) {
t.Errorf("toMap() = %v, want %v", got, tt.want)
}
})
}
}

@ -1,42 +1,13 @@
package log
import (
"bytes"
"context"
"fmt"
"path/filepath"
"runtime"
"strconv"
"strings"
)
type verboseModule map[string]int32
func (m verboseModule) String() string {
// FIXME strings.Builder
var buf bytes.Buffer
for k, v := range m {
buf.WriteString(k)
buf.WriteString(strconv.FormatInt(int64(v), 10))
buf.WriteString(",")
}
return buf.String()
}
// Set sets the value of the named command-line flag.
// format: -log.module file=1,file2=2
func (m verboseModule) Set(value string) error {
for _, i := range strings.Split(value, ",") {
kv := strings.Split(i, "=")
if len(kv) == 2 {
if v, err := strconv.ParseInt(kv[1], 10, 64); err == nil {
m[strings.TrimSpace(kv[0])] = int32(v)
}
}
}
return nil
}
// V reports whether verbosity at the call site is at least the requested level.
// The returned value is a boolean of type Verbose, which implements Info, Infov etc.
// These methods will write to the Info log if called.
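A hedged sketch of the Verbose API described above (exact signature assumed): verbose output is gated by -log.v / LOG_V and by per-file overrides from -log.module.

package main

import "github.com/bilibili/kratos/pkg/log"

func main() {
	log.Init(nil)
	defer log.Close()
	log.V(2).Info("only emitted when the effective verbosity here is at least 2")
}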

@ -7,7 +7,6 @@ import (
"fmt"
"math/rand"
"net/url"
"os"
"strconv"
"strings"
"sync"
@ -18,10 +17,7 @@ import (
"github.com/bilibili/kratos/pkg/ecode"
"github.com/bilibili/kratos/pkg/log"
"github.com/bilibili/kratos/pkg/naming"
bm "github.com/bilibili/kratos/pkg/net/http/blademaster"
"github.com/bilibili/kratos/pkg/net/netutil"
"github.com/bilibili/kratos/pkg/net/netutil/breaker"
xstr "github.com/bilibili/kratos/pkg/str"
http "github.com/bilibili/kratos/pkg/net/http/blademaster"
xtime "github.com/bilibili/kratos/pkg/time"
)
@ -30,16 +26,12 @@ const (
_setURL = "http://%s/discovery/set"
_cancelURL = "http://%s/discovery/cancel"
_renewURL = "http://%s/discovery/renew"
_pollURL = "http://%s/discovery/polls"
_nodesURL = "http://%s/discovery/nodes"
_pollURL = "http://%s/discovery/polls"
_registerGap = 30 * time.Second
_statusUP = "1"
)
const (
_appid = "infra.discovery"
)
@ -51,116 +43,118 @@ var (
ErrDuplication = errors.New("discovery: instance duplicate registration")
)
var (
_once sync.Once
_builder naming.Builder
)
// Builder returns the default discovery resolver builder.
func Builder() naming.Builder {
_once.Do(func() {
_builder = New(nil)
})
return _builder
}
// Build registers a resolver into the default discovery.
func Build(id string) naming.Resolver {
return Builder().Build(id)
}
// Config discovery configures.
type Config struct {
Nodes []string
Zone string
Env string
Host string
Nodes []string
Region string
Zone string
Env string
Host string
}
type appData struct {
ZoneInstances map[string][]*naming.Instance `json:"zone_instances"`
LastTs int64 `json:"latest_timestamp"`
Err string `json:"err"`
Instances map[string][]*naming.Instance `json:"instances"`
LastTs int64 `json:"latest_timestamp"`
}
// Discovery is discovery client.
type Discovery struct {
c *Config
once sync.Once
conf *Config
ctx context.Context
cancelFunc context.CancelFunc
httpClient *bm.Client
httpClient *http.Client
node atomic.Value
nodeIdx uint64
mutex sync.RWMutex
apps map[string]*appInfo
registry map[string]struct{}
lastHost string
cancelPolls context.CancelFunc
idx uint64
node atomic.Value
delete chan *appInfo
delete chan *appInfo
}
type appInfo struct {
resolver map[*Resolve]struct{}
zoneIns atomic.Value
resolver map[*Resolver]struct{}
lastTs int64 // latest timestamp
}
func fixConfig(c *Config) {
func fixConfig(c *Config) error {
if len(c.Nodes) == 0 {
c.Nodes = []string{"NOTE: please config a default HOST"}
c.Nodes = strings.Split(env.DiscoveryNodes, ",")
}
if c.Region == "" {
c.Region = env.Region
}
if env.Zone != "" {
if c.Zone == "" {
c.Zone = env.Zone
}
if env.DeployEnv != "" {
if c.Env == "" {
c.Env = env.DeployEnv
}
if env.Hostname != "" {
if c.Host == "" {
c.Host = env.Hostname
} else {
c.Host, _ = os.Hostname()
}
}
var (
once sync.Once
_defaultDiscovery *Discovery
)
func initDefault() {
once.Do(func() {
_defaultDiscovery = New(nil)
})
}
// Builder returns the default discovery resolver builder.
func Builder() naming.Builder {
if _defaultDiscovery == nil {
initDefault()
}
return _defaultDiscovery
}
// Build registers a resolver into the default discovery.
func Build(id string) naming.Resolver {
if _defaultDiscovery == nil {
initDefault()
if len(c.Nodes) == 0 || c.Region == "" || c.Zone == "" || c.Env == "" || c.Host == "" {
return fmt.Errorf(
"invalid discovery config nodes:%+v region:%s zone:%s deployEnv:%s host:%s",
c.Nodes,
c.Region,
c.Zone,
c.Env,
c.Host,
)
}
return _defaultDiscovery.Build(id)
return nil
}
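A hedged sketch (package path and node address invented): build a discovery client from an explicit Config; empty fields fall back to env values inside fixConfig, and New panics if the merged config is still incomplete.

package main

import "github.com/bilibili/kratos/pkg/naming/discovery"

func main() {
	d := discovery.New(&discovery.Config{
		Nodes:  []string{"127.0.0.1:7171"}, // hypothetical discovery node
		Region: "sh",
		Zone:   "sh001",
		Env:    "dev",
		Host:   "test-host",
	})
	_ = d.Build("demo.service") // resolver for a hypothetical appid
}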
// New new a discovery client.
func New(c *Config) (d *Discovery) {
if c == nil {
c = &Config{}
c = new(Config)
}
if err := fixConfig(c); err != nil {
panic(err)
}
fixConfig(c)
ctx, cancel := context.WithCancel(context.Background())
d = &Discovery{
c: c,
ctx: ctx,
cancelFunc: cancel,
conf: c,
apps: map[string]*appInfo{},
registry: map[string]struct{}{},
delete: make(chan *appInfo, 10),
}
// httpClient
cfg := &bm.ClientConfig{
Dial: xtime.Duration(3 * time.Second),
Timeout: xtime.Duration(40 * time.Second),
Breaker: &breaker.Config{
Window: 100,
Sleep: 3,
Bucket: 10,
Ratio: 0.5,
Request: 100,
},
}
d.httpClient = bm.NewClient(cfg)
cfg := &http.ClientConfig{
Dial: xtime.Duration(3 * time.Second),
Timeout: xtime.Duration(40 * time.Second),
KeepAlive: xtime.Duration(40 * time.Second),
}
d.httpClient = http.NewClient(cfg)
// discovery self
resolver := d.Build(_appid)
event := resolver.Watch()
_, ok := <-event
@ -169,7 +163,7 @@ func New(c *Config) (d *Discovery) {
}
ins, ok := resolver.Fetch(context.Background())
if ok {
d.newSelf(ins)
d.newSelf(ins.Instances)
}
go d.selfproc(resolver, event)
return
@ -183,13 +177,13 @@ func (d *Discovery) selfproc(resolver naming.Resolver, event <-chan struct{}) {
}
zones, ok := resolver.Fetch(context.Background())
if ok {
d.newSelf(zones)
d.newSelf(zones.Instances)
}
}
}
func (d *Discovery) newSelf(zones map[string][]*naming.Instance) {
ins, ok := zones[d.conf.Zone]
ins, ok := zones[d.c.Zone]
if !ok {
return
}
@ -203,22 +197,22 @@ func (d *Discovery) newSelf(zones map[string][]*naming.Instance) {
}
}
// diff old nodes
olds, ok := d.node.Load().([]string)
if ok {
var diff int
for _, n := range nodes {
for _, o := range olds {
var olds int
for _, n := range nodes {
if node, ok := d.node.Load().([]string); ok {
for _, o := range node {
if o == n {
diff++
olds++
break
}
}
}
if len(nodes) == diff {
return
}
}
rand.Shuffle(len(nodes), func(i, j int) {
if len(nodes) == olds {
return
}
// FIXME: we should use rand.Shuffle() in golang 1.10
shuffle(len(nodes), func(i, j int) {
nodes[i], nodes[j] = nodes[j], nodes[i]
})
d.node.Store(nodes)
@ -226,7 +220,7 @@ func (d *Discovery) newSelf(zones map[string][]*naming.Instance) {
// Build builds a discovery resolver for the appid.
func (d *Discovery) Build(appid string) naming.Resolver {
r := &Resolver{
r := &Resolve{
id: appid,
d: d,
event: make(chan struct{}, 1),
@ -235,7 +229,7 @@ func (d *Discovery) Build(appid string) naming.Resolver {
app, ok := d.apps[appid]
if !ok {
app = &appInfo{
resolver: make(map[*Resolver]struct{}),
resolver: make(map[*Resolve]struct{}),
}
d.apps[appid] = app
cancel := d.cancelPolls
@ -263,32 +257,32 @@ func (d *Discovery) Scheme() string {
return "discovery"
}
// Resolver is a discovery resolver.
type Resolver struct {
// Resolve is a discovery resolver.
type Resolve struct {
id string
event chan struct{}
d *Discovery
}
// Watch watch instance.
func (r *Resolver) Watch() <-chan struct{} {
func (r *Resolve) Watch() <-chan struct{} {
return r.event
}
// Fetch fetch resolver instance.
func (r *Resolver) Fetch(c context.Context) (ins map[string][]*naming.Instance, ok bool) {
func (r *Resolve) Fetch(ctx context.Context) (ins *naming.InstancesInfo, ok bool) {
r.d.mutex.RLock()
app, ok := r.d.apps[r.id]
r.d.mutex.RUnlock()
if ok {
ins, ok = app.zoneIns.Load().(map[string][]*naming.Instance)
ins, ok = app.zoneIns.Load().(*naming.InstancesInfo)
return
}
return
}
// Close close resolver.
func (r *Resolver) Close() error {
func (r *Resolve) Close() error {
r.d.mutex.Lock()
if app, ok := r.d.apps[r.id]; ok && len(app.resolver) != 0 {
delete(app.resolver, r)
@ -298,23 +292,11 @@ func (r *Resolver) Close() error {
return nil
}
func (d *Discovery) pickNode() string {
nodes, ok := d.node.Load().([]string)
if !ok || len(nodes) == 0 {
return d.conf.Nodes[d.idx%uint64(len(d.conf.Nodes))]
}
return nodes[d.idx%uint64(len(nodes))]
}
func (d *Discovery) switchNode() {
atomic.AddUint64(&d.idx, 1)
}
// Reload reload the config
func (d *Discovery) Reload(c *Config) {
fixConfig(c)
d.mutex.Lock()
d.conf = c
d.c = c
d.mutex.Unlock()
}
@ -325,7 +307,7 @@ func (d *Discovery) Close() error {
}
// Register Register an instance with discovery and renew automatically
func (d *Discovery) Register(c context.Context, ins *naming.Instance) (cancelFunc context.CancelFunc, err error) {
func (d *Discovery) Register(ctx context.Context, ins *naming.Instance) (cancelFunc context.CancelFunc, err error) {
d.mutex.Lock()
if _, ok := d.registry[ins.AppID]; ok {
err = ErrDuplication
@ -336,13 +318,15 @@ func (d *Discovery) Register(c context.Context, ins *naming.Instance) (cancelFun
if err != nil {
return
}
if err = d.register(c, ins); err != nil {
ctx, cancel := context.WithCancel(d.ctx)
if err = d.register(ctx, ins); err != nil {
d.mutex.Lock()
delete(d.registry, ins.AppID)
d.mutex.Unlock()
cancel()
return
}
ctx, cancel := context.WithCancel(d.ctx)
ch := make(chan struct{}, 1)
cancelFunc = context.CancelFunc(func() {
cancel()
@ -355,10 +339,10 @@ func (d *Discovery) Register(c context.Context, ins *naming.Instance) (cancelFun
select {
case <-ticker.C:
if err := d.renew(ctx, ins); err != nil && ecode.NothingFound.Equal(err) {
d.register(ctx, ins)
_ = d.register(ctx, ins)
}
case <-ctx.Done():
d.cancel(ins)
_ = d.cancel(ins)
ch <- struct{}{}
return
}
@ -367,148 +351,146 @@ func (d *Discovery) Register(c context.Context, ins *naming.Instance) (cancelFun
return
}
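A hedged, same-package sketch (instance values invented) of the Register contract: renewal runs every _registerGap until the returned cancel func is invoked, which also deregisters the instance.

func exampleRegister(d *Discovery) {
	cancel, err := d.Register(context.Background(), &naming.Instance{
		Zone:  "sh001",
		Env:   "dev",
		AppID: "demo.service",
		Addrs: []string{"grpc://127.0.0.1:9000"},
	})
	if err != nil {
		return
	}
	defer cancel() // cancels renewal and removes the registration
}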
// Set set ins status and metadata.
func (d *Discovery) Set(ins *naming.Instance) error {
return d.set(context.Background(), ins)
}
// cancel Remove the registered instance from discovery
func (d *Discovery) cancel(ins *naming.Instance) (err error) {
// register Register an instance with discovery
func (d *Discovery) register(ctx context.Context, ins *naming.Instance) (err error) {
d.mutex.RLock()
conf := d.conf
c := d.c
d.mutex.RUnlock()
var metadata []byte
if ins.Metadata != nil {
if metadata, err = json.Marshal(ins.Metadata); err != nil {
log.Error("discovery:register instance Marshal metadata(%v) failed!error(%v)", ins.Metadata, err)
}
}
res := new(struct {
Code int `json:"code"`
Message string `json:"message"`
})
uri := fmt.Sprintf(_cancelURL, d.pickNode())
params := d.newParams(conf)
uri := fmt.Sprintf(_registerURL, d.pickNode())
params := d.newParams(c)
params.Set("appid", ins.AppID)
// request
if err = d.httpClient.Post(context.Background(), uri, "", params, &res); err != nil {
params.Set("addrs", strings.Join(ins.Addrs, ","))
params.Set("version", ins.Version)
params.Set("status", _statusUP)
params.Set("metadata", string(metadata))
if err = d.httpClient.Post(ctx, uri, "", params, &res); err != nil {
d.switchNode()
log.Error("discovery cancel client.Get(%v) env(%s) appid(%s) hostname(%s) error(%v)",
uri, conf.Env, ins.AppID, conf.Host, err)
log.Error("discovery: register client.Get(%v) zone(%s) env(%s) appid(%s) addrs(%v) error(%v)",
uri, c.Zone, c.Env, ins.AppID, ins.Addrs, err)
return
}
if ec := ecode.Int(res.Code); !ec.Equal(ecode.OK) {
log.Warn("discovery cancel client.Get(%v) env(%s) appid(%s) hostname(%s) code(%v)",
uri, conf.Env, ins.AppID, conf.Host, res.Code)
log.Warn("discovery: register client.Get(%v) env(%s) appid(%s) addrs(%v) code(%v)", uri, c.Env, ins.AppID, ins.Addrs, res.Code)
err = ec
return
}
log.Info("discovery cancel client.Get(%v) env(%s) appid(%s) hostname(%s) success",
uri, conf.Env, ins.AppID, conf.Host)
log.Info("discovery: register client.Get(%v) env(%s) appid(%s) addrs(%s) success", uri, c.Env, ins.AppID, ins.Addrs)
return
}
// register Register an instance with discovery
func (d *Discovery) register(ctx context.Context, ins *naming.Instance) (err error) {
// renew Renew an instance with discovery
func (d *Discovery) renew(ctx context.Context, ins *naming.Instance) (err error) {
d.mutex.RLock()
conf := d.conf
c := d.c
d.mutex.RUnlock()
var metadata []byte
if ins.Metadata != nil {
if metadata, err = json.Marshal(ins.Metadata); err != nil {
log.Error("discovery:register instance Marshal metadata(%v) failed!error(%v)", ins.Metadata, err)
}
}
res := new(struct {
Code int `json:"code"`
Message string `json:"message"`
})
uri := fmt.Sprintf(_registerURL, d.pickNode())
params := d.newParams(conf)
uri := fmt.Sprintf(_renewURL, d.pickNode())
params := d.newParams(c)
params.Set("appid", ins.AppID)
params.Set("addrs", strings.Join(ins.Addrs, ","))
params.Set("version", ins.Version)
params.Set("status", _statusUP)
params.Set("metadata", string(metadata))
if err = d.httpClient.Post(ctx, uri, "", params, &res); err != nil {
d.switchNode()
log.Error("discovery: register client.Get(%v) zone(%s) env(%s) appid(%s) addrs(%v) error(%v)",
uri, conf.Zone, conf.Env, ins.AppID, ins.Addrs, err)
log.Error("discovery: renew client.Get(%v) env(%s) appid(%s) hostname(%s) error(%v)",
uri, c.Env, ins.AppID, c.Host, err)
return
}
if ec := ecode.Int(res.Code); !ec.Equal(ecode.OK) {
log.Warn("discovery: register client.Get(%v) env(%s) appid(%s) addrs(%v) code(%v)",
uri, conf.Env, ins.AppID, ins.Addrs, res.Code)
err = ec
if ec.Equal(ecode.NothingFound) {
return
}
log.Error("discovery: renew client.Get(%v) env(%s) appid(%s) hostname(%s) code(%v)",
uri, c.Env, ins.AppID, c.Host, res.Code)
return
}
log.Info("discovery: register client.Get(%v) env(%s) appid(%s) addrs(%s) success",
uri, conf.Env, ins.AppID, ins.Addrs)
return
}
// set sets instance info with discovery
func (d *Discovery) set(ctx context.Context, ins *naming.Instance) (err error) {
// cancel Remove the registered instance from discovery
func (d *Discovery) cancel(ins *naming.Instance) (err error) {
d.mutex.RLock()
conf := d.conf
c := d.c
d.mutex.RUnlock()
res := new(struct {
Code int `json:"code"`
Message string `json:"message"`
})
uri := fmt.Sprintf(_setURL, d.pickNode())
params := d.newParams(conf)
uri := fmt.Sprintf(_cancelURL, d.pickNode())
params := d.newParams(c)
params.Set("appid", ins.AppID)
params.Set("version", ins.Version)
params.Set("status", strconv.FormatInt(ins.Status, 10))
if ins.Metadata != nil {
var metadata []byte
if metadata, err = json.Marshal(ins.Metadata); err != nil {
log.Error("discovery:set instance Marshal metadata(%v) failed!error(%v)", ins.Metadata, err)
}
params.Set("metadata", string(metadata))
}
if err = d.httpClient.Post(ctx, uri, "", params, &res); err != nil {
// request
if err = d.httpClient.Post(context.TODO(), uri, "", params, &res); err != nil {
d.switchNode()
log.Error("discovery: set client.Get(%v) zone(%s) env(%s) appid(%s) addrs(%v) error(%v)",
uri, conf.Zone, conf.Env, ins.AppID, ins.Addrs, err)
log.Error("discovery cancel client.Get(%v) env(%s) appid(%s) hostname(%s) error(%v)",
uri, c.Env, ins.AppID, c.Host, err)
return
}
if ec := ecode.Int(res.Code); !ec.Equal(ecode.OK) {
log.Warn("discovery: set client.Get(%v) env(%s) appid(%s) addrs(%v) code(%v)",
uri, conf.Env, ins.AppID, ins.Addrs, res.Code)
log.Warn("discovery cancel client.Get(%v) env(%s) appid(%s) hostname(%s) code(%v)",
uri, c.Env, ins.AppID, c.Host, res.Code)
err = ec
return
}
log.Info("discovery: set client.Get(%v) env(%s) appid(%s) addrs(%s) success",
uri+"?"+params.Encode(), conf.Env, ins.AppID, ins.Addrs)
log.Info("discovery cancel client.Get(%v) env(%s) appid(%s) hostname(%s) success",
uri, c.Env, ins.AppID, c.Host)
return
}
// renew Renew an instance with discovery
func (d *Discovery) renew(ctx context.Context, ins *naming.Instance) (err error) {
// Set set ins status and metadata.
func (d *Discovery) Set(ins *naming.Instance) error {
return d.set(context.Background(), ins)
}
// set sets instance info with discovery
func (d *Discovery) set(ctx context.Context, ins *naming.Instance) (err error) {
d.mutex.RLock()
conf := d.conf
conf := d.c
d.mutex.RUnlock()
res := new(struct {
Code int `json:"code"`
Message string `json:"message"`
})
uri := fmt.Sprintf(_renewURL, d.pickNode())
uri := fmt.Sprintf(_setURL, d.pickNode())
params := d.newParams(conf)
params.Set("appid", ins.AppID)
params.Set("version", ins.Version)
params.Set("status", _statusUP)
if ins.Metadata != nil {
var metadata []byte
if metadata, err = json.Marshal(ins.Metadata); err != nil {
log.Error("discovery:set instance Marshal metadata(%v) failed!error(%v)", ins.Metadata, err)
return
}
params.Set("metadata", string(metadata))
}
if err = d.httpClient.Post(ctx, uri, "", params, &res); err != nil {
d.switchNode()
log.Error("discovery: renew client.Get(%v) env(%s) appid(%s) hostname(%s) error(%v)",
uri, conf.Env, ins.AppID, conf.Host, err)
log.Error("discovery: set client.Get(%v) zone(%s) env(%s) appid(%s) addrs(%v) error(%v)",
uri, conf.Zone, conf.Env, ins.AppID, ins.Addrs, err)
return
}
if ec := ecode.Int(res.Code); !ec.Equal(ecode.OK) {
log.Warn("discovery: set client.Get(%v) env(%s) appid(%s) addrs(%v) code(%v)",
uri, conf.Env, ins.AppID, ins.Addrs, res.Code)
err = ec
if ec.Equal(ecode.NothingFound) {
return
}
log.Error("discovery: renew client.Get(%v) env(%s) appid(%s) hostname(%s) code(%v)",
uri, conf.Env, ins.AppID, conf.Host, res.Code)
return
}
log.Info("discovery: set client.Get(%v) env(%s) appid(%s) addrs(%s) success", uri+"?"+params.Encode(), conf.Env, ins.AppID, ins.Addrs)
return
}
@ -518,7 +500,6 @@ func (d *Discovery) serverproc() {
ctx context.Context
cancel context.CancelFunc
)
bc := netutil.DefaultBackoffConfig
ticker := time.NewTicker(time.Minute * 30)
defer ticker.Stop()
for {
@ -531,16 +512,17 @@ func (d *Discovery) serverproc() {
select {
case <-d.ctx.Done():
return
case <-ticker.C:
default:
}
apps, err := d.polls(ctx, d.pickNode())
apps, err := d.polls(ctx)
if err != nil {
d.switchNode()
if ctx.Err() == context.Canceled {
ctx = nil
continue
}
time.Sleep(bc.Backoff(retry))
time.Sleep(time.Second)
retry++
continue
}
@ -549,38 +531,23 @@ func (d *Discovery) serverproc() {
}
}
func (d *Discovery) nodes() (nodes []string) {
res := new(struct {
Code int `json:"code"`
Data []struct {
Addr string `json:"addr"`
} `json:"data"`
})
uri := fmt.Sprintf(_nodesURL, d.pickNode())
if err := d.httpClient.Get(d.ctx, uri, "", nil, res); err != nil {
d.switchNode()
log.Error("discovery: consumer client.Get(%v)error(%+v)", uri, err)
return
}
if ec := ecode.Int(res.Code); !ec.Equal(ecode.OK) {
log.Error("discovery: consumer client.Get(%v) error(%v)", uri, res.Code)
return
}
if len(res.Data) == 0 {
log.Warn("discovery: get nodes(%s) failed,no nodes found!", uri)
return
}
nodes = make([]string, 0, len(res.Data))
for i := range res.Data {
nodes = append(nodes, res.Data[i].Addr)
func (d *Discovery) pickNode() string {
nodes, ok := d.node.Load().([]string)
if !ok || len(nodes) == 0 {
return d.c.Nodes[rand.Intn(len(d.c.Nodes))]
}
return
return nodes[atomic.LoadUint64(&d.nodeIdx)%uint64(len(nodes))]
}
func (d *Discovery) switchNode() {
atomic.AddUint64(&d.nodeIdx, 1)
}
func (d *Discovery) polls(ctx context.Context, host string) (apps map[string]appData, err error) {
func (d *Discovery) polls(ctx context.Context) (apps map[string]*naming.InstancesInfo, err error) {
var (
lastTs []int64
appid []string
lastTss []int64
appIDs []string
host = d.pickNode()
changed bool
)
if host != d.lastHost {
@ -588,46 +555,41 @@ func (d *Discovery) polls(ctx context.Context, host string) (apps map[string]app
changed = true
}
d.mutex.RLock()
conf := d.conf
c := d.c
for k, v := range d.apps {
if changed {
v.lastTs = 0
}
appid = append(appid, k)
lastTs = append(lastTs, v.lastTs)
appIDs = append(appIDs, k)
lastTss = append(lastTss, v.lastTs)
}
d.mutex.RUnlock()
if len(appid) == 0 {
if len(appIDs) == 0 {
return
}
uri := fmt.Sprintf(_pollURL, host)
res := new(struct {
Code int `json:"code"`
Message string `json:"message"`
Data map[string]appData `json:"data"`
Code int `json:"code"`
Data map[string]*naming.InstancesInfo `json:"data"`
})
params := url.Values{}
params.Set("env", conf.Env)
params.Set("hostname", conf.Host)
params.Set("appid", strings.Join(appid, ","))
params.Set("latest_timestamp", xstr.JoinInts(lastTs))
params.Set("env", c.Env)
params.Set("hostname", c.Host)
for _, appid := range appIDs {
params.Add("appid", appid)
}
for _, ts := range lastTss {
params.Add("latest_timestamp", strconv.FormatInt(ts, 10))
}
if err = d.httpClient.Get(ctx, uri, "", params, res); err != nil {
d.switchNode()
log.Error("discovery: client.Get(%s) error(%+v)", uri+"?"+params.Encode(), err)
return
}
if ec := ecode.Int(res.Code); !ec.Equal(ecode.OK) {
if !ec.Equal(ecode.NotModified) {
log.Error("discovery: client.Get(%s) get error code(%d) message(%s)", uri+"?"+params.Encode(), res.Code, res.Message)
log.Error("discovery: client.Get(%s) get error code(%d)", uri+"?"+params.Encode(), res.Code)
err = ec
if ec.Equal(ecode.NothingFound) {
for appID, value := range res.Data {
if value.Err != "" {
errInfo := fmt.Sprintf("discovery: app(%s) on ENV(%s) %s!\n", appID, conf.Env, value.Err)
log.Error(errInfo)
fmt.Fprintf(os.Stderr, errInfo)
}
}
}
}
return
}
@ -639,18 +601,17 @@ func (d *Discovery) polls(ctx context.Context, host string) (apps map[string]app
return
}
}
log.Info("discovery: polls uri(%s)", uri+"?"+params.Encode())
log.Info("discovery: successfully polls(%s) instances (%s)", uri+"?"+params.Encode(), info)
apps = res.Data
return
}
func (d *Discovery) broadcast(apps map[string]appData) {
for id, v := range apps {
func (d *Discovery) broadcast(apps map[string]*naming.InstancesInfo) {
for appID, v := range apps {
var count int
for zone, ins := range v.ZoneInstances {
for zone, ins := range v.Instances {
if len(ins) == 0 {
delete(v.ZoneInstances, zone)
delete(v.Instances, zone)
}
count += len(ins)
}
@ -658,11 +619,11 @@ func (d *Discovery) broadcast(apps map[string]appData) {
continue
}
d.mutex.RLock()
app, ok := d.apps[id]
app, ok := d.apps[appID]
d.mutex.RUnlock()
if ok {
app.lastTs = v.LastTs
app.zoneIns.Store(v.ZoneInstances)
app.zoneIns.Store(v)
d.mutex.RLock()
for rs := range app.resolver {
select {
@ -675,10 +636,38 @@ func (d *Discovery) broadcast(apps map[string]appData) {
}
}
func (d *Discovery) newParams(conf *Config) url.Values {
func (d *Discovery) newParams(c *Config) url.Values {
params := url.Values{}
params.Set("zone", conf.Zone)
params.Set("env", conf.Env)
params.Set("hostname", conf.Host)
params.Set("region", c.Region)
params.Set("zone", c.Zone)
params.Set("env", c.Env)
params.Set("hostname", c.Host)
return params
}
var r = rand.New(rand.NewSource(time.Now().UnixNano()))
// shuffle pseudo-randomizes the order of elements.
// n is the number of elements. Shuffle panics if n < 0.
// swap swaps the elements with indexes i and j.
func shuffle(n int, swap func(i, j int)) {
if n < 0 {
panic("invalid argument to Shuffle")
}
// Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
// Shuffle really ought not be called with n that doesn't fit in 32 bits.
// Not only will it take a very long time, but with 2³¹! possible permutations,
// there's no way that any PRNG can have a big enough internal state to
// generate even a minuscule percentage of the possible permutations.
// Nevertheless, the right API signature accepts an int n, so handle it as best we can.
i := n - 1
for ; i > 1<<31-1-1; i-- {
j := int(r.Int63n(int64(i + 1)))
swap(i, j)
}
for ; i > 0; i-- {
j := int(r.Int31n(int32(i + 1)))
swap(i, j)
}
}
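A same-package sketch of the rand.Shuffle-compatible contract (kept local here, apparently to support Go versions before 1.10):

func exampleShuffle() []string {
	nodes := []string{"n1:7171", "n2:7171", "n3:7171"} // hypothetical addresses
	shuffle(len(nodes), func(i, j int) { nodes[i], nodes[j] = nodes[j], nodes[i] })
	return nodes // pseudo-random order, seeded from time.Now().UnixNano()
}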

@ -2,27 +2,30 @@ package naming
import (
"context"
"strconv"
)
// metadata common key
const (
MetaZone = "zone"
MetaCluster = "cluster"
MetaWeight = "weight"
MetaCluster = "cluster"
MetaZone = "zone"
MetaColor = "color"
)
// Instance represents a server the client connects to.
type Instance struct {
// Region is region.
Region string `json:"region"`
// Zone is IDC.
Zone string `json:"zone"`
// Env prod/pre/uat/fat1
// Env prod/pre/uat/fat1
Env string `json:"env"`
// AppID is mapping servicetree appid.
AppID string `json:"appid"`
// Hostname is hostname from docker.
Hostname string `json:"hostname"`
// Addrs is the adress of app instance
// Addrs is the address of app instance
// format: scheme://host
Addrs []string `json:"addrs"`
// Version is publishing version.
@ -32,20 +35,18 @@ type Instance struct {
// Metadata is the information associated with Addr, which may be used
// to make load balancing decision.
Metadata map[string]string `json:"metadata"`
// Status status
Status int64
}
// Resolver resolve naming service
type Resolver interface {
Fetch(context.Context) (map[string][]*Instance, bool)
Fetch(context.Context) (*InstancesInfo, bool)
Watch() <-chan struct{}
Close() error
}
// Registry Register an instance and renew automatically
// Registry Register an instance and renew automatically.
type Registry interface {
Register(context.Context, *Instance) (context.CancelFunc, error)
Register(ctx context.Context, ins *Instance) (cancel context.CancelFunc, err error)
Close() error
}
@ -54,3 +55,77 @@ type Builder interface {
Build(id string) Resolver
Scheme() string
}
// InstancesInfo instance info.
type InstancesInfo struct {
Instances map[string][]*Instance `json:"instances"`
LastTs int64 `json:"latest_timestamp"`
Scheduler []Zone `json:"scheduler"`
}
// Zone zone scheduler info.
type Zone struct {
Src string `json:"src"`
Dst map[string]int64 `json:"dst"`
}
// UseScheduler applies scheduler info to the instances.
// If instancesInfo contains scheduler info for the zone,
// the related zones' instances are returned, weighted by the scheduler.
// If not, only the zone's own instances are returned (falling back to all instances when the zone has none).
func (insInf *InstancesInfo) UseScheduler(zone string) (inss []*Instance) {
var scheduler struct {
zone []string
weights []int64
}
var oriWeights []int64
for _, sch := range insInf.Scheduler {
if sch.Src == zone {
for zone, schWeight := range sch.Dst {
if zins, ok := insInf.Instances[zone]; ok {
var totalWeight int64
for _, ins := range zins {
var weight int64
if weight, _ = strconv.ParseInt(ins.Metadata[MetaWeight], 10, 64); weight <= 0 {
weight = 10
}
totalWeight += weight
}
oriWeights = append(oriWeights, totalWeight)
inss = append(inss, zins...)
}
scheduler.weights = append(scheduler.weights, schWeight)
scheduler.zone = append(scheduler.zone, zone)
}
}
}
if len(inss) == 0 {
var ok bool
if inss, ok = insInf.Instances[zone]; ok {
return
}
for _, v := range insInf.Instances {
inss = append(inss, v...)
}
return
}
var comMulti int64 = 1
for _, weight := range oriWeights {
comMulti *= weight
}
var fixWeight = make(map[string]int64, len(scheduler.weights))
for i, zone := range scheduler.zone {
fixWeight[zone] = scheduler.weights[i] * comMulti / oriWeights[i]
}
for _, ins := range inss {
var weight int64
if weight, _ = strconv.ParseInt(ins.Metadata[MetaWeight], 10, 64); weight <= 0 {
weight = 10
}
if fix, ok := fixWeight[ins.Zone]; ok {
weight = weight * fix
}
ins.Metadata[MetaWeight] = strconv.FormatInt(weight, 10)
}
return
}
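A hedged sketch of UseScheduler (zones and weights invented): scheduler weights from "sh001" split 3:1 between "sh001" and "sh002", so instances from both zones are returned with their MetaWeight metadata rescaled accordingly.

package main

import (
	"fmt"

	"github.com/bilibili/kratos/pkg/naming"
)

func main() {
	info := &naming.InstancesInfo{
		Instances: map[string][]*naming.Instance{
			"sh001": {{Zone: "sh001", Hostname: "a", Metadata: map[string]string{naming.MetaWeight: "10"}}},
			"sh002": {{Zone: "sh002", Hostname: "b", Metadata: map[string]string{naming.MetaWeight: "10"}}},
		},
		Scheduler: []naming.Zone{{Src: "sh001", Dst: map[string]int64{"sh001": 3, "sh002": 1}}},
	}
	for _, ins := range info.UseScheduler("sh001") {
		fmt.Println(ins.Hostname, ins.Metadata[naming.MetaWeight]) // rescaled weights, e.g. a=300, b=100
	}
}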

@ -0,0 +1,57 @@
package criticality
// Criticality is the criticality level of a request.
type Criticality string
// criticality
var (
// EmptyCriticality is used to mark any invalid criticality, and the empty criticality will be parsed as the default criticality later.
EmptyCriticality = Criticality("")
// CriticalPlus is reserved for the most critical requests, those that will result in serious user-visible impact if they fail.
CriticalPlus = Criticality("CRITICAL_PLUS")
// Critical is the default value for requests sent from production jobs. These requests will result in user-visible impact, but the impact may be less severe than those of CRITICAL_PLUS. Services are expected to provision enough capacity for all expected CRITICAL and CRITICAL_PLUS traffic.
Critical = Criticality("CRITICAL")
// SheddablePlus is traffic for which partial unavailability is expected. This is the default for batch jobs, which can retry requests minutes or even hours later.
SheddablePlus = Criticality("SHEDDABLE_PLUS")
// Sheddable is traffic for which frequent partial unavailability and occasional full unavailability is expected.
Sheddable = Criticality("SHEDDABLE")
// higher is more critical
_criticalityEnum = map[Criticality]int{
CriticalPlus: 40,
Critical: 30,
SheddablePlus: 20,
Sheddable: 10,
}
_defaultCriticality = Critical
)
// Value is used to get criticality value, higher value is more critical.
func Value(in Criticality) int {
v, ok := _criticalityEnum[in]
if !ok {
return _criticalityEnum[_defaultCriticality]
}
return v
}
// Higher compares the input criticality with self and returns true if the input is more critical than self.
func (c Criticality) Higher(in Criticality) bool {
return Value(in) > Value(c)
}
// Parse parses a raw criticality string into a valid criticality. Any invalid input is parsed as the empty criticality.
func Parse(raw string) Criticality {
crtl := Criticality(raw)
if _, ok := _criticalityEnum[crtl]; ok {
return crtl
}
return EmptyCriticality
}
// Exist reports whether the criticality exists in the enumeration.
func Exist(c Criticality) bool {
_, ok := _criticalityEnum[c]
return ok
}
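A hedged usage sketch of the criticality helpers defined above:

package main

import (
	"fmt"

	"github.com/bilibili/kratos/pkg/net/criticality"
)

func main() {
	c := criticality.Parse("CRITICAL_PLUS")               // unknown strings parse to EmptyCriticality
	fmt.Println(criticality.Value(c))                     // 40
	fmt.Println(criticality.Critical.Higher(c))           // true: CRITICAL_PLUS outranks CRITICAL
	fmt.Println(criticality.Exist(criticality.Sheddable)) // true
}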

@ -253,9 +253,11 @@ func (client *Client) Raw(c context.Context, req *xhttp.Request, v ...string) (b
setTimeout(req, timeout)
req = req.WithContext(c)
setCaller(req)
if color := metadata.String(c, metadata.Color); color != "" {
setColor(req, color)
}
metadata.Range(c,
func(key string, value interface{}) {
setMetadata(req, key, value)
},
metadata.IsOutgoingKey)
if resp, err = client.client.Do(req); err != nil {
err = pkgerr.Wrapf(err, "host:%s, url:%s", req.URL.Host, realURL(req))
code = "failed"

@ -5,6 +5,7 @@ import (
"math"
"net/http"
"strconv"
"text/template"
"github.com/bilibili/kratos/pkg/ecode"
"github.com/bilibili/kratos/pkg/net/http/blademaster/binding"
@ -144,9 +145,8 @@ func (c *Context) Render(code int, r render.Render) {
}
params := c.Request.Form
cb := params.Get("callback")
jsonp := cb != "" && params.Get("jsonp") == "jsonp"
cb := template.JSEscapeString(params.Get("callback"))
jsonp := cb != ""
if jsonp {
c.Writer.Write([]byte(cb))
c.Writer.Write(_openParen)

@ -0,0 +1,21 @@
package blademaster
import (
criticalityPkg "github.com/bilibili/kratos/pkg/net/criticality"
"github.com/bilibili/kratos/pkg/net/metadata"
"github.com/pkg/errors"
)
// Criticality returns a middleware that records the route's criticality in the request metadata.
func Criticality(pathCriticality criticalityPkg.Criticality) HandlerFunc {
if !criticalityPkg.Exist(pathCriticality) {
panic(errors.Errorf("This criticality is not exist: %s", pathCriticality))
}
return func(ctx *Context) {
md, ok := metadata.FromContext(ctx)
if ok {
md[metadata.Criticality] = string(pathCriticality)
}
}
}
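A hedged sketch (blademaster server API assumed) of attaching the middleware to a route so the handler can read the criticality back from metadata:

package main

import (
	"github.com/bilibili/kratos/pkg/net/criticality"
	bm "github.com/bilibili/kratos/pkg/net/http/blademaster"
	"github.com/bilibili/kratos/pkg/net/metadata"
)

func main() {
	e := bm.DefaultServer(nil)
	e.GET("/ping", bm.Criticality(criticality.CriticalPlus), func(c *bm.Context) {
		md, _ := metadata.FromContext(c)
		c.JSON(md[metadata.Criticality], nil) // echoes "CRITICAL_PLUS"
	})
	_ = e.Start()
}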

@ -1,6 +1,7 @@
package blademaster
import (
"fmt"
"net/http"
"strconv"
"strings"
@ -8,6 +9,8 @@ import (
"github.com/bilibili/kratos/pkg/conf/env"
"github.com/bilibili/kratos/pkg/log"
"github.com/bilibili/kratos/pkg/net/criticality"
"github.com/bilibili/kratos/pkg/net/metadata"
"github.com/pkg/errors"
)
@ -15,52 +18,64 @@ import (
const (
// http head
_httpHeaderUser = "x1-bmspy-user"
_httpHeaderColor = "x1-bmspy-color"
_httpHeaderTimeout = "x1-bmspy-timeout"
_httpHeaderMirror = "x1-bmspy-mirror"
_httpHeaderRemoteIP = "x-backend-bm-real-ip"
_httpHeaderRemoteIPPort = "x-backend-bm-real-ipport"
)
// mirror returns true if the x1-bmspy-mirror http header is present and its value is 1 or true.
func mirror(req *http.Request) bool {
mirrorStr := req.Header.Get(_httpHeaderMirror)
if mirrorStr == "" {
return false
}
val, err := strconv.ParseBool(mirrorStr)
if err != nil {
log.Warn("blademaster: failed to parse mirror: %+v", errors.Wrap(err, mirrorStr))
return false
}
if !val {
log.Warn("blademaster: request mirrorStr value :%s is false", mirrorStr)
}
return val
}
const (
_httpHeaderMetadata = "x-bm-metadata-"
)
// setCaller set caller into http request.
func setCaller(req *http.Request) {
req.Header.Set(_httpHeaderUser, env.AppID)
var _parser = map[string]func(string) interface{}{
"mirror": func(mirrorStr string) interface{} {
if mirrorStr == "" {
return false
}
val, err := strconv.ParseBool(mirrorStr)
if err != nil {
log.Warn("blademaster: failed to parse mirror: %+v", errors.Wrap(err, mirrorStr))
return false
}
if !val {
log.Warn("blademaster: request mirrorStr value :%s is false", mirrorStr)
}
return val
},
"criticality": func(in string) interface{} {
if crtl := criticality.Criticality(in); crtl != criticality.EmptyCriticality {
return string(crtl)
}
return string(criticality.Critical)
},
}
// caller get caller from http request.
func caller(req *http.Request) string {
return req.Header.Get(_httpHeaderUser)
func parseMetadataTo(req *http.Request, to metadata.MD) {
for rawKey := range req.Header {
key := strings.ReplaceAll(strings.TrimLeft(strings.ToLower(rawKey), _httpHeaderMetadata), "-", "_")
rawValue := req.Header.Get(rawKey)
var value interface{} = rawValue
parser, ok := _parser[key]
if ok {
value = parser(rawValue)
}
to[key] = value
}
return
}
// setColor set color into http request.
func setColor(req *http.Request, color string) {
req.Header.Set(_httpHeaderColor, color)
func setMetadata(req *http.Request, key string, value interface{}) {
strV, ok := value.(string)
if !ok {
return
}
header := fmt.Sprintf("%s%s", _httpHeaderMetadata, strings.ReplaceAll(key, "_", "-"))
req.Header.Set(header, strV)
}
// color get color from http request.
func color(req *http.Request) string {
c := req.Header.Get(_httpHeaderColor)
if c == "" {
c = env.Color
}
return c
// setCaller set caller into http request.
func setCaller(req *http.Request) {
req.Header.Set(_httpHeaderUser, env.AppID)
}
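A hedged, same-package sketch of the outgoing side implemented above: string metadata values become "x-bm-metadata-<key>" headers (underscores turned into dashes), while non-string values are skipped by setMetadata.

func exampleSetMetadata() *http.Request {
	req, _ := http.NewRequest("GET", "http://example.com/ping", nil) // hypothetical URL
	setMetadata(req, "criticality", "CRITICAL_PLUS")
	setMetadata(req, "trace_id", "abc123")
	// req.Header now carries "X-Bm-Metadata-Criticality" and "X-Bm-Metadata-Trace-Id";
	// parseMetadataTo on the server side reads incoming headers back into a metadata.MD.
	return req
}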
// setTimeout set timeout into http request.

@ -0,0 +1,62 @@
package blademaster
import (
"fmt"
"sync/atomic"
"time"
"github.com/bilibili/kratos/pkg/log"
limit "github.com/bilibili/kratos/pkg/ratelimit"
"github.com/bilibili/kratos/pkg/ratelimit/bbr"
"github.com/bilibili/kratos/pkg/stat/prom"
)
const (
_statName = "go_http_bbr"
)
var (
bbrStats = prom.New().WithState("go_http_bbr", []string{"url"})
)
// RateLimiter bbr middleware.
type RateLimiter struct {
group *bbr.Group
logTime int64
}
// NewRateLimiter returns a rate-limit middleware.
func NewRateLimiter(conf *bbr.Config) (s *RateLimiter) {
return &RateLimiter{
group: bbr.NewGroup(conf),
logTime: time.Now().UnixNano(),
}
}
func (b *RateLimiter) printStats(routePath string, limiter limit.Limiter) {
now := time.Now().UnixNano()
if now-atomic.LoadInt64(&b.logTime) > int64(time.Second*3) {
atomic.StoreInt64(&b.logTime, now)
log.Info("http.bbr path:%s stat:%+v", routePath, limiter.(*bbr.BBR).Stat())
}
}
// Limit returns a bm handler func.
func (b *RateLimiter) Limit() HandlerFunc {
return func(c *Context) {
uri := fmt.Sprintf("%s://%s%s", c.Request.URL.Scheme, c.Request.Host, c.Request.URL.Path)
limiter := b.group.Get(uri)
done, err := limiter.Allow(c)
if err != nil {
bbrStats.Incr(_statName, uri)
c.JSON(nil, err)
c.Abort()
return
}
defer func() {
done(limit.DoneInfo{Op: limit.Success})
b.printStats(uri, limiter)
}()
c.Next()
}
}
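A hedged, same-package sketch of wiring the limiter into an engine (DefaultServer/Use assumed; a nil config is assumed to fall back to bbr defaults):

func exampleRateLimit() *Engine {
	limiter := NewRateLimiter(nil) // nil: assumed bbr defaults
	engine := DefaultServer(nil)
	engine.Use(limiter.Limit())
	return engine
}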
