parent
693e7d3aea
commit
238bb11008
@ -0,0 +1,178 @@ |
||||
package naming |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"math/rand" |
||||
"net/url" |
||||
"os" |
||||
"sort" |
||||
|
||||
"github.com/bilibili/kratos/pkg/conf/env" |
||||
"github.com/bilibili/kratos/pkg/log" |
||||
|
||||
"github.com/dgryski/go-farm" |
||||
) |
||||
|
||||
// BuildOptions build options.
type BuildOptions struct {
	// Filter trims the zone->instances map, e.g. by URL scheme and cluster.
	Filter func(map[string][]*Instance) map[string][]*Instance
	// Subset selects at most n instances from a zone's instance list.
	Subset func([]*Instance, int) []*Instance
	// SubsetSize is the default n handed to Subset.
	SubsetSize int
	// ClientZone is the caller's zone, consulted by Scheduler.
	ClientZone string
	// Scheduler flattens an InstancesInfo into the final instance list.
	Scheduler func(*InstancesInfo) []*Instance
}
||||
|
||||
// BuildOpt build option interface.
type BuildOpt interface {
	// Apply mutates the given BuildOptions according to this option.
	Apply(*BuildOptions)
}
||||
|
||||
// funcOpt adapts a plain function into a BuildOpt.
type funcOpt struct {
	// f is invoked by Apply with the options to mutate.
	f func(*BuildOptions)
}
||||
|
||||
func (f *funcOpt) Apply(opt *BuildOptions) { |
||||
f.f(opt) |
||||
} |
||||
|
||||
// Filter filter option.
|
||||
func Filter(schema string, clusters map[string]struct{}) BuildOpt { |
||||
return &funcOpt{f: func(opt *BuildOptions) { |
||||
opt.Filter = func(inss map[string][]*Instance) map[string][]*Instance { |
||||
newInss := make(map[string][]*Instance) |
||||
for zone := range inss { |
||||
var instances []*Instance |
||||
for _, ins := range inss[zone] { |
||||
//如果r.clusters的长度大于0说明需要进行集群选择
|
||||
if len(clusters) > 0 { |
||||
if _, ok := clusters[ins.Metadata[MetaCluster]]; !ok { |
||||
continue |
||||
} |
||||
} |
||||
var addr string |
||||
for _, a := range ins.Addrs { |
||||
u, err := url.Parse(a) |
||||
if err == nil && u.Scheme == schema { |
||||
addr = u.Host |
||||
} |
||||
} |
||||
if addr == "" { |
||||
fmt.Fprintf(os.Stderr, "resolver: app(%s,%s) no valid grpc address(%v) found!", ins.AppID, ins.Hostname, ins.Addrs) |
||||
log.Warn("resolver: invalid rpc address(%s,%s,%v) found!", ins.AppID, ins.Hostname, ins.Addrs) |
||||
continue |
||||
} |
||||
instances = append(instances, ins) |
||||
} |
||||
newInss[zone] = instances |
||||
} |
||||
return newInss |
||||
} |
||||
}} |
||||
} |
||||
|
||||
// defulatSubset (sic: "default subset"; renaming would touch all in-package
// callers) deterministically picks `size` instances out of inss. For a given
// client hostname the result is stable across calls: backends are sorted by
// hostname, shuffled with a rand source seeded from the hostname hash, and a
// hostname-dependent window of `size` entries is returned.
// NOTE(review): the input slice is re-ordered in place (sort + shuffle) —
// callers must not rely on its original order.
func defulatSubset(inss []*Instance, size int) []*Instance {
	backends := inss
	// Nothing to subset when we already have at most `size` backends.
	if len(backends) <= int(size) {
		return backends
	}
	clientID := env.Hostname
	sort.Slice(backends, func(i, j int) bool {
		return backends[i].Hostname < backends[j].Hostname
	})
	// count is the number of disjoint windows of `size` backends (>= 1 here).
	count := len(backends) / size
	// Hash the client hostname into a stable 64-bit ID.
	id := farm.Fingerprint64([]byte(clientID))
	// Derive the deterministic shuffle seed ("round") from the ID.
	round := int64(id / uint64(count))

	s := rand.NewSource(round)
	ra := rand.New(s)
	// Shuffle deterministically according to the seeded source.
	ra.Shuffle(len(backends), func(i, j int) {
		backends[i], backends[j] = backends[j], backends[i]
	})
	// Select this client's window inside the shuffled list.
	start := (id % uint64(count)) * uint64(size)
	return backends[int(start) : int(start)+int(size)]
}
||||
|
||||
// Subset Subset option.
|
||||
func Subset(defaultSize int) BuildOpt { |
||||
return &funcOpt{f: func(opt *BuildOptions) { |
||||
opt.SubsetSize = defaultSize |
||||
opt.Subset = defulatSubset |
||||
}} |
||||
} |
||||
|
||||
// ScheduleNode ScheduleNode option.
|
||||
func ScheduleNode(clientZone string) BuildOpt { |
||||
return &funcOpt{f: func(opt *BuildOptions) { |
||||
opt.ClientZone = clientZone |
||||
opt.Scheduler = func(app *InstancesInfo) (instances []*Instance) { |
||||
type Zone struct { |
||||
inss []*Instance |
||||
weight int64 |
||||
name string |
||||
score float64 |
||||
} |
||||
var zones []*Zone |
||||
|
||||
if app.Scheduler != nil { |
||||
si, err := json.Marshal(app.Scheduler) |
||||
if err == nil { |
||||
log.Info("schedule info: %s", string(si)) |
||||
} |
||||
if strategy, ok := app.Scheduler.Clients[clientZone]; ok { |
||||
var min *Zone |
||||
for name, zone := range strategy.Zones { |
||||
inss := app.Instances[name] |
||||
if len(inss) == 0 { |
||||
continue |
||||
} |
||||
z := &Zone{ |
||||
inss: inss, |
||||
weight: zone.Weight, |
||||
name: name, |
||||
score: float64(len(inss)) / float64(zone.Weight), |
||||
} |
||||
if min == nil || z.score < min.score { |
||||
min = z |
||||
} |
||||
zones = append(zones, z) |
||||
} |
||||
if opt.SubsetSize != 0 && len(min.inss) > opt.SubsetSize { |
||||
min.score = float64(opt.SubsetSize) / float64(min.weight) |
||||
} |
||||
for _, z := range zones { |
||||
nums := int(min.score * float64(z.weight)) |
||||
if nums == 0 { |
||||
nums = 1 |
||||
} |
||||
if nums < len(z.inss) { |
||||
if opt.Subset != nil { |
||||
z.inss = opt.Subset(z.inss, nums) |
||||
} else { |
||||
z.inss = defulatSubset(z.inss, nums) |
||||
} |
||||
} |
||||
} |
||||
} |
||||
} |
||||
for _, zone := range zones { |
||||
for _, ins := range zone.inss { |
||||
instances = append(instances, ins) |
||||
} |
||||
} |
||||
//如果没有拿到节点,则选择直接获取
|
||||
if len(instances) == 0 { |
||||
instances = app.Instances[clientZone] |
||||
if len(instances) == 0 { |
||||
for _, value := range app.Instances { |
||||
instances = append(instances, value...) |
||||
} |
||||
} |
||||
} |
||||
return |
||||
} |
||||
}} |
||||
} |
@ -0,0 +1,299 @@ |
||||
package naming |
||||
|
||||
import ( |
||||
"fmt" |
||||
"reflect" |
||||
"testing" |
||||
) |
||||
|
||||
func Test_Subset(t *testing.T) { |
||||
var inss1 []*Instance |
||||
for i := 0; i < 200; i++ { |
||||
ins := &Instance{ |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
} |
||||
inss1 = append(inss1, ins) |
||||
} |
||||
var opt BuildOptions |
||||
s := Subset(50) |
||||
s.Apply(&opt) |
||||
sub1 := opt.Subset(inss1, opt.SubsetSize) |
||||
if len(sub1) != 50 { |
||||
t.Fatalf("subset size should be 50") |
||||
} |
||||
sub2 := opt.Subset(inss1, opt.SubsetSize) |
||||
if !reflect.DeepEqual(sub1, sub2) { |
||||
t.Fatalf("two subsets should equal") |
||||
} |
||||
} |
||||
|
||||
func Test_FilterClusters(t *testing.T) { |
||||
inss := map[string][]*Instance{ |
||||
"sh001": []*Instance{&Instance{ |
||||
Addrs: []string{"grpc://127.0.0.1:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
}, &Instance{ |
||||
Addrs: []string{"http://127.0.0.2:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
}, &Instance{ |
||||
Addrs: []string{"grpc://127.0.0.3:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c2"}, |
||||
}}, |
||||
"sh002": []*Instance{&Instance{ |
||||
Addrs: []string{"grpc://127.0.0.1:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c3"}, |
||||
}, &Instance{ |
||||
Addrs: []string{"zk://127.0.0.2:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c3"}, |
||||
}}, |
||||
} |
||||
res := map[string][]*Instance{ |
||||
"sh001": []*Instance{&Instance{ |
||||
Addrs: []string{"grpc://127.0.0.1:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
}}, |
||||
"sh002": []*Instance{&Instance{ |
||||
Addrs: []string{"grpc://127.0.0.1:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c3"}, |
||||
}}, |
||||
} |
||||
var opt BuildOptions |
||||
f := Filter("grpc", map[string]struct{}{"c1": struct{}{}, "c3": struct{}{}}) |
||||
f.Apply(&opt) |
||||
filtered := opt.Filter(inss) |
||||
equal := reflect.DeepEqual(filtered, res) |
||||
if !equal { |
||||
t.Fatalf("Filter grpc should equal,filtered:%v expected:%v", filtered, res) |
||||
} |
||||
} |
||||
|
||||
func Test_FilterInvalidAddr(t *testing.T) { |
||||
inss := map[string][]*Instance{ |
||||
"sh001": []*Instance{&Instance{ |
||||
Addrs: []string{"grpc://127.0.0.1:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
}, &Instance{ |
||||
Addrs: []string{"http://127.0.0.2:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
}, &Instance{ |
||||
Addrs: []string{"grpc://127.0.0.3:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c2"}, |
||||
}}, |
||||
"sh002": []*Instance{&Instance{ |
||||
Addrs: []string{"grpc://127.0.0.1:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c3"}, |
||||
}, &Instance{ |
||||
Addrs: []string{"zk://127.0.0.2:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c3"}, |
||||
}}, |
||||
} |
||||
res := map[string][]*Instance{ |
||||
"sh001": []*Instance{&Instance{ |
||||
Addrs: []string{"grpc://127.0.0.1:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
}, &Instance{ |
||||
Addrs: []string{"grpc://127.0.0.3:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c2"}, |
||||
}}, |
||||
"sh002": []*Instance{&Instance{ |
||||
Addrs: []string{"grpc://127.0.0.1:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c3"}, |
||||
}}, |
||||
} |
||||
var opt BuildOptions |
||||
f := Filter("grpc", nil) |
||||
f.Apply(&opt) |
||||
filtered := opt.Filter(inss) |
||||
equal := reflect.DeepEqual(filtered, res) |
||||
if !equal { |
||||
t.Fatalf("Filter grpc should equal,filtered:%v expected:%v", filtered, res) |
||||
} |
||||
} |
||||
|
||||
func Test_Schedule(t *testing.T) { |
||||
app := &InstancesInfo{ |
||||
Instances: map[string][]*Instance{ |
||||
"sh001": []*Instance{&Instance{ |
||||
Zone: "sh001", |
||||
Addrs: []string{"grpc://127.0.0.1:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
}, &Instance{ |
||||
Zone: "sh001", |
||||
Addrs: []string{"grpc://127.0.0.2:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
}, &Instance{ |
||||
Zone: "sh001", |
||||
Addrs: []string{"grpc://127.0.0.3:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c2"}, |
||||
}}, |
||||
"sh002": []*Instance{&Instance{ |
||||
Zone: "sh002", |
||||
Addrs: []string{"grpc://127.0.0.1:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c3"}, |
||||
}, &Instance{ |
||||
Zone: "sh002", |
||||
Addrs: []string{"grpc://127.0.0.2:9000"}, |
||||
Metadata: map[string]string{MetaCluster: "c3"}, |
||||
}}, |
||||
}, |
||||
Scheduler: &Scheduler{map[string]*ZoneStrategy{"sh001": &ZoneStrategy{ |
||||
Zones: map[string]*Strategy{ |
||||
"sh001": &Strategy{10}, |
||||
"sh002": &Strategy{20}, |
||||
}, |
||||
}}}, |
||||
} |
||||
var opt BuildOptions |
||||
f := ScheduleNode("sh001") |
||||
f.Apply(&opt) |
||||
err := compareAddr(opt.Scheduler(app), map[string]int{"sh002": 2, "sh001": 1}) |
||||
if err != nil { |
||||
t.Fatalf(err.Error()) |
||||
} |
||||
} |
||||
|
||||
func Test_Schedule2(t *testing.T) { |
||||
app := &InstancesInfo{ |
||||
Instances: map[string][]*Instance{}, |
||||
Scheduler: &Scheduler{map[string]*ZoneStrategy{"sh001": &ZoneStrategy{ |
||||
Zones: map[string]*Strategy{ |
||||
"sh001": &Strategy{10}, |
||||
"sh002": &Strategy{20}, |
||||
}, |
||||
}}}, |
||||
} |
||||
for i := 0; i < 30; i++ { |
||||
ins := &Instance{ |
||||
Zone: "sh001", |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
} |
||||
app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) |
||||
} |
||||
for i := 0; i < 30; i++ { |
||||
ins := &Instance{ |
||||
Zone: "sh002", |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
Metadata: map[string]string{MetaCluster: "c2"}, |
||||
} |
||||
app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) |
||||
} |
||||
var opt BuildOptions |
||||
f := ScheduleNode("sh001") |
||||
f.Apply(&opt) |
||||
err := compareAddr(opt.Scheduler(app), map[string]int{"sh002": 30, "sh001": 15}) |
||||
if err != nil { |
||||
t.Fatalf(err.Error()) |
||||
} |
||||
} |
||||
|
||||
func Test_Schedule3(t *testing.T) { |
||||
app := &InstancesInfo{ |
||||
Instances: map[string][]*Instance{}, |
||||
Scheduler: &Scheduler{map[string]*ZoneStrategy{"sh001": &ZoneStrategy{ |
||||
Zones: map[string]*Strategy{ |
||||
"sh001": &Strategy{1}, |
||||
"sh002": &Strategy{30}, |
||||
}, |
||||
}}}, |
||||
} |
||||
for i := 0; i < 30; i++ { |
||||
ins := &Instance{ |
||||
Zone: "sh001", |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
} |
||||
app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) |
||||
} |
||||
for i := 0; i < 30; i++ { |
||||
ins := &Instance{ |
||||
Zone: "sh002", |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
Metadata: map[string]string{MetaCluster: "c2"}, |
||||
} |
||||
app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) |
||||
} |
||||
var opt BuildOptions |
||||
f := ScheduleNode("sh001") |
||||
f.Apply(&opt) |
||||
err := compareAddr(opt.Scheduler(app), map[string]int{"sh002": 30, "sh001": 1}) |
||||
if err != nil { |
||||
t.Fatalf(err.Error()) |
||||
} |
||||
} |
||||
|
||||
func Test_Schedule4(t *testing.T) { |
||||
app := &InstancesInfo{ |
||||
Instances: map[string][]*Instance{}, |
||||
Scheduler: &Scheduler{map[string]*ZoneStrategy{"sh001": &ZoneStrategy{ |
||||
Zones: map[string]*Strategy{ |
||||
"sh001": &Strategy{1}, |
||||
"sh002": &Strategy{30}, |
||||
}, |
||||
}}}, |
||||
} |
||||
for i := 0; i < 30; i++ { |
||||
ins := &Instance{ |
||||
Zone: "sh001", |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
} |
||||
app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) |
||||
} |
||||
|
||||
var opt BuildOptions |
||||
f := ScheduleNode("sh001") |
||||
f.Apply(&opt) |
||||
err := compareAddr(opt.Scheduler(app), map[string]int{"sh001": 30, "sh002": 0}) |
||||
if err != nil { |
||||
t.Fatalf(err.Error()) |
||||
} |
||||
} |
||||
|
||||
func Test_Schedule5(t *testing.T) { |
||||
app := &InstancesInfo{ |
||||
Instances: map[string][]*Instance{}, |
||||
Scheduler: &Scheduler{map[string]*ZoneStrategy{"sh001": &ZoneStrategy{ |
||||
Zones: map[string]*Strategy{ |
||||
"sh002": &Strategy{30}, |
||||
}, |
||||
}}}, |
||||
} |
||||
for i := 0; i < 30; i++ { |
||||
ins := &Instance{ |
||||
Zone: "sh001", |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
Metadata: map[string]string{MetaCluster: "c1"}, |
||||
} |
||||
app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) |
||||
} |
||||
for i := 0; i < 30; i++ { |
||||
ins := &Instance{ |
||||
Zone: "sh002", |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
Metadata: map[string]string{MetaCluster: "c2"}, |
||||
} |
||||
app.Instances[ins.Zone] = append(app.Instances[ins.Zone], ins) |
||||
} |
||||
var opt BuildOptions |
||||
f := ScheduleNode("sh001") |
||||
f.Apply(&opt) |
||||
err := compareAddr(opt.Scheduler(app), map[string]int{"sh002": 30, "sh001": 0}) |
||||
if err != nil { |
||||
t.Fatalf(err.Error()) |
||||
} |
||||
} |
||||
|
||||
func compareAddr(inss []*Instance, c map[string]int) (err error) { |
||||
for _, ins := range inss { |
||||
c[ins.Zone] = c[ins.Zone] - 1 |
||||
} |
||||
for zone, v := range c { |
||||
if v != 0 { |
||||
err = fmt.Errorf("zone(%s) nums is %d", zone, v) |
||||
return |
||||
} |
||||
} |
||||
return |
||||
} |
@ -1,125 +0,0 @@ |
||||
package resolver |
||||
|
||||
import ( |
||||
"fmt" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/bilibili/kratos/pkg/naming" |
||||
) |
||||
|
||||
func Test_FilterLittle(t *testing.T) { |
||||
var backs []*naming.Instance |
||||
for i := 0; i < 3; i++ { |
||||
backs = append(backs, &naming.Instance{ |
||||
Zone: "sh1", |
||||
Env: "prod", |
||||
AppID: "2233", |
||||
Hostname: fmt.Sprintf("linux-%d", i), |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
LastTs: time.Now().Unix(), |
||||
}) |
||||
} |
||||
r := &Resolver{ |
||||
quit: make(chan struct{}, 1), |
||||
zone: "sh1", |
||||
subsetSize: 50, |
||||
} |
||||
|
||||
if len(r.filter(backs)) != 3 { |
||||
t.Fatalf("backends length must be 3") |
||||
} |
||||
} |
||||
|
||||
func Test_FilterBig(t *testing.T) { |
||||
var backs []*naming.Instance |
||||
for i := 0; i < 100; i++ { |
||||
backs = append(backs, &naming.Instance{ |
||||
Zone: "sh1", |
||||
Env: "prod", |
||||
AppID: "2233", |
||||
Hostname: fmt.Sprintf("linux-%d", i), |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
LastTs: time.Now().Unix(), |
||||
}) |
||||
} |
||||
r := &Resolver{ |
||||
quit: make(chan struct{}, 1), |
||||
zone: "sh1", |
||||
subsetSize: 50, |
||||
} |
||||
|
||||
if len(r.filter(backs)) != 50 { |
||||
t.Fatalf("backends length must be 50") |
||||
} |
||||
} |
||||
|
||||
func Test_FilterNone(t *testing.T) { |
||||
var backs []*naming.Instance |
||||
for i := 0; i < 100; i++ { |
||||
backs = append(backs, &naming.Instance{ |
||||
Zone: "sh1", |
||||
Env: "prod", |
||||
AppID: "2233", |
||||
Metadata: map[string]string{naming.MetaCluster: "c1"}, |
||||
Hostname: fmt.Sprintf("linux-%d", i), |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
LastTs: time.Now().Unix(), |
||||
}) |
||||
} |
||||
r := &Resolver{ |
||||
quit: make(chan struct{}, 1), |
||||
zone: "sh1", |
||||
subsetSize: 50, |
||||
clusters: map[string]struct{}{"c2": struct{}{}}, |
||||
} |
||||
|
||||
if len(r.filter(backs)) != 0 { |
||||
t.Fatalf("backends length must be 0") |
||||
} |
||||
} |
||||
|
||||
func Test_FilterSome(t *testing.T) { |
||||
var backs []*naming.Instance |
||||
for i := 0; i < 40; i++ { |
||||
backs = append(backs, &naming.Instance{ |
||||
Zone: "sh1", |
||||
Env: "prod", |
||||
AppID: "2233", |
||||
Metadata: map[string]string{naming.MetaCluster: "c1"}, |
||||
Hostname: fmt.Sprintf("linux-%d", i), |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
LastTs: time.Now().Unix(), |
||||
}) |
||||
} |
||||
for i := 50; i < 150; i++ { |
||||
backs = append(backs, &naming.Instance{ |
||||
Zone: "sh1", |
||||
Env: "prod", |
||||
AppID: "2233", |
||||
Metadata: map[string]string{naming.MetaCluster: "c2"}, |
||||
Hostname: fmt.Sprintf("linux-%d", i), |
||||
Addrs: []string{fmt.Sprintf("grpc://127.0.0.%d:9000", i)}, |
||||
LastTs: time.Now().Unix(), |
||||
}) |
||||
} |
||||
r := &Resolver{ |
||||
quit: make(chan struct{}, 1), |
||||
zone: "sh1", |
||||
subsetSize: 50, |
||||
clusters: map[string]struct{}{"c2": struct{}{}}, |
||||
} |
||||
if len(r.filter(backs)) != 50 { |
||||
t.Fatalf("backends length must be 0") |
||||
} |
||||
|
||||
r2 := &Resolver{ |
||||
quit: make(chan struct{}, 1), |
||||
zone: "sh1", |
||||
subsetSize: 50, |
||||
clusters: map[string]struct{}{"c1": struct{}{}}, |
||||
} |
||||
if len(r2.filter(backs)) != 40 { |
||||
t.Fatalf("backends length must be 0") |
||||
} |
||||
} |
Loading…
Reference in new issue