pull/3/head
felixhao 6 years ago
parent 471a525b59
commit f98cd0a5c0
  1. 51
      pkg/log/doc.go
  2. 92
      pkg/log/file.go
  3. 66
      pkg/log/handler.go
  4. 21
      pkg/log/internal/LICENSE.txt
  5. 97
      pkg/log/internal/buffer.go
  6. 71
      pkg/log/internal/buffer_test.go
  7. 7
      pkg/log/internal/bufferpool.go
  8. 187
      pkg/log/internal/encoder.go
  9. 6
      pkg/log/internal/field.go
  10. 344
      pkg/log/internal/filewriter/filewriter.go
  11. 221
      pkg/log/internal/filewriter/filewriter_test.go
  12. 69
      pkg/log/internal/filewriter/option.go
  13. 424
      pkg/log/internal/json_encoder.go
  14. 32
      pkg/log/internal/pool.go
  15. 32
      pkg/log/internal/pool_test.go
  16. 220
      pkg/log/log.go
  17. 89
      pkg/log/log_test.go
  18. 173
      pkg/log/pattern.go
  19. 35
      pkg/log/pattern_test.go
  20. 61
      pkg/log/stdout.go
  21. 54
      pkg/log/util.go
  22. 29
      pkg/log/verbose.go
  23. 2
      pkg/net/http/blademaster/client.go

@ -0,0 +1,51 @@
/*Package log 是kratos日志库.
主要功能
1. 日志打印到本地
2. 日志打印到标准输出
3. verbose日志实现参考glog实现可通过设置不同verbose级别默认不开启
日志配置
1. 默认配置
目前日志已经实现默认配置可以直接使用以下方式
log.Init(nil)
2. 启动参数 or 环境变量
启动参数 环境变量 说明
log.stdout LOG_STDOUT 是否开启标准输出
log.dir LOG_DIR 文件日志路径
log.v LOG_V verbose日志级别
log.module LOG_MODULE 可单独配置每个文件的verbose级别file=1,file2=2
log.filter LOG_FILTER 配置需要过滤的字段field1,field2
3. 配置文件
但是如果有特殊需要，可以按以下格式配置
[log]
family = "xxx-service"
dir = "/data/log/xxx-service/"
stdout = true
vLevel = 3
filter = ["field1", "field2"]
[log.module]
"dao_user" = 2
"servic*" = 1
配置说明
1.log
family 项目名默认读环境变量$APPID
stdout 标准输出prod环境不建议开启
filter 配置需要过滤掉的字段***替换
dir 文件日志地址prod环境不建议开启
v 开启verbose级别日志可指定全局级别
2. log.module
可单独配置每个文件的verbose级别
*/
package log

@ -0,0 +1,92 @@
package log
import (
"context"
"io"
"path/filepath"
"time"
"github.com/bilibili/Kratos/pkg/log/internal/filewriter"
)
// Per-level indexes into FileHandler.fws.
const (
	_infoIdx = iota
	_warnIdx
	_errorIdx
	_totalIdx // count of level files, used as the array length
)
// _fileNames maps each level index to the log file it is written to.
var _fileNames = map[int]string{
	_infoIdx:  "info.log",
	_warnIdx:  "warning.log",
	_errorIdx: "error.log",
}
// FileHandler writes log events to per-level local files
// (info.log / warning.log / error.log) through asynchronous file writers.
type FileHandler struct {
	render Render                            // pattern renderer formatting each log line
	fws    [_totalIdx]*filewriter.FileWriter // one writer per level index
}
// NewFile creates a file logger writing info/warning/error logs under dir.
// rotateSize (>0) caps the size of a single log file; maxLogFile (>0) caps
// how many rotated files are kept. Panics if a writer cannot be created.
// NOTE(review): bufferSize is accepted but never used here — TODO confirm
// whether it should be forwarded as a filewriter option.
func NewFile(dir string, bufferSize, rotateSize int64, maxLogFile int) *FileHandler {
	// newWriter builds the filewriter for one level's file under dir.
	newWriter := func(name string) *filewriter.FileWriter {
		var options []filewriter.Option
		if rotateSize > 0 {
			options = append(options, filewriter.MaxSize(rotateSize))
		}
		if maxLogFile > 0 {
			options = append(options, filewriter.MaxFile(maxLogFile))
		}
		w, err := filewriter.New(filepath.Join(dir, name), options...)
		if err != nil {
			// a logger that cannot open its files is unusable: fail fast
			panic(err)
		}
		return w
	}
	handler := &FileHandler{
		render: newPatternRender("[%D %T] [%L] [%S] %M"),
	}
	for idx, name := range _fileNames {
		handler.fws[idx] = newWriter(name)
	}
	return handler
}
// Log renders one log event to the file matching its level: warn and error
// levels get dedicated files, every other level goes to info.log.
func (h *FileHandler) Log(ctx context.Context, lv Level, args ...D) {
	// gather key/values; extra capacity for the fields added below
	d := make(map[string]interface{}, 10+len(args))
	for _, arg := range args {
		d[arg.Key] = arg.Value
	}
	// add extra fields taken from the context — see addExtraField
	addExtraField(ctx, d)
	d[_time] = time.Now().Format(_timeFormat)
	var w io.Writer
	switch lv {
	case _warnLevel:
		w = h.fws[_warnIdx]
	case _errorLevel:
		w = h.fws[_errorIdx]
	default:
		w = h.fws[_infoIdx]
	}
	// NOTE(review): render and write errors are silently dropped here.
	h.render.Render(w, d)
	w.Write([]byte("\n"))
}
// Close shuts down every per-level file writer.
// Individual close failures are deliberately ignored; always returns nil.
func (h *FileHandler) Close() error {
	for i := range h.fws {
		// best-effort close; error intentionally dropped
		h.fws[i].Close()
	}
	return nil
}
// SetFormat replaces the render pattern used for subsequent log lines.
// NOTE(review): not synchronized — a concurrent Log call races on h.render.
func (h *FileHandler) SetFormat(format string) {
	h.render = newPatternRender(format)
}

@ -2,12 +2,44 @@ package log
import (
"context"
"time"
pkgerr "github.com/pkg/errors"
)
// Keys of the built-in fields attached to every log entry.
const (
	_timeFormat = "2006-01-02T15:04:05.999999"
	// log level defined in level.go.
	_levelValue = "level_value"
	// log level name: INFO, WARN...
	_level = "level"
	// log time.
	_time = "time"
	// call site of the log statement (set in Handlers.Log).
	_source = "source"
	// common log field.
	_log = "log"
	// app name.
	_appID = "app_id"
	// container ID.
	_instanceID = "instance_id"
	// unique ID from trace.
	_tid = "traceid"
	// request time.
	_ts = "ts"
	// requester.
	_caller = "caller"
	// deploy environment: prod, pre, uat, fat.
	_deplyEnv = "env"
	// container area.
	_zone = "zone"
	// mirror flag.
	_mirror = "mirror"
	// color.
	_color = "color"
	// cluster.
	_cluster = "cluster"
)
// Handler is used to handle log events, outputting them to
@ -28,19 +60,43 @@ type Handler interface {
Close() error
}
// Handlers .
type Handlers []Handler
// newHandlers builds a Handlers bundle from a list of field keys to mask
// and the destination handlers.
func newHandlers(filters []string, handlers ...Handler) *Handlers {
	filterSet := make(map[string]struct{}, len(filters))
	for _, key := range filters {
		filterSet[key] = struct{}{}
	}
	return &Handlers{filters: filterSet, handlers: handlers}
}
// Handlers is a bundle of handlers with a field-filtering function:
// values whose keys appear in filters are masked before being logged.
type Handlers struct {
	filters  map[string]struct{} // field keys whose values get replaced by "***"
	handlers []Handler           // fan-out destinations
}
// Log handlers logging.
func (hs Handlers) Log(c context.Context, lv Level, d ...D) {
for _, h := range hs {
var fn string
for i := range d {
if _, ok := hs.filters[d[i].Key]; ok {
d[i].Value = "***"
}
if d[i].Key == _source {
fn = d[i].Value.(string)
}
}
if fn == "" {
fn = funcName(4)
}
d = append(d, KV(_source, fn), KV(_time, time.Now()), KV(_levelValue, int(lv)), KV(_level, lv.String()))
for _, h := range hs.handlers {
h.Log(c, lv, d...)
}
}
// Close close resource.
func (hs Handlers) Close() (err error) {
for _, h := range hs {
for _, h := range hs.handlers {
if e := h.Close(); e != nil {
err = pkgerr.WithStack(e)
}
@ -50,7 +106,7 @@ func (hs Handlers) Close() (err error) {
// SetFormat .
func (hs Handlers) SetFormat(format string) {
for _, h := range hs {
for _, h := range hs.handlers {
h.SetFormat(format)
}
}

@ -0,0 +1,21 @@
COPY FROM: https://github.com/uber-go/zap
Copyright (c) 2016-2017 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@ -0,0 +1,97 @@
package core
import "strconv"
const _size = 1024 // by default, create 1 KiB buffers
// NewBuffer creates a Buffer with an initial capacity of size bytes.
// The parameter is renamed from _size, which shadowed the package-level
// _size constant (callers are unaffected by a parameter rename).
// The returned Buffer has a zero-value pool; Free hands it to that zero
// pool — TODO confirm Pool.put tolerates an unpooled Buffer.
func NewBuffer(size int) *Buffer {
	return &Buffer{bs: make([]byte, 0, size)}
}
// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
// the usual way to construct one is via a Pool (NewBuffer builds an unpooled one).
type Buffer struct {
	bs   []byte // accumulated bytes
	pool Pool   // owning pool used by Free; zero value when unpooled
}
// AppendByte writes a single byte to the Buffer.
func (b *Buffer) AppendByte(v byte) {
b.bs = append(b.bs, v)
}
// AppendString writes a string to the Buffer.
func (b *Buffer) AppendString(s string) {
b.bs = append(b.bs, s...)
}
// AppendInt appends an integer to the underlying buffer (assuming base 10).
func (b *Buffer) AppendInt(i int64) {
b.bs = strconv.AppendInt(b.bs, i, 10)
}
// AppendUint appends an unsigned integer to the underlying buffer (assuming
// base 10).
func (b *Buffer) AppendUint(i uint64) {
b.bs = strconv.AppendUint(b.bs, i, 10)
}
// AppendBool appends a bool to the underlying buffer.
func (b *Buffer) AppendBool(v bool) {
b.bs = strconv.AppendBool(b.bs, v)
}
// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
// or +/- Inf.
func (b *Buffer) AppendFloat(f float64, bitSize int) {
b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
}
// Len returns the length of the underlying byte slice.
func (b *Buffer) Len() int {
return len(b.bs)
}
// Cap returns the capacity of the underlying byte slice.
func (b *Buffer) Cap() int {
return cap(b.bs)
}
// Bytes returns a mutable reference to the underlying byte slice.
func (b *Buffer) Bytes() []byte {
return b.bs
}
// String returns a string copy of the underlying byte slice.
func (b *Buffer) String() string {
return string(b.bs)
}
// Reset resets the underlying byte slice. Subsequent writes re-use the slice's
// backing array.
func (b *Buffer) Reset() {
b.bs = b.bs[:0]
}
// Write implements io.Writer: it appends bs to the buffer and never fails.
func (b *Buffer) Write(bs []byte) (int, error) {
	n := len(bs)
	b.bs = append(b.bs, bs...)
	return n, nil
}
// TrimNewline trims any final "\n" byte from the end of the buffer.
func (b *Buffer) TrimNewline() {
	if n := len(b.bs); n > 0 && b.bs[n-1] == '\n' {
		b.bs = b.bs[:n-1]
	}
}
// Free returns the Buffer to its Pool.
//
// Callers must not retain references to the Buffer after calling Free.
// NOTE(review): for a Buffer built with NewBuffer, pool is the zero value —
// confirm Pool.put tolerates that before calling Free on such a buffer.
func (b *Buffer) Free() {
	b.pool.put(b)
}

@ -0,0 +1,71 @@
package core
import (
"bytes"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestBufferWrites checks each Append* helper produces the expected bytes
// and that the buffer capacity never grows past the pool's default size.
func TestBufferWrites(t *testing.T) {
	buf := NewPool(0).Get()
	tests := []struct {
		desc string
		f    func()
		want string
	}{
		{"AppendByte", func() { buf.AppendByte('v') }, "v"},
		{"AppendString", func() { buf.AppendString("foo") }, "foo"},
		{"AppendIntPositive", func() { buf.AppendInt(42) }, "42"},
		{"AppendIntNegative", func() { buf.AppendInt(-42) }, "-42"},
		{"AppendUint", func() { buf.AppendUint(42) }, "42"},
		{"AppendBool", func() { buf.AppendBool(true) }, "true"},
		{"AppendFloat64", func() { buf.AppendFloat(3.14, 64) }, "3.14"},
		// Intentionally introduce some floating-point error.
		{"AppendFloat32", func() { buf.AppendFloat(float64(float32(3.14)), 32) }, "3.14"},
		{"AppendWrite", func() { buf.Write([]byte("foo")) }, "foo"},
	}
	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			buf.Reset()
			tt.f()
			assert.Equal(t, tt.want, buf.String(), "Unexpected buffer.String().")
			assert.Equal(t, tt.want, string(buf.Bytes()), "Unexpected string(buffer.Bytes()).")
			assert.Equal(t, len(tt.want), buf.Len(), "Unexpected buffer length.")
			// We're not writing more than a kibibyte in tests.
			assert.Equal(t, _size, buf.Cap(), "Expected buffer capacity to remain constant.")
		})
	}
}
func BenchmarkBuffers(b *testing.B) {
// Because we use the strconv.AppendFoo functions so liberally, we can't
// use the standard library's bytes.Buffer anyways (without incurring a
// bunch of extra allocations). Nevertheless, let's make sure that we're
// not losing any precious nanoseconds.
str := strings.Repeat("a", 1024)
slice := make([]byte, 1024)
buf := bytes.NewBuffer(slice)
custom := NewPool(0).Get()
b.Run("ByteSlice", func(b *testing.B) {
for i := 0; i < b.N; i++ {
slice = append(slice, str...)
slice = slice[:0]
}
})
b.Run("BytesBuffer", func(b *testing.B) {
for i := 0; i < b.N; i++ {
buf.WriteString(str)
buf.Reset()
}
})
b.Run("CustomBuffer", func(b *testing.B) {
for i := 0; i < b.N; i++ {
custom.AppendString(str)
custom.Reset()
}
})
}

@ -0,0 +1,7 @@
package core
// Package-level default pool of _size-byte (1 KiB) buffers.
var (
	_pool = NewPool(_size)
	// GetPool retrieves a buffer from the default pool, creating one if necessary.
	GetPool = _pool.Get
)

@ -0,0 +1,187 @@
package core
import (
"time"
)
// DefaultLineEnding defines the default line ending when writing logs.
// Alternate line endings specified in EncoderConfig can override this
// behavior.
const DefaultLineEnding = "\n"
// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a
// map- or struct-like object to the logging context. Like maps, ObjectEncoders
// aren't safe for concurrent use (though typical use shouldn't require locks).
type ObjectEncoder interface {
// Logging-specific marshalers.
AddArray(key string, marshaler ArrayMarshaler) error
AddObject(key string, marshaler ObjectMarshaler) error
// Built-in types.
AddBinary(key string, value []byte) // for arbitrary bytes
AddByteString(key string, value []byte) // for UTF-8 encoded bytes
AddBool(key string, value bool)
AddComplex128(key string, value complex128)
AddComplex64(key string, value complex64)
AddDuration(key string, value time.Duration)
AddFloat64(key string, value float64)
AddFloat32(key string, value float32)
AddInt(key string, value int)
AddInt64(key string, value int64)
AddInt32(key string, value int32)
AddInt16(key string, value int16)
AddInt8(key string, value int8)
AddString(key, value string)
AddTime(key string, value time.Time)
AddUint(key string, value uint)
AddUint64(key string, value uint64)
AddUint32(key string, value uint32)
AddUint16(key string, value uint16)
AddUint8(key string, value uint8)
AddUintptr(key string, value uintptr)
// AddReflected uses reflection to serialize arbitrary objects, so it's slow
// and allocation-heavy.
AddReflected(key string, value interface{}) error
// OpenNamespace opens an isolated namespace where all subsequent fields will
// be added. Applications can use namespaces to prevent key collisions when
// injecting loggers into sub-components or third-party libraries.
OpenNamespace(key string)
}
// ObjectMarshaler allows user-defined types to efficiently add themselves to the
// logging context, and to selectively omit information which shouldn't be
// included in logs (e.g., passwords).
type ObjectMarshaler interface {
MarshalLogObject(ObjectEncoder) error
}
// ObjectMarshalerFunc is a type adapter that turns a function into an
// ObjectMarshaler.
type ObjectMarshalerFunc func(ObjectEncoder) error
// MarshalLogObject calls the underlying function.
func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error {
return f(enc)
}
// ArrayMarshaler allows user-defined types to efficiently add themselves to the
// logging context, and to selectively omit information which shouldn't be
// included in logs (e.g., passwords).
type ArrayMarshaler interface {
MarshalLogArray(ArrayEncoder) error
}
// ArrayMarshalerFunc is a type adapter that turns a function into an
// ArrayMarshaler.
type ArrayMarshalerFunc func(ArrayEncoder) error
// MarshalLogArray calls the underlying function.
func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error {
return f(enc)
}
// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding
// array-like objects to the logging context. Of note, it supports mixed-type
// arrays even though they aren't typical in Go. Like slices, ArrayEncoders
// aren't safe for concurrent use (though typical use shouldn't require locks).
type ArrayEncoder interface {
// Built-in types.
PrimitiveArrayEncoder
// Time-related types.
AppendDuration(time.Duration)
AppendTime(time.Time)
// Logging-specific marshalers.
AppendArray(ArrayMarshaler) error
AppendObject(ObjectMarshaler) error
// AppendReflected uses reflection to serialize arbitrary objects, so it's
// slow and allocation-heavy.
AppendReflected(value interface{}) error
}
// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals
// only in Go's built-in types. It's included only so that Duration- and
// TimeEncoders cannot trigger infinite recursion.
type PrimitiveArrayEncoder interface {
// Built-in types.
AppendBool(bool)
AppendByteString([]byte) // for UTF-8 encoded bytes
AppendComplex128(complex128)
AppendComplex64(complex64)
AppendFloat64(float64)
AppendFloat32(float32)
AppendInt(int)
AppendInt64(int64)
AppendInt32(int32)
AppendInt16(int16)
AppendInt8(int8)
AppendString(string)
AppendUint(uint)
AppendUint64(uint64)
AppendUint32(uint32)
AppendUint16(uint16)
AppendUint8(uint8)
AppendUintptr(uintptr)
}
// An EncoderConfig allows users to configure the concrete encoders supplied
// by this package (interface copied from zapcore).
type EncoderConfig struct {
	// Configure the primitive representations of common complex types. For
	// example, some users may want all time.Times serialized as floating-point
	// seconds since epoch, while others may prefer ISO8601 strings.
	EncodeTime     TimeEncoder     `json:"timeEncoder" yaml:"timeEncoder"`
	EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"`
	// NOTE(review): the other zap encoder knobs (EncodeLevel, EncodeCaller,
	// EncodeName) were dropped when this was copied from zap; restore them
	// here if level/caller/name encoding becomes configurable.
}
// Encoder is a format-agnostic interface for all log entry marshalers. Since
// log encoders don't need to support the same wide range of use cases as
// general-purpose marshalers, it's possible to make them faster and
// lower-allocation.
//
// Implementations of the ObjectEncoder interface's methods can, of course,
// freely modify the receiver. However, the Clone and EncodeEntry methods will
// be called concurrently and shouldn't modify the receiver.
type Encoder interface {
ObjectEncoder
// Clone copies the encoder, ensuring that adding fields to the copy doesn't
// affect the original.
Clone() Encoder
// EncodeEntry encodes an entry and fields, along with any accumulated
// context, into a byte buffer and returns it.
Encode(*Buffer, ...Field) error
}
// A TimeEncoder serializes a time.Time to a primitive type.
type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
// A DurationEncoder serializes a time.Duration to a primitive type.
type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
// EpochTimeEncoder serializes a time.Time for log output.
//
// NOTE(review): despite the name, this appends a formatted timestamp string
// ("2006-01-02T15:04:05.999999"), NOT a floating-point number of seconds
// since the Unix epoch as the name (and the zap encoder it was copied from)
// implies — rename it or restore the epoch-seconds behavior.
func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
	enc.AppendString(t.Format("2006-01-02T15:04:05.999999"))
}
// SecondsDurationEncoder serializes a time.Duration to a floating-point
// number of seconds elapsed.
func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
	const nanosPerSecond = float64(time.Second)
	enc.AppendFloat64(float64(d) / nanosPerSecond)
}

@ -0,0 +1,6 @@
package core
// Field is for encoder
type Field interface {
AddTo(enc ObjectEncoder)
}

@ -0,0 +1,344 @@
package filewriter
import (
"bytes"
"container/list"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// FileWriter is an asynchronous, rotating file log writer.
// Write enqueues data on an internal channel; a single daemon goroutine
// batches writes and performs time- and size-based rotation.
type FileWriter struct {
	opt    option
	dir    string
	fname  string
	ch     chan *bytes.Buffer // pending writes
	stdlog *log.Logger        // fallback/error logger on stderr
	pool   *sync.Pool         // recycled *bytes.Buffer

	lastRotateFormat string // formatted time of the current rotation period
	lastSplitNum     int    // size-split sequence within the current period

	current *wrapFile  // file currently being appended to
	files   *list.List // known rotated files (rotateItem values)
	closed  int32      // set to 1 by Close (read/written atomically)
	wg      sync.WaitGroup
}
// rotateItem describes one already-rotated log file on disk.
type rotateItem struct {
	rotateTime int64  // rotation period as Unix seconds, parsed from the name
	rotateNum  int    // size-split sequence number within the period
	fname      string // file name relative to the log directory
}
// parseRotateItem scans dir for files already rotated from fname
// (fname.<format> or fname.<format>.NNN) and returns them as a list of
// rotateItem ordered newest first (by rotate time, then split number).
func parseRotateItem(dir, fname, rotateFormat string) (*list.List, error) {
	fis, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	// parse converts an existing rotated filename into a rotateItem.
	parse := func(s string) (rt rotateItem, err error) {
		// strip the base name and leading ".": error.log.2018-09-12.001 -> 2018-09-12.001
		rt.fname = s
		s = strings.TrimLeft(s[len(fname):], ".")
		seqs := strings.Split(s, ".")
		var t time.Time
		switch len(seqs) {
		case 2:
			// time part plus a split sequence number
			if rt.rotateNum, err = strconv.Atoi(seqs[1]); err != nil {
				return
			}
			fallthrough
		case 1:
			if t, err = time.Parse(rotateFormat, seqs[0]); err != nil {
				return
			}
			rt.rotateTime = t.Unix()
		}
		return
	}
	var items []rotateItem
	for _, fi := range fis {
		if strings.HasPrefix(fi.Name(), fname) && fi.Name() != fname {
			rt, err := parse(fi.Name())
			if err != nil {
				// TODO deal with error; unparsable siblings are skipped
				continue
			}
			items = append(items, rt)
		}
	}
	// newest first: later rotate time wins, then the higher split number
	sort.Slice(items, func(i, j int) bool {
		if items[i].rotateTime == items[j].rotateTime {
			return items[i].rotateNum > items[j].rotateNum
		}
		return items[i].rotateTime > items[j].rotateTime
	})
	l := list.New()
	for _, item := range items {
		l.PushBack(item)
	}
	return l, nil
}
// wrapFile couples an open file with a running byte count so size-based
// rotation does not need to Stat the file on every write.
type wrapFile struct {
	fsize int64
	fp    *os.File
}
// size reports the tracked file size in bytes.
func (w *wrapFile) size() int64 {
	return w.fsize
}
// write appends p to the file and adds the written byte count to fsize.
func (w *wrapFile) write(p []byte) (n int, err error) {
	n, err = w.fp.Write(p)
	w.fsize += int64(n)
	return
}
// newWrapFile opens fpath in append mode (creating it if needed) and wraps
// it together with its current size.
func newWrapFile(fpath string) (*wrapFile, error) {
	fp, err := os.OpenFile(fpath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	fi, err := fp.Stat()
	if err != nil {
		// don't leak the descriptor when Stat fails (previously unclosed)
		fp.Close()
		return nil, err
	}
	return &wrapFile{fp: fp, fsize: fi.Size()}, nil
}
// New creates a FileWriter for fpath. A FileWriter is safe for use by
// multiple goroutines simultaneously. The parent directory is created if
// missing, existing rotated files are scanned so the split sequence resumes
// where it left off, and a daemon goroutine is started for async writes.
func New(fpath string, fns ...Option) (*FileWriter, error) {
	opt := defaultOption
	for _, fn := range fns {
		fn(&opt)
	}
	fname := filepath.Base(fpath)
	if fname == "" {
		return nil, fmt.Errorf("filename can't empty")
	}
	dir := filepath.Dir(fpath)
	fi, err := os.Stat(dir)
	if err == nil && !fi.IsDir() {
		return nil, fmt.Errorf("%s already exists and not a directory", dir)
	}
	if os.IsNotExist(err) {
		if err = os.MkdirAll(dir, 0755); err != nil {
			return nil, fmt.Errorf("create dir %s error: %s", dir, err.Error())
		}
	}
	current, err := newWrapFile(fpath)
	if err != nil {
		return nil, err
	}
	stdlog := log.New(os.Stderr, "flog ", log.LstdFlags)
	ch := make(chan *bytes.Buffer, opt.ChanSize)
	files, err := parseRotateItem(dir, fname, opt.RotateFormat)
	if err != nil {
		// scanning failed: fall back to an empty list, report on stderr
		files = list.New()
		stdlog.Printf("parseRotateItem error: %s", err)
	}
	lastRotateFormat := time.Now().Format(opt.RotateFormat)
	var lastSplitNum int
	if files.Len() > 0 {
		rt := files.Front().Value.(rotateItem)
		// a substring check is much easier than comparing timestamps
		if strings.Contains(rt.fname, lastRotateFormat) {
			lastSplitNum = rt.rotateNum
		}
	}
	fw := &FileWriter{
		opt:              opt,
		dir:              dir,
		fname:            fname,
		stdlog:           stdlog,
		ch:               ch,
		pool:             &sync.Pool{New: func() interface{} { return new(bytes.Buffer) }},
		lastSplitNum:     lastSplitNum,
		lastRotateFormat: lastRotateFormat,
		files:            files,
		current:          current,
	}
	fw.wg.Add(1)
	go fw.daemon()
	return fw, nil
}
// Write enqueues p for asynchronous writing. The returned byte count is
// pseudo — it reports acceptance into the queue, not a completed file write
// (kept only to satisfy io.Writer). After Close, data is echoed to stderr
// and an error is returned.
func (f *FileWriter) Write(p []byte) (int, error) {
	// atomic load guards the closed flag set by Close
	if atomic.LoadInt32(&f.closed) == 1 {
		f.stdlog.Printf("%s", p)
		return 0, fmt.Errorf("filewriter already closed")
	}
	// writing to file is asynchronous: copy p into an internal buffer so the
	// caller may reuse p immediately
	buf := f.getBuf()
	buf.Write(p)
	if f.opt.WriteTimeout == 0 {
		// non-blocking enqueue
		select {
		case f.ch <- buf:
			return len(p), nil
		default:
			// recycle the unqueued buffer (previously leaked from the pool)
			f.putBuf(buf)
			// TODO: write discard log to to stdout?
			return 0, fmt.Errorf("log channel is full, discard log")
		}
	}
	// blocking enqueue with timeout; Stop releases the timer promptly when
	// the send wins (previously the timer was never stopped)
	timeout := time.NewTimer(f.opt.WriteTimeout)
	defer timeout.Stop()
	select {
	case f.ch <- buf:
		return len(p), nil
	case <-timeout.C:
		f.putBuf(buf)
		// TODO: write discard log to to stdout?
		return 0, fmt.Errorf("log channel is full, discard log")
	}
}
// daemon is the single background goroutine: it aggregates incoming buffers,
// flushes them on a short ticker, triggers periodic rotation checks, and
// drains everything after Close before signalling the WaitGroup.
func (f *FileWriter) daemon() {
	// TODO: check aggsbuf size prevent it too big
	aggsbuf := &bytes.Buffer{}
	tk := time.NewTicker(f.opt.RotateInterval)
	// TODO: make it configurable
	aggstk := time.NewTicker(10 * time.Millisecond)
	var err error
	for {
		select {
		case t := <-tk.C:
			// periodic rotation check
			f.checkRotate(t)
		case buf, ok := <-f.ch:
			// aggregate many small writes into one larger flush
			if ok {
				aggsbuf.Write(buf.Bytes())
				f.putBuf(buf)
			}
		case <-aggstk.C:
			// flush the aggregation buffer every 10ms
			if aggsbuf.Len() > 0 {
				if err = f.write(aggsbuf.Bytes()); err != nil {
					f.stdlog.Printf("write log error: %s", err)
				}
				aggsbuf.Reset()
			}
		}
		if atomic.LoadInt32(&f.closed) != 1 {
			continue
		}
		// closed: flush aggregated data, then drain the (closed) channel —
		// ranging over it ends once it is empty — and exit the loop
		if err = f.write(aggsbuf.Bytes()); err != nil {
			f.stdlog.Printf("write log error: %s", err)
		}
		for buf := range f.ch {
			if err = f.write(buf.Bytes()); err != nil {
				f.stdlog.Printf("write log error: %s", err)
			}
			f.putBuf(buf)
		}
		break
	}
	f.wg.Done()
}
// Close flags the writer closed, closes the input channel and waits for the
// daemon to drain pending buffers. Always returns nil.
// NOTE(review): the current file handle is not closed here (it is only
// closed during rotation) — confirm whether that is intentional.
func (f *FileWriter) Close() error {
	atomic.StoreInt32(&f.closed, 1)
	close(f.ch)
	f.wg.Wait()
	return nil
}
// checkRotate deletes rotated files beyond MaxFile, then rotates the current
// file when the time period changed or its size exceeded MaxSize.
// Called only from the daemon goroutine.
func (f *FileWriter) checkRotate(t time.Time) {
	// formatFname builds the rotated name: fname.<format>[.NNN]
	formatFname := func(format string, num int) string {
		if num == 0 {
			return fmt.Sprintf("%s.%s", f.fname, format)
		}
		return fmt.Sprintf("%s.%s.%03d", f.fname, format, num)
	}
	format := t.Format(f.opt.RotateFormat)
	if f.opt.MaxFile != 0 {
		// NOTE(review): removal pops from the front of f.files; the startup
		// scan orders the list newest-first while runtime rotations append
		// to the back — confirm the oldest file is the one meant to go.
		for f.files.Len() > f.opt.MaxFile {
			rt := f.files.Remove(f.files.Front()).(rotateItem)
			fpath := filepath.Join(f.dir, rt.fname)
			if err := os.Remove(fpath); err != nil {
				f.stdlog.Printf("remove file %s error: %s", fpath, err)
			}
		}
	}
	if format != f.lastRotateFormat || (f.opt.MaxSize != 0 && f.current.size() > f.opt.MaxSize) {
		var err error
		// close current file first
		if err = f.current.fp.Close(); err != nil {
			f.stdlog.Printf("close current file error: %s", err)
		}
		// rename the live file to its rotated name
		fname := formatFname(f.lastRotateFormat, f.lastSplitNum)
		oldpath := filepath.Join(f.dir, f.fname)
		newpath := filepath.Join(f.dir, fname)
		if err = os.Rename(oldpath, newpath); err != nil {
			f.stdlog.Printf("rename file %s to %s error: %s", oldpath, newpath, err)
			return
		}
		f.files.PushBack(rotateItem{fname: fname /*rotateNum: f.lastSplitNum, rotateTime: t.Unix() unnecessary*/})
		// reset the split counter on a period change; bump it on a size split
		if format != f.lastRotateFormat {
			f.lastRotateFormat = format
			f.lastSplitNum = 0
		} else {
			f.lastSplitNum++
		}
		// recreate current file
		f.current, err = newWrapFile(filepath.Join(f.dir, f.fname))
		if err != nil {
			f.stdlog.Printf("create log file error: %s", err)
		}
	}
}
// write appends p to the current log file.
// When there is no usable file handle (newWrapFile failed in checkRotate),
// the data is echoed to stderr and an error is returned instead of
// dereferencing the nil handle (previously this fell through and panicked).
func (f *FileWriter) write(p []byte) error {
	if f.current == nil {
		f.stdlog.Printf("can't write log to file, please check stderr log for detail")
		f.stdlog.Printf("%s", p)
		return fmt.Errorf("can't write log: no current file")
	}
	_, err := f.current.write(p)
	return err
}
// putBuf resets buf and returns it to the internal pool for reuse.
func (f *FileWriter) putBuf(buf *bytes.Buffer) {
	buf.Reset()
	f.pool.Put(buf)
}
// getBuf takes a reusable buffer from the internal pool.
func (f *FileWriter) getBuf() *bytes.Buffer {
	return f.pool.Get().(*bytes.Buffer)
}

@ -0,0 +1,221 @@
package filewriter
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
const logdir = "testlog"
func touch(dir, name string) {
os.MkdirAll(dir, 0755)
fp, err := os.OpenFile(filepath.Join(dir, name), os.O_CREATE, 0644)
if err != nil {
panic(err)
}
fp.Close()
}
// TestMain runs the suite. Cleanup of the testlog directory is currently
// disabled (commented out), so test artifacts persist between runs —
// re-enable once the output no longer needs post-mortem inspection.
func TestMain(m *testing.M) {
	ret := m.Run()
	// os.RemoveAll(logdir)
	os.Exit(ret)
}
func TestParseRotate(t *testing.T) {
touch := func(dir, name string) {
os.MkdirAll(dir, 0755)
fp, err := os.OpenFile(filepath.Join(dir, name), os.O_CREATE, 0644)
if err != nil {
t.Fatal(err)
}
fp.Close()
}
dir := filepath.Join(logdir, "test-parse-rotate")
names := []string{"info.log.2018-11-11", "info.log.2018-11-11.001", "info.log.2018-11-11.002", "info.log." + time.Now().Format("2006-01-02") + ".005"}
for _, name := range names {
touch(dir, name)
}
l, err := parseRotateItem(dir, "info.log", "2006-01-02")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, len(names), l.Len())
rt := l.Front().Value.(rotateItem)
assert.Equal(t, 5, rt.rotateNum)
}
func TestRotateExists(t *testing.T) {
dir := filepath.Join(logdir, "test-rotate-exists")
names := []string{"info.log." + time.Now().Format("2006-01-02") + ".005"}
for _, name := range names {
touch(dir, name)
}
fw, err := New(logdir+"/test-rotate-exists/info.log",
MaxSize(1024*1024),
func(opt *option) { opt.RotateInterval = time.Millisecond },
)
if err != nil {
t.Fatal(err)
}
data := make([]byte, 1024)
for i := range data {
data[i] = byte(i)
}
for i := 0; i < 10; i++ {
for i := 0; i < 1024; i++ {
_, err = fw.Write(data)
if err != nil {
t.Error(err)
}
}
time.Sleep(10 * time.Millisecond)
}
fw.Close()
fis, err := ioutil.ReadDir(logdir + "/test-rotate-exists")
if err != nil {
t.Fatal(err)
}
var fnams []string
for _, fi := range fis {
fnams = append(fnams, fi.Name())
}
assert.Contains(t, fnams, "info.log."+time.Now().Format("2006-01-02")+".006")
}
func TestSizeRotate(t *testing.T) {
fw, err := New(logdir+"/test-rotate/info.log",
MaxSize(1024*1024),
func(opt *option) { opt.RotateInterval = 1 * time.Millisecond },
)
if err != nil {
t.Fatal(err)
}
data := make([]byte, 1024)
for i := range data {
data[i] = byte(i)
}
for i := 0; i < 10; i++ {
for i := 0; i < 1024; i++ {
_, err = fw.Write(data)
if err != nil {
t.Error(err)
}
}
time.Sleep(10 * time.Millisecond)
}
fw.Close()
fis, err := ioutil.ReadDir(logdir + "/test-rotate")
if err != nil {
t.Fatal(err)
}
assert.True(t, len(fis) > 5, "expect more than 5 file get %d", len(fis))
}
func TestMaxFile(t *testing.T) {
fw, err := New(logdir+"/test-maxfile/info.log",
MaxSize(1024*1024),
MaxFile(1),
func(opt *option) { opt.RotateInterval = 1 * time.Millisecond },
)
if err != nil {
t.Fatal(err)
}
data := make([]byte, 1024)
for i := range data {
data[i] = byte(i)
}
for i := 0; i < 10; i++ {
for i := 0; i < 1024; i++ {
_, err = fw.Write(data)
if err != nil {
t.Error(err)
}
}
time.Sleep(10 * time.Millisecond)
}
fw.Close()
fis, err := ioutil.ReadDir(logdir + "/test-maxfile")
if err != nil {
t.Fatal(err)
}
assert.True(t, len(fis) <= 2, fmt.Sprintf("expect 2 file get %d", len(fis)))
}
func TestMaxFile2(t *testing.T) {
files := []string{
"info.log.2018-12-01",
"info.log.2018-12-02",
"info.log.2018-12-03",
"info.log.2018-12-04",
"info.log.2018-12-05",
"info.log.2018-12-05.001",
}
for _, file := range files {
touch(logdir+"/test-maxfile2", file)
}
fw, err := New(logdir+"/test-maxfile2/info.log",
MaxSize(1024*1024),
MaxFile(3),
func(opt *option) { opt.RotateInterval = 1 * time.Millisecond },
)
if err != nil {
t.Fatal(err)
}
data := make([]byte, 1024)
for i := range data {
data[i] = byte(i)
}
for i := 0; i < 10; i++ {
for i := 0; i < 1024; i++ {
_, err = fw.Write(data)
if err != nil {
t.Error(err)
}
}
time.Sleep(10 * time.Millisecond)
}
fw.Close()
fis, err := ioutil.ReadDir(logdir + "/test-maxfile2")
if err != nil {
t.Fatal(err)
}
assert.True(t, len(fis) == 4, fmt.Sprintf("expect 4 file get %d", len(fis)))
}
func TestFileWriter(t *testing.T) {
fw, err := New("testlog/info.log")
if err != nil {
t.Fatal(err)
}
defer fw.Close()
_, err = fw.Write([]byte("Hello World!\n"))
if err != nil {
t.Error(err)
}
}
func BenchmarkFileWriter(b *testing.B) {
fw, err := New("testlog/bench/info.log",
func(opt *option) { opt.WriteTimeout = time.Second }, MaxSize(1024*1024*8), /*32MB*/
func(opt *option) { opt.RotateInterval = 10 * time.Millisecond },
)
if err != nil {
b.Fatal(err)
}
for i := 0; i < b.N; i++ {
_, err = fw.Write([]byte("Hello World!\n"))
if err != nil {
b.Error(err)
}
}
}

@ -0,0 +1,69 @@
package filewriter
import (
"fmt"
"strings"
"time"
)
// RotateFormat
const (
RotateDaily = "2006-01-02"
)
// defaultOption: daily rotation, 1 GiB max file size, 8192-slot channel,
// rotation check every 10s. MaxFile and WriteTimeout default to 0 (disabled).
var defaultOption = option{
	RotateFormat:   RotateDaily,
	MaxSize:        1 << 30,
	ChanSize:       1024 * 8,
	RotateInterval: 10 * time.Second,
}
// option holds all FileWriter settings; mutate it via the Option funcs below.
type option struct {
	RotateFormat string // time layout appended to rotated file names
	MaxFile      int    // max rotated files kept; 0 = unlimited
	MaxSize      int64  // max bytes per file before a size split; 0 = unlimited
	ChanSize     int    // internal channel capacity
	// TODO export Option setters for the two fields below
	RotateInterval time.Duration // how often the daemon checks for rotation
	WriteTimeout   time.Duration // 0 = non-blocking enqueue, drop when full
}
// Option filewriter option
type Option func(opt *option)
// RotateFormat sets the time layout used in rotated file names,
// e.g. "2006-01-02" meaning rotate the log file every day.
// NOTE: format can't contain "." — the rotated-name parser splits on it,
// so this panics immediately when given one.
func RotateFormat(format string) Option {
	if strings.Contains(format, ".") {
		panic(fmt.Sprintf("rotate format can't contain '.' format: %s", format))
	}
	return func(opt *option) {
		opt.RotateFormat = format
	}
}
// MaxFile limits how many rotated files are kept; 0 means unlimited.
// NOTE(review): the original comment claimed a default of 999, but
// defaultOption leaves MaxFile unset, so the effective default is 0.
// TODO: don't create file list if MaxFile is unlimited.
func MaxFile(n int) Option {
	return func(opt *option) {
		opt.MaxFile = n
	}
}
// MaxSize sets the max size of a single log file,
// default 1 GiB, 0 meaning unlimited.
func MaxSize(n int64) Option {
	return func(opt *option) {
		opt.MaxSize = n
	}
}
// ChanSize sets the internal channel size, default 8192 (about 64 KiB of
// pointers on an x64 platform). Because the filewriter keeps an internal
// object pool, a much larger channel may pin a lot of memory: sync.Pool has
// no expiry, so pooled buffers are not freed until program exit.
func ChanSize(n int) Option {
	return func(opt *option) {
		opt.ChanSize = n
	}
}

@ -0,0 +1,424 @@
package core
import (
"encoding/base64"
"encoding/json"
"math"
"sync"
"time"
"unicode/utf8"
)
// For JSON-escaping; see jsonEncoder.safeAddString below.
const _hex = "0123456789abcdef"

// Compile-time check that jsonEncoder satisfies ObjectEncoder.
var _ ObjectEncoder = &jsonEncoder{}

// _jsonPool recycles encoders so Clone/Encode stay allocation-free.
var _jsonPool = sync.Pool{New: func() interface{} {
	return &jsonEncoder{}
}}

func getJSONEncoder() *jsonEncoder {
	return _jsonPool.Get().(*jsonEncoder)
}

// putJSONEncoder zeroes every field before pooling so a reused encoder
// cannot leak the previous caller's buffer, config, or reflect state.
func putJSONEncoder(enc *jsonEncoder) {
	if enc.reflectBuf != nil {
		enc.reflectBuf.Free()
	}
	enc.EncoderConfig = nil
	enc.buf = nil
	enc.spaced = false
	enc.openNamespaces = 0
	enc.reflectBuf = nil
	enc.reflectEnc = nil
	_jsonPool.Put(enc)
}

type jsonEncoder struct {
	*EncoderConfig
	buf            *Buffer
	spaced         bool // include spaces after colons and commas
	openNamespaces int  // count of '{' opened by OpenNamespace, closed in Encode

	// for encoding generic values by reflection
	reflectBuf *Buffer
	reflectEnc *json.Encoder
}

// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
// appropriately escapes all field keys and values.
//
// Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like
//	{"foo":"bar","foo":"baz"}
// This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate
// keys.
func NewJSONEncoder(cfg EncoderConfig, buf *Buffer) Encoder {
	return newJSONEncoder(cfg, false, buf)
}

// newJSONEncoder stores a copy of cfg; spaced selects pretty separators.
func newJSONEncoder(cfg EncoderConfig, spaced bool, buf *Buffer) *jsonEncoder {
	return &jsonEncoder{
		EncoderConfig: &cfg,
		buf:           buf,
		spaced:        spaced,
	}
}
// The AddXxx methods below write an escaped key and then delegate to the
// matching AppendXxx value writer.

func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error {
	enc.addKey(key)
	return enc.AppendArray(arr)
}

func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error {
	enc.addKey(key)
	return enc.AppendObject(obj)
}

// AddBinary encodes arbitrary bytes as a base64 string value.
func (enc *jsonEncoder) AddBinary(key string, val []byte) {
	enc.AddString(key, base64.StdEncoding.EncodeToString(val))
}

func (enc *jsonEncoder) AddByteString(key string, val []byte) {
	enc.addKey(key)
	enc.AppendByteString(val)
}

func (enc *jsonEncoder) AddBool(key string, val bool) {
	enc.addKey(key)
	enc.AppendBool(val)
}

func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
	enc.addKey(key)
	enc.AppendComplex128(val)
}

func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
	enc.addKey(key)
	enc.AppendDuration(val)
}

func (enc *jsonEncoder) AddFloat64(key string, val float64) {
	enc.addKey(key)
	enc.AppendFloat64(val)
}

func (enc *jsonEncoder) AddInt64(key string, val int64) {
	enc.addKey(key)
	enc.AppendInt64(val)
}

// resetReflectBuf lazily creates (or clears) the scratch buffer shared by
// all reflection-based encodes on this encoder.
func (enc *jsonEncoder) resetReflectBuf() {
	if enc.reflectBuf == nil {
		enc.reflectBuf = GetPool()
		enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
	} else {
		enc.reflectBuf.Reset()
	}
}

// AddReflected falls back to encoding/json for values with no typed Add method.
func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error {
	enc.resetReflectBuf()
	err := enc.reflectEnc.Encode(obj)
	if err != nil {
		return err
	}
	// json.Encoder.Encode always appends '\n'; strip it before embedding.
	enc.reflectBuf.TrimNewline()
	enc.addKey(key)
	_, err = enc.buf.Write(enc.reflectBuf.Bytes())
	return err
}

// OpenNamespace begins a nested JSON object under key; the matching '}' is
// emitted by closeOpenNamespaces during Encode.
func (enc *jsonEncoder) OpenNamespace(key string) {
	enc.addKey(key)
	enc.buf.AppendByte('{')
	enc.openNamespaces++
}

func (enc *jsonEncoder) AddString(key, val string) {
	enc.addKey(key)
	enc.AppendString(val)
}

func (enc *jsonEncoder) AddTime(key string, val time.Time) {
	enc.addKey(key)
	enc.AppendTime(val)
}

func (enc *jsonEncoder) AddUint64(key string, val uint64) {
	enc.addKey(key)
	enc.AppendUint64(val)
}
// AppendArray writes "[...]" filled in by the marshaler's callbacks.
func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
	enc.addElementSeparator()
	enc.buf.AppendByte('[')
	err := arr.MarshalLogArray(enc)
	enc.buf.AppendByte(']')
	return err
}

// AppendObject writes "{...}" filled in by the marshaler's callbacks.
func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
	enc.addElementSeparator()
	enc.buf.AppendByte('{')
	err := obj.MarshalLogObject(enc)
	enc.buf.AppendByte('}')
	return err
}

func (enc *jsonEncoder) AppendBool(val bool) {
	enc.addElementSeparator()
	enc.buf.AppendBool(val)
}

func (enc *jsonEncoder) AppendByteString(val []byte) {
	enc.addElementSeparator()
	enc.buf.AppendByte('"')
	enc.safeAddByteString(val)
	enc.buf.AppendByte('"')
}

// AppendComplex128 writes the value as the quoted string "<real>+<imag>i",
// since JSON has no complex number type.
func (enc *jsonEncoder) AppendComplex128(val complex128) {
	enc.addElementSeparator()
	// Cast to a platform-independent, fixed-size type.
	r, i := float64(real(val)), float64(imag(val))
	enc.buf.AppendByte('"')
	// Because we're always in a quoted string, we can use strconv without
	// special-casing NaN and +/-Inf.
	enc.buf.AppendFloat(r, 64)
	enc.buf.AppendByte('+')
	enc.buf.AppendFloat(i, 64)
	enc.buf.AppendByte('i')
	enc.buf.AppendByte('"')
}

// AppendDuration delegates to the configured EncodeDuration hook.
func (enc *jsonEncoder) AppendDuration(val time.Duration) {
	cur := enc.buf.Len()
	enc.EncodeDuration(val, enc)
	if cur == enc.buf.Len() {
		// User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep
		// JSON valid.
		enc.AppendInt64(int64(val))
	}
}

func (enc *jsonEncoder) AppendInt64(val int64) {
	enc.addElementSeparator()
	enc.buf.AppendInt(val)
}

// AppendReflected encodes an arbitrary value via encoding/json.
func (enc *jsonEncoder) AppendReflected(val interface{}) error {
	enc.resetReflectBuf()
	err := enc.reflectEnc.Encode(val)
	if err != nil {
		return err
	}
	enc.reflectBuf.TrimNewline()
	enc.addElementSeparator()
	_, err = enc.buf.Write(enc.reflectBuf.Bytes())
	return err
}

func (enc *jsonEncoder) AppendString(val string) {
	enc.addElementSeparator()
	enc.buf.AppendByte('"')
	enc.safeAddString(val)
	enc.buf.AppendByte('"')
}

// AppendTime delegates to the configured EncodeTime hook.
func (enc *jsonEncoder) AppendTime(val time.Time) {
	cur := enc.buf.Len()
	enc.EncodeTime(val, enc)
	if cur == enc.buf.Len() {
		// User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep
		// output JSON valid.
		enc.AppendInt64(val.UnixNano())
	}
}

func (enc *jsonEncoder) AppendUint64(val uint64) {
	enc.addElementSeparator()
	enc.buf.AppendUint(val)
}
// The remaining AddXxx/AppendXxx variants widen their argument and delegate
// to the 64-bit implementations above.
func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
func (enc *jsonEncoder) AddFloat32(k string, v float32)     { enc.AddFloat64(k, float64(v)) }
func (enc *jsonEncoder) AddInt(k string, v int)             { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt32(k string, v int32)         { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt16(k string, v int16)         { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt8(k string, v int8)           { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddUint(k string, v uint)           { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint32(k string, v uint32)       { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint16(k string, v uint16)       { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint8(k string, v uint8)         { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUintptr(k string, v uintptr)     { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AppendComplex64(v complex64)        { enc.AppendComplex128(complex128(v)) }
func (enc *jsonEncoder) AppendFloat64(v float64)            { enc.appendFloat(v, 64) }
func (enc *jsonEncoder) AppendFloat32(v float32)            { enc.appendFloat(float64(v), 32) }
func (enc *jsonEncoder) AppendInt(v int)                    { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt32(v int32)                { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt16(v int16)                { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt8(v int8)                  { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendUint(v uint)                  { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint32(v uint32)              { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint16(v uint16)              { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint8(v uint8)                { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUintptr(v uintptr)            { enc.AppendUint64(uint64(v)) }
// Clone returns an independent encoder sharing the config but owning a
// fresh pooled buffer; later context does not affect the receiver.
func (enc *jsonEncoder) Clone() Encoder {
	return enc.clone()
}

// clone takes a pooled encoder, copies config/state, and attaches a fresh
// pooled buffer.
func (enc *jsonEncoder) clone() *jsonEncoder {
	clone := getJSONEncoder()
	clone.EncoderConfig = enc.EncoderConfig
	clone.spaced = enc.spaced
	clone.openNamespaces = enc.openNamespaces
	clone.buf = GetPool()
	return clone
}

// Encode writes one complete JSON object to buf: the context already
// accumulated in enc.buf, then fields, wrapped in "{...}" plus a trailing
// newline. The temporary clone is returned to the encoder pool.
// Always returns nil.
func (enc *jsonEncoder) Encode(buf *Buffer, fields ...Field) error {
	final := enc.clone()
	// clone() attached a pooled buffer that this method never uses. Free it
	// before swapping in the caller's buffer; previously it was silently
	// dropped on every Encode, defeating the buffer pool.
	final.buf.Free()
	final.buf = buf
	final.buf.AppendByte('{')
	if enc.buf.Len() > 0 {
		final.addElementSeparator()
		final.buf.Write(enc.buf.Bytes())
	}
	for i := range fields {
		fields[i].AddTo(final)
	}
	final.closeOpenNamespaces()
	final.buf.AppendString("}\n")
	putJSONEncoder(final)
	return nil
}
// closeOpenNamespaces emits a '}' for every OpenNamespace still open.
func (enc *jsonEncoder) closeOpenNamespaces() {
	for i := 0; i < enc.openNamespaces; i++ {
		enc.buf.AppendByte('}')
	}
}

// addKey writes a separator (if needed), then the escaped key and a colon.
func (enc *jsonEncoder) addKey(key string) {
	enc.addElementSeparator()
	enc.buf.AppendByte('"')
	enc.safeAddString(key)
	enc.buf.AppendByte('"')
	enc.buf.AppendByte(':')
	if enc.spaced {
		enc.buf.AppendByte(' ')
	}
}

// addElementSeparator appends a comma unless the last byte shows we are at
// the start of the buffer, just inside an object/array, or right after a
// key's colon (or the space following it in spaced mode).
func (enc *jsonEncoder) addElementSeparator() {
	last := enc.buf.Len() - 1
	if last < 0 {
		return
	}
	switch enc.buf.Bytes()[last] {
	case '{', '[', ':', ',', ' ':
		return
	default:
		enc.buf.AppendByte(',')
		if enc.spaced {
			enc.buf.AppendByte(' ')
		}
	}
}

// appendFloat writes the number, quoting NaN and ±Inf which raw JSON
// cannot express.
func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
	enc.addElementSeparator()
	switch {
	case math.IsNaN(val):
		enc.buf.AppendString(`"NaN"`)
	case math.IsInf(val, 1):
		enc.buf.AppendString(`"+Inf"`)
	case math.IsInf(val, -1):
		enc.buf.AppendString(`"-Inf"`)
	default:
		enc.buf.AppendFloat(val, bitSize)
	}
}
// safeAddString JSON-escapes a string and appends it to the internal buffer.
// Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) {
	for i := 0; i < len(s); {
		if enc.tryAddRuneSelf(s[i]) {
			i++
			continue
		}
		r, size := utf8.DecodeRuneInString(s[i:])
		if enc.tryAddRuneError(r, size) {
			i++
			continue
		}
		// Valid multi-byte rune: copy it through unescaped.
		enc.buf.AppendString(s[i : i+size])
		i += size
	}
}

// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) {
	for i := 0; i < len(s); {
		if enc.tryAddRuneSelf(s[i]) {
			i++
			continue
		}
		r, size := utf8.DecodeRune(s[i:])
		if enc.tryAddRuneError(r, size) {
			i++
			continue
		}
		enc.buf.Write(s[i : i+size])
		i += size
	}
}

// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte.
// It returns false for bytes >= utf8.RuneSelf, which must be decoded as runes.
func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
	if b >= utf8.RuneSelf {
		return false
	}
	// Printable ASCII other than '\' and '"' passes through unescaped.
	if 0x20 <= b && b != '\\' && b != '"' {
		enc.buf.AppendByte(b)
		return true
	}
	switch b {
	case '\\', '"':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte(b)
	case '\n':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte('n')
	case '\r':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte('r')
	case '\t':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte('t')
	default:
		// Encode bytes < 0x20, except for the escape sequences above.
		enc.buf.AppendString(`\u00`)
		enc.buf.AppendByte(_hex[b>>4])
		enc.buf.AppendByte(_hex[b&0xF])
	}
	return true
}

// tryAddRuneError writes the escaped replacement character for an invalid
// UTF-8 byte (DecodeRune reports RuneError with size 1).
func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
	if r == utf8.RuneError && size == 1 {
		enc.buf.AppendString(`\ufffd`)
		return true
	}
	return false
}

@ -0,0 +1,32 @@
package core
import "sync"
// A Pool is a type-safe wrapper around a sync.Pool.
type Pool struct {
	p *sync.Pool
}

// NewPool constructs a new Pool. size is the initial byte capacity of each
// pooled Buffer; non-positive values fall back to the package default.
func NewPool(size int) Pool {
	// Guard size <= 0, not just == 0: a negative capacity would make
	// make([]byte, 0, size) panic the first time the pool allocates.
	if size <= 0 {
		size = _size
	}
	return Pool{p: &sync.Pool{
		New: func() interface{} {
			return &Buffer{bs: make([]byte, 0, size)}
		},
	}}
}

// Get retrieves a Buffer from the pool, creating one if necessary.
// The Buffer is reset and remembers this pool so it can be returned later.
func (p Pool) Get() *Buffer {
	buf := p.p.Get().(*Buffer)
	buf.Reset()
	buf.pool = p
	return buf
}

// put returns a Buffer to the pool.
func (p Pool) put(buf *Buffer) {
	p.p.Put(buf)
}

@ -0,0 +1,32 @@
package core
import (
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
// TestBuffers hammers the pool from ten goroutines to verify that Get
// always hands back an empty (but pre-sized) buffer and that Free returns
// buffers safely under concurrency.
func TestBuffers(t *testing.T) {
	const dummyData = "dummy data"
	p := NewPool(0)
	var wg sync.WaitGroup
	for g := 0; g < 10; g++ {
		wg.Add(1)
		go func() {
			for i := 0; i < 100; i++ {
				buf := p.Get()
				assert.Zero(t, buf.Len(), "Expected truncated buffer")
				assert.NotZero(t, buf.Cap(), "Expected non-zero capacity")
				buf.AppendString(dummyData)
				assert.Equal(t, buf.Len(), len(dummyData), "Expected buffer to contain dummy data")
				buf.Free()
			}
			wg.Done()
		}()
	}
	wg.Wait()
}

@ -2,7 +2,14 @@ package log
import (
"context"
"flag"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/bilibili/Kratos/pkg/conf/env"
)
// Config log config.
@ -42,6 +49,72 @@ var (
c *Config
)
// init registers the log flags on the default CommandLine flag set.
func init() {
	addFlag(flag.CommandLine)
}

var (
	_v      int           // global verbose level
	_stdout bool          // enable the stdout handler
	_dir    string        // file handler directory ("" disables it)
	_filter logFilter     // field keys to mask
	_module = verboseModule{} // per-module verbose levels
)

// addFlag seeds the log settings from environment variables and then
// registers the corresponding command-line flags, so flags override env.
func addFlag(fs *flag.FlagSet) {
	if lv, err := strconv.ParseInt(os.Getenv("LOG_V"), 10, 64); err == nil {
		_v = int(lv)
	}
	_stdout, _ = strconv.ParseBool(os.Getenv("LOG_STDOUT"))
	_dir = os.Getenv("LOG_DIR")
	if tm := os.Getenv("LOG_MODULE"); len(tm) > 0 {
		_module.Set(tm)
	}
	if tf := os.Getenv("LOG_FILTER"); len(tf) > 0 {
		_filter.Set(tf)
	}
	// get val from flag
	fs.IntVar(&_v, "log.v", _v, "log verbose level, or use LOG_V env variable.")
	fs.BoolVar(&_stdout, "log.stdout", _stdout, "log enable stdout or not, or use LOG_STDOUT env variable.")
	// Back-quotes in the usage string must be balanced: flag.UnquoteUsage
	// extracts `path` as the value name; the original had a single stray '`'
	// that leaked into the printed help text.
	fs.StringVar(&_dir, "log.dir", _dir, "log file `path`, or use LOG_DIR env variable.")
	fs.Var(&_module, "log.module", "log verbose for specified module, or use LOG_MODULE env variable, format: file=1,file2=2.")
	fs.Var(&_filter, "log.filter", "log field for sensitive message, or use LOG_FILTER env variable, format: field1,field2.")
}
// Init create logger with context.
// A nil conf falls back to the env/flag-derived defaults and forces the
// stdout handler on; Family defaults to $APPID and Host to the hostname.
func Init(conf *Config) {
	var isNil bool
	if conf == nil {
		isNil = true
		conf = &Config{
			Stdout: _stdout,
			Dir:    _dir,
			V:      int32(_v),
			Module: _module,
			Filter: _filter,
		}
	}
	if len(env.AppID) != 0 {
		conf.Family = env.AppID // for caster
	}
	conf.Host = env.Hostname
	if len(conf.Host) == 0 {
		// env didn't provide a hostname; best-effort OS lookup, error ignored.
		host, _ := os.Hostname()
		conf.Host = host
	}
	var hs []Handler
	// when env is dev
	if isNil || conf.Stdout {
		hs = append(hs, NewStdout())
	}
	if conf.Dir != "" {
		hs = append(hs, NewFile(conf.Dir, conf.FileBufferSize, conf.RotateSize, conf.MaxLogFile))
	}
	h = newHandlers(conf.Filter, hs...)
	c = conf
}
// D represents a map of entry level data used for structured logging.
// type D map[string]interface{}
type D struct {
@ -49,6 +122,68 @@ type D struct {
Value interface{}
}
// AddTo exports a field through the ObjectEncoder interface. It's primarily
// useful to library authors, and shouldn't be necessary in most applications.
// The type switch picks the zero-allocation encoder method for every known
// concrete type; anything else falls back to reflection via AddReflected.
func (d D) AddTo(enc core.ObjectEncoder) {
	var err error
	switch val := d.Value.(type) {
	case bool:
		enc.AddBool(d.Key, val)
	case complex128:
		enc.AddComplex128(d.Key, val)
	case complex64:
		enc.AddComplex64(d.Key, val)
	case float64:
		enc.AddFloat64(d.Key, val)
	case float32:
		enc.AddFloat32(d.Key, val)
	case int:
		enc.AddInt(d.Key, val)
	case int64:
		enc.AddInt64(d.Key, val)
	case int32:
		enc.AddInt32(d.Key, val)
	case int16:
		enc.AddInt16(d.Key, val)
	case int8:
		enc.AddInt8(d.Key, val)
	case string:
		enc.AddString(d.Key, val)
	case uint:
		enc.AddUint(d.Key, val)
	case uint64:
		enc.AddUint64(d.Key, val)
	case uint32:
		enc.AddUint32(d.Key, val)
	case uint16:
		enc.AddUint16(d.Key, val)
	case uint8:
		enc.AddUint8(d.Key, val)
	case []byte:
		enc.AddByteString(d.Key, val)
	case uintptr:
		enc.AddUintptr(d.Key, val)
	case time.Time:
		enc.AddTime(d.Key, val)
	case xtime.Time:
		enc.AddTime(d.Key, val.Time())
	case time.Duration:
		enc.AddDuration(d.Key, val)
	case xtime.Duration:
		enc.AddDuration(d.Key, time.Duration(val))
	case error:
		enc.AddString(d.Key, val.Error())
	case fmt.Stringer:
		enc.AddString(d.Key, val.String())
	default:
		err = enc.AddReflected(d.Key, val)
	}
	// Reflection failures are surfaced as a companion "<key>Error" field
	// instead of being dropped silently.
	if err != nil {
		enc.AddString(fmt.Sprintf("%sError", d.Key), err.Error())
	}
}
// KV return a log kv for logging field.
func KV(key string, value interface{}) D {
return D{
@ -57,6 +192,21 @@ func KV(key string, value interface{}) D {
}
}
// logFilter is the list of field keys whose values must be masked in output.
type logFilter []string

// String implements flag.Value.
func (f *logFilter) String() string {
	return fmt.Sprint(*f)
}

// Set implements flag.Value.
// format: -log.filter key1,key2 — each comma-separated key is trimmed of
// surrounding whitespace and appended to the filter list.
func (f *logFilter) Set(value string) error {
	fields := strings.Split(value, ",")
	for _, field := range fields {
		*f = append(*f, strings.TrimSpace(field))
	}
	return nil
}
// Info logs a message at the info log level.
// format and args follow fmt.Sprintf semantics.
func Info(format string, args ...interface{}) {
	h.Log(context.Background(), _infoLevel, KV(_log, fmt.Sprintf(format, args...)))
}
@ -72,6 +222,69 @@ func Error(format string, args ...interface{}) {
h.Log(context.Background(), _errorLevel, KV(_log, fmt.Sprintf(format, args...)))
}
// Infoc logs a fmt.Sprintf-formatted message at the info log level,
// carrying ctx so trace/metadata fields are attached.
func Infoc(ctx context.Context, format string, args ...interface{}) {
	h.Log(ctx, _infoLevel, KV(_log, fmt.Sprintf(format, args...)))
}

// Errorc logs a fmt.Sprintf-formatted message at the error log level.
func Errorc(ctx context.Context, format string, args ...interface{}) {
	h.Log(ctx, _errorLevel, KV(_log, fmt.Sprintf(format, args...)))
}

// Warnc logs a fmt.Sprintf-formatted message at the warning log level.
func Warnc(ctx context.Context, format string, args ...interface{}) {
	h.Log(ctx, _warnLevel, KV(_log, fmt.Sprintf(format, args...)))
}

// Infov logs structured key-value fields at the info log level.
func Infov(ctx context.Context, args ...D) {
	h.Log(ctx, _infoLevel, args...)
}

// Warnv logs structured key-value fields at the warning log level.
func Warnv(ctx context.Context, args ...D) {
	h.Log(ctx, _warnLevel, args...)
}

// Errorv logs structured key-value fields at the error log level.
func Errorv(ctx context.Context, args ...D) {
	h.Log(ctx, _errorLevel, args...)
}
// SetFormat only effective on stdout and file handler
// %T time format at "15:04:05.999" on stdout handler, "15:04:05 MST" on file handler
// %t time format at "15:04:05" on stdout handler, "15:04" on file on file handler
// %D data format at "2006/01/02"
// %d data format at "01/02"
// %L log level e.g. INFO WARN ERROR
// %M log message and additional fields: key=value this is log message
// NOTE below pattern not support on file handler
// %f function name and line number e.g. model.Get:121
// %i instance id
// %e deploy env e.g. dev uat fat prod
// %z zone
// %S full file name and line number: /a/b/c/d.go:23
// %s final file name element and line number: d.go:23
func SetFormat(format string) {
	h.SetFormat(format)
}

// Infow logs a message with some additional context. The variadic key-value pairs are treated as they are in With.
// A trailing odd argument is dropped by logw (with a warning).
func Infow(ctx context.Context, args ...interface{}) {
	h.Log(ctx, _infoLevel, logw(args)...)
}

// Warnw logs a message with some additional context. The variadic key-value pairs are treated as they are in With.
func Warnw(ctx context.Context, args ...interface{}) {
	h.Log(ctx, _warnLevel, logw(args)...)
}

// Errorw logs a message with some additional context. The variadic key-value pairs are treated as they are in With.
func Errorw(ctx context.Context, args ...interface{}) {
	h.Log(ctx, _errorLevel, logw(args)...)
}
func logw(args []interface{}) []D {
if len(args)%2 != 0 {
Warn("log: the variadic must be plural, the last one will ignored")
@ -86,3 +299,10 @@ func logw(args []interface{}) []D {
}
return ds
}
// Close close resource.
// It closes the active handlers and reinstates the default stdout handler
// so subsequent log calls still have somewhere to write.
func Close() (err error) {
	err = h.Close()
	h = _defaultStdout
	return
}

@ -0,0 +1,89 @@
package log
import (
"context"
"testing"
"github.com/bilibili/Kratos/pkg/net/metadata"
"github.com/stretchr/testify/assert"
)
// initStdout configures the package with only the stdout handler.
func initStdout() {
	conf := &Config{
		Stdout: true,
	}
	Init(conf)
}

// initFile configures the package with the file handler writing to /tmp and
// a per-module verbose level for this test file.
func initFile() {
	conf := &Config{
		Dir: "/tmp",
		// VLevel: 2,
		Module: map[string]int32{"log_test": 1},
	}
	Init(conf)
}

// TestLog is a sample payload type used by the tests.
type TestLog struct {
	A string
	B int
	C string
	D string
}

// testLog exercises every public logging entry point at each level; it is a
// helper invoked by TestFile and TestStdout (lowercase: not run directly).
func testLog(t *testing.T) {
	t.Run("Error", func(t *testing.T) {
		Error("hello %s", "world")
		Errorv(context.Background(), KV("key", 2222222), KV("test2", "test"))
		Errorc(context.Background(), "keys: %s %s...", "key1", "key2")
		Errorw(context.Background(), "key1", "value1", "key2", "value2")
	})
	t.Run("Warn", func(t *testing.T) {
		Warn("hello %s", "world")
		Warnv(context.Background(), KV("key", 2222222), KV("test2", "test"))
		Warnc(context.Background(), "keys: %s %s...", "key1", "key2")
		Warnw(context.Background(), "key1", "value1", "key2", "value2")
	})
	t.Run("Info", func(t *testing.T) {
		Info("hello %s", "world")
		Infov(context.Background(), KV("key", 2222222), KV("test2", "test"))
		Infoc(context.Background(), "keys: %s %s...", "key1", "key2")
		Infow(context.Background(), "key1", "value1", "key2", "value2")
	})
}
// TestFile runs the logging suite against the file handler.
func TestFile(t *testing.T) {
	initFile()
	testLog(t)
	assert.Equal(t, nil, Close())
}

// TestStdout runs the logging suite against the stdout handler.
func TestStdout(t *testing.T) {
	initStdout()
	testLog(t)
	assert.Equal(t, nil, Close())
}

// TestLogW checks that logw pairs up variadic args and drops a trailing odd one.
func TestLogW(t *testing.T) {
	D := logw([]interface{}{"i", "like", "a", "dog"})
	if len(D) != 2 || D[0].Key != "i" || D[0].Value != "like" || D[1].Key != "a" || D[1].Value != "dog" {
		t.Fatalf("logw out put should be ' {i like} {a dog}'")
	}
	D = logw([]interface{}{"i", "like", "dog"})
	if len(D) != 1 || D[0].Key != "i" || D[0].Value != "like" {
		t.Fatalf("logw out put should be ' {i like}'")
	}
}

// TestLogWithMirror smoke-tests logging with and without mirror metadata.
func TestLogWithMirror(t *testing.T) {
	Info("test log")
	mdcontext := metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: "true"})
	Infov(mdcontext, KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content"))
	mdcontext = metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: "***"})
	Infov(mdcontext, KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content"))
	Infov(context.Background(), KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content"))
}

@ -0,0 +1,173 @@
package log
import (
"bytes"
"fmt"
"io"
"path"
"runtime"
"strings"
"sync"
"time"
)
// Render render log output
type Render interface {
	// Render writes the formatted fields to w.
	Render(io.Writer, map[string]interface{}) error
	// RenderString returns the formatted fields as a string.
	RenderString(map[string]interface{}) string
}

// patternMap maps each supported %-verb to its fragment producer.
var patternMap = map[string]func(map[string]interface{}) string{
	"T": longTime,
	"t": shortTime,
	"D": longDate,
	"d": shortDate,
	"L": keyFactory(_level),
	"f": keyFactory(_source),
	"i": keyFactory(_instanceID),
	"e": keyFactory(_deplyEnv),
	"z": keyFactory(_zone),
	"S": longSource,
	"s": shortSource,
	"M": message,
}
// newPatternRender new pattern render.
// It compiles format once: literal runs become textFactory closures and
// each known %-verb becomes its patternMap producer. Unknown verbs and a
// trailing '%' are kept as literal text.
func newPatternRender(format string) Render {
	p := &pattern{
		bufPool: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
	}
	b := make([]byte, 0, len(format))
	for i := 0; i < len(format); i++ {
		if format[i] != '%' {
			b = append(b, format[i])
			continue
		}
		if i+1 >= len(format) {
			// '%' is the last byte: treat it as a literal.
			b = append(b, format[i])
			continue
		}
		f, ok := patternMap[string(format[i+1])]
		if !ok {
			// Unknown verb: keep the '%' literal; the verb character itself
			// is picked up by the next loop iteration.
			b = append(b, format[i])
			continue
		}
		// Flush any pending literal text before the verb's producer.
		if len(b) != 0 {
			p.funcs = append(p.funcs, textFactory(string(b)))
			b = b[:0]
		}
		p.funcs = append(p.funcs, f)
		i++
	}
	if len(b) != 0 {
		p.funcs = append(p.funcs, textFactory(string(b)))
	}
	return p
}
type pattern struct {
funcs []func(map[string]interface{}) string
bufPool sync.Pool
}
// Render implemet Formater
func (p *pattern) Render(w io.Writer, d map[string]interface{}) error {
buf := p.bufPool.Get().(*bytes.Buffer)
defer func() {
buf.Reset()
p.bufPool.Put(buf)
}()
for _, f := range p.funcs {
buf.WriteString(f(d))
}
_, err := buf.WriteTo(w)
return err
}
// Render implemet Formater as string
func (p *pattern) RenderString(d map[string]interface{}) string {
// TODO strings.Builder
buf := p.bufPool.Get().(*bytes.Buffer)
defer func() {
buf.Reset()
p.bufPool.Put(buf)
}()
for _, f := range p.funcs {
buf.WriteString(f(d))
}
return buf.String()
}
// textFactory returns a producer that always emits the fixed literal text.
func textFactory(text string) func(map[string]interface{}) string {
	return func(_ map[string]interface{}) string { return text }
}

// keyFactory returns a producer that looks key up in the field map,
// returning strings as-is, other values via fmt.Sprint, and "" when absent.
func keyFactory(key string) func(map[string]interface{}) string {
	return func(d map[string]interface{}) string {
		v, ok := d[key]
		if !ok {
			return ""
		}
		if s, isString := v.(string); isString {
			return s
		}
		return fmt.Sprint(v)
	}
}
// longSource returns "full/path/file.go:line" of the logging call site.
// NOTE(review): the skip depth 6 is hard-coded to match the current call
// chain through the handler — confirm it still holds if the stack changes.
func longSource(map[string]interface{}) string {
	if _, file, lineNo, ok := runtime.Caller(6); ok {
		return fmt.Sprintf("%s:%d", file, lineNo)
	}
	return "unknown:0"
}

// shortSource is longSource reduced to the final path element ("file.go:line").
func shortSource(map[string]interface{}) string {
	if _, file, lineNo, ok := runtime.Caller(6); ok {
		return fmt.Sprintf("%s:%d", path.Base(file), lineNo)
	}
	return "unknown:0"
}
// longTime formats the current time as "15:04:05.000" (%T).
func longTime(map[string]interface{}) string {
	return time.Now().Format("15:04:05.000")
}

// shortTime formats the current time as "15:04" (%t).
// NOTE(review): SetFormat's doc says %t is "15:04:05" on stdout, but this
// produces "15:04" — confirm which is intended.
func shortTime(map[string]interface{}) string {
	return time.Now().Format("15:04")
}

// longDate formats the current date as "2006/01/02" (%D).
func longDate(map[string]interface{}) string {
	return time.Now().Format("2006/01/02")
}

// shortDate formats the current date as "01/02" (%d).
func shortDate(map[string]interface{}) string {
	return time.Now().Format("01/02")
}
// isInternalKey reports whether k is a reserved field that the %M message
// renderer must not repeat as key=value output.
func isInternalKey(k string) bool {
	switch k {
	case _level, _levelValue, _time, _source, _instanceID, _appID, _deplyEnv, _zone:
		return true
	}
	return false
}
// message renders the %M verb: every non-internal key as "key=value"
// (map-iteration order, hence unordered) followed by the free-form log text.
func message(d map[string]interface{}) string {
	var m string
	var s []string
	for k, v := range d {
		if k == _log {
			m = fmt.Sprint(v)
			continue
		}
		if isInternalKey(k) {
			continue
		}
		s = append(s, fmt.Sprintf("%s=%v", k, v))
	}
	// Only append the message when present: unconditionally appending the
	// empty string used to leave a trailing space after the key=value pairs
	// whenever the _log field was absent.
	if m != "" {
		s = append(s, m)
	}
	return strings.Join(s, " ")
}

@ -0,0 +1,35 @@
package log
import (
"bytes"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestPatternDefault renders "%L %T %f %M" and checks field count/positions.
func TestPatternDefault(t *testing.T) {
	buf := &bytes.Buffer{}
	p := newPatternRender("%L %T %f %M")
	p.Render(buf, map[string]interface{}{_level: _infoLevel.String(), _log: "hello", _time: time.Now().Format(_timeFormat), _source: "xxx:123"})
	fields := strings.Fields(buf.String())
	assert.Equal(t, 4, len(fields))
	assert.Equal(t, "INFO", fields[0])
	assert.Equal(t, "hello", fields[3])
}

// TestKV checks that a non-internal key renders as key=value before the message.
func TestKV(t *testing.T) {
	buf := &bytes.Buffer{}
	p := newPatternRender("%M")
	p.Render(buf, map[string]interface{}{_level: _infoLevel.String(), _log: "2233", "hello": "test"})
	assert.Equal(t, "hello=test 2233", buf.String())
}

// TestBadSymbol checks that unknown %-verbs pass through verbatim.
func TestBadSymbol(t *testing.T) {
	buf := &bytes.Buffer{}
	p := newPatternRender("%12 %% %xd %M")
	p.Render(buf, map[string]interface{}{_level: _infoLevel.String(), _log: "2233"})
	assert.Equal(t, "%12 %% %xd 2233", buf.String())
}

@ -0,0 +1,61 @@
package log
import (
"context"
"io"
"os"
"time"
)
// defaultPattern is the stdout handler's default output layout.
const defaultPattern = "%L %d-%T %f %M"

// _defaultStdout is the fallback handler that Close() reinstates.
var _defaultStdout = NewStdout()

// StdoutHandler stdout log handler
type StdoutHandler struct {
	out    io.Writer // destination stream (os.Stderr by default)
	render Render    // pattern renderer for each line
}

// NewStdout create a stdout log handler.
// NOTE(review): despite the name it writes to os.Stderr, not os.Stdout.
func NewStdout() *StdoutHandler {
	return &StdoutHandler{
		out:    os.Stderr,
		render: newPatternRender(defaultPattern),
	}
}
// Log stdout loging, only for developing env.
// It collects the D fields plus context/extra fields into a map, stamps the
// time, renders, and appends a newline.
// NOTE(review): the render error is ignored and the line + "\n" are two
// separate writes, so concurrent loggers may interleave output.
func (h *StdoutHandler) Log(ctx context.Context, lv Level, args ...D) {
	d := make(map[string]interface{}, 10+len(args))
	for _, arg := range args {
		d[arg.Key] = arg.Value
	}
	// add extra fields
	addExtraField(ctx, d)
	d[_time] = time.Now().Format(_timeFormat)
	h.render.Render(h.out, d)
	h.out.Write([]byte("\n"))
}
// Close stdout loging.
// Nothing to release: the underlying stream is os.Stderr.
func (h *StdoutHandler) Close() error {
	return nil
}

// SetFormat set stdout log output format.
// It replaces the compiled pattern; not synchronized with concurrent Log calls.
// %T time format at "15:04:05.999"
// %t time format at "15:04:05"
// %D data format at "2006/01/02"
// %d data format at "01/02"
// %L log level e.g. INFO WARN ERROR
// %f function name and line number e.g. model.Get:121
// %i instance id
// %e deploy env e.g. dev uat fat prod
// %z zone
// %S full file name and line number: /a/b/c/d.go:23
// %s final file name element and line number: d.go:23
// %M log message and additional fields: key=value this is log message
func (h *StdoutHandler) SetFormat(format string) {
	h.render = newPatternRender(format)
}

@ -0,0 +1,54 @@
package log
import (
"context"
"fmt"
"runtime"
"strconv"
"sync"
"github.com/bilibili/Kratos/pkg/conf/env"
"github.com/bilibili/Kratos/pkg/net/metadata"
"github.com/bilibili/Kratos/pkg/net/trace"
)
// fm caches pc -> "func:line" strings for funcName.
var fm sync.Map

// addExtraField merges request-scoped metadata (trace id, caller, color,
// cluster, mirror flag) and process-level info (deploy env, zone, app id,
// host) into the field map consumed by the render.
// NOTE(review): reads package var c (set by Init); calling before Init
// would panic on the nil *Config — confirm Init always runs first.
func addExtraField(ctx context.Context, fields map[string]interface{}) {
	if t, ok := trace.FromContext(ctx); ok {
		if s, ok := t.(fmt.Stringer); ok {
			fields[_tid] = s.String()
		} else {
			// In this branch t is provably NOT a fmt.Stringer, so the
			// original %s verb produced "%!s(...)" noise; %v formats
			// arbitrary values correctly.
			fields[_tid] = fmt.Sprintf("%v", t)
		}
	}
	if caller := metadata.String(ctx, metadata.Caller); caller != "" {
		fields[_caller] = caller
	}
	if color := metadata.String(ctx, metadata.Color); color != "" {
		fields[_color] = color
	}
	if cluster := metadata.String(ctx, metadata.Cluster); cluster != "" {
		fields[_cluster] = cluster
	}
	fields[_deplyEnv] = env.DeployEnv
	fields[_zone] = env.Zone
	fields[_appID] = c.Family
	fields[_instanceID] = c.Host
	if metadata.Bool(ctx, metadata.Mirror) {
		fields[_mirror] = true
	}
}
// funcName get func name.
// funcName resolves the caller at the given skip depth to
// "import/path.Func:line", caching the result per program counter in fm.
func funcName(skip int) (name string) {
	pc, _, line, ok := runtime.Caller(skip)
	if !ok {
		return
	}
	if cached, hit := fm.Load(pc); hit {
		return cached.(string)
	}
	name = runtime.FuncForPC(pc).Name() + ":" + strconv.FormatInt(int64(line), 10)
	fm.Store(pc, name)
	return
}

@ -1,13 +1,42 @@
package log
import (
"bytes"
"context"
"fmt"
"path/filepath"
"runtime"
"strconv"
"strings"
)
// verboseModule maps a module (file) name to its verbose log level.
type verboseModule map[string]int32

// String renders the map in the same "name=level," form that Set parses.
func (m verboseModule) String() string {
	// FIXME strings.Builder
	var buf bytes.Buffer
	for k, v := range m {
		buf.WriteString(k)
		// BUG FIX: the '=' separator was missing, producing "file1" instead
		// of the flag-parseable "file=1".
		buf.WriteString("=")
		buf.WriteString(strconv.FormatInt(int64(v), 10))
		buf.WriteString(",")
	}
	return buf.String()
}

// Set sets the value of the named command-line flag.
// format: -log.module file=1,file2=2
// Malformed segments (no '=' or a non-numeric level) are silently skipped.
func (m verboseModule) Set(value string) error {
	for _, i := range strings.Split(value, ",") {
		kv := strings.Split(i, "=")
		if len(kv) == 2 {
			if v, err := strconv.ParseInt(kv[1], 10, 64); err == nil {
				m[strings.TrimSpace(kv[0])] = int32(v)
			}
		}
	}
	return nil
}
// V reports whether verbosity at the call site is at least the requested level.
// The returned value is a boolean of type Verbose, which implements Info, Infov etc.
// These methods will write to the Info log if called.

@ -29,8 +29,6 @@ import (
const (
_minRead = 16 * 1024 // 16kb
_ts = "ts"
)
var (

Loading…
Cancel
Save