package harvester

import (
	"errors"
	"io"
	"os"
	"time"

	"github.com/elastic/beats/filebeat/input"
	"github.com/elastic/beats/libbeat/logp"
)
// logFileReader reads from a single log file source, tracking the current
// offset and backing off after reaching EOF.
type logFileReader struct {
	fs           FileSource
	offset       int64
	config       logFileReaderConfig
	truncated    bool
	lastTimeRead time.Time
	backoff      time.Duration
}

// logFileReaderConfig controls inactivity handling, forced close on
// removal/rotation, and the EOF backoff behaviour.
type logFileReaderConfig struct {
	forceClose         bool
	closeOlder         time.Duration
	backoffDuration    time.Duration
	maxBackoffDuration time.Duration
	backoffFactor      int
}

// Errors returned by Read to signal state changes of the underlying file.
var (
	errFileTruncate = errors.New("detected file being truncated")
	errForceClose   = errors.New("file must be closed")
	errInactive     = errors.New("file inactive")
)
// newLogFileReader creates a reader for the given source. If the source is
// seekable, the current file position is used as the initial offset.
func newLogFileReader(
	fs FileSource,
	config logFileReaderConfig,
) (*logFileReader, error) {
	var offset int64
	if seeker, ok := fs.(io.Seeker); ok {
		var err error
		offset, err = seeker.Seek(0, os.SEEK_CUR)
		if err != nil {
			return nil, err
		}
	}

	return &logFileReader{
		fs:           fs,
		offset:       offset,
		config:       config,
		lastTimeRead: time.Now(),
		backoff:      config.backoffDuration,
	}, nil
}
// Read reads up to len(buf) bytes from the file. On EOF it checks for
// truncation, inactivity and rotation; if the file can still deliver data it
// backs off and retries until the buffer is full or an error occurs.
func (r *logFileReader) Read(buf []byte) (int, error) {
	if r.truncated {
		var offset int64
		if seeker, ok := r.fs.(io.Seeker); ok {
			var err error
			offset, err = seeker.Seek(0, os.SEEK_CUR)
			if err != nil {
				return 0, err
			}
		}
		r.offset = offset
		r.truncated = false
	}

	// totalN accumulates the bytes read across loop iterations so the returned
	// count covers everything written into buf, not only the last read.
	totalN := 0

	for {
		n, err := r.fs.Read(buf)
		if n > 0 {
			r.offset += int64(n)
			r.lastTimeRead = time.Now()
		}
		totalN += n

		if err == nil {
			// reset backoff
			r.backoff = r.config.backoffDuration
			return totalN, nil
		}

		continuable := r.fs.Continuable()
		if err == io.EOF && !continuable {
			logp.Info("Reached end of file: %s", r.fs.Name())
			return totalN, err
		}

		if err != io.EOF || !continuable {
			logp.Err("Unexpected state reading from %s; error: %s", r.fs.Name(), err)
			return totalN, err
		}

		// Refetch fileinfo to check if the file was truncated or disappeared.
		// Errors if the file was removed/rotated after reading and before
		// calling the stat function.
		info, statErr := r.fs.Stat()
		if statErr != nil {
			logp.Err("Unexpected error reading from %s; error: %s", r.fs.Name(), statErr)
			return totalN, statErr
		}

		// Handle the case where the file was truncated.
		if info.Size() < r.offset {
			logp.Debug("harvester",
				"File was truncated as offset (%d) > size (%d). Begin reading file from offset 0: %s",
				r.offset, info.Size(), r.fs.Name())
			r.truncated = true
			return totalN, errFileTruncate
		}

		age := time.Since(r.lastTimeRead)
		if age > r.config.closeOlder {
			// If the file hasn't changed for longer than closeOlder, the
			// harvester stops and the file handle is closed.
			return totalN, errInactive
		}

		if r.config.forceClose {
			// Check if the file name exists (see #93)
			_, statErr := os.Stat(r.fs.Name())

			// An error means the file does not exist. If there is no error,
			// check whether it is still the same file; if not, close it as
			// it was rotated.
			if statErr != nil || !input.IsSameFile(r.fs.Name(), info) {
				logp.Info("Force close file: %s; error: %s", r.fs.Name(), statErr)
				// Return directly on windows -> file is closing
				return totalN, errForceClose
			}
		}

		if err != io.EOF {
			logp.Err("Unexpected state reading from %s; error: %s", r.fs.Name(), err)
		}

		logp.Debug("harvester", "End of file reached: %s; Backoff now.", r.fs.Name())

		// Keep only the unfilled part of the buffer; return once it is full,
		// otherwise back off and try again.
		buf = buf[n:]
		if len(buf) == 0 {
			return totalN, nil
		}
		r.wait()
	}
}
// wait sleeps for the current backoff duration and then increases the backoff
// by backoffFactor, capped at maxBackoffDuration.
func (r *logFileReader) wait() {
	// Wait before trying to read again from the file which reached EOF.
	time.Sleep(r.backoff)

	// Increment backoff up to maxBackoff
	if r.backoff < r.config.maxBackoffDuration {
		r.backoff = r.backoff * time.Duration(r.config.backoffFactor)
		if r.backoff > r.config.maxBackoffDuration {
			r.backoff = r.config.maxBackoffDuration
		}
	}
}
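
// Usage sketch (not part of the original file): how a caller that already
// holds an open FileSource might construct and drive the reader. The
// configuration values below are illustrative assumptions, not filebeat
// defaults, and readChunk itself is a hypothetical helper.
func readChunk(source FileSource) ([]byte, error) {
	cfg := logFileReaderConfig{
		closeOlder:         5 * time.Minute,
		backoffDuration:    1 * time.Second,
		maxBackoffDuration: 10 * time.Second,
		backoffFactor:      2,
	}

	reader, err := newLogFileReader(source, cfg)
	if err != nil {
		return nil, err
	}

	// Read backs off on EOF until new data arrives, the buffer fills up, or
	// one of the errInactive/errForceClose/errFileTruncate conditions is hit.
	buf := make([]byte, 4096)
	n, err := reader.Read(buf)
	return buf[:n], err
}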