Commit 4a90c8b

*: Added errcheck and fixed unchecked errors [PART1]. (thanos-io#389)

Due to the high number of these issues, let's split the PR.

Signed-off-by: Bartek Plotka <bwplotka@gmail.com>
1 parent 71d519f commit 4a90c8b

File tree

22 files changed: +175 / -84 lines

Makefile

Lines changed: 6 additions & 1 deletion
@@ -31,6 +31,11 @@ vet:
 	@echo ">> vetting code"
 	@go vet ./...
 
+# TODO(bplotka): Make errcheck required stage and validate it on CI (once we fix all the issues claimed by errcheck).
+errcheck:
+	@echo ">> errchecking the code"
+	@errcheck -verbose -exclude .errcheck_excludes.txt ./...
+
 build: deps $(PROMU)
 	@echo ">> building binaries"
 	@$(PROMU) build --prefix $(PREFIX)
@@ -82,4 +87,4 @@ docs:
 	@go build ./cmd/thanos/...
 	@scripts/genflagdocs.sh
 
-.PHONY: all install-tools format vet build assets docker docker-push docs deps
+.PHONY: all install-tools format vet errcheck build assets docker docker-push docs deps
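
Note: errcheck reports call sites where a returned error is silently dropped, which is exactly the class of issue the rest of this commit fixes. A minimal, hypothetical Go snippet (not part of this commit) of the kind of code it flags:

package example

import "os"

// writeGreeting drops two returned errors on the floor; errcheck reports both
// the f.Write and the f.Close call sites.
func writeGreeting(path string) {
	f, err := os.Create(path)
	if err != nil {
		return
	}
	f.Write([]byte("hello\n"))
	f.Close()
}

The .errcheck_excludes.txt file referenced by the new target is assumed to list intentional exceptions; it is not shown in this section.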

cmd/thanos/bucket.go

Lines changed: 8 additions & 10 deletions
@@ -12,6 +12,7 @@ import (
 	"github.com/improbable-eng/thanos/pkg/block"
 	"github.com/improbable-eng/thanos/pkg/objstore/client"
 	"github.com/improbable-eng/thanos/pkg/objstore/s3"
+	"github.com/improbable-eng/thanos/pkg/runutil"
 	"github.com/improbable-eng/thanos/pkg/verifier"
 	"github.com/oklog/run"
 	"github.com/oklog/ulid"
@@ -57,31 +58,28 @@ func registerBucket(m map[string]setupFunc, app *kingpin.Application, name strin
 	verifyIDWhitelist := verify.Flag("id-whitelist", "Block IDs to verify (and optionally repair) only. "+
 		"If none is specified, all blocks will be verified. Repeated field").Strings()
 	m[name+" verify"] = func(g *run.Group, logger log.Logger, reg *prometheus.Registry, _ opentracing.Tracer, _ bool) error {
-		bkt, closeFn, err := client.NewBucket(gcsBucket, *s3Config, reg, name)
+		bkt, err := client.NewBucket(gcsBucket, *s3Config, reg, name)
 		if err != nil {
 			return err
 		}
+		defer runutil.LogOnErr(logger, bkt, "bucket client")
 
 		backupS3Config := *s3Config
 		backupS3Config.Bucket = *verifyBackupS3Bucket
-		backupBkt, backupCloseFn, err := client.NewBucket(verifyBackupGCSBucket, backupS3Config, reg, name)
+		backupBkt, err := client.NewBucket(verifyBackupGCSBucket, backupS3Config, reg, name)
 		if err == client.ErrNotFound {
 			if *verifyRepair {
 				return errors.Wrap(err, "repair is specified, so backup client is required")
 			}
-			// No repair - no need for backup bucket.
-			backupCloseFn = func() error { return nil }
-
 		} else if err != nil {
 			return err
+		} else {
+			defer runutil.LogOnErr(logger, backupBkt, "backup bucket client")
 		}
 
 		// Dummy actor to immediately kill the group after the run function returns.
 		g.Add(func() error { return nil }, func(error) {})
 
-		defer closeFn()
-		defer backupCloseFn()
-
 		var (
 			ctx = context.Background()
 			v   *verifier.Verifier
@@ -128,15 +126,15 @@ func registerBucket(m map[string]setupFunc, app *kingpin.Application, name strin
 	lsOutput := ls.Flag("output", "Format in which to print each block's information. May be 'json' or custom template.").
 		Short('o').Default("").String()
 	m[name+" ls"] = func(g *run.Group, logger log.Logger, reg *prometheus.Registry, _ opentracing.Tracer, _ bool) error {
-		bkt, closeFn, err := client.NewBucket(gcsBucket, *s3Config, reg, name)
+		bkt, err := client.NewBucket(gcsBucket, *s3Config, reg, name)
 		if err != nil {
 			return err
 		}
 
 		// Dummy actor to immediately kill the group after the run function returns.
 		g.Add(func() error { return nil }, func(error) {})
 
-		defer closeFn()
+		defer runutil.LogOnErr(logger, bkt, "bucket client")
 
 		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
 		defer cancel()
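
Note: from here on the commit replaces bare defer closeFn() / defer x.Close() calls with runutil.LogOnErr. The helper's own implementation is not part of the hunks shown on this page; a plausible sketch of its shape, assuming the go-kit logger Thanos already uses (parameter names chosen here for illustration):

package runutil

import (
	"io"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
)

// LogOnErr closes the closer and, instead of dropping the error, logs it at
// warn level. Intended for defer statements so errcheck stops flagging
// ignored Close errors.
func LogOnErr(logger log.Logger, closer io.Closer, what string) {
	if err := closer.Close(); err != nil {
		level.Warn(logger).Log("msg", "detected close error", "what", what, "err", err)
	}
}

The exact signature and log message are assumptions; the point is that the Close error ends up in the log rather than being discarded.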

cmd/thanos/compact.go

Lines changed: 3 additions & 3 deletions
@@ -82,15 +82,15 @@ func runCompact(
 
 	reg.MustRegister(halted)
 
-	bkt, closeFn, err := client.NewBucket(&gcsBucket, *s3Config, reg, component)
+	bkt, err := client.NewBucket(&gcsBucket, *s3Config, reg, component)
 	if err != nil {
 		return err
 	}
 
 	// Ensure we close up everything properly.
 	defer func() {
 		if err != nil {
-			closeFn()
+			runutil.LogOnErr(logger, bkt, "bucket client")
 		}
 	}()
 
@@ -185,7 +185,7 @@
 	}
 
 	g.Add(func() error {
-		defer closeFn()
+		defer runutil.LogOnErr(logger, bkt, "bucket client")
 
 		if !wait {
 			return f()
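
Note: compact.go (and downsample.go, rule.go, sidecar.go below) uses the bucket in two phases: the outer deferred func closes it only if setup fails, and once the run group actor starts, that actor owns the close. A stripped-down sketch of the pattern, with hypothetical validateSetup/doWork stand-ins rather than code from this commit:

package example

import (
	"io"

	"github.com/go-kit/kit/log"
	"github.com/improbable-eng/thanos/pkg/runutil"
	"github.com/oklog/run"
)

// runExample mirrors the setup pattern in runCompact: the named return err lets
// the deferred func close the bucket only when setup fails; on success the run
// group actor below takes over ownership and closes it when it exits.
func runExample(g *run.Group, logger log.Logger, bkt io.Closer,
	validateSetup, doWork func() error) (err error) {
	defer func() {
		if err != nil {
			runutil.LogOnErr(logger, bkt, "bucket client")
		}
	}()

	if err = validateSetup(); err != nil {
		return err
	}

	g.Add(func() error {
		defer runutil.LogOnErr(logger, bkt, "bucket client")
		return doWork()
	}, func(error) {})
	return nil
}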

cmd/thanos/downsample.go

Lines changed: 14 additions & 7 deletions
@@ -17,6 +17,7 @@ import (
 	"github.com/improbable-eng/thanos/pkg/objstore"
 	"github.com/improbable-eng/thanos/pkg/objstore/client"
 	"github.com/improbable-eng/thanos/pkg/objstore/s3"
+	"github.com/improbable-eng/thanos/pkg/runutil"
 	"github.com/oklog/run"
 	"github.com/oklog/ulid"
 	"github.com/opentracing/opentracing-go"
@@ -52,15 +53,15 @@ func runDownsample(
 	component string,
 ) error {
 
-	bkt, closeFn, err := client.NewBucket(&gcsBucket, *s3Config, reg, component)
+	bkt, err := client.NewBucket(&gcsBucket, *s3Config, reg, component)
 	if err != nil {
 		return err
 	}
 
 	// Ensure we close up everything properly.
 	defer func() {
 		if err != nil {
-			closeFn()
+			runutil.LogOnErr(logger, bkt, "bucket client")
 		}
 	}()
 
@@ -69,7 +70,8 @@ func runDownsample(
 	ctx, cancel := context.WithCancel(context.Background())
 
 	g.Add(func() error {
-		defer closeFn()
+		defer runutil.LogOnErr(logger, bkt, "bucket client")
+
 		level.Info(logger).Log("msg", "start first pass of downsampling")
 
 		if err := downsampleBucket(ctx, logger, bkt, dataDir); err != nil {
@@ -116,7 +118,7 @@ func downsampleBucket(
 		if err != nil {
 			return errors.Wrapf(err, "get meta for block %s", id)
 		}
-		defer rc.Close()
+		defer runutil.LogOnErr(logger, rc, "block reader")
 
 		var m block.Meta
 		if err := json.NewDecoder(rc).Decode(&m); err != nil {
@@ -227,7 +229,7 @@ func processDownsampling(ctx context.Context, logger log.Logger, bkt objstore.Bu
 	if err != nil {
 		return errors.Wrapf(err, "open block %s", m.ULID)
 	}
-	defer b.Close()
+	defer runutil.LogOnErr(log.With(logger, "outcome", "potential left mmap file handlers left"), b, "tsdb reader")
 
 	id, err := downsample.Downsample(m, b, dir, resolution)
 	if err != nil {
@@ -253,8 +255,13 @@ func processDownsampling(ctx context.Context, logger log.Logger, bkt objstore.Bu
 
 	begin = time.Now()
 
-	os.RemoveAll(bdir)
-	os.RemoveAll(resdir)
+	// It is not harmful if these fail.
+	if err := os.RemoveAll(bdir); err != nil {
+		level.Warn(logger).Log("msg", "failed to clean directory", "dir", bdir, "err", err)
+	}
+	if err := os.RemoveAll(resdir); err != nil {
+		level.Warn(logger).Log("msg", "failed to clean directory", "dir", resdir, "err", err)
+	}
 
 	return nil
 }

cmd/thanos/main.go

Lines changed: 5 additions & 2 deletions
@@ -22,6 +22,7 @@ import (
 	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
 	"github.com/grpc-ecosystem/go-grpc-prometheus"
+	"github.com/improbable-eng/thanos/pkg/runutil"
 	"github.com/improbable-eng/thanos/pkg/tracing"
 	"github.com/oklog/run"
 	"github.com/opentracing/opentracing-go"
@@ -133,7 +134,9 @@ func main() {
 			<-ctx.Done()
 			return ctx.Err()
 		}, func(error) {
-			closeFn()
+			if err := closeFn(); err != nil {
+				level.Warn(logger).Log("msg", "closing tracer failed", "err", err)
+			}
 			cancel()
 		})
 	}
@@ -240,7 +243,7 @@ func metricHTTPListenGroup(g *run.Group, logger log.Logger, reg *prometheus.Regi
 		level.Info(logger).Log("msg", "Listening for metrics", "address", httpBindAddr)
 		return errors.Wrap(http.Serve(l, mux), "serve metrics")
 	}, func(error) {
-		l.Close()
+		runutil.LogOnErr(logger, l, "metric listener")
 	})
 	return nil
 }

cmd/thanos/query.go

Lines changed: 2 additions & 2 deletions
@@ -225,7 +225,7 @@ func runQuery(
 			level.Info(logger).Log("msg", "Listening for query and metrics", "address", httpBindAddr)
 			return errors.Wrap(http.Serve(l, mux), "serve query")
 		}, func(error) {
-			l.Close()
+			runutil.LogOnErr(logger, l, "query and metric listener")
 		})
 	}
 	// Start query (proxy) gRPC StoreAPI.
@@ -244,7 +244,7 @@
 			return errors.Wrap(s.Serve(l), "serve gRPC")
 		}, func(error) {
 			s.Stop()
-			l.Close()
+			runutil.LogOnErr(logger, l, "store gRPC listener")
 		})
 	}

cmd/thanos/rule.go

Lines changed: 9 additions & 6 deletions
@@ -240,7 +240,10 @@ func runRule(
 
 		g.Add(func() error {
 			for {
-				sdr.Send(ctx, alertQ.Pop(ctx.Done()))
+				// TODO(bplotka): Investigate what errors it can return and if just "sdr.Send" retry is enough.
+				if err := sdr.Send(ctx, alertQ.Pop(ctx.Done())); err != nil {
+					level.Warn(logger).Log("msg", "sending alerts failed", "err", err)
+				}
 
 				select {
 				case <-ctx.Done():
@@ -341,7 +344,7 @@ func runRule(
 			return errors.Wrap(s.Serve(l), "serve gRPC")
 		}, func(error) {
 			s.Stop()
-			l.Close()
+			runutil.LogOnErr(logger, l, "store gRPC listener")
 		})
 	}
 	if err := metricHTTPListenGroup(g, logger, reg, httpBindAddr); err != nil {
@@ -352,7 +355,7 @@ func runRule(
 
 	// The background shipper continuously scans the data directory and uploads
 	// new blocks to Google Cloud Storage or an S3-compatible storage service.
-	bkt, closeFn, err := client.NewBucket(&gcsBucket, *s3Config, reg, component)
+	bkt, err := client.NewBucket(&gcsBucket, *s3Config, reg, component)
 	if err != nil && err != client.ErrNotFound {
 		return err
 	}
@@ -366,7 +369,7 @@ func runRule(
 	// Ensure we close up everything properly.
 	defer func() {
 		if err != nil {
-			closeFn()
+			runutil.LogOnErr(logger, bkt, "bucket client")
 		}
 	}()
 
@@ -375,7 +378,7 @@ func runRule(
 	ctx, cancel := context.WithCancel(context.Background())
 
 	g.Add(func() error {
-		defer closeFn()
+		defer runutil.LogOnErr(logger, bkt, "bucket client")
 
 		return runutil.Repeat(30*time.Second, ctx.Done(), func() error {
 			s.Sync(ctx)
@@ -425,7 +428,7 @@ func queryPrometheusInstant(ctx context.Context, logger log.Logger, addr, query
 	if err != nil {
 		return nil, err
 	}
-	defer resp.Body.Close()
+	defer runutil.LogOnErr(logger, resp.Body, "query body")
 
 	// Always try to decode a vector. Scalar rules won't work for now and arguably
 	// have no relevant use case.

cmd/thanos/sidecar.go

Lines changed: 11 additions & 12 deletions
@@ -127,7 +127,7 @@ func runSidecar(
 		// Blocking query of external labels before joining as a Source Peer into gossip.
 		// We retry infinitely until we reach and fetch labels from our Prometheus.
 		err := runutil.Retry(2*time.Second, ctx.Done(), func() error {
-			if err := metadata.UpdateLabels(ctx); err != nil {
+			if err := metadata.UpdateLabels(ctx, logger); err != nil {
 				level.Warn(logger).Log(
 					"msg", "failed to fetch initial external labels. Is Prometheus running? Retrying",
 					"err", err,
@@ -164,7 +164,7 @@ func runSidecar(
 				iterCtx, iterCancel := context.WithTimeout(context.Background(), 5*time.Second)
 				defer iterCancel()
 
-				if err := metadata.UpdateLabels(iterCtx); err != nil {
+				if err := metadata.UpdateLabels(iterCtx, logger); err != nil {
 					level.Warn(logger).Log("msg", "heartbeat failed", "err", err)
 					promUp.Set(0)
 				} else {
@@ -216,15 +216,15 @@ func runSidecar(
 			return errors.Wrap(s.Serve(l), "serve gRPC")
 		}, func(error) {
 			s.Stop()
-			l.Close()
+			runutil.LogOnErr(logger, l, "store gRPC listener")
 		})
 	}
 
 	var uploads = true
 
 	// The background shipper continuously scans the data directory and uploads
 	// new blocks to Google Cloud Storage or an S3-compatible storage service.
-	bkt, closeFn, err := client.NewBucket(&gcsBucket, *s3Config, reg, component)
+	bkt, err := client.NewBucket(&gcsBucket, *s3Config, reg, component)
 	if err != nil && err != client.ErrNotFound {
 		return err
 	}
@@ -238,15 +238,15 @@ func runSidecar(
 	// Ensure we close up everything properly.
 	defer func() {
 		if err != nil {
-			closeFn()
+			runutil.LogOnErr(logger, bkt, "bucket client")
 		}
 	}()
 
 	s := shipper.New(logger, nil, dataDir, bkt, metadata.Labels, block.SidecarSource)
 	ctx, cancel := context.WithCancel(context.Background())
 
 	g.Add(func() error {
-		defer closeFn()
+		defer runutil.LogOnErr(logger, bkt, "bucket client")
 
 		return runutil.Repeat(30*time.Second, ctx.Done(), func() error {
 			s.Sync(ctx)
@@ -280,8 +280,8 @@ type metadata struct {
 	labels labels.Labels
 }
 
-func (s *metadata) UpdateLabels(ctx context.Context) error {
-	elset, err := queryExternalLabels(ctx, s.promURL)
+func (s *metadata) UpdateLabels(ctx context.Context, logger log.Logger) error {
+	elset, err := queryExternalLabels(ctx, logger, s.promURL)
 	if err != nil {
 		return err
 	}
@@ -293,13 +293,12 @@ func (s *metadata) UpdateLabels(ctx context.Context) error {
 	return nil
 }
 
-func (s *metadata) UpdateTimestamps(mint int64, maxt int64) error {
+func (s *metadata) UpdateTimestamps(mint int64, maxt int64) {
 	s.mtx.Lock()
 	defer s.mtx.Unlock()
 
 	s.mint = mint
 	s.maxt = maxt
-	return nil
 }
 
 func (s *metadata) Labels() labels.Labels {
@@ -330,7 +329,7 @@ func (s *metadata) Timestamps() (mint int64, maxt int64) {
 	return s.mint, s.maxt
 }
 
-func queryExternalLabels(ctx context.Context, base *url.URL) (labels.Labels, error) {
+func queryExternalLabels(ctx context.Context, logger log.Logger, base *url.URL) (labels.Labels, error) {
 	u := *base
 	u.Path = path.Join(u.Path, "/api/v1/status/config")
 
@@ -342,7 +341,7 @@ func queryExternalLabels(ctx context.Context, base *url.URL) (labels.Labels, err
 	if err != nil {
 		return nil, errors.Wrapf(err, "request config against %s", u.String())
 	}
-	defer resp.Body.Close()
+	defer runutil.LogOnErr(logger, resp.Body, "query body")
 
 	var d struct {
 		Data struct {
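
Note: the only reason logger is threaded through UpdateLabels and queryExternalLabels is so the deferred response-body close can be logged. A hypothetical, self-contained variant of the same pattern (fetchStatus and its parameters are illustrative, not code from this commit):

package example

import (
	"context"
	"io/ioutil"
	"net/http"
	"net/url"

	"github.com/go-kit/kit/log"
	"github.com/improbable-eng/thanos/pkg/runutil"
)

// fetchStatus shows why the logger became a parameter in this file: the
// deferred Close of resp.Body needs somewhere to report its error instead of
// dropping it.
func fetchStatus(ctx context.Context, logger log.Logger, base *url.URL) ([]byte, error) {
	req, err := http.NewRequest("GET", base.String(), nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	defer runutil.LogOnErr(logger, resp.Body, "status body")

	return ioutil.ReadAll(resp.Body)
}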
