
Commit 269ad1b (1 parent: 5e76472)

remove useless error output in log


78 files changed (+254 -247 lines)
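Every touched call site follows the same pattern: the log helpers already receive the error as a dedicated argument, so rendering the same error into the message with %v, %+v or %s printed it twice. The commit drops the redundant verb and its trailing err argument, and where no other format arguments remain it downgrades Errorf to Error. The sketch below illustrates the convention; the errorf helper is a hypothetical stand-in whose signature is only inferred from the call sites in this diff, not taken from the project's log package.

package main

import (
	"context"
	"errors"
	"fmt"
)

// errorf mimics the shape of log.Errorf(ctx, err, format, args...) as used in
// this diff: the error is a structured argument, rendered exactly once by the
// logger itself.
func errorf(_ context.Context, err error, format string, args ...interface{}) {
	fmt.Printf(format+": %+v\n", append(args, err)...)
}

func main() {
	ctx := context.TODO()
	err := errors.New("connection refused")
	addr := "127.0.0.1:5001"

	// before: err was both the structured argument and a format argument,
	// so it showed up twice in the output
	// errorf(ctx, err, "[NewCoreRPCClientPool] connect to %s failed, err: %s", addr, err)

	// after: the message describes the operation; the logger appends err once
	errorf(ctx, err, "[NewCoreRPCClientPool] connect to %s failed", addr)
}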

client/clientpool.go (+2 -2)

@@ -42,7 +42,7 @@ func NewCoreRPCClientPool(ctx context.Context, config *PoolConfig) (*Pool, error
 		rpc, err = NewClient(ctx, addr, config.Auth)
 	})
 	if err != nil {
-		log.Errorf(ctx, err, "[NewCoreRPCClientPool] connect to %s failed, err: %s", addr, err)
+		log.Errorf(ctx, err, "[NewCoreRPCClientPool] connect to %s failed", addr)
 		continue
 	}
 	rpcClient := rpc.GetRPCClient()
@@ -96,7 +96,7 @@ func checkAlive(ctx context.Context, rpc *clientWithStatus, timeout time.Duratio
 		_, err = rpc.client.Info(ctx, &pb.Empty{})
 	})
 	if err != nil {
-		log.Errorf(ctx, err, "[ClientPool] connect to %s failed, err: %s", rpc.addr, err)
+		log.Errorf(ctx, err, "[ClientPool] connect to %s failed", rpc.addr)
 		return false
 	}
 	log.Debugf(ctx, "[ClientPool] connect to %s success", rpc.addr)

client/resolver/eru/resolver.go (+2 -2)

@@ -51,13 +51,13 @@ func (r *Resolver) sync() {

 	ch, err := r.discovery.Watch(ctx)
 	if err != nil {
-		log.Errorf(ctx, err, "[EruResolver] failed to watch service status: %v", err)
+		log.Error(ctx, err, "[EruResolver] failed to watch service status")
 		return
 	}
 	for {
 		select {
 		case <-ctx.Done():
-			log.Errorf(ctx, ctx.Err(), "[EruResolver] watch interrupted: %v", ctx.Err())
+			log.Error(ctx, ctx.Err(), "[EruResolver] watch interrupted")
 			return
 		case endpoints, ok := <-ch:
 			if !ok {

client/servicediscovery/eru_service_discovery.go (+3 -3)

@@ -34,7 +34,7 @@ func New(endpoint string, authConfig types.AuthConfig) *EruServiceDiscovery {
 func (w *EruServiceDiscovery) Watch(ctx context.Context) (_ <-chan []string, err error) {
 	cc, err := w.dial(ctx, w.endpoint, w.authConfig)
 	if err != nil {
-		log.Errorf(ctx, err, "[EruServiceWatch] dial failed: %v", err)
+		log.Error(ctx, err, "[EruServiceWatch] dial failed")
 		return
 	}
 	client := pb.NewCoreRPCClient(cc)
@@ -48,7 +48,7 @@ func (w *EruServiceDiscovery) Watch(ctx context.Context) (_ <-chan []string, err
 		watchCtx, cancelWatch := context.WithCancel(ctx)
 		stream, err := client.WatchServiceStatus(watchCtx, &pb.Empty{})
 		if err != nil {
-			log.Errorf(ctx, err, "[EruServiceWatch] watch failed, try later: %v", err)
+			log.Error(ctx, err, "[EruServiceWatch] watch failed, try later")
 			time.Sleep(10 * time.Second)
 			continue
 		}
@@ -69,7 +69,7 @@ func (w *EruServiceDiscovery) Watch(ctx context.Context) (_ <-chan []string, err
 			status, err := stream.Recv()
 			close(cancelTimer)
 			if err != nil {
-				log.Errorf(ctx, err, "[EruServiceWatch] recv failed: %v", err)
+				log.Error(ctx, err, "[EruServiceWatch] recv failed")
 				break
 			}
 			expectedInterval = time.Duration(status.GetIntervalInSecond())
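The Watch loop above handles failures in two ways: a failed dial or watch is logged and retried after a fixed delay, while a failed Recv breaks out so the stream can be re-established. A minimal sketch of that reconnect shape; watchOnce is a hypothetical stand-in for the real gRPC dial-and-consume step.

package main

import (
	"context"
	"errors"
	"log"
	"time"
)

// watchOnce stands in for dialing and consuming one watch stream; it always
// fails here so the retry path is exercised.
func watchOnce(ctx context.Context) error {
	return errors.New("stream closed")
}

func watchLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			log.Print("[EruServiceWatch] context done, stop watching")
			return
		default:
		}
		if err := watchOnce(ctx); err != nil {
			log.Printf("[EruServiceWatch] watch failed, try later: %v", err)
			time.Sleep(time.Second) // the hunk above waits 10 * time.Second
			continue
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	watchLoop(ctx)
}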

client/utils/servicepusher.go (+1 -1)

@@ -111,7 +111,7 @@ func (p *EndpointPusher) pollReachability(ctx context.Context, endpoint string)
 func (p *EndpointPusher) checkReachability(host string) (err error) {
 	pinger, err := ping.NewPinger(host)
 	if err != nil {
-		log.Errorf(nil, err, "[EruResolver] failed to create pinger: %+v", err) //nolint
+		log.Error(nil, err, "[EruResolver] failed to create pinger") //nolint
 		return
 	}
 	pinger.SetPrivileged(os.Getuid() == 0)

cluster/calcium/build.go (+7 -7)

@@ -9,6 +9,7 @@ import (
 	"os"
 	"time"

+	"github.com/pkg/errors"
 	enginetypes "github.com/projecteru2/core/engine/types"
 	"github.com/projecteru2/core/log"
 	"github.com/projecteru2/core/types"
@@ -140,28 +141,27 @@ func (c *Calcium) pushImageAndClean(ctx context.Context, resp io.ReadCloser, nod
 	lastMessage := &types.BuildImageMessage{}
 	for {
 		message := &types.BuildImageMessage{}
-		err := decoder.Decode(message)
-		if err != nil {
+		if err := decoder.Decode(message); err != nil {
 			if err == io.EOF {
 				break
 			}
 			if err == context.Canceled || err == context.DeadlineExceeded {
-				log.Errorf(ctx, err, "[BuildImage] context timeout")
+				log.Error(ctx, err, "[BuildImage] context timeout")
 				lastMessage.ErrorDetail.Code = -1
 				lastMessage.ErrorDetail.Message = err.Error()
 				lastMessage.Error = err.Error()
 				break
 			}
 			malformed, _ := io.ReadAll(decoder.Buffered()) // TODO err check
-			logger.Errorf(ctx, nil, "[BuildImage] Decode build image message failed %+v, buffered: %v", err, malformed)
+			logger.Errorf(ctx, err, "[BuildImage] Decode build image message failed, buffered: %v", malformed)
 			return
 		}
 		ch <- message
 		lastMessage = message
 	}

 	if lastMessage.Error != "" {
-		log.Errorf(ctx, nil, "[BuildImage] Build image failed %v", lastMessage.ErrorDetail.Message)
+		logger.Errorf(ctx, errors.New(lastMessage.Error), "[BuildImage] Build image failed %v", lastMessage.ErrorDetail.Message)
 		return
 	}

@@ -216,11 +216,11 @@ func cleanupNodeImages(ctx context.Context, node *types.Node, ids []string, ttl
 	defer cancel()
 	for _, id := range ids {
 		if _, err := node.Engine.ImageRemove(ctx, id, false, true); err != nil {
-			logger.Errorf(ctx, err, "[BuildImage] Remove image error: %+v", err)
+			logger.Error(ctx, err, "[BuildImage] Remove image error")
 		}
 	}
 	if spaceReclaimed, err := node.Engine.ImageBuildCachePrune(ctx, true); err != nil {
-		logger.Errorf(ctx, err, "[BuildImage] Remove build image cache error: %+v", err)
+		logger.Error(ctx, err, "[BuildImage] Remove build image cache error")
 	} else {
 		logger.Infof(ctx, "[BuildImage] Clean cached image and release space %d", spaceReclaimed)
 	}
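One hunk here does more than trim the message: lastMessage.Error is a plain string decoded from the build stream, so the call stops passing nil and instead wraps the string with errors.New to hand the logger a real error value. A minimal sketch of that conversion, with a simplified stand-in for the BuildImageMessage type:

package main

import (
	"errors"
	"log"
)

// buildImageMessage is a simplified stand-in for types.BuildImageMessage;
// only the Error field (a string, not an error value) matters here.
type buildImageMessage struct {
	Error string
}

func main() {
	lastMessage := buildImageMessage{Error: "no space left on device"}
	if lastMessage.Error != "" {
		// wrap the decoded string so the structured logger receives an error value
		err := errors.New(lastMessage.Error)
		log.Printf("[BuildImage] Build image failed %v", err)
	}
}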

cluster/calcium/calcium.go (+3 -3)

@@ -55,7 +55,7 @@ func New(ctx context.Context, config types.Config, t *testing.T) (*Calcium, erro
 		log.Warn(ctx, "[Calcium] SCM not set, build API disabled")
 	}
 	if err != nil {
-		log.Errorf(ctx, err, "[Calcium] SCM failed: %+v", err)
+		log.Error(ctx, err, "[Calcium] SCM failed")
 		return nil, err
 	}

@@ -72,12 +72,12 @@ func New(ctx context.Context, config types.Config, t *testing.T) (*Calcium, erro
 	// load internal plugins
 	cpumem, err := cpumem.NewPlugin(config)
 	if err != nil {
-		log.Errorf(ctx, err, "[NewPluginManager] new cpumem plugin error: %v", err)
+		log.Error(ctx, err, "[NewPluginManager] new cpumem plugin error")
 		return nil, err
 	}
 	volume, err := volume.NewPlugin(config)
 	if err != nil {
-		log.Errorf(ctx, err, "[NewPluginManager] new volume plugin error: %v", err)
+		log.Error(ctx, err, "[NewPluginManager] new volume plugin error")
 		return nil, err
 	}
 	rmgr.AddPlugins(cpumem, volume)

cluster/calcium/capacity.go (+2 -2)

@@ -28,7 +28,7 @@ func (c *Calcium) CalculateCapacity(ctx context.Context, opts *types.DeployOptio

 	if opts.DeployStrategy != strategy.Dummy {
 		if msg.NodeCapacities, err = c.doGetDeployStrategy(ctx, nodenames, opts); err != nil {
-			logger.Errorf(ctx, err, "[Calcium.CalculateCapacity] doGetDeployMap failed: %+v", err)
+			logger.Error(ctx, err, "[Calcium.CalculateCapacity] doGetDeployMap failed")
 			return err
 		}

@@ -41,7 +41,7 @@ func (c *Calcium) CalculateCapacity(ctx context.Context, opts *types.DeployOptio
 		var infos map[string]*resources.NodeCapacityInfo
 		infos, msg.Total, err = c.rmgr.GetNodesDeployCapacity(ctx, nodenames, opts.ResourceOpts)
 		if err != nil {
-			logger.Errorf(ctx, err, "[Calcium.CalculateCapacity] failed to get nodes capacity: %+v", err)
+			logger.Error(ctx, err, "[Calcium.CalculateCapacity] failed to get nodes capacity")
 			return err
 		}
 		if msg.Total <= 0 {

cluster/calcium/create.go (+6 -6)

@@ -62,7 +62,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio
 	for nodename := range deployMap {
 		processing := opts.GetProcessing(nodename)
 		if err := c.store.DeleteProcessing(cctx, processing); err != nil {
-			logger.Errorf(ctx, err, "[Calcium.doCreateWorkloads] delete processing failed for %s: %+v", nodename, err)
+			logger.Errorf(ctx, err, "[Calcium.doCreateWorkloads] delete processing failed for %s", nodename)
 		}
 	}
 	close(ch)
@@ -73,7 +73,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio
 	defer func() {
 		if resourceCommit != nil {
 			if err := resourceCommit(); err != nil {
-				logger.Errorf(ctx, err, "commit wal failed: %s, %+v", eventWorkloadResourceAllocated, err)
+				logger.Errorf(ctx, err, "commit wal failed: %s", eventWorkloadResourceAllocated)
 			}
 		}
 	}()
@@ -85,7 +85,7 @@ func (c *Calcium) doCreateWorkloads(ctx context.Context, opts *types.DeployOptio
 				continue
 			}
 			if err := processingCommits[nodename](); err != nil {
-				logger.Errorf(ctx, err, "commit wal failed: %s, %s, %+v", eventProcessingCreated, nodename, err)
+				logger.Errorf(ctx, err, "commit wal failed: %s, %s", eventProcessingCreated, nodename)
 			}
 		}
 	}()
@@ -186,7 +186,7 @@ func (c *Calcium) doDeployWorkloads(ctx context.Context,
 	for nodename, deploy := range deployMap {
 		_ = c.pool.Invoke(func(deploy int) func() {
 			return func() {
-				metrics.Client.SendDeployCount(deploy)
+				metrics.Client.SendDeployCount(ctx, deploy)
 			}
 		}(deploy))
 		_ = c.pool.Invoke(func(nodename string, deploy, seq int) func() {
@@ -322,7 +322,7 @@ func (c *Calcium) doDeployOneWorkload(
 	defer func() {
 		if commit != nil {
 			if err := commit(); err != nil {
-				logger.Errorf(ctx, err, "Commit WAL %s failed: %+v", eventWorkloadCreated, err)
+				logger.Errorf(ctx, err, "Commit WAL %s failed", eventWorkloadCreated)
 			}
 		}
 	}()
@@ -427,7 +427,7 @@ func (c *Calcium) doDeployOneWorkload(

 		// remove workload
 		func(ctx context.Context, _ bool) error {
-			logger.Errorf(ctx, nil, "[doDeployOneWorkload] failed to deploy workload %s, rollback", workload.ID)
+			logger.Infof(ctx, "[doDeployOneWorkload] failed to deploy workload %s, rollback", workload.ID)
 			if workload.ID == "" {
 				return nil
 			}
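Besides the log changes, the doDeployWorkloads hunk threads ctx into metrics.Client.SendDeployCount, presumably so the metric send can carry the request context. The surrounding func(deploy int) func() {...}(deploy) wrapper is the usual trick of copying loop variables before a task is queued on the worker pool; below is a minimal sketch of that pattern with plain goroutines standing in for the Invoke-style pool (the pool itself is not shown).

package main

import (
	"fmt"
	"sync"
)

func main() {
	deployMap := map[string]int{"node1": 2, "node2": 3}

	var wg sync.WaitGroup
	for nodename, deploy := range deployMap {
		wg.Add(1)
		// pass nodename and deploy as arguments so each queued task keeps its
		// own copy; before Go 1.22, capturing the loop variables directly could
		// leave every task reading the values of the final iteration
		go func(nodename string, deploy int) {
			defer wg.Done()
			fmt.Printf("send deploy count %d for %s\n", deploy, nodename)
		}(nodename, deploy)
	}
	wg.Wait()
}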

cluster/calcium/dissociate.go (+3 -3)

@@ -15,7 +15,7 @@ func (c *Calcium) DissociateWorkload(ctx context.Context, ids []string) (chan *t

 	nodeWorkloadGroup, err := c.groupWorkloadsByNode(ctx, ids)
 	if err != nil {
-		logger.Errorf(ctx, err, "failed to group workloads by node: %+v", err)
+		logger.Error(ctx, err, "failed to group workloads by node")
 		return nil, err
 	}

@@ -58,15 +58,15 @@ func (c *Calcium) DissociateWorkload(ctx context.Context, ids []string) (chan *t
 						c.config.GlobalTimeout,
 					)
 				}); err != nil {
-					logger.WithField("id", workloadID).Errorf(ctx, err, "failed to lock workload: %+v", err)
+					logger.WithField("id", workloadID).Error(ctx, err, "failed to lock workload")
 					msg.Error = err
 				}
 				ch <- msg
 			}
 			_ = c.pool.Invoke(func() { c.doRemapResourceAndLog(ctx, logger, node) })
 			return nil
 		}); err != nil {
-			logger.WithField("nodename", nodename).Errorf(ctx, err, "failed to lock node: %+v", err)
+			logger.WithField("nodename", nodename).Error(ctx, err, "failed to lock node")
 		}
 	}
 })

cluster/calcium/execute.go (+3 -3)

@@ -27,7 +27,7 @@ func (c *Calcium) ExecuteWorkload(ctx context.Context, opts *types.ExecuteWorklo

 	workload, err := c.GetWorkload(ctx, opts.WorkloadID)
 	if err != nil {
-		logger.Errorf(ctx, err, "[ExecuteWorkload] Failed to get workload: %+v", err)
+		logger.Error(ctx, err, "[ExecuteWorkload] Failed to get workload")
 		return
 	}

@@ -44,7 +44,7 @@ func (c *Calcium) ExecuteWorkload(ctx context.Context, opts *types.ExecuteWorklo

 	execID, stdout, stderr, inStream, err := workload.Engine.Execute(ctx, opts.WorkloadID, execConfig)
 	if err != nil {
-		logger.Errorf(ctx, err, "[ExecuteWorkload] Failed to attach execID: %+v", err)
+		logger.Errorf(ctx, err, "[ExecuteWorkload] Failed to attach execID %s", execID)
 		return
 	}

@@ -62,7 +62,7 @@ func (c *Calcium) ExecuteWorkload(ctx context.Context, opts *types.ExecuteWorklo

 	execCode, err := workload.Engine.ExecExitCode(ctx, opts.WorkloadID, execID)
 	if err != nil {
-		logger.Errorf(ctx, err, "[ExecuteWorkload] Failed to get exitcode: %+v", err)
+		logger.Error(ctx, err, "[ExecuteWorkload] Failed to get exitcode")
 		return
 	}

cluster/calcium/helper.go (+2 -2)

@@ -11,7 +11,7 @@ import (
 func distributionInspect(ctx context.Context, node *types.Node, image string, digests []string) bool {
 	remoteDigest, err := node.Engine.ImageRemoteDigest(ctx, image)
 	if err != nil {
-		log.Errorf(ctx, err, "[distributionInspect] get manifest failed %v", err)
+		log.Error(ctx, err, "[distributionInspect] get manifest failed")
 		return false
 	}

@@ -51,7 +51,7 @@ func pullImage(ctx context.Context, node *types.Node, image string) error {
 	rc, err := node.Engine.ImagePull(ctx, image, false)
 	defer utils.EnsureReaderClosed(ctx, rc)
 	if err != nil {
-		log.Errorf(ctx, err, "[pullImage] Error during pulling image %s: %v", image, err)
+		log.Errorf(ctx, err, "[pullImage] Error during pulling image %s", image)
 		return err
 	}
 	log.Infof(ctx, "[pullImage] Done pulling image %s", image)

cluster/calcium/image.go (+1 -1)

@@ -111,7 +111,7 @@ func (c *Calcium) RemoveImage(ctx context.Context, opts *types.ImageOptions) (ch
 		}
 		if opts.Prune {
 			if err := node.Engine.ImagesPrune(ctx); err != nil {
-				logger.Errorf(ctx, err, "[RemoveImage] Prune %s pod %s node failed: %+v", opts.Podname, node.Name, err)
+				logger.Errorf(ctx, err, "[RemoveImage] Prune %s pod %s node failed", opts.Podname, node.Name)
 			} else {
 				logger.Infof(ctx, "[RemoveImage] Prune %s pod %s node", opts.Podname, node.Name)
 			}

cluster/calcium/lambda.go (+8 -8)

@@ -37,7 +37,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC

 	createChan, err := c.CreateWorkload(ctx, opts)
 	if err != nil {
-		logger.Errorf(ctx, err, "[RunAndWait] Create workload error %+v", err)
+		logger.Error(ctx, err, "[RunAndWait] Create workload error")
 		return workloadIDs, nil, err
 	}

@@ -58,7 +58,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
 		// we don't need to remove this non-existing workload
 		// so just send the error message and return
 		if message.Error != nil || message.WorkloadID == "" {
-			logger.Errorf(ctx, message.Error, "[RunAndWait] Create workload failed %+v", message.Error)
+			logger.Error(ctx, message.Error, "[RunAndWait] Create workload failed")
 			return &types.AttachWorkloadMessage{
 				WorkloadID: "",
 				Data: []byte(fmt.Sprintf("Create workload failed %+v", message.Error)),
@@ -77,7 +77,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
 		}
 		defer func() {
 			if err := commit(); err != nil {
-				logger.Errorf(ctx, err, "[RunAndWait] Commit WAL %s failed: %s, %v", eventCreateLambda, message.WorkloadID, err)
+				logger.Errorf(ctx, err, "[RunAndWait] Commit WAL %s failed: %s", eventCreateLambda, message.WorkloadID)
 			}
 		}()

@@ -87,7 +87,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
 			ctx, cancel := context.WithCancel(utils.InheritTracingInfo(ctx, context.TODO()))
 			defer cancel()
 			if err := c.doRemoveWorkloadSync(ctx, []string{message.WorkloadID}); err != nil {
-				logger.Errorf(ctx, err, "[RunAndWait] Remove lambda workload failed %+v", err)
+				logger.Error(ctx, err, "[RunAndWait] Remove lambda workload failed")
 			} else {
 				logger.Infof(ctx, "[RunAndWait] Workload %s finished and removed", utils.ShortID(message.WorkloadID))
 			}
@@ -97,7 +97,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
 		// this is weird, we return the error directly and try to delete data
 		workload, err := c.GetWorkload(ctx, message.WorkloadID)
 		if err != nil {
-			logger.Errorf(ctx, err, "[RunAndWait] Get workload failed %+v", err)
+			logger.Error(ctx, err, "[RunAndWait] Get workload failed")
 			return &types.AttachWorkloadMessage{
 				WorkloadID: message.WorkloadID,
 				Data: []byte(fmt.Sprintf("Get workload %s failed %+v", message.WorkloadID, err)),
@@ -115,7 +115,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
 			Stdout: true,
 			Stderr: true,
 		}); err != nil {
-			logger.Errorf(ctx, err, "[RunAndWait] Can't fetch log of workload %s error %+v", message.WorkloadID, err)
+			logger.Errorf(ctx, err, "[RunAndWait] Can't fetch log of workload %s", message.WorkloadID)
 			return &types.AttachWorkloadMessage{
 				WorkloadID: message.WorkloadID,
 				Data: []byte(fmt.Sprintf("Fetch log for workload %s failed %+v", message.WorkloadID, err)),
@@ -130,7 +130,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
 		var inStream io.WriteCloser
 		stdout, stderr, inStream, err = workload.Engine.VirtualizationAttach(ctx, message.WorkloadID, true, true)
 		if err != nil {
-			logger.Errorf(ctx, err, "[RunAndWait] Can't attach workload %s error %+v", message.WorkloadID, err)
+			logger.Errorf(ctx, err, "[RunAndWait] Can't attach workload %s", message.WorkloadID)
 			return &types.AttachWorkloadMessage{
 				WorkloadID: message.WorkloadID,
 				Data: []byte(fmt.Sprintf("Attach to workload %s failed %+v", message.WorkloadID, err)),
@@ -156,7 +156,7 @@ func (c *Calcium) RunAndWait(ctx context.Context, opts *types.DeployOptions, inC
 		// wait and forward exitcode
 		r, err := workload.Engine.VirtualizationWait(ctx, message.WorkloadID, "")
 		if err != nil {
-			logger.Errorf(ctx, err, "[RunAndWait] %s wait failed %+v", utils.ShortID(message.WorkloadID), err)
+			logger.Errorf(ctx, err, "[RunAndWait] %s wait failed", utils.ShortID(message.WorkloadID))
 			return &types.AttachWorkloadMessage{
 				WorkloadID: message.WorkloadID,
 				Data: []byte(fmt.Sprintf("Wait workload %s failed %+v", message.WorkloadID, err)),

cluster/calcium/lock.go (+2 -2)

@@ -23,7 +23,7 @@ func (c *Calcium) doLock(ctx context.Context, name string, timeout time.Duration
 			defer cancel()
 			rollbackCtx = utils.InheritTracingInfo(rollbackCtx, ctx)
 			if e := lock.Unlock(rollbackCtx); e != nil {
-				log.Errorf(rollbackCtx, err, "failed to unlock %s: %+v", name, err)
+				log.Errorf(rollbackCtx, err, "failed to unlock %s", name)
 			}
 		}
 	}()
@@ -47,7 +47,7 @@ func (c *Calcium) doUnlockAll(ctx context.Context, locks map[string]lock.Distrib
 	}
 	for _, key := range order {
 		if err := c.doUnlock(ctx, locks[key], key); err != nil {
-			log.Errorf(ctx, err, "[doUnlockAll] Unlock %s failed %v", key, err)
+			log.Errorf(ctx, err, "[doUnlockAll] Unlock %s failed", key)
 			continue
 		}
 	}
