package calcium

import (
	"bytes"
	"context"
	"sync"

	"github.com/pkg/errors"
	"github.com/projecteru2/core/cluster"
	"github.com/projecteru2/core/log"
	"github.com/projecteru2/core/types"
	"github.com/projecteru2/core/utils"
)

// ControlWorkload controls workload status: it starts, stops or restarts the given
// workloads and streams the per-workload result back on the returned channel.
func (c *Calcium) ControlWorkload(ctx context.Context, ids []string, t string, force bool) (chan *types.ControlWorkloadMessage, error) {
	logger := log.WithField("Calcium", "ControlWorkload").WithField("ids", ids).WithField("t", t).WithField("force", force)
	ch := make(chan *types.ControlWorkloadMessage)

	go func() {
		defer close(ch)
		wg := sync.WaitGroup{}
		for _, id := range ids {
			wg.Add(1)
			go func(id string) {
				defer wg.Done()
				var message []*bytes.Buffer
				err := c.withWorkloadLocked(ctx, id, func(ctx context.Context, workload *types.Workload) error {
					var err error
					switch t {
					case cluster.WorkloadStop:
						message, err = c.doStopWorkload(ctx, workload, force)
						return errors.WithStack(err)
					case cluster.WorkloadStart:
						message, err = c.doStartWorkload(ctx, workload, force)
						return errors.WithStack(err)
					case cluster.WorkloadRestart:
						message, err = c.doStopWorkload(ctx, workload, force)
						if err != nil {
							return errors.WithStack(err)
						}
						startHook, err := c.doStartWorkload(ctx, workload, force)
						message = append(message, startHook...)
						return errors.WithStack(err)
					}
					return errors.WithStack(types.ErrUnknownControlType)
				})
				if err == nil {
					log.Infof("[ControlWorkload] Workload %s %s", id, t)
					log.Info("[ControlWorkload] Hook Output:")
					log.Info(string(utils.MergeHookOutputs(message)))
				}
				ch <- &types.ControlWorkloadMessage{
					WorkloadID: id,
					Error:      logger.Err(err),
					Hook:       message,
				}
			}(id)
		}
		wg.Wait()
	}()
	return ch, nil
}
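
// Usage sketch (illustrative only, not part of the original code): callers are
// expected to drain the returned channel until it is closed, for example:
//
//	ch, err := c.ControlWorkload(ctx, []string{"workload-id"}, cluster.WorkloadRestart, false)
//	if err != nil {
//		return err
//	}
//	for msg := range ch {
//		if msg.Error != nil {
//			// handle the per-workload failure; msg.Hook carries any hook output
//		}
//	}
//
// "workload-id" is a placeholder; real workload IDs come from the caller.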

// doStartWorkload starts the workload via its engine and, if an AfterStart hook
// is configured, runs the hook and collects its output.
func (c *Calcium) doStartWorkload(ctx context.Context, workload *types.Workload, force bool) (message []*bytes.Buffer, err error) {
	if err = workload.Start(ctx); err != nil {
		return message, errors.WithStack(err)
	}
	// TODO healthcheck first
	if workload.Hook != nil && len(workload.Hook.AfterStart) > 0 {
		message, err = c.doHook(
			ctx,
			workload.ID, workload.User,
			workload.Hook.AfterStart, workload.Env,
			workload.Hook.Force, workload.Privileged,
			force, workload.Engine,
		)
	}
	return message, errors.WithStack(err)
}

// doStopWorkload runs the BeforeStop hook (if any) and then stops the workload.
func (c *Calcium) doStopWorkload(ctx context.Context, workload *types.Workload, force bool) (message []*bytes.Buffer, err error) {
	if workload.Hook != nil && len(workload.Hook.BeforeStop) > 0 {
		message, err = c.doHook(
			ctx,
			workload.ID, workload.User,
			workload.Hook.BeforeStop, workload.Env,
			workload.Hook.Force, workload.Privileged,
			force, workload.Engine,
		)
		if err != nil {
			return message, errors.WithStack(err)
		}
	}
	// Blocking here is a serious problem: with the current configuration the block is
	// on the order of 5 minutes. A simple workaround is to trust ctx rather than the
	// engine's own handling. I also suspect the engine's timeout implementation waits
	// out the full timeout instead of returning as soon as the stop completes.
	// See the sketch after this function for bounding the stop with a context deadline.
	if err = workload.Stop(ctx); err != nil {
		message = append(message, bytes.NewBufferString(err.Error()))
	}
	return message, errors.WithStack(err)
}
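
// A minimal sketch (an assumption, not the original implementation) of the
// "trust ctx" approach described in the comment above: bound the stop call with
// a derived context so the caller is never blocked longer than a chosen deadline,
// regardless of how the engine implements its own timeout. stopTimeout is a
// hypothetical value that would have to come from configuration.
//
//	stopCtx, cancel := context.WithTimeout(ctx, stopTimeout)
//	defer cancel()
//	if err := workload.Stop(stopCtx); err != nil {
//		message = append(message, bytes.NewBufferString(err.Error()))
//	}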