Skip to content

Commit 3ba426e

Browse files
committed
Merge branch 'master' into satisfy-yifu
2 parents 66a16dd + 6d713b9 commit 3ba426e

14 files changed

+426
-115
lines changed

VERSION

+1-1
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
0.7.22
1+
0.7.23

cluster/calcium/build_image.go

+8-1
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,9 @@ func getRandomNode(c *calcium, podname string) (*types.Node, error) {
7272
return nil, err
7373
}
7474
if len(nodes) == 0 {
75-
return nil, fmt.Errorf("No nodes available in pod %s", podname)
75+
err = fmt.Errorf("No nodes available in pod %s", podname)
76+
log.Debugf("Error during getRandomNode from %s: %v", podname, err)
77+
return nil, err
7678
}
7779

7880
nodemap := make(map[string]types.CPUMap)
@@ -81,6 +83,11 @@ func getRandomNode(c *calcium, podname string) (*types.Node, error) {
8183
}
8284
nodename, err := c.scheduler.RandomNode(nodemap)
8385
if err != nil {
86+
log.Debugf("Error during getRandomNode from %s: %v", podname, err)
87+
return nil, err
88+
}
89+
if nodename == "" {
90+
err = fmt.Errorf("Got empty node during getRandomNode from %s", podname)
8491
return nil, err
8592
}
8693

cluster/calcium/create_container.go

+5-2
Original file line numberDiff line numberDiff line change
@@ -26,9 +26,8 @@ func (c *calcium) CreateContainer(specs types.Specs, opts *types.DeployOptions)
2626
pod, _ := c.store.GetPod(opts.Podname)
2727
if pod.Scheduler == "CPU" {
2828
return c.createContainerWithScheduler(specs, opts)
29-
} else {
30-
return c.createContainerWithCPUPeriod(specs, opts)
3129
}
30+
return c.createContainerWithCPUPeriod(specs, opts)
3231
}
3332

3433
func (c *calcium) createContainerWithCPUPeriod(specs types.Specs, opts *types.DeployOptions) (chan *types.CreateContainerMessage, error) {
@@ -38,14 +37,18 @@ func (c *calcium) createContainerWithCPUPeriod(specs types.Specs, opts *types.De
3837
return ch, fmt.Errorf("Minimum memory limit allowed is 4MB")
3938
}
4039

40+
log.Debugf("Deploy options: %v", opts)
41+
log.Debugf("Deploy specs: %v", specs)
4142
cpuandmem, _, err := c.getCPUAndMem(opts.Podname, opts.Nodename, 1.0)
4243
if err != nil {
4344
return ch, err
4445
}
4546
go utils.SendMemCap(cpuandmem, "before-alloc")
4647
nodesInfo := utils.GetNodesInfo(cpuandmem)
4748

49+
log.Debugf("Input opts.CPUQuota: %f", opts.CPUQuota)
4850
cpuQuota := int(opts.CPUQuota * float64(utils.CpuPeriodBase))
51+
log.Debugf("Tranfered cpuQuota: %d", cpuQuota)
4952
plan, err := utils.AllocContainerPlan(nodesInfo, cpuQuota, opts.Memory, opts.Count) // 还是以 Bytes 作单位, 不转换了
5053

5154
if err != nil {

cluster/calcium/helper.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ func makeMountPaths(specs types.Specs, config types.Config) ([]string, map[strin
140140
func runExec(client *engineapi.Client, container enginetypes.ContainerJSON, label string) error {
141141
cmd, ok := container.Config.Labels[label]
142142
if !ok || cmd == "" {
143-
log.Debug("No %s found in container %s", label, container.ID)
143+
log.Debugf("No %s found in container %s", label, container.ID)
144144
return nil
145145
}
146146

cluster/calcium/meta.go

+6-2
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,10 @@ package calcium
33
// All functions are just proxy to store, since I don't want store to be exported.
44
// All these functions are meta data related.
55

6-
import "gitlab.ricebook.net/platform/core/types"
6+
import (
7+
log "github.com/Sirupsen/logrus"
8+
"gitlab.ricebook.net/platform/core/types"
9+
)
710

811
func (c *calcium) ListPods() ([]*types.Pod, error) {
912
return c.store.GetAllPods()
@@ -50,14 +53,15 @@ func (c *calcium) ListPodNodes(podname string, all bool) ([]*types.Node, error)
5053
var nodes []*types.Node
5154
candidates, err := c.store.GetNodesByPod(podname)
5255
if err != nil {
56+
log.Debugf("Error during ListPodNodes from %s: %v", podname, err)
5357
return nodes, err
5458
}
5559
for _, candidate := range candidates {
5660
if candidate.Available || all {
5761
nodes = append(nodes, candidate)
5862
}
5963
}
60-
return nodes, err
64+
return nodes, nil
6165
}
6266

6367
func (c *calcium) GetContainer(id string) (*types.Container, error) {

cluster/calcium/run_and_wait.go

+94
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
package calcium

import (
	"bufio"
	"fmt"
	"sync"

	log "github.com/Sirupsen/logrus"
	enginetypes "github.com/docker/docker/api/types"
	"gitlab.ricebook.net/platform/core/types"
	"golang.org/x/net/context"
)

// PREFIXLEN is the byte length of the header docker prepends to each frame of
// a multiplexed log stream (1 stream-type byte + 3 padding + 4-byte length).
const PREFIXLEN int = 8

// RunAndWait creates containers from specs/opts, streams each container's log
// output back over the returned channel, and once a container exits reports
// its exit code and removes it. The channel is closed after every container
// has been fully processed.
func (c *calcium) RunAndWait(specs types.Specs, opts *types.DeployOptions) (chan *types.RunAndWaitMessage, error) {
	ch := make(chan *types.RunAndWaitMessage)

	// Force the json-file log driver so ContainerLogs below can stream output.
	entry := specs.Entrypoints[opts.Entrypoint]
	entry.LogConfig = "json-file"
	specs.Entrypoints[opts.Entrypoint] = entry

	createChan, err := c.CreateContainer(specs, opts)
	if err != nil {
		log.Errorf("[RunAndWait] Create container error, %s", err.Error())
		return ch, err
	}

	go func() {
		wg := &sync.WaitGroup{}
		defer log.Info("[RunAndWait] Finish run and wait for containers")
		defer close(ch)
		defer wg.Wait()
		logsOpts := enginetypes.ContainerLogsOptions{Follow: true, ShowStdout: true, ShowStderr: true}

		for message := range createChan {
			// BUG FIX: wg.Add(1) originally came before these guards, so a
			// `continue` skipped the matching wg.Done() and wg.Wait() above
			// deadlocked, leaving ch open forever.
			if message.ContainerID == "" {
				// BUG FIX: the original logged err.Error() here, but err is
				// nil on this path (CreateContainer already succeeded), which
				// would panic. Log the message itself instead.
				log.Errorf("[RunAndWait] Can't find container id, message: %v", message)
				continue
			}

			node, err := c.store.GetNode(message.Podname, message.Nodename)
			if err != nil {
				log.Errorf("[RunAndWait] Can't find node, %s", err.Error())
				continue
			}

			wg.Add(1)
			go func(node *types.Node, message *types.CreateContainerMessage) {
				defer wg.Done()
				resp, err := node.Engine.ContainerLogs(context.Background(), message.ContainerID, logsOpts)
				if err != nil {
					data := fmt.Sprintf("[RunAndWait] Failed to get logs, %s", err.Error())
					ch <- &types.RunAndWaitMessage{ContainerID: message.ContainerID, Data: data}
					return
				}
				// BUG FIX: the log stream was never closed — fd/connection leak.
				defer resp.Close()

				// Strip the 8-byte multiplex header from each line before
				// forwarding it to the caller.
				scanner := bufio.NewScanner(resp)
				for scanner.Scan() {
					data := scanner.Bytes()[PREFIXLEN:]
					log.Debugf("[RunAndWait] %s %s", message.ContainerID[:12], data)
					m := &types.RunAndWaitMessage{ContainerID: message.ContainerID, Data: string(data)}
					ch <- m
				}

				if err := scanner.Err(); err != nil {
					data := fmt.Sprintf("[RunAndWait] Parse log failed, %s", err.Error())
					ch <- &types.RunAndWaitMessage{ContainerID: message.ContainerID, Data: data}
					return
				}

				container, err := c.GetContainer(message.ContainerID)
				if err != nil {
					data := fmt.Sprintf("[RunAndWait] Container not found, %s", err.Error())
					ch <- &types.RunAndWaitMessage{ContainerID: message.ContainerID, Data: data}
					return
				}
				// Remove the container once we have reported its exit code.
				defer c.removeOneContainer(container)

				containerJSON, err := container.Inspect()
				if err == nil {
					ch <- &types.RunAndWaitMessage{ContainerID: message.ContainerID, Data: fmt.Sprintf("[exitcode] %d", containerJSON.State.ExitCode)}
				} else {
					ch <- &types.RunAndWaitMessage{ContainerID: message.ContainerID, Data: fmt.Sprintf("[exitcode]unknown %s", err.Error())}
				}
				log.Infof("[RunAndWait] Container %s finished, remove", message.ContainerID)
			}(node, message)
		}
	}()

	return ch, nil
}

cluster/cluster.go

+1
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ type Cluster interface {
2121
// cluster methods
2222
BuildImage(repository, version, uid, artifact string) (chan *types.BuildImageMessage, error)
2323
CreateContainer(specs types.Specs, opts *types.DeployOptions) (chan *types.CreateContainerMessage, error)
24+
RunAndWait(specs types.Specs, opts *types.DeployOptions) (chan *types.RunAndWaitMessage, error)
2425
UpgradeContainer(ids []string, image string) (chan *types.UpgradeContainerMessage, error)
2526
RemoveContainer(ids []string) (chan *types.RemoveContainerMessage, error)
2627
RemoveImage(podname, nodename string, images []string) (chan *types.RemoveImageMessage, error)

0 commit comments

Comments
 (0)