@@ -22,6 +22,148 @@ import (
 // Use specs and options to create
 // TODO what about networks?
 func (c *calcium) CreateContainer(specs types.Specs, opts *types.DeployOptions) (chan *types.CreateContainerMessage, error) {
+	if c.config.ResourceAlloc == "scheduler" {
+		return c.createContainerWithScheduler(specs, opts)
+	} else {
+		return c.createContainerWithCPUPeriod(specs, opts)
+	}
+}
+
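+// createContainerWithCPUPeriod deploys containers whose CPU usage is
+// capped through Docker's CFS period/quota settings rather than by
+// pinning specific cores via the scheduler.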
+func (c *calcium) createContainerWithCPUPeriod(specs types.Specs, opts *types.DeployOptions) (chan *types.CreateContainerMessage, error) {
+	ch := make(chan *types.CreateContainerMessage)
+
+	cpumap, _, err := c.getCPUMap(opts.Podname, opts.Nodename, 1.0)
+	if err != nil {
+		return ch, err
+	}
+	nodesInfo := utils.GetNodesInfo(cpumap)
+
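+	// opts.CPUQuota is a fractional number of CPUs (e.g. 0.5); multiplying
+	// by CpuPeriodBase (assumed to be the CFS period in microseconds,
+	// typically 100000) gives the integer quota the allocator works with.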
+	cpuQuota := int(opts.CPUQuota * float64(utils.CpuPeriodBase))
+	plan, err := utils.AllocContainerPlan(nodesInfo, cpuQuota, opts.Count)
+	if err != nil {
+		return ch, err
+	}
+
+	go func() {
+		wg := sync.WaitGroup{}
+		wg.Add(len(plan))
+		for nodename, num := range plan {
+			go func(nodename string, num int, opts *types.DeployOptions) {
+				defer wg.Done()
+				for _, m := range c.doCreateContainerWithCPUPeriod(nodename, num, opts.CPUQuota, specs, opts) {
+					ch <- m
+				}
+			}(nodename, num, opts)
+		}
+		wg.Wait()
+		close(ch)
+	}()
+
+	return ch, nil
+}
+
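+// doCreateContainerWithCPUPeriod creates connum containers on the given
+// node, pulling the image once and reporting one result message per
+// container; a failed slot carries its error instead of aborting the batch.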
+func (c *calcium) doCreateContainerWithCPUPeriod(nodename string, connum int, quota float64, specs types.Specs, opts *types.DeployOptions) []*types.CreateContainerMessage {
+	ms := make([]*types.CreateContainerMessage, connum)
+	for i := 0; i < len(ms); i++ {
+		ms[i] = &types.CreateContainerMessage{}
+	}
+
+	node, err := c.GetNode(opts.Podname, nodename)
+	if err != nil {
+		return ms
+	}
+
+	if err := pullImage(node, opts.Image); err != nil {
+		return ms
+	}
+
+	for i := 0; i < connum; i++ {
+		config, hostConfig, networkConfig, containerName, err := c.makeContainerOptions(nil, specs, opts, "cpuperiod")
+		if err != nil {
+			log.Errorf("error when creating CreateContainerOptions, %v", err)
+			ms[i].Error = err.Error()
+			continue
+		}
+
+		// create container
+		container, err := node.Engine.ContainerCreate(context.Background(), config, hostConfig, networkConfig, containerName)
+		if err != nil {
+			log.Errorf("error when creating container, %v", err)
+			ms[i].Error = err.Error()
+			continue
+		}
+
+		// connect container to network
+		// if network manager uses docker plugin, then connect must be called before container starts
+		if c.network.Type() == "plugin" {
+			ctx := utils.ToDockerContext(node.Engine)
+			breaked := false
+
+			// need to ensure all networks are correctly connected
+			for networkID, ipv4 := range opts.Networks {
+				if err = c.network.ConnectToNetwork(ctx, container.ID, networkID, ipv4); err != nil {
+					log.Errorf("error when connecting container %q to network %q, %q", container.ID, networkID, err.Error())
+					breaked = true
+					break
+				}
+			}
+
+			// remove bridge network
+			// only when user-defined networks are given
+			if len(opts.Networks) != 0 {
+				if err := c.network.DisconnectFromNetwork(ctx, container.ID, "bridge"); err != nil {
+					log.Errorf("error when disconnecting container %q from network %q, %q", container.ID, "bridge", err.Error())
+				}
+			}
+
+			// if any connect failed, this container needs to be removed
+			if breaked {
+				ms[i].Error = err.Error()
+				go node.Engine.ContainerRemove(context.Background(), container.ID, enginetypes.ContainerRemoveOptions{})
+				continue
+			}
+		}
+
+		err = node.Engine.ContainerStart(context.Background(), container.ID, enginetypes.ContainerStartOptions{})
+		if err != nil {
+			log.Errorf("error when starting container, %v", err)
+			ms[i].Error = err.Error()
+			go node.Engine.ContainerRemove(context.Background(), container.ID, enginetypes.ContainerRemoveOptions{})
+			continue
+		}
+
+		// TODO
+		// if network manager uses our own, then connect must be called after container starts
+		// here
+
+		info, err := node.Engine.ContainerInspect(context.Background(), container.ID)
+		if err != nil {
+			log.Errorf("error when inspecting container, %v", err)
+			ms[i].Error = err.Error()
+			continue
+		}
+
+		_, err = c.store.AddContainer(info.ID, opts.Podname, node.Name, containerName, nil)
+		if err != nil {
+			ms[i].Error = err.Error()
+			continue
+		}
+
+		ms[i] = &types.CreateContainerMessage{
+			Podname:       opts.Podname,
+			Nodename:      node.Name,
+			ContainerID:   info.ID,
+			ContainerName: containerName,
+			Error:         "",
+			Success:       true,
+			CPU:           nil,
+		}
+	}
+	return ms
+}
+
+func (c *calcium) createContainerWithScheduler(specs types.Specs, opts *types.DeployOptions) (chan *types.CreateContainerMessage, error) {
 	ch := make(chan *types.CreateContainerMessage)
 
 	result, err := c.prepareNodes(opts.Podname, opts.Nodename, opts.CPUQuota, opts.Count)
@@ -50,7 +192,7 @@ func (c *calcium) CreateContainer(specs types.Specs, opts *types.DeployOptions)
 			go func(nodename string, cpumap []types.CPUMap, opts *types.DeployOptions) {
 				defer wg.Done()
 
-				for _, m := range c.doCreateContainer(nodename, cpumap, specs, opts) {
+				for _, m := range c.doCreateContainerWithScheduler(nodename, cpumap, specs, opts) {
 					ch <- m
 				}
 			}(nodename, cpumap, opts)
@@ -71,18 +213,14 @@ func makeCPUMap(nodes []*types.Node) map[string]types.CPUMap {
 	return r
 }
 
-// Prepare nodes for deployment.
-// Later if any error occurs, these nodes can be restored.
-func (c *calcium) prepareNodes(podname, nodename string, quota float64, num int) (map[string][]types.CPUMap, error) {
-	result := make(map[string][]types.CPUMap)
-
-	// use podname as lock key to prevent scheduling on the same node at one time
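+// getCPUMap collects the CPU map of candidate nodes in a pod while
+// holding a pod-level lock, so that concurrent deploys don't race for
+// the same resources.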
+func (c *calcium) getCPUMap(podname, nodename string, quota float64) (map[string]types.CPUMap, []*types.Node, error) {
+	result := make(map[string]types.CPUMap)
 	lock, err := c.store.CreateLock(podname, 30)
 	if err != nil {
-		return result, err
+		return result, nil, err
 	}
 	if err := lock.Lock(); err != nil {
-		return result, err
+		return result, nil, err
 	}
 	defer lock.Unlock()
 
@@ -92,28 +230,40 @@ func (c *calcium) prepareNodes(podname, nodename string, quota float64, num int)
 	if nodename == "" {
 		nodes, err = c.ListPodNodes(podname)
 		if err != nil {
-			return result, err
+			return result, nil, err
 		}
 	} else {
 		n, err := c.GetNode(podname, nodename)
 		if err != nil {
-			return result, err
+			return result, nil, err
 		}
 		nodes = append(nodes, n)
 	}
 
-	// if public, use only public nodes
 	if quota == 0 { // a bit tricky here because fractional quotas like 0.5 have to be supported
 		nodes = filterNodes(nodes, true)
 	} else {
 		nodes = filterNodes(nodes, false)
 	}
 
 	if len(nodes) == 0 {
-		return result, fmt.Errorf("No available nodes")
+		return result, nil, fmt.Errorf("No available nodes")
 	}
 
-	cpumap := makeCPUMap(nodes)
+	result = makeCPUMap(nodes)
+	return result, nodes, nil
+}
+
+// Prepare nodes for deployment.
+// Later if any error occurs, these nodes can be restored.
+func (c *calcium) prepareNodes(podname, nodename string, quota float64, num int) (map[string][]types.CPUMap, error) {
+	result := make(map[string][]types.CPUMap)
+
+	cpumap, nodes, err := c.getCPUMap(podname, nodename, quota)
+	if err != nil {
+		return result, err
+	}
+	// use podname as lock key to prevent scheduling on the same node at one time
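+	// NOTE: the lock taken in getCPUMap is released when it returns, so
+	// SelectNodes below runs outside that lock.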
 	result, changed, err := c.scheduler.SelectNodes(cpumap, quota, num) // this interface consistently uses float64 now
 	if err != nil {
 		return result, err
@@ -167,7 +317,7 @@ func pullImage(node *types.Node, image string) error {
 	return nil
 }
 
-func (c *calcium) doCreateContainer(nodename string, cpumap []types.CPUMap, specs types.Specs, opts *types.DeployOptions) []*types.CreateContainerMessage {
+func (c *calcium) doCreateContainerWithScheduler(nodename string, cpumap []types.CPUMap, specs types.Specs, opts *types.DeployOptions) []*types.CreateContainerMessage {
 	ms := make([]*types.CreateContainerMessage, len(cpumap))
 	for i := 0; i < len(ms); i++ {
 		ms[i] = &types.CreateContainerMessage{}
@@ -184,7 +334,7 @@ func (c *calcium) doCreateContainer(nodename string, cpumap []types.CPUMap, spec
 
 	for i, quota := range cpumap {
 		// create options
-		config, hostConfig, networkConfig, containerName, err := c.makeContainerOptions(quota, specs, opts)
+		config, hostConfig, networkConfig, containerName, err := c.makeContainerOptions(quota, specs, opts, "scheduler")
 		if err != nil {
 			log.Errorf("error when creating CreateContainerOptions, %v", err)
 			ms[i].Error = err.Error()
@@ -285,7 +435,7 @@ func (c *calcium) releaseQuota(node *types.Node, quota types.CPUMap) {
 	c.store.UpdateNodeCPU(node.Podname, node.Name, quota, "+")
 }
 
-func (c *calcium) makeContainerOptions(quota map[string]int, specs types.Specs, opts *types.DeployOptions) (
+func (c *calcium) makeContainerOptions(quota map[string]int, specs types.Specs, opts *types.DeployOptions, optionMode string) (
 	*enginecontainer.Config,
 	*enginecontainer.HostConfig,
 	*enginenetwork.NetworkingConfig,
@@ -322,16 +472,20 @@ func (c *calcium) makeContainerOptions(quota map[string]int, specs types.Specs,
 	// calculate CPUShares and CPUSet
 	// scheduler won't return more than 1 share quota
 	// so the smallest share is the share numerator
-	shareQuota := 10
-	labels := []string{}
-	for label, share := range quota {
-		labels = append(labels, label)
-		if share < shareQuota {
-			shareQuota = share
+	var cpuShares int64
+	var cpuSetCpus string
+	if optionMode == "scheduler" {
+		shareQuota := 10
+		labels := []string{}
+		for label, share := range quota {
+			labels = append(labels, label)
+			if share < shareQuota {
+				shareQuota = share
+			}
 		}
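+		// map the smallest share onto Docker's CPUShares scale: with 10
+		// shares per core (the shareQuota base above), a full core is 1024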
+		cpuShares = int64(float64(shareQuota) / float64(10) * float64(1024))
+		cpuSetCpus = strings.Join(labels, ",")
 	}
-	cpuShares := int64(float64(shareQuota) / float64(10) * float64(1024))
-	cpuSetCpus := strings.Join(labels, ",")
 
 	// env
 	env := append(opts.Env, fmt.Sprintf("APP_NAME=%s", specs.Appname))
@@ -425,6 +579,22 @@ func (c *calcium) makeContainerOptions(quota map[string]int, specs types.Specs,
 		NetworkDisabled: false,
 		Labels:          containerLabels,
 	}
+
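+	// "scheduler" mode pins containers to the cores picked above via
+	// CPUShares/CpusetCpus; otherwise CPU time is capped with the CFS
+	// period/quota pair, leaving core placement to the kernel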
+	var resource enginecontainer.Resources
+	if optionMode == "scheduler" {
+		resource = enginecontainer.Resources{
+			CPUShares:  cpuShares,
+			CpusetCpus: cpuSetCpus,
+			Ulimits:    ulimits,
+		}
+	} else {
+		resource = enginecontainer.Resources{
+			CPUPeriod: utils.CpuPeriodBase,
+			CPUQuota:  int64(opts.CPUQuota * float64(utils.CpuPeriodBase)),
+			Ulimits:   ulimits,
+		}
+	}
+
 	hostConfig := &enginecontainer.HostConfig{
 		Binds:     binds,
 		LogConfig: enginecontainer.LogConfig{Type: logConfig},
@@ -433,11 +603,7 @@ func (c *calcium) makeContainerOptions(quota map[string]int, specs types.Specs,
 		CapAdd:     engineslice.StrSlice(capAdd),
 		ExtraHosts: entry.ExtraHosts,
 		Privileged: entry.Privileged != "",
-		Resources: enginecontainer.Resources{
-			CPUShares:  cpuShares,
-			CpusetCpus: cpuSetCpus,
-			Ulimits:    ulimits,
-		},
+		Resources:  resource,
 	}
 	// this is empty because we don't use any plugin for Docker
 	// networkConfig := &enginenetwork.NetworkingConfig{