@@ -135,7 +135,8 @@ type k8s struct {
 	master        executor.Executor
 	minion        executor.Executor // Current single minion is strictly connected with getReadyNodes() function and expectedKubeletNodesCount const.
 	config        Config
-	client        *kubernetes.Clientset
+
+	k8sPodAPI // Private interface
 
 	isListening   netutil.IsListeningFunction // For mocking purposes.
 	getReadyNodes getReadyNodesFunc           // For mocking purposes.
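
The k8sPodAPI type itself is not shown in this diff; the embedding replaces the raw *kubernetes.Clientset field so that pod handling can be mocked like the other collaborators in this struct. A minimal sketch of what the private interface and its constructor might look like, inferred from the getPodsFromNode and cleanNode calls further down (the pod type and the implementation struct are assumptions):

// Hypothetical sketch; the real definition lives outside this diff.
type k8sPodAPI interface {
	// getPodsFromNode lists the pods currently scheduled on the given kubelet host.
	getPodsFromNode(node string) ([]v1.Pod, error)
	// cleanNode deletes the given pods from the node.
	cleanNode(node string, pods []v1.Pod) error
}

// newK8sPodAPI wires the concrete implementation to the cluster config.
// Hypothetical constructor body; the real one is not part of this diff.
func newK8sPodAPI(config Config) k8sPodAPI {
	return &k8sPodAPIImpl{config: config}
}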
@@ -147,66 +148,39 @@ type k8s struct {
 // In case of the same executor they will be on the same host (high risk of interferences).
 // NOTE: Currently we support only single-kubelet (single-minion) kubernetes.
 func New(master executor.Executor, minion executor.Executor, config Config) executor.Launcher {
-	client, err := kubernetes.NewForConfig(
-		&rest.Config{
-			Host: config.KubeAPIAddr,
-		},
-	)
-	if err != nil {
-		panic(err)
-	}
-
-	return k8s{
+	return &k8s{
 		master:        master,
 		minion:        minion,
 		config:        config,
-		client:        client,
+		k8sPodAPI:     newK8sPodAPI(config),
 		isListening:   netutil.IsListening,
 		getReadyNodes: getReadyNodes,
 	}
 }
 
 // Name returns human readable name for job.
-func (m k8s) Name() string {
+func (m *k8s) Name() string {
 	return "Kubernetes [single-kubelet]"
 }
 
 // Launch starts the kubernetes cluster. It returns a cluster
 // represented as a Task Handle instance.
 // Error is returned when Launcher is unable to start a cluster.
-func (m k8s) Launch() (handle executor.TaskHandle, err error) {
+func (m *k8s) Launch() (handle executor.TaskHandle, err error) {
 	for retry := uint64(0); retry <= m.config.RetryCount; retry++ {
 		handle, err = m.tryLaunchCluster()
 		if err != nil {
 			log.Warningf("could not launch Kubernetes cluster: %q. Retry number: %d", err.Error(), retry)
 			continue
 		}
-
 		return handle, nil
 	}
-
-	pods, err := m.getPodsFromNode(m.kubeletHost)
-	if err != nil {
-		log.Warnf("Could not check if there are dangling nodes on Kubelet: %s", err)
-	} else {
-		if len(pods) != 0 && kubeCleanLeftPods.Value() == false {
-			log.Warnf("Kubelet on node %q has %d dangling nodes. Use `kubectl` to delete them or set %q flag to let Swan remove them", m.kubeletHost, len(pods), kubeCleanLeftPods.Name)
-		} else if len(pods) != 0 && kubeCleanLeftPods.Value() == true {
-			log.Infof("Kubelet on node %q has %d dangling nodes. Attempt to clean them", m.kubeletHost, len(pods))
-			err = m.cleanNode(m.kubeletHost, pods)
-			if err != nil {
-				log.Errorf("Could not clean dangling pods: %s", err)
-			} else {
-				log.Infof("Dangling pods on node %q has been deleted", m.kubeletHost)
-			}
-		}
-	}
-
 	log.Errorf("Could not launch Kubernetes cluster: %q", err.Error())
 	return nil, err
 }
 
-func (m k8s) tryLaunchCluster() (executor.TaskHandle, error) {
+func (m *k8s) tryLaunchCluster() (executor.TaskHandle, error) {
 	handle, err := m.launchCluster()
 	if err != nil {
 		return nil, err
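
kubeCleanLeftPods is read through .Value() and .Name in the code above and below, which matches the shape of a Swan conf flag. A hedged sketch of how such a boolean flag might be declared (the flag name, help text, and exact NewBoolFlag signature are assumptions; the real declaration is elsewhere in the package):

// Hypothetical declaration of the cleanup flag.
var kubeCleanLeftPods = conf.NewBoolFlag(
	"kubernetes_clean_pods", // assumed flag name
	"Attempt to delete dangling pods left on the kubelet node before relaunching.",
	false, // assumed default: only warn, never delete
)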
@@ -221,10 +195,34 @@ func (m k8s) tryLaunchCluster() (executor.TaskHandle, error) {
 		}
 		return nil, err
 	}
+	// Optional removal of the unwanted pods in Swan's namespace.
+	pods, err := m.getPodsFromNode(m.kubeletHost)
+	if err != nil {
+		log.Warningf("Could not retrieve list of pods from host %s. Error: %s", m.kubeletHost, err)
+		// If getPodsFromNode returns an error, the cluster is not usable. Delete it.
+		stopErr := handle.Stop()
+		if stopErr != nil {
+			log.Warningf("Errors while stopping k8s cluster: %v", stopErr)
+		}
+		return nil, err
+	}
+	if len(pods) != 0 {
+		if kubeCleanLeftPods.Value() {
+			log.Infof("Kubelet on node %q has %d dangling pods. Attempt to clean them", m.kubeletHost, len(pods))
+			err = m.cleanNode(m.kubeletHost, pods)
+			if err != nil {
+				log.Errorf("Could not clean dangling pods: %s", err)
+			} else {
+				log.Infof("Dangling pods on node %q have been deleted", m.kubeletHost)
+			}
+		} else {
+			log.Warnf("Kubelet on node %q has %d dangling pods. Use `kubectl` to delete them or set %q flag to let Swan remove them", m.kubeletHost, len(pods), kubeCleanLeftPods.Name)
+		}
+	}
 	return handle, nil
 }
 
-func (m k8s) launchCluster() (executor.TaskHandle, error) {
+func (m *k8s) launchCluster() (executor.TaskHandle, error) {
 	// Launch apiserver using master executor.
 	kubeAPIServer := m.getKubeAPIServerCommand()
 	apiHandle, err := m.launchService(kubeAPIServer)
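
The getPodsFromNode implementation behind k8sPodAPI is not part of this diff. A sketch of how it might look with client-go, filtering by node name via a field selector (the implementation struct, the namespace, and the exact List signature, which varies across client-go versions, are assumptions):

// Hypothetical sketch of the pod listing behind k8sPodAPI.
func (p *k8sPodAPIImpl) getPodsFromNode(node string) ([]v1.Pod, error) {
	client, err := kubernetes.NewForConfig(&rest.Config{Host: p.config.KubeAPIAddr})
	if err != nil {
		return nil, err
	}
	// Select only pods scheduled on the given kubelet host.
	selector := fields.OneTermEqualSelector("spec.nodeName", node).String()
	podList, err := client.CoreV1().Pods("default").List(metav1.ListOptions{FieldSelector: selector})
	if err != nil {
		return nil, err
	}
	return podList.Items, nil
}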
@@ -283,7 +281,7 @@ func (m k8s) launchCluster() (executor.TaskHandle, error) {
 }
 
 // launchService executes a service and checks if it is listening on its endpoint.
-func (m k8s) launchService(command kubeCommand) (executor.TaskHandle, error) {
+func (m *k8s) launchService(command kubeCommand) (executor.TaskHandle, error) {
 	handle, err := command.exec.Execute(command.raw)
 	if err != nil {
 		return nil, errors.Wrapf(err, "execution of command %q on %q failed", command.raw, command.exec.Name())
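
kubeCommand itself is unchanged by this diff. Its exec and raw fields are confirmed by command.exec.Execute(command.raw) above; the third, positional port field in the kubeCommand{...} literals below is inferred, so its name here is an assumption:

// Shape inferred from the kubeCommand{m.master, fmt.Sprint(...), port} literals.
type kubeCommand struct {
	exec executor.Executor // executor the service is launched with
	raw  string            // full hyperkube command line
	port int               // endpoint port probed by isListening (field name assumed)
}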
@@ -303,7 +301,7 @@ func (m k8s) launchService(command kubeCommand) (executor.TaskHandle, error) {
 }
 
 // getKubeAPIServerCommand returns command for apiserver.
-func (m k8s) getKubeAPIServerCommand() kubeCommand {
+func (m *k8s) getKubeAPIServerCommand() kubeCommand {
 	return kubeCommand{m.master,
 		fmt.Sprint(
 			fmt.Sprintf("hyperkube apiserver"),
@@ -323,7 +321,7 @@ func (m k8s) getKubeAPIServerCommand() kubeCommand {
 }
 
 // getKubeControllerCommand returns command for controller-manager.
-func (m k8s) getKubeControllerCommand() kubeCommand {
+func (m *k8s) getKubeControllerCommand() kubeCommand {
 	return kubeCommand{m.master,
 		fmt.Sprint(
 			fmt.Sprintf("hyperkube controller-manager"),
@@ -335,7 +333,7 @@ func (m k8s) getKubeControllerCommand() kubeCommand {
 }
 
 // getKubeSchedulerCommand returns command for scheduler.
-func (m k8s) getKubeSchedulerCommand() kubeCommand {
+func (m *k8s) getKubeSchedulerCommand() kubeCommand {
 	return kubeCommand{m.master,
 		fmt.Sprint(
 			fmt.Sprintf("hyperkube scheduler"),
@@ -347,7 +345,7 @@ func (m k8s) getKubeSchedulerCommand() kubeCommand {
 }
 
 // getKubeletCommand returns command for kubelet.
-func (m k8s) getKubeletCommand() kubeCommand {
+func (m *k8s) getKubeletCommand() kubeCommand {
 	return kubeCommand{m.minion,
 		fmt.Sprint(
 			fmt.Sprintf("hyperkube kubelet"),
@@ -361,7 +359,7 @@ func (m k8s) getKubeletCommand() kubeCommand {
 }
 
 // getKubeProxyCommand returns command for proxy.
-func (m k8s) getKubeProxyCommand() kubeCommand {
+func (m *k8s) getKubeProxyCommand() kubeCommand {
 	return kubeCommand{m.minion,
 		fmt.Sprint(
 			fmt.Sprintf("hyperkube proxy"),
@@ -372,7 +370,7 @@ func (m k8s) getKubeProxyCommand() kubeCommand {
 		), m.config.KubeProxyPort}
 }
 
-func (m k8s) waitForReadyNode(apiServerAddress string) error {
+func (m *k8s) waitForReadyNode(apiServerAddress string) error {
 	for idx := 0; idx < nodeCheckRetryCount; idx++ {
 		nodes, err := m.getReadyNodes(apiServerAddress)
 		if err != nil {
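
For context, a hedged sketch of how a caller might drive this launcher once the change is applied (the package alias, the DefaultConfig helper, and the executor values are assumptions):

// Hypothetical usage of the launcher.
k8sLauncher := kubernetes.New(masterExec, minionExec, kubernetes.DefaultConfig())
clusterHandle, err := k8sLauncher.Launch()
if err != nil {
	log.Fatalf("could not launch Kubernetes cluster: %s", err)
}
defer clusterHandle.Stop() // tears the cluster down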