Fixes for cluster api v0.1.4
Signed-off-by: Chuck Ha <chuckh@vmware.com>
chuckha committed Jul 3, 2019
1 parent 1448394 commit c29fe4e
Showing 6 changed files with 75 additions and 133 deletions.
19 changes: 18 additions & 1 deletion actuators/actuators.go
@@ -17,6 +17,7 @@ limitations under the License.
package actuators

import (
"bytes"
"fmt"
"io/ioutil"

@@ -71,6 +72,22 @@ func kubeconfigToSecret(clusterName, namespace string) (*v1.Secret, error) {
return nil, errors.WithStack(err)
}

allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
if err != nil {
return nil, errors.WithStack(err)
}

// This is necessary so the management cluster in a container can talk to another container.
// The containers share the same bridged network, and the load balancer responds on 6443 at
// its docker IP; the *HOST*, however, listens on a random port (the one returned from
// GetLoadBalancerHostAndPort), so the kubeconfig's server line has to be rewritten here.
lbip, _, err := actions.GetLoadBalancerHostAndPort(allNodes)
if err != nil {
return nil, errors.WithStack(err)
}
lines := bytes.Split(data, []byte("\n"))
for i, line := range lines {
if bytes.Contains(line, []byte("https://")) {
lines[i] = []byte(fmt.Sprintf("    server: https://%s:%d", lbip, 6443))
}
}

// write it to a secret
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
@@ -80,7 +97,7 @@ func kubeconfigToSecret(clusterName, namespace string) (*v1.Secret, error) {
},
Data: map[string][]byte{
// TODO pull in constant from cluster api
"value": data,
"value": bytes.Join(lines, []byte("\n")),
},
}, nil
}
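The server-line rewrite above is easy to sanity-check in isolation. A minimal sketch, assuming an abbreviated kubeconfig and an invented load balancer IP (neither is from the commit):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Abbreviated kubeconfig as fetched from the cluster; only the server
	// line matters for the rewrite.
	data := []byte("clusters:\n- cluster:\n    server: https://127.0.0.1:53271\n  name: my-cluster")
	lbip := "172.17.0.3" // hypothetical docker IP of the load balancer container

	lines := bytes.Split(data, []byte("\n"))
	for i, line := range lines {
		if bytes.Contains(line, []byte("https://")) {
			lines[i] = []byte(fmt.Sprintf("    server: https://%s:%d", lbip, 6443))
		}
	}
	// The secret now points other containers at the load balancer on 6443
	// instead of the random port published on the host.
	fmt.Println(string(bytes.Join(lines, []byte("\n"))))
}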
49 changes: 7 additions & 42 deletions actuators/machine.go
@@ -23,7 +23,6 @@ import (
"time"

"github.com/go-logr/logr"
apicorev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"sigs.k8s.io/cluster-api-provider-docker/kind/actions"
@@ -81,14 +80,9 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
m.Log.Error(err, "Error adding control plane")
return err
}
nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
if err != nil {
m.Log.Error(err, "Error getting node reference UID")
return err
}
providerID := providerID(controlPlaneNode.Name())
providerID := actions.ProviderID(controlPlaneNode.Name())
machine.Spec.ProviderID = &providerID
return m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID))
return m.save(old, machine)
}

m.Log.Info("Creating a brand new cluster")
@@ -107,15 +101,10 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
m.Log.Error(err, "Error creating control plane")
return err
}
nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
if err != nil {
m.Log.Error(err, "Error getting node reference UID")
return err
}
// set the machine's providerID
providerID := providerID(controlPlaneNode.Name())
providerID := actions.ProviderID(controlPlaneNode.Name())
machine.Spec.ProviderID = &providerID
if err := m.save(old, machine, getNodeRef(controlPlaneNode.Name(), nodeUID)); err != nil {
if err := m.save(old, machine); err != nil {
m.Log.Error(err, "Error setting machine's provider ID")
return err
}
@@ -144,14 +133,9 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
m.Log.Error(err, "Error creating new worker node")
return err
}
providerID := providerID(worker.Name())
providerID := actions.ProviderID(worker.Name())
machine.Spec.ProviderID = &providerID
nodeUID, err := actions.GetNodeRefUID(c.GetName(), worker.Name())
if err != nil {
m.Log.Error(err, "Error getting node reference ID")
return err
}
return m.save(old, machine, getNodeRef(worker.Name(), nodeUID))
return m.save(old, machine)
}

// Delete returns nil when the machine no longer exists or when a successful delete has happened.
@@ -201,7 +185,7 @@ func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machin
}

// patches the object and saves the status.
func (m *Machine) save(oldMachine, newMachine *clusterv1.Machine, noderef *apicorev1.ObjectReference) error {
func (m *Machine) save(oldMachine, newMachine *clusterv1.Machine) error {
m.Log.Info("updating machine")
p, err := patch.NewJSONPatch(oldMachine, newMachine)
if err != nil {
@@ -222,19 +206,9 @@ func (m *Machine) save(oldMachine, newMachine *clusterv1.Machine, noderef *apico
}
m.Log.Info("updated machine")
}
// set the noderef after so we don't try and patch it in during the first update
newMachine.Status.NodeRef = noderef
if _, err := m.ClusterAPI.Machines(oldMachine.Namespace).UpdateStatus(newMachine); err != nil {
m.Log.Error(err, "Error setting node reference")
return err
}
return nil
}

func providerID(name string) string {
return fmt.Sprintf("docker:////%s", name)
}

// CAPIroleToKindRole converts a CAPI role to a kind role.
// TODO: there is a better way to do this.
func CAPIroleToKindRole(CAPIRole string) string {
@@ -243,12 +217,3 @@ func CAPIroleToKindRole(CAPIRole string) string {
}
return CAPIRole
}

func getNodeRef(name, uid string) *apicorev1.ObjectReference {
return &apicorev1.ObjectReference{
Kind: "Node",
APIVersion: apicorev1.SchemeGroupVersion.String(),
Name: name,
UID: types.UID(uid),
}
}
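One detail worth noting in Create above: machine.Spec.ProviderID is a *string in cluster-api, which is why the code assigns through a local variable. A stand-in sketch, with the types trimmed to the one field used here and a made-up node name:

package main

import "fmt"

// MachineSpec and Machine stand in for the clusterv1 types; only the field
// exercised above is kept.
type MachineSpec struct {
	ProviderID *string
}

type Machine struct {
	Spec MachineSpec
}

func main() {
	machine := &Machine{}
	// As in Create: build the ID, then take the address of the local so the
	// field can remain nil until a node actually exists.
	providerID := fmt.Sprintf("docker:////%s", "my-cluster-control-plane") // hypothetical node name
	machine.Spec.ProviderID = &providerID
	fmt.Println(*machine.Spec.ProviderID) // docker:////my-cluster-control-plane
}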
5 changes: 5 additions & 0 deletions cmd/capd-manager/main.go
@@ -17,6 +17,7 @@ limitations under the License.
package main

import (
"flag"
"fmt"
"time"

@@ -35,6 +36,9 @@ import (
)

func main() {
flag.Set("v", "0")
flag.Parse()

cfg, err := config.GetConfig()
if err != nil {
panic(err)
@@ -67,6 +71,7 @@ func main() {

machineLogger := logger.Log{}
machineLogger.Logger = klogr.New().WithName("[machine-actuator]")

machineActuator := actuators.Machine{
Core: k8sclientset.CoreV1(),
ClusterAPI: cs.ClusterV1alpha1(),
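The two new lines in main pin the log verbosity to 0 before any flags are read. A small sketch of the same set-then-parse pattern using only the standard flag package (the -v flag here is a stand-in for the one klog registers):

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Stand-in for klog's -v flag on the default FlagSet.
	v := flag.String("v", "2", "log verbosity")

	// Programmatic default: overrides the declared default, but anything
	// passed on the command line still wins once Parse runs.
	if err := flag.Set("v", "0"); err != nil {
		panic(err)
	}
	flag.Parse()

	fmt.Println("verbosity:", *v) // "0" unless -v was given explicitly
}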
78 changes: 28 additions & 50 deletions cmd/capdctl/main.go
@@ -277,15 +277,17 @@ func makeManagementCluster(clusterName, capiVersion, capdImage, capiImageOverrid
panic(err)
}
defer os.Remove(f.Name())
crds, err := getCRDs(capiVersion)
fmt.Println("Downloading the latest CRDs for CAPI version", capiVersion)
crds, err := getCRDs(capiVersion, capiImage)
if err != nil {
panic(err)
}
fmt.Fprintln(f, crds)
fmt.Fprintln(f, "---")
fmt.Fprintln(f, capdRBAC)
fmt.Fprintln(f, "---")
fmt.Fprintln(f, getCAPDPlane(capdImage, capiImage))
fmt.Fprintln(f, getCAPDPlane(capdImage))
fmt.Println("Applying the control plane", f.Name())
cmd := exec.Command("kubectl", "apply", "-f", f.Name())
cmd.SetEnv(fmt.Sprintf("KUBECONFIG=%s/.kube/kind-config-%s", os.Getenv("HOME"), clusterName))
cmd.SetStdout(os.Stdout)
@@ -295,8 +297,8 @@
}
}

func getCAPDPlane(capdImage, capiImage string) string {
return fmt.Sprintf(capiPlane, capdImage, capiImage)
func getCAPDPlane(capdImage string) string {
return fmt.Sprintf(capiPlane, capdImage)
}

var capiPlane = `
@@ -307,13 +309,6 @@ metadata:
controller-tools.k8s.io: "1.0"
name: docker-provider-system
---
apiVersion: v1
kind: Namespace
metadata:
labels:
controller-tools.k8s.io: "1.0"
name: cluster-api-system
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
@@ -362,47 +357,12 @@ spec:
- effect: NoExecute
key: node.alpha.kubernetes.io/unreachable
operator: Exists
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
name: cluster-api-controller-manager
namespace: cluster-api-system
spec:
selector:
matchLabels:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
serviceName: cluster-api-controller-manager-service
template:
metadata:
labels:
control-plane: controller-manager
controller-tools.k8s.io: "1.0"
spec:
containers:
- command:
- /manager
image: %s
name: manager
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
key: node.alpha.kubernetes.io/notReady
operator: Exists
- effect: NoExecute
key: node.alpha.kubernetes.io/unreachable
operator: Exists
`

func getCRDs(version string) (string, error) {
crds := []string{"crds", "rbac"}
// getCRDs should actually use kustomize to correctly build the manager yaml.
// HACK: this is a temporary hand-rolled substitute.
func getCRDs(version, capiImage string) (string, error) {
crds := []string{"crds", "rbac", "manager"}
releaseCode := fmt.Sprintf("https://github.com/kubernetes-sigs/cluster-api/archive/%s.tar.gz", version)

resp, err := http.Get(releaseCode)
@@ -434,10 +394,28 @@ func getCRDs(version string) (string, error) {
continue
case tar.TypeReg:
for _, crd := range crds {
// Skip the kustomization files for now; we would like to use kustomize here in the future.
if strings.HasSuffix(header.Name, "kustomization.yaml") {
continue
}

// This is a poor person's kustomize
if strings.HasSuffix(header.Name, "manager.yaml") {
var managerBuf bytes.Buffer
if _, err := io.Copy(&managerBuf, tgz); err != nil {
return "", err
}
lines := strings.Split(managerBuf.String(), "\n")
for _, line := range lines {
if strings.Contains(line, "image:") {
buf.WriteString(strings.Replace(line, "image: controller:latest", fmt.Sprintf("image: %s", capiImage), 1))
buf.WriteString("\n")
continue
}
buf.WriteString(line)
buf.WriteString("\n")
}
}

// These files don't need kustomize at all.
if strings.Contains(header.Name, fmt.Sprintf("config/%s/", crd)) {
if _, err := io.Copy(&buf, tgz); err != nil {
return "", err
}
fmt.Fprintln(&buf, "---")
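The "poor person's kustomize" above is a plain line-wise substitution. A standalone sketch, with an invented manifest fragment and image name:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Fragment of a manager.yaml as shipped in the CAPI release tarball
	// (indentation invented for illustration).
	manager := "    spec:\n      containers:\n      - command:\n        - /manager\n        image: controller:latest\n        name: manager"
	capiImage := "gcr.io/k8s-cluster-api/cluster-api-controller:0.1.4" // hypothetical

	var out strings.Builder
	for _, line := range strings.Split(manager, "\n") {
		if strings.Contains(line, "image:") {
			// Replace only the substring, so the YAML indentation survives.
			line = strings.Replace(line, "image: controller:latest", fmt.Sprintf("image: %s", capiImage), 1)
		}
		out.WriteString(line)
		out.WriteString("\n")
	}
	fmt.Print(out.String())
}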
44 changes: 8 additions & 36 deletions kind/actions/cluster_actions.go
@@ -143,7 +143,7 @@ func KubeadmConfig(node *nodes.Node, clusterName, lbip string) error {
func KubeadmInit(clusterName, version string) error {
allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
if err != nil {
return nil
return err
}

node, err := nodes.BootstrapControlPlaneNode(allNodes)
@@ -179,7 +179,7 @@ func KubeadmInit(clusterName, version string) error {
}

// save the kubeconfig on the host with the loadbalancer endpoint
hostPort, err := getLoadBalancerPort(allNodes)
_, hostPort, err := GetLoadBalancerHostAndPort(allNodes)
if err != nil {
return errors.Wrap(err, "failed to get kubeconfig from node")
}
@@ -270,7 +270,7 @@ func SetNodeProviderRef(clusterName, nodeName string) error {
return err
}

patch := fmt.Sprintf(`{"spec": {"providerID": "docker://%s"}}`, nodeName)
patch := fmt.Sprintf(`{"spec": {"providerID": "%s"}}`, ProviderID(nodeName))
fmt.Println("trying to apply:", patch)
cmd := node.Command(
"kubectl",
@@ -290,39 +290,6 @@ func SetNodeProviderRef(clusterName, nodeName string) error {
return nil
}

// GetNodeRefUID returns the node reference UID
func GetNodeRefUID(clusterName, nodeName string) (string, error) {
// k get nodes my-cluster-worker -o custom-columns=UID:.metadata.uid --no-headers
allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
if err != nil {
return "", err
}

node, err := nodes.BootstrapControlPlaneNode(allNodes)
if err != nil {
return "", err
}

patch := fmt.Sprintf(`{"spec": {"providerID": "docker://%s"}}`, nodeName)
fmt.Println("trying to apply:", patch)
cmd := node.Command(
"kubectl",
"--kubeconfig", "/etc/kubernetes/admin.conf",
"get",
"node", nodeName,
"--output=custom-columns=UID:.metadata.uid",
"--no-headers",
)
lines, err := exec.CombinedOutputLines(cmd)
if err != nil {
for _, line := range lines {
fmt.Println(line)
}
return "", errors.Wrap(err, "failed get node ref UID")
}
return strings.TrimSpace(lines[0]), nil
}

// DeleteClusterNode will remove the kubernetes node from the list of nodes (during a kubectl get nodes).
func DeleteClusterNode(clusterName, nodeName string) error {
// get all control plane nodes
@@ -379,3 +346,8 @@ func KubeadmReset(clusterName, nodeName string) error {

return nil
}

// ProviderID formats the provider ID that is set on both the node and the machine
func ProviderID(name string) string {
return fmt.Sprintf("docker:////%s", name)
}
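With the helper in place, the provider ID written to the machine object and the one patched onto the node come from the same function. A quick sketch of the patch body SetNodeProviderRef builds (node name invented):

package main

import "fmt"

// ProviderID mirrors the helper added above.
func ProviderID(name string) string {
	return fmt.Sprintf("docker:////%s", name)
}

func main() {
	patch := fmt.Sprintf(`{"spec": {"providerID": "%s"}}`, ProviderID("my-cluster-worker"))
	fmt.Println(patch)
	// {"spec": {"providerID": "docker:////my-cluster-worker"}}
}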
13 changes: 9 additions & 4 deletions kind/actions/kind.go
@@ -236,13 +236,18 @@ func ListControlPlanes(clusterName string) ([]nodes.Node, error) {
fmt.Sprintf("label=%s=%s", constants.NodeRoleKey, constants.ControlPlaneNodeRoleValue))
}

// getLoadBalancerPort returns the port on the host on which the APIServer is exposed
func getLoadBalancerPort(allNodes []nodes.Node) (int32, error) {
// GetLoadBalancerHostAndPort returns the load balancer node's IP and the port on the host on which the APIServer is exposed
func GetLoadBalancerHostAndPort(allNodes []nodes.Node) (string, int32, error) {
node, err := nodes.ExternalLoadBalancerNode(allNodes)
if err != nil {
return 0, err
return "", 0, err
}
return node.Ports(6443)
ipv4, _, err := node.IP()
if err != nil {
return "", 0, err
}
port, err := node.Ports(6443)
return ipv4, port, err
}

// matches kubeconfig server entry like:
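The renamed helper now feeds two different consumers: kubeconfigToSecret pairs the returned IP with port 6443 for traffic inside the docker network, while KubeadmInit keeps using the random host port. A sketch of the two call shapes, with a stand-in for the real function (which takes []nodes.Node) and invented values:

package main

import "fmt"

// getLoadBalancerHostAndPort stands in for actions.GetLoadBalancerHostAndPort;
// the IP and port are invented.
func getLoadBalancerHostAndPort() (string, int32, error) {
	return "172.17.0.3", 53271, nil
}

func main() {
	ip, hostPort, err := getLoadBalancerHostAndPort()
	if err != nil {
		panic(err)
	}
	// Inside the docker network: the load balancer answers on 6443 directly.
	fmt.Printf("in-cluster server: https://%s:%d\n", ip, 6443)
	// From the host: use the randomly published port instead.
	fmt.Printf("host server: https://127.0.0.1:%d\n", hostPort)
}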
