Skip to content
This repository has been archived by the owner on Sep 24, 2021. It is now read-only.

Commit

Permalink
Several updates
Browse files Browse the repository at this point in the history
* Use machine name instead of generated name tied to kind
* Improve delete functionality to remove node from child cluster
* Set nodeRefs

Signed-off-by: Chuck Ha <chuckh@vmware.com>
  • Loading branch information
chuckha committed Jun 21, 2019
1 parent cbf3654 commit d180db9
Show file tree
Hide file tree
Showing 6 changed files with 147 additions and 73 deletions.
98 changes: 75 additions & 23 deletions actuators/machine.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ import (
"time"

"github.com/chuckha/cluster-api-provider-docker/kind/actions"
apicorev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
Expand Down Expand Up @@ -78,14 +79,25 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
if setValue == clusterAPIControlPlaneSetLabel {
if len(controlPlanes) > 0 {
fmt.Println("Adding a control plane")
controlPlaneNode, err := actions.AddControlPlane(c.Name, machine.Spec.Versions.ControlPlane)
controlPlaneNode, err := actions.AddControlPlane(c.Name, machine.GetName(), machine.Spec.Versions.ControlPlane)
if err != nil {
fmt.Printf("%+v", err)
return err
}
name := providerID(controlPlaneNode.Name())
machine.Spec.ProviderID = &name
return m.save(old, machine)
nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
if err != nil {
fmt.Printf("%+v", err)
return err
}
nodeRef := &apicorev1.ObjectReference{
Kind: "Node",
APIVersion: apicorev1.SchemeGroupVersion.String(),
Name: controlPlaneNode.Name(),
UID: types.UID(nodeUID),
}
providerID := providerID(controlPlaneNode.Name())
machine.Spec.ProviderID = &providerID
return m.save(old, machine, nodeRef)
}

fmt.Println("Creating a brand new cluster")
Expand All @@ -99,16 +111,27 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
fmt.Printf("%+v\n", err)
return err
}
controlPlaneNode, err := actions.CreateControlPlane(c.Name, lbip, machine.Spec.Versions.ControlPlane)
controlPlaneNode, err := actions.CreateControlPlane(c.Name, machine.GetName(), lbip, machine.Spec.Versions.ControlPlane)
if err != nil {
fmt.Printf("%+v\n", err)
return err
}
nodeUID, err := actions.GetNodeRefUID(c.GetName(), controlPlaneNode.Name())
if err != nil {
fmt.Printf("%+v", err)
return err
}
nodeRef := &apicorev1.ObjectReference{
Kind: "Node",
APIVersion: apicorev1.SchemeGroupVersion.String(),
Name: controlPlaneNode.Name(),
UID: types.UID(nodeUID),
}

// set the machine's providerID
name := providerID(controlPlaneNode.Name())
machine.Spec.ProviderID = &name
if err := m.save(old, machine); err != nil {
providerID := providerID(controlPlaneNode.Name())
machine.Spec.ProviderID = &providerID
if err := m.save(old, machine, nodeRef); err != nil {
fmt.Printf("%+v\n", err)
return err
}
Expand All @@ -132,18 +155,43 @@ func (m *Machine) Create(ctx context.Context, c *clusterv1.Cluster, machine *clu
}

fmt.Println("Creating a new worker node")
worker, err := actions.AddWorker(c.Name, machine.Spec.Versions.Kubelet)
worker, err := actions.AddWorker(c.Name, machine.GetName(), machine.Spec.Versions.Kubelet)
if err != nil {
fmt.Printf("%+v", err)
return err
}
name := providerID(worker.Name())
machine.Spec.ProviderID = &name
return m.save(old, machine)
providerID := providerID(worker.Name())
machine.Spec.ProviderID = &providerID
nodeUID, err := actions.GetNodeRefUID(c.GetName(), worker.Name())
if err != nil {
fmt.Printf("%+v", err)
return err
}
nodeRef := &apicorev1.ObjectReference{
Kind: "Node",
APIVersion: apicorev1.SchemeGroupVersion.String(),
Name: worker.Name(),
UID: types.UID(nodeUID),
}
return m.save(old, machine, nodeRef)
}

// Delete returns nil when the machine no longer exists or when a successful
// delete has happened.
//
// The machine's nodeRef is logged for debugging, then the backing container
// is removed (looked up by machine name) only if it still exists, which
// keeps repeated deletes idempotent.
func (m *Machine) Delete(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
	if machine.Status.NodeRef != nil {
		// \n fixes the original "/n" typo, which printed a literal slash-n.
		fmt.Printf("[delete] machine status noderef name: %q\n", machine.Status.NodeRef.Name)
	} else {
		fmt.Println("[delete] machine noderef is nil...")
	}

	// Only attempt removal when the node actually exists, so a delete of an
	// already-gone machine succeeds without error.
	exists, err := m.Exists(ctx, cluster, machine)
	if err != nil {
		return err
	}
	if exists {
		return actions.DeleteNode(cluster.Name, machine.GetName())
	}
	return nil
}

func (m *Machine) Update(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
Expand All @@ -152,16 +200,16 @@ func (m *Machine) Update(ctx context.Context, cluster *clusterv1.Cluster, machin
}

func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) {
if machine.Spec.ProviderID == nil {
return false, nil
if machine.Spec.ProviderID != nil {
return true, nil
}
fmt.Println("Looking for a docker container named", providerNameToLookupID(*machine.Spec.ProviderID))

role := getRole(machine)
kindRole := CAPIroleToKindRole(role)
labels := []string{
fmt.Sprintf("label=%s=%s", constants.NodeRoleKey, kindRole),
fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, cluster.Name),
fmt.Sprintf("name=^%s$", providerNameToLookupID(*machine.Spec.ProviderID)),
fmt.Sprintf("name=^%s$", machine.GetName()),
}
fmt.Printf("using labels: %v\n", labels)
nodeList, err := nodes.List(labels...)
Expand All @@ -172,7 +220,8 @@ func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machin
return len(nodeList) >= 1, nil
}

func (m *Machine) save(old, new *clusterv1.Machine) error {
// patches the object and saves the status.
func (m *Machine) save(old, new *clusterv1.Machine, noderef *apicorev1.ObjectReference) error {
fmt.Println("updating machine")
p, err := patch.NewJSONPatch(old, new)
if err != nil {
Expand All @@ -186,19 +235,22 @@ func (m *Machine) save(old, new *clusterv1.Machine) error {
fmt.Printf("%+v\n", err)
return err
}
if _, err := m.ClusterAPI.Machines(old.Namespace).Patch(new.Name, types.JSONPatchType, pb); err != nil {
new, err = m.ClusterAPI.Machines(old.Namespace).Patch(new.Name, types.JSONPatchType, pb)
if err != nil {
fmt.Printf("%+v\n", err)
return err
}
fmt.Println("updated machine")
}
// set the noderef after so we don't try and patch it in during the first update
new.Status.NodeRef = noderef
if _, err := m.ClusterAPI.Machines(old.Namespace).UpdateStatus(new); err != nil {
fmt.Printf("%+v\n", err)
return err
}
return nil
}

// providerNameToLookupID strips the "docker://" scheme from a provider ID,
// returning the bare container name used for docker lookups. It is the
// inverse of providerID. If the prefix is absent — or the string is shorter
// than the prefix — the input is returned unchanged instead of panicking on
// an out-of-range slice, as the original hard-coded slice did.
func providerNameToLookupID(providerName string) string {
	const prefix = "docker://"
	if len(providerName) >= len(prefix) && providerName[:len(prefix)] == prefix {
		return providerName[len(prefix):]
	}
	return providerName
}

// providerID converts a container/node name into the docker:// provider-ID
// string stored on a Machine spec.
func providerID(name string) string {
	return fmt.Sprint("docker://", name)
}
Expand Down
8 changes: 4 additions & 4 deletions cmd/capdctl/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@ import (
"sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
)

// TODO: Generate the RBAC stuff from somewhere instead of copy pasta

const (
// Important to keep this consistent.
controlPlaneSet = "controlplane"
Expand All @@ -53,7 +55,7 @@ func main() {

capd := flag.NewFlagSet("capd", flag.ExitOnError)
capdImage := capd.String("capd-image", "gcr.io/kubernetes1-226021/capd-manager:latest", "The capd manager image to run")
capiImage := capd.String("capi-image", "gcr.io/k8s-cluster-api/cluster-api-controller:0.1.1", "The capi manager image to run")
capiImage := capd.String("capi-image", "gcr.io/k8s-cluster-api/cluster-api-controller:0.1.3", "The capi manager image to run")

controlPlane := flag.NewFlagSet("control-plane", flag.ExitOnError)
controlPlaneOpts := new(machineOptions)
Expand Down Expand Up @@ -122,7 +124,6 @@ subcommands are:
cluster - Write a capd cluster object to stdout
example: capdctl cluster -cluster-name my-cluster -namespace my-namespace | kubectl apply -f -
`
}

Expand Down Expand Up @@ -153,14 +154,13 @@ func machineYAML(opts *machineOptions) string {
Namespace: *opts.namespace,
Labels: map[string]string{
"cluster.k8s.io/cluster-name": *opts.clusterName,
"set": *opts.set,
"set": *opts.set,
},
},
Spec: v1alpha1.MachineSpec{
ProviderSpec: v1alpha1.ProviderSpec{},
},
}
// TODO: 🤔
if *opts.set == controlPlaneSet {
machine.Spec.Versions.ControlPlane = *opts.version
}
Expand Down
6 changes: 3 additions & 3 deletions cmd/kind-test/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,11 +48,11 @@ func main() {
if err != nil {
panic(fmt.Sprintf("%+v", err))
}
if _, err := actions.CreateControlPlane(clusterName, ip, version); err != nil {
if _, err := actions.CreateControlPlane(clusterName, inputs[1], ip, version); err != nil {
panic(fmt.Sprintf("%+v", err))
}
case "add-worker":
if _, err := actions.AddWorker(clusterName, version); err != nil {
if _, err := actions.AddWorker(clusterName, inputs[1], version); err != nil {
panic(fmt.Sprintf("%+v", err))
}
case "delete-node":
Expand All @@ -65,7 +65,7 @@ func main() {
panic(fmt.Sprintf("%+v", err))
}
case "add-control-plane":
if _, err := actions.AddControlPlane(clusterName, version); err != nil {
if _, err := actions.AddControlPlane(clusterName, inputs[1], version); err != nil {
panic(fmt.Sprintf("%+v", err))
}
case "set-cluster-name":
Expand Down
49 changes: 42 additions & 7 deletions kind/actions/cluster_actions.go
Original file line number Diff line number Diff line change
Expand Up @@ -243,10 +243,10 @@ func KubeadmJoin(clusterName string, node *nodes.Node) error {
return nil
}

func SetNodeRef(clusterName, nodeName string) error {
func SetNodeProviderRef(clusterName, nodeName string) error {
allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
if err != nil {
return nil
return err
}

node, err := nodes.BootstrapControlPlaneNode(allNodes)
Expand Down Expand Up @@ -274,28 +274,63 @@ func SetNodeRef(clusterName, nodeName string) error {
return nil
}

func RemoveNode(clusterName, nodeName string) error {
func GetNodeRefUID(clusterName, nodeName string) (string, error) {
// k get nodes my-cluster-worker -o custom-columns=UID:.metadata.uid --no-headers
allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
if err != nil {
return nil
return "", err
}

node, err := nodes.BootstrapControlPlaneNode(allNodes)
if err != nil {
return err
return "", err
}

patch := fmt.Sprintf(`{"spec": {"providerID": "docker://%s"}}`, nodeName)
fmt.Println("trying to apply:", patch)
cmd := node.Command(
"kubectl",
"--kubeconfig", "/etc/kubernetes/admin.conf",
"delete",
"get",
"node", nodeName,
"--output=custom-columns=UID:.metadata.uid",
"--no-headers",
)
lines, err := exec.CombinedOutputLines(cmd)
if err != nil {
for _, line := range lines {
fmt.Println(line)
}
return "", errors.Wrap(err, "failed get node ref UID")
}
return strings.TrimSpace(lines[0]), nil
}

func DeleteClusterNode(clusterName, nodeName string) error {
// get all control plane nodes
allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
if err != nil {
return err
}
var node nodes.Node
// pick one that doesn't match the node name we are trying to delete
for _, n := range allNodes {
if n.Name() != nodeName {
node = n
break
}
}
cmd := node.Command(
"kubectl",
"--kubeconfig", "/etc/kubernetes/admin.conf",
"delete", "node", nodeName,
)
lines, err := exec.CombinedOutputLines(cmd)
if err != nil {
for _, line := range lines {
fmt.Println(line)
}
return errors.Wrap(err, "failed to remove node from cluster")
return errors.Wrap(err, "failed update providerID")
}

return nil
Expand Down
Loading

0 comments on commit d180db9

Please sign in to comment.