diff --git a/actuators/actuators.go b/actuators/actuators.go
index c1bd5dc..0efd7d5 100644
--- a/actuators/actuators.go
+++ b/actuators/actuators.go
@@ -17,6 +17,7 @@ limitations under the License.
 package actuators
 
 import (
+	"bytes"
 	"fmt"
 	"io/ioutil"
 
@@ -71,6 +72,25 @@ func kubeconfigToSecret(clusterName, namespace string) (*v1.Secret, error) {
 		return nil, errors.WithStack(err)
 	}
 
+	allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	// This is necessary so the management cluster in a container can talk to another container.
+	// They share the same bridged network, and the load balancer does respond on 6443 at its Docker IP;
+	// the *HOST*, however, listens on some random port (the one returned by GetLoadBalancerHostAndPort).
+	lbip, _, err := actions.GetLoadBalancerHostAndPort(allNodes)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	lines := bytes.Split(data, []byte("\n"))
+	for i, line := range lines {
+		if bytes.Contains(line, []byte("https://")) {
+			lines[i] = []byte(fmt.Sprintf("    server: https://%s:%d", lbip, 6443))
+		}
+	}
+
 	// write it to a secret
 	return &v1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
@@ -80,7 +100,7 @@ func kubeconfigToSecret(clusterName, namespace string) (*v1.Secret, error) {
 		},
 		Data: map[string][]byte{
 			// TODO pull in constant from cluster api
-			"value": data,
+			"value": bytes.Join(lines, []byte("\n")),
 		},
 	}, nil
 }
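The rewrite above is a small, self-contained transformation: every kubeconfig line containing "https://" is repointed at the load balancer's container IP on the fixed port 6443. A minimal standalone sketch of that loop (the helper name, sample kubeconfig fragment, and IP are illustrative, not from the repo):

package main

import (
	"bytes"
	"fmt"
)

// rewriteServer points every kubeconfig "server:" line at ip:port, the same
// way kubeconfigToSecret does: any line containing "https://" is assumed to
// be the server entry and is replaced wholesale.
func rewriteServer(kubeconfig []byte, ip string, port int) []byte {
	lines := bytes.Split(kubeconfig, []byte("\n"))
	for i, line := range lines {
		if bytes.Contains(line, []byte("https://")) {
			lines[i] = []byte(fmt.Sprintf("    server: https://%s:%d", ip, port))
		}
	}
	return bytes.Join(lines, []byte("\n"))
}

func main() {
	sample := []byte("clusters:\n- cluster:\n    server: https://127.0.0.1:53479\n  name: kind")
	fmt.Printf("%s\n", rewriteServer(sample, "172.17.0.3", 6443)) // illustrative container IP
}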
m.Log.Error(err, "Error getting node reference ID") - return err - } - return m.save(old, machine, getNodeRef(worker.Name(), nodeUID)) + return m.save(old, machine) } // Delete returns nil when the machine no longer exists or when a successful delete has happened. @@ -201,7 +185,7 @@ func (m *Machine) Exists(ctx context.Context, cluster *clusterv1.Cluster, machin } // patches the object and saves the status. -func (m *Machine) save(oldMachine, newMachine *clusterv1.Machine, noderef *apicorev1.ObjectReference) error { +func (m *Machine) save(oldMachine, newMachine *clusterv1.Machine) error { m.Log.Info("updating machine") p, err := patch.NewJSONPatch(oldMachine, newMachine) if err != nil { @@ -222,19 +206,9 @@ func (m *Machine) save(oldMachine, newMachine *clusterv1.Machine, noderef *apico } m.Log.Info("updated machine") } - // set the noderef after so we don't try and patch it in during the first update - newMachine.Status.NodeRef = noderef - if _, err := m.ClusterAPI.Machines(oldMachine.Namespace).UpdateStatus(newMachine); err != nil { - m.Log.Error(err, "Error setting node reference") - return err - } return nil } -func providerID(name string) string { - return fmt.Sprintf("docker:////%s", name) -} - // CAPIroleToKindRole converts a CAPI role to kind role // TODO there is a better way to do this. func CAPIroleToKindRole(CAPIRole string) string { @@ -243,12 +217,3 @@ func CAPIroleToKindRole(CAPIRole string) string { } return CAPIRole } - -func getNodeRef(name, uid string) *apicorev1.ObjectReference { - return &apicorev1.ObjectReference{ - Kind: "Node", - APIVersion: apicorev1.SchemeGroupVersion.String(), - Name: name, - UID: types.UID(uid), - } -} diff --git a/cmd/capd-manager/main.go b/cmd/capd-manager/main.go index f015a6e..f640c80 100644 --- a/cmd/capd-manager/main.go +++ b/cmd/capd-manager/main.go @@ -17,6 +17,7 @@ limitations under the License. 
diff --git a/cmd/capd-manager/main.go b/cmd/capd-manager/main.go
index f015a6e..f640c80 100644
--- a/cmd/capd-manager/main.go
+++ b/cmd/capd-manager/main.go
@@ -17,6 +17,7 @@ limitations under the License.
 package main
 
 import (
+	"flag"
 	"fmt"
 	"time"
 
@@ -35,6 +36,9 @@ import (
 )
 
 func main() {
+	flag.Set("v", "0")
+	flag.Parse()
+
 	cfg, err := config.GetConfig()
 	if err != nil {
 		panic(err)
@@ -67,6 +71,7 @@ func main() {
 	machineLogger := logger.Log{}
 	machineLogger.Logger = klogr.New().WithName("[machine-actuator]")
+
 	machineActuator := actuators.Machine{
 		Core:       k8sclientset.CoreV1(),
 		ClusterAPI: cs.ClusterV1alpha1(),
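Calling flag.Set before flag.Parse only seeds a default verbosity; a -v passed on the command line still wins. Assuming the "v" flag is registered on the default FlagSet (klog does this once its flags are initialized), the pattern behaves like this standalone sketch with a locally defined stand-in flag:

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Stand-in for klog's -v flag, registered on the default FlagSet.
	v := flag.Int("v", 2, "log verbosity")

	// Seed a quieter default before parsing; "./prog -v 4" still overrides it.
	flag.Set("v", "0")
	flag.Parse()

	fmt.Println("verbosity:", *v)
}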
diff --git a/cmd/capdctl/main.go b/cmd/capdctl/main.go
index 3ef6f42..53c43cf 100644
--- a/cmd/capdctl/main.go
+++ b/cmd/capdctl/main.go
@@ -277,7 +277,8 @@ func makeManagementCluster(clusterName, capiVersion, capdImage, capiImageOverrid
 		panic(err)
 	}
 	defer os.Remove(f.Name())
-	crds, err := getCRDs(capiVersion)
+	fmt.Println("Downloading CRDs for CAPI version", capiVersion)
+	crds, err := getCRDs(capiVersion, capiImage)
 	if err != nil {
 		panic(err)
 	}
@@ -285,7 +286,8 @@ func makeManagementCluster(clusterName, capiVersion, capdImage, capiImageOverrid
 	fmt.Fprintln(f, "---")
 	fmt.Fprintln(f, capdRBAC)
 	fmt.Fprintln(f, "---")
-	fmt.Fprintln(f, getCAPDPlane(capdImage, capiImage))
+	fmt.Fprintln(f, getCAPDPlane(capdImage))
+	fmt.Println("Applying the control plane", f.Name())
 	cmd := exec.Command("kubectl", "apply", "-f", f.Name())
 	cmd.SetEnv(fmt.Sprintf("KUBECONFIG=%s/.kube/kind-config-%s", os.Getenv("HOME"), clusterName))
 	cmd.SetStdout(os.Stdout)
@@ -295,8 +297,8 @@ func makeManagementCluster(clusterName, capiVersion, capdImage, capiImageOverrid
 	}
 }
 
-func getCAPDPlane(capdImage, capiImage string) string {
-	return fmt.Sprintf(capiPlane, capdImage, capiImage)
+func getCAPDPlane(capdImage string) string {
+	return fmt.Sprintf(capiPlane, capdImage)
 }
 
 var capiPlane = `
@@ -307,13 +309,6 @@ metadata:
     controller-tools.k8s.io: "1.0"
   name: docker-provider-system
 ---
-apiVersion: v1
-kind: Namespace
-metadata:
-  labels:
-    controller-tools.k8s.io: "1.0"
-  name: cluster-api-system
----
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
@@ -362,47 +357,12 @@ spec:
       - effect: NoExecute
         key: node.alpha.kubernetes.io/unreachable
        operator: Exists
----
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  labels:
-    control-plane: controller-manager
-    controller-tools.k8s.io: "1.0"
-  name: cluster-api-controller-manager
-  namespace: cluster-api-system
-spec:
-  selector:
-    matchLabels:
-      control-plane: controller-manager
-      controller-tools.k8s.io: "1.0"
-  serviceName: cluster-api-controller-manager-service
-  template:
-    metadata:
-      labels:
-        control-plane: controller-manager
-        controller-tools.k8s.io: "1.0"
-    spec:
-      containers:
-      - command:
-        - /manager
-        image: %s
-        name: manager
-      tolerations:
-      - effect: NoSchedule
-        key: node-role.kubernetes.io/master
-      - key: CriticalAddonsOnly
-        operator: Exists
-      - effect: NoExecute
-        key: node.alpha.kubernetes.io/notReady
-        operator: Exists
-      - effect: NoExecute
-        key: node.alpha.kubernetes.io/unreachable
-        operator: Exists
 `
 
-func getCRDs(version string) (string, error) {
-	crds := []string{"crds", "rbac"}
+// getCRDs should actually use kustomize to correctly build the manager yaml.
+// HACK: this is a hand-rolled stand-in until then.
+func getCRDs(version, capiImage string) (string, error) {
+	crds := []string{"crds", "rbac", "manager"}
 
 	releaseCode := fmt.Sprintf("https://github.com/kubernetes-sigs/cluster-api/archive/%s.tar.gz", version)
 
 	resp, err := http.Get(releaseCode)
@@ -434,10 +394,30 @@ func getCRDs(version string) (string, error) {
 			continue
 		case tar.TypeReg:
 			for _, crd := range crds {
+				// Skip the kustomization files for now; we'd like to use kustomize in the future.
 				if strings.HasSuffix(header.Name, "kustomization.yaml") {
 					continue
 				}
+				// This is a poor person's kustomize: rewrite manager.yaml line by
+				// line, pointing the manager image at the one we were given.
+				if strings.HasSuffix(header.Name, "manager.yaml") {
+					var managerBuf bytes.Buffer
+					io.Copy(&managerBuf, tgz)
+					lines := strings.Split(managerBuf.String(), "\n")
+					for _, line := range lines {
+						if strings.Contains(line, "image:") {
+							buf.WriteString(strings.Replace(line, "image: controller:latest", fmt.Sprintf("image: %s", capiImage), 1))
+							buf.WriteString("\n")
+							continue
+						}
+						buf.WriteString(line)
+						buf.WriteString("\n")
+					}
+				}
+
+				// These files don't need kustomize at all. For manager.yaml, tgz is
+				// already drained above, so this match only appends the "---" separator.
 				if strings.Contains(header.Name, fmt.Sprintf("config/%s/", crd)) {
 					io.Copy(&buf, tgz)
 					fmt.Fprintln(&buf, "---")
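The manager.yaml rewrite above is easy to exercise in isolation. A minimal sketch of the same substitution (the function name, sample manifest, and image are illustrative):

package main

import (
	"fmt"
	"strings"
)

// setManagerImage replaces kustomize's "controller:latest" placeholder with a
// concrete image, line by line, the same way getCRDs does it.
func setManagerImage(manifest, image string) string {
	var b strings.Builder
	for _, line := range strings.Split(manifest, "\n") {
		if strings.Contains(line, "image:") {
			line = strings.Replace(line, "image: controller:latest", fmt.Sprintf("image: %s", image), 1)
		}
		b.WriteString(line)
		b.WriteString("\n")
	}
	return b.String()
}

func main() {
	manifest := "containers:\n- command:\n  - /manager\n  image: controller:latest\n  name: manager"
	fmt.Print(setManagerImage(manifest, "example.com/cluster-api/manager:v0.1.0")) // illustrative image
}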
diff --git a/kind/actions/cluster_actions.go b/kind/actions/cluster_actions.go
index 042b39d..9b9176c 100644
--- a/kind/actions/cluster_actions.go
+++ b/kind/actions/cluster_actions.go
@@ -143,7 +143,7 @@ func KubeadmConfig(node *nodes.Node, clusterName, lbip string) error {
 func KubeadmInit(clusterName, version string) error {
 	allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
 	if err != nil {
-		return nil
+		return err
 	}
 
 	node, err := nodes.BootstrapControlPlaneNode(allNodes)
@@ -179,7 +179,7 @@ func KubeadmInit(clusterName, version string) error {
 	}
 
 	// save the kubeconfig on the host with the loadbalancer endpoint
-	hostPort, err := getLoadBalancerPort(allNodes)
+	_, hostPort, err := GetLoadBalancerHostAndPort(allNodes)
 	if err != nil {
 		return errors.Wrap(err, "failed to get kubeconfig from node")
 	}
@@ -270,7 +270,7 @@ func SetNodeProviderRef(clusterName, nodeName string) error {
 		return err
 	}
 
-	patch := fmt.Sprintf(`{"spec": {"providerID": "docker://%s"}}`, nodeName)
+	patch := fmt.Sprintf(`{"spec": {"providerID": "%s"}}`, ProviderID(nodeName))
 	fmt.Println("trying to apply:", patch)
 	cmd := node.Command(
 		"kubectl",
@@ -290,39 +290,6 @@ func SetNodeProviderRef(clusterName, nodeName string) error {
 	return nil
 }
 
-// GetNodeRefUID returns the node reference UID
-func GetNodeRefUID(clusterName, nodeName string) (string, error) {
-	// k get nodes my-cluster-worker -o custom-columns=UID:.metadata.uid --no-headers
-	allNodes, err := nodes.List(fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName))
-	if err != nil {
-		return "", err
-	}
-
-	node, err := nodes.BootstrapControlPlaneNode(allNodes)
-	if err != nil {
-		return "", err
-	}
-
-	patch := fmt.Sprintf(`{"spec": {"providerID": "docker://%s"}}`, nodeName)
-	fmt.Println("trying to apply:", patch)
-	cmd := node.Command(
-		"kubectl",
-		"--kubeconfig", "/etc/kubernetes/admin.conf",
-		"get",
-		"node", nodeName,
-		"--output=custom-columns=UID:.metadata.uid",
-		"--no-headers",
-	)
-	lines, err := exec.CombinedOutputLines(cmd)
-	if err != nil {
-		for _, line := range lines {
-			fmt.Println(line)
-		}
-		return "", errors.Wrap(err, "failed get node ref UID")
-	}
-	return strings.TrimSpace(lines[0]), nil
-}
-
 // DeleteClusterNode will remove the kubernetes node from the list of nodes (during a kubectl get nodes).
 func DeleteClusterNode(clusterName, nodeName string) error {
 	// get all control plane nodes
@@ -379,3 +346,8 @@ func KubeadmReset(clusterName, nodeName string) error {
 
 	return nil
 }
+
+// ProviderID formats the provider ID to set on a node.
+func ProviderID(name string) string {
+	return fmt.Sprintf("docker:////%s", name)
+}
diff --git a/kind/actions/kind.go b/kind/actions/kind.go
index 714e194..3264455 100644
--- a/kind/actions/kind.go
+++ b/kind/actions/kind.go
@@ -236,13 +236,18 @@ func ListControlPlanes(clusterName string) ([]nodes.Node, error) {
 		fmt.Sprintf("label=%s=%s", constants.NodeRoleKey, constants.ControlPlaneNodeRoleValue))
 }
 
-// getLoadBalancerPort returns the port on the host on which the APIServer is exposed
-func getLoadBalancerPort(allNodes []nodes.Node) (int32, error) {
+// GetLoadBalancerHostAndPort returns the load balancer's container IP and the host port on which the APIServer is exposed
+func GetLoadBalancerHostAndPort(allNodes []nodes.Node) (string, int32, error) {
 	node, err := nodes.ExternalLoadBalancerNode(allNodes)
 	if err != nil {
-		return 0, err
+		return "", 0, err
 	}
-	return node.Ports(6443)
+	ipv4, _, err := node.IP()
+	if err != nil {
+		return "", 0, err
+	}
+	port, err := node.Ports(6443)
+	return ipv4, port, err
 }
 
 // matches kubeconfig server entry like:
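Both return values of GetLoadBalancerHostAndPort end up used: KubeadmInit keeps the host port for the kubeconfig saved on the host, while kubeconfigToSecret (first file above) wants the container IP plus the fixed 6443. A worked example of the two endpoint shapes (both values are illustrative):

package main

import "fmt"

func main() {
	lbIP := "172.17.0.3"     // what node.IP() might return for the load balancer
	hostPort := int32(53479) // what node.Ports(6443) might return on the host

	// In-network clients (other containers, the management cluster's secret):
	fmt.Printf("server: https://%s:%d\n", lbIP, 6443)
	// On-host clients (the kubeconfig written under $HOME):
	fmt.Printf("server: https://127.0.0.1:%d\n", hostPort)
}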