fix: remove broken kube configs from state

Broke when I redid the cluster CA configuration to fix an expired cert. Can't
figure out how to fix the state to read from the cluster.
This commit is contained in:
Julian Tölle 2023-02-25 23:21:49 +01:00
parent ad3ba9d059
commit 6ea9911f29
11 changed files with 203 additions and 409 deletions

2
k3s_cluster_v2/README.md Normal file
View file

@ -0,0 +1,2 @@
This module unfortunately broke when I updated the CA cert in k3s and now
I can't get the state to work with Terraform.

View file

@ -1,10 +1,3 @@
# kubectl provider, authenticated with the cluster credentials exported by
# the k3s module (mTLS client cert/key against the API endpoint).
provider "kubectl" {
host = module.k3s.kubernetes.api_endpoint
cluster_ca_certificate = module.k3s.kubernetes.cluster_ca_certificate
client_certificate = module.k3s.kubernetes.client_certificate
client_key = module.k3s.kubernetes.client_key
}
provider "github" {
owner = var.github_owner
token = var.github_token
@ -32,79 +25,6 @@ data "flux_sync" "main" {
branch = var.branch
}
# Kubernetes
# Namespace that Flux is installed into. Labels are ignored because Flux
# itself manages labels on this namespace after bootstrap.
resource "kubernetes_namespace" "flux_system" {
metadata {
name = "flux-system"
}
lifecycle {
ignore_changes = [
metadata[0].labels,
]
}
}
# Split the multi-document Flux install YAML into individual manifests.
data "kubectl_file_documents" "install" {
content = data.flux_install.main.content
}
# Split the multi-document Flux sync YAML into individual manifests.
data "kubectl_file_documents" "sync" {
content = data.flux_sync.main.content
}
# Pair every Flux manifest with its decoded form: `data` (parsed YAML, used
# below to build unique for_each keys) and `content` (the raw document that
# is actually applied).
locals {
install = [for v in data.kubectl_file_documents.install.documents : {
data : yamldecode(v)
content : v
}
]
sync = [for v in data.kubectl_file_documents.sync.documents : {
data : yamldecode(v)
content : v
}
]
}
# Apply each Flux install manifest. The for_each key is a lowercase
# "apiVersion/kind/namespace/name" path; compact() drops the namespace
# segment for cluster-scoped objects so keys stay collision-free.
resource "kubectl_manifest" "install" {
for_each = { for v in local.install : lower(join("/", compact([v.data.apiVersion, v.data.kind, lookup(v.data.metadata, "namespace", ""), v.data.metadata.name]))) => v.content }
depends_on = [kubernetes_namespace.flux_system]
yaml_body = each.value
}
# Apply each Flux sync manifest, keyed the same way as the install set
# (lowercase apiVersion/kind/namespace/name, namespace omitted when empty).
resource "kubectl_manifest" "sync" {
for_each = { for v in local.sync : lower(join("/", compact([v.data.apiVersion, v.data.kind, lookup(v.data.metadata, "namespace", ""), v.data.metadata.name]))) => v.content }
depends_on = [kubernetes_namespace.flux_system]
yaml_body = each.value
}
# SSH deploy-key secret Flux uses to pull the Git repository: private key,
# public key, and pinned known_hosts entry. Created only after the Flux
# controllers are installed.
resource "kubernetes_secret" "main" {
depends_on = [kubectl_manifest.install]
metadata {
name = data.flux_sync.main.secret
namespace = data.flux_sync.main.namespace
}
data = {
identity = tls_private_key.main.private_key_pem
"identity.pub" = tls_private_key.main.public_key_pem
known_hosts = local.known_hosts
}
}
# GitHub token consumed by the Flux notification controller (separate,
# narrower token than the one the github provider uses).
resource "kubernetes_secret" "github_notifications" {
metadata {
name = "github"
namespace = data.flux_sync.main.namespace
}
data = {
token = var.github_token_flux_notifications
}
}
# GitHub
resource "github_repository" "main" {
name = var.repository_name

View file

@ -1,128 +0,0 @@
# k3s cluster installed over SSH on Hetzner Cloud servers via the
# xunleii/k3s module. Control-plane nodes are keyed by server name; agents
# get a "<name>_node" key. The built-in cloud controller and traefik are
# disabled so hcloud-cloud-controller-manager and a custom ingress LB can
# take over.
module "k3s" {
  source = "xunleii/k3s/module"

  # FIX: was `depends_on_ = hcloud_server.agents` — a typo'd (unknown)
  # argument instead of the `depends_on` meta-argument, which must also be
  # given as a list.
  depends_on = [hcloud_server.agents]

  k3s_version    = var.install_k3s_version
  cluster_domain = "cluster.local"
  cidr = {
    pods     = "10.42.0.0/16"
    services = "10.43.0.0/16"
  }
  drain_timeout  = "30s"
  managed_fields = ["label", "taint"] // ignore annotations

  global_flags = [
    "--kubelet-arg cloud-provider=external" // required to use https://github.com/hetznercloud/hcloud-cloud-controller-manager
  ]

  servers = {
    for i in range(length(hcloud_server.control_planes)) :
    hcloud_server.control_planes[i].name => {
      # Private network IP is used for the cluster; SSH goes to the public one.
      ip = hcloud_server_network.control_planes[i].ip
      connection = {
        type  = "ssh"
        host  = hcloud_server.control_planes[i].ipv4_address
        agent = true
      }
      flags = [
        "--disable-cloud-controller",
        "--tls-san ${var.domain}",
        # We need to modify the helm release to work with one loadbalancer for api+ingress
        "--disable traefik"
      ]
      annotations = { "server_id" : i } // these annotations will not be managed by this module
    }
  }

  agents = {
    for i in range(length(hcloud_server.agents)) :
    "${hcloud_server.agents[i].name}_node" => {
      name = hcloud_server.agents[i].name
      ip   = hcloud_server_network.agents_network[i].ip
      connection = {
        type = "ssh"
        host = hcloud_server.agents[i].ipv4_address
      }
      labels = {}
      taints = {}
    }
  }
}
# kubernetes provider, authenticated with the admin credentials exported by
# the k3s module (mTLS client cert/key against the API endpoint).
provider "kubernetes" {
host = module.k3s.kubernetes.api_endpoint
cluster_ca_certificate = module.k3s.kubernetes.cluster_ca_certificate
client_certificate = module.k3s.kubernetes.client_certificate
client_key = module.k3s.kubernetes.client_key
}
# Service account whose token is read below (sa_credentials) and exported
# in the `kubernetes` output. Waits for the cluster to be reachable.
resource "kubernetes_service_account" "admin" {
depends_on = [module.k3s.kubernetes_ready]
metadata {
name = "admin"
namespace = "default"
}
}
# Bind the admin service account to the built-in cluster-admin ClusterRole,
# granting it full cluster access.
resource "kubernetes_cluster_role_binding" "admin" {
depends_on = [module.k3s.kubernetes_ready]
metadata {
name = "admin"
}
subject {
kind = "ServiceAccount"
name = "admin"
namespace = "default"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "cluster-admin"
}
}
# Read the admin service account's auto-generated token secret.
# NOTE(review): `default_secret_name` relies on auto-created SA token
# secrets, which Kubernetes 1.24+ no longer provisions — confirm against
# the installed k3s version.
data "kubernetes_secret" "sa_credentials" {
metadata {
name = kubernetes_service_account.admin.default_secret_name
namespace = "default"
}
}
## hcloud-cloud-controller-manager is necessary for cluster bootstrap
# Fetch the upstream hcloud-cloud-controller-manager deploy manifest
# (pinned to v1.12.1, networks variant) at plan time.
data "http" "hcloud_cloud_controller_manager" {
url = "https://raw.githubusercontent.com/hetznercloud/hcloud-cloud-controller-manager/v1.12.1/deploy/ccm-networks.yaml"
}
# Split the fetched CCM YAML on document separators and drop the first
# element, which holds only a leading comment.
# NOTE(review): `.body` is deprecated in hashicorp/http >= 3.0 in favor of
# `.response_body` — confirm the pinned provider version before upgrading.
locals {
hccm_all_manifests = split("---", data.http.hcloud_cloud_controller_manager.body)
// first element is only comment
hccm_actual_manifests = slice(local.hccm_all_manifests, 1, length(local.hccm_all_manifests))
}
# Apply every CCM manifest document; for_each over the raw YAML strings,
# each decoded into an object for the kubernetes provider.
resource "kubernetes_manifest" "hcloud_cloud_controller_manager" {
for_each = toset(
local.hccm_actual_manifests
)
manifest = yamldecode(each.key)
}
# API token and network ID the hcloud cloud-controller-manager reads from
# the "hcloud" secret in kube-system.
resource "kubernetes_secret" "hcloud_token" {
metadata {
name = "hcloud"
namespace = "kube-system"
}
data = {
token = var.hcloud_ccm_token
network = hcloud_network.k3s.id
}
}

View file

@ -39,12 +39,18 @@ resource "hcloud_load_balancer_network" "k3s" {
}
# FIX: removed the orphan duplicate header `resource "hcloud_rdns" "k3s" {`
# that sat directly above this block (a leftover of renaming the resource
# to "k3s_ipv4") — an unterminated second block header is invalid HCL.
# Reverse DNS (PTR) for the load balancer's IPv4 address.
resource "hcloud_rdns" "k3s_ipv4" {
  load_balancer_id = hcloud_load_balancer.k3s.id
  ip_address       = hcloud_load_balancer.k3s.ipv4
  dns_ptr          = var.domain
}
# Reverse DNS (PTR) for the load balancer's IPv6 address, pointing back at
# the cluster domain.
resource "hcloud_rdns" "k3s_ipv6" {
dns_ptr = var.domain
ip_address = hcloud_load_balancer.k3s.ipv6
load_balancer_id = hcloud_load_balancer.k3s.id
}
### LB Ingress
resource "hcloud_load_balancer_service" "ingress_https" {
@ -60,3 +66,30 @@ resource "hcloud_load_balancer_service" "ingress_http" {
listen_port = 80
destination_port = 32080
}
### Domain
# A record for the cluster domain pointing at the load balancer's IPv4.
# Short TTL (60s) keeps failover/IP changes fast to propagate.
resource "hetznerdns_record" "ipv4" {
zone_id = var.dns_zone_id
name = var.domain
value = hcloud_load_balancer.k3s.ipv4
type = "A"
ttl = 60
}
# AAAA record for the cluster domain pointing at the load balancer's IPv6.
# Short TTL (60s) keeps failover/IP changes fast to propagate.
resource "hetznerdns_record" "ipv6" {
zone_id = var.dns_zone_id
name = var.domain
value = hcloud_load_balancer.k3s.ipv6
type = "AAAA"
ttl = 60
}
# Wildcard CNAME: every subdomain (*.domain) resolves via the apex record,
# so all ingress hostnames follow the load balancer automatically.
resource "hetznerdns_record" "wildcard" {
# *.domain CNAME domain
type = "CNAME"
name = "*"
value = var.domain
ttl = 60
zone_id = var.dns_zone_id
}

View file

@ -1,18 +0,0 @@
# Pass through the k3s module's human-readable cluster summary.
output "summary" {
value = module.k3s.summary
}
# Admin credentials for the cluster: SA token, CA cert, and the public API
# endpoint on the cluster domain. Marked sensitive to keep the token out of
# plan/apply output.
output "kubernetes" {
description = "Authentication credentials of Kubernetes (full administrator)."
value = {
token = data.kubernetes_secret.sa_credentials.data.token
cluster_ca_certificate = module.k3s.kubernetes.cluster_ca_certificate
api_endpoint = "https://${var.domain}:6443"
}
sensitive = true
}
# Pass through the k3s module's readiness handle so downstream resources
# can depend on a fully provisioned cluster.
output "kubernetes_ready" {
description = "Dependency endpoint to synchronize k3s installation and provisioning."
value = module.k3s.kubernetes_ready
}

View file

@ -41,6 +41,10 @@ variable "domain" {
type = string
}
# Hetzner DNS zone the hetznerdns_record resources are created in.
variable "dns_zone_id" {
type = string
}
variable "install_k3s_version" {
type = string
default = "v1.22.4+k3s1"

View file

@ -6,6 +6,9 @@ terraform {
hcloud = {
source = "hetznercloud/hcloud"
}
hetznerdns = {
source = "timohirt/hetznerdns"
}
tls = {
source = "hashicorp/tls"
}