deploy with k3s

Julian Tölle 2020-04-04 23:23:13 +02:00
parent 0f08cfb0d2
commit 2863062e44
26 changed files with 456 additions and 207 deletions

2
.gitignore vendored

@@ -2,3 +2,5 @@ keys/id_terraform*
 credentials.tfvars
 terraform.tfstate*
 .terraform
+kubeconfig.yaml

Makefile

@@ -17,9 +17,15 @@ lint: init
 	$(VALIDATE) services/bitwarden
 	$(VALIDATE) .

-init: keys
+init: keys/id_terraform
 	$(TF) init

-keys: keys/id_terraform
+keys/id_terraform:
 	echo "No private key found! Generating Terraform SSH Keys."
-	./bootstrap-keys.sh
+	./scripts/bootstrap-keys.sh
+
+kubeconfig: keys/id_terraform
+	scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i keys/id_terraform root@`terraform output cluster_public_ip`:/etc/rancher/k3s/k3s.yaml ./kubeconfig.yaml
+	sed -i "s/127.0.0.1/`terraform output cluster_public_ip`/g" ./kubeconfig.yaml
+	sed -i "s/default/`terraform output cluster_name`/g" ./kubeconfig.yaml

bootstrap-keys.sh

@@ -1,2 +0,0 @@
ssh-keygen -t rsa -C "terraform@narando.de" -f keys/id_terraform
chmod 600 keys/id_terraform*


@@ -1,32 +0,0 @@
resource "kubernetes_service_account" "dashboard" {
  metadata {
    name      = "dashboard-admin"
    namespace = "kube-system"

    labels = {
      app = "dashboard"
    }
  }
}

resource "kubernetes_cluster_role_binding" "dashboard" {
  metadata {
    name = "dashboard-admin"

    labels = {
      app = "dashboard"
    }
  }

  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "cluster-admin"
  }

  subject {
    kind      = "ServiceAccount"
    name      = "dashboard-admin"
    namespace = "kube-system"
  }
}

66
k3s_cluster/cloud_init.tf Normal file

@@ -0,0 +1,66 @@
locals {
  install_k3s_version  = var.install_k3s_version
  k3s_storage_endpoint = "sqlite"
  k3s_cluster_secret   = var.k3s_cluster_secret != null ? var.k3s_cluster_secret : random_password.k3s_cluster_secret.result

  k3s_tls_san = "--tls-san ${var.domain}"

  floating_ip_use_netdata = var.server_image == "ubuntu-20.04"
}

resource "random_password" "k3s_cluster_secret" {
  length  = 30
  special = false
}

data "template_cloudinit_config" "k3s_server" {
  gzip          = false
  base64_encode = false

  # Main cloud-config configuration file.
  part {
    filename     = "init.cfg"
    content_type = "text/cloud-config"
    content      = templatefile("${path.module}/files/cloud-config-base.yaml", {})
  }

  part {
    content_type = "text/x-shellscript"
    content = templatefile("${path.module}/files/setup-floating-ip.sh", {
      floating_ip = hcloud_floating_ip.server.ip_address,
      use_netdata = local.floating_ip_use_netdata
    })
  }

  part {
    content_type = "text/x-shellscript"
    content = templatefile("${path.module}/files/k3s-install.sh", {
      is_k3s_server       = true,
      install_k3s_version = local.install_k3s_version,
      k3s_cluster_secret  = local.k3s_cluster_secret,
      k3s_url             = hcloud_floating_ip.server.ip_address,
      k3s_tls_san         = local.k3s_tls_san,
    })
  }
}

data "template_cloudinit_config" "k3s_agent" {
  gzip          = false
  base64_encode = false

  # Main cloud-config configuration file.
  part {
    filename     = "init.cfg"
    content_type = "text/cloud-config"
    content      = templatefile("${path.module}/files/cloud-config-base.yaml", {})
  }

  part {
    content_type = "text/x-shellscript"
    content = templatefile("${path.module}/files/k3s-install.sh", {
      is_k3s_server       = false,
      install_k3s_version = local.install_k3s_version,
      k3s_cluster_secret  = local.k3s_cluster_secret,
      k3s_url             = hcloud_floating_ip.server.ip_address,
      k3s_tls_san         = local.k3s_tls_san,
    })
  }
}

k3s_cluster/files/cloud-config-base.yaml

@@ -0,0 +1,5 @@
#cloud-config
runcmd:
  - apt-get update
  - apt-get install -y software-properties-common
  - DEBIAN_FRONTEND=noninteractive apt-get upgrade -y

k3s_cluster/files/k3s-install.sh

@@ -0,0 +1,21 @@
#!/bin/bash

until ( \
  curl -sfL https://get.k3s.io | \
    INSTALL_K3S_VERSION='v${install_k3s_version}' \
    K3S_CLUSTER_SECRET='${k3s_cluster_secret}' \
    INSTALL_K3S_EXEC='%{ if is_k3s_server } ${k3s_tls_san} %{ endif }' \
    %{ if !is_k3s_server } K3S_URL='https://${k3s_url}:6443'%{ endif } \
    sh - \
); do
  echo 'k3s did not install correctly'
  sleep 2
done

%{ if is_k3s_server }
until kubectl get pods -A | grep 'Running';
do
  echo 'Waiting for k3s startup'
  sleep 5
done
%{ endif }
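For illustration only: with the values templated in cloud_init.tf and the defaults in this commit (install_k3s_version 1.17.4+k3s1, domain c.apricote.de), the server-side invocation roughly expands to the following sketch; the cluster secret is the generated random_password value:

    curl -sfL https://get.k3s.io | \
      INSTALL_K3S_VERSION='v1.17.4+k3s1' \
      K3S_CLUSTER_SECRET='<generated 30-character secret>' \
      INSTALL_K3S_EXEC=' --tls-san c.apricote.de ' \
      sh -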

k3s_cluster/files/k8s-apps/cert-manager-crds.sh

@@ -0,0 +1,5 @@
#!/bin/bash
MANIFEST_FILE=https://github.com/jetstack/cert-manager/releases/download/${version}/cert-manager.crds.yaml
K3S_MANIFEST_FOLDER=${k3s_manifest_folder}
curl -sfL $MANIFEST_FILE > $K3S_MANIFEST_FOLDER/cert-manager-crds.yml

k3s_cluster/files/k8s-apps/cert-manager.yaml

@@ -0,0 +1,39 @@
apiVersion: v1
kind: Namespace
metadata:
  name: cert-manager
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: cert-manager
  namespace: kube-system
spec:
  chart: cert-manager
  repo: https://charts.jetstack.io
  version: ${version}
  targetNamespace: cert-manager
  set:
    ingressShim.defaultIssuerName: "letsencrypt-prod"
    ingressShim.defaultIssuerKind: "ClusterIssuer"
---
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    # You must replace this email address with your own.
    # Let's Encrypt will use this to contact you about expiring
    # certificates, and issues related to your account.
    email: ${email}
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      # Secret resource that will be used to store the account's private key.
      name: letsencrypt-prod-cluster-issuer-account
    # Add a single challenge solver, HTTP01 using nginx
    solvers:
      - http01:
          ingress: {}

k3s_cluster/files/k8s-apps/hcloud-csi-driver.sh

@@ -0,0 +1,16 @@
#!/bin/bash

MANIFEST_FILE=https://raw.githubusercontent.com/hetznercloud/csi-driver/${version}/deploy/kubernetes/hcloud-csi.yml
K3S_MANIFEST_FOLDER=${k3s_manifest_folder}

curl -sfL $MANIFEST_FILE > $K3S_MANIFEST_FOLDER/hcloud-csi.yml

cat <<EOF > $K3S_MANIFEST_FOLDER/hcloud-csi-token.yml
apiVersion: v1
kind: Secret
metadata:
  name: hcloud-csi
  namespace: kube-system
stringData:
  token: ${token}
EOF

k3s_cluster/files/setup-floating-ip.sh

@@ -0,0 +1,20 @@
#!/bin/bash

%{ if use_netdata }
cat >> /etc/netplan/60-floating.cfg <<- EOM
network:
  version: 2
  ethernets:
    eth0:
      addresses:
        - ${floating_ip}/32
EOM
netplan apply
%{ else }
cat >> /etc/network/interfaces.d/99-floating.cfg <<- EOM
auto eth0:1
iface eth0:1 inet static
  address ${floating_ip}
  netmask 255.255.255.255
EOM
ifdown eth0:1 ; ifup eth0:1
%{ endif }

51
k3s_cluster/k3s.tf Normal file

@@ -0,0 +1,51 @@
locals {
  k3s_manifest_folder = "/var/lib/rancher/k3s/server/manifests"

  manifest_hcloud_csi_driver = templatefile("${path.module}/files/k8s-apps/hcloud-csi-driver.sh", {
    version             = var.hcloud_csi_driver_version
    token               = var.hcloud_csi_driver_token
    k3s_manifest_folder = local.k3s_manifest_folder
  })

  manifest_cert_manager_crds = templatefile("${path.module}/files/k8s-apps/cert-manager-crds.sh", {
    version             = var.cert_manager_version
    k3s_manifest_folder = local.k3s_manifest_folder
  })

  manifest_cert_manager = templatefile("${path.module}/files/k8s-apps/cert-manager.yaml", {
    version = var.cert_manager_version
    email   = var.letsencrypt_email
  })
}

resource null_resource install_manifests {
  triggers = {
    server_id = hcloud_server.server.id

    # File hashes to trigger on update
    hcloud_csi_driver = sha256(local.manifest_hcloud_csi_driver)
    cert_manager_crds = sha256(local.manifest_cert_manager_crds)
    cert_manager      = sha256(local.manifest_cert_manager)
  }

  connection {
    type        = "ssh"
    host        = hcloud_server.server.ipv4_address
    private_key = var.ssh_keys[0]
  }

  provisioner remote-exec {
    inline = [local.manifest_hcloud_csi_driver]
  }

  provisioner remote-exec {
    inline = [local.manifest_cert_manager_crds]
  }

  provisioner file {
    content     = local.manifest_cert_manager
    destination = "${local.k3s_manifest_folder}/cert-manager.yaml"
  }
}
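These scripts lean on the k3s auto-deploy mechanism: manifests dropped into /var/lib/rancher/k3s/server/manifests are applied by the server automatically, and the HelmChart object is picked up by the bundled helm controller. A quick sanity check over SSH on the server node (a sketch, not part of this commit):

    ls /var/lib/rancher/k3s/server/manifests
    kubectl -n kube-system get helmcharts.helm.cattle.io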

93
k3s_cluster/main.tf Executable file

@@ -0,0 +1,93 @@
data tls_public_key default {
  count = length(var.ssh_keys)

  private_key_pem = var.ssh_keys[count.index]
}

resource hcloud_ssh_key default {
  count = length(var.ssh_keys)

  name       = "${var.name}-${count.index}"
  public_key = data.tls_public_key.default[count.index].public_key_openssh
}

resource hcloud_floating_ip server {
  name          = "${var.name}-server"
  home_location = var.server_location
  description   = "Persistent IP for K3s Server. Used in Domain and for Ingress."

  type = "ipv4"
}

resource hcloud_rdns server {
  floating_ip_id = hcloud_floating_ip.server.id
  ip_address     = hcloud_floating_ip.server.ip_address
  dns_ptr        = var.domain
}

resource hcloud_floating_ip_assignment server {
  floating_ip_id = hcloud_floating_ip.server.id
  server_id      = hcloud_server.server.id
}

resource hcloud_server server {
  name        = "${var.name}-server"
  image       = var.server_image
  server_type = var.control_server_type
  location    = var.server_location
  ssh_keys    = hcloud_ssh_key.default.*.id

  connection {
    type        = "ssh"
    host        = self.ipv4_address
    private_key = var.ssh_keys[0]
  }

  user_data = data.template_cloudinit_config.k3s_server.rendered

  provisioner "remote-exec" {
    inline = [
      "cloud-init status --wait",
    ]
  }
}

resource "random_id" "agent" {
  count = var.compute_count

  keepers = {
    image       = var.server_image
    server_type = var.control_server_type
    user_data   = sha256(data.template_cloudinit_config.k3s_agent.rendered)
  }

  byte_length = 3
}

resource hcloud_server agent {
  count = var.compute_count

  name        = "${var.name}-agent-${random_id.agent[count.index].hex}"
  image       = random_id.agent[count.index].keepers.image
  server_type = random_id.agent[count.index].keepers.server_type
  location    = var.server_location
  ssh_keys    = hcloud_ssh_key.default.*.id

  connection {
    type        = "ssh"
    host        = self.ipv4_address
    private_key = var.ssh_keys[0]
  }

  user_data = data.template_cloudinit_config.k3s_agent.rendered

  provisioner "remote-exec" {
    inline = [
      "cloud-init status --wait",
    ]
  }
}

3
k3s_cluster/output.tf Normal file

@@ -0,0 +1,3 @@
output server_public_ip {
  value = hcloud_floating_ip.server.ip_address
}

67
k3s_cluster/variables.tf Normal file

@@ -0,0 +1,67 @@
variable name {
  type = string
}

variable server_image {
  type = string
  # With ubuntu-20.04 k3s crashes on start (v1.17.4+k3s1)
  default = "ubuntu-18.04"
}

variable server_location {
  type = string
}

variable control_server_type {
  type    = string
  default = "cx11"
}

variable compute_server_type {
  type    = string
  default = "cx21"
}

variable compute_count {
  type    = number
  default = 1
}

variable domain {
  type = string
}

variable letsencrypt_email {
  type        = string
  default     = "none@none.com"
  description = "LetsEncrypt email address to use"
}

variable ssh_keys {
  type    = list(string)
  default = []
}

variable install_k3s_version {
  type    = string
  default = "1.17.4+k3s1"
}

variable k3s_cluster_secret {
  type    = string
  default = null
}

variable hcloud_csi_driver_version {
  type    = string
  default = "v1.2.3"
}

variable hcloud_csi_driver_token {
  type = string
}

variable cert_manager_version {
  type    = string
  default = "v0.14.3"
}

11
k3s_cluster/versions.tf Normal file

@@ -0,0 +1,11 @@
terraform {
  required_version = "~> 0.12.0"

  required_providers {
    hcloud   = "~> 1.2"
    tls      = "~> 2.1"
    template = "~> 2.1"
    random   = "~> 2.2"
  }
}

149
main.tf Executable file → Normal file

@@ -1,139 +1,18 @@
-resource hcloud_server control {
-  count = 3
-
-  name        = "control${count.index}"
-  image       = "ubuntu-18.04"
-  server_type = "cx21"
-  ssh_keys    = ["${hcloud_ssh_key.terraform.id}"]
-
-  connection {
-    private_key = "${file("./keys/id_terraform")}"
-  }
-
-  user_data = <<END
-#cloud-config
-package_upgrade: true
-packages:
-  - docker.io
-END
-
-  provisioner "remote-exec" {
-    inline = [
-      "cloud-init status --wait",
-    ]
-  }
-
-  lifecycle {
-    create_before_destroy = false
-  }
-}
-
-resource hcloud_server compute {
-  count = 3
-
-  name        = "compute${count.index}"
-  image       = "ubuntu-18.04"
-  server_type = "cx21"
-  ssh_keys    = ["${hcloud_ssh_key.terraform.id}"]
-
-  connection {
-    private_key = "${file("./keys/id_terraform")}"
-  }
-
-  user_data = <<END
-#cloud-config
-package_upgrade: true
-packages:
-  - docker.io
-END
-
-  provisioner "remote-exec" {
-    inline = [
-      "cloud-init status --wait",
-    ]
-  }
-
-  lifecycle {
-    create_before_destroy = false
-  }
-}
-
-resource rke_cluster "cluster" {
-  services_kube_api {
-    extra_args = {
-      feature-gates = "CSINodeInfo=true,CSIDriverRegistry=true"
-    }
-  }
-
-  services_kubelet {
-    extra_args = {
-      feature-gates = "CSINodeInfo=true,CSIDriverRegistry=true"
-    }
-  }
-
-  addons = <<EOL
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: hcloud-csi
-  namespace: kube-system
-stringData:
-  token: ${var.hcloud_csi_token}
----
-EOL
-
-  addons_include = [
-    "https://raw.githubusercontent.com/kubernetes/csi-api/release-1.13/pkg/crd/manifests/csidriver.yaml",
-    "https://raw.githubusercontent.com/kubernetes/csi-api/release-1.13/pkg/crd/manifests/csinodeinfo.yaml",
-    "https://raw.githubusercontent.com/hetznercloud/csi-driver/master/deploy/kubernetes/hcloud-csi.yml",
-    "https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml",
-  ]
-
-  nodes {
-    address = "${hcloud_server.control.0.ipv4_address}"
-    user    = "root"
-    role    = ["controlplane", "etcd"]
-    ssh_key = "${file("keys/id_terraform")}"
-  }
-
-  nodes {
-    address = "${hcloud_server.control.1.ipv4_address}"
-    user    = "root"
-    role    = ["controlplane", "etcd"]
-    ssh_key = "${file("keys/id_terraform")}"
-  }
-
-  nodes {
-    address = "${hcloud_server.control.2.ipv4_address}"
-    user    = "root"
-    role    = ["controlplane", "etcd"]
-    ssh_key = "${file("keys/id_terraform")}"
-  }
-
-  nodes {
-    address = "${hcloud_server.compute.0.ipv4_address}"
-    user    = "root"
-    role    = ["worker"]
-    ssh_key = "${file("keys/id_terraform")}"
-  }
-
-  nodes {
-    address = "${hcloud_server.compute.1.ipv4_address}"
-    user    = "root"
-    role    = ["worker"]
-    ssh_key = "${file("keys/id_terraform")}"
-  }
-
-  nodes {
-    address = "${hcloud_server.compute.2.ipv4_address}"
-    user    = "root"
-    role    = ["worker"]
-    ssh_key = "${file("keys/id_terraform")}"
-  }
-}
-
-resource local_file kube_cluster_yaml {
-  filename = "${path.root}/kube_config_cluster.yml"
-  content  = "${rke_cluster.cluster.kube_config_yaml}"
-}
+locals {
+  cluster_name = "home-cloud"
+}
+
+module k3s_cluster {
+  source = "./k3s_cluster"
+
+  name                = local.cluster_name
+  server_image        = "ubuntu-18.04"
+  server_location     = "nbg1"
+  control_server_type = "cx11"
+  compute_server_type = "cx21"
+  compute_count       = 1
+
+  domain            = "c.apricote.de"
+  letsencrypt_email = "julian.toelle97+le@gmail.com"
+
+  ssh_keys = [file("./keys/id_terraform")]
+
+  hcloud_csi_driver_token = var.hcloud_csi_driver_token
+}

7
output.tf Normal file

@@ -0,0 +1,7 @@
output cluster_public_ip {
  value = module.k3s_cluster.server_public_ip
}

output cluster_name {
  value = local.cluster_name
}


@@ -2,20 +2,9 @@
 # or using -var="hcloud_token=..." CLI option
 variable "hcloud_token" {}
-variable "hcloud_location" {}

 # Configure the Hetzner Cloud Provider
-provider "hcloud" {
-  version = "~> 1.7.0"
-  token   = "${var.hcloud_token}"
+provider hcloud {
+  version = "~> 1.16.0"
+  token   = var.hcloud_token
 }
-
-#######################
-## Terraform SSH Key ##
-#######################
-
-resource "hcloud_ssh_key" "terraform" {
-  name       = "terraform"
-  public_key = "${file("./keys/id_terraform.pub")}"
-}


@@ -1,9 +0,0 @@
provider "kubernetes" {
  version = "~> 1.6"

  host = "${rke_cluster.cluster.api_server_url}"

  client_certificate     = "${rke_cluster.cluster.client_cert}"
  client_key             = "${rke_cluster.cluster.client_key}"
  cluster_ca_certificate = "${rke_cluster.cluster.ca_crt}"
}


@@ -1,11 +1,12 @@
-provider "null" {
-  version = "~> 1.0"
+provider random {
+  version = "~> 2.2"
 }

-provider "local" {
-  version = "~> 1.2"
+provider tls {
+  version = "~> 2.1"
 }

-provider "template" {
-  version = "~> 1.0"
+provider template {
+  version = "~> 2.1"
 }


@@ -1,3 +0,0 @@
provider "rke" {
  version = "~> 0.11"
}

13
scripts/bootstrap-keys.sh Executable file

@@ -0,0 +1,13 @@
#!/usr/bin/env sh
set -e

CERT_FILE=./keys/id_terraform

if [ -f "$CERT_FILE" ]; then
  echo "$CERT_FILE already exists. To avoid losing data, I will not generate a new SSH key."
else
  echo "Generating a new SSH Key at $CERT_FILE"
  ssh-keygen -t rsa -C "terraform@apricote.de" -m PEM -f $CERT_FILE
  chmod 600 keys/id_terraform*
fi


@@ -1 +0,0 @@
hcloud_location = "nbg1"

3
variables.tf Normal file

@@ -0,0 +1,3 @@
variable hcloud_csi_driver_token {
  type = string
}


@@ -1 +0,0 @@
variable hcloud_csi_token {}