init: copied modules from lawndale-infra

This commit is contained in:
2022-05-26 00:40:29 +02:00
commit 414feb48ee
39 changed files with 1435 additions and 0 deletions

View File

@@ -0,0 +1,39 @@
# ConfigMap consumed by the flannel DaemonSet: the CNI plugin chain and the
# overlay network settings, rendered to JSON from Terraform values.
resource "kubernetes_config_map" "this" {
  metadata {
    name      = "kube-flannel-cfg"
    namespace = var.namespace
    labels = {
      app       = "flannel"
      "k8s-app" = "cni"
    }
  }

  data = {
    # CNI conflist the DaemonSet's init container installs into
    # /etc/cni/net.d; flannel delegates to the bridge plugin, portmap adds
    # hostPort support.
    "cni-conf.json" = jsonencode({
      name       = "cbr0"
      cniVersion = "0.3.1"
      plugins = [
        {
          type = "flannel"
          delegate = {
            hairpinMode      = true
            isDefaultGateway = true
          }
        },
        {
          type = "portmap"
          capabilities = {
            portMappings = true
          }
        },
      ]
    })

    # flanneld's own network configuration: pod network CIDR plus the VXLAN
    # backend parameters.
    "net-conf.json" = jsonencode({
      Network = var.cluster_cidr
      Backend = {
        Type = "vxlan"
        VNI  = var.vxlan_id
        Port = var.vxlan_port
      }
    })
  }
}

View File

@@ -0,0 +1,167 @@
# DaemonSet running the flannel VXLAN agent (flanneld) on every matching
# node. Two init containers stage the CNI plugin binary and its config onto
# the host before the long-running flanneld container starts.
resource "kubernetes_daemonset" "this" {
  metadata {
    name      = var.daemonset_name
    namespace = var.namespace
    labels = {
      app       = var.daemonset_name
      "k8s-app" = "cni"
    }
  }
  spec {
    selector {
      match_labels = {
        app       = var.daemonset_name
        "k8s-app" = "cni"
      }
    }
    template {
      metadata {
        labels = {
          app       = var.daemonset_name
          "k8s-app" = "cni"
        }
      }
      spec {
        # Only schedule onto nodes whose kubernetes.io/os label is in
        # var.node_selector_os (default: ["linux"]).
        affinity {
          node_affinity {
            required_during_scheduling_ignored_during_execution {
              node_selector_term {
                match_expressions {
                  key      = "kubernetes.io/os"
                  operator = "In"
                  values   = var.node_selector_os
                }
              }
            }
          }
        }
        # The agent manages the node's networking: run in the host network
        # namespace, at node-critical priority, tolerating all NoSchedule
        # taints so it also lands on not-yet-ready nodes.
        host_network        = true
        priority_class_name = "system-node-critical"
        toleration {
          operator = "Exists"
          effect   = "NoSchedule"
        }
        service_account_name = kubernetes_service_account.this.metadata.0.name
        # Stage 1: copy the flannel CNI plugin binary onto the host.
        init_container {
          name    = "install-cni-plugin"
          image   = var.flannel_cni_plugin_image
          command = ["cp"]
          args    = ["-f", "/flannel", "/opt/cni/bin/flannel"]
          volume_mount {
            name       = "cni-plugin"
            mount_path = "/opt/cni/bin"
          }
        }
        # Stage 2: install the CNI conflist (from the ConfigMap volume) into
        # the host's CNI configuration directory.
        init_container {
          name    = "install-cni"
          image   = var.flannel_image
          command = ["cp"]
          args    = ["-f", "/etc/kube-flannel/cni-conf.json", "/etc/cni/net.d/10-flannel.conflist"]
          volume_mount {
            name       = "cni"
            mount_path = "/etc/cni/net.d"
          }
          volume_mount {
            name       = "flannel-cfg"
            mount_path = "/etc/kube-flannel/"
          }
        }
        # The long-running flanneld process itself.
        container {
          name    = "kube-flannel"
          image   = var.flannel_image
          command = ["/opt/bin/flanneld"]
          args    = ["--ip-masq", "--kube-subnet-mgr"]
          resources {
            requests = {
              cpu    = "100m"
              memory = "50Mi"
            }
            limits = {
              cpu    = "100m"
              memory = "50Mi"
            }
          }
          # Unprivileged, but NET_ADMIN/NET_RAW are required to manage
          # routes and the VXLAN device.
          security_context {
            privileged = false
            capabilities {
              add = ["NET_ADMIN", "NET_RAW"]
            }
          }
          env {
            name = "POD_NAME"
            value_from {
              field_ref {
                field_path = "metadata.name"
              }
            }
          }
          env {
            name = "POD_NAMESPACE"
            value_from {
              field_ref {
                field_path = "metadata.namespace"
              }
            }
          }
          env {
            name  = "EVENT_QUEUE_DEPTH"
            value = "5000"
          }
          volume_mount {
            name       = "run"
            mount_path = "/run/flannel"
          }
          volume_mount {
            name       = "flannel-cfg"
            mount_path = "/etc/kube-flannel/"
          }
          volume_mount {
            name       = "xtables-lock"
            mount_path = "/run/xtables.lock"
          }
        }
        volume {
          name = "run"
          host_path {
            path = "/run/flannel"
          }
        }
        volume {
          name = "cni-plugin"
          host_path {
            path = "/opt/cni/bin"
          }
        }
        volume {
          name = "cni"
          host_path {
            path = "/etc/cni/net.d"
          }
        }
        volume {
          name = "flannel-cfg"
          config_map {
            name = kubernetes_config_map.this.metadata.0.name
          }
        }
        # Shared iptables lock file; FileOrCreate so the first writer on a
        # fresh node creates it.
        volume {
          name = "xtables-lock"
          host_path {
            path = "/run/xtables.lock"
            type = "FileOrCreate"
          }
        }
      }
    }
  }
}

View File

@@ -0,0 +1,23 @@
# Expose the managed resources so callers can reference their attributes or
# declare explicit dependencies on them.
output "service_account" {
  description = "Service account the flannel DaemonSet runs under"
  value       = kubernetes_service_account.this
}
output "cluster_role" {
  description = "Cluster role created for flannel (empty list when create_cluster_role is false, since the resource uses count)"
  value       = kubernetes_cluster_role.this
}
output "cluster_role_binding" {
  description = "Binding of the flannel service account to its cluster role"
  value       = kubernetes_cluster_role_binding.this
}
output "pod_security_policy" {
  description = "PodSecurityPolicy applied to the flannel pods"
  value       = kubernetes_pod_security_policy.this
}
output "daemonset" {
  description = "The flannel DaemonSet resource"
  value       = kubernetes_daemonset.this
}
output "configmap" {
  description = "ConfigMap holding the CNI and flanneld network configuration"
  value       = kubernetes_config_map.this
}

View File

@@ -0,0 +1,65 @@
# PodSecurityPolicy for the flannel DaemonSet: no privilege escalation,
# hostPath mounts restricted to the directories the DaemonSet actually
# uses, host networking allowed.
resource "kubernetes_pod_security_policy" "this" {
  metadata {
    name = "psp.flannel.unprivileged"
    annotations = {
      "seccomp.security.alpha.kubernetes.io/allowedProfileNames" : "docker/default",
      "seccomp.security.alpha.kubernetes.io/defaultProfileName" : "docker/default",
      "apparmor.security.beta.kubernetes.io/allowedProfileNames" : "runtime/default",
      "apparmor.security.beta.kubernetes.io/defaultProfileName" : "runtime/default",
    }
  }
  spec {
    # Privilege Escalation
    allow_privilege_escalation         = false
    default_allow_privilege_escalation = false
    privileged                         = false
    volumes = [
      "configMap",
      "secret",
      "emptyDir",
      "hostPath",
    ]
    # Must cover every hostPath volume the DaemonSet mounts. /opt/cni/bin
    # and /run/xtables.lock were previously missing, which would make PSP
    # admission reject the flannel pods when this policy is enforced.
    allowed_host_paths {
      path_prefix = "/etc/cni/net.d"
    }
    allowed_host_paths {
      path_prefix = "/etc/kube-flannel"
    }
    allowed_host_paths {
      path_prefix = "/run/flannel"
    }
    allowed_host_paths {
      path_prefix = "/run/xtables.lock"
    }
    allowed_host_paths {
      path_prefix = "/opt/cni/bin"
    }
    read_only_root_filesystem = false
    # Users and groups
    # NOTE(review): the flannel container typically runs as root;
    # MustRunAsNonRoot may block the pods under enforcing PSP admission —
    # confirm the container user before relying on this.
    run_as_user {
      rule = "MustRunAsNonRoot"
    }
    supplemental_groups {
      rule = "RunAsAny"
    }
    fs_group {
      rule = "RunAsAny"
    }
    # Capabilities: only what flanneld needs to manage routes/interfaces.
    allowed_capabilities       = ["NET_ADMIN", "NET_RAW"]
    default_add_capabilities   = []
    required_drop_capabilities = []
    # Host namespaces: network yes (flannel configures the host), PID/IPC no.
    host_pid     = false
    host_ipc     = false
    host_network = true
    host_ports {
      min = 0
      max = 65535
    }
    # SELinux
    se_linux {
      # SELinux is unused in CaaSP
      rule = "RunAsAny"
    }
  }
}

View File

@@ -0,0 +1,8 @@
# Provider requirements for this module.
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.11"
    }
  }
}

View File

@@ -0,0 +1,52 @@
# RBAC permissions flanneld needs: use its PodSecurityPolicy, read pod and
# node state, and patch node status. Creation can be skipped
# (create_cluster_role = false) to reuse an externally managed role named
# by var.cluster_role_name.
resource "kubernetes_cluster_role" "this" {
  count = var.create_cluster_role ? 1 : 0
  metadata {
    name = var.cluster_role_name
  }
  rule {
    # PodSecurityPolicy lives in the "policy" API group; the legacy
    # "extensions" group was removed in Kubernetes 1.16, so a rule against
    # it never grants access on current clusters.
    api_groups     = ["policy"]
    resources      = ["podsecuritypolicies"]
    verbs          = ["use"]
    resource_names = [kubernetes_pod_security_policy.this.metadata.0.name]
  }
  rule {
    api_groups = [""]
    resources  = ["pods"]
    verbs      = ["get"]
  }
  rule {
    api_groups = [""]
    resources  = ["nodes"]
    verbs      = ["list", "watch"]
  }
  rule {
    # flanneld records subnet/backend data in the node status.
    api_groups = [""]
    resources  = ["nodes/status"]
    verbs      = ["patch"]
  }
}
# Service account the flannel DaemonSet runs under.
resource "kubernetes_service_account" "this" {
  metadata {
    name      = "flannel"
    namespace = var.namespace
  }
}
# Bind the flannel service account to its cluster role — either the one
# created by this module or an external role named by var.cluster_role_name.
resource "kubernetes_cluster_role_binding" "this" {
  metadata {
    name = "flannel"
  }

  subject {
    kind      = "ServiceAccount"
    name      = kubernetes_service_account.this.metadata.0.name
    namespace = kubernetes_service_account.this.metadata.0.namespace
  }

  role_ref {
    kind      = "ClusterRole"
    name      = var.cluster_role_name
    api_group = "rbac.authorization.k8s.io"
  }
}

View File

@@ -0,0 +1,52 @@
# Inputs for the flannel module.
variable "namespace" {
  type        = string
  description = "Namespace for all resources"
  default     = "kube-system"
}
variable "vxlan_id" {
  type        = number
  description = "VXLAN ID (VNI)"
}
variable "vxlan_port" {
  type        = number
  description = "UDP port to use for sending encapsulated packets"
}
variable "flannel_image" {
  type        = string
  description = "Container image for the flanneld agent"
  default     = "rancher/mirrored-flannelcni-flannel:v0.17.0"
}
variable "flannel_cni_plugin_image" {
  type        = string
  description = "Container image providing the flannel CNI plugin binary"
  default     = "rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1"
}
variable "cluster_role_name" {
  type        = string
  description = "Name of the cluster role flannel will use"
  default     = "flannel"
}
variable "create_cluster_role" {
  type        = bool
  description = "Whether to create a suitable cluster role, or use an already existing one specified by the cluster_role_name variable"
  default     = true
}
variable "daemonset_name" {
  type        = string
  description = "Name of the daemonset"
  default     = "flannel"
}
variable "node_selector_os" {
  type        = list(string)
  description = "Which kubernetes.io/os node-label to match for execution"
  default     = ["linux"]
}
variable "cluster_cidr" {
  type        = string
  description = "Cluster CIDR (pod network)"
}

View File

@@ -0,0 +1,26 @@
# ConfigMap holding the KubeProxyConfiguration plus a bootstrap script that
# builds a kubeconfig from the pod's mounted service-account credentials.
resource "kubernetes_config_map" "this" {
  metadata {
    name      = "${var.daemonset_name}-config"
    namespace = var.namespace
  }
  data = {
    # Base configuration; entries in var.additional_config override or
    # extend it via merge().
    "kube-proxy-config.yaml" = yamlencode(merge({
      kind       = "KubeProxyConfiguration"
      apiVersion = "kubeproxy.config.k8s.io/v1alpha1"
      clientConnection = {
        kubeconfig = "/kubeconfig/kubeconfig"
      }
      mode        = var.mode
      clusterCIDR = var.cluster_cidr
    }, var.additional_config))
    # Run by the DaemonSet's init container: writes /kubeconfig/kubeconfig
    # using the service-account CA and token mounted into the pod.
    # (Heredoc content is kept flush-left: with <<EOM leading whitespace
    # would become part of the script.)
    "kubeconfig.sh" : <<EOM
#!/bin/sh
kubectl --kubeconfig=/kubeconfig/kubeconfig config set-cluster default --certificate-authority=/run/secrets/kubernetes.io/serviceaccount/ca.crt --server=${var.kubernetes_server}
kubectl --kubeconfig=/kubeconfig/kubeconfig config set-credentials kube-proxy --token=$(cat /run/secrets/kubernetes.io/serviceaccount/token)
kubectl --kubeconfig=/kubeconfig/kubeconfig config set-context default --cluster=default --user=kube-proxy
kubectl --kubeconfig=/kubeconfig/kubeconfig config use-context default
EOM
  }
}

View File

@@ -0,0 +1,145 @@
# DaemonSet running kube-proxy on every Linux node. An init container
# generates a kubeconfig (via kubectl, from the pod's service-account
# credentials) into an in-memory volume that kube-proxy then uses.
resource "kubernetes_daemonset" "this" {
  metadata {
    name      = var.daemonset_name
    namespace = var.namespace
    labels = {
      "k8s-app" = var.daemonset_name
    }
  }
  spec {
    selector {
      match_labels = {
        "k8s-app" = var.daemonset_name
      }
    }
    template {
      metadata {
        # The prometheus.io/* labels advertise the metrics endpoint
        # (port 10249) for scrape discovery.
        labels = {
          "k8s-app"              = var.daemonset_name
          "prometheus.io/scrape" = "true"
          "prometheus.io/port"   = "10249"
          "prometheus.io/scheme" = "http"
        }
      }
      spec {
        # Linux nodes only.
        affinity {
          node_affinity {
            required_during_scheduling_ignored_during_execution {
              node_selector_term {
                match_expressions {
                  key      = "kubernetes.io/os"
                  operator = "In"
                  values   = ["linux"]
                }
              }
            }
          }
        }
        # kube-proxy programs the host's networking, so it shares the host
        # network namespace, runs node-critical, and tolerates all
        # NoSchedule taints.
        host_network         = true
        priority_class_name  = "system-node-critical"
        service_account_name = kubernetes_service_account.this.metadata.0.name
        toleration {
          operator = "Exists"
          effect   = "NoSchedule"
        }
        # kube-proxy config plus the kubeconfig bootstrap script.
        volume {
          name = "kube-proxy-config"
          config_map {
            name = kubernetes_config_map.this.metadata.0.name
          }
        }
        # tmpfs scratch space holding the generated kubeconfig (kept in
        # memory so the token never touches disk).
        volume {
          name = "kubeconfig"
          empty_dir {
            medium = "Memory"
          }
        }
        volume {
          name = "lib-modules"
          host_path {
            path = "/lib/modules"
          }
        }
        # Runs kubeconfig.sh (mounted from the ConfigMap via sub_path) to
        # write /kubeconfig/kubeconfig before kube-proxy starts.
        init_container {
          name    = "kubeconfig"
          image   = "bitnami/kubectl:${var.kubernetes_version}"
          command = ["/bin/bash"]
          args    = ["/kubeconfig.sh"]
          volume_mount {
            name       = "kube-proxy-config"
            mount_path = "/kubeconfig.sh"
            sub_path   = "kubeconfig.sh"
          }
          volume_mount {
            name       = "kubeconfig"
            mount_path = "/kubeconfig"
          }
        }
        container {
          # Privileged: kube-proxy must manipulate the host's iptables/ipvs
          # rules.
          security_context {
            privileged = true
          }
          image   = "k8s.gcr.io/kube-proxy:v${var.kubernetes_version}"
          command = ["kube-proxy"]
          args = [
            "--config=/var/lib/kube-proxy/kube-proxy-config.yaml",
          ]
          name = "kube-proxy"
          resources {
            limits = {
              cpu    = "100m"
              memory = "50Mi"
            }
            requests = {
              cpu    = "100m"
              memory = "50Mi"
            }
          }
          volume_mount {
            name       = "kube-proxy-config"
            mount_path = "/var/lib/kube-proxy"
          }
          volume_mount {
            name       = "lib-modules"
            mount_path = "/lib/modules"
          }
          volume_mount {
            name       = "kubeconfig"
            mount_path = "/kubeconfig"
          }
          # TODO(review): no liveness/readiness probe is configured. A
          # commented-out sample http_get probe against port 80 was sketched
          # here; kube-proxy does not serve on port 80, so a real probe
          # should target its health endpoint instead — confirm the port
          # for the deployed version before adding one.
        }
      }
    }
  }
}

View File

@@ -0,0 +1,13 @@
# Expose the managed resources so callers can reference their attributes or
# declare explicit dependencies on them.
output "daemonset" {
  description = "The kube-proxy DaemonSet resource"
  value       = kubernetes_daemonset.this
}
output "configmap" {
  description = "ConfigMap holding the kube-proxy configuration and kubeconfig bootstrap script"
  value       = kubernetes_config_map.this
}
output "service_account" {
  description = "Service account kube-proxy runs under"
  value       = kubernetes_service_account.this
}
output "cluster_role_binding" {
  description = "Binding of the kube-proxy service account to system:node-proxier"
  value       = kubernetes_cluster_role_binding.this
}

View File

@@ -0,0 +1,8 @@
# Provider requirements for this module.
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.11"
    }
  }
}

View File

@@ -0,0 +1,22 @@
# Service account kube-proxy runs under, bound to the built-in
# system:node-proxier cluster role.
resource "kubernetes_service_account" "this" {
  metadata {
    name      = "kube-proxy"
    namespace = "kube-system"
  }
}
resource "kubernetes_cluster_role_binding" "this" {
  metadata {
    name = "kube-proxy-is-system-node-proxier"
  }
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    # Built-in Kubernetes role granting the permissions kube-proxy needs.
    name = "system:node-proxier"
  }
  subject {
    kind = "ServiceAccount"
    name = kubernetes_service_account.this.metadata.0.name
    # Reference the service account's namespace instead of repeating the
    # literal (matches the flannel module), so the two cannot drift apart.
    namespace = kubernetes_service_account.this.metadata.0.namespace
  }
}

View File

@@ -0,0 +1,17 @@
# ClusterIP service in front of the kube-proxy pods, exposing the metrics
# port (10249) so Prometheus can scrape them.
resource "kubernetes_service" "prometheus" {
  metadata {
    name      = var.daemonset_name
    namespace = var.namespace
  }

  spec {
    type = "ClusterIP"

    selector = {
      "k8s-app" = var.daemonset_name
    }

    port {
      port        = 10249
      target_port = 10249
    }
  }
}

View File

@@ -0,0 +1,34 @@
# Inputs for the kube-proxy module.
variable "kubernetes_version" {
  type        = string
  description = "Kubernetes cluster version (eg: 1.23.5)"
}
variable "cluster_cidr" {
  type        = string
  description = "Kubernetes cluster CIDR"
}
variable "kubernetes_server" {
  type        = string
  description = "Kubernetes (master) server address (eg: https://k8s.my.domain:6443/)"
}
variable "mode" {
  type        = string
  description = "kube-proxy proxy mode (eg: iptables or ipvs)"
  default     = "iptables"
}
variable "daemonset_name" {
  type        = string
  description = "Name of the kube-proxy daemonset and derived resources"
  default     = "kube-proxy"
}
variable "additional_config" {
  type        = map(any)
  description = "Extra KubeProxyConfiguration fields merged over the generated config"
  default     = {}
}
variable "namespace" {
  type        = string
  description = "Namespace for all resources"
  default     = "kube-system"
}