init: copied modules from lawndale-infra
This commit is contained in:
23
9p-persistent-volume/outputs.tf
Normal file
23
9p-persistent-volume/outputs.tf
Normal file
@@ -0,0 +1,23 @@
|
||||
# Outputs for the 9p-persistent-volume module.

output "namespace" {
  value = var.namespace
}

output "host_path" {
  value = local.host_path
}

output "pv_name" {
  value = kubernetes_persistent_volume.this.metadata[0].name
}

output "pvc_name" {
  value = kubernetes_persistent_volume_claim.this.metadata[0].name
}

output "persistent_volume" {
  value = kubernetes_persistent_volume.this
}

output "persistent_volume_claim" {
  value = kubernetes_persistent_volume_claim.this
}
|
||||
9
9p-persistent-volume/providers.tf
Normal file
9
9p-persistent-volume/providers.tf
Normal file
@@ -0,0 +1,9 @@
|
||||
# Provider requirements for the 9p-persistent-volume module.
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.11"
    }
  }
}
|
||||
|
||||
15
9p-persistent-volume/variables.tf
Normal file
15
9p-persistent-volume/variables.tf
Normal file
@@ -0,0 +1,15 @@
|
||||
# Inputs for the 9p-persistent-volume module.

variable "name" {
  type        = string
  description = "The name of the volume"
}

variable "namespace" {
  type        = string
  description = "The namespace for the persistent volume claim"
}

variable "volume_storage_capacity" {
  type        = string
  description = "Size of the persistent volume reported to Kubernetes"
  default     = "1Gi"
}
|
||||
37
9p-persistent-volume/volume.tf
Normal file
37
9p-persistent-volume/volume.tf
Normal file
@@ -0,0 +1,37 @@
|
||||
locals {
  # PV names are cluster-scoped, so prefix to avoid collisions.
  pv_name   = "pv-p9hostpath-${var.name}"
  host_path = "/mnt/datastore/${var.name}"
}

# Cluster-scoped PersistentVolume backed by a host directory.
resource "kubernetes_persistent_volume" "this" {
  metadata {
    name = local.pv_name
  }
  spec {
    capacity = {
      storage = var.volume_storage_capacity
    }
    access_modes = ["ReadWriteMany"]
    persistent_volume_source {
      host_path {
        path = local.host_path
      }
    }
  }
}

# Claim bound explicitly to the PV above via volume_name.
# NOTE(review): without storage_class_name = "" a default StorageClass in the
# cluster may try to dynamically provision instead of binding to this PV —
# confirm against the target cluster.
resource "kubernetes_persistent_volume_claim" "this" {
  metadata {
    name      = var.name
    namespace = var.namespace
  }
  spec {
    access_modes = ["ReadWriteMany"]
    resources {
      requests = {
        storage = var.volume_storage_capacity
      }
    }
    # Referencing the PV resource also creates the dependency edge.
    volume_name = kubernetes_persistent_volume.this.metadata[0].name
  }
}
|
||||
39
kubernetes/flannel/configmap.tf
Normal file
39
kubernetes/flannel/configmap.tf
Normal file
@@ -0,0 +1,39 @@
|
||||
# Flannel CNI configuration rendered from Terraform values.
resource "kubernetes_config_map" "this" {
  metadata {
    namespace = var.namespace
    name      = "kube-flannel-cfg"
    labels = {
      app       = "flannel"
      "k8s-app" = "cni"
    }
  }
  data = {
    # CNI conflist: flannel delegates to the bridge plugin; portmap adds
    # hostPort support.
    "cni-conf.json" = jsonencode({
      name       = "cbr0",
      cniVersion = "0.3.1",
      plugins = [
        {
          type = "flannel",
          delegate = {
            hairpinMode      = true,
            isDefaultGateway = true,
          }
        },
        {
          type = "portmap",
          capabilities = {
            portMappings = true,
          }
        },
      ]
    })
    # flanneld network config: VXLAN backend with configurable VNI/port.
    "net-conf.json" = jsonencode({
      Network = var.cluster_cidr,
      Backend = {
        Type = "vxlan",
        VNI  = var.vxlan_id,
        Port = var.vxlan_port,
      }
    })
  }
}
|
||||
167
kubernetes/flannel/daemonset.tf
Normal file
167
kubernetes/flannel/daemonset.tf
Normal file
@@ -0,0 +1,167 @@
|
||||
# Flannel daemonset: one pod per Linux node, running on the host network
# with just the capabilities needed to manage VXLAN/iptables.
resource "kubernetes_daemonset" "this" {
  metadata {
    name      = var.daemonset_name
    namespace = var.namespace
    labels = {
      app       = var.daemonset_name
      "k8s-app" = "cni"
    }
  }
  spec {
    selector {
      match_labels = {
        app       = var.daemonset_name
        "k8s-app" = "cni"
      }
    }

    template {
      metadata {
        labels = {
          app       = var.daemonset_name
          "k8s-app" = "cni"
        }
      }
      spec {
        # Only schedule onto nodes whose OS matches the configured list.
        affinity {
          node_affinity {
            required_during_scheduling_ignored_during_execution {
              node_selector_term {
                match_expressions {
                  key      = "kubernetes.io/os"
                  operator = "In"
                  values   = var.node_selector_os
                }
              }
            }
          }
        }

        host_network        = true
        priority_class_name = "system-node-critical"

        # The CNI must run everywhere, including tainted nodes.
        toleration {
          operator = "Exists"
          effect   = "NoSchedule"
        }

        service_account_name = kubernetes_service_account.this.metadata[0].name

        # Copy the flannel CNI plugin binary onto the host.
        init_container {
          name    = "install-cni-plugin"
          image   = var.flannel_cni_plugin_image
          command = ["cp"]
          args    = ["-f", "/flannel", "/opt/cni/bin/flannel"]
          volume_mount {
            name       = "cni-plugin"
            mount_path = "/opt/cni/bin"
          }
        }

        # Install the CNI conflist from the configmap onto the host.
        init_container {
          name    = "install-cni"
          image   = var.flannel_image
          command = ["cp"]
          args    = ["-f", "/etc/kube-flannel/cni-conf.json", "/etc/cni/net.d/10-flannel.conflist"]
          volume_mount {
            name       = "cni"
            mount_path = "/etc/cni/net.d"
          }
          volume_mount {
            name       = "flannel-cfg"
            mount_path = "/etc/kube-flannel/"
          }
        }

        container {
          name    = "kube-flannel"
          image   = var.flannel_image
          command = ["/opt/bin/flanneld"]
          args    = ["--ip-masq", "--kube-subnet-mgr"]
          resources {
            requests = {
              cpu    = "100m"
              memory = "50Mi"
            }
            limits = {
              cpu    = "100m"
              memory = "50Mi"
            }
          }
          # Unprivileged, but granted the network capabilities flanneld needs.
          security_context {
            privileged = false
            capabilities {
              add = ["NET_ADMIN", "NET_RAW"]
            }
          }

          env {
            name = "POD_NAME"
            value_from {
              field_ref {
                field_path = "metadata.name"
              }
            }
          }

          env {
            name = "POD_NAMESPACE"
            value_from {
              field_ref {
                field_path = "metadata.namespace"
              }
            }
          }
          env {
            name  = "EVENT_QUEUE_DEPTH"
            value = "5000"
          }
          volume_mount {
            name       = "run"
            mount_path = "/run/flannel"
          }
          volume_mount {
            name       = "flannel-cfg"
            mount_path = "/etc/kube-flannel/"
          }
          volume_mount {
            name       = "xtables-lock"
            mount_path = "/run/xtables.lock"
          }
        }

        volume {
          name = "run"
          host_path {
            path = "/run/flannel"
          }
        }
        volume {
          name = "cni-plugin"
          host_path {
            path = "/opt/cni/bin"
          }
        }
        volume {
          name = "cni"
          host_path {
            path = "/etc/cni/net.d"
          }
        }
        volume {
          name = "flannel-cfg"
          config_map {
            name = kubernetes_config_map.this.metadata[0].name
          }
        }
        volume {
          name = "xtables-lock"
          host_path {
            path = "/run/xtables.lock"
            type = "FileOrCreate"
          }
        }
      }
    }
  }
}
|
||||
23
kubernetes/flannel/outputs.tf
Normal file
23
kubernetes/flannel/outputs.tf
Normal file
@@ -0,0 +1,23 @@
|
||||
# Outputs exposing the flannel module's created resources.

output "service_account" {
  value = kubernetes_service_account.this
}

output "cluster_role" {
  value = kubernetes_cluster_role.this
}

output "cluster_role_binding" {
  value = kubernetes_cluster_role_binding.this
}

output "pod_security_policy" {
  value = kubernetes_pod_security_policy.this
}

output "daemonset" {
  value = kubernetes_daemonset.this
}

output "configmap" {
  value = kubernetes_config_map.this
}
|
||||
65
kubernetes/flannel/pod_security.tf
Normal file
65
kubernetes/flannel/pod_security.tf
Normal file
@@ -0,0 +1,65 @@
|
||||
# PodSecurityPolicy letting flannel run unprivileged while keeping the
# network capabilities and host paths it requires.
resource "kubernetes_pod_security_policy" "this" {
  metadata {
    name = "psp.flannel.unprivileged"
    annotations = {
      "seccomp.security.alpha.kubernetes.io/allowedProfileNames" : "docker/default",
      "seccomp.security.alpha.kubernetes.io/defaultProfileName" : "docker/default",
      "apparmor.security.beta.kubernetes.io/allowedProfileNames" : "runtime/default",
      "apparmor.security.beta.kubernetes.io/defaultProfileName" : "runtime/default",
    }
  }
  spec {
    # Privilege Escalation: none allowed in any form.
    allow_privilege_escalation         = false
    default_allow_privilege_escalation = false
    privileged                         = false

    volumes = [
      "configMap",
      "secret",
      "emptyDir",
      "hostPath",
    ]
    # Only the paths flannel actually mounts from the host.
    allowed_host_paths {
      path_prefix = "/etc/cni/net.d"
    }
    allowed_host_paths {
      path_prefix = "/etc/kube-flannel"
    }
    allowed_host_paths {
      path_prefix = "/run/flannel"
    }

    read_only_root_filesystem = false

    # Users and groups
    # NOTE(review): confirm the flannel pods actually run as non-root —
    # MustRunAsNonRoot will reject root containers.
    run_as_user {
      rule = "MustRunAsNonRoot"
    }
    supplemental_groups {
      rule = "RunAsAny"
    }
    fs_group {
      rule = "RunAsAny"
    }

    # Capabilities needed for VXLAN/iptables management.
    allowed_capabilities       = ["NET_ADMIN", "NET_RAW"]
    default_add_capabilities   = []
    required_drop_capabilities = []

    # Host namespaces: only the host network is shared.
    host_pid     = false
    host_ipc     = false
    host_network = true
    host_ports {
      min = 0
      max = 65535
    }
    # SELinux
    se_linux {
      # SELinux is unused in CaaSP
      rule = "RunAsAny"
    }
  }
}
|
||||
8
kubernetes/flannel/provider.tf
Normal file
8
kubernetes/flannel/provider.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
# Provider requirements for the flannel module.
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.11"
    }
  }
}
|
||||
52
kubernetes/flannel/rbac.tf
Normal file
52
kubernetes/flannel/rbac.tf
Normal file
@@ -0,0 +1,52 @@
|
||||
|
||||
# RBAC for flannel: optional ClusterRole, plus a ServiceAccount and binding.
# NOTE(review): "extensions" was the PSP API group on pre-1.16 clusters;
# newer clusters expect "policy" — confirm against the target cluster version.
resource "kubernetes_cluster_role" "this" {
  count = var.create_cluster_role ? 1 : 0

  metadata {
    name = var.cluster_role_name
  }
  # Allow pods bound to this role to use the flannel PSP.
  rule {
    api_groups     = ["extensions"]
    resources      = ["podsecuritypolicies"]
    verbs          = ["use"]
    resource_names = [kubernetes_pod_security_policy.this.metadata[0].name]
  }
  rule {
    api_groups = [""]
    resources  = ["pods"]
    verbs      = ["get"]
  }
  rule {
    api_groups = [""]
    resources  = ["nodes"]
    verbs      = ["list", "watch"]
  }
  rule {
    api_groups = [""]
    resources  = ["nodes/status"]
    verbs      = ["patch"]
  }
}

resource "kubernetes_service_account" "this" {
  metadata {
    name      = "flannel"
    namespace = var.namespace
  }
}

resource "kubernetes_cluster_role_binding" "this" {
  metadata {
    name = "flannel"
  }
  # Bound by name so a pre-existing role can be used when
  # create_cluster_role = false.
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = var.cluster_role_name
  }
  subject {
    kind      = "ServiceAccount"
    name      = kubernetes_service_account.this.metadata[0].name
    namespace = kubernetes_service_account.this.metadata[0].namespace
  }
}
|
||||
52
kubernetes/flannel/variables.tf
Normal file
52
kubernetes/flannel/variables.tf
Normal file
@@ -0,0 +1,52 @@
|
||||
# Inputs for the flannel module.

variable "namespace" {
  type        = string
  description = "namespace for resources"
  default     = "kube-system"
}

variable "vxlan_id" {
  type        = number
  description = "VXLAN ID (VNI)"
}

variable "vxlan_port" {
  type        = number
  description = "UDP port to use for sending encapsulated packets"
}

variable "flannel_image" {
  type        = string
  description = "Container image for the flannel daemon"
  default     = "rancher/mirrored-flannelcni-flannel:v0.17.0"
}

variable "flannel_cni_plugin_image" {
  type        = string
  description = "Container image providing the flannel CNI plugin binary"
  default     = "rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.1"
}

variable "cluster_role_name" {
  type        = string
  description = "Name of the cluster role flannel will use"
  default     = "flannel"
}

variable "create_cluster_role" {
  type = bool
  # Fixed typos: "Weather" -> "Whether", "exisiting" -> "existing".
  description = "Whether or not to create a suitable cluster role or use an already existing one specified by the cluster_role_name variable"
  default     = true
}

variable "daemonset_name" {
  type = string
  # Fixed typo: "daeomonset" -> "daemonset".
  description = "Name of the daemonset"
  default     = "flannel"
}

variable "node_selector_os" {
  type        = list(string)
  description = "Which kubernetes.io/os node-label to match for execution"
  default     = ["linux"]
}

variable "cluster_cidr" {
  type        = string
  description = "ClusterCIDR"
}
||||
26
kubernetes/kube-proxy/configmap.tf
Normal file
26
kubernetes/kube-proxy/configmap.tf
Normal file
@@ -0,0 +1,26 @@
|
||||
|
||||
# kube-proxy configuration plus a bootstrap script that assembles a
# kubeconfig from the pod's service-account credentials at startup.
resource "kubernetes_config_map" "this" {
  metadata {
    name      = "${var.daemonset_name}-config"
    namespace = var.namespace
  }

  data = {
    # Keys in var.additional_config extend/override the defaults below.
    "kube-proxy-config.yaml" = yamlencode(merge({
      kind       = "KubeProxyConfiguration"
      apiVersion = "kubeproxy.config.k8s.io/v1alpha1"
      clientConnection = {
        kubeconfig = "/kubeconfig/kubeconfig"
      }
      mode        = var.mode
      clusterCIDR = var.cluster_cidr
    }, var.additional_config))
    "kubeconfig.sh" : <<EOM
#!/bin/sh
kubectl --kubeconfig=/kubeconfig/kubeconfig config set-cluster default --certificate-authority=/run/secrets/kubernetes.io/serviceaccount/ca.crt --server=${var.kubernetes_server}
kubectl --kubeconfig=/kubeconfig/kubeconfig config set-credentials kube-proxy --token=$(cat /run/secrets/kubernetes.io/serviceaccount/token)
kubectl --kubeconfig=/kubeconfig/kubeconfig config set-context default --cluster=default --user=kube-proxy
kubectl --kubeconfig=/kubeconfig/kubeconfig config use-context default
EOM
  }
}
|
||||
145
kubernetes/kube-proxy/daemonset.tf
Normal file
145
kubernetes/kube-proxy/daemonset.tf
Normal file
@@ -0,0 +1,145 @@
|
||||
|
||||
# kube-proxy daemonset: runs on every Linux node on the host network,
# building its kubeconfig in an in-memory volume via an init container.
resource "kubernetes_daemonset" "this" {
  metadata {
    name      = var.daemonset_name
    namespace = var.namespace
    labels = {
      "k8s-app" = var.daemonset_name
    }
  }

  spec {
    selector {
      match_labels = {
        "k8s-app" = var.daemonset_name
      }
    }

    template {
      metadata {
        labels = {
          "k8s-app"              = var.daemonset_name
          "prometheus.io/scrape" = "true"
          "prometheus.io/port"   = "10249"
          "prometheus.io/scheme" = "http"
        }
      }

      spec {
        # Linux nodes only.
        affinity {
          node_affinity {
            required_during_scheduling_ignored_during_execution {
              node_selector_term {
                match_expressions {
                  key      = "kubernetes.io/os"
                  operator = "In"
                  values   = ["linux"]
                }
              }
            }
          }
        }

        host_network         = true
        priority_class_name  = "system-node-critical"
        service_account_name = kubernetes_service_account.this.metadata[0].name

        # Must run on tainted nodes too.
        toleration {
          operator = "Exists"
          effect   = "NoSchedule"
        }
        volume {
          name = "kube-proxy-config"
          config_map {
            name = kubernetes_config_map.this.metadata[0].name
          }
        }
        # tmpfs scratch space holding the generated kubeconfig.
        volume {
          name = "kubeconfig"
          empty_dir {
            medium = "Memory"
          }
        }

        volume {
          name = "lib-modules"
          host_path {
            path = "/lib/modules"
          }
        }

        # Generates /kubeconfig/kubeconfig from service-account credentials.
        # NOTE(review): assumes the bitnami/kubectl image ships /bin/bash —
        # confirm for the pinned kubernetes_version tag.
        init_container {
          name    = "kubeconfig"
          image   = "bitnami/kubectl:${var.kubernetes_version}"
          command = ["/bin/bash"]
          args    = ["/kubeconfig.sh"]

          volume_mount {
            name       = "kube-proxy-config"
            mount_path = "/kubeconfig.sh"
            sub_path   = "kubeconfig.sh"
          }
          volume_mount {
            name       = "kubeconfig"
            mount_path = "/kubeconfig"
          }
        }

        container {
          name = "kube-proxy"
          # Privileged: kube-proxy manipulates iptables/ipvs on the host.
          security_context {
            privileged = true
          }
          image   = "k8s.gcr.io/kube-proxy:v${var.kubernetes_version}"
          command = ["kube-proxy"]
          args = [
            "--config=/var/lib/kube-proxy/kube-proxy-config.yaml",
          ]

          resources {
            limits = {
              cpu    = "100m"
              memory = "50Mi"
            }
            requests = {
              cpu    = "100m"
              memory = "50Mi"
            }
          }

          volume_mount {
            name       = "kube-proxy-config"
            mount_path = "/var/lib/kube-proxy"
          }

          volume_mount {
            name       = "lib-modules"
            mount_path = "/lib/modules"
          }

          volume_mount {
            name       = "kubeconfig"
            mount_path = "/kubeconfig"
          }

          # A liveness probe was sketched here but never enabled; left out
          # intentionally — add one against the /healthz endpoint if needed.
        }
      }
    }
  }
}
|
||||
13
kubernetes/kube-proxy/outputs.tf
Normal file
13
kubernetes/kube-proxy/outputs.tf
Normal file
@@ -0,0 +1,13 @@
|
||||
# Outputs exposing the kube-proxy module's created resources.

output "daemonset" {
  value = kubernetes_daemonset.this
}

output "configmap" {
  value = kubernetes_config_map.this
}

output "service_account" {
  value = kubernetes_service_account.this
}

output "cluster_role_binding" {
  value = kubernetes_cluster_role_binding.this
}
|
||||
8
kubernetes/kube-proxy/provider.tf
Normal file
8
kubernetes/kube-proxy/provider.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
# Provider requirements for the kube-proxy module.
terraform {
  required_providers {
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.11"
    }
  }
}
|
||||
22
kubernetes/kube-proxy/rbac.tf
Normal file
22
kubernetes/kube-proxy/rbac.tf
Normal file
@@ -0,0 +1,22 @@
|
||||
# ServiceAccount and binding giving kube-proxy the system:node-proxier role.
resource "kubernetes_service_account" "this" {
  metadata {
    name = "kube-proxy"
    # Fixed: use var.namespace so the service account lands in the same
    # namespace as the daemonset that references it by name (the namespace
    # was hard-coded to "kube-system", breaking non-default namespaces).
    namespace = var.namespace
  }
}

resource "kubernetes_cluster_role_binding" "this" {
  metadata {
    name = "kube-proxy-is-system-node-proxier"
  }
  # system:node-proxier is a built-in ClusterRole.
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "system:node-proxier"
  }
  subject {
    kind = "ServiceAccount"
    name = kubernetes_service_account.this.metadata[0].name
    # Follow the service account's actual namespace instead of hard-coding it.
    namespace = kubernetes_service_account.this.metadata[0].namespace
  }
}
|
||||
17
kubernetes/kube-proxy/service.tf
Normal file
17
kubernetes/kube-proxy/service.tf
Normal file
@@ -0,0 +1,17 @@
|
||||
# ClusterIP service exposing kube-proxy's metrics port (10249) so
# Prometheus can scrape it.
resource "kubernetes_service" "prometheus" {
  metadata {
    name      = var.daemonset_name
    namespace = var.namespace
  }
  spec {
    selector = {
      "k8s-app" = var.daemonset_name
    }
    port {
      port        = 10249
      target_port = 10249
    }

    type = "ClusterIP"
  }
}
|
||||
34
kubernetes/kube-proxy/variables.tf
Normal file
34
kubernetes/kube-proxy/variables.tf
Normal file
@@ -0,0 +1,34 @@
|
||||
# Inputs for the kube-proxy module.

variable "kubernetes_version" {
  type        = string
  description = "Kubernetes cluster version (eg: 1.23.5)"
}

variable "cluster_cidr" {
  type        = string
  description = "Kubernetes cluster CIDR"
}

variable "kubernetes_server" {
  type        = string
  description = "Kubernetes (master) server address (eg: https://k8s.my.domain:6443/)"
}

variable "mode" {
  type        = string
  description = "kube-proxy proxy mode (eg: iptables, ipvs)"
  default     = "iptables"
}

variable "daemonset_name" {
  type        = string
  description = "Name used for the daemonset and derived resources"
  default     = "kube-proxy"
}

variable "additional_config" {
  type        = map(any)
  description = "Extra KubeProxyConfiguration keys merged over the defaults"
  default     = {}
}

variable "namespace" {
  type        = string
  description = "Namespace all kube-proxy resources are created in"
  default     = "kube-system"
}
|
||||
13
lawndale-vm-ipam/dns.tf
Normal file
13
lawndale-vm-ipam/dns.tf
Normal file
@@ -0,0 +1,13 @@
|
||||
# Forward A record: <name>.<interface>.lawndale. -> computed address.
resource "dns_a_record_set" "this" {
  zone      = "lawndale."
  name      = "${var.name}.${var.interface}"
  addresses = [local.ip_address]
  ttl       = var.ttl
}

# Reverse PTR record under the 192.168/16 reverse zone.
resource "dns_ptr_record" "this" {
  zone = "168.192.in-addr.arpa."
  name = local.ptr_name
  ptr  = "${var.name}.${var.interface}.lawndale."
  ttl  = var.ttl
}
|
||||
18
lawndale-vm-ipam/locals.tf
Normal file
18
lawndale-vm-ipam/locals.tf
Normal file
@@ -0,0 +1,18 @@
|
||||
locals {
  # First three octets of the address are determined by the interface.
  interface_ip_map = {
    "internal" = "192.168.254"
    "nat"      = "192.168.253"
  }
  # Zone-relative reverse-lookup labels for 168.192.in-addr.arpa.:
  # octets appear reversed, so only the third octet is needed here.
  interface_ptr_map = {
    "internal" = "254"
    "nat"      = "253"
  }

  ip_address = "${local.interface_ip_map[var.interface]}.${var.id}"
  # Fixed: the PTR name must be "<id>.<third-octet>" relative to the
  # 168.192.in-addr.arpa. zone. The previous expression used
  # interface_ip_map, producing "<id>.192.168.<third-octet>", which resolves
  # to the wrong reverse FQDN; interface_ptr_map existed for this purpose
  # but was never used.
  ptr_name = "${var.id}.${local.interface_ptr_map[var.interface]}"

  # Host bridge device backing each logical interface.
  lawndale_interface_map = {
    "internal" = "brInternal"
    "nat"      = "brNAT"
  }
}
|
||||
35
lawndale-vm-ipam/outputs.tf
Normal file
35
lawndale-vm-ipam/outputs.tf
Normal file
@@ -0,0 +1,35 @@
|
||||
# Outputs for the lawndale-vm-ipam module.

output "ip_address" {
  value = local.ip_address
}

output "fqdn" {
  value = "${dns_a_record_set.this.name}.${dns_a_record_set.this.zone}"
}

output "interface" {
  value = var.interface
}

output "name" {
  value = var.name
}

# The .254 host on each subnet acts as both gateway and resolver.
output "gateway" {
  value = "${local.interface_ip_map[var.interface]}.254"
}

output "nameserver" {
  value = "${local.interface_ip_map[var.interface]}.254"
}

output "search_domains" {
  value = ["${var.interface}.lawndale.", "lawndale."]
}

# All lawndale subnets are /24.
output "cidr" {
  value = 24
}

output "lawndale_interface" {
  value = local.lawndale_interface_map[var.interface]
}
|
||||
8
lawndale-vm-ipam/providers.tf
Normal file
8
lawndale-vm-ipam/providers.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
# Provider requirements for the lawndale-vm-ipam module.
terraform {
  required_providers {
    dns = {
      source  = "hashicorp/dns"
      version = "~> 3.2"
    }
  }
}
|
||||
21
lawndale-vm-ipam/variables.tf
Normal file
21
lawndale-vm-ipam/variables.tf
Normal file
@@ -0,0 +1,21 @@
|
||||
# Inputs for the lawndale-vm-ipam module.

variable "id" {
  type        = number
  description = "The ID for the VM that will be transposed into an IP address"
}

variable "name" {
  type        = string
  description = "The domain name for the vm"
}

variable "interface" {
  type        = string
  description = "Which interface should be used. Can be either nat or internal"

  # Added: fail fast instead of erroring inside the interface maps.
  validation {
    condition     = contains(["nat", "internal"], var.interface)
    error_message = "The interface must be either \"nat\" or \"internal\"."
  }
}

variable "ttl" {
  type        = number
  description = "TTL value for the new records"

  default = 300
}
|
||||
119
lawndale-vm/compute.tf
Normal file
119
lawndale-vm/compute.tf
Normal file
@@ -0,0 +1,119 @@
|
||||
|
||||
# Optionally create a dedicated dir-backed storage pool for this VM.
resource "libvirt_pool" "this" {
  count = var.create_root_storage_pool ? 1 : 0
  name  = local.root_storage_pool
  type  = "dir"
  path  = "/vmstore/${var.name}"
}

# Root disk, cloned from a base image volume.
resource "libvirt_volume" "this" {
  name = local.root_storage_volume_name
  pool = local.root_storage_pool

  size             = var.root_storage_volume_size_gb * 1024 * 1024 * 1024
  base_volume_name = var.base_image_volume
  base_volume_pool = var.base_image_pool

  # Fixed: the pool is referenced by name only, so Terraform saw no
  # dependency edge and could try to create the volume before the
  # (conditionally created) pool exists.
  depends_on = [libvirt_pool.this]
}

# Generated MAC kept stable across re-plans by the macaddress provider.
resource "macaddress" "this" {
}

# Derives the VM's IP/DNS configuration from its lawndale id and interface.
module "ipam" {
  source = "../../modules/lawndale-vm-ipam"

  id        = var.id
  name      = var.name
  interface = var.interface
}

resource "libvirt_domain" "this" {
  name        = "${var.id}-${var.name}"
  description = var.description

  vcpu   = var.vcpu
  memory = var.memory_mb

  autostart = var.autostart

  cloudinit = libvirt_cloudinit_disk.this.id

  network_interface {
    bridge = module.ipam.lawndale_interface
    mac    = macaddress.this.address
  }

  disk {
    volume_id = libvirt_volume.this.id
    scsi      = true
  }

  # video { type = "virtio" }
  # graphics {}

  # Serial console for headless access.
  console {
    type        = "pty"
    target_port = 0
    target_type = "serial"
  }

  qemu_agent = true
  # Optional 9p shared filesystems.
  dynamic "filesystem" {
    for_each = var.filesystems
    content {
      source     = filesystem.value.source
      target     = filesystem.value.target
      readonly   = filesystem.value.readonly
      accessmode = filesystem.value.accessmode
    }
  }

  # Optional XSLT transform applied to the generated domain XML.
  dynamic "xml" {
    for_each = var.xslt == null ? [] : [var.xslt]
    content {
      xslt = xml.value
    }
  }
}

resource "libvirt_cloudinit_disk" "this" {
  name = "${var.id}-${var.name}-cloudinit"
  pool = local.root_storage_pool

  meta_data      = local.meta_data
  network_config = local.network_config
  user_data      = var.user_data

  # Fixed: same missing dependency edge as libvirt_volume above.
  depends_on = [libvirt_pool.this]
}

locals {
  # Netplan-style v2 network config for the single bridged interface.
  default_network_config = {
    version = 2
    ethernets = {
      eth = {
        match = {
          macaddress = macaddress.this.address
        }
        addresses = [
          "${module.ipam.ip_address}/${module.ipam.cidr}"
        ]
        gateway4 = module.ipam.gateway
        nameservers = {
          search = module.ipam.search_domains
          addresses = [
            module.ipam.nameserver
          ]
        }
      }
    }
  }

  default_meta_data = <<EOM
instance-id: ${var.id}-${var.name}
local-hostname: ${var.name}
EOM

  meta_data = var.meta_data != null ? var.meta_data : local.default_meta_data
  # JSON is a subset of YAML, so jsonencode output is valid cloud-init
  # network-config.
  network_config = var.network_config != null ? var.network_config : jsonencode(local.default_network_config)
}
|
||||
4
lawndale-vm/locals.tf
Normal file
4
lawndale-vm/locals.tf
Normal file
@@ -0,0 +1,4 @@
|
||||
locals {
  # Fall back to the VM name when no explicit pool/volume name is supplied.
  root_storage_pool        = var.root_storage_pool != "" ? var.root_storage_pool : var.name
  root_storage_volume_name = var.root_storage_volume_name != "" ? "${var.root_storage_volume_name}.qcow2" : "${var.name}.qcow2"
}
|
||||
0
lawndale-vm/outputs.tf
Normal file
0
lawndale-vm/outputs.tf
Normal file
13
lawndale-vm/providers.tf
Normal file
13
lawndale-vm/providers.tf
Normal file
@@ -0,0 +1,13 @@
|
||||
# Provider requirements for the lawndale-vm module.
terraform {
  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = "~> 0.6.14"
    }

    macaddress = {
      source  = "ivoronin/macaddress"
      version = "~> 0.3.0"
    }
  }
}
|
||||
112
lawndale-vm/variables.tf
Normal file
112
lawndale-vm/variables.tf
Normal file
@@ -0,0 +1,112 @@
|
||||
# Inputs for the lawndale-vm module.

variable "id" {
  type        = number
  description = "The lawndale id of the virtual machine"
}

variable "name" {
  type        = string
  description = "The name of the virtual machine (must be a [-_a-z0-9])"

  validation {
    condition     = can(regex("^[-_a-z0-9]+$", var.name))
    error_message = "A virtual machine name must be lowercase, and can only contain alphanumeral characters, dashes and underscores."
  }
}

variable "vcpu" {
  type        = number
  description = "CPU count"
  default     = 1
}

variable "memory_mb" {
  type        = number
  description = "VM memory allocation in megabytes"
}

variable "description" {
  type        = string
  description = "(Short) Description for the virtual machine"
}

variable "base_image_pool" {
  type        = string
  description = "Base image storage pool"
}

variable "base_image_volume" {
  type = string
  # Fixed copy-paste error: this is the volume, not the pool.
  description = "Base image storage volume"
}

variable "root_storage_pool" {
  type        = string
  description = "The name of the storage pool. It will default to the VM name"

  default = ""
}

variable "create_root_storage_pool" {
  type        = bool
  description = "Create the storage pool as part of the module"

  default = true
}

variable "root_storage_volume_size_gb" {
  type        = number
  description = "The size of the storage volume (in gigabytes)"
}

variable "root_storage_volume_name" {
  type        = string
  description = "the name of the storage volume (must be unique in the pool)"
  default     = ""
}

variable "interface" {
  type        = string
  description = "Network interface to attach the vm on"
}

variable "autostart" {
  type        = bool
  description = "Start the VM at host start?"
  default     = true
}

variable "user_data" {
  type        = string
  description = "Cloud-init userdata script to run"
}

variable "network_config" {
  type        = string
  description = "Cloud-init network config"
  default     = null
  nullable    = true
}

variable "meta_data" {
  type        = string
  description = "Cloud-init meta-data"
  default     = null
  nullable    = true
}

variable "filesystems" {
  type = list(object({
    source     = string
    target     = string
    accessmode = string
    readonly   = bool
  }))
  description = "9p shared filesystem devices"
  default     = []
}

variable "xslt" {
  type        = string
  description = "XSLT applied to the domain before sent to libvirt"
  default     = null
}
|
||||
9
remote-state-access/outputs.tf
Normal file
9
remote-state-access/outputs.tf
Normal file
@@ -0,0 +1,9 @@
|
||||
# IAM role ARN, or null when the role was not created.
# Fixed: `type` is not a valid argument for output blocks, and resources
# with count must be indexed ([0]) — both made this file invalid.
output "role_arn" {
  value = var.create_role ? aws_iam_role.this[0].arn : null
}

# IAM policy ARN, or null when the policy was not created.
output "policy_arn" {
  value = var.create_policy ? aws_iam_policy.this[0].arn : null
}
|
||||
59
remote-state-access/policy.tf
Normal file
59
remote-state-access/policy.tf
Normal file
@@ -0,0 +1,59 @@
|
||||
# Policy document granting access to the S3 bucket and DynamoDB lock table
# used by Terraform remote state.
data "aws_iam_policy_document" "this" {
  # Bucket listing, restricted to the permitted state prefixes.
  statement {
    effect = "Allow"
    actions = [
      "s3:ListBucket"
    ]

    resources = [
      "arn:aws:s3:::${var.bucket_name}",
    ]

    # Fixed: the s3:prefix condition key only applies to ListBucket
    # requests; it was previously attached to the object statement below,
    # where it could never match.
    condition {
      test     = "StringLike"
      variable = "s3:prefix"

      values = var.prefixes
    }
  }
  # Object-level access to the state files themselves.
  statement {
    effect = "Allow"
    actions = [
      "s3:GetObject",
      "s3:PutObject",
      "s3:DeleteObject",
    ]

    # Fixed: these actions act on object ARNs ("bucket/key"), not the bucket
    # ARN — the original statement granted no object access at all.
    resources = [
      for prefix in var.prefixes : "arn:aws:s3:::${var.bucket_name}/${prefix}"
    ]
  }
  # State locking table.
  statement {
    effect = "Allow"
    actions = [
      "dynamodb:GetItem",
      "dynamodb:PutItem",
      "dynamodb:DeleteItem",
    ]
    # Fixed: the declared variable is dynamo_table; var.dynamodb_table
    # does not exist in this module.
    resources = [
      "arn:aws:dynamodb:*:*:table/${var.dynamo_table}",
    ]
  }
}

resource "aws_iam_role" "this" {
  count = var.create_role ? 1 : 0
  name  = var.role_name

  # NOTE(review): assume_role_policy is a required argument of aws_iam_role
  # and is missing here; the trusting principal is not known to this module
  # and must be added before create_role = true can work.

  inline_policy {
    name   = "Allow access for remote states s3 and dynamo"
    policy = data.aws_iam_policy_document.this.json
  }
}

resource "aws_iam_policy" "this" {
  count = var.create_policy ? 1 : 0
  name  = var.policy_name
  path  = var.policy_path
  # Fixed: `policy` is a required argument; attach the same document the
  # role uses inline.
  policy = data.aws_iam_policy_document.this.json
}
|
||||
8
remote-state-access/provider.tf
Normal file
8
remote-state-access/provider.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
# Provider requirements for the remote-state-access module.
terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
      # Any 4.x release from 4.9 onward.
      version = "~> 4.9"
    }
  }
}
|
||||
35
remote-state-access/variables.tf
Normal file
35
remote-state-access/variables.tf
Normal file
@@ -0,0 +1,35 @@
|
||||
# Name of the S3 bucket holding the remote Terraform states.
variable "bucket_name" {
  type = string
}

# Name of the DynamoDB table used for state locking.
variable "dynamo_table" {
  type = string
}

# State key prefixes (StringLike patterns) this principal may access.
variable "prefixes" {
  type = list(string)
}

# Name for the IAM role — only used when create_role = true.
variable "role_name" {
  type    = string
  default = null
}

# Name for the standalone IAM policy — only used when create_policy = true.
variable "policy_name" {
  type    = string
  default = null
}

# IAM path for the standalone policy.
variable "policy_path" {
  type    = string
  default = "/"
}

# Whether to create an IAM role carrying the access policy inline.
variable "create_role" {
  type    = bool
  default = false
}

# Whether to create a standalone (attachable) IAM policy.
variable "create_policy" {
  type    = bool
  default = false
}
|
||||
55
remote-state/bucket.tf
Normal file
55
remote-state/bucket.tf
Normal file
@@ -0,0 +1,55 @@
|
||||
# Customer-managed KMS key used for SSE-KMS on the state bucket and,
# optionally, for encryption at rest on the DynamoDB lock table.
resource "aws_kms_key" "this" {
  # Typo fix: original read "Enryption".
  description             = "Encryption key for S3 remote terraform state"
  deletion_window_in_days = 30
}
|
||||
|
||||
# Bucket policy that denies every S3 action performed over plain HTTP
# (aws:SecureTransport = false), for all principals, on the bucket and all
# objects in it — forcing TLS-only access to the state bucket.
data "aws_iam_policy_document" "force_secure_transport" {
  statement {
    sid     = "ForceSecureTransport"
    actions = ["s3:*"]
    effect  = "Deny"
    resources = [
      module.states_bucket.s3_bucket_arn,
      "${module.states_bucket.s3_bucket_arn}/*"
    ]
    condition {
      test     = "Bool"
      variable = "aws:SecureTransport"
      values   = ["false"]
    }
    # Applies to anonymous and authenticated principals alike.
    principals {
      type        = "*"
      identifiers = ["*"]
    }
  }
}
|
||||
|
||||
# S3 bucket holding the Terraform state files, built with the community
# terraform-aws-modules/s3-bucket module: versioned, KMS-encrypted, fully
# private, and restricted to TLS-only access by the attached policy.
module "states_bucket" {
  source = "terraform-aws-modules/s3-bucket/aws"

  bucket = local.bucket_name

  # Versioning lets a corrupted or accidentally-overwritten state be
  # rolled back to a previous object version.
  versioning = {
    enabled = true
  }

  # Default SSE-KMS with the module-managed CMK.
  server_side_encryption_configuration = {
    rule = {
      apply_server_side_encryption_by_default = {
        kms_master_key_id = aws_kms_key.this.arn
        sse_algorithm     = "aws:kms"
      }
    }
  }

  # Block every form of public access.
  acl                     = "private"
  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true

  # Deny non-TLS requests (see data.aws_iam_policy_document.force_secure_transport).
  attach_policy = true
  policy        = data.aws_iam_policy_document.force_secure_transport.json

  create_bucket = true
}
|
||||
23
remote-state/dynamo.tf
Normal file
23
remote-state/dynamo.tf
Normal file
@@ -0,0 +1,23 @@
|
||||
locals {
  # Hash-key attribute name the Terraform S3 backend expects on its
  # DynamoDB lock table.
  dynamodb_lock_key_id = "LockID"
}
|
||||
|
||||
# DynamoDB table used by the Terraform S3 backend for state locking.
resource "aws_dynamodb_table" "lock" {
  name     = local.table_name
  hash_key = local.dynamodb_lock_key_id

  billing_mode = var.table_billing_mode
  # Capacities only take effect when billing_mode = "PROVISIONED".
  write_capacity = var.table_write_capacity
  read_capacity  = var.table_read_capacity

  # The backend stores the lock id as a string hash key.
  attribute {
    name = local.dynamodb_lock_key_id
    type = "S"
  }

  # Optional encryption at rest with the same CMK as the state bucket.
  server_side_encryption {
    enabled     = var.table_encryption_at_rest
    kms_key_arn = aws_kms_key.this.arn
  }

}
|
||||
14
remote-state/locals.tf
Normal file
14
remote-state/locals.tf
Normal file
@@ -0,0 +1,14 @@
|
||||
# Region of the configured AWS provider; embedded in default resource names.
data "aws_region" "current" {}

locals {
  region_name = data.aws_region.current.name

  # Default names follow "<prefix>-<region>-..."; try() collapses to "" when
  # var.name_prefix is null (the interpolation would otherwise error), in
  # which case an explicit *_override is expected.
  default_bucket_name = try("${var.name_prefix}-${local.region_name}-states", "")
  bucket_name         = var.bucket_name_override != null ? var.bucket_name_override : local.default_bucket_name

  default_table_name = try("${var.name_prefix}-${local.region_name}-tf-state-locks", "")
  table_name         = var.table_name_override != null ? var.table_name_override : local.default_table_name

  # Name of the IAM policy granting access to the backend resources.
  default_terraform_iam_policy_name = "terraforming-${local.table_name}"
  terraform_iam_policy_name         = local.default_terraform_iam_policy_name
}
|
||||
19
remote-state/outputs.tf
Normal file
19
remote-state/outputs.tf
Normal file
@@ -0,0 +1,19 @@
|
||||
# Full KMS key resource (exposes arn, key_id, ...).
output "kms_key" {
  value = aws_kms_key.this
}

# Full DynamoDB lock-table resource.
output "lock_table" {
  value = aws_dynamodb_table.lock
}

# IAM policy granting access to the backend bucket, table, and key.
output "terraform_policy" {
  value = aws_iam_policy.terraform
}

# Resolved bucket name (override or derived default).
output "bucket_name" {
  value = local.bucket_name
}

# Resolved lock-table name (override or derived default).
output "lock_table_name" {
  value = local.table_name
}
|
||||
48
remote-state/policy.tf
Normal file
48
remote-state/policy.tf
Normal file
@@ -0,0 +1,48 @@
|
||||
# Permissions a principal needs to operate the Terraform S3 backend built
# by this module: bucket listing, state object read/write, lock-table
# item access, and use of the encryption key.
data "aws_iam_policy_document" "access_state" {
  # List the bucket and read its versioning configuration.
  statement {
    effect    = "Allow"
    actions   = ["s3:ListBucket", "s3:GetBucketVersioning"]
    resources = [module.states_bucket.s3_bucket_arn]
  }

  # Read and write the state objects themselves.
  statement {
    effect    = "Allow"
    actions   = ["s3:GetObject", "s3:PutObject"]
    resources = ["${module.states_bucket.s3_bucket_arn}/*"]
  }

  # Acquire/release state locks on the DynamoDB table.
  statement {
    effect = "Allow"
    actions = [
      "dynamodb:GetItem",
      "dynamodb:PutItem",
      "dynamodb:DeleteItem",
      "dynamodb:DescribeTable",
    ]
    resources = [aws_dynamodb_table.lock.arn]
  }

  # kms:ListKeys does not support resource-level scoping, hence "*".
  statement {
    effect = "Allow"
    actions = [
      "kms:ListKeys"
    ]
    resources = ["*"]
  }

  # Encrypt/decrypt state objects with the module's CMK.
  statement {
    effect = "Allow"
    actions = [
      "kms:Encrypt",
      "kms:Decrypt",
      "kms:DescribeKey",
      "kms:GenerateDataKey",
    ]
    resources = [aws_kms_key.this.arn]
  }
}
|
||||
|
||||
# Attachable IAM policy so CI users/roles can use the S3 backend created
# by this module.
resource "aws_iam_policy" "terraform" {
  name   = local.terraform_iam_policy_name
  policy = data.aws_iam_policy_document.access_state.json
}
|
||||
8
remote-state/provider.tf
Normal file
8
remote-state/provider.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
# Provider requirements for the remote-state module.
terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
      # Any 4.x release from 4.9 onward.
      version = "~> 4.9"
    }
  }
}
|
||||
59
remote-state/variables.tf
Normal file
59
remote-state/variables.tf
Normal file
@@ -0,0 +1,59 @@
|
||||
## Naming

variable "name_prefix" {
  description = "Resource names prefixed by this string."

  type     = string
  nullable = true
  default  = null
}

## Bucket options

variable "bucket_name_override" {
  description = "Explicit name for the remote state bucket. If not specified, the bucket will be named as {name_prefix}-{region_name}-states"

  type     = string
  nullable = true
  default  = null
}


## DynamoDB Table for locks

variable "table_name_override" {
  description = "Explicit name for the remote state lock DynamoDB table. If not specified, the table will be named as {name_prefix}-{region_name}-tf-state-locks"

  type     = string
  nullable = true
  default  = null
}


variable "table_encryption_at_rest" {
  # Typo fix: original description read "Wheather or not apply ...".
  description = "Whether or not to apply encryption at rest for the DynamoDB."

  type    = bool
  default = false
}

variable "table_billing_mode" {
  description = "Table billing mode. Can be PAY_PER_REQUEST or PROVISIONED"
  type        = string

  default = "PAY_PER_REQUEST"
}

# Capacity values below are only honoured when billing_mode = "PROVISIONED".
variable "table_write_capacity" {
  description = "(Optional) The number of write units for the lock table. If the billing_mode is PROVISIONED, this field is required"
  type        = number

  default = 0
}

variable "table_read_capacity" {
  description = "(Optional) The number of read units for this table. If the billing_mode is PROVISIONED, this field is required"
  type        = number

  default = 0
}
|
||||
Reference in New Issue
Block a user