1. Create the infrastructure to fulfill the requirements using Terraform
TOC
- Create Empty AWS EKS Worker nodes
- Create an application from k8s manifest files (without GitOps)
- Horizontal Pod Autoscaler (HPA)
- Cluster Nodes Autoscaler
1.1 Create Empty AWS EKS Worker nodes
1.1.1 Create manifest files as below
vpc.tf
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "3.14.2"
name = "eks-vpc1"
cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
public_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
}
versions.tf
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.15.0"
}
random = {
source = "hashicorp/random"
version = "3.1.0"
}
}
required_version = "~> 1.2.0"
}
variables.tf
variable "region" {
description = "AWS region"
type = string
default = "ap-southeast-1"
}
security-groups.tf
resource "aws_security_group" "node_group_one" {
name_prefix = "node_group_one"
vpc_id = module.vpc.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"10.0.0.0/8",
]
}
}
resource "aws_security_group" "node_group_two" {
name_prefix = "node_group_two"
vpc_id = module.vpc.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"192.168.0.0/16",
]
}
}
outputs.tf
output "cluster_id" {
description = "EKS cluster ID"
value = module.eks.cluster_id
}
output "cluster_endpoint" {
description = "Endpoint for EKS control plane"
value = module.eks.cluster_endpoint
}
output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane"
value = module.eks.cluster_security_group_id
}
output "region" {
description = "AWS region"
value = var.region
}
output "cluster_name" {
description = "Kubernetes Cluster Name"
value = local.cluster_name
}
main.tf
provider "aws" {
region = var.region
}
data "aws_availability_zones" "available" {}
locals {
cluster_name = "education-eks-${random_string.suffix.result}"
}
resource "random_string" "suffix" {
length = 8
special = false
}
eks-cluster.tf
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "18.26.6"
cluster_name = local.cluster_name
cluster_version = "1.22"
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"
attach_cluster_primary_security_group = true
# Disabling and using externally provided security groups
create_security_group = false
}
eks_managed_node_groups = {
one = {
name = "node-group-1"
instance_types = ["t3.small"]
min_size = 1
max_size = 10
desired_size = 1
pre_bootstrap_user_data = <<-EOT
echo 'foo bar'
EOT
vpc_security_group_ids = [
aws_security_group.node_group_one.id
]
}
two = {
name = "node-group-2"
instance_types = ["t3.medium"]
min_size = 1
max_size = 2
desired_size = 1
pre_bootstrap_user_data = <<-EOT
echo 'foo bar'
EOT
vpc_security_group_ids = [
aws_security_group.node_group_two.id
]
}
}
}
1.1.2 Execute the Terraform scripts
terraform init
terraform apply
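When the apply finishes, point kubectl at the new cluster using the outputs defined in outputs.tf. A minimal sketch, assuming the AWS CLI is already configured with credentials for the same account:
# update kubeconfig from the Terraform outputs (region and cluster_name are defined above)
aws eks update-kubeconfig --region $(terraform output -raw region) --name $(terraform output -raw cluster_name)
# the worker nodes should report Ready within a few minutes
kubectl get nodes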
1.2 Create an application from k8s manifest files (without GitOps)
1.2.1 Create manifest files
eks-example-dep.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: eks-example
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: eks-example
name: deployment-example
spec:
selector:
matchLabels:
app.kubernetes.io/name: app-example
replicas: 3
template:
metadata:
labels:
app.kubernetes.io/name: app-example
spec:
containers:
- image: public.ecr.aws/w1c7z8j7/devops/exercise:v1
  imagePullPolicy: Always
  name: app-example
  ports:
  - containerPort: 80
  resources:
    requests:
      cpu: 100m # CPU requests are required for the HPA utilization metric in section 1.3; 100m is an example value
---
apiVersion: v1
kind: Service
metadata:
namespace: eks-example
name: service-example
spec:
ports:
- port: 80
targetPort: 80
protocol: TCP
type: NodePort
selector:
app.kubernetes.io/name: app-example
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: eks-example
name: ingress-example
annotations:
kubernetes.io/ingress.class: alb
alb.ingress.kubernetes.io/scheme: internet-facing
alb.ingress.kubernetes.io/target-type: ip
spec:
rules:
- http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: service-example
port:
number: 80
1.2.2 Deploy the demo application
kubectl apply -f eks-example-dep.yaml
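A quick sanity check after deploying; note that the Ingress only receives an ADDRESS once the AWS Load Balancer Controller, which this manifest assumes is installed, reconciles it:
# pods, service, and ingress should all appear in the eks-example namespace
kubectl -n eks-example get pods
kubectl -n eks-example get svc service-example
kubectl -n eks-example get ingress ingress-example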
1.3 Horizontal Pod Autoscaler (HPA)
1.3.1 Install the metrics server
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
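Before creating the HPA, you can verify that the metrics server is up; kubectl top only returns data after the first scrape (usually under a minute):
kubectl -n kube-system get deployment metrics-server
kubectl top nodes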
1.3.2 Create HPA manifest
hpa.yml
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: eks-example
  namespace: eks-example
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: deployment-example
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      targetAverageUtilization: 50
1.3.3 Apply the HPA manifest
kubectl apply -f hpa.yml
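To see the HPA react, generate some load against the service and watch the replica count. A rough sketch; load-generator is an arbitrary pod name, and the wget loop is just an easy way to burn CPU on the app pods:
# run a throwaway pod that hammers the service from inside the cluster
kubectl -n eks-example run load-generator --image=busybox --restart=Never -- /bin/sh -c "while true; do wget -q -O- http://service-example; done"
# TARGETS should climb past 50% and REPLICAS should rise toward 10
kubectl -n eks-example get hpa eks-example --watch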
1.4 Cluster Nodes Autoscaler
1.4.1 Create the manifest file
cluster_autoscaler.yml
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
name: cluster-autoscaler
namespace: kube-system
annotations:
eks.amazonaws.com/role-arn: <YOUR CLUSTER AUTOSCALER ROLE ARN>
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: cluster-autoscaler
labels:
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
rules:
- apiGroups: [""]
resources: ["events", "endpoints"]
verbs: ["create", "patch"]
- apiGroups: [""]
resources: ["pods/eviction"]
verbs: ["create"]
- apiGroups: [""]
resources: ["pods/status"]
verbs: ["update"]
- apiGroups: [""]
resources: ["endpoints"]
resourceNames: ["cluster-autoscaler"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["watch", "list", "get", "update"]
- apiGroups: [""]
resources:
- "namespaces"
- "pods"
- "services"
- "replicationcontrollers"
- "persistentvolumeclaims"
- "persistentvolumes"
verbs: ["watch", "list", "get"]
- apiGroups: ["extensions"]
resources: ["replicasets", "daemonsets"]
verbs: ["watch", "list", "get"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["watch", "list"]
- apiGroups: ["apps"]
resources: ["statefulsets", "replicasets", "daemonsets"]
verbs: ["watch", "list", "get"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses", "csinodes", "csidrivers", "csistoragecapacities"]
verbs: ["watch", "list", "get"]
- apiGroups: ["batch", "extensions"]
resources: ["jobs"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
- apiGroups: ["coordination.k8s.io"]
resourceNames: ["cluster-autoscaler"]
resources: ["leases"]
verbs: ["get", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: cluster-autoscaler
namespace: kube-system
labels:
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create","list","watch"]
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"]
verbs: ["delete", "get", "update", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-autoscaler
labels:
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-autoscaler
subjects:
- kind: ServiceAccount
name: cluster-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cluster-autoscaler
namespace: kube-system
labels:
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cluster-autoscaler
subjects:
- kind: ServiceAccount
name: cluster-autoscaler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cluster-autoscaler
namespace: kube-system
labels:
app: cluster-autoscaler
spec:
replicas: 1
selector:
matchLabels:
app: cluster-autoscaler
template:
metadata:
labels:
app: cluster-autoscaler
annotations:
prometheus.io/scrape: 'true'
prometheus.io/port: '8085'
spec:
priorityClassName: system-cluster-critical
securityContext:
runAsNonRoot: true
runAsUser: 65534
fsGroup: 65534
serviceAccountName: cluster-autoscaler
containers:
- image: k8s.gcr.io/autoscaling/cluster-autoscaler:v1.22.2 # match the autoscaler's minor version to the cluster's Kubernetes version (1.22)
name: cluster-autoscaler
resources:
limits:
cpu: 100m
memory: 600Mi
requests:
cpu: 100m
memory: 600Mi
command:
- ./cluster-autoscaler
- --v=4
- --stderrthreshold=info
- --cloud-provider=aws
- --skip-nodes-with-local-storage=false
- --expander=least-waste
- --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/<YOUR CLUSTER NAME>
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs/ca-certificates.crt #/etc/ssl/certs/ca-bundle.crt for Amazon Linux Worker Nodes
readOnly: true
imagePullPolicy: "Always"
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs/ca-bundle.crt"
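Note that --node-group-auto-discovery only finds Auto Scaling groups carrying the two tags named in the flag, and node groups created as above do not necessarily have them (eksctl applies these tags, the Terraform module does not by default). If scaling never triggers, a sketch for tagging an ASG by hand; both placeholders are yours to fill in:
aws autoscaling create-or-update-tags --tags \
ResourceId=<YOUR ASG NAME>,ResourceType=auto-scaling-group,Key=k8s.io/cluster-autoscaler/enabled,Value=true,PropagateAtLaunch=true \
ResourceId=<YOUR ASG NAME>,ResourceType=auto-scaling-group,Key=k8s.io/cluster-autoscaler/<YOUR CLUSTER NAME>,Value=owned,PropagateAtLaunch=true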
1.4.2 Apply config for the cluster autoscaler
kubectl apply -f cluster_autoscaler.yml
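To confirm the autoscaler is healthy and watch its decisions (pods that stay Pending for lack of capacity show up as scale-up events in the log):
kubectl -n kube-system get deployment cluster-autoscaler
kubectl -n kube-system logs -f deployment/cluster-autoscaler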