diff --git a/hack/validate-terraform.sh b/hack/validate-terraform.sh index a085a0ff6e..e95811f1b4 100755 --- a/hack/validate-terraform.sh +++ b/hack/validate-terraform.sh @@ -36,4 +36,8 @@ done terraform -chdir="${conf_dir}" init -backend=false +echo "" +echo "Validating ${conf_dir}" +echo "" + terraform -chdir="${conf_dir}" validate \ No newline at end of file diff --git a/manifests/modules/fastpath/developers/.workshop/cleanup.sh b/manifests/modules/fastpath/developers/.workshop/cleanup.sh new file mode 100644 index 0000000000..a89b1fb7e3 --- /dev/null +++ b/manifests/modules/fastpath/developers/.workshop/cleanup.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +set -e + +# Common +kubectl delete namespace ui --ignore-not-found +kubectl delete namespace catalog --ignore-not-found +kubectl delete namespace carts --ignore-not-found + +# Autoscaling +kubectl delete pod load-generator --ignore-not-found + +uninstall-helm-chart keda keda +kubectl delete ns keda --ignore-not-found + +# Identity +POD_ASSOCIATION_ID=$(aws eks list-pod-identity-associations --region $AWS_REGION --cluster-name $EKS_CLUSTER_NAME --service-account carts --namespace carts --output text --query 'associations[0].associationId') + +if [ "$POD_ASSOCIATION_ID" != "None" ]; then + logmessage "Deleting EKS Pod Identity Association..." + + aws eks delete-pod-identity-association --region $AWS_REGION --association-id $POD_ASSOCIATION_ID --cluster-name $EKS_CLUSTER_NAME + +fi + +pod_identity_check=$(aws eks list-addons --cluster-name $EKS_CLUSTER_NAME --region $AWS_REGION --query "addons[? @ == 'eks-pod-identity-agent']" --output text) + +if [ ! -z "$pod_identity_check" ]; then + logmessage "Deleting EKS Pod Identity Agent addon..." + + aws eks delete-addon --cluster-name $EKS_CLUSTER_NAME --addon-name eks-pod-identity-agent --region $AWS_REGION + + aws eks wait addon-deleted --cluster-name $EKS_CLUSTER_NAME --addon-name eks-pod-identity-agent --region $AWS_REGION +fi + +# Storage +csi_check=$(aws eks list-addons --cluster-name $EKS_CLUSTER_NAME --query "addons[? @ == 'aws-efs-csi-driver']" --output text) + +logmessage "Deleting EFS storage class..." + +kubectl delete storageclass efs-sc --ignore-not-found + +if [ ! -z "$csi_check" ]; then + logmessage "Deleting EFS CSI driver addon..." 
+ + aws eks delete-addon --cluster-name $EKS_CLUSTER_NAME --addon-name aws-efs-csi-driver + + aws eks wait addon-deleted --cluster-name $EKS_CLUSTER_NAME --addon-name aws-efs-csi-driver +fi + +# Ingress +uninstall-helm-chart external-dns external-dns + +uninstall-helm-chart aws-load-balancer-controller kube-system \ No newline at end of file diff --git a/manifests/modules/fastpath/developers/.workshop/terraform/autoscaling.tf b/manifests/modules/fastpath/developers/.workshop/terraform/autoscaling.tf new file mode 100644 index 0000000000..0c888caaf3 --- /dev/null +++ b/manifests/modules/fastpath/developers/.workshop/terraform/autoscaling.tf @@ -0,0 +1,11 @@ +module "iam_assumable_role_keda" { + source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc" + version = "5.59.0" + create_role = true + role_name = "${var.addon_context.eks_cluster_id}-keda" + provider_url = var.addon_context.eks_oidc_issuer_url + role_policy_arns = ["arn:${data.aws_partition.current.partition}:iam::aws:policy/CloudWatchReadOnlyAccess"] + oidc_fully_qualified_subjects = ["system:serviceaccount:keda:keda-operator"] + + tags = var.tags +} diff --git a/manifests/modules/fastpath/developers/.workshop/terraform/exposing.tf b/manifests/modules/fastpath/developers/.workshop/terraform/exposing.tf new file mode 100644 index 0000000000..f9707e45a3 --- /dev/null +++ b/manifests/modules/fastpath/developers/.workshop/terraform/exposing.tf @@ -0,0 +1,42 @@ +resource "aws_route53_zone" "private_zone" { + name = "retailstore.com" + comment = "Private hosted zone for EKS Workshop use" + vpc { + vpc_id = data.aws_vpc.this.id + } + + force_destroy = true + + tags = { + created-by = "eks-workshop-v2" + env = var.addon_context.eks_cluster_id + } +} + +module "eks_blueprints_addons" { + source = "aws-ia/eks-blueprints-addons/aws" + version = "1.21.1" + + cluster_name = var.addon_context.eks_cluster_id + cluster_endpoint = var.addon_context.aws_eks_cluster_endpoint + cluster_version = var.eks_cluster_version + oidc_provider_arn = var.addon_context.eks_oidc_provider_arn + + enable_external_dns = true + external_dns_route53_zone_arns = [aws_route53_zone.private_zone.arn] + external_dns = { + create_role = true + role_name = "${var.addon_context.eks_cluster_id}-external-dns" + policy_name = "${var.addon_context.eks_cluster_id}-external-dns" + } + + enable_aws_load_balancer_controller = true + aws_load_balancer_controller = { + role_name = "${var.addon_context.eks_cluster_id}-alb-controller" + policy_name = "${var.addon_context.eks_cluster_id}-alb-controller" + } + + create_kubernetes_resources = false + + observability_tag = null +} diff --git a/manifests/modules/fastpath/developers/.workshop/terraform/main.tf b/manifests/modules/fastpath/developers/.workshop/terraform/main.tf new file mode 100644 index 0000000000..87ab7b7c8d --- /dev/null +++ b/manifests/modules/fastpath/developers/.workshop/terraform/main.tf @@ -0,0 +1,10 @@ +data "aws_caller_identity" "current" {} +data "aws_region" "current" {} +data "aws_partition" "current" {} + +data "aws_vpc" "this" { + tags = { + created-by = "eks-workshop-v2" + env = var.addon_context.eks_cluster_id + } +} diff --git a/manifests/modules/fastpath/developers/.workshop/terraform/outputs.tf b/manifests/modules/fastpath/developers/.workshop/terraform/outputs.tf new file mode 100644 index 0000000000..02533c655e --- /dev/null +++ b/manifests/modules/fastpath/developers/.workshop/terraform/outputs.tf @@ -0,0 +1,17 @@ +output "environment_variables" { + description = "Environment 
variables to be added to the IDE shell"
+  value = {
+    LBC_CHART_VERSION = var.load_balancer_controller_chart_version
+    LBC_ROLE_ARN = module.eks_blueprints_addons.aws_load_balancer_controller.iam_role_arn
+    DNS_CHART_VERSION = var.external_dns_chart_version
+    DNS_ROLE_ARN = module.eks_blueprints_addons.external_dns.iam_role_arn
+
+    EFS_CSI_ADDON_ROLE = module.efs_csi_driver_irsa.iam_role_arn
+
+    CARTS_DYNAMODB_TABLENAME = aws_dynamodb_table.carts.name
+    CARTS_IAM_ROLE = module.iam_assumable_role_carts.iam_role_arn
+
+    KEDA_ROLE_ARN = module.iam_assumable_role_keda.iam_role_arn
+    KEDA_CHART_VERSION = var.keda_chart_version
+  }
+}
diff --git a/manifests/modules/fastpath/developers/.workshop/terraform/pod_identity.tf b/manifests/modules/fastpath/developers/.workshop/terraform/pod_identity.tf
new file mode 100644
index 0000000000..faf61e2ab0
--- /dev/null
+++ b/manifests/modules/fastpath/developers/.workshop/terraform/pod_identity.tf
@@ -0,0 +1,166 @@
+resource "aws_dynamodb_table" "carts" {
+  #checkov:skip=CKV2_AWS_28:Point in time backup not required for workshop
+  name = "${var.addon_context.eks_cluster_id}-carts"
+  hash_key = "id"
+  billing_mode = "PAY_PER_REQUEST"
+  stream_enabled = true
+  stream_view_type = "NEW_AND_OLD_IMAGES"
+
+  server_side_encryption {
+    enabled = true
+    kms_key_arn = aws_kms_key.cmk_dynamodb.arn
+  }
+
+  attribute {
+    name = "id"
+    type = "S"
+  }
+
+  attribute {
+    name = "customerId"
+    type = "S"
+  }
+
+  global_secondary_index {
+    name = "idx_global_customerId"
+    hash_key = "customerId"
+    projection_type = "ALL"
+  }
+
+  tags = var.tags
+}
+
+module "iam_assumable_role_carts" {
+  source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role"
+  version = "5.59.0"
+  create_role = true
+  role_requires_mfa = false
+  role_name = "${var.addon_context.eks_cluster_id}-carts-dynamo"
+  trusted_role_services = ["pods.eks.amazonaws.com"]
+  custom_role_policy_arns = [aws_iam_policy.carts_dynamo.arn]
+  trusted_role_actions = ["sts:AssumeRole", "sts:TagSession"]
+
+  tags = var.tags
+}
+
+resource "aws_iam_policy" "carts_dynamo" {
+  name = "${var.addon_context.eks_cluster_id}-carts-dynamo"
+  path = "/"
+  description = "Dynamo policy for carts application"
+
+  policy = < 3d21h
+$ kubectl -n kube-system get pods -l app.kubernetes.io/name=eks-pod-identity-agent
+NAME                            READY   STATUS    RESTARTS   AGE
+eks-pod-identity-agent-4tn28    1/1     Running   0          3d21h
+eks-pod-identity-agent-hslc5    1/1     Running   0          3d21h
+eks-pod-identity-agent-thvf5    1/1     Running   0          3d21h
+```
+
+An IAM role, which provides the required permissions for the `carts` service to read and write to the DynamoDB table, was created when you ran the `prepare-environment` script in the first step of this module. You can view the policy as shown below:
+
+```bash
+$ aws iam get-policy-version \
+  --version-id v1 --policy-arn \
+  arn:aws:iam::${AWS_ACCOUNT_ID}:policy/${EKS_CLUSTER_NAME}-carts-dynamo \
+  --query 'PolicyVersion.Document' | jq .
+{
+  "Statement": [
+    {
+      "Action": "dynamodb:*",
+      "Effect": "Allow",
+      "Resource": [
+        "arn:aws:dynamodb:us-west-2:1234567890:table/eks-workshop-carts",
+        "arn:aws:dynamodb:us-west-2:1234567890:table/eks-workshop-carts/index/*"
+      ],
+      "Sid": "AllAPIActionsOnCart"
+    }
+  ],
+  "Version": "2012-10-17"
+}
+```
+
+The role has also been configured with the appropriate trust relationship, which allows the EKS Service Principal to assume this role for Pod Identity.
You can view it with the command below:
+
+```bash
+$ aws iam get-role \
+  --query 'Role.AssumeRolePolicyDocument' \
+  --role-name ${EKS_CLUSTER_NAME}-carts-dynamo | jq .
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Service": "pods.eks.amazonaws.com"
+      },
+      "Action": [
+        "sts:AssumeRole",
+        "sts:TagSession"
+      ]
+    }
+  ]
+}
+```
+
+Next, we will use the Amazon EKS Pod Identity feature to associate an AWS IAM role with the Kubernetes Service Account that will be used by our deployment. To create the association, run the following command:
+
+```bash wait=30
+$ aws eks create-pod-identity-association --cluster-name ${EKS_CLUSTER_NAME} \
+  --role-arn arn:aws:iam::${AWS_ACCOUNT_ID}:role/${EKS_CLUSTER_NAME}-carts-dynamo \
+  --namespace carts --service-account carts
+{
+  "association": {
+    "clusterName": "eks-workshop",
+    "namespace": "carts",
+    "serviceAccount": "carts",
+    "roleArn": "arn:aws:iam::1234567890:role/eks-workshop-carts-dynamo",
+    "associationArn": "arn:aws:eks:us-west-2:1234567890:podidentityassociation/eks-workshop/a-abcdefghijklmnop1",
+    "associationId": "a-abcdefghijklmnop1",
+    "tags": {},
+    "createdAt": "2024-01-09T16:16:38.163000+00:00",
+    "modifiedAt": "2024-01-09T16:16:38.163000+00:00"
+  }
+}
+```
+
+All that's left is to verify that the `carts` Deployment is using the `carts` Service Account:
+
+```bash
+$ kubectl -n carts describe deployment carts | grep 'Service Account'
+  Service Account:  carts
+```
+
+With the Service Account verified, let's recycle the `carts` Pods:
+
+```bash hook=enable-pod-identity hookTimeout=430
+$ kubectl -n carts rollout restart deployment/carts
+deployment.apps/carts restarted
+$ kubectl -n carts rollout status deployment/carts
+Waiting for deployment "carts" rollout to finish: 1 old replicas are pending termination...
+deployment "carts" successfully rolled out
+```
+
+In the next section we'll verify whether the DynamoDB permission issue we encountered earlier has been resolved for the carts application.
diff --git a/website/docs/fastpaths/developer/amazon-eks-pod-identity/using-dynamo.md b/website/docs/fastpaths/developer/amazon-eks-pod-identity/using-dynamo.md
new file mode 100644
index 0000000000..e8f3bfd699
--- /dev/null
+++ b/website/docs/fastpaths/developer/amazon-eks-pod-identity/using-dynamo.md
@@ -0,0 +1,76 @@
+---
+title: "Using Amazon DynamoDB"
+sidebar_position: 32
+---
+
+The first step in this process is to re-configure the carts service to use a DynamoDB table that has already been created for us. The application loads most of its configuration from a ConfigMap. Let's take a look at it:
+
+```bash
+$ kubectl -n carts get -o yaml cm carts
+apiVersion: v1
+data:
+  AWS_ACCESS_KEY_ID: key
+  AWS_SECRET_ACCESS_KEY: secret
+  RETAIL_CART_PERSISTENCE_DYNAMODB_CREATE_TABLE: "true"
+  RETAIL_CART_PERSISTENCE_DYNAMODB_ENDPOINT: http://carts-dynamodb:8000
+  RETAIL_CART_PERSISTENCE_DYNAMODB_TABLE_NAME: Items
+  RETAIL_CART_PERSISTENCE_PROVIDER: dynamodb
+kind: ConfigMap
+metadata:
+  name: carts
+  namespace: carts
+```
+
+The following kustomization overwrites the ConfigMap, removing the DynamoDB endpoint configuration so that the SDK uses the real DynamoDB service instead of our test Pod. It also sets the `RETAIL_CART_PERSISTENCE_DYNAMODB_TABLE_NAME` configuration key to the name of the table that has already been created for us, which is pulled from the `CARTS_DYNAMODB_TABLENAME` environment variable.
+ +```kustomization +modules/security/eks-pod-identity/dynamo/kustomization.yaml +ConfigMap/carts +``` + +Let's check the value of `CARTS_DYNAMODB_TABLENAME` then run Kustomize to use the real DynamoDB service: + +```bash +$ echo $CARTS_DYNAMODB_TABLENAME +eks-workshop-carts +$ kubectl kustomize ~/environment/eks-workshop/modules/security/eks-pod-identity/dynamo \ + | envsubst | kubectl apply -f- +``` + +This will overwrite our ConfigMap with new values: + +```bash +$ kubectl -n carts get cm carts -o yaml +apiVersion: v1 +data: + RETAIL_CART_PERSISTENCE_DYNAMODB_TABLE_NAME: eks-workshop-carts + RETAIL_CART_PERSISTENCE_PROVIDER: dynamodb +kind: ConfigMap +metadata: + labels: + app: carts + name: carts + namespace: carts +``` + +Now, we need to recycle all the carts pods to pick up our new ConfigMap contents: + +```bash expectError=true hook=enable-dynamo +$ kubectl rollout restart -n carts deployment/carts +deployment.apps/carts restarted +$ kubectl rollout status -n carts deployment/carts --timeout=20s +Waiting for deployment "carts" rollout to finish: 1 old replicas are pending termination... +error: timed out waiting for the condition +``` + +It looks like our change failed to deploy properly. We can confirm this by looking at the Pods: + +```bash +$ kubectl -n carts get pod +NAME READY STATUS RESTARTS AGE +carts-5d486d7cf7-8qxf9 1/1 Running 0 5m49s +carts-df76875ff-7jkhr 0/1 CrashLoopBackOff 3 (36s ago) 2m2s +carts-dynamodb-698674dcc6-hw2bg 1/1 Running 0 20m +``` + +What's gone wrong? diff --git a/website/docs/fastpaths/developer/amazon-eks-pod-identity/verifying-dynamo.md b/website/docs/fastpaths/developer/amazon-eks-pod-identity/verifying-dynamo.md new file mode 100644 index 0000000000..6f496091ad --- /dev/null +++ b/website/docs/fastpaths/developer/amazon-eks-pod-identity/verifying-dynamo.md @@ -0,0 +1,35 @@ +--- +title: "Verifying DynamoDB access" +sidebar_position: 35 +--- + +Now, with the `carts` Service Account associated with the authorized IAM role, the `carts` Pod has permission to access the DynamoDB table. Access the web store again and navigate to the shopping cart. + +```bash +$ ALB_HOSTNAME=$(kubectl get ingress ui -n ui -o yaml | yq .status.loadBalancer.ingress[0].hostname) +$ echo "http://$ALB_HOSTNAME" +http://k8s-ui-ui-a9797f0f61.elb.us-west-2.amazonaws.com +``` + +The `carts` Pod is able to reach the DynamoDB service and the shopping cart is now accessible! + +![Cart](/img/sample-app-screens/shopping-cart.webp) + +After the AWS IAM role is associated with the Service Account, any newly created Pods using that Service Account will be intercepted by the [EKS Pod Identity webhook](https://github.com/aws/amazon-eks-pod-identity-webhook). This webhook runs on the Amazon EKS cluster's control plane and is fully managed by AWS. 
Take a closer look at the new `carts` Pod to see the new environment variables: + +```bash +$ kubectl -n carts exec deployment/carts -- env | grep AWS +AWS_STS_REGIONAL_ENDPOINTS=regional +AWS_DEFAULT_REGION=us-west-2 +AWS_REGION=us-west-2 +AWS_CONTAINER_CREDENTIALS_FULL_URI=http://169.254.170.23/v1/credentials +AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE=/var/run/secrets/pods.eks.amazonaws.com/serviceaccount/eks-pod-identity-token +``` + +Notable points about these environment variables: + +- `AWS_DEFAULT_REGION` - The region is set automatically to the same as our EKS cluster +- `AWS_STS_REGIONAL_ENDPOINTS` - Regional STS endpoints are configured to avoid putting too much pressure on the global endpoint in `us-east-1` +- `AWS_CONTAINER_CREDENTIALS_FULL_URI` - This variable tells AWS SDKs how to obtain credentials using the [HTTP credential provider](https://docs.aws.amazon.com/sdkref/latest/guide/feature-container-credentials.html). This means that EKS Pod Identity does not need to inject credentials via something like an `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` pair, and instead the SDKs can have temporary credentials vended to them via the EKS Pod Identity mechanism. You can read more about how this functions in the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/pod-identities.html). + +You have successfully configured Pod Identity in your application. diff --git a/website/docs/fastpaths/developer/efs/assets/efs-storage.webp b/website/docs/fastpaths/developer/efs/assets/efs-storage.webp new file mode 100644 index 0000000000..a7612185e4 Binary files /dev/null and b/website/docs/fastpaths/developer/efs/assets/efs-storage.webp differ diff --git a/website/docs/fastpaths/developer/efs/assets/efsfilesystemscreenshort.webp b/website/docs/fastpaths/developer/efs/assets/efsfilesystemscreenshort.webp new file mode 100644 index 0000000000..f35f89437e Binary files /dev/null and b/website/docs/fastpaths/developer/efs/assets/efsfilesystemscreenshort.webp differ diff --git a/website/docs/fastpaths/developer/efs/assets/placeholder.jpg b/website/docs/fastpaths/developer/efs/assets/placeholder.jpg new file mode 100644 index 0000000000..3bc3cbfec2 Binary files /dev/null and b/website/docs/fastpaths/developer/efs/assets/placeholder.jpg differ diff --git a/website/docs/fastpaths/developer/efs/deployment-with-efs.md b/website/docs/fastpaths/developer/efs/deployment-with-efs.md new file mode 100644 index 0000000000..6c0dbbc870 --- /dev/null +++ b/website/docs/fastpaths/developer/efs/deployment-with-efs.md @@ -0,0 +1,160 @@ +--- +title: Dynamic provisioning using EFS +sidebar_position: 30 +--- + +Now that we understand the EFS storage class for Kubernetes, let's create a [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) and modify the UI component to mount this volume. + +First, let's examine the `efspvclaim.yaml` file: + +::yaml{file="manifests/modules/fundamentals/storage/efs/deployment/efspvclaim.yaml" paths="kind,spec.storageClassName,spec.resources.requests.storage"} + +1. The resource being defined is a PersistentVolumeClaim +2. This refers to the `efs-sc` storage class we created earlier +3. 
We are requesting 5GB of storage + +Now we'll update the UI component to reference the EFS PVC: + +```kustomization +modules/fundamentals/storage/efs/deployment/deployment.yaml +Deployment/ui +``` + +Apply these changes with the following command: + +```bash hook=efs-deployment +$ kubectl apply -k ~/environment/eks-workshop/modules/fundamentals/storage/efs/deployment +namespace/ui unchanged +serviceaccount/ui unchanged +configmap/ui unchanged +service/ui unchanged +persistentvolumeclaim/efs-claim created +deployment.apps/ui configured +$ kubectl rollout status --timeout=130s deployment/ui -n ui +``` + +Let's examine the `volumeMounts` in the deployment. Notice that our new volume named `efsvolume` is mounted at `/efs`: + +```bash +$ kubectl get deployment -n ui \ + -o yaml | yq '.items[].spec.template.spec.containers[].volumeMounts' +- mountPath: /efs + name: efsvolume +- mountPath: /tmp + name: tmp-volume +``` + +A PersistentVolume (PV) has been automatically created to fulfill our PersistentVolumeClaim (PVC): + +```bash +$ kubectl get pv +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +pvc-342a674d-b426-4214-b8b6-7847975ae121 5Gi RWX Delete Bound ui/efs-claim efs-sc 2m33s +``` + +Let's examine the details of our PersistentVolumeClaim (PVC): + +```bash +$ kubectl describe pvc -n ui +Name: efs-claim +Namespace: ui +StorageClass: efs-sc +Status: Bound +Volume: pvc-342a674d-b426-4214-b8b6-7847975ae121 +Labels: +Annotations: pv.kubernetes.io/bind-completed: yes + pv.kubernetes.io/bound-by-controller: yes + volume.beta.kubernetes.io/storage-provisioner: efs.csi.aws.com + volume.kubernetes.io/storage-provisioner: efs.csi.aws.com +Finalizers: [kubernetes.io/pvc-protection] +Capacity: 5Gi +Access Modes: RWX +VolumeMode: Filesystem +Used By: +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ExternalProvisioning 34s persistentvolume-controller waiting for a volume to be created, either by external provisioner "efs.csi.aws.com" or manually created by system administrator + Normal Provisioning 34s efs.csi.aws.com_efs-csi-controller-6b4ff45b65-fzqjb_7efe91cc-099a-45c7-8419-6f4b0a4f9e01 External provisioner is provisioning volume for claim "ui/efs-claim" + Normal ProvisioningSucceeded 33s efs.csi.aws.com_efs-csi-controller-6b4ff45b65-fzqjb_7efe91cc-099a-45c7-8419-6f4b0a4f9e01 Successfully provisioned volume pvc-342a674d-b426-4214-b8b6-7847975ae121 +``` + +At this point, the EFS file system is successfully mounted but currently empty: + +```bash +$ POD_1=$(kubectl -n ui get pods -l app.kubernetes.io/instance=ui -o jsonpath='{.items[0].metadata.name}') +$ kubectl exec --stdin $POD_1 -n ui -- bash -c 'ls /efs/' +``` + +Let's use a [Kubernetes Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) to populate the EFS volume with images: + +```bash +$ export PVC_NAME="efs-claim" +$ cat ~/environment/eks-workshop/modules/fundamentals/storage/populate-images-job.yaml | envsubst | kubectl apply -f - +$ kubectl wait --for=condition=complete -n ui \ + job/populate-images --timeout=300s +``` + +Now let's demonstrate the shared storage functionality by listing the current files in `/efs` through one of the UI component Pods: + +```bash +$ POD_1=$(kubectl -n ui get pods -l app.kubernetes.io/instance=ui -o jsonpath='{.items[0].metadata.name}') +$ kubectl exec --stdin $POD_1 -n ui -- bash -c 'ls /efs/' +1ca35e86-4b4c-4124-b6b5-076ba4134d0d.jpg +4f18544b-70a5-4352-8e19-0d070f46745d.jpg +631a3db5-ac07-492c-a994-8cd56923c112.jpg 
+79bce3f3-935f-4912-8c62-0d2f3e059405.jpg +8757729a-c518-4356-8694-9e795a9b3237.jpg +87e89b11-d319-446d-b9be-50adcca5224a.jpg +a1258cd2-176c-4507-ade6-746dab5ad625.jpg +cc789f85-1476-452a-8100-9e74502198e0.jpg +d27cf49f-b689-4a75-a249-d373e0330bb5.jpg +d3104128-1d14-4465-99d3-8ab9267c687b.jpg +d4edfedb-dbe9-4dd9-aae8-009489394955.jpg +d77f9ae6-e9a8-4a3e-86bd-b72af75cbc49.jpg +``` + +To further demonstrate the shared storage capabilities, let's create a new image called `placeholder.jpg` and add it to the EFS volume through the first Pod: + +```bash +$ POD_1=$(kubectl -n ui get pods -l app.kubernetes.io/instance=ui -o jsonpath='{.items[0].metadata.name}') +$ kubectl exec --stdin $POD_1 -n ui -- bash -c 'curl -sS -o /efs/placeholder.jpg https://placehold.co/600x400/jpg?text=EKS+Workshop\\nPlaceholder' +``` + +Now we'll verify that the second UI Pod can access this newly created file, demonstrating the shared nature of our EFS storage: + +```bash hook=sample-images +$ POD_2=$(kubectl -n ui get pods -o jsonpath='{.items[1].metadata.name}') +$ kubectl exec --stdin $POD_2 -n ui -- bash -c 'ls /efs/' +1ca35e86-4b4c-4124-b6b5-076ba4134d0d.jpg +4f18544b-70a5-4352-8e19-0d070f46745d.jpg +631a3db5-ac07-492c-a994-8cd56923c112.jpg +79bce3f3-935f-4912-8c62-0d2f3e059405.jpg +8757729a-c518-4356-8694-9e795a9b3237.jpg +87e89b11-d319-446d-b9be-50adcca5224a.jpg +a1258cd2-176c-4507-ade6-746dab5ad625.jpg +cc789f85-1476-452a-8100-9e74502198e0.jpg +d27cf49f-b689-4a75-a249-d373e0330bb5.jpg +d3104128-1d14-4465-99d3-8ab9267c687b.jpg +d4edfedb-dbe9-4dd9-aae8-009489394955.jpg +d77f9ae6-e9a8-4a3e-86bd-b72af75cbc49.jpg +placeholder.jpg <---------------- +``` + +As you can see, even though we created the file through the first Pod, the second Pod has immediate access to it because they're both accessing the same shared EFS file system. + +Finally, let's confirm that the image is accessible through the UI service: + +```bash hook=placeholder +$ ALB_HOSTNAME=$(kubectl get ingress ui -n ui -o yaml | yq .status.loadBalancer.ingress[0].hostname) +$ echo "http://$ALB_HOSTNAME/assets/img/products/placeholder.jpg" +http://k8s-ui-ui-a9797f0f61.elb.us-west-2.amazonaws.com/assets/img/products/placeholder.jpg +``` + +Visit the URL in your browser: + + + + + +We've successfully demonstrated how Amazon EFS provides persistent shared storage for workloads running on Amazon EKS. This solution allows multiple pods to read from and write to the same storage volume simultaneously, making it ideal for shared content hosting and other use cases requiring distributed file system access. diff --git a/website/docs/fastpaths/developer/efs/efs-csi-driver.md b/website/docs/fastpaths/developer/efs/efs-csi-driver.md new file mode 100644 index 0000000000..7251e11b69 --- /dev/null +++ b/website/docs/fastpaths/developer/efs/efs-csi-driver.md @@ -0,0 +1,81 @@ +--- +title: EFS CSI Driver +sidebar_position: 20 +--- + +Before diving into this section, you should be familiar with the Kubernetes storage objects (volumes, persistent volumes (PV), persistent volume claims (PVC), dynamic provisioning and ephemeral storage) that were introduced in the main [Storage](../index.md) section. + +The [Amazon Elastic File System Container Storage Interface (CSI) Driver](https://github.com/kubernetes-sigs/aws-efs-csi-driver) enables you to run stateful containerized applications by providing a CSI interface that allows Kubernetes clusters running on AWS to manage the lifecycle of Amazon EFS file systems. 
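+
+Later in this page we'll install the driver as an Amazon EKS add-on. If you're curious which versions of that add-on are available to your cluster, you can optionally list them first; this is just a side note and the version list will differ over time and by region:
+
+```bash
+$ aws eks describe-addon-versions --addon-name aws-efs-csi-driver \
+    --query 'addons[0].addonVersions[].addonVersion'
+```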
+ +The following architecture diagram illustrates how we will use EFS as persistent storage for our EKS pods: + +![Assets with EFS](./assets/efs-storage.webp) + +To utilize Amazon EFS with dynamic provisioning on our EKS cluster, we first need to confirm that we have the EFS CSI Driver installed. The driver implements the CSI specification which allows container orchestrators to manage Amazon EFS file systems throughout their lifecycle. + +For improved security and simplified management, you can run the Amazon EFS CSI driver as an Amazon EKS add-on. Since the required IAM role has already been created for us, we can proceed with installing the add-on: + +```bash timeout=300 wait=60 +$ aws eks create-addon --cluster-name $EKS_CLUSTER_NAME --addon-name aws-efs-csi-driver \ + --service-account-role-arn $EFS_CSI_ADDON_ROLE +$ aws eks wait addon-active --cluster-name $EKS_CLUSTER_NAME --addon-name aws-efs-csi-driver +``` + +Let's examine what the add-on has created in our EKS cluster. For example, a DaemonSet that runs a Pod on each node in our cluster: + +```bash +$ kubectl get daemonset efs-csi-node -n kube-system +NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE +efs-csi-node 3 3 3 3 3 kubernetes.io/os=linux 47s +``` + +The EFS CSI driver supports both dynamic and static provisioning: + +- **Dynamic provisioning**: The driver creates an access point for each PersistentVolume. This requires an existing AWS EFS file system that must be specified in the StorageClass parameters. +- **Static provisioning**: This also requires a pre-created AWS EFS file system, which can then be mounted as a volume inside a container using the driver. + +An EFS file system has been provisioned for us, along with mount targets and the required security group that includes an inbound rule allowing NFS traffic to the EFS mount points. Let's get its ID which we'll need later: + +```bash +$ export EFS_ID=$(aws efs describe-file-systems --query "FileSystems[?Name=='$EKS_CLUSTER_NAME-efs-assets'] | [0].FileSystemId" --output text) +$ echo $EFS_ID +fs-061cb5c5ed841a6b0 +``` + +Next, we'll create a [StorageClass](https://kubernetes.io/docs/concepts/storage/storage-classes/) object configured to use our pre-provisioned EFS file system and [EFS Access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) in provisioning mode using the `efsstorageclass.yaml` file. + +::yaml{file="manifests/modules/fundamentals/storage/efs/storageclass/efsstorageclass.yaml" paths="provisioner,parameters.fileSystemId"} + +1. Set the `provisioner` parameter to `efs.csi.aws.com` for the EFS CSI provisioner +2. Inject `EFS_ID` environment variable into the `filesystemid` parameter + + +Apply the kustomization: + +```bash +$ kubectl kustomize ~/environment/eks-workshop/modules/fundamentals/storage/efs/storageclass \ + | envsubst | kubectl apply -f- +storageclass.storage.k8s.io/efs-sc created +``` + +Let's examine the StorageClass. 
Note that it uses the EFS CSI driver as the provisioner and is configured for EFS access point provisioning mode with the file system ID we exported earlier: + +```bash +$ kubectl get storageclass +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +efs-sc efs.csi.aws.com Delete Immediate false 8m29s +$ kubectl describe sc efs-sc +Name: efs-sc +IsDefaultClass: No +Annotations: kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"storage.k8s.io/v1","kind":"StorageClass","metadata":{"annotations":{},"name":"efs-sc"},"parameters":{"directoryPerms":"700","fileSystemId":"fs-061cb5c5ed841a6b0","provisioningMode":"efs-ap"},"provisioner":"efs.csi.aws.com"} + +Provisioner: efs.csi.aws.com +Parameters: directoryPerms=700,fileSystemId=fs-061cb5c5ed841a6b0,provisioningMode=efs-ap +AllowVolumeExpansion: +MountOptions: +ReclaimPolicy: Delete +VolumeBindingMode: Immediate +Events: +``` + +Now that we understand the EFS StorageClass and how the EFS CSI driver works, we're ready to proceed to the next step where we'll modify the UI component to use the EFS `StorageClass` with Kubernetes dynamic volume provisioning and a PersistentVolume for storing product images. diff --git a/website/docs/fastpaths/developer/efs/existing-architecture.md b/website/docs/fastpaths/developer/efs/existing-architecture.md new file mode 100644 index 0000000000..be05ebdfcf --- /dev/null +++ b/website/docs/fastpaths/developer/efs/existing-architecture.md @@ -0,0 +1,40 @@ +--- +title: Existing architecture +sidebar_position: 10 +--- + +In this section, we'll explore how to handle storage in Kubernetes deployments using a simple image hosting example. We'll start with an existing deployment from our sample store application and modify it to serve as an image host. The UI component is a stateless microservice, which is an excellent example for demonstrating deployments since they enable **horizontal scaling** and **declarative state management** of Pods. + +One of the roles of the UI component is to serve static product images. Currently, these images are bundled into the container during the build process. However, this approach has a significant limitation - we're unable to add new images once the container is deployed. To address this limitation, we'll implement a solution using [Amazon Elastic File System](https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html) and Kubernetes [Persistent Volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) to create a shared storage environment. This will allow multiple web server containers to serve assets while scaling dynamically to meet demand. + +Let's examine the current Deployment's volume configuration: + +```bash +$ kubectl describe deployment -n ui +Name: ui +Namespace: ui +[...] + Containers: + ui: + Image: public.ecr.aws/aws-containers/retail-store-sample-ui:1.2.1 + Port: 8080/TCP + Host Port: 0/TCP + Limits: + memory: 1536Mi + Requests: + cpu: 250 + memory: 1536Mi + [...] + Mounts: + /tmp from tmp-volume (rw) + Volumes: + tmp-volume: + Type: EmptyDir (a temporary directory that shares a pod's lifetime) + Medium: Memory + SizeLimit: +[...] +``` + +Looking at the [`Volumes`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir-configuration-example) section, we can see that the Deployment currently uses an [EmptyDir volume type](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) that exists only for the Pod's lifetime. 
This means that when the Pod is terminated, the data stored in this volume is permanently lost. + +However, in the case of the UI component, the product images are currently being served as [static web content](https://spring.io/blog/2013/12/19/serving-static-web-content-with-spring-boot) via Spring Boot, so the images are not even present on the filesystem. diff --git a/website/docs/fastpaths/developer/efs/index.md b/website/docs/fastpaths/developer/efs/index.md new file mode 100644 index 0000000000..530841f739 --- /dev/null +++ b/website/docs/fastpaths/developer/efs/index.md @@ -0,0 +1,23 @@ +--- +title: Workload storage with Amazon EFS +sidebar_position: 40 +description: "Serverless, fully elastic file storage for workloads on Amazon Elastic Kubernetes Service with Amazon Elastic File System." +--- + +:::tip What's been set up for you +The environment preparation stage made the following changes to your lab environment: + +- Create an IAM role for the Amazon EFS CSI driver +- Create an Amazon EFS file system + +::: + +[Amazon Elastic File System](https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html) (Amazon EFS) provides a serverless, fully elastic file system that automatically scales on demand to petabytes without disrupting applications. It eliminates the need to provision and manage capacity as you add and remove files, making it ideal for use with AWS Cloud services and on-premises resources. + +In this lab, you will: + +- Learn about persistent network storage +- Configure and deploy the EFS CSI Driver for Kubernetes +- Implement dynamic provisioning using EFS in a Kubernetes deployment + +This hands-on experience will demonstrate how to effectively use Amazon EFS with Amazon EKS for scalable, persistent storage solutions. diff --git a/website/docs/fastpaths/developer/efs/tests/hook-efs-deployment.sh b/website/docs/fastpaths/developer/efs/tests/hook-efs-deployment.sh new file mode 100644 index 0000000000..74b7cac66d --- /dev/null +++ b/website/docs/fastpaths/developer/efs/tests/hook-efs-deployment.sh @@ -0,0 +1,23 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 60 + + EXIT_CODE=0 + + timeout -s TERM 60 bash -c \ + 'while [[ $(kubectl get pod -l app.kubernetes.io/name=ui -n ui -o json | jq -r ".items | length") -lt 2 ]];\ + do sleep 30;\ + done' || EXIT_CODE=$? 
+ + if [ $EXIT_CODE -ne 0 ]; then + >&2 echo "UI service did not deploy in 60 seconds" + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/efs/tests/hook-placeholder.sh b/website/docs/fastpaths/developer/efs/tests/hook-placeholder.sh new file mode 100644 index 0000000000..cd58bf2e1c --- /dev/null +++ b/website/docs/fastpaths/developer/efs/tests/hook-placeholder.sh @@ -0,0 +1,21 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + export ui_endpoint=$(kubectl get ingress ui -n ui -o yaml | yq .status.loadBalancer.ingress[0].hostname) + + if [ -z "$ui_endpoint" ]; then + >&2 echo "Failed to retrieve LB hostname" + exit 1 + fi + + if [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${ui_endpoint}/assets/img/products/placeholder.jpg)" != "200" ]]; then + >&2 echo "Expected placeholder image not available" + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/efs/tests/hook-sample-images.sh b/website/docs/fastpaths/developer/efs/tests/hook-sample-images.sh new file mode 100644 index 0000000000..323e30452c --- /dev/null +++ b/website/docs/fastpaths/developer/efs/tests/hook-sample-images.sh @@ -0,0 +1,15 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + if [[ $TEST_OUTPUT != *"1ca35e86-4b4c-4124-b6b5-076ba4134d0d.jpg"* ]]; then + >&2 echo "Failed to match expected output" + echo $TEST_OUTPUT + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/getting-started/about.md b/website/docs/fastpaths/developer/getting-started/about.md new file mode 100644 index 0000000000..4bb4819b4b --- /dev/null +++ b/website/docs/fastpaths/developer/getting-started/about.md @@ -0,0 +1,26 @@ +--- +title: Sample application +sidebar_position: 10 +--- + +Most of the labs in this workshop use a common sample application to provide actual container components that we can work on during the exercises. The sample application models a simple web store application, where customers can browse a catalog, add items to their cart and complete an order through the checkout process. + + + + + +The application has several components and dependencies: + + + +| Component | Description | +| --------- | --------------------------------------------------------------------------------------------- | +| UI | Provides the front end user interface and aggregates API calls to the various other services. | +| Catalog | API for product listings and details | +| Cart | API for customer shopping carts | +| Checkout | API to orchestrate the checkout process | +| Orders | API to receive and process customer orders | + +Initially we'll deploy the application in a manner that is self-contained in the Amazon EKS cluster, without using any AWS services like load balancers or a managed database. Over the course of the labs we'll leverage different features of EKS to take advantage of broader AWS services and features for our retail store. + +You can find the full source code for the sample application on [GitHub](https://github.com/aws-containers/retail-store-sample-app). 
diff --git a/website/docs/fastpaths/developer/getting-started/assets/catalog-microservice.webp b/website/docs/fastpaths/developer/getting-started/assets/catalog-microservice.webp
new file mode 100644
index 0000000000..3e213f9481
Binary files /dev/null and b/website/docs/fastpaths/developer/getting-started/assets/catalog-microservice.webp differ
diff --git a/website/docs/fastpaths/developer/getting-started/assets/ide-base.webp b/website/docs/fastpaths/developer/getting-started/assets/ide-base.webp
new file mode 100644
index 0000000000..49b50ab379
Binary files /dev/null and b/website/docs/fastpaths/developer/getting-started/assets/ide-base.webp differ
diff --git a/website/docs/fastpaths/developer/getting-started/assets/ide-initial.webp b/website/docs/fastpaths/developer/getting-started/assets/ide-initial.webp
new file mode 100644
index 0000000000..f1861e6529
Binary files /dev/null and b/website/docs/fastpaths/developer/getting-started/assets/ide-initial.webp differ
diff --git a/website/docs/fastpaths/developer/getting-started/assets/ide-modules.webp b/website/docs/fastpaths/developer/getting-started/assets/ide-modules.webp
new file mode 100644
index 0000000000..d28b9902c8
Binary files /dev/null and b/website/docs/fastpaths/developer/getting-started/assets/ide-modules.webp differ
diff --git a/website/docs/fastpaths/developer/getting-started/assets/microservices.webp b/website/docs/fastpaths/developer/getting-started/assets/microservices.webp
new file mode 100644
index 0000000000..e00e007ee1
Binary files /dev/null and b/website/docs/fastpaths/developer/getting-started/assets/microservices.webp differ
diff --git a/website/docs/fastpaths/developer/getting-started/finish.md b/website/docs/fastpaths/developer/getting-started/finish.md
new file mode 100644
index 0000000000..4059b7c383
--- /dev/null
+++ b/website/docs/fastpaths/developer/getting-started/finish.md
@@ -0,0 +1,59 @@
+---
+title: Other components
+sidebar_position: 50
+---
+
+In this lab exercise, we'll deploy the rest of the sample application efficiently using the power of Kustomize. The following kustomization file shows how you can reference other kustomizations and deploy multiple components together:
+
+```file
+manifests/base-application/kustomization.yaml
+```
+
+:::tip
+Notice that the catalog API is in this kustomization. Didn't we already deploy it?
+
+Because Kubernetes uses a declarative mechanism, we can apply the manifests for the catalog API again and expect that, since all of the resources already exist, Kubernetes will take no action.
+::: + +Apply this kustomization to our cluster to deploy the rest of the components: + +```bash wait=10 +$ kubectl apply -k ~/environment/eks-workshop/base-application +``` + +After this is complete we can use `kubectl wait` to make sure all the components have started before we proceed: + +```bash timeout=200 +$ kubectl wait --for=condition=Ready --timeout=180s pods \ + -l app.kubernetes.io/created-by=eks-workshop -A +``` + +We'll now have a Namespace for each of our application components: + +```bash +$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop +NAME STATUS AGE +carts Active 62s +catalog Active 7m17s +checkout Active 62s +orders Active 62s +other Active 62s +ui Active 62s +``` + +We can also see all of the Deployments created for the components: + +```bash +$ kubectl get deployment -l app.kubernetes.io/created-by=eks-workshop -A +NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE +carts carts 1/1 1 1 90s +carts carts-dynamodb 1/1 1 1 90s +catalog catalog 1/1 1 1 7m46s +checkout checkout 1/1 1 1 90s +checkout checkout-redis 1/1 1 1 90s +orders orders 1/1 1 1 90s +orders orders-postgresql 1/1 1 1 90s +ui ui 1/1 1 1 90s +``` + +The sample application is now deployed and ready to provide a foundation for us to use in the rest of the labs in this workshop! diff --git a/website/docs/fastpaths/developer/getting-started/first.md b/website/docs/fastpaths/developer/getting-started/first.md new file mode 100644 index 0000000000..afff87f19b --- /dev/null +++ b/website/docs/fastpaths/developer/getting-started/first.md @@ -0,0 +1,144 @@ +--- +title: Deploying our first component +sidebar_position: 40 +--- + +The sample application is composed of a set of Kubernetes manifests organized in a way that can be easily applied with Kustomize. Kustomize is an open-source tool also provided as a native feature of the `kubectl` CLI. This workshop uses Kustomize to apply changes to Kubernetes manifests, making it easier to understand changes to manifest files without needing to manually edit YAML. As we work through the various modules of this workshop, we'll incrementally apply overlays and patches with Kustomize. + +The easiest way to browse the YAML manifests for the sample application and the modules in this workshop is using the file browser in the IDE: + +![IDE files](./assets/ide-initial.webp) + +Expanding the `eks-workshop` and then `base-application` items will allow you to browse the manifests that make up the initial state of the sample application: + +![IDE files base](./assets/ide-base.webp) + +The structure consists of a directory for each application component that was outlined in the **Sample application** section. + +The `modules` directory contains sets of manifests that we will apply to the cluster throughout the subsequent lab exercises: + +![IDE files modules](./assets/ide-modules.webp) + +Before we do anything lets inspect the current Namespaces in our EKS cluster: + +```bash +$ kubectl get namespaces +NAME STATUS AGE +default Active 1h +kube-node-lease Active 1h +kube-public Active 1h +kube-system Active 1h +``` + +All of the entries listed are Namespaces for system components that were pre-installed for us. We'll ignore these by using [Kubernetes labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) to filter the Namespaces down to only those we've created: + +```bash +$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop +No resources found +``` + +The first thing we'll do is deploy the catalog component by itself. 
The manifests for this component can be found in `~/environment/eks-workshop/base-application/catalog`. + +```bash +$ ls ~/environment/eks-workshop/base-application/catalog +configMap.yaml +deployment.yaml +kustomization.yaml +namespace.yaml +secrets.yaml +service-mysql.yaml +service.yaml +serviceAccount.yaml +statefulset-mysql.yaml +``` + +These manifests include the Deployment for the catalog API which expresses the desired state of the catalog API component: + +::yaml{file="manifests/base-application/catalog/deployment.yaml" paths="spec.replicas,spec.template.metadata.labels,spec.template.spec.containers.0.image,spec.template.spec.containers.0.ports,spec.template.spec.containers.0.livenessProbe,spec.template.spec.containers.0.resources"} + +1. Run a single replica +2. Apply labels to the Pods so other resources can refer to them +3. Use the `public.ecr.aws/aws-containers/retail-store-sample-catalog` container image +4. Expose the container on port 8080 named `http` +5. Run [probes/healthchecks](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) against the `/health` path +6. [Requests](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) a specific amount of CPU and memory so the Kubernetes scheduler can place it on a node with enough available resources + +The manifests also include the Service used by other components to access the catalog API: + +::yaml{file="manifests/base-application/catalog/service.yaml" paths="spec.ports,spec.selector"} + +1. Exposes itself on port 80 and targets the `http` port exposed by the Deployment, which translates to port 8080 +2. Selects catalog Pods using labels that match what we expressed in the Deployment above + +Let's create the catalog component: + +```bash +$ kubectl apply -k ~/environment/eks-workshop/base-application/catalog +namespace/catalog created +serviceaccount/catalog created +configmap/catalog created +secret/catalog-db created +service/catalog created +service/catalog-mysql created +deployment.apps/catalog created +statefulset.apps/catalog-mysql created +``` + +Now we'll see a new Namespace: + +```bash +$ kubectl get namespaces -l app.kubernetes.io/created-by=eks-workshop +NAME STATUS AGE +catalog Active 15s +``` + +We can take a look at the Pods running in this namespace: + +```bash +$ kubectl get pod -n catalog +NAME READY STATUS RESTARTS AGE +catalog-846479dcdd-fznf5 1/1 Running 2 (43s ago) 46s +catalog-mysql-0 1/1 Running 0 46s +``` + +Notice we have a Pod for our catalog API and another for the MySQL database. If the `catalog` Pod is showing a status of `CrashLoopBackOff`, it needs to be able to connect to the `catalog-mysql` Pod before it will start. Kubernetes will keep restarting it until this is the case. In that case, we can use [kubectl wait](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#wait) to monitor specific Pods until they are in a Ready state: + +```bash +$ kubectl wait --for=condition=Ready pods --all -n catalog --timeout=180s +``` + +Now that the Pods are running we can [check their logs](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#logs), for example the catalog API: + +:::tip +You can ["follow" the kubectl logs output](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) by using the '-f' option with the command. 
(Use CTRL-C to stop following the output) +::: + +```bash +$ kubectl logs -n catalog deployment/catalog +``` + +Kubernetes also allows us to easily scale the number of catalog Pods horizontally: + +```bash +$ kubectl scale -n catalog --replicas 3 deployment/catalog +deployment.apps/catalog scaled +$ kubectl wait --for=condition=Ready pods --all -n catalog --timeout=180s +``` + +The manifests we applied also create a Service for each of our application and MySQL Pods that can be used by other components in the cluster to connect: + +```bash +$ kubectl get svc -n catalog +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +catalog ClusterIP 172.20.83.84 80/TCP 2m48s +catalog-mysql ClusterIP 172.20.181.252 3306/TCP 2m48s +``` + +These Services are internal to the cluster, so we cannot access them from the Internet or even the VPC. However, we can use [exec](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/) to access an existing Pod in the EKS cluster to check the catalog API is working: + +```bash +$ kubectl -n catalog exec -i \ + deployment/catalog -- curl catalog.catalog.svc/catalog/products | jq . +``` + +You should receive back a JSON payload with product information. Congratulations, you've just deployed your first microservice to Kubernetes with EKS! diff --git a/website/docs/fastpaths/developer/getting-started/index.md b/website/docs/fastpaths/developer/getting-started/index.md new file mode 100644 index 0000000000..56e5c40716 --- /dev/null +++ b/website/docs/fastpaths/developer/getting-started/index.md @@ -0,0 +1,9 @@ +--- +title: Getting started +sidebar_position: 10 +description: "Learn the basics of running workloads on Amazon Elastic Kubernetes Service." +--- + +Welcome to the first hands-on lab in the EKS workshop. The goal of this exercise is to familiarize ourselves with the sample application we'll use for many of the coming lab exercises and in doing so touch on some basic concepts related to deploying workloads to EKS. We'll explore the architecture of the application and deploy out the components to our EKS cluster. + +Let's deploy your first workload to the EKS cluster in your lab environment and explore! diff --git a/website/docs/fastpaths/developer/getting-started/microservices.md b/website/docs/fastpaths/developer/getting-started/microservices.md new file mode 100644 index 0000000000..b806c8d12a --- /dev/null +++ b/website/docs/fastpaths/developer/getting-started/microservices.md @@ -0,0 +1,22 @@ +--- +title: Microservices on Kubernetes +sidebar_position: 30 +--- + +Now that we're familiar with the overall architecture of the sample application, how will we initially deploy this in to EKS? Let's explore some of the Kubernetes building blocks by looking at the **catalog** component: + +![Catalog microservice in Kubernetes](./assets/catalog-microservice.webp) + +There are a number of things to consider in this diagram: + +- The application that provides the catalog API runs as a [Pod](https://kubernetes.io/docs/concepts/workloads/pods/), which is the smallest deployable unit in Kubernetes. Application Pods will run the container images we outlined in the previous section. +- The Pods that run for the catalog component are created by a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) which may manage one or more "replicas" of the catalog Pod, allowing it to scale horizontally. 
+- A [Service](https://kubernetes.io/docs/concepts/services-networking/service/) is an abstract way to expose an application running as a set of Pods, and this allows our catalog API to be called by other components inside the Kubernetes cluster. Each Service is given its own DNS entry. +- We're starting this workshop with a MySQL database that runs inside our Kubernetes cluster as a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/), which is designed to manage stateful workloads. +- All of these Kubernetes constructs are grouped in their own dedicated catalog Namespace. Each of the application components has its own Namespace. + +Each of the components in the microservices architecture is conceptually similar to the catalog, using Deployments to manage application workload Pods and Services to route traffic to those Pods. If we expand out our view of the architecture we can consider how traffic is routed throughout the broader system: + +![Microservices in Kubernetes](./assets/microservices.webp) + +The **ui** component receives HTTP requests from, for example, a user's browser. It then makes HTTP requests to other API components in the architecture to fulfill that request and returns a response to the user. Each of the downstream components may have their own data stores or other infrastructure. The Namespaces are a logical grouping of the resources for each microservice and also act as a soft isolation boundary, which can be used to effectively implement controls using Kubernetes RBAC and Network Policies. diff --git a/website/docs/fastpaths/developer/getting-started/packaging-application.md b/website/docs/fastpaths/developer/getting-started/packaging-application.md new file mode 100644 index 0000000000..dfb8d8adcd --- /dev/null +++ b/website/docs/fastpaths/developer/getting-started/packaging-application.md @@ -0,0 +1,16 @@ +--- +title: Packaging the components +sidebar_position: 20 +--- + +Before a workload can be deployed to a Kubernetes distribution like EKS it first must be packaged as a container image and published to a container registry. Basic container topics like this are not covered as part of this workshop, and the sample application has container images already available in Amazon Elastic Container Registry for the labs we'll complete today. + +The table below provides links to the ECR Public repository for each component, as well as the `Dockerfile` that was used to build each component. 
+
+| Component     | ECR Public repository                                                              | Dockerfile                                                                                                   |
+| ------------- | ---------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------ |
+| UI            | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-ui)       | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/ui/Dockerfile)       |
+| Catalog       | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-catalog)  | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/catalog/Dockerfile)  |
+| Shopping cart | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-cart)     | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/cart/Dockerfile)     |
+| Checkout      | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-checkout) | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/checkout/Dockerfile) |
+| Orders        | [Repository](https://gallery.ecr.aws/aws-containers/retail-store-sample-orders)   | [Dockerfile](https://github.com/aws-containers/retail-store-sample-app/blob/v1.2.1/src/orders/Dockerfile)   |
diff --git a/website/docs/fastpaths/developer/index.md b/website/docs/fastpaths/developer/index.md
new file mode 100644
index 0000000000..f97e57fdd8
--- /dev/null
+++ b/website/docs/fastpaths/developer/index.md
@@ -0,0 +1,23 @@
+---
+title: "⚡ Fast path - Developers"
+chapter: true
+---
+
+::required-time
+
+:::tip Before you start
+Prepare your environment for this section:
+
+```bash timeout=300 wait=30
+$ prepare-environment fastpath/developers
+```
+
+Each section of this lab will outline what resources have been set up for your convenience.
+
+You can view the Terraform that applies these changes [here](https://github.com/VAR::MANIFESTS_OWNER/VAR::MANIFESTS_REPOSITORY/tree/VAR::MANIFESTS_REF/manifests/modules/fastpath/developers/.workshop/terraform).
+
+:::
+
+Welcome to the EKS Workshop fast path for developers! This is a collection of labs from the EKS Workshop, optimized to teach developers the features of Amazon EKS we most often see required as they deploy workloads to this AWS service.
+
+Throughout this series of exercises you'll learn how to deploy the sample application to an EKS cluster, expose it to the outside world with Ingress, provide it with shared persistent storage using Amazon EFS, and grant it secure access to AWS services such as Amazon DynamoDB with EKS Pod Identity.
diff --git a/website/docs/fastpaths/developer/ingress/adding-ingress.md b/website/docs/fastpaths/developer/ingress/adding-ingress.md
new file mode 100644
index 0000000000..206d78d18a
--- /dev/null
+++ b/website/docs/fastpaths/developer/ingress/adding-ingress.md
@@ -0,0 +1,124 @@
+---
+title: "Creating the Ingress"
+sidebar_position: 20
+---
+
+Let's create an Ingress resource with the following configuration:
+
+::yaml{file="manifests/modules/exposing/ingress/creating-ingress/ingress.yaml" paths="kind,metadata.annotations,spec.rules.0"}
+
+1. Use an `Ingress` kind
+2. We can use annotations to configure various aspects of the behavior of the ALB that's created, such as the health checks it performs on the target pods
+3. The rules section is used to express how the ALB should route traffic.
In this example we route all HTTP requests where the path starts with `/` to the Kubernetes service called `ui` on port 80 + +Apply this configuration: + +```bash timeout=180 hook=add-ingress hookTimeout=430 +$ kubectl apply -k ~/environment/eks-workshop/modules/exposing/ingress/creating-ingress +``` + +Let's inspect the Ingress object created: + +```bash +$ kubectl get ingress ui -n ui +NAME CLASS HOSTS ADDRESS PORTS AGE +ui alb * k8s-ui-ui-1268651632.us-west-2.elb.amazonaws.com 80 15s +``` + +The ALB will take several minutes to provision and register its targets so take some time to take a closer look at the ALB provisioned for this Ingress to see how its configured: + +```bash +$ aws elbv2 describe-load-balancers --query 'LoadBalancers[?contains(LoadBalancerName, `k8s-ui-ui`) == `true`]' +[ + { + "LoadBalancerArn": "arn:aws:elasticloadbalancing:us-west-2:1234567890:loadbalancer/app/k8s-ui-ui-cb8129ddff/f62a7bc03db28e7c", + "DNSName": "k8s-ui-ui-cb8129ddff-1888909706.us-west-2.elb.amazonaws.com", + "CanonicalHostedZoneId": "Z1H1FL5HABSF5", + "CreatedTime": "2022-09-30T03:40:00.950000+00:00", + "LoadBalancerName": "k8s-ui-ui-cb8129ddff", + "Scheme": "internet-facing", + "VpcId": "vpc-0851f873025a2ece5", + "State": { + "Code": "active" + }, + "Type": "application", + "AvailabilityZones": [ + { + "ZoneName": "us-west-2b", + "SubnetId": "subnet-00415f527bbbd999b", + "LoadBalancerAddresses": [] + }, + { + "ZoneName": "us-west-2a", + "SubnetId": "subnet-0264d4b9985bd8691", + "LoadBalancerAddresses": [] + }, + { + "ZoneName": "us-west-2c", + "SubnetId": "subnet-05cda6deed7f3da65", + "LoadBalancerAddresses": [] + } + ], + "SecurityGroups": [ + "sg-0f8e704ee37512eb2", + "sg-02af06ec605ef8777" + ], + "IpAddressType": "ipv4" + } +] +``` + +What does this tell us? + +- The ALB is accessible over the public internet +- It uses the public subnets in our VPC + +Inspect the targets in the target group that was created by the controller: + +```bash +$ ALB_ARN=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[?contains(LoadBalancerName, `k8s-ui-ui`) == `true`].LoadBalancerArn' | jq -r '.[0]') +$ TARGET_GROUP_ARN=$(aws elbv2 describe-target-groups --load-balancer-arn $ALB_ARN | jq -r '.TargetGroups[0].TargetGroupArn') +$ aws elbv2 describe-target-health --target-group-arn $TARGET_GROUP_ARN +{ + "TargetHealthDescriptions": [ + { + "Target": { + "Id": "10.42.180.183", + "Port": 8080, + "AvailabilityZone": "us-west-2c" + }, + "HealthCheckPort": "8080", + "TargetHealth": { + "State": "healthy" + } + } + ] +} +``` + +Since we specified using IP mode in our Ingress object, the target is registered using the IP address of the `ui` pod and the port on which it serves traffic. + +You can also inspect the ALB and its target groups in the console by clicking this link: + + + +Get the URL from the Ingress resource: + +```bash +$ ADDRESS=$(kubectl get ingress -n ui ui -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") +$ echo "http://${ADDRESS}" +http://k8s-ui-ui-a9797f0f61.elb.us-west-2.amazonaws.com +``` + +To wait until the load balancer has finished provisioning you can run this command: + +```bash +$ curl --head -X GET --retry 30 --retry-all-errors --retry-delay 15 --connect-timeout 30 --max-time 60 \ + -k $(kubectl get ingress -n ui ui -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") +``` + +And access it in your web browser. You will see the UI from the web store displayed and will be able to navigate around the site as a user. 
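+
+If you'd like to dig one level deeper, you can also inspect the listener and routing rules that the controller created from our Ingress definition. This is an optional sketch that assumes the `ALB_ARN` variable from the earlier step is still set in your shell, and the exact rule output will vary between environments:
+
+```bash
+$ LISTENER_ARN=$(aws elbv2 describe-listeners --load-balancer-arn $ALB_ARN \
+    --query 'Listeners[0].ListenerArn' --output text)
+$ aws elbv2 describe-rules --listener-arn $LISTENER_ARN \
+    --query 'Rules[].{Priority:Priority,Paths:Conditions[].Values[],Action:Actions[].Type}'
+```
+
+You should see forward actions that point at the target group we inspected above, corresponding to the path defined in the `rules` section of our Ingress resource.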
+ + + + diff --git a/website/docs/fastpaths/developer/ingress/assets/multiple-ingress-lb.webp b/website/docs/fastpaths/developer/ingress/assets/multiple-ingress-lb.webp new file mode 100644 index 0000000000..c7f21b056b Binary files /dev/null and b/website/docs/fastpaths/developer/ingress/assets/multiple-ingress-lb.webp differ diff --git a/website/docs/fastpaths/developer/ingress/assets/multiple-ingress-listener.webp b/website/docs/fastpaths/developer/ingress/assets/multiple-ingress-listener.webp new file mode 100644 index 0000000000..08a8c0bef9 Binary files /dev/null and b/website/docs/fastpaths/developer/ingress/assets/multiple-ingress-listener.webp differ diff --git a/website/docs/fastpaths/developer/ingress/assets/multiple-ingress-rules.webp b/website/docs/fastpaths/developer/ingress/assets/multiple-ingress-rules.webp new file mode 100644 index 0000000000..f809553601 Binary files /dev/null and b/website/docs/fastpaths/developer/ingress/assets/multiple-ingress-rules.webp differ diff --git a/website/docs/fastpaths/developer/ingress/assets/web-ui.webp b/website/docs/fastpaths/developer/ingress/assets/web-ui.webp new file mode 100644 index 0000000000..2221ddcbde Binary files /dev/null and b/website/docs/fastpaths/developer/ingress/assets/web-ui.webp differ diff --git a/website/docs/fastpaths/developer/ingress/index.md b/website/docs/fastpaths/developer/ingress/index.md new file mode 100644 index 0000000000..dafb3eff62 --- /dev/null +++ b/website/docs/fastpaths/developer/ingress/index.md @@ -0,0 +1,21 @@ +--- +title: "Exposing workloads with Ingress" +chapter: true +sidebar_position: 20 +description: "Expose HTTP and HTTPS routes to the outside world using Ingress API on Amazon Elastic Kubernetes Service." +--- + +:::tip What's been set up for you +The environment preparation stage made the following changes to your lab environment: + +- Create an IAM role required by the AWS Load Balancer Controller +- Create an IAM role required by ExternalDNS +- Create an AWS Route 53 private hosted zone + +::: + +Right now our web store application is not exposed to the outside world, so there's no way for users to access it. Although there are many microservices in our web store workload, only the `ui` application needs to be available to end users. This is because the `ui` application will perform all communication to the other backend services using internal Kubernetes networking. + +Kubernetes Ingress is an API resource that allows you to manage external or internal HTTP(S) access to Kubernetes services running in a cluster. Amazon Elastic Load Balancing Application Load Balancer (ALB) is a popular AWS service that load balances incoming traffic at the application layer (layer 7) across multiple targets, such as Amazon EC2 instances, in a region. ALB supports multiple features including host or path based routing, TLS (Transport Layer Security) termination, WebSockets, HTTP/2, AWS WAF (Web Application Firewall) integration, integrated access logs, and health checks. + +In this lab exercise, we'll expose our sample application using an ALB with the Kubernetes ingress model. 
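+
+To make the Ingress model concrete before we work through the lab, here is a minimal sketch of what an ALB-backed Ingress can look like. This is illustrative only: the manifest actually used in this lab lives at `manifests/modules/exposing/ingress/creating-ingress/ingress.yaml`, and the annotation values shown here are assumptions rather than the lab's exact configuration:
+
+```bash test=false
+$ cat <<'EOF' | kubectl apply --dry-run=client -f -
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: ui
+  namespace: ui
+  annotations:
+    # Assumed values for illustration - see the lab manifest for the real ones
+    alb.ingress.kubernetes.io/scheme: internet-facing
+    alb.ingress.kubernetes.io/target-type: ip
+spec:
+  ingressClassName: alb
+  rules:
+    - http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: ui
+                port:
+                  number: 80
+EOF
+ingress.networking.k8s.io/ui created (dry run)
+```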
diff --git a/website/docs/fastpaths/developer/ingress/introduction.md b/website/docs/fastpaths/developer/ingress/introduction.md
new file mode 100644
index 0000000000..7c49d32e7d
--- /dev/null
+++ b/website/docs/fastpaths/developer/ingress/introduction.md
@@ -0,0 +1,41 @@
+---
+title: "Introduction"
+sidebar_position: 10
+---
+
+First let's install the AWS Load Balancer Controller using Helm:
+
+```bash wait=10
+$ helm repo add eks-charts https://aws.github.io/eks-charts
+$ helm upgrade --install aws-load-balancer-controller eks-charts/aws-load-balancer-controller \
+  --version "${LBC_CHART_VERSION}" \
+  --namespace "kube-system" \
+  --set "clusterName=${EKS_CLUSTER_NAME}" \
+  --set "serviceAccount.name=aws-load-balancer-controller-sa" \
+  --set "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"="$LBC_ROLE_ARN" \
+  --wait
+Release "aws-load-balancer-controller" does not exist. Installing it now.
+NAME: aws-load-balancer-controller
+LAST DEPLOYED: [...]
+NAMESPACE: kube-system
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+AWS Load Balancer controller installed!
+```
+
+Currently there are no Ingress resources in our cluster, which you can check with the following command:
+
+```bash expectError=true
+$ kubectl get ingress -n ui
+No resources found in ui namespace.
+```
+
+There are also no Service resources of type `LoadBalancer`, which you can confirm with the following command:
+
+```bash
+$ kubectl get svc -n ui
+NAME   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
+ui     ClusterIP   10.100.221.103   <none>        80/TCP    29m
+```
diff --git a/website/docs/fastpaths/developer/ingress/tests/hook-add-ingress.sh b/website/docs/fastpaths/developer/ingress/tests/hook-add-ingress.sh
new file mode 100644
index 0000000000..8ae8af228b
--- /dev/null
+++ b/website/docs/fastpaths/developer/ingress/tests/hook-add-ingress.sh
@@ -0,0 +1,32 @@
+set -Eeuo pipefail
+
+before() {
+  echo "noop"
+}
+
+after() {
+  sleep 20
+
+  export ui_endpoint=$(kubectl get ingress -n ui ui -o json | jq -r '.status.loadBalancer.ingress[0].hostname')
+
+  if [ -z "$ui_endpoint" ]; then
+    >&2 echo "Failed to retrieve hostname from Ingress"
+    exit 1
+  fi
+
+  EXIT_CODE=0
+
+  timeout -s TERM 400 bash -c \
+    'while [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${ui_endpoint}/home)" != "200" ]];\
+    do sleep 20;\
+    done' || EXIT_CODE=$?
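+
+  # The loop above polls the Ingress endpoint every 20 seconds until it returns
+  # HTTP 200; if the 400 second timeout fires first, the non-zero exit code is
+  # captured in EXIT_CODE and reported below.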
+ + echo "Timeout completed" + + if [ $EXIT_CODE -ne 0 ]; then + >&2 echo "Ingress did not become available after 400 seconds" + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/ingress/tests/hook-dns-curl.sh b/website/docs/fastpaths/developer/ingress/tests/hook-dns-curl.sh new file mode 100644 index 0000000000..beed10e525 --- /dev/null +++ b/website/docs/fastpaths/developer/ingress/tests/hook-dns-curl.sh @@ -0,0 +1,15 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + if [[ $TEST_OUTPUT != *"HTTP/1.1 200 OK"* ]]; then + >&2 echo "Failed to match expected output" + echo $TEST_OUTPUT + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/ingress/tests/hook-dns-logs.sh b/website/docs/fastpaths/developer/ingress/tests/hook-dns-logs.sh new file mode 100644 index 0000000000..f02fef2cb9 --- /dev/null +++ b/website/docs/fastpaths/developer/ingress/tests/hook-dns-logs.sh @@ -0,0 +1,15 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + if [[ $TEST_OUTPUT != *"Desired change: CREATE ui.retailstore.com"* ]]; then + >&2 echo "Failed to match expected output" + echo $TEST_OUTPUT + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/ingress/tests/hook-multiple-ingress.sh b/website/docs/fastpaths/developer/ingress/tests/hook-multiple-ingress.sh new file mode 100644 index 0000000000..bfc5010e9d --- /dev/null +++ b/website/docs/fastpaths/developer/ingress/tests/hook-multiple-ingress.sh @@ -0,0 +1,37 @@ +set -Eeuo pipefail + +before() { + echo "noop" +} + +after() { + sleep 60 + + kubectl get ingress -A + + export catalog_endpoint=$(kubectl get ingress -n catalog catalog-multi -o json | jq -r '.status.loadBalancer.ingress[0].hostname') + + if [ -z "$catalog_endpoint" ]; then + >&2 echo "Failed to retrieve hostname from Ingress" + exit 1 + fi + + EXIT_CODE=0 + + timeout -s TERM 400 bash -c \ + 'while [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${catalog_endpoint}/catalog/products)" != "200" ]];\ + do sleep 20;\ + done' || EXIT_CODE=$? + + echo "Timeout completed" + + if [ $EXIT_CODE -ne 0 ]; then + >&2 echo "Ingress did not become available after 400 seconds" + echo "Was checking $catalog_endpoint" + echo "" + kubectl get ingress -A + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/keda/configure-keda.md b/website/docs/fastpaths/developer/keda/configure-keda.md new file mode 100644 index 0000000000..56e9d168e0 --- /dev/null +++ b/website/docs/fastpaths/developer/keda/configure-keda.md @@ -0,0 +1,30 @@ +--- +title: "Configure KEDA" +sidebar_position: 10 +--- + +When installed, KEDA creates several custom resources. One of those resources, a `ScaledObject`, enables you to map an external event source to a Deployment or StatefulSet for scaling. In this lab, we'll create a `ScaledObject` that targets the `ui` Deployment and scales this workload based on the `RequestCountPerTarget` metric in CloudWatch. + +::yaml{file="manifests/modules/autoscaling/workloads/keda/scaledobject/scaledobject.yaml" paths="spec.scaleTargetRef,spec.minReplicaCount,spec.maxReplicaCount,spec.triggers"} + +1. This is the resource KEDA will scale. The `name` is the name of the deployment you are targeting and your `ScaledObject` must be in the same namespace as the Deployment +2. The minimum number of replicas that KEDA will scale the deployment to +3. The maximum number of replicas that KEDA will scale the deployment to +4. 
The `expression` uses [CloudWatch Metrics Insights](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch-metrics-insights-querylanguage.html) syntax to select your target metric. When the `targetMetricValue` is exceeded, KEDA will scale out the workload to support the increased load. In our case, if the `RequestCountPerTarget` is greater than 100, KEDA will scale the deployment.
+
+More details on the AWS CloudWatch scaler can be found [here](https://keda.sh/docs/scalers/aws-cloudwatch/).
+
+First we need to gather some information about the Application Load Balancer (ALB) and Target Group that were created as part of the lab prerequisites.
+
+```bash
+$ export ALB_ARN=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[?contains(LoadBalancerName, `k8s-ui-ui`) == `true`]' | jq -r .[0].LoadBalancerArn)
+$ export ALB_ID=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[?contains(LoadBalancerName, `k8s-ui-ui`) == `true`]' | jq -r .[0].LoadBalancerArn | awk -F "loadbalancer/" '{print $2}')
+$ export TARGETGROUP_ID=$(aws elbv2 describe-target-groups --load-balancer-arn $ALB_ARN | jq -r '.TargetGroups[0].TargetGroupArn' | awk -F ":" '{print $6}')
+```
+
+Now we can use those values to update the configuration of our `ScaledObject` and create the resource in the cluster.
+
+```bash
+$ kubectl kustomize ~/environment/eks-workshop/modules/autoscaling/workloads/keda/scaledobject \
+  | envsubst | kubectl apply -f-
+```
diff --git a/website/docs/fastpaths/developer/keda/index.md b/website/docs/fastpaths/developer/keda/index.md
new file mode 100644
index 0000000000..75d7aaf520
--- /dev/null
+++ b/website/docs/fastpaths/developer/keda/index.md
@@ -0,0 +1,22 @@
+---
+title: "Autoscaling applications"
+chapter: true
+sidebar_position: 80
+description: "Automatically scale workloads on Amazon Elastic Kubernetes Service with KEDA"
+---
+
+:::tip What's been set up for you
+The environment preparation stage made the following changes to your lab environment:
+
+- Create an IAM role required by the KEDA Operator
+
+:::
+
+Autoscaling monitors your workloads and automatically adjusts capacity to maintain steady, predictable performance while also optimizing for cost. When using Kubernetes there are two main mechanisms which can be used to scale automatically:
+
+- **Compute:** As pods are scaled the underlying compute in a Kubernetes cluster must also adapt by adjusting the number or size of worker nodes used to run the Pods.
+- **Pods:** Since pods are used to run workloads in a Kubernetes cluster, scaling a workload is primarily done by scaling Pods either horizontally or vertically in response to scenarios such as changes in load on a given application.
+
+In this lab, we'll look at using the [Kubernetes Event-Driven Autoscaler (KEDA)](https://keda.sh/) to scale pods in a deployment. The Horizontal Pod Autoscaler (HPA) can horizontally scale the pods in a deployment based on resource metrics such as average CPU utilization, but sometimes workloads need to scale based on external events or metrics. KEDA provides the capability to scale your workload based on events from various event sources, such as the queue length in Amazon SQS or other metrics in CloudWatch. KEDA supports 60+ [scalers](https://keda.sh/docs/scalers/) for various metrics systems, databases, messaging systems, and more.
+
+KEDA is a lightweight component that can be deployed into a Kubernetes cluster using a Helm chart.
KEDA works with standard Kubernetes components like the Horizontal Pod Autoscaler to scale a Deployment or StatefulSet. With KEDA, you selectively choose the workloads you want to scale with these various event sources.
diff --git a/website/docs/fastpaths/developer/keda/install-keda.md b/website/docs/fastpaths/developer/keda/install-keda.md
new file mode 100644
index 0000000000..2fc92e51c6
--- /dev/null
+++ b/website/docs/fastpaths/developer/keda/install-keda.md
@@ -0,0 +1,44 @@
+---
+title: "Installing KEDA"
+sidebar_position: 5
+---
+
+First let's install KEDA using Helm. One prerequisite was created during the lab preparation stage: an IAM role with permissions to access the metric data within CloudWatch.
+
+```bash
+$ helm repo add kedacore https://kedacore.github.io/charts
+$ helm upgrade --install keda kedacore/keda \
+  --version "${KEDA_CHART_VERSION}" \
+  --namespace keda \
+  --create-namespace \
+  --set "podIdentity.aws.irsa.enabled=true" \
+  --set "podIdentity.aws.irsa.roleArn=${KEDA_ROLE_ARN}" \
+  --wait
+Release "keda" does not exist. Installing it now.
+NAME: keda
+LAST DEPLOYED: [...]
+NAMESPACE: keda
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+[...]
+```
+
+After the Helm install, KEDA will be running as several deployments in the `keda` namespace:
+
+```bash
+$ kubectl get deployment -n keda
+NAME                              READY   UP-TO-DATE   AVAILABLE   AGE
+keda-admission-webhooks           1/1     1            1           105s
+keda-operator                     1/1     1            1           105s
+keda-operator-metrics-apiserver   1/1     1            1           105s
+```
+
+Each KEDA deployment performs a different key role:
+
+1. Agent (keda-operator) - controls the scaling of the workload
+2. Metrics (keda-operator-metrics-apiserver) - acts as a Kubernetes metrics server, providing access to external metrics
+3. Admission Webhooks (keda-admission-webhooks) - validates resource configuration to prevent misconfiguration (e.g. multiple ScaledObjects targeting the same workload)
+
+Now we can move on to configuring KEDA to scale our workload.
diff --git a/website/docs/fastpaths/developer/keda/test-keda.md b/website/docs/fastpaths/developer/keda/test-keda.md
new file mode 100644
index 0000000000..a98d7002d2
--- /dev/null
+++ b/website/docs/fastpaths/developer/keda/test-keda.md
@@ -0,0 +1,50 @@
+---
+title: "Generate load"
+sidebar_position: 20
+---
+
+To observe KEDA scale the deployment in response to the KEDA `ScaledObject` we have configured, we need to generate some load on our application. We'll do that by calling the home page of the workload with [hey](https://github.com/rakyll/hey).
+
+The command below will run the load generator with:
+
+- 3 workers running concurrently
+- Each sending 5 queries per second
+- Running for a maximum of 10 minutes
+
+```bash hook=keda-pod-scaleout hookTimeout=330
+$ export ALB_HOSTNAME=$(kubectl get ingress ui -n ui -o yaml | yq .status.loadBalancer.ingress[0].hostname)
+$ kubectl run load-generator \
+  --image=williamyeh/hey:latest \
+  --restart=Never -- -c 3 -q 5 -z 10m http://$ALB_HOSTNAME/home
+```
+
+Based on the `ScaledObject`, KEDA creates an HPA resource and provides the required metrics to allow the HPA to scale the workload.
Now that we have requests hitting our application, we can watch the HPA resource to follow its progress:
+
+```bash test=false
+$ kubectl get hpa keda-hpa-ui-hpa -n ui --watch
+NAME              REFERENCE       TARGETS             MINPODS   MAXPODS   REPLICAS   AGE
+keda-hpa-ui-hpa   Deployment/ui   7/100 (avg)         1         10        1          7m58s
+keda-hpa-ui-hpa   Deployment/ui   778/100 (avg)       1         10        1          8m33s
+keda-hpa-ui-hpa   Deployment/ui   194500m/100 (avg)   1         10        4          8m48s
+keda-hpa-ui-hpa   Deployment/ui   97250m/100 (avg)    1         10        8          9m3s
+keda-hpa-ui-hpa   Deployment/ui   625m/100 (avg)      1         10        8          9m18s
+keda-hpa-ui-hpa   Deployment/ui   91500m/100 (avg)    1         10        8          9m33s
+keda-hpa-ui-hpa   Deployment/ui   92125m/100 (avg)    1         10        8          9m48s
+keda-hpa-ui-hpa   Deployment/ui   750m/100 (avg)      1         10        8          10m
+keda-hpa-ui-hpa   Deployment/ui   102625m/100 (avg)   1         10        8          10m
+keda-hpa-ui-hpa   Deployment/ui   113625m/100 (avg)   1         10        8          11m
+keda-hpa-ui-hpa   Deployment/ui   90900m/100 (avg)    1         10        10         11m
+keda-hpa-ui-hpa   Deployment/ui   91500m/100 (avg)    1         10        10         12m
+```
+
+Once you're satisfied with the autoscaling behavior, you can end the watch with `Ctrl+C` and stop the load generator like so:
+
+```bash
+$ kubectl delete pod load-generator
+```
+
+As the load generator terminates, notice that the HPA slowly brings the replica count back down to the configured minimum.
+
+You can also view the load test results in the CloudWatch console. Navigate to the metrics section and find the `RequestCount` and `RequestCountPerTarget` metrics for the load balancer and target group that were created. From the results you can see that initially all of the load was handled by a single pod, but as KEDA begins to scale the workload the requests are distributed across the additional pods added to the workload. If you let the load-generator pod run for the full 10 minutes, you'll see results similar to this.
+
+![Insights](/img/keda/keda-cloudwatch.png)
diff --git a/website/docs/fastpaths/developer/keda/tests/hook-keda-pod-scaleout.sh b/website/docs/fastpaths/developer/keda/tests/hook-keda-pod-scaleout.sh
new file mode 100644
index 0000000000..1b16393bd8
--- /dev/null
+++ b/website/docs/fastpaths/developer/keda/tests/hook-keda-pod-scaleout.sh
@@ -0,0 +1,21 @@
+set -Eeuo pipefail
+
+before() {
+  echo "noop"
+}
+
+after() {
+  EXIT_CODE=0
+
+  timeout -s TERM 300 bash -c \
+    'while [[ $(kubectl get pod -l app.kubernetes.io/instance=ui -n ui -o json | jq -r ".items | length") -lt 2 ]];\
+    do sleep 30;\
+    done' || EXIT_CODE=$?
+
+  if [ $EXIT_CODE -ne 0 ]; then
+    >&2 echo "Pods did not scale within 300 seconds"
+    exit 1
+  fi
+}
+
+"$@"
diff --git a/website/docs/fastpaths/developer/keda/tests/hook-validate-ingress.sh b/website/docs/fastpaths/developer/keda/tests/hook-validate-ingress.sh
new file mode 100644
index 0000000000..e573766a3e
--- /dev/null
+++ b/website/docs/fastpaths/developer/keda/tests/hook-validate-ingress.sh
@@ -0,0 +1,32 @@
+set -Eeuo pipefail
+
+before() {
+  echo "noop"
+}
+
+after() {
+  sleep 20
+
+  export ui_endpoint=$(kubectl get ingress -n ui ui -o json | jq -r '.status.loadBalancer.ingress[0].hostname')
+
+  if [ -z "$ui_endpoint" ]; then
+    >&2 echo "Failed to retrieve hostname from Ingress"
+    exit 1
+  fi
+
+  EXIT_CODE=0
+
+  timeout -s TERM 400 bash -c \
+    'while [[ "$(curl -s -o /dev/null -L -w ''%{http_code}'' ${ui_endpoint}/home)" != "200" ]];\
+    do sleep 20;\
+    done' || EXIT_CODE=$?
+ + echo "Timeout completed" + + if [ $EXIT_CODE -ne 0 ]; then + >&2 echo "Ingress did not become available after 400 seconds" + exit 1 + fi +} + +"$@" diff --git a/website/docs/fastpaths/developer/tests/hook-suite.sh b/website/docs/fastpaths/developer/tests/hook-suite.sh new file mode 100644 index 0000000000..8b5a4baea5 --- /dev/null +++ b/website/docs/fastpaths/developer/tests/hook-suite.sh @@ -0,0 +1,11 @@ +set -e + +before() { + echo "noop" +} + +after() { + prepare-environment +} + +"$@" diff --git a/website/docs/fundamentals/exposing/ingress/index.md b/website/docs/fundamentals/exposing/ingress/index.md index a02634c263..c7cca3df0c 100644 --- a/website/docs/fundamentals/exposing/ingress/index.md +++ b/website/docs/fundamentals/exposing/ingress/index.md @@ -17,8 +17,8 @@ $ prepare-environment exposing/ingress This will make the following changes to your lab environment: -- Creates an IAM role required by the AWS Load Balancer Controller -- Creates an IAM role required by ExternalDNS +- Create an IAM role required by the AWS Load Balancer Controller +- Create an IAM role required by ExternalDNS - Create an AWS Route 53 private hosted zone You can view the Terraform that applies these changes [here](https://github.com/VAR::MANIFESTS_OWNER/VAR::MANIFESTS_REPOSITORY/tree/VAR::MANIFESTS_REF/manifests/modules/exposing/ingress/.workshop/terraform). diff --git a/website/docs/introduction/setup/your-account/using-terraform.md b/website/docs/introduction/setup/your-account/using-terraform.md index 360bddc03e..cef70877e3 100644 --- a/website/docs/introduction/setup/your-account/using-terraform.md +++ b/website/docs/introduction/setup/your-account/using-terraform.md @@ -7,7 +7,7 @@ sidebar_position: 30 Creating the workshop cluster with Terraform is currently in preview. Please raise any issues encountered in the [GitHub repository](https://github.com/aws-samples/eks-workshop-v2/issues). ::: -This section outlines how to build a cluster for the lab exercises using [Hashicorp Terraform](https://developer.hashicorp.com/terraform). This is intended for learners who are familiar with using Terraform infrastructure-as-code. +This section outlines how to build a cluster for the lab exercises using [HashiCorp Terraform](https://developer.hashicorp.com/terraform). This is intended for learners who are familiar with using Terraform infrastructure-as-code. The `terraform` CLI has been pre-installed in your IDE environment, so we can immediately create the cluster. Let's examine the main Terraform configuration files that will be used to build the cluster and its supporting infrastructure. 
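
As a rough sketch of where this page is heading, the standard Terraform workflow applies once the configuration has been reviewed. The working directory below is a placeholder rather than the workshop's actual path, since it is not shown in this excerpt:

```bash test=false
# Illustrative only - the working directory is a placeholder
$ cd ~/environment/terraform
$ terraform init    # download the providers and modules the configuration uses
$ terraform plan    # review the EKS cluster and supporting resources to be created
$ terraform apply   # create the infrastructure (add -auto-approve to skip the prompt)
```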
diff --git a/website/sidebars.js b/website/sidebars.js index 4375d03407..f7fc45a567 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -23,6 +23,7 @@ const sidebars = { automation: [{ type: "autogenerated", dirName: "automation" }], aiml: [{ type: "autogenerated", dirName: "aiml" }], troubleshooting: [{ type: "autogenerated", dirName: "troubleshooting" }], + fastpaths: [{ type: "autogenerated", dirName: "fastpaths" }], }; module.exports = sidebars; diff --git a/website/src/components/ConsoleButton/index.tsx b/website/src/components/ConsoleButton/index.tsx index ad7bd4c082..1d5743773a 100644 --- a/website/src/components/ConsoleButton/index.tsx +++ b/website/src/components/ConsoleButton/index.tsx @@ -1,10 +1,7 @@ import React, { type ReactNode } from "react"; -import clsx from "clsx"; import styles from "./styles.module.css"; import useBaseUrl from "@docusaurus/useBaseUrl"; -import { FontAwesomeIcon } from "@fortawesome/react-fontawesome"; -import { faArrowUpRightFromSquare } from "@fortawesome/free-solid-svg-icons"; interface Props { service: string; @@ -17,7 +14,7 @@ export default function ConsoleButton({ url = "http://localhost:3000", label = "Launch", }: Props): JSX.Element { - let serviceIcon = service || "console"; + const serviceIcon = service || "console"; return (