From aa3928449aca090a16e595f5c853fdc79cc78ff9 Mon Sep 17 00:00:00 2001 From: Paras Patel Date: Fri, 22 Aug 2025 00:19:10 -0700 Subject: [PATCH 01/18] feat: add core Helm chart structure with comprehensive values schema - Add Chart.yaml with metadata and version 2.8.0 - Add values.yaml with multi-cloud kubeconfig support - Add JSON Schema validation for values configuration - Support AWS EKS, GCP GKE, Azure AKS, URL, and custom providers - Include security configurations and resource management --- helm-chart/Chart.yaml | 21 ++ helm-chart/values.schema.json | 307 ++++++++++++++++ helm-chart/values.yaml | 651 ++++++++++++++++++++++++++++++++++ 3 files changed, 979 insertions(+) create mode 100644 helm-chart/Chart.yaml create mode 100644 helm-chart/values.schema.json create mode 100644 helm-chart/values.yaml diff --git a/helm-chart/Chart.yaml b/helm-chart/Chart.yaml new file mode 100644 index 0000000..577e091 --- /dev/null +++ b/helm-chart/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: mcp-server-kubernetes +description: MCP server for interacting with Kubernetes clusters +type: application +version: 2.8.0 +appVersion: "2.8.0" +home: https://github.com/Flux159/mcp-server-kubernetes +sources: + - https://github.com/Flux159/mcp-server-kubernetes +maintainers: + - name: Flux159, Paras Patel +keywords: + - mcp + - kubernetes + - kubectl + - k8s + - eks + - gke + - aks +annotations: + category: Infrastructure \ No newline at end of file diff --git a/helm-chart/values.schema.json b/helm-chart/values.schema.json new file mode 100644 index 0000000..5acb020 --- /dev/null +++ b/helm-chart/values.schema.json @@ -0,0 +1,307 @@ +{ + "$schema": "https://json-schema.org/draft-07/schema#", + "type": "object", + "title": "MCP Server Kubernetes Helm Chart Values Schema", + "description": "Schema for validating values.yaml configuration", + "properties": { + "image": { + "type": "object", + "properties": { + "repository": {"type": "string"}, + "pullPolicy": {"type": "string", "enum": ["Always", "IfNotPresent", "Never"]}, + "tag": {"type": "string"} + }, + "required": ["repository", "pullPolicy"], + "additionalProperties": false + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"} + } + } + }, + "nameOverride": {"type": "string"}, + "fullnameOverride": {"type": "string"}, + "commonLabels": {"type": "object"}, + "commonAnnotations": {"type": "object"}, + "transport": { + "type": "object", + "properties": { + "mode": {"type": "string", "enum": ["stdio", "sse", "http"]}, + "service": { + "type": "object", + "properties": { + "type": {"type": "string", "enum": ["ClusterIP", "NodePort", "LoadBalancer", "ExternalName"]}, + "port": {"type": "integer", "minimum": 1, "maximum": 65535}, + "targetPort": {"type": "integer", "minimum": 1, "maximum": 65535}, + "annotations": {"type": "object"} + } + }, + "ingress": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"}, + "className": {"type": "string"}, + "annotations": {"type": "object"}, + "hosts": { + "type": "array", + "items": { + "type": "object", + "properties": { + "host": {"type": "string"}, + "paths": { + "type": "array", + "items": { + "type": "object", + "properties": { + "path": {"type": "string"}, + "pathType": {"type": "string", "enum": ["Exact", "Prefix", "ImplementationSpecific"]} + } + } + } + } + } + }, + "tls": {"type": "array"} + } + } + }, + "required": ["mode"] + }, + "kubeconfig": { + "type": "object", + "properties": { + "provider": {"type": "string", 
"enum": ["aws", "gcp", "azure", "url", "serviceaccount", "custom", "content"]}, + "aws": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "clusterName": {"type": "string"}, + "region": {"type": "string"}, + "roleArn": {"type": "string"}, + "extraArgs": {"type": "array", "items": {"type": "string"}} + }, + "required": ["name", "clusterName", "region"] + } + }, + "defaultContext": {"type": "string"} + } + }, + "gcp": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "clusterName": {"type": "string"}, + "zone": {"type": "string"}, + "region": {"type": "string"}, + "project": {"type": "string"}, + "extraArgs": {"type": "array", "items": {"type": "string"}} + }, + "required": ["name", "clusterName"], + "anyOf": [ + {"required": ["zone"]}, + {"required": ["region"]} + ] + } + }, + "defaultContext": {"type": "string"} + } + }, + "azure": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "clusterName": {"type": "string"}, + "resourceGroup": {"type": "string"}, + "subscription": {"type": "string"}, + "extraArgs": {"type": "array", "items": {"type": "string"}} + }, + "required": ["name", "clusterName", "resourceGroup"] + } + }, + "defaultContext": {"type": "string"} + } + }, + "url": { + "type": "object", + "properties": { + "configs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "url": {"type": "string", "format": "uri"}, + "extraArgs": {"type": "array", "items": {"type": "string"}} + }, + "required": ["name", "url"] + } + } + } + }, + "custom": { + "type": "object", + "properties": { + "command": {"type": "string"}, + "args": {"type": "array", "items": {"type": "string"}} + } + }, + "content": {"type": "string"}, + "env": {"type": "object"}, + "initContainer": { + "type": "object", + "properties": { + "maxRetries": {"type": "integer", "minimum": 1, "maximum": 10}, + "retryDelay": {"type": "integer", "minimum": 1, "maximum": 300}, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": {"type": "string"}, + "memory": {"type": "string"} + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": {"type": "string"}, + "memory": {"type": "string"} + } + } + } + } + } + } + }, + "required": ["provider"] + }, + "security": { + "type": "object", + "properties": { + "allowOnlyNonDestructive": {"type": "boolean"}, + "allowOnlyReadonly": {"type": "boolean"}, + "allowedTools": {"type": "string"}, + "podSecurityContext": {"type": "object"}, + "securityContext": {"type": "object"} + } + }, + "serviceAccount": { + "type": "object", + "properties": { + "create": {"type": "boolean"}, + "automount": {"type": "boolean"}, + "annotations": {"type": "object"}, + "name": {"type": "string"} + } + }, + "rbac": { + "type": "object", + "properties": { + "create": {"type": "boolean"}, + "annotations": {"type": "object"}, + "rules": {"type": "array"}, + "useLegacyRules": {"type": "boolean"}, + "legacyRules": {"type": "array"} + } + }, + "podAnnotations": {"type": "object"}, + "podLabels": {"type": "object"}, + "replicaCount": {"type": "integer", "minimum": 1}, + "autoscaling": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"}, + "minReplicas": {"type": 
"integer", "minimum": 1}, + "maxReplicas": {"type": "integer", "minimum": 1}, + "targetCPUUtilizationPercentage": {"type": "integer", "minimum": 1, "maximum": 100}, + "targetMemoryUtilizationPercentage": {"type": "integer", "minimum": 1, "maximum": 100}, + "customMetrics": {"type": "array"}, + "behavior": {"type": "object"}, + "annotations": {"type": "object"} + } + }, + "podDisruptionBudget": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"}, + "minAvailable": {"oneOf": [{"type": "integer"}, {"type": "string"}]}, + "maxUnavailable": {"oneOf": [{"type": "integer"}, {"type": "string"}]}, + "annotations": {"type": "object"} + }, + "not": { + "allOf": [ + {"required": ["minAvailable"]}, + {"required": ["maxUnavailable"]} + ] + } + }, + "resources": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "properties": { + "cpu": {"type": "string"}, + "memory": {"type": "string"} + } + }, + "requests": { + "type": "object", + "properties": { + "cpu": {"type": "string"}, + "memory": {"type": "string"} + } + } + } + }, + "livenessProbe": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"} + } + }, + "readinessProbe": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"} + } + }, + "nodeSelector": {"type": "object"}, + "tolerations": {"type": "array"}, + "affinity": {"type": "object"}, + "env": {"type": "object"}, + "volumeMounts": {"type": "array"}, + "volumes": {"type": "array"}, + "networkPolicy": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"}, + "annotations": {"type": "object"}, + "ingress": {"type": "array"}, + "egress": {"type": "array"} + } + } + }, + "required": ["image", "transport", "kubeconfig", "security"], + "additionalProperties": false +} \ No newline at end of file diff --git a/helm-chart/values.yaml b/helm-chart/values.yaml new file mode 100644 index 0000000..6ecc6e8 --- /dev/null +++ b/helm-chart/values.yaml @@ -0,0 +1,651 @@ +# Default values for mcp-server-kubernetes +# This is a YAML-formatted file. + +# Image configuration +image: + repository: flux159/mcp-server-kubernetes + pullPolicy: IfNotPresent + # Overrides the image tag whose default is "latest". Pin to specific version for production. + tag: "latest" + + # Multi-architecture support + # Set architecture-specific image tags if needed + # architectures: + # amd64: "latest-amd64" + # arm64: "latest-arm64" + + # Architecture preference (auto-detected if not specified) + # Set to specific architecture if needed: amd64, arm64, etc. 
+ architecture: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +# Common labels added to all resources +commonLabels: {} +# Example: +# commonLabels: +# environment: production +# team: platform +# cost-center: engineering + +# Common annotations added to all resources +commonAnnotations: {} +# Example: +# commonAnnotations: +# monitoring.coreos.com/enabled: "true" +# backup.velero.io/backup-volumes: "data" +# policy.kubernetes.io/security-level: "restricted" + +# Transport configuration +transport: + # Transport mode: stdio, sse, http + mode: "http" + + # Service configuration (for sse/http modes) + service: + type: ClusterIP + port: 3001 + targetPort: 3001 + annotations: {} + + # Ingress configuration (for sse/http modes) + # ⚠️ WARNING: Model Context Protocol (MCP) uses streaming connections + # Some ingress controllers and load balancers may not support MCP properly: + # + # KNOWN ISSUES: + # - AWS Classic Load Balancer (ELB): Does not support streaming, use NLB instead + # - NGINX Ingress with ELB: May timeout streaming connections, configure timeouts + # - CloudFlare: May buffer streaming responses, disable buffering + # - Some API Gateways: May not support Server-Sent Events (SSE) properly + # + # RECOMMENDED CONFIGURATIONS: + # - AWS: Use NLB (type: LoadBalancer with aws-load-balancer-type: nlb) + # - NGINX: Add streaming timeout annotations (nginx.ingress.kubernetes.io/proxy-read-timeout) + # - For production: Test MCP streaming behavior thoroughly with your ingress setup + ingress: + enabled: false + className: "" + annotations: {} + # Example streaming-friendly annotations: + # annotations: + # nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" + # nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" + # nginx.ingress.kubernetes.io/proxy-buffering: "off" + # nginx.ingress.kubernetes.io/proxy-request-buffering: "off" + hosts: + - host: mcp-server.local + paths: + - path: / + pathType: Prefix + tls: [] + +# Kubeconfig configuration +kubeconfig: + # Provider type: aws, gcp, azure, url, serviceaccount, custom, content + provider: "serviceaccount" + + # AWS EKS configuration + aws: + clusters: [] + # Example: + # clusters: + # - name: "prod-us-east" + # clusterName: "prod-cluster" + # region: "us-east-1" + # roleArn: "arn:aws:iam::123456789:role/EKSAdminRole" + # extraArgs: + # - "--profile=production" + # - "--alias=prod-east" + # - name: "staging-us-west" + # clusterName: "staging-cluster" + # region: "us-west-2" + # roleArn: "arn:aws:iam::987654321:role/EKSReadOnlyRole" + # extraArgs: + # - "--profile=staging" + defaultContext: "" + + # GCP GKE configuration + gcp: + clusters: [] + # Example: + # clusters: + # - name: "prod-cluster" + # clusterName: "prod-gke" + # zone: "us-central1-a" + # project: "company-prod" + # extraArgs: + # - "--internal-ip" + # - name: "staging-cluster" + # clusterName: "staging-gke" + # zone: "us-central1-b" + # project: "company-staging" + defaultContext: "" + + # Azure AKS configuration + azure: + clusters: [] + # Example: + # clusters: + # - name: "prod-cluster" + # clusterName: "prod-aks" + # resourceGroup: "prod-rg" + # subscription: "prod-sub-id" + # extraArgs: + # - "--admin" + # - name: "dev-cluster" + # clusterName: "dev-aks" + # resourceGroup: "dev-rg" + # subscription: "dev-sub-id" + defaultContext: "" + + # URL-based configuration + url: + configs: [] + # Example: + # configs: + # - name: "prod-config" + # url: "https://storage.company.com/prod-kubeconfig.yaml" + # extraArgs: + # - "--header=Authorization: Bearer 
${PROD_TOKEN}" + # - name: "staging-config" + # url: "https://storage.company.com/staging-kubeconfig.yaml" + # extraArgs: + # - "--header=Authorization: Bearer ${STAGING_TOKEN}" + + # Custom command configuration + custom: + command: "" + args: [] + # Example: + # command: "/usr/local/bin/custom-kubeconfig-fetcher" + # args: + # - "--cluster-id=special-cluster" + # - "--output-path=/shared/kubeconfig" + # - "--format=kubeconfig" + + # Direct kubeconfig content + content: "" + # Example: + # content: | + # apiVersion: v1 + # kind: Config + # clusters: ... + + # Environment variables for kubeconfig fetching + env: {} + # Example: + # env: + # AWS_PROFILE: "production" + # GOOGLE_APPLICATION_CREDENTIALS: "/var/secrets/gcp-key.json" + # PROD_TOKEN: "my-auth-token" + + # Init container retry configuration for kubeconfig fetch + initContainer: + maxRetries: 3 + retryDelay: 10 + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 50m + memory: 64Mi + +# Security configuration +security: + # Tool filtering + allowOnlyNonDestructive: false + # When enabled, these destructive tools are DISABLED: + # kubectl_delete, uninstall_helm_chart, cleanup, kubectl_generic + allowOnlyReadonly: false + # When enabled, only these read-only tools are available: + # kubectl_get, kubectl_describe, kubectl_logs, kubectl_context, + # explain_resource, list_api_resources, ping + allowedTools: "" # Comma-separated list of allowed tools + # Examples: "kubectl_get,kubectl_describe,kubectl_logs,kubectl_context" + # Available tools: kubectl_get, kubectl_describe, kubectl_apply, kubectl_delete, + # kubectl_create, kubectl_logs, kubectl_patch, kubectl_scale, kubectl_rollout, + # kubectl_generic, kubectl_context, install_helm_chart, upgrade_helm_chart, + # uninstall_helm_chart, start_port_forward, stop_port_forward, exec_in_pod, + # explain_resource, list_api_resources, ping, cleanup + + # Pod security context + podSecurityContext: + fsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# Service Account +serviceAccount: + # Specifies whether a service account should be created + create: true + # Automatically mount a ServiceAccount's API credentials? + automount: true + # Annotations to add to the service account + annotations: {} + # Examples for cloud provider IAM integration: + # + # AWS IRSA (IAM Roles for Service Accounts): + # annotations: + # eks.amazonaws.com/role-arn: "arn:aws:iam::123456789012:role/mcp-server-role" + # eks.amazonaws.com/sts-regional-endpoints: "true" + # + # GCP Workload Identity: + # annotations: + # iam.gke.io/gcp-service-account: "mcp-server@my-project.iam.gserviceaccount.com" + # + # Azure Workload Identity: + # annotations: + # azure.workload.identity/client-id: "12345678-1234-1234-1234-123456789012" + # azure.workload.identity/tenant-id: "87654321-4321-4321-4321-210987654321" + # azure.workload.identity/use: "true" + # + # The name of the service account to use. 
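  # A minimal override sketch combining the options above (all values are
  # illustrative assumptions): a created service account annotated for AWS
  # IRSA, with the server restricted to read-only tools.
  #
  # # my-values.yaml (hypothetical file)
  # serviceAccount:
  #   create: true
  #   annotations:
  #     eks.amazonaws.com/role-arn: "arn:aws:iam::111122223333:role/mcp-server-readonly"
  # security:
  #   allowOnlyReadonly: true
  #
  # # Install with: helm install mcp-server ./helm-chart -f my-values.yaml
  #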
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +# RBAC configuration +rbac: + # Specifies whether RBAC resources should be created + create: true + # Annotations to add to RBAC resources + annotations: {} + # Rules for ClusterRole - least-privilege defaults + # For production, customize these rules based on your specific needs + rules: + # Core resources read access + - apiGroups: [""] + resources: ["pods", "services", "endpoints", "persistentvolumeclaims", "configmaps", "secrets", "namespaces", "nodes"] + verbs: ["get", "list", "watch"] + + # Core resources write access (needed for MCP operations) + - apiGroups: [""] + resources: ["pods", "services", "endpoints", "persistentvolumeclaims", "configmaps"] + verbs: ["create", "update", "patch", "delete"] + + # Pod exec and logs access (needed for exec_in_pod and logs) + - apiGroups: [""] + resources: ["pods/exec", "pods/log", "pods/portforward"] + verbs: ["create", "get"] + + # Apps resources + - apiGroups: ["apps"] + resources: ["deployments", "replicasets", "daemonsets", "statefulsets"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + + # Batch resources + - apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + + # Networking + - apiGroups: ["networking.k8s.io"] + resources: ["networkpolicies", "ingresses"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + + # RBAC (read-only for security) + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings", "clusterroles", "clusterrolebindings"] + verbs: ["get", "list", "watch"] + + # Metrics and monitoring + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list"] + + # Custom Resource Definitions (read-only by default) + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch"] + + # Events (read-only) + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch"] + + # Advanced RBAC configuration + # Set to true to use the more permissive legacy rules (not recommended for production) + useLegacyRules: false + + # Legacy rules (only used if useLegacyRules: true) + legacyRules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] + +# Pod configuration +podAnnotations: {} +podLabels: {} + +# Deployment configuration +replicaCount: 1 + +# Horizontal Pod Autoscaler +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 10 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + + # Custom metrics for scaling + customMetrics: [] + # Example: + # customMetrics: + # - type: Pods + # pods: + # metric: + # name: custom_metric + # target: + # type: AverageValue + # averageValue: "100m" + + # Scaling behavior configuration + behavior: {} + # Example: + # behavior: + # scaleUp: + # stabilizationWindowSeconds: 60 + # policies: + # - type: Percent + # value: 100 + # periodSeconds: 15 + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Percent + # value: 10 + # periodSeconds: 60 + + # Annotations for HPA resource + annotations: {} + +# Pod Disruption Budget for high availability +podDisruptionBudget: + enabled: false + # Define either minAvailable OR maxUnavailable, not both + minAvailable: 1 + # maxUnavailable: 1 + # maxUnavailable: 50% + annotations: {} + +# Resource limits +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 
128Mi + +# Health check configuration +# For HTTP/SSE transport modes: uses TCP port checks (default) +# For stdio mode: uses process checks +# Override with custom exec, httpGet, or tcpSocket as needed + +# Liveness probe - determines if the container should be restarted +livenessProbe: + enabled: false + # TCP port check is used by default for HTTP/SSE modes + # Process check is used by default for stdio mode + # Override with custom configuration: + # tcpSocket: + # port: 3001 + # httpGet: + # path: /health + # port: http + # exec: + # command: + # - /bin/sh + # - -c + # - "pgrep -f 'node.*dist/index.js' > /dev/null" + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + +# Readiness probe - determines if the container can serve traffic +readinessProbe: + enabled: false + # TCP port check is used by default for HTTP/SSE modes + # Process check is used by default for stdio mode + # Override with custom configuration: + # tcpSocket: + # port: 3001 + # httpGet: + # path: /ready + # port: http + # exec: + # command: + # - /bin/sh + # - -c + # - "pgrep -f 'node.*dist/index.js' > /dev/null" + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + +# Startup probe - determines if the container has started successfully +startupProbe: + enabled: false + # TCP port check is used by default for HTTP/SSE modes + # Process check is used by default for stdio mode + # Override with custom configuration: + # httpGet: + # path: /ping + # port: http + # tcpSocket: + # port: 3001 + # exec: + # command: + # - /bin/sh + # - -c + # - "pgrep -f 'node.*dist/index.js' > /dev/null" + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 30 # Allow up to 5 minutes for startup + successThreshold: 1 + +# Node selection +nodeSelector: {} +# Example for architecture-specific node selection: +# nodeSelector: +# kubernetes.io/arch: amd64 + +tolerations: [] + +# Affinity configuration +affinity: {} +# Example for multi-architecture affinity: +# affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: kubernetes.io/arch +# operator: In +# values: ["amd64", "arm64"] +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: kubernetes.io/arch +# operator: In +# values: ["amd64", "arm64"] + +# Additional environment variables +env: {} +# Example: +# env: +# CUSTOM_VAR: "value" + +# Additional volume mounts +volumeMounts: [] +# Example: +# volumeMounts: +# - name: custom-volume +# mountPath: /custom/path + +# Additional volumes +volumes: [] +# Example: +# volumes: +# - name: custom-volume +# configMap: +# name: custom-configmap + +# Network Policy configuration +networkPolicy: + # Enable NetworkPolicy creation + enabled: false + + # Annotations for NetworkPolicy + annotations: {} + + # Default deny all ingress and egress traffic (security best practice) + # Users must explicitly define allowed connections below + + # Flexible DNS configuration for egress rules + dns: + # Enable automatic DNS egress rules + enabled: true + # DNS namespace selector (adjust for your cluster) + namespaceSelector: + matchLabels: + name: kube-system + # DNS pod selector (adjust for your DNS provider) + podSelector: + matchLabels: + k8s-app: kube-dns + # Alternative DNS configurations for different providers + # Uncomment and adjust as needed: + # CoreDNS: + # podSelector: + # 
matchLabels: + # k8s-app: kube-dns + # Amazon EKS DNS: + # podSelector: + # matchLabels: + # k8s-app: kube-dns + # Azure AKS DNS: + # podSelector: + # matchLabels: + # k8s-app: kube-dns + # Custom DNS: + # podSelector: + # matchLabels: + # app: custom-dns + + # Kubernetes API access configuration + kubernetesApi: + # Enable automatic Kubernetes API egress rules + enabled: true + # Service CIDR (adjust for your cluster) + serviceCidr: "10.96.0.0/12" + # Alternative CIDRs for different environments: + # GKE default: "10.96.0.0/12" + # EKS default: "10.100.0.0/16" or "172.20.0.0/16" + # AKS default: "10.0.0.0/16" + # Custom: specify your cluster's service CIDR + + # Cloud provider API access (for kubeconfig providers) + cloudProviderApi: + # Enable automatic cloud provider API egress rules + enabled: true + # Allow all HTTPS traffic (less secure but more compatible) + allowAllHttps: true + # Specific cloud provider CIDR ranges (more secure) + # Set allowAllHttps: false and configure specific ranges: + # awsCidrs: + # - "52.94.0.0/16" # AWS API endpoints + # - "54.239.0.0/16" # AWS services + # gcpCidrs: + # - "35.199.0.0/16" # Google APIs + # - "199.36.153.8/30" # metadata.google.internal + # azureCidrs: + # - "20.0.0.0/8" # Azure public cloud + + # Metadata services access + metadata: + # Enable access to cloud metadata services + enabled: true + # AWS/GCP metadata service + awsGcpMetadata: "169.254.169.254/32" + # Azure metadata service + azureMetadata: "169.254.169.254/32" + + # Ingress rules - traffic coming TO the pod + # By default, all ingress is DENIED. Add rules to allow specific traffic. + ingress: [] + # Examples: + # ingress: + # # Allow traffic from specific pods with labels + # - from: + # - podSelector: + # matchLabels: + # app: allowed-app + # - namespaceSelector: + # matchLabels: + # name: allowed-namespace + # ports: + # - protocol: TCP + # port: 3001 + # + # # Allow traffic from specific CIDR blocks + # - from: + # - ipBlock: + # cidr: 10.0.0.0/8 + # except: + # - 10.0.1.0/24 + # ports: + # - protocol: TCP + # port: 3001 + # + # # Allow traffic from ingress controller + # - from: + # - namespaceSelector: + # matchLabels: + # name: ingress-nginx + # podSelector: + # matchLabels: + # app.kubernetes.io/name: ingress-nginx + # ports: + # - protocol: TCP + # port: 3001 + + # Egress rules - traffic going FROM the pod + # By default, all egress is DENIED. Add rules to allow specific traffic. 
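  # A tightened egress sketch for an assumed EKS environment (CIDRs are
  # illustrative and must match your cluster): point kubernetesApi at the
  # cluster's service CIDR and replace the allow-all HTTPS rule with specific
  # AWS ranges so only the required provider endpoints are reachable.
  #
  # kubernetesApi:
  #   serviceCidr: "172.20.0.0/16"
  # cloudProviderApi:
  #   allowAllHttps: false
  #   awsCidrs:
  #     - "52.94.0.0/16"
  #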
+ # Note: DNS, Kubernetes API, and cloud provider rules are automatically added if enabled above + egress: [] + # Examples: + # egress: + # # Allow egress to specific services + # - to: + # - podSelector: + # matchLabels: + # app: allowed-service + # ports: + # - protocol: TCP + # port: 8080 + # + # # Allow egress to external services + # - to: + # - ipBlock: + # cidr: 203.0.113.0/24 + # ports: + # - protocol: TCP + # port: 443 \ No newline at end of file From 9787f1cda0f5894b6286d08084c84d1de72d692a Mon Sep 17 00:00:00 2001 From: Paras Patel Date: Fri, 22 Aug 2025 00:19:44 -0700 Subject: [PATCH 02/18] feat: add core Kubernetes resource templates - Add helper functions for labels, names, and multi-architecture support - Add deployment with init container for kubeconfig fetching - Add service account with cloud provider IAM annotations - Add RBAC with least-privilege defaults and legacy mode option --- helm-chart/templates/_helpers.tpl | 268 +++++++++++++++++++++++ helm-chart/templates/deployment.yaml | 206 +++++++++++++++++ helm-chart/templates/rbac.yaml | 53 +++++ helm-chart/templates/serviceaccount.yaml | 19 ++ 4 files changed, 546 insertions(+) create mode 100644 helm-chart/templates/_helpers.tpl create mode 100644 helm-chart/templates/deployment.yaml create mode 100644 helm-chart/templates/rbac.yaml create mode 100644 helm-chart/templates/serviceaccount.yaml diff --git a/helm-chart/templates/_helpers.tpl b/helm-chart/templates/_helpers.tpl new file mode 100644 index 0000000..962f2ae --- /dev/null +++ b/helm-chart/templates/_helpers.tpl @@ -0,0 +1,268 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "mcp-server-kubernetes.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "mcp-server-kubernetes.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "mcp-server-kubernetes.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "mcp-server-kubernetes.labels" -}} +helm.sh/chart: {{ include "mcp-server-kubernetes.chart" . }} +{{ include "mcp-server-kubernetes.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.commonLabels }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Common annotations +*/}} +{{- define "mcp-server-kubernetes.annotations" -}} +{{- with .Values.commonAnnotations }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "mcp-server-kubernetes.selectorLabels" -}} +app.kubernetes.io/name: {{ include "mcp-server-kubernetes.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "mcp-server-kubernetes.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "mcp-server-kubernetes.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Get the appropriate init container image based on provider with architecture support +*/}} +{{- define "mcp-server-kubernetes.initImage" -}} +{{- $baseImage := "" }} +{{- if eq .Values.kubeconfig.provider "aws" }} +{{- $baseImage = "amazon/aws-cli" }} +{{- else if eq .Values.kubeconfig.provider "gcp" }} +{{- $baseImage = "gcr.io/google.com/cloudsdktool/cloud-sdk" }} +{{- else if eq .Values.kubeconfig.provider "azure" }} +{{- $baseImage = "mcr.microsoft.com/azure-cli" }} +{{- else if eq .Values.kubeconfig.provider "url" }} +{{- $baseImage = "curlimages/curl" }} +{{- else if eq .Values.kubeconfig.provider "custom" }} +{{- $baseImage = "alpine" }} +{{- else }} +{{- $baseImage = "alpine" }} +{{- end }} +{{- if and .Values.image.architectures .Values.image.architecture }} +{{- $archTag := index .Values.image.architectures .Values.image.architecture | default "latest" }} +{{- printf "%s:%s" $baseImage $archTag }} +{{- else }} +{{- printf "%s:latest" $baseImage }} +{{- end }} +{{- end }} + +{{/* +Determine if we need an init container +*/}} +{{- define "mcp-server-kubernetes.needsInitContainer" -}} +{{- if or (eq .Values.kubeconfig.provider "aws") (eq .Values.kubeconfig.provider "gcp") (eq .Values.kubeconfig.provider "azure") (eq .Values.kubeconfig.provider "url") (eq .Values.kubeconfig.provider "custom") }} +true +{{- else }} +false +{{- end }} +{{- end }} + +{{/* +Generate kubeconfig environment variable based on provider +*/}} +{{- define "mcp-server-kubernetes.kubeconfigEnv" -}} +{{- if eq .Values.kubeconfig.provider "url" }} +{{- $files := list }} +{{- range .Values.kubeconfig.url.configs }} +{{- $files = append $files (printf "/kubeconfig/%s.yaml" .name) }} +{{- end }} +{{- $files | join ":" }} +{{- else if eq .Values.kubeconfig.provider "content" }} +/kubeconfig/kubeconfig.yaml +{{- else if eq .Values.kubeconfig.provider "serviceaccount" }} +{{- /* ServiceAccount mode doesn't need KUBECONFIG env var */ -}} +{{- else }} +/kubeconfig/kubeconfig +{{- end }} +{{- end }} + +{{/* +Generate architecture-aware node selector +*/}} +{{- define "mcp-server-kubernetes.nodeSelector" -}} +{{- $nodeSelector := .Values.nodeSelector | default dict }} +{{- if .Values.image.architecture }} +{{- $nodeSelector = merge $nodeSelector (dict "kubernetes.io/arch" .Values.image.architecture) }} +{{- end }} +{{- if $nodeSelector }} +{{- toYaml $nodeSelector }} +{{- end }} +{{- end }} + +{{/* +Generate architecture-aware affinity +*/}} +{{- define "mcp-server-kubernetes.affinity" -}} +{{- $affinity := .Values.affinity | default dict }} +{{- if and .Values.image.architectures (not .Values.image.architecture) }} +{{- $archList := keys .Values.image.architectures }} +{{- if not (hasKey $affinity "nodeAffinity") }} +{{- $affinity = merge $affinity (dict "nodeAffinity" dict) }} +{{- end }} +{{- if not (hasKey $affinity.nodeAffinity "preferredDuringSchedulingIgnoredDuringExecution") }} +{{- $preferred := list (dict "weight" 100 "preference" (dict "matchExpressions" (list (dict "key" "kubernetes.io/arch" "operator" "In" "values" $archList)))) }} +{{- $affinity = merge $affinity (dict "nodeAffinity" (merge 
$affinity.nodeAffinity (dict "preferredDuringSchedulingIgnoredDuringExecution" $preferred))) }} +{{- end }} +{{- end }} +{{- if $affinity }} +{{- toYaml $affinity }} +{{- end }} +{{- end }} + +{{/* +Get the image tag with architecture support +*/}} +{{- define "mcp-server-kubernetes.imageTag" -}} +{{- if and .Values.image.architectures .Values.image.architecture }} +{{- index .Values.image.architectures .Values.image.architecture | default (.Values.image.tag | default "latest") }} +{{- else }} +{{- .Values.image.tag | default "latest" }} +{{- end }} +{{- end }} + +{{/* +Generate health check for TCP port +*/}} +{{- define "mcp-server-kubernetes.tcpSocketCheck" -}} +{{- $values := . }} +{{- if or (eq $values.transport.mode "sse") (eq $values.transport.mode "http") }} +tcpSocket: + port: {{ $values.transport.service.targetPort | default 3001 }} +{{- else }} +exec: + command: + - "/bin/sh" + - "-c" + - "pgrep -f 'node.*dist/index.js' > /dev/null" +{{- end }} +{{- end }} + +{{/* +Generate health check command based on mode +*/}} +{{- define "mcp-server-kubernetes.healthCheckCommand" -}} +{{- $probe := index . 0 }} +{{- $values := index . 1 }} +{{- if $values.healthChecks.enabled }} +{{- if eq $values.healthChecks.mode "custom" }} +{{- if eq $probe "startup" }} +{{- toYaml $values.healthChecks.customChecks.startup }} +{{- else if eq $probe "liveness" }} +{{- toYaml $values.healthChecks.customChecks.liveness }} +{{- else if eq $probe "readiness" }} +{{- toYaml $values.healthChecks.customChecks.readiness }} +{{- end }} +{{- else }} +- "/bin/sh" +- "-c" +- "pgrep -f 'node.*dist/index.js' > /dev/null" +{{- end }} +{{- end }} +{{- end }} + +{{/* +Generate startup probe configuration +*/}} +{{- define "mcp-server-kubernetes.startupProbe" -}} +{{- if .Values.startupProbe.enabled }} +{{- if and (not .Values.startupProbe.exec) (not .Values.startupProbe.httpGet) (not .Values.startupProbe.tcpSocket) }} +{{- include "mcp-server-kubernetes.tcpSocketCheck" .Values | nindent 0 }} +initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds | default 10 }} +periodSeconds: {{ .Values.startupProbe.periodSeconds | default 10 }} +timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds | default 5 }} +failureThreshold: {{ .Values.startupProbe.failureThreshold | default 30 }} +successThreshold: {{ .Values.startupProbe.successThreshold | default 1 }} +{{- else }} +{{- omit .Values.startupProbe "enabled" | toYaml }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Generate liveness probe configuration +*/}} +{{- define "mcp-server-kubernetes.livenessProbe" -}} +{{- if .Values.livenessProbe.enabled }} +{{- if and (not .Values.livenessProbe.exec) (not .Values.livenessProbe.httpGet) (not .Values.livenessProbe.tcpSocket) }} +{{- include "mcp-server-kubernetes.tcpSocketCheck" .Values | nindent 0 }} +initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds | default 30 }} +periodSeconds: {{ .Values.livenessProbe.periodSeconds | default 10 }} +timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds | default 5 }} +failureThreshold: {{ .Values.livenessProbe.failureThreshold | default 3 }} +successThreshold: {{ .Values.livenessProbe.successThreshold | default 1 }} +{{- else }} +{{- omit .Values.livenessProbe "enabled" | toYaml }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Generate readiness probe configuration +*/}} +{{- define "mcp-server-kubernetes.readinessProbe" -}} +{{- if .Values.readinessProbe.enabled }} +{{- if and (not .Values.readinessProbe.exec) (not .Values.readinessProbe.httpGet) (not 
.Values.readinessProbe.tcpSocket) }} +{{- include "mcp-server-kubernetes.tcpSocketCheck" .Values | nindent 0 }} +initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds | default 5 }} +periodSeconds: {{ .Values.readinessProbe.periodSeconds | default 5 }} +timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds | default 5 }} +failureThreshold: {{ .Values.readinessProbe.failureThreshold | default 3 }} +successThreshold: {{ .Values.readinessProbe.successThreshold | default 1 }} +{{- else }} +{{- omit .Values.readinessProbe "enabled" | toYaml }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/templates/deployment.yaml b/helm-chart/templates/deployment.yaml new file mode 100644 index 0000000..801683f --- /dev/null +++ b/helm-chart/templates/deployment.yaml @@ -0,0 +1,206 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "mcp-server-kubernetes.fullname" . }} + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if $commonAnnotations }} + annotations: + {{- $commonAnnotations | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + {{- include "mcp-server-kubernetes.selectorLabels" . | nindent 6 }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 8 }} + {{- end }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "mcp-server-kubernetes.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.security.podSecurityContext | nindent 8 }} + {{- if eq (include "mcp-server-kubernetes.needsInitContainer" .) "true" }} + initContainers: + - name: kubeconfig-fetcher + image: {{ include "mcp-server-kubernetes.initImage" . 
}} + imagePullPolicy: IfNotPresent + securityContext: + {{- toYaml .Values.security.securityContext | nindent 12 }} + {{- if eq .Values.kubeconfig.provider "aws" }} + command: ["/bin/sh"] + args: ["/scripts/fetch-aws-kubeconfig.sh"] + {{- else if eq .Values.kubeconfig.provider "gcp" }} + command: ["/bin/sh"] + args: ["/scripts/fetch-gcp-kubeconfig.sh"] + {{- else if eq .Values.kubeconfig.provider "azure" }} + command: ["/bin/sh"] + args: ["/scripts/fetch-azure-kubeconfig.sh"] + {{- else if eq .Values.kubeconfig.provider "url" }} + command: ["/bin/sh"] + args: ["/scripts/fetch-url-kubeconfig.sh"] + {{- else if eq .Values.kubeconfig.provider "custom" }} + command: ["/bin/sh"] + args: ["/scripts/fetch-custom-kubeconfig.sh"] + {{- end }} + env: + # Retry configuration for init container + - name: MAX_RETRIES + value: {{ .Values.kubeconfig.initContainer.maxRetries | default 3 | quote }} + - name: RETRY_DELAY + value: {{ .Values.kubeconfig.initContainer.retryDelay | default 10 | quote }} + {{- range $key, $value := .Values.kubeconfig.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + volumeMounts: + - name: kubeconfig-volume + mountPath: /kubeconfig + - name: kubeconfig-scripts + mountPath: /scripts + resources: + {{- toYaml .Values.kubeconfig.initContainer.resources | nindent 12 }} + {{- end }} + containers: + - name: mcp-server + securityContext: + {{- toYaml .Values.security.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ include "mcp-server-kubernetes.imageTag" . }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if or (eq .Values.transport.mode "sse") (eq .Values.transport.mode "http") }} + ports: + - name: http + containerPort: {{ .Values.transport.service.targetPort }} + protocol: TCP + {{- end }} + env: + # Transport configuration + {{- if eq .Values.transport.mode "sse" }} + - name: ENABLE_UNSAFE_SSE_TRANSPORT + value: "true" + {{- else if eq .Values.transport.mode "http" }} + - name: ENABLE_UNSAFE_STREAMABLE_HTTP_TRANSPORT + value: "true" + {{- end }} + {{- if or (eq .Values.transport.mode "sse") (eq .Values.transport.mode "http") }} + - name: PORT + value: {{ .Values.transport.service.targetPort | quote }} + - name: HOST + value: "0.0.0.0" + {{- end }} + + # Security configuration + {{- if .Values.security.allowOnlyNonDestructive }} + - name: ALLOW_ONLY_NON_DESTRUCTIVE_TOOLS + value: "true" + {{- end }} + {{- if .Values.security.allowOnlyReadonly }} + - name: ALLOW_ONLY_READONLY_TOOLS + value: "true" + {{- end }} + {{- if .Values.security.allowedTools }} + - name: ALLOWED_TOOLS + value: {{ .Values.security.allowedTools | quote }} + {{- end }} + + # Kubeconfig configuration + {{- if ne .Values.kubeconfig.provider "serviceaccount" }} + {{- $kubeconfigPath := include "mcp-server-kubernetes.kubeconfigEnv" . }} + {{- if $kubeconfigPath }} + - name: KUBECONFIG + value: {{ $kubeconfigPath | quote }} + {{- end }} + {{- end }} + {{- if eq .Values.kubeconfig.provider "content" }} + - name: KUBECONFIG_YAML + valueFrom: + secretKeyRef: + name: {{ include "mcp-server-kubernetes.fullname" . }}-kubeconfig + key: kubeconfig.yaml + {{- end }} + + # Additional environment variables + {{- range $key, $value := .Values.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- if ne .Values.kubeconfig.provider "serviceaccount" }} + volumeMounts: + - name: kubeconfig-volume + mountPath: /kubeconfig + readOnly: true + {{- range .Values.volumeMounts }} + - {{- toYaml . 
| nindent 14 }} + {{- end }} + {{- else }} + volumeMounts: + {{- range .Values.volumeMounts }} + - {{- toYaml . | nindent 12 }} + {{- end }} + {{- end }} + {{- $startupProbe := include "mcp-server-kubernetes.startupProbe" . }} + {{- if $startupProbe }} + startupProbe: + {{- $startupProbe | nindent 12 }} + {{- end }} + {{- $livenessProbe := include "mcp-server-kubernetes.livenessProbe" . }} + {{- if $livenessProbe }} + livenessProbe: + {{- $livenessProbe | nindent 12 }} + {{- end }} + {{- $readinessProbe := include "mcp-server-kubernetes.readinessProbe" . }} + {{- if $readinessProbe }} + readinessProbe: + {{- $readinessProbe | nindent 12 }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + {{- if eq (include "mcp-server-kubernetes.needsInitContainer" .) "true" }} + - name: kubeconfig-volume + emptyDir: {} + - name: kubeconfig-scripts + configMap: + name: {{ include "mcp-server-kubernetes.fullname" . }}-scripts + defaultMode: 0755 + {{- else if eq .Values.kubeconfig.provider "content" }} + - name: kubeconfig-volume + secret: + secretName: {{ include "mcp-server-kubernetes.fullname" . }}-kubeconfig + defaultMode: 0600 + {{- end }} + {{- range .Values.volumes }} + - {{- toYaml . | nindent 10 }} + {{- end }} + {{- $nodeSelector := include "mcp-server-kubernetes.nodeSelector" . }} + {{- if $nodeSelector }} + nodeSelector: + {{- $nodeSelector | nindent 8 }} + {{- end }} + {{- $affinity := include "mcp-server-kubernetes.affinity" . }} + {{- if $affinity }} + affinity: + {{- $affinity | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/helm-chart/templates/rbac.yaml b/helm-chart/templates/rbac.yaml new file mode 100644 index 0000000..66c1591 --- /dev/null +++ b/helm-chart/templates/rbac.yaml @@ -0,0 +1,53 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "mcp-server-kubernetes.fullname" . }} + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if or .Values.rbac.annotations $commonAnnotations }} + annotations: + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} + {{- with .Values.rbac.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} +rules: +{{- if .Values.rbac.useLegacyRules }} +{{- with .Values.rbac.legacyRules }} + {{- toYaml . | nindent 2 }} +{{- end }} +{{- else }} +{{- with .Values.rbac.rules }} + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "mcp-server-kubernetes.fullname" . }} + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if or .Values.rbac.annotations $commonAnnotations }} + annotations: + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} + {{- with .Values.rbac.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "mcp-server-kubernetes.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ include "mcp-server-kubernetes.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/templates/serviceaccount.yaml b/helm-chart/templates/serviceaccount.yaml new file mode 100644 index 0000000..6c8fd4c --- /dev/null +++ b/helm-chart/templates/serviceaccount.yaml @@ -0,0 +1,19 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "mcp-server-kubernetes.serviceAccountName" . }} + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if or .Values.serviceAccount.annotations $commonAnnotations }} + annotations: + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} + {{- with .Values.serviceAccount.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automount }} +{{- end }} \ No newline at end of file From fdde5697243863d2add6076a1f7cd2fd5d3913ff Mon Sep 17 00:00:00 2001 From: Paras Patel Date: Fri, 22 Aug 2025 00:20:08 -0700 Subject: [PATCH 03/18] feat: implement multi-cloud kubeconfig fetching with retry logic - Add configmap with scripts for AWS, GCP, Azure, URL, and custom providers - Implement comprehensive retry logic with configurable attempts and delays - Support multiple clusters per provider with role assumption - Include validation and error handling for all cloud providers --- helm-chart/templates/configmap.yaml | 260 ++++++++++++++++++++++++++++ 1 file changed, 260 insertions(+) create mode 100644 helm-chart/templates/configmap.yaml diff --git a/helm-chart/templates/configmap.yaml b/helm-chart/templates/configmap.yaml new file mode 100644 index 0000000..9a3557d --- /dev/null +++ b/helm-chart/templates/configmap.yaml @@ -0,0 +1,260 @@ +{{- if eq (include "mcp-server-kubernetes.needsInitContainer" .) "true" }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mcp-server-kubernetes.fullname" . }}-scripts + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if $commonAnnotations }} + annotations: + {{- $commonAnnotations | nindent 4 }} + {{- end }} +data: + {{- if eq .Values.kubeconfig.provider "aws" }} + fetch-aws-kubeconfig.sh: | + #!/bin/sh + set -e + echo "Fetching AWS EKS kubeconfigs..." + + # Retry configuration + MAX_RETRIES=${MAX_RETRIES:-3} + RETRY_DELAY=${RETRY_DELAY:-10} + + export KUBECONFIG=/kubeconfig/kubeconfig + touch $KUBECONFIG + + # Function to retry commands + retry_command() { + local cmd="$1" + local description="$2" + local attempt=1 + + while [ $attempt -le $MAX_RETRIES ]; do + echo "Attempt $attempt/$MAX_RETRIES: $description" + if eval "$cmd"; then + echo "✓ Success: $description" + return 0 + else + if [ $attempt -eq $MAX_RETRIES ]; then + echo "✗ Failed after $MAX_RETRIES attempts: $description" + return 1 + fi + echo "⚠ Attempt $attempt failed, retrying in ${RETRY_DELAY}s..." + sleep $RETRY_DELAY + fi + attempt=$((attempt + 1)) + done + } + + {{- range .Values.kubeconfig.aws.clusters }} + # Fetch cluster: {{ .name }} + AWS_CMD="aws eks update-kubeconfig --name {{ .clusterName | quote }} --region {{ .region | quote }}{{- if .roleArn }} --role-arn {{ .roleArn | quote }}{{- end }} --kubeconfig $KUBECONFIG{{- range .extraArgs }} {{ . 
| quote }}{{- end }}" + retry_command "$AWS_CMD" "Fetching EKS cluster {{ .name }}" + {{- end }} + + {{- if .Values.kubeconfig.aws.defaultContext }} + echo "Setting default context: {{ .Values.kubeconfig.aws.defaultContext }}" + kubectl config use-context {{ .Values.kubeconfig.aws.defaultContext | quote }} --kubeconfig=$KUBECONFIG + {{- end }} + + echo "AWS kubeconfig setup complete" + kubectl config get-contexts --kubeconfig=$KUBECONFIG + {{- end }} + + {{- if eq .Values.kubeconfig.provider "gcp" }} + fetch-gcp-kubeconfig.sh: | + #!/bin/sh + set -e + echo "Fetching GCP GKE kubeconfigs..." + + # Retry configuration + MAX_RETRIES=${MAX_RETRIES:-3} + RETRY_DELAY=${RETRY_DELAY:-10} + + export KUBECONFIG=/kubeconfig/kubeconfig + touch $KUBECONFIG + + # Function to retry commands + retry_command() { + local cmd="$1" + local description="$2" + local attempt=1 + + while [ $attempt -le $MAX_RETRIES ]; do + echo "Attempt $attempt/$MAX_RETRIES: $description" + if eval "$cmd"; then + echo "✓ Success: $description" + return 0 + else + if [ $attempt -eq $MAX_RETRIES ]; then + echo "✗ Failed after $MAX_RETRIES attempts: $description" + return 1 + fi + echo "⚠ Attempt $attempt failed, retrying in ${RETRY_DELAY}s..." + sleep $RETRY_DELAY + fi + attempt=$((attempt + 1)) + done + } + + {{- range .Values.kubeconfig.gcp.clusters }} + # Fetch cluster: {{ .name }} + GCP_CMD="gcloud container clusters get-credentials {{ .clusterName | quote }}{{- if .zone }} --zone={{ .zone | quote }}{{- else if .region }} --region={{ .region | quote }}{{- end }}{{- if .project }} --project={{ .project | quote }}{{- end }} --kubeconfig $KUBECONFIG{{- range .extraArgs }} {{ . | quote }}{{- end }}" + retry_command "$GCP_CMD" "Fetching GKE cluster {{ .name }}" + {{- end }} + + {{- if .Values.kubeconfig.gcp.defaultContext }} + echo "Setting default context: {{ .Values.kubeconfig.gcp.defaultContext }}" + kubectl config use-context {{ .Values.kubeconfig.gcp.defaultContext | quote }} --kubeconfig=$KUBECONFIG + {{- end }} + + echo "GCP kubeconfig setup complete" + kubectl config get-contexts --kubeconfig=$KUBECONFIG + {{- end }} + + {{- if eq .Values.kubeconfig.provider "azure" }} + fetch-azure-kubeconfig.sh: | + #!/bin/sh + set -e + echo "Fetching Azure AKS kubeconfigs..." + + # Retry configuration + MAX_RETRIES=${MAX_RETRIES:-3} + RETRY_DELAY=${RETRY_DELAY:-10} + + export KUBECONFIG=/kubeconfig/kubeconfig + touch $KUBECONFIG + + # Function to retry commands + retry_command() { + local cmd="$1" + local description="$2" + local attempt=1 + + while [ $attempt -le $MAX_RETRIES ]; do + echo "Attempt $attempt/$MAX_RETRIES: $description" + if eval "$cmd"; then + echo "✓ Success: $description" + return 0 + else + if [ $attempt -eq $MAX_RETRIES ]; then + echo "✗ Failed after $MAX_RETRIES attempts: $description" + return 1 + fi + echo "⚠ Attempt $attempt failed, retrying in ${RETRY_DELAY}s..." + sleep $RETRY_DELAY + fi + attempt=$((attempt + 1)) + done + } + + {{- range .Values.kubeconfig.azure.clusters }} + # Fetch cluster: {{ .name }} + AZURE_CMD="az aks get-credentials --name {{ .clusterName | quote }} --resource-group {{ .resourceGroup | quote }}{{- if .subscription }} --subscription {{ .subscription | quote }}{{- end }} --file $KUBECONFIG{{- range .extraArgs }} {{ . 
| quote }}{{- end }}" + retry_command "$AZURE_CMD" "Fetching AKS cluster {{ .name }}" + {{- end }} + + {{- if .Values.kubeconfig.azure.defaultContext }} + echo "Setting default context: {{ .Values.kubeconfig.azure.defaultContext }}" + kubectl config use-context {{ .Values.kubeconfig.azure.defaultContext | quote }} --kubeconfig=$KUBECONFIG + {{- end }} + + echo "Azure kubeconfig setup complete" + kubectl config get-contexts --kubeconfig=$KUBECONFIG + {{- end }} + + {{- if eq .Values.kubeconfig.provider "url" }} + fetch-url-kubeconfig.sh: | + #!/bin/sh + set -e + echo "Downloading kubeconfigs from URLs..." + + # Retry configuration + MAX_RETRIES=${MAX_RETRIES:-3} + RETRY_DELAY=${RETRY_DELAY:-10} + + KUBECONFIG_FILES="" + + # Function to retry commands + retry_command() { + local cmd="$1" + local description="$2" + local attempt=1 + + while [ $attempt -le $MAX_RETRIES ]; do + echo "Attempt $attempt/$MAX_RETRIES: $description" + if eval "$cmd"; then + echo "✓ Success: $description" + return 0 + else + if [ $attempt -eq $MAX_RETRIES ]; then + echo "✗ Failed after $MAX_RETRIES attempts: $description" + return 1 + fi + echo "⚠ Attempt $attempt failed, retrying in ${RETRY_DELAY}s..." + sleep $RETRY_DELAY + fi + attempt=$((attempt + 1)) + done + } + + {{- range $index, $config := .Values.kubeconfig.url.configs }} + # Download: {{ $config.name }} + CURL_CMD="curl -sS{{- range $config.extraArgs }} {{ . | quote }}{{- end }} -o /kubeconfig/{{ $config.name }}.yaml {{ $config.url | quote }}" + retry_command "$CURL_CMD" "Downloading kubeconfig {{ $config.name }}" + + {{- if eq $index 0 }} + KUBECONFIG_FILES="/kubeconfig/{{ $config.name }}.yaml" + {{- else }} + KUBECONFIG_FILES="$KUBECONFIG_FILES:/kubeconfig/{{ $config.name }}.yaml" + {{- end }} + {{- end }} + + echo "URL kubeconfig downloads complete" + echo "KUBECONFIG will be set to: $KUBECONFIG_FILES" + + # Validate each kubeconfig file + {{- range .Values.kubeconfig.url.configs }} + echo "Validating {{ .name }}.yaml..." + retry_command "kubectl config view --kubeconfig=/kubeconfig/{{ .name }}.yaml --minify" "Validating kubeconfig {{ .name }}" + {{- end }} + {{- end }} + + {{- if eq .Values.kubeconfig.provider "custom" }} + fetch-custom-kubeconfig.sh: | + #!/bin/sh + set -e + echo "Running custom kubeconfig command..." + + {{- if .Values.kubeconfig.custom.command }} + {{ .Values.kubeconfig.custom.command }} \ + {{- range .Values.kubeconfig.custom.args }} + {{ . | quote }} \ + {{- end }} + {{- else }} + echo "Error: No custom command specified" + exit 1 + {{- end }} + + echo "Custom kubeconfig setup complete" + {{- end }} +{{- end }} + +{{- if eq .Values.kubeconfig.provider "content" }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "mcp-server-kubernetes.fullname" . }}-kubeconfig + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . 
}} + {{- if $commonAnnotations }} + annotations: + {{- $commonAnnotations | nindent 4 }} + {{- end }} +type: Opaque +data: + kubeconfig.yaml: {{ .Values.kubeconfig.content | b64enc }} +{{- end }} \ No newline at end of file From c74ebecbd0d0383601b9cb948562bc6cadfc41fe Mon Sep 17 00:00:00 2001 From: Paras Patel Date: Fri, 22 Aug 2025 00:20:33 -0700 Subject: [PATCH 04/18] feat: add service and ingress templates for MCP server exposure - Add service template with configurable type and annotations - Add ingress template with streaming compatibility warnings - Support for different transport modes (stdio, sse, http) - Include MCP streaming behavior documentation and recommendations --- helm-chart/templates/ingress.yaml | 65 +++++++++++++++++++++++++++++++ helm-chart/templates/service.yaml | 27 +++++++++++++ 2 files changed, 92 insertions(+) create mode 100644 helm-chart/templates/ingress.yaml create mode 100644 helm-chart/templates/service.yaml diff --git a/helm-chart/templates/ingress.yaml b/helm-chart/templates/ingress.yaml new file mode 100644 index 0000000..4de5d32 --- /dev/null +++ b/helm-chart/templates/ingress.yaml @@ -0,0 +1,65 @@ +{{- if and .Values.transport.ingress.enabled (or (eq .Values.transport.mode "sse") (eq .Values.transport.mode "http")) }} +{{- $fullName := include "mcp-server-kubernetes.fullname" . -}} +{{- $svcPort := .Values.transport.service.port -}} +{{- if and .Values.transport.ingress.className (not (hasKey .Values.transport.ingress.annotations "kubernetes.io/ingress.class")) }} + {{- $_ := set .Values.transport.ingress.annotations "kubernetes.io/ingress.class" .Values.transport.ingress.className}} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if or .Values.transport.ingress.annotations $commonAnnotations }} + annotations: + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} + {{- with .Values.transport.ingress.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if and .Values.transport.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.transport.ingress.className }} + {{- end }} + {{- if .Values.transport.ingress.tls }} + tls: + {{- range .Values.transport.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.transport.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/templates/service.yaml b/helm-chart/templates/service.yaml new file mode 100644 index 0000000..d3ed2b5 --- /dev/null +++ b/helm-chart/templates/service.yaml @@ -0,0 +1,27 @@ +{{- if or (eq .Values.transport.mode "sse") (eq .Values.transport.mode "http") }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mcp-server-kubernetes.fullname" . }} + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if or .Values.transport.service.annotations $commonAnnotations }} + annotations: + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} + {{- with .Values.transport.service.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.transport.service.type }} + ports: + - port: {{ .Values.transport.service.port }} + targetPort: {{ .Values.transport.service.targetPort }} + protocol: TCP + name: http + selector: + {{- include "mcp-server-kubernetes.selectorLabels" . | nindent 4 }} +{{- end }} \ No newline at end of file From f13fe6ff533250c3fc78594630d961cfb8c93171 Mon Sep 17 00:00:00 2001 From: Paras Patel Date: Fri, 22 Aug 2025 00:20:50 -0700 Subject: [PATCH 05/18] feat: implement flexible NetworkPolicy with default deny security - Add NetworkPolicy with default deny for ingress and egress - Implement flexible DNS, Kubernetes API, and cloud provider access - Support configurable CIDR ranges and service selectors - Auto-generate essential egress rules for cluster functionality --- helm-chart/templates/networkpolicy.yaml | 233 ++++++++++++++++++++++++ 1 file changed, 233 insertions(+) create mode 100644 helm-chart/templates/networkpolicy.yaml diff --git a/helm-chart/templates/networkpolicy.yaml b/helm-chart/templates/networkpolicy.yaml new file mode 100644 index 0000000..146c95b --- /dev/null +++ b/helm-chart/templates/networkpolicy.yaml @@ -0,0 +1,233 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "mcp-server-kubernetes.fullname" . }} + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if or .Values.networkPolicy.annotations $commonAnnotations }} + annotations: + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} + {{- with .Values.networkPolicy.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "mcp-server-kubernetes.selectorLabels" . 
| nindent 6 }} + # Default deny all ingress and egress traffic (security best practice) + policyTypes: + - Ingress + - Egress + {{- if .Values.networkPolicy.ingress }} + ingress: + {{- range .Values.networkPolicy.ingress }} + - {{- if .from }} + from: + {{- range .from }} + {{- if .podSelector }} + - podSelector: + {{- if .podSelector.matchLabels }} + matchLabels: + {{- toYaml .podSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .podSelector.matchExpressions }} + matchExpressions: + {{- toYaml .podSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- if .namespaceSelector }} + namespaceSelector: + {{- if .namespaceSelector.matchLabels }} + matchLabels: + {{- toYaml .namespaceSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .namespaceSelector.matchExpressions }} + matchExpressions: + {{- toYaml .namespaceSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- end }} + {{- else if .namespaceSelector }} + - namespaceSelector: + {{- if .namespaceSelector.matchLabels }} + matchLabels: + {{- toYaml .namespaceSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .namespaceSelector.matchExpressions }} + matchExpressions: + {{- toYaml .namespaceSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- else if .ipBlock }} + - ipBlock: + cidr: {{ .ipBlock.cidr }} + {{- if .ipBlock.except }} + except: + {{- toYaml .ipBlock.except | nindent 14 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .ports }} + ports: + {{- range .ports }} + - protocol: {{ .protocol | default "TCP" }} + {{- if .port }} + port: {{ .port }} + {{- end }} + {{- if .endPort }} + endPort: {{ .endPort }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + egress: + {{- if .Values.networkPolicy.dns.enabled }} + # Auto-generated DNS egress rule + - to: + - namespaceSelector: + {{- toYaml .Values.networkPolicy.dns.namespaceSelector | nindent 12 }} + podSelector: + {{- toYaml .Values.networkPolicy.dns.podSelector | nindent 12 }} + ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + {{- end }} + {{- if .Values.networkPolicy.kubernetesApi.enabled }} + # Auto-generated Kubernetes API egress rule + - to: + - ipBlock: + cidr: {{ .Values.networkPolicy.kubernetesApi.serviceCidr }} + ports: + - protocol: TCP + port: 443 + {{- end }} + {{- if .Values.networkPolicy.cloudProviderApi.enabled }} + {{- if .Values.networkPolicy.cloudProviderApi.allowAllHttps }} + # Auto-generated cloud provider API egress rule (all HTTPS) + - to: + - ipBlock: + cidr: 0.0.0.0/0 + ports: + - protocol: TCP + port: 443 + {{- else }} + {{- if .Values.networkPolicy.cloudProviderApi.awsCidrs }} + # AWS API access + {{- range .Values.networkPolicy.cloudProviderApi.awsCidrs }} + - to: + - ipBlock: + cidr: {{ . }} + ports: + - protocol: TCP + port: 443 + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.cloudProviderApi.gcpCidrs }} + # GCP API access + {{- range .Values.networkPolicy.cloudProviderApi.gcpCidrs }} + - to: + - ipBlock: + cidr: {{ . }} + ports: + - protocol: TCP + port: 443 + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.cloudProviderApi.azureCidrs }} + # Azure API access + {{- range .Values.networkPolicy.cloudProviderApi.azureCidrs }} + - to: + - ipBlock: + cidr: {{ . 
}} + ports: + - protocol: TCP + port: 443 + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.metadata.enabled }} + # Auto-generated metadata service egress rules + - to: + - ipBlock: + cidr: {{ .Values.networkPolicy.metadata.awsGcpMetadata }} + ports: + - protocol: TCP + port: 80 + {{- if ne .Values.networkPolicy.metadata.awsGcpMetadata .Values.networkPolicy.metadata.azureMetadata }} + - to: + - ipBlock: + cidr: {{ .Values.networkPolicy.metadata.azureMetadata }} + ports: + - protocol: TCP + port: 80 + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.egress }} + # User-defined egress rules + {{- range .Values.networkPolicy.egress }} + - {{- if .to }} + to: + {{- range .to }} + {{- if .podSelector }} + - podSelector: + {{- if .podSelector.matchLabels }} + matchLabels: + {{- toYaml .podSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .podSelector.matchExpressions }} + matchExpressions: + {{- toYaml .podSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- if .namespaceSelector }} + namespaceSelector: + {{- if .namespaceSelector.matchLabels }} + matchLabels: + {{- toYaml .namespaceSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .namespaceSelector.matchExpressions }} + matchExpressions: + {{- toYaml .namespaceSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- end }} + {{- else if .namespaceSelector }} + - namespaceSelector: + {{- if .namespaceSelector.matchLabels }} + matchLabels: + {{- toYaml .namespaceSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .namespaceSelector.matchExpressions }} + matchExpressions: + {{- toYaml .namespaceSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- else if .ipBlock }} + - ipBlock: + cidr: {{ .ipBlock.cidr }} + {{- if .ipBlock.except }} + except: + {{- toYaml .ipBlock.except | nindent 14 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .ports }} + ports: + {{- range .ports }} + - protocol: {{ .protocol | default "TCP" }} + {{- if .port }} + port: {{ .port }} + {{- end }} + {{- if .endPort }} + endPort: {{ .endPort }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file From ff8f48e1747e0daec2bdbf4278793724b06a0246 Mon Sep 17 00:00:00 2001 From: Paras Patel Date: Fri, 22 Aug 2025 00:21:09 -0700 Subject: [PATCH 06/18] feat: add high availability and auto-scaling support - Add HorizontalPodAutoscaler with CPU/memory/custom metrics - Add PodDisruptionBudget for maintaining availability during updates - Support Kubernetes version compatibility (v2/v2beta2 HPA) - Include scaling behavior configuration and annotations --- helm-chart/templates/hpa.yaml | 53 +++++++++++++++++++ helm-chart/templates/poddisruptionbudget.yaml | 28 ++++++++++ 2 files changed, 81 insertions(+) create mode 100644 helm-chart/templates/hpa.yaml create mode 100644 helm-chart/templates/poddisruptionbudget.yaml diff --git a/helm-chart/templates/hpa.yaml b/helm-chart/templates/hpa.yaml new file mode 100644 index 0000000..15c9ac0 --- /dev/null +++ b/helm-chart/templates/hpa.yaml @@ -0,0 +1,53 @@ +{{- if .Values.autoscaling.enabled }} +{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2 +{{- else }} +apiVersion: autoscaling/v2beta2 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "mcp-server-kubernetes.fullname" . }} + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . 
}} + {{- if or .Values.autoscaling.annotations $commonAnnotations }} + annotations: + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} + {{- with .Values.autoscaling.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "mcp-server-kubernetes.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} + {{- with .Values.autoscaling.customMetrics }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.autoscaling.behavior }} + behavior: + {{- toYaml .Values.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/templates/poddisruptionbudget.yaml b/helm-chart/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..9918dbb --- /dev/null +++ b/helm-chart/templates/poddisruptionbudget.yaml @@ -0,0 +1,28 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "mcp-server-kubernetes.fullname" . }} + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if or .Values.podDisruptionBudget.annotations $commonAnnotations }} + annotations: + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} + {{- with .Values.podDisruptionBudget.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- include "mcp-server-kubernetes.selectorLabels" . 
| nindent 6 }} +{{- end }} \ No newline at end of file From df8e718728d47b304f5900e623b1d1fe9ce9fbd9 Mon Sep 17 00:00:00 2001 From: Paras Patel Date: Fri, 22 Aug 2025 00:21:38 -0700 Subject: [PATCH 07/18] feat: add comprehensive Helm test suite - Add connectivity tests for HTTP/SSE transport validation - Add kubeconfig tests for cloud provider authentication - Add MCP tools tests for functionality validation - Add NetworkPolicy support for test pod communication - Include weighted test execution and cleanup automation --- helm-chart/templates/networkpolicy-tests.yaml | 252 ++++++++++++++++++ .../templates/tests/test-connectivity.yaml | 53 ++++ .../templates/tests/test-kubeconfig.yaml | 135 ++++++++++ .../templates/tests/test-mcp-tools.yaml | 81 ++++++ .../tests/test-scripts-configmap.yaml | 175 ++++++++++++ 5 files changed, 696 insertions(+) create mode 100644 helm-chart/templates/networkpolicy-tests.yaml create mode 100644 helm-chart/templates/tests/test-connectivity.yaml create mode 100644 helm-chart/templates/tests/test-kubeconfig.yaml create mode 100644 helm-chart/templates/tests/test-mcp-tools.yaml create mode 100644 helm-chart/templates/tests/test-scripts-configmap.yaml diff --git a/helm-chart/templates/networkpolicy-tests.yaml b/helm-chart/templates/networkpolicy-tests.yaml new file mode 100644 index 0000000..81d8a55 --- /dev/null +++ b/helm-chart/templates/networkpolicy-tests.yaml @@ -0,0 +1,252 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "mcp-server-kubernetes.fullname" . }}-tests + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if or .Values.networkPolicy.annotations $commonAnnotations }} + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} + {{- with .Values.networkPolicy.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} +spec: + podSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - {{ include "mcp-server-kubernetes.name" . }} + - key: app.kubernetes.io/instance + operator: In + values: + - {{ .Release.Name }} + policyTypes: + - Ingress + - Egress + + # Allow test pods to communicate with MCP server + ingress: + # Allow test pods to access MCP server + - from: + - podSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - {{ include "mcp-server-kubernetes.name" . 
}} + - key: app.kubernetes.io/instance + operator: In + values: + - {{ .Release.Name }} + ports: + - protocol: TCP + port: {{ .Values.transport.service.targetPort }} + + # Include user-defined ingress rules + {{- range .Values.networkPolicy.ingress }} + - {{- if .from }} + from: + {{- range .from }} + {{- if .podSelector }} + - podSelector: + {{- if .podSelector.matchLabels }} + matchLabels: + {{- toYaml .podSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .podSelector.matchExpressions }} + matchExpressions: + {{- toYaml .podSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- if .namespaceSelector }} + namespaceSelector: + {{- if .namespaceSelector.matchLabels }} + matchLabels: + {{- toYaml .namespaceSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .namespaceSelector.matchExpressions }} + matchExpressions: + {{- toYaml .namespaceSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- end }} + {{- else if .namespaceSelector }} + - namespaceSelector: + {{- if .namespaceSelector.matchLabels }} + matchLabels: + {{- toYaml .namespaceSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .namespaceSelector.matchExpressions }} + matchExpressions: + {{- toYaml .namespaceSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- else if .ipBlock }} + - ipBlock: + cidr: {{ .ipBlock.cidr }} + {{- if .ipBlock.except }} + except: + {{- toYaml .ipBlock.except | nindent 14 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .ports }} + ports: + {{- range .ports }} + - protocol: {{ .protocol | default "TCP" }} + {{- if .port }} + port: {{ .port }} + {{- end }} + {{- if .endPort }} + endPort: {{ .endPort }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + # Allow test pods egress for their functionality + egress: + # Allow test pods to reach MCP server + - to: + - podSelector: + matchLabels: + {{- include "mcp-server-kubernetes.selectorLabels" . 
| nindent 14 }} + ports: + - protocol: TCP + port: {{ .Values.transport.service.targetPort }} + + # Allow DNS resolution for tests + - to: + - namespaceSelector: + matchLabels: + name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + + # Alternative DNS for CoreDNS + - to: + - namespaceSelector: + matchLabels: + name: kube-system + podSelector: + matchLabels: + k8s-app: coredns + ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + + # Allow Kubernetes API access for kubectl tests + - to: + - ipBlock: + cidr: 10.96.0.0/12 # Default service CIDR - adjust for your cluster + ports: + - protocol: TCP + port: 443 + + # Allow cloud provider API access for kubeconfig tests + {{- if or (eq .Values.kubeconfig.provider "aws") (eq .Values.kubeconfig.provider "gcp") (eq .Values.kubeconfig.provider "azure") }} + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 + ports: + - protocol: TCP + port: 443 + - protocol: TCP + port: 80 # For metadata services + {{- end }} + + # Allow URL downloads for URL provider tests + {{- if eq .Values.kubeconfig.provider "url" }} + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 + ports: + - protocol: TCP + port: 443 + - protocol: TCP + port: 80 + {{- end }} + + # Include user-defined egress rules + {{- range .Values.networkPolicy.egress }} + - {{- if .to }} + to: + {{- range .to }} + {{- if .podSelector }} + - podSelector: + {{- if .podSelector.matchLabels }} + matchLabels: + {{- toYaml .podSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .podSelector.matchExpressions }} + matchExpressions: + {{- toYaml .podSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- if .namespaceSelector }} + namespaceSelector: + {{- if .namespaceSelector.matchLabels }} + matchLabels: + {{- toYaml .namespaceSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .namespaceSelector.matchExpressions }} + matchExpressions: + {{- toYaml .namespaceSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- end }} + {{- else if .namespaceSelector }} + - namespaceSelector: + {{- if .namespaceSelector.matchLabels }} + matchLabels: + {{- toYaml .namespaceSelector.matchLabels | nindent 14 }} + {{- end }} + {{- if .namespaceSelector.matchExpressions }} + matchExpressions: + {{- toYaml .namespaceSelector.matchExpressions | nindent 14 }} + {{- end }} + {{- else if .ipBlock }} + - ipBlock: + cidr: {{ .ipBlock.cidr }} + {{- if .ipBlock.except }} + except: + {{- toYaml .ipBlock.except | nindent 14 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .ports }} + ports: + {{- range .ports }} + - protocol: {{ .protocol | default "TCP" }} + {{- if .port }} + port: {{ .port }} + {{- end }} + {{- if .endPort }} + endPort: {{ .endPort }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/templates/tests/test-connectivity.yaml b/helm-chart/templates/tests/test-connectivity.yaml new file mode 100644 index 0000000..6f0a772 --- /dev/null +++ b/helm-chart/templates/tests/test-connectivity.yaml @@ -0,0 +1,53 @@ +{{- if or (eq .Values.transport.mode "sse") (eq .Values.transport.mode "http") }} +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "mcp-server-kubernetes.fullname" . }}-test-connectivity" + labels: + {{- include "mcp-server-kubernetes.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-weight": "10" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} +spec: + restartPolicy: Never + containers: + - name: connectivity-test + image: curlimages/curl:latest + command: + - /bin/sh + - -c + - | + set -e + echo "Testing MCP server connectivity..." + + SERVICE_URL="http://{{ include "mcp-server-kubernetes.fullname" . }}:{{ .Values.transport.service.port }}" + + # Test basic connectivity + echo "Testing connection to: $SERVICE_URL" + curl -f --connect-timeout 10 --max-time 30 -s "$SERVICE_URL" || curl -f --connect-timeout 10 --max-time 30 -s "$SERVICE_URL/health" || { + echo "ERROR: Cannot connect to MCP server at $SERVICE_URL" + echo "Checking service status..." + nslookup {{ include "mcp-server-kubernetes.fullname" . }} || true + exit 1 + } + + echo "✓ MCP server is accessible" + + {{- if eq .Values.transport.mode "http" }} + # Test MCP HTTP endpoint + echo "Testing MCP HTTP endpoint..." + curl -f --connect-timeout 10 --max-time 30 -s -X POST \ + -H "Content-Type: application/json" \ + -d '{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}' \ + "$SERVICE_URL/mcp" && echo "✓ MCP HTTP endpoint is working" || { + echo "WARNING: MCP HTTP endpoint test failed (server might need more time to start)" + } + {{- end }} + + echo "Connectivity test completed successfully" +{{- end }} \ No newline at end of file diff --git a/helm-chart/templates/tests/test-kubeconfig.yaml b/helm-chart/templates/tests/test-kubeconfig.yaml new file mode 100644 index 0000000..3b26e0e --- /dev/null +++ b/helm-chart/templates/tests/test-kubeconfig.yaml @@ -0,0 +1,135 @@ +{{- if ne .Values.kubeconfig.provider "serviceaccount" }} +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "mcp-server-kubernetes.fullname" . }}-test-kubeconfig" + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-weight": "5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} +spec: + restartPolicy: Never + securityContext: + {{- toYaml .Values.security.podSecurityContext | nindent 4 }} + {{- if eq (include "mcp-server-kubernetes.needsInitContainer" .) "true" }} + initContainers: + - name: kubeconfig-test-fetcher + image: {{ include "mcp-server-kubernetes.initImage" . 
}} + securityContext: + {{- toYaml .Values.security.securityContext | nindent 8 }} + {{- if eq .Values.kubeconfig.provider "aws" }} + command: ["/bin/sh"] + args: ["/scripts/test-aws-kubeconfig.sh"] + {{- else if eq .Values.kubeconfig.provider "gcp" }} + command: ["/bin/sh"] + args: ["/scripts/test-gcp-kubeconfig.sh"] + {{- else if eq .Values.kubeconfig.provider "azure" }} + command: ["/bin/sh"] + args: ["/scripts/test-azure-kubeconfig.sh"] + {{- else if eq .Values.kubeconfig.provider "url" }} + command: ["/bin/sh"] + args: ["/scripts/test-url-kubeconfig.sh"] + {{- else if eq .Values.kubeconfig.provider "custom" }} + command: ["/bin/sh"] + args: ["/scripts/test-custom-kubeconfig.sh"] + {{- end }} + env: + {{- range $key, $value := .Values.kubeconfig.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + volumeMounts: + - name: kubeconfig-volume + mountPath: /kubeconfig + - name: kubeconfig-test-scripts + mountPath: /scripts + {{- end }} + containers: + - name: kubeconfig-test + image: bitnami/kubectl:latest + securityContext: + {{- toYaml .Values.security.securityContext | nindent 6 }} + env: + {{- if ne .Values.kubeconfig.provider "serviceaccount" }} + {{- $kubeconfigPath := include "mcp-server-kubernetes.kubeconfigEnv" . }} + {{- if $kubeconfigPath }} + - name: KUBECONFIG + value: {{ $kubeconfigPath | quote }} + {{- end }} + {{- end }} + {{- if eq .Values.kubeconfig.provider "content" }} + - name: KUBECONFIG_YAML + valueFrom: + secretKeyRef: + name: {{ include "mcp-server-kubernetes.fullname" . }}-kubeconfig + key: kubeconfig.yaml + {{- end }} + command: + - /bin/bash + - -c + - | + set -e + echo "Testing kubeconfig functionality..." + + {{- if eq .Values.kubeconfig.provider "content" }} + # For content provider, decode and use the kubeconfig + echo "$KUBECONFIG_YAML" | base64 -d > /tmp/kubeconfig + export KUBECONFIG=/tmp/kubeconfig + {{- end }} + + # Test basic kubectl connectivity + echo "Testing kubectl connectivity..." + kubectl cluster-info --request-timeout=10s || { + echo "ERROR: kubectl cluster-info failed" + echo "Kubeconfig provider: {{ .Values.kubeconfig.provider }}" + {{- if .Values.kubeconfig.provider }} + echo "KUBECONFIG: $KUBECONFIG" + {{- end }} + exit 1 + } + + echo "✓ kubectl cluster-info successful" + + # Test basic API access + echo "Testing API server access..." + kubectl get nodes --request-timeout=10s >/dev/null 2>&1 && \ + echo "✓ Can access cluster nodes" || \ + echo "WARNING: Cannot access nodes (may be RBAC limited)" + + # Test namespace access + kubectl get namespaces --request-timeout=10s >/dev/null 2>&1 && \ + echo "✓ Can list namespaces" || \ + echo "WARNING: Cannot list namespaces (may be RBAC limited)" + + # Show available contexts + echo "Available contexts:" + kubectl config get-contexts 2>/dev/null || echo "No contexts available" + + echo "Kubeconfig test completed successfully" + {{- if ne .Values.kubeconfig.provider "serviceaccount" }} + volumeMounts: + - name: kubeconfig-volume + mountPath: /kubeconfig + readOnly: true + {{- end }} + volumes: + {{- if eq (include "mcp-server-kubernetes.needsInitContainer" .) "true" }} + - name: kubeconfig-volume + emptyDir: {} + - name: kubeconfig-test-scripts + configMap: + name: {{ include "mcp-server-kubernetes.fullname" . }}-test-scripts + defaultMode: 0755 + {{- else if eq .Values.kubeconfig.provider "content" }} + - name: kubeconfig-volume + secret: + secretName: {{ include "mcp-server-kubernetes.fullname" . 
}}-kubeconfig + defaultMode: 0600 + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/templates/tests/test-mcp-tools.yaml b/helm-chart/templates/tests/test-mcp-tools.yaml new file mode 100644 index 0000000..bfac144 --- /dev/null +++ b/helm-chart/templates/tests/test-mcp-tools.yaml @@ -0,0 +1,81 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "mcp-server-kubernetes.fullname" . }}-test-mcp-tools" + labels: + {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-weight": "20" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} +spec: + restartPolicy: Never + serviceAccountName: {{ include "mcp-server-kubernetes.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.security.podSecurityContext | nindent 4 }} + containers: + - name: mcp-tools-test + image: curlimages/curl:latest + securityContext: + {{- toYaml .Values.security.securityContext | nindent 6 }} + command: + - /bin/sh + - -c + - | + set -e + echo "Testing MCP server tools and functionality..." + + {{- if or (eq .Values.transport.mode "sse") (eq .Values.transport.mode "http") }} + SERVICE_URL="http://{{ include "mcp-server-kubernetes.fullname" . }}:{{ .Values.transport.service.port }}" + + # Test MCP tools/list endpoint + echo "Testing MCP tools/list..." + RESPONSE=$(curl -s --connect-timeout 10 --max-time 30 -X POST \ + -H "Content-Type: application/json" \ + -d '{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}' \ + "{{ if eq .Values.transport.mode "http" }}$SERVICE_URL/mcp{{ else }}$SERVICE_URL/sse{{ end }}") + + echo "MCP Response: $RESPONSE" + + # Check if response contains expected tools + if echo "$RESPONSE" | grep -q "kubectl_get"; then + echo "✓ kubectl_get tool found" + else + echo "WARNING: kubectl_get tool not found in response" + fi + + if echo "$RESPONSE" | grep -q "ping"; then + echo "✓ ping tool found" + else + echo "WARNING: ping tool not found in response" + fi + + # Check security filtering + {{- if .Values.security.allowOnlyReadonly }} + if echo "$RESPONSE" | grep -q "kubectl_delete"; then + echo "ERROR: kubectl_delete should not be available in readonly mode" + exit 1 + else + echo "✓ Readonly mode working - destructive tools filtered" + fi + {{- end }} + + {{- if .Values.security.allowOnlyNonDestructive }} + if echo "$RESPONSE" | grep -q "kubectl_delete"; then + echo "ERROR: kubectl_delete should not be available in non-destructive mode" + exit 1 + else + echo "✓ Non-destructive mode working - destructive tools filtered" + fi + {{- end }} + + {{- else }} + echo "Skipping HTTP/SSE tests - server is in stdio mode" + echo "✓ MCP server is deployed in stdio transport mode" + {{- end }} + + echo "MCP tools test completed successfully" \ No newline at end of file diff --git a/helm-chart/templates/tests/test-scripts-configmap.yaml b/helm-chart/templates/tests/test-scripts-configmap.yaml new file mode 100644 index 0000000..cacb2d9 --- /dev/null +++ b/helm-chart/templates/tests/test-scripts-configmap.yaml @@ -0,0 +1,175 @@ +{{- if and (eq (include "mcp-server-kubernetes.needsInitContainer" .) "true") (ne .Values.kubeconfig.provider "serviceaccount") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mcp-server-kubernetes.fullname" . }}-test-scripts + labels: + {{- include "mcp-server-kubernetes.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} + {{- if $commonAnnotations }} + {{- $commonAnnotations | nindent 4 }} + {{- end }} +data: + {{- if eq .Values.kubeconfig.provider "aws" }} + test-aws-kubeconfig.sh: | + #!/bin/sh + set -e + echo "Testing AWS EKS kubeconfig fetch..." + + export KUBECONFIG=/kubeconfig/kubeconfig + touch $KUBECONFIG + + {{- range .Values.kubeconfig.aws.clusters }} + echo "Testing cluster: {{ .name }}" + aws eks describe-cluster --name {{ .clusterName | quote }} --region {{ .region | quote }} >/dev/null || { + echo "ERROR: Cannot access EKS cluster {{ .name }}" + exit 1 + } + echo "✓ EKS cluster {{ .name }} is accessible" + + aws eks update-kubeconfig \ + --name {{ .clusterName | quote }} \ + --region {{ .region | quote }} \ + {{- if .roleArn }} + --role-arn {{ .roleArn | quote }} \ + {{- end }} \ + --kubeconfig $KUBECONFIG \ + {{- range .extraArgs }} + {{ . | quote }} \ + {{- end }} + {{- end }} + + echo "AWS kubeconfig test completed successfully" + {{- end }} + + {{- if eq .Values.kubeconfig.provider "gcp" }} + test-gcp-kubeconfig.sh: | + #!/bin/sh + set -e + echo "Testing GCP GKE kubeconfig fetch..." + + export KUBECONFIG=/kubeconfig/kubeconfig + touch $KUBECONFIG + + {{- range .Values.kubeconfig.gcp.clusters }} + echo "Testing cluster: {{ .name }}" + gcloud container clusters describe {{ .clusterName | quote }} \ + {{- if .zone }} + --zone={{ .zone | quote }} \ + {{- else if .region }} + --region={{ .region | quote }} \ + {{- end }} + {{- if .project }} + --project={{ .project | quote }} \ + {{- end }} + >/dev/null || { + echo "ERROR: Cannot access GKE cluster {{ .name }}" + exit 1 + } + echo "✓ GKE cluster {{ .name }} is accessible" + + gcloud container clusters get-credentials {{ .clusterName | quote }} \ + {{- if .zone }} + --zone={{ .zone | quote }} \ + {{- else if .region }} + --region={{ .region | quote }} \ + {{- end }} + {{- if .project }} + --project={{ .project | quote }} \ + {{- end }} + --kubeconfig $KUBECONFIG \ + {{- range .extraArgs }} + {{ . | quote }} \ + {{- end }} + {{- end }} + + echo "GCP kubeconfig test completed successfully" + {{- end }} + + {{- if eq .Values.kubeconfig.provider "azure" }} + test-azure-kubeconfig.sh: | + #!/bin/sh + set -e + echo "Testing Azure AKS kubeconfig fetch..." + + export KUBECONFIG=/kubeconfig/kubeconfig + touch $KUBECONFIG + + {{- range .Values.kubeconfig.azure.clusters }} + echo "Testing cluster: {{ .name }}" + az aks show \ + --name {{ .clusterName | quote }} \ + --resource-group {{ .resourceGroup | quote }} \ + {{- if .subscription }} + --subscription {{ .subscription | quote }} \ + {{- end }} + >/dev/null || { + echo "ERROR: Cannot access AKS cluster {{ .name }}" + exit 1 + } + echo "✓ AKS cluster {{ .name }} is accessible" + + az aks get-credentials \ + --name {{ .clusterName | quote }} \ + --resource-group {{ .resourceGroup | quote }} \ + {{- if .subscription }} + --subscription {{ .subscription | quote }} \ + {{- end }} + --file $KUBECONFIG \ + {{- range .extraArgs }} + {{ . | quote }} \ + {{- end }} + {{- end }} + + echo "Azure kubeconfig test completed successfully" + {{- end }} + + {{- if eq .Values.kubeconfig.provider "url" }} + test-url-kubeconfig.sh: | + #!/bin/sh + set -e + echo "Testing URL kubeconfig downloads..." 
+ + {{- range .Values.kubeconfig.url.configs }} + echo "Testing download: {{ .name }}" + curl -sS --connect-timeout 10 --max-time 30 \ + {{- range .extraArgs }} + {{ . | quote }} \ + {{- end }} + -o /kubeconfig/{{ .name }}.yaml \ + {{ .url | quote }} || { + echo "ERROR: Cannot download kubeconfig from {{ .url }}" + exit 1 + } + echo "✓ Successfully downloaded {{ .name }}.yaml" + {{- end }} + + echo "URL kubeconfig test completed successfully" + {{- end }} + + {{- if eq .Values.kubeconfig.provider "custom" }} + test-custom-kubeconfig.sh: | + #!/bin/sh + set -e + echo "Testing custom kubeconfig command..." + + {{- if .Values.kubeconfig.custom.command }} + {{ .Values.kubeconfig.custom.command }} \ + {{- range .Values.kubeconfig.custom.args }} + {{ . | quote }} \ + {{- end }} || { + echo "ERROR: Custom kubeconfig command failed" + exit 1 + } + {{- else }} + echo "ERROR: No custom command specified" + exit 1 + {{- end }} + + echo "Custom kubeconfig test completed successfully" + {{- end }} +{{- end }} \ No newline at end of file From c1a9ceab86a1388f3b5bfac715c584558c723948 Mon Sep 17 00:00:00 2001 From: Paras Patel Date: Fri, 22 Aug 2025 00:22:40 -0700 Subject: [PATCH 08/18] docs: add production examples and comprehensive installation guide - Add cloud provider IAM integration examples (IRSA, Workload Identity) - Add multi-cluster deployment configurations - Add secure NetworkPolicy and production-complete examples - Add detailed installation guide with troubleshooting - Include NOTES.txt for post-installation guidance --- helm-chart/HELM_INSTALL.md | 529 ++++++++++++++++++ helm-chart/examples/aws-irsa-example.yaml | 120 ++++ helm-chart/examples/aws-multi-cluster.yaml | 151 +++++ .../examples/azure-workload-identity.yaml | 136 +++++ .../examples/gcp-workload-identity.yaml | 104 ++++ helm-chart/examples/production-complete.yaml | 291 ++++++++++ helm-chart/examples/secure-networkpolicy.yaml | 221 ++++++++ helm-chart/templates/NOTES.txt | 129 +++++ 8 files changed, 1681 insertions(+) create mode 100644 helm-chart/HELM_INSTALL.md create mode 100644 helm-chart/examples/aws-irsa-example.yaml create mode 100644 helm-chart/examples/aws-multi-cluster.yaml create mode 100644 helm-chart/examples/azure-workload-identity.yaml create mode 100644 helm-chart/examples/gcp-workload-identity.yaml create mode 100644 helm-chart/examples/production-complete.yaml create mode 100644 helm-chart/examples/secure-networkpolicy.yaml create mode 100644 helm-chart/templates/NOTES.txt diff --git a/helm-chart/HELM_INSTALL.md b/helm-chart/HELM_INSTALL.md new file mode 100644 index 0000000..77d92e0 --- /dev/null +++ b/helm-chart/HELM_INSTALL.md @@ -0,0 +1,529 @@ +# MCP Server Kubernetes - Helm Installation Guide + +Complete guide for installing and configuring the MCP Server Kubernetes using Helm. 
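+
+Before installing, it can help to lint the chart and render its manifests locally so you can see exactly what will be applied. A minimal sketch (the release name `mcp-server` and `my-values.yaml` are placeholders):
+
+```bash
+# Validate templates and check values against the bundled values.schema.json
+helm lint ./helm-chart -f my-values.yaml
+
+# Render all manifests locally without touching the cluster
+helm template mcp-server ./helm-chart -f my-values.yaml > rendered.yaml
+```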
+ +## Quick Start + +```bash +# Basic installation with default settings +helm install mcp-server ./helm-chart + +# Install in specific namespace +helm install mcp-server ./helm-chart -n mcp-system --create-namespace +``` + +## Installation Examples + +### AWS EKS Multi-Cluster + +```bash +helm install mcp-server-k8s ./helm-chart \ + --set kubeconfig.provider=aws \ + --set kubeconfig.aws.clusters[0].name=prod-us-east \ + --set kubeconfig.aws.clusters[0].clusterName=prod-cluster \ + --set kubeconfig.aws.clusters[0].region=us-east-1 \ + --set kubeconfig.aws.clusters[0].roleArn="arn:aws:iam::123456789:role/EKSAdminRole" \ + --set kubeconfig.aws.clusters[1].name=staging-us-west \ + --set kubeconfig.aws.clusters[1].clusterName=staging-cluster \ + --set kubeconfig.aws.clusters[1].region=us-west-2 \ + --set kubeconfig.aws.defaultContext=prod-us-east +``` + +### GCP GKE Multi-Cluster + +```bash +helm install mcp-server-k8s ./helm-chart \ + --set kubeconfig.provider=gcp \ + --set kubeconfig.gcp.clusters[0].name=prod-cluster \ + --set kubeconfig.gcp.clusters[0].clusterName=prod-gke \ + --set kubeconfig.gcp.clusters[0].zone=us-central1-a \ + --set kubeconfig.gcp.clusters[0].project=company-prod \ + --set kubeconfig.gcp.clusters[1].name=dev-cluster \ + --set kubeconfig.gcp.clusters[1].clusterName=dev-gke \ + --set kubeconfig.gcp.clusters[1].zone=us-central1-b \ + --set kubeconfig.gcp.clusters[1].project=company-dev +``` + +### URL-based Kubeconfig + +```bash +helm install mcp-server-k8s ./helm-chart \ + --set kubeconfig.provider=url \ + --set kubeconfig.url.configs[0].name=prod-config \ + --set kubeconfig.url.configs[0].url="https://storage.company.com/prod.yaml" \ + --set kubeconfig.url.configs[1].name=staging-config \ + --set kubeconfig.url.configs[1].url="https://storage.company.com/staging.yaml" +``` + +### Web-Accessible (HTTP Transport) + +```bash +# Basic HTTP transport setup +helm install mcp-server-k8s ./helm-chart \ + --set transport.mode=http \ + --set transport.service.type=LoadBalancer \ + --set transport.ingress.enabled=true \ + --set transport.ingress.hosts[0].host=mcp-server.company.com + +# AWS with NLB (recommended for MCP streaming) +helm install mcp-server-k8s ./helm-chart \ + --set transport.mode=http \ + --set transport.service.type=LoadBalancer \ + --set transport.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-type"="nlb" \ + --set transport.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-internal"="true" + +# NGINX Ingress with streaming support +helm install mcp-server-k8s ./helm-chart \ + --set transport.mode=http \ + --set transport.ingress.enabled=true \ + --set transport.ingress.className="nginx" \ + --set transport.ingress.annotations."nginx\.ingress\.kubernetes\.io/proxy-read-timeout"="3600" \ + --set transport.ingress.annotations."nginx\.ingress\.kubernetes\.io/proxy-buffering"="off" +``` + +#### ⚠️ MCP Streaming Compatibility Warning + +Model Context Protocol uses streaming connections that may not work with all ingress controllers: + +**Known Issues:** +- **AWS Classic ELB**: Does not support streaming - use NLB instead +- **NGINX + ELB**: May timeout - configure proxy timeouts +- **CloudFlare**: May buffer responses - disable buffering +- **API Gateways**: May not support Server-Sent Events properly + +**Recommended Solutions:** +- Use Network Load Balancer (NLB) on AWS +- Configure NGINX proxy timeouts and disable buffering +- Test MCP streaming thoroughly with your setup + +## Cloud Provider IAM Integration + +### AWS IRSA 
(IAM Roles for Service Accounts) +```bash +helm install mcp-server-k8s ./helm-chart \ + --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::123456789012:role/mcp-server-role" \ + --set serviceAccount.annotations."eks\.amazonaws\.com/sts-regional-endpoints"="true" +``` + +### GCP Workload Identity +```bash +helm install mcp-server-k8s ./helm-chart \ + --set serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="mcp-server@my-project.iam.gserviceaccount.com" +``` + +### Azure Workload Identity +```bash +helm install mcp-server-k8s ./helm-chart \ + --set serviceAccount.annotations."azure\.workload\.identity/client-id"="12345678-1234-1234-1234-123456789012" \ + --set serviceAccount.annotations."azure\.workload\.identity/tenant-id"="87654321-4321-4321-4321-210987654321" \ + --set serviceAccount.annotations."azure\.workload\.identity/use"="true" +``` + +## Security Configuration + +### Non-Destructive Mode (Safe Operations Only) + +```bash +helm install mcp-server-k8s ./helm-chart \ + --set security.allowOnlyNonDestructive=true +``` + +### Network Policy (Default Deny - Security Best Practice) + +```bash +# Enable NetworkPolicy with default deny and minimal required access +helm install mcp-server-k8s ./helm-chart \ + --set networkPolicy.enabled=true \ + --set networkPolicy.ingress[0].from[0].namespaceSelector.matchLabels.name=ingress-nginx \ + --set networkPolicy.ingress[0].ports[0].protocol=TCP \ + --set networkPolicy.ingress[0].ports[0].port=3001 \ + --set networkPolicy.egress[0].to[0].namespaceSelector.matchLabels.name=kube-system \ + --set networkPolicy.egress[0].ports[0].protocol=UDP \ + --set networkPolicy.egress[0].ports[0].port=53 + +# ⚠️ WARNING: NetworkPolicy uses default deny - you MUST define egress rules +# for DNS, Kubernetes API, and cloud provider APIs or the pod won't function! +``` + +### Horizontal Pod Autoscaler (Auto-scaling) + +```bash +# Enable HPA with CPU and memory scaling +helm install mcp-server-k8s ./helm-chart \ + --set autoscaling.enabled=true \ + --set autoscaling.minReplicas=2 \ + --set autoscaling.maxReplicas=20 \ + --set autoscaling.targetCPUUtilizationPercentage=70 \ + --set autoscaling.targetMemoryUtilizationPercentage=80 +``` + +## Advanced Configuration + +### Custom Environment Variables + +```yaml +kubeconfig: + env: + AWS_PROFILE: "production" + GOOGLE_APPLICATION_CREDENTIALS: "/var/secrets/gcp-key.json" + CUSTOM_TOKEN: "my-auth-token" +``` + +### Extra Arguments for Cloud Providers + +```yaml +kubeconfig: + aws: + clusters: + - name: "prod" + clusterName: "prod-cluster" + region: "us-east-1" + extraArgs: + - "--profile=production" + - "--external-id=unique-id" + - "--session-name=mcp-session" +``` + +### Custom RBAC + +```yaml +rbac: + rules: + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list"] + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "patch"] +``` + +### Network Policy (Default Deny) + +```yaml +# ⚠️ NetworkPolicy implements DEFAULT DENY for security best practices +# You MUST explicitly allow all required traffic or the pod will not function! 
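+#
+# Tip (assumes kubectl access and that the API server runs as a static pod, as on
+# kubeadm-style clusters): the Kubernetes API egress rule below needs your cluster's
+# service CIDR. One way to look it up is:
+#   kubectl cluster-info dump | grep -m 1 service-cluster-ip-range
+# On managed clusters (EKS/GKE/AKS) check your provider's console or VPC settings instead.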
+ +networkPolicy: + enabled: true + + # Ingress rules - explicitly allow inbound traffic + ingress: + # Allow ingress controller access + - from: + - namespaceSelector: + matchLabels: + name: ingress-nginx + podSelector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + ports: + - protocol: TCP + port: 3001 + + # Allow specific CIDR blocks + - from: + - ipBlock: + cidr: 10.0.0.0/8 + except: + - 10.0.1.0/24 + ports: + - protocol: TCP + port: 3001 + + # Egress rules - explicitly allow outbound traffic (REQUIRED) + egress: + # REQUIRED: Allow DNS resolution + - to: + - namespaceSelector: + matchLabels: + name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + + # REQUIRED: Allow Kubernetes API access + - to: + - ipBlock: + cidr: 10.96.0.0/12 # Service CIDR (adjust for your cluster) + ports: + - protocol: TCP + port: 443 + + # REQUIRED for cloud providers: Allow cloud provider API access + - to: + - ipBlock: + cidr: 0.0.0.0/0 # Restrict this CIDR for better security + ports: + - protocol: TCP + port: 443 + - protocol: TCP + port: 80 # For metadata services +``` + +### Horizontal Pod Autoscaler + +```yaml +# Enable HPA with advanced scaling configuration +autoscaling: + enabled: true + minReplicas: 2 + maxReplicas: 50 + targetCPUUtilizationPercentage: 70 + targetMemoryUtilizationPercentage: 80 + + # Custom metrics scaling + customMetrics: + - type: Pods + pods: + metric: + name: http_requests_per_second + target: + type: AverageValue + averageValue: "100" + + # Scaling behavior + behavior: + scaleUp: + stabilizationWindowSeconds: 60 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + - type: Pods + value: 2 + periodSeconds: 60 + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 10 + periodSeconds: 60 +``` + +## Values Validation + +The chart includes a JSON Schema (`values.schema.json`) that validates your configuration. Most Helm clients will automatically validate values against this schema. 
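+
+For example, supplying a value outside the schema's allowed set should make an install fail fast during validation. A rough sketch (the exact error wording depends on your Helm version):
+
+```bash
+# transport.mode only permits stdio, sse, or http, so this dry run should be rejected
+helm install mcp-server ./helm-chart --set transport.mode=websocket --dry-run
+# Error: values don't meet the specifications of the schema(s) in the following chart(s):
+# mcp-server-kubernetes: ...
+```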
+ +To manually validate your values file: +```bash +# Using helm plugin (if available) +helm plugin install https://github.com/losisin/helm-values-schema-json +helm schema validate ./helm-chart/values.yaml ./helm-chart/values.schema.json + +# Using online JSON Schema validators +# Copy your values and the schema to https://www.jsonschemavalidator.net/ +``` + +## Configuration Values + +### Core Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `image.repository` | string | `"flux159/mcp-server-kubernetes"` | Container image repository | +| `image.tag` | string | `"latest"` | Image tag | +| `transport.mode` | string | `"http"` | Transport mode: stdio, sse, http | +| `transport.service.type` | string | `"ClusterIP"` | Service type for http/sse modes | +| `kubeconfig.provider` | string | `"serviceaccount"` | Kubeconfig provider type | + +### Security Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `security.allowOnlyNonDestructive` | bool | `false` | Disable destructive operations (kubectl_delete, uninstall_helm_chart, cleanup, kubectl_generic) | +| `security.allowOnlyReadonly` | bool | `false` | Enable read-only mode (kubectl_get, kubectl_describe, kubectl_logs, kubectl_context, explain_resource, list_api_resources, ping) | +| `security.allowedTools` | string | `""` | Comma-separated list of specific tools to allow | + +### Scaling and Resources + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `autoscaling.enabled` | bool | `false` | Enable HorizontalPodAutoscaler | +| `autoscaling.minReplicas` | int | `1` | Minimum number of replicas | +| `autoscaling.maxReplicas` | int | `10` | Maximum number of replicas | +| `networkPolicy.enabled` | bool | `false` | Enable NetworkPolicy | +| `resources.limits.memory` | string | `"512Mi"` | Memory limit | +| `resources.limits.cpu` | string | `"500m"` | CPU limit | + +### Common Configuration + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| `commonLabels` | object | `{}` | Common labels for all resources | +| `commonAnnotations` | object | `{}` | Common annotations for all resources | +| `rbac.create` | bool | `true` | Create RBAC resources | +| `serviceAccount.create` | bool | `true` | Create service account | + +## Installation from Examples + +Use the provided example files for common deployment scenarios: + +```bash +# AWS Multi-cluster with role assumption +helm install mcp-server ./helm-chart -f examples/aws-multi-cluster.yaml + +# AWS IRSA integration +helm install mcp-server ./helm-chart -f examples/aws-irsa-example.yaml + +# GCP Workload Identity +helm install mcp-server ./helm-chart -f examples/gcp-workload-identity.yaml + +# Azure Workload Identity +helm install mcp-server ./helm-chart -f examples/azure-workload-identity.yaml + +# Secure NetworkPolicy with default deny +helm install mcp-server ./helm-chart -f examples/secure-networkpolicy.yaml + +# Complete production configuration +helm install mcp-server ./helm-chart -f examples/production-complete.yaml +``` + +## Testing + +After installation, validate the deployment using Helm tests: + +```bash +# Run all tests +helm test mcp-server + +# Run tests with detailed output +helm test mcp-server --logs + +# Run specific test +helm test mcp-server --filter name=mcp-server-test-connectivity +``` + +### Available Tests + +The chart includes several test pods to validate functionality: + +1. 
**Kubeconfig Test** (`test-kubeconfig`) - Weight: 5 + - Validates kubeconfig fetching from cloud providers + - Tests kubectl connectivity to target clusters + - Verifies API server access and authentication + +2. **Connectivity Test** (`test-connectivity`) - Weight: 10 + - Tests HTTP/SSE transport connectivity (if enabled) + - Validates service discovery and network access + - Basic health check validation + +3. **MCP Tools Test** (`test-mcp-tools`) - Weight: 20 + - Tests MCP protocol functionality + - Validates available tools and security filtering + - Confirms tool restrictions (readonly/non-destructive modes) + +### Test Examples + +```bash +# Test basic installation +helm install mcp-server ./helm-chart +helm test mcp-server + +# Test AWS multi-cluster setup +helm install mcp-server ./helm-chart -f examples/aws-multi-cluster.yaml +helm test mcp-server --logs + +# Test with security restrictions +helm install mcp-server ./helm-chart --set security.allowOnlyReadonly=true +helm test mcp-server --filter name=mcp-server-test-mcp-tools +``` + +### Test Troubleshooting + +If tests fail, check the test pod logs: + +```bash +# Get test pod logs +kubectl logs mcp-server-test-connectivity +kubectl logs mcp-server-test-kubeconfig +kubectl logs mcp-server-test-mcp-tools + +# Describe test pods for more details +kubectl describe pod mcp-server-test-connectivity +``` + +Common test failure causes: +- **Kubeconfig test**: Cloud provider credentials, network access, RBAC permissions +- **Connectivity test**: Service not ready, network policies, ingress configuration +- **MCP tools test**: Server startup time, security filtering configuration + +### NetworkPolicy Considerations + +When NetworkPolicy is enabled, the chart automatically creates additional NetworkPolicy rules for test pods: + +- **Test Pod Communication**: Allows test pods to communicate with the MCP server +- **DNS Access**: Enables DNS resolution for test pods +- **Cloud Provider APIs**: Permits access to cloud provider APIs for kubeconfig tests +- **Kubernetes API**: Allows kubectl connectivity tests + +The test NetworkPolicy (`networkpolicy-tests.yaml`) includes: +- Ingress rules allowing test pod → MCP server communication +- Egress rules for DNS, Kubernetes API, and cloud provider access +- Automatic cleanup after test completion + +If tests fail with NetworkPolicy enabled, check: +```bash +# Verify NetworkPolicy rules +kubectl get networkpolicy +kubectl describe networkpolicy mcp-server-networkpolicy-tests + +# Check test pod network connectivity +kubectl exec mcp-server-test-connectivity -- nslookup kubernetes.default +kubectl exec mcp-server-test-connectivity -- nc -zv mcp-server 3001 +``` + +## Upgrading + +```bash +# Upgrade to latest version +helm upgrade mcp-server ./helm-chart + +# Upgrade with new values +helm upgrade mcp-server ./helm-chart --set image.tag=2.8.1 + +# Upgrade from values file +helm upgrade mcp-server ./helm-chart -f my-values.yaml + +# Test after upgrade +helm test mcp-server +``` + +## Uninstalling + +```bash +# Run tests before uninstall (optional) +helm test mcp-server + +# Uninstall the release +helm uninstall mcp-server +``` + +## Troubleshooting + +### Check Pod Status +```bash +kubectl get pods -l app.kubernetes.io/name=mcp-server-kubernetes +``` + +### View Logs +```bash +kubectl logs -l app.kubernetes.io/name=mcp-server-kubernetes +``` + +### Test Init Container +```bash +kubectl describe pod -l app.kubernetes.io/name=mcp-server-kubernetes +``` + +### Common Issues + +1. 
**Init container fails**: Check cloud provider credentials and permissions +2. **RBAC errors**: Verify ServiceAccount has required cluster permissions +3. **Transport not accessible**: Check service type and ingress configuration +4. **Kubeconfig issues**: Validate provider configuration and network access +5. **NetworkPolicy blocks traffic**: Verify egress rules for DNS, API, and cloud providers \ No newline at end of file diff --git a/helm-chart/examples/aws-irsa-example.yaml b/helm-chart/examples/aws-irsa-example.yaml new file mode 100644 index 0000000..08dd7c4 --- /dev/null +++ b/helm-chart/examples/aws-irsa-example.yaml @@ -0,0 +1,120 @@ +# Example: AWS IRSA (IAM Roles for Service Accounts) Configuration +# This example shows how to use AWS IRSA for secure access to AWS resources +# without storing long-lived credentials in the cluster. + +# Deploy with: helm install mcp-server ./helm-chart -f examples/aws-irsa-example.yaml + +image: + repository: flux159/mcp-server-kubernetes + tag: "latest" + +# HTTP transport for web accessibility +transport: + mode: "http" + service: + type: ClusterIP + port: 3001 + +# Use ServiceAccount mode with IRSA for secure AWS access +kubeconfig: + provider: "serviceaccount" + # No AWS credentials needed - IRSA handles authentication + +# Service Account with IRSA configuration +serviceAccount: + create: true + annotations: + # AWS IRSA annotation - links ServiceAccount to IAM role + eks.amazonaws.com/role-arn: "arn:aws:iam::123456789012:role/mcp-server-cross-cluster-role" + # Use regional STS endpoints for better performance and reliability + eks.amazonaws.com/sts-regional-endpoints: "true" + +# Security configuration +security: + allowOnlyNonDestructive: true + + podSecurityContext: + fsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# RBAC for the ServiceAccount +rbac: + create: true + annotations: + description: "MCP Server with IRSA cross-cluster access" + rules: + # Full access to current cluster via ServiceAccount token + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] + +# Resource configuration +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + +# Additional environment variables for AWS SDK +env: + # AWS SDK will automatically use IRSA credentials + AWS_DEFAULT_REGION: "us-east-1" + AWS_SDK_LOAD_CONFIG: "1" + # Enable IRSA token refresh + AWS_ROLE_SESSION_NAME: "mcp-server-session" + +# Example IAM role trust policy for the IRSA role: +# { +# "Version": "2012-10-17", +# "Statement": [ +# { +# "Effect": "Allow", +# "Principal": { +# "Federated": "arn:aws:iam::123456789012:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/EXAMPLE" +# }, +# "Action": "sts:AssumeRoleWithWebIdentity", +# "Condition": { +# "StringEquals": { +# "oidc.eks.us-east-1.amazonaws.com/id/EXAMPLE:sub": "system:serviceaccount:default:mcp-server-kubernetes", +# "oidc.eks.us-east-1.amazonaws.com/id/EXAMPLE:aud": "sts.amazonaws.com" +# } +# } +# } +# ] +# } + +# Example IAM role permissions for cross-cluster EKS access: +# { +# "Version": "2012-10-17", +# "Statement": [ +# { +# "Effect": "Allow", +# "Action": [ +# "eks:DescribeCluster", +# "eks:ListClusters", +# "sts:AssumeRole" +# ], +# "Resource": "*" +# }, +# { +# "Effect": "Allow", +# "Action": "sts:AssumeRole", +# "Resource": [ +# "arn:aws:iam::*:role/EKSClusterAdminRole", +# "arn:aws:iam::*:role/EKSReadOnlyRole" +# ] +# } 
+# ] +# } \ No newline at end of file diff --git a/helm-chart/examples/aws-multi-cluster.yaml b/helm-chart/examples/aws-multi-cluster.yaml new file mode 100644 index 0000000..40d24d6 --- /dev/null +++ b/helm-chart/examples/aws-multi-cluster.yaml @@ -0,0 +1,151 @@ +# Example: AWS EKS Multi-Cluster Configuration +# This example shows how to configure the MCP server to manage multiple EKS clusters +# across different AWS accounts and regions with role assumption. + +# Deploy with: helm install mcp-server ./helm-chart -f examples/aws-multi-cluster.yaml + +image: + repository: flux159/mcp-server-kubernetes + tag: "2.8.0" + +# HTTP transport for web accessibility +transport: + mode: "http" + service: + type: LoadBalancer + port: 3001 + ingress: + enabled: true + className: "nginx" + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + hosts: + - host: mcp-server.company.com + paths: + - path: / + pathType: Prefix + tls: + - secretName: mcp-server-tls + hosts: + - mcp-server.company.com + +# AWS EKS multi-cluster configuration +kubeconfig: + provider: "aws" + aws: + clusters: + # Production US-East cluster + - name: "prod-us-east" + clusterName: "company-prod-us-east" + region: "us-east-1" + roleArn: "arn:aws:iam::123456789012:role/EKSClusterAdminRole" + extraArgs: + - "--profile=production" + - "--alias=prod-east" + - "--external-id=mcp-server-prod" + - "--session-name=mcp-prod-session" + + # Production US-West cluster + - name: "prod-us-west" + clusterName: "company-prod-us-west" + region: "us-west-2" + roleArn: "arn:aws:iam::123456789012:role/EKSClusterAdminRole" + extraArgs: + - "--profile=production" + - "--alias=prod-west" + - "--external-id=mcp-server-prod" + - "--session-name=mcp-prod-session" + + # Staging cluster (different account) + - name: "staging-us-east" + clusterName: "company-staging" + region: "us-east-1" + roleArn: "arn:aws:iam::987654321098:role/EKSReadOnlyRole" + extraArgs: + - "--profile=staging" + - "--alias=staging" + - "--external-id=mcp-server-staging" + - "--duration-seconds=3600" + + # Development cluster (limited permissions) + - name: "dev-us-central" + clusterName: "company-dev" + region: "us-central-1" + roleArn: "arn:aws:iam::192837465019:role/EKSDeveloperRole" + extraArgs: + - "--profile=development" + - "--alias=dev" + - "--no-include-email" + + defaultContext: "prod-us-east" + + # Environment variables for AWS authentication + env: + AWS_DEFAULT_REGION: "us-east-1" + AWS_SDK_LOAD_CONFIG: "1" + +# Security configuration for production +security: + # Allow non-destructive operations only for safety + allowOnlyNonDestructive: true + + podSecurityContext: + fsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# RBAC for cross-cluster operations +rbac: + create: true + annotations: + description: "MCP Server cross-cluster access" + rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["get", "list", "describe", "watch"] + +# Resource limits for production workload +resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 200m + memory: 256Mi + +# Production deployment settings +replicaCount: 2 + +nodeSelector: + kubernetes.io/os: linux + +tolerations: + - key: "dedicated" + operator: "Equal" + value: "mcp-server" + effect: "NoSchedule" + +affinity: + podAntiAffinity: + 
preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - mcp-server-kubernetes + topologyKey: kubernetes.io/hostname \ No newline at end of file diff --git a/helm-chart/examples/azure-workload-identity.yaml b/helm-chart/examples/azure-workload-identity.yaml new file mode 100644 index 0000000..6708ac1 --- /dev/null +++ b/helm-chart/examples/azure-workload-identity.yaml @@ -0,0 +1,136 @@ +# Example: Azure Workload Identity Configuration +# This example shows how to use Azure Workload Identity for secure access to Azure resources +# without storing service principal credentials in the cluster. + +# Deploy with: helm install mcp-server ./helm-chart -f examples/azure-workload-identity.yaml + +image: + repository: flux159/mcp-server-kubernetes + tag: "latest" + +# HTTP transport +transport: + mode: "http" + service: + type: ClusterIP + port: 3001 + +# Use ServiceAccount mode with Workload Identity for secure Azure access +kubeconfig: + provider: "serviceaccount" + # No Azure service principal secrets needed - Workload Identity handles authentication + +# Service Account with Azure Workload Identity configuration +serviceAccount: + create: true + annotations: + # Azure Workload Identity annotations + azure.workload.identity/client-id: "12345678-1234-1234-1234-123456789012" + azure.workload.identity/tenant-id: "87654321-4321-4321-4321-210987654321" + azure.workload.identity/use: "true" + +# Pod labels required for Azure Workload Identity +podLabels: + azure.workload.identity/use: "true" + +# Security configuration +security: + allowOnlyNonDestructive: true + + podSecurityContext: + fsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# RBAC for the ServiceAccount +rbac: + create: true + annotations: + description: "MCP Server with Azure Workload Identity cross-cluster access" + rules: + # Full access to current cluster via ServiceAccount token + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] + +# Resource configuration +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + +# Environment variables for Azure SDK +env: + # Azure SDK will automatically use Workload Identity credentials + AZURE_CLIENT_ID: "12345678-1234-1234-1234-123456789012" + AZURE_TENANT_ID: "87654321-4321-4321-4321-210987654321" + AZURE_AUTHORITY_HOST: "https://login.microsoftonline.com/" + AZURE_FEDERATED_TOKEN_FILE: "/var/run/secrets/azure/tokens/azure-identity-token" + +# Required setup steps for Azure Workload Identity: +# +# 1. Install Azure Workload Identity on the AKS cluster: +# helm repo add azure-workload-identity https://azure.github.io/azure-workload-identity/charts +# helm install workload-identity-webhook azure-workload-identity/workload-identity-webhook \ +# --namespace azure-workload-identity-system \ +# --create-namespace +# +# 2. Enable OIDC issuer on the AKS cluster: +# az aks update \ +# --resource-group myResourceGroup \ +# --name myAKSCluster \ +# --enable-oidc-issuer \ +# --enable-workload-identity +# +# 3. Create a Managed Identity: +# az identity create \ +# --name mcp-server-identity \ +# --resource-group myResourceGroup \ +# --location eastus +# +# 4. 
Create federated credential for the Managed Identity: +# az identity federated-credential create \ +# --name mcp-server-federated-credential \ +# --identity-name mcp-server-identity \ +# --resource-group myResourceGroup \ +# --issuer $(az aks show --resource-group myResourceGroup --name myAKSCluster --query "oidcIssuerProfile.issuerUrl" -o tsv) \ +# --subject system:serviceaccount:NAMESPACE:RELEASE_NAME-mcp-server-kubernetes +# +# 5. Assign necessary Azure RBAC roles to the Managed Identity: +# az role assignment create \ +# --assignee $(az identity show --resource-group myResourceGroup --name mcp-server-identity --query principalId -o tsv) \ +# --role "Azure Kubernetes Service Cluster User Role" \ +# --scope /subscriptions/SUBSCRIPTION_ID/resourceGroups/myResourceGroup + +# Example Azure role assignments for cross-cluster AKS access: +# - Azure Kubernetes Service Cluster Admin Role (for full cluster access) +# - Azure Kubernetes Service Cluster User Role (for basic access) +# - Azure Kubernetes Service RBAC Admin (for RBAC management) +# - Custom roles for specific resource access + +# Volume mounts for Azure Workload Identity token +volumeMounts: + - name: azure-identity-token + mountPath: "/var/run/secrets/azure/tokens" + readOnly: true + +volumes: + - name: azure-identity-token + projected: + sources: + - serviceAccountToken: + path: azure-identity-token + expirationSeconds: 3600 + audience: api://AzureADTokenExchange \ No newline at end of file diff --git a/helm-chart/examples/gcp-workload-identity.yaml b/helm-chart/examples/gcp-workload-identity.yaml new file mode 100644 index 0000000..59ec705 --- /dev/null +++ b/helm-chart/examples/gcp-workload-identity.yaml @@ -0,0 +1,104 @@ +# Example: GCP Workload Identity Configuration +# This example shows how to use GCP Workload Identity for secure access to GCP resources +# without storing service account keys in the cluster. 
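+#
+# Optional sanity check (illustrative only, not part of the chart): run a throwaway
+# pod that uses the annotated ServiceAccount and confirm which GCP identity it receives.
+# NAMESPACE and the ServiceAccount name below are placeholders - adjust to your release.
+#   kubectl run wi-test -n NAMESPACE --rm -it --restart=Never \
+#     --image=google/cloud-sdk:slim \
+#     --overrides='{"spec":{"serviceAccountName":"RELEASE_NAME-mcp-server-kubernetes"}}' \
+#     -- gcloud auth list
+#   # Expected active account: mcp-server@PROJECT_ID.iam.gserviceaccount.com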
+ +# Deploy with: helm install mcp-server ./helm-chart -f examples/gcp-workload-identity.yaml + +image: + repository: flux159/mcp-server-kubernetes + tag: "latest" + +# HTTP transport +transport: + mode: "http" + service: + type: ClusterIP + port: 3001 + +# Use ServiceAccount mode with Workload Identity for secure GCP access +kubeconfig: + provider: "serviceaccount" + # No GCP service account keys needed - Workload Identity handles authentication + +# Service Account with Workload Identity configuration +serviceAccount: + create: true + annotations: + # GCP Workload Identity annotation - links K8s ServiceAccount to GCP Service Account + iam.gke.io/gcp-service-account: "mcp-server@my-gcp-project.iam.gserviceaccount.com" + +# Security configuration +security: + allowOnlyNonDestructive: true + + podSecurityContext: + fsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# RBAC for the ServiceAccount +rbac: + create: true + annotations: + description: "MCP Server with Workload Identity cross-cluster access" + rules: + # Full access to current cluster via ServiceAccount token + - apiGroups: ["*"] + resources: ["*"] + verbs: ["*"] + +# Resource configuration +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + +# Environment variables for GCP SDK +env: + # GCP SDK will automatically use Workload Identity credentials + GOOGLE_CLOUD_PROJECT: "my-gcp-project" + CLOUDSDK_CORE_PROJECT: "my-gcp-project" + +# Required setup steps for GCP Workload Identity: +# +# 1. Enable Workload Identity on the GKE cluster: +# gcloud container clusters update CLUSTER_NAME \ +# --workload-pool=PROJECT_ID.svc.id.goog +# +# 2. Create a GCP Service Account: +# gcloud iam service-accounts create mcp-server \ +# --display-name="MCP Server Service Account" +# +# 3. Grant necessary permissions to the GCP Service Account: +# gcloud projects add-iam-policy-binding PROJECT_ID \ +# --member="serviceAccount:mcp-server@PROJECT_ID.iam.gserviceaccount.com" \ +# --role="roles/container.clusterAdmin" +# +# 4. Allow the Kubernetes ServiceAccount to impersonate the GCP Service Account: +# gcloud iam service-accounts add-iam-policy-binding \ +# --role roles/iam.workloadIdentityUser \ +# --member "serviceAccount:PROJECT_ID.svc.id.goog[NAMESPACE/RELEASE_NAME-mcp-server-kubernetes]" \ +# mcp-server@PROJECT_ID.iam.gserviceaccount.com +# +# 5. Annotate the Kubernetes ServiceAccount (done automatically by this chart): +# kubectl annotate serviceaccount RELEASE_NAME-mcp-server-kubernetes \ +# --namespace NAMESPACE \ +# iam.gke.io/gcp-service-account=mcp-server@PROJECT_ID.iam.gserviceaccount.com + +# Example GCP Service Account permissions for cross-cluster GKE access: +# - roles/container.clusterAdmin (for full cluster access) +# - roles/container.clusterViewer (for read-only access) +# - roles/iam.serviceAccountTokenCreator (for impersonation) +# - Custom roles for specific resource access \ No newline at end of file diff --git a/helm-chart/examples/production-complete.yaml b/helm-chart/examples/production-complete.yaml new file mode 100644 index 0000000..296de33 --- /dev/null +++ b/helm-chart/examples/production-complete.yaml @@ -0,0 +1,291 @@ +# Complete Production Configuration Example +# This example demonstrates all major features of the MCP server Helm chart +# including multi-cluster AWS access, security, networking, and auto-scaling. 
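+#
+# Prerequisite (illustrative): the nginx ingress in this example references a
+# basic-auth secret named "mcp-server-auth". One way to create it beforehand,
+# assuming htpasswd is available and NAMESPACE is the target namespace:
+#   htpasswd -c auth admin
+#   kubectl create secret generic mcp-server-auth --from-file=auth -n NAMESPACE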
+ +# Deploy with: helm install mcp-server ./helm-chart -f examples/production-complete.yaml + +image: + repository: flux159/mcp-server-kubernetes + tag: "2.8.0" + pullPolicy: IfNotPresent + +# Common labels and annotations for all resources +commonLabels: + environment: production + team: platform + cost-center: engineering + app.kubernetes.io/part-of: mcp-platform + +commonAnnotations: + monitoring.coreos.com/enabled: "true" + backup.velero.io/backup-volumes: "kubeconfig-volume" + policy.kubernetes.io/security-level: "restricted" + +# HTTP transport for web accessibility +transport: + mode: "http" + service: + type: ClusterIP + port: 3001 + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + service.beta.kubernetes.io/aws-load-balancer-internal: "true" + + ingress: + enabled: true + className: "nginx" + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + nginx.ingress.kubernetes.io/auth-type: basic + nginx.ingress.kubernetes.io/auth-secret: mcp-server-auth + hosts: + - host: mcp-server.company.com + paths: + - path: / + pathType: Prefix + tls: + - secretName: mcp-server-tls + hosts: + - mcp-server.company.com + +# AWS EKS multi-cluster configuration +kubeconfig: + provider: "aws" + aws: + clusters: + # Production US-East cluster + - name: "prod-us-east" + clusterName: "company-prod-us-east" + region: "us-east-1" + roleArn: "arn:aws:iam::123456789012:role/EKSClusterAdminRole" + extraArgs: + - "--profile=production" + - "--alias=prod-east" + - "--external-id=mcp-server-prod" + + # Production US-West cluster + - name: "prod-us-west" + clusterName: "company-prod-us-west" + region: "us-west-2" + roleArn: "arn:aws:iam::123456789012:role/EKSClusterAdminRole" + extraArgs: + - "--profile=production" + - "--alias=prod-west" + + # Staging cluster (different account) + - name: "staging-us-east" + clusterName: "company-staging" + region: "us-east-1" + roleArn: "arn:aws:iam::987654321098:role/EKSReadOnlyRole" + extraArgs: + - "--profile=staging" + - "--duration-seconds=3600" + + defaultContext: "prod-us-east" + + env: + AWS_DEFAULT_REGION: "us-east-1" + AWS_SDK_LOAD_CONFIG: "1" + +# Security configuration +security: + # Enable non-destructive mode (disables: kubectl_delete, uninstall_helm_chart, cleanup, kubectl_generic) + allowOnlyNonDestructive: true + + podSecurityContext: + fsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# Network Policy for secure communication +networkPolicy: + enabled: true + annotations: + policy.kubernetes.io/description: "MCP Server production network policy" + + ingress: + # Allow ingress controller access + - from: + - namespaceSelector: + matchLabels: + name: ingress-nginx + podSelector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + ports: + - protocol: TCP + port: 3001 + + # Allow monitoring access + - from: + - namespaceSelector: + matchLabels: + name: monitoring + podSelector: + matchLabels: + app: prometheus + ports: + - protocol: TCP + port: 3001 + + egress: + # Allow DNS resolution + - to: + - namespaceSelector: + matchLabels: + name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + + # Allow Kubernetes API access + - to: + - 
ipBlock: + cidr: 10.96.0.0/12 # Service CIDR + ports: + - protocol: TCP + port: 443 + + # Allow AWS API access + - to: + - ipBlock: + cidr: 0.0.0.0/0 + ports: + - protocol: TCP + port: 443 + +# Horizontal Pod Autoscaler +autoscaling: + enabled: true + minReplicas: 3 + maxReplicas: 30 + targetCPUUtilizationPercentage: 70 + targetMemoryUtilizationPercentage: 80 + + behavior: + scaleUp: + stabilizationWindowSeconds: 60 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + - type: Pods + value: 2 + periodSeconds: 60 + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 10 + periodSeconds: 60 + - type: Pods + value: 1 + periodSeconds: 180 + +# RBAC for production +rbac: + create: true + annotations: + rbac.authorization.kubernetes.io/description: "MCP Server cross-cluster access" + rules: + - apiGroups: ["*"] + resources: ["*"] + verbs: ["get", "list", "describe", "watch"] + # Limited write access for specific resources + - apiGroups: [""] + resources: ["configmaps", "secrets"] + verbs: ["create", "patch", "update"] + +# Service Account +serviceAccount: + create: true + annotations: + eks.amazonaws.com/role-arn: "arn:aws:iam::123456789012:role/EKSPodRole" + +# Resource limits for production workload +resources: + limits: + cpu: 2000m + memory: 2Gi + requests: + cpu: 500m + memory: 512Mi + +# Health checks +livenessProbe: + enabled: true + httpGet: + path: /health + port: 3001 + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + +readinessProbe: + enabled: true + httpGet: + path: /ready + port: 3001 + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 3 + +# Pod configuration +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "3001" + prometheus.io/path: "/metrics" + +# Deployment settings +replicaCount: 3 + +nodeSelector: + kubernetes.io/os: linux + node-type: compute-optimized + +tolerations: + - key: "dedicated" + operator: "Equal" + value: "mcp-server" + effect: "NoSchedule" + +affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - mcp-server-kubernetes + topologyKey: kubernetes.io/hostname + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: instance-type + operator: In + values: + - c5.large + - c5.xlarge \ No newline at end of file diff --git a/helm-chart/examples/secure-networkpolicy.yaml b/helm-chart/examples/secure-networkpolicy.yaml new file mode 100644 index 0000000..4550bdf --- /dev/null +++ b/helm-chart/examples/secure-networkpolicy.yaml @@ -0,0 +1,221 @@ +# Example: Secure NetworkPolicy with Default Deny +# This example demonstrates security best practices with NetworkPolicy +# using default deny and explicit allow rules. 
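+#
+# For reference only (not created by this chart): a namespace-wide default-deny
+# policy that pairs with the explicit allow rules below. Apply it separately if
+# your namespace does not already enforce one:
+#   apiVersion: networking.k8s.io/v1
+#   kind: NetworkPolicy
+#   metadata:
+#     name: default-deny-all
+#   spec:
+#     podSelector: {}
+#     policyTypes:
+#       - Ingress
+#       - Egress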
+ +# Deploy with: helm install mcp-server ./helm-chart -f examples/secure-networkpolicy.yaml + +image: + repository: flux159/mcp-server-kubernetes + tag: "latest" + +# HTTP transport for web access +transport: + mode: "http" + service: + type: ClusterIP + port: 3001 + + ingress: + enabled: true + className: "nginx" + hosts: + - host: mcp-server.company.com + paths: + - path: / + pathType: Prefix + +# Use ServiceAccount authentication +kubeconfig: + provider: "serviceaccount" + +# Security configuration +security: + allowOnlyNonDestructive: true + + podSecurityContext: + fsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# NetworkPolicy with default deny and explicit allow rules +networkPolicy: + enabled: true + annotations: + policy.kubernetes.io/description: "Default deny with explicit allow rules for MCP server" + + # Ingress rules - explicitly allow inbound connections + ingress: + # Allow ingress controller access for web traffic + - from: + - namespaceSelector: + matchLabels: + name: ingress-nginx + podSelector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + ports: + - protocol: TCP + port: 3001 + + # Allow monitoring/metrics collection + - from: + - namespaceSelector: + matchLabels: + name: monitoring + podSelector: + matchLabels: + app: prometheus + ports: + - protocol: TCP + port: 3001 + + # Allow access from specific internal subnets only + - from: + - ipBlock: + cidr: 10.0.0.0/8 + except: + - 10.0.1.0/24 # DMZ subnet + - 10.0.2.0/24 # Guest network + ports: + - protocol: TCP + port: 3001 + + # Egress rules - explicitly allow outbound connections (CRITICAL!) + egress: + # REQUIRED: DNS resolution (adjust labels for your cluster) + - to: + - namespaceSelector: + matchLabels: + name: kube-system + podSelector: + matchLabels: + k8s-app: kube-dns + ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + + # Alternative DNS resolution (for clusters using CoreDNS) + - to: + - namespaceSelector: + matchLabels: + name: kube-system + podSelector: + matchLabels: + k8s-app: coredns + ports: + - protocol: UDP + port: 53 + - protocol: TCP + port: 53 + + # REQUIRED: Kubernetes API server access + # Option 1: Via service CIDR (adjust for your cluster) + - to: + - ipBlock: + cidr: 10.96.0.0/12 # Default service CIDR + ports: + - protocol: TCP + port: 443 + + # Option 2: Direct API server access (if using external API server) + - to: + - ipBlock: + cidr: 172.20.0.0/16 # API server subnet + ports: + - protocol: TCP + port: 6443 + + # REQUIRED for AWS: AWS API and metadata service access + - to: + - ipBlock: + cidr: 169.254.169.254/32 # AWS metadata service + ports: + - protocol: TCP + port: 80 + + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 # Internal networks + - 172.16.0.0/12 # Internal networks + - 192.168.0.0/16 # Internal networks + ports: + - protocol: TCP + port: 443 # AWS APIs (EKS, STS, etc.) 
+ + # REQUIRED for GCP: GCP metadata and API access + - to: + - ipBlock: + cidr: 169.254.169.254/32 # GCP metadata service + ports: + - protocol: TCP + port: 80 + + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 + ports: + - protocol: TCP + port: 443 # GCP APIs + + # REQUIRED for Azure: Azure metadata and API access + - to: + - ipBlock: + cidr: 169.254.169.254/32 # Azure metadata service + ports: + - protocol: TCP + port: 80 + + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 + - 172.16.0.0/12 + - 192.168.0.0/16 + ports: + - protocol: TCP + port: 443 # Azure APIs + +# Resource limits +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + +# IMPORTANT NOTES FOR NETWORKPOLICY: +# +# 1. DEFAULT DENY: This NetworkPolicy implements default deny for both ingress and egress +# 2. EGRESS REQUIREMENTS: You MUST allow egress traffic for: +# - DNS resolution (kube-dns/coredns) +# - Kubernetes API server access +# - Cloud provider APIs (AWS/GCP/Azure) +# - Metadata services (169.254.169.254) +# 3. CLUSTER-SPECIFIC: Adjust CIDR blocks and selectors for your cluster: +# - Service CIDR (usually 10.96.0.0/12 or 172.20.0.0/16) +# - API server endpoints +# - DNS service labels +# 4. TESTING: Test connectivity after applying: +# kubectl exec -it deployment/mcp-server -- nslookup kubernetes.default +# kubectl exec -it deployment/mcp-server -- curl -k https://kubernetes.default/api +# 5. MONITORING: Monitor NetworkPolicy denials: +# kubectl logs -n kube-system -l app=calico-node | grep denied \ No newline at end of file diff --git a/helm-chart/templates/NOTES.txt b/helm-chart/templates/NOTES.txt new file mode 100644 index 0000000..8eae4bf --- /dev/null +++ b/helm-chart/templates/NOTES.txt @@ -0,0 +1,129 @@ +1. MCP Server Kubernetes has been deployed! + +{{- if eq .Values.transport.mode "stdio" }} + + Transport Mode: STDIO (default) + + The MCP server is running in stdio mode and is ready to accept MCP client connections. + + To connect via kubectl port-forward: + kubectl port-forward deployment/{{ include "mcp-server-kubernetes.fullname" . }} 8080:8080 + +{{- else if eq .Values.transport.mode "sse" }} + + Transport Mode: Server-Sent Events (SSE) + + {{- if .Values.transport.ingress.enabled }} + {{- range $host := .Values.transport.ingress.hosts }} + The MCP server is accessible at: + {{- range .paths }} + http{{ if $.Values.transport.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} + {{- end }} + {{- else }} + Get the application URL by running these commands: + {{- if contains "NodePort" .Values.transport.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "mcp-server-kubernetes.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + {{- else if contains "LoadBalancer" .Values.transport.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "mcp-server-kubernetes.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "mcp-server-kubernetes.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.transport.service.port }} + {{- else if contains "ClusterIP" .Values.transport.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "{{ include "mcp-server-kubernetes.selectorLabels" . }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT + {{- end }} + {{- end }} + +{{- else if eq .Values.transport.mode "http" }} + + Transport Mode: HTTP Streaming + + {{- if .Values.transport.ingress.enabled }} + {{- range $host := .Values.transport.ingress.hosts }} + The MCP server is accessible at: + {{- range .paths }} + http{{ if $.Values.transport.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}mcp + {{- end }} + {{- end }} + {{- else }} + Get the application URL by running these commands: + {{- if contains "NodePort" .Values.transport.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "mcp-server-kubernetes.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT/mcp + {{- else if contains "LoadBalancer" .Values.transport.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "mcp-server-kubernetes.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "mcp-server-kubernetes.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.transport.service.port }}/mcp + {{- else if contains "ClusterIP" .Values.transport.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "{{ include "mcp-server-kubernetes.selectorLabels" . }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080/mcp to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT + {{- end }} + {{- end }} + +{{- end }} + +2. 
Configuration Summary: + + Kubeconfig Provider: {{ .Values.kubeconfig.provider }} + {{- if eq .Values.kubeconfig.provider "aws" }} + AWS Clusters: {{ len .Values.kubeconfig.aws.clusters }} + {{- range .Values.kubeconfig.aws.clusters }} + - {{ .name }}: {{ .clusterName }} ({{ .region }}) + {{- end }} + {{- else if eq .Values.kubeconfig.provider "gcp" }} + GCP Clusters: {{ len .Values.kubeconfig.gcp.clusters }} + {{- range .Values.kubeconfig.gcp.clusters }} + - {{ .name }}: {{ .clusterName }} ({{ .zone | default .region }}) + {{- end }} + {{- else if eq .Values.kubeconfig.provider "azure" }} + Azure Clusters: {{ len .Values.kubeconfig.azure.clusters }} + {{- range .Values.kubeconfig.azure.clusters }} + - {{ .name }}: {{ .clusterName }} ({{ .resourceGroup }}) + {{- end }} + {{- else if eq .Values.kubeconfig.provider "url" }} + URL Configs: {{ len .Values.kubeconfig.url.configs }} + {{- range .Values.kubeconfig.url.configs }} + - {{ .name }}: {{ .url }} + {{- end }} + {{- else if eq .Values.kubeconfig.provider "serviceaccount" }} + Using ServiceAccount: {{ include "mcp-server-kubernetes.serviceAccountName" . }} + {{- else if eq .Values.kubeconfig.provider "content" }} + Using provided kubeconfig content + {{- else if eq .Values.kubeconfig.provider "custom" }} + Using custom command: {{ .Values.kubeconfig.custom.command }} + {{- end }} + + Security Settings: + {{- if .Values.security.allowOnlyNonDestructive }} + - Non-destructive tools only: ENABLED + {{- end }} + {{- if .Values.security.allowOnlyReadonly }} + - Read-only tools only: ENABLED + {{- end }} + {{- if .Values.security.allowedTools }} + - Allowed tools: {{ .Values.security.allowedTools }} + {{- end }} + {{- if not (or .Values.security.allowOnlyNonDestructive .Values.security.allowOnlyReadonly .Values.security.allowedTools) }} + - All tools enabled (full access) + {{- end }} + +3. Verify deployment: + kubectl get pods -l "{{ include "mcp-server-kubernetes.selectorLabels" . }}" -n {{ .Release.Namespace }} + +4. View logs: + kubectl logs -l "{{ include "mcp-server-kubernetes.selectorLabels" . }}" -n {{ .Release.Namespace }} + +{{- if eq .Values.kubeconfig.provider "serviceaccount" }} +5. RBAC Note: + The server is using the ServiceAccount: {{ include "mcp-server-kubernetes.serviceAccountName" . }} + Make sure it has the required permissions for your intended operations. 
+{{- end }} \ No newline at end of file From e871c6630e00fa88778fe26133369511313ac456 Mon Sep 17 00:00:00 2001 From: Krupesh Date: Tue, 16 Sep 2025 18:43:31 +0530 Subject: [PATCH 09/18] fix: helm chart --- helm-chart/HELM_INSTALL.md | 6 +++--- helm-chart/examples/aws-multi-cluster.yaml | 2 +- helm-chart/examples/production-complete.yaml | 18 ++++++++++-------- helm-chart/values.schema.json | 6 ++++++ 4 files changed, 20 insertions(+), 12 deletions(-) diff --git a/helm-chart/HELM_INSTALL.md b/helm-chart/HELM_INSTALL.md index 77d92e0..44bdac4 100644 --- a/helm-chart/HELM_INSTALL.md +++ b/helm-chart/HELM_INSTALL.md @@ -65,13 +65,13 @@ helm install mcp-server-k8s ./helm-chart \ --set transport.ingress.enabled=true \ --set transport.ingress.hosts[0].host=mcp-server.company.com -# AWS with NLB (recommended for MCP streaming) +# AWS with ALB (recommended for MCP streaming) helm install mcp-server-k8s ./helm-chart \ --set transport.mode=http \ --set transport.service.type=LoadBalancer \ - --set transport.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-type"="nlb" \ + --set transport.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-type"="alb" \ --set transport.service.annotations."service\.beta\.kubernetes\.io/aws-load-balancer-internal"="true" - + # NGINX Ingress with streaming support helm install mcp-server-k8s ./helm-chart \ --set transport.mode=http \ diff --git a/helm-chart/examples/aws-multi-cluster.yaml b/helm-chart/examples/aws-multi-cluster.yaml index 40d24d6..1969e10 100644 --- a/helm-chart/examples/aws-multi-cluster.yaml +++ b/helm-chart/examples/aws-multi-cluster.yaml @@ -6,7 +6,7 @@ image: repository: flux159/mcp-server-kubernetes - tag: "2.8.0" + tag: "latest" # HTTP transport for web accessibility transport: diff --git a/helm-chart/examples/production-complete.yaml b/helm-chart/examples/production-complete.yaml index 296de33..e001fcf 100644 --- a/helm-chart/examples/production-complete.yaml +++ b/helm-chart/examples/production-complete.yaml @@ -6,7 +6,7 @@ image: repository: flux159/mcp-server-kubernetes - tag: "2.8.0" + tag: "latest" pullPolicy: IfNotPresent # Common labels and annotations for all resources @@ -28,18 +28,16 @@ transport: type: ClusterIP port: 3001 annotations: - service.beta.kubernetes.io/aws-load-balancer-type: "nlb" + service.beta.kubernetes.io/aws-load-balancer-type: "alb" service.beta.kubernetes.io/aws-load-balancer-internal: "true" ingress: enabled: true className: "nginx" annotations: - cert-manager.io/cluster-issuer: "letsencrypt-prod" - nginx.ingress.kubernetes.io/ssl-redirect: "true" - nginx.ingress.kubernetes.io/force-ssl-redirect: "true" - nginx.ingress.kubernetes.io/auth-type: basic - nginx.ingress.kubernetes.io/auth-secret: mcp-server-auth + alb.ingress.kubernetes.io/ssl-redirect: "443" + alb.ingress.kubernetes.io/healthcheck-path: /health + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP":80,"HTTPS":443}]' hosts: - host: mcp-server.company.com paths: @@ -229,6 +227,10 @@ resources: cpu: 500m memory: 512Mi + +startupProbe: + enabled: false + # Health checks livenessProbe: enabled: true @@ -243,7 +245,7 @@ livenessProbe: readinessProbe: enabled: true httpGet: - path: /ready + path: /health port: 3001 initialDelaySeconds: 5 periodSeconds: 5 diff --git a/helm-chart/values.schema.json b/helm-chart/values.schema.json index 5acb020..cd8b625 100644 --- a/helm-chart/values.schema.json +++ b/helm-chart/values.schema.json @@ -274,6 +274,12 @@ } } }, + "startupProbe": { + "type": "object", + "properties": { + "enabled": 
{"type": "boolean"} + } + }, "livenessProbe": { "type": "object", "properties": { From ba53547efc4a9e71c66e1acbf8b7eea025a7842a Mon Sep 17 00:00:00 2001 From: Krupesh Date: Tue, 16 Sep 2025 22:10:55 +0530 Subject: [PATCH 10/18] fix: helm deployment and configmap for init containers --- helm-chart/templates/_helpers.tpl | 32 ++++++++++++++-------------- helm-chart/templates/configmap.yaml | 8 ------- helm-chart/templates/deployment.yaml | 6 +++--- 3 files changed, 19 insertions(+), 27 deletions(-) diff --git a/helm-chart/templates/_helpers.tpl b/helm-chart/templates/_helpers.tpl index 962f2ae..ab5a4a8 100644 --- a/helm-chart/templates/_helpers.tpl +++ b/helm-chart/templates/_helpers.tpl @@ -103,30 +103,30 @@ Get the appropriate init container image based on provider with architecture sup Determine if we need an init container */}} {{- define "mcp-server-kubernetes.needsInitContainer" -}} -{{- if or (eq .Values.kubeconfig.provider "aws") (eq .Values.kubeconfig.provider "gcp") (eq .Values.kubeconfig.provider "azure") (eq .Values.kubeconfig.provider "url") (eq .Values.kubeconfig.provider "custom") }} +{{- if or (eq .Values.kubeconfig.provider "aws") (eq .Values.kubeconfig.provider "gcp") (eq .Values.kubeconfig.provider "azure") (eq .Values.kubeconfig.provider "url") (eq .Values.kubeconfig.provider "custom") -}} true -{{- else }} +{{- else -}} false -{{- end }} +{{- end -}} {{- end }} {{/* Generate kubeconfig environment variable based on provider */}} {{- define "mcp-server-kubernetes.kubeconfigEnv" -}} -{{- if eq .Values.kubeconfig.provider "url" }} -{{- $files := list }} -{{- range .Values.kubeconfig.url.configs }} -{{- $files = append $files (printf "/kubeconfig/%s.yaml" .name) }} -{{- end }} -{{- $files | join ":" }} -{{- else if eq .Values.kubeconfig.provider "content" }} -/kubeconfig/kubeconfig.yaml -{{- else if eq .Values.kubeconfig.provider "serviceaccount" }} -{{- /* ServiceAccount mode doesn't need KUBECONFIG env var */ -}} -{{- else }} -/kubeconfig/kubeconfig -{{- end }} +{{- if eq .Values.kubeconfig.provider "url" -}} + {{- $files := list -}} + {{- range .Values.kubeconfig.url.configs -}} + {{- $files = append $files (printf "/kubeconfig/%s.yaml" .name) -}} + {{- end -}} + {{- $files | join ":" -}} +{{- else if eq .Values.kubeconfig.provider "content" -}} + /kubeconfig/kubeconfig.yaml +{{- else if eq .Values.kubeconfig.provider "serviceaccount" -}} + {{- /* ServiceAccount mode doesn't need KUBECONFIG env var */ -}} +{{- else -}} + /kubeconfig/kubeconfig +{{- end -}} {{- end }} {{/* diff --git a/helm-chart/templates/configmap.yaml b/helm-chart/templates/configmap.yaml index 9a3557d..32fa995 100644 --- a/helm-chart/templates/configmap.yaml +++ b/helm-chart/templates/configmap.yaml @@ -52,14 +52,6 @@ data: AWS_CMD="aws eks update-kubeconfig --name {{ .clusterName | quote }} --region {{ .region | quote }}{{- if .roleArn }} --role-arn {{ .roleArn | quote }}{{- end }} --kubeconfig $KUBECONFIG{{- range .extraArgs }} {{ . 
| quote }}{{- end }}" retry_command "$AWS_CMD" "Fetching EKS cluster {{ .name }}" {{- end }} - - {{- if .Values.kubeconfig.aws.defaultContext }} - echo "Setting default context: {{ .Values.kubeconfig.aws.defaultContext }}" - kubectl config use-context {{ .Values.kubeconfig.aws.defaultContext | quote }} --kubeconfig=$KUBECONFIG - {{- end }} - - echo "AWS kubeconfig setup complete" - kubectl config get-contexts --kubeconfig=$KUBECONFIG {{- end }} {{- if eq .Values.kubeconfig.provider "gcp" }} diff --git a/helm-chart/templates/deployment.yaml b/helm-chart/templates/deployment.yaml index 801683f..7161f9b 100644 --- a/helm-chart/templates/deployment.yaml +++ b/helm-chart/templates/deployment.yaml @@ -3,6 +3,7 @@ kind: Deployment metadata: name: {{ include "mcp-server-kubernetes.fullname" . }} labels: + krupesh: {{ include "mcp-server-kubernetes.needsInitContainer" . | quote }} {{- include "mcp-server-kubernetes.labels" . | nindent 4 }} {{- $commonAnnotations := include "mcp-server-kubernetes.annotations" . }} {{- if $commonAnnotations }} @@ -64,9 +65,9 @@ spec: env: # Retry configuration for init container - name: MAX_RETRIES - value: {{ .Values.kubeconfig.initContainer.maxRetries | default 3 | quote }} + value: {{ .Values.kubeconfig.initContainer.maxRetries | quote }} - name: RETRY_DELAY - value: {{ .Values.kubeconfig.initContainer.retryDelay | default 10 | quote }} + value: {{ .Values.kubeconfig.initContainer.retryDelay | quote }} {{- range $key, $value := .Values.kubeconfig.env }} - name: {{ $key }} value: {{ $value | quote }} @@ -146,7 +147,6 @@ spec: volumeMounts: - name: kubeconfig-volume mountPath: /kubeconfig - readOnly: true {{- range .Values.volumeMounts }} - {{- toYaml . | nindent 14 }} {{- end }} From 95d3d954a2084842594568aee65ab3cc68b11a74 Mon Sep 17 00:00:00 2001 From: Krupesh Date: Tue, 16 Sep 2025 23:25:26 +0530 Subject: [PATCH 11/18] fix: example values --- helm-chart/examples/aws-multi-cluster.yaml | 25 ++++++++++++---------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/helm-chart/examples/aws-multi-cluster.yaml b/helm-chart/examples/aws-multi-cluster.yaml index 1969e10..ac14748 100644 --- a/helm-chart/examples/aws-multi-cluster.yaml +++ b/helm-chart/examples/aws-multi-cluster.yaml @@ -32,6 +32,16 @@ transport: # AWS EKS multi-cluster configuration kubeconfig: + initContainer: + maxRetries: 3 + retryDelay: 10 + resources: + limits: + cpu: 250m + memory: 200Mi + requests: + cpu: 250m + memory: 200Mi provider: "aws" aws: clusters: @@ -43,9 +53,7 @@ kubeconfig: extraArgs: - "--profile=production" - "--alias=prod-east" - - "--external-id=mcp-server-prod" - - "--session-name=mcp-prod-session" - + # Production US-West cluster - name: "prod-us-west" clusterName: "company-prod-us-west" @@ -54,9 +62,7 @@ kubeconfig: extraArgs: - "--profile=production" - "--alias=prod-west" - - "--external-id=mcp-server-prod" - - "--session-name=mcp-prod-session" - + # Staging cluster (different account) - name: "staging-us-east" clusterName: "company-staging" @@ -65,9 +71,7 @@ kubeconfig: extraArgs: - "--profile=staging" - "--alias=staging" - - "--external-id=mcp-server-staging" - - "--duration-seconds=3600" - + # Development cluster (limited permissions) - name: "dev-us-central" clusterName: "company-dev" @@ -76,8 +80,7 @@ kubeconfig: extraArgs: - "--profile=development" - "--alias=dev" - - "--no-include-email" - + defaultContext: "prod-us-east" # Environment variables for AWS authentication From 1ce230a771c42982cc416352ebe16b06653b7d76 Mon Sep 17 00:00:00 2001 From: Krupesh Date: 
Tue, 16 Sep 2025 23:26:14 +0530 Subject: [PATCH 12/18] fix: example values --- helm-chart/examples/aws-multi-cluster.yaml | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/helm-chart/examples/aws-multi-cluster.yaml b/helm-chart/examples/aws-multi-cluster.yaml index ac14748..875ba7c 100644 --- a/helm-chart/examples/aws-multi-cluster.yaml +++ b/helm-chart/examples/aws-multi-cluster.yaml @@ -92,22 +92,6 @@ kubeconfig: security: # Allow non-destructive operations only for safety allowOnlyNonDestructive: true - - podSecurityContext: - fsGroup: 1000 - runAsNonRoot: true - runAsUser: 1000 - seccompProfile: - type: RuntimeDefault - - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 # RBAC for cross-cluster operations rbac: From 5c73639ee5342423d97b16680af221da59eb2cfe Mon Sep 17 00:00:00 2001 From: Krupesh Date: Mon, 6 Oct 2025 22:06:14 +0530 Subject: [PATCH 13/18] fix: add assume-role support for generating kubeconfig in multi-account, multi-cluster AWS setups --- helm-chart/examples/aws-multi-cluster.yaml | 4 +++- helm-chart/templates/configmap.yaml | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/helm-chart/examples/aws-multi-cluster.yaml b/helm-chart/examples/aws-multi-cluster.yaml index 875ba7c..95cd4db 100644 --- a/helm-chart/examples/aws-multi-cluster.yaml +++ b/helm-chart/examples/aws-multi-cluster.yaml @@ -58,6 +58,7 @@ kubeconfig: - name: "prod-us-west" clusterName: "company-prod-us-west" region: "us-west-2" + assumeRoleArn: "arn:aws:iam::123456789012:role/ProductionAccountAccessRole" roleArn: "arn:aws:iam::123456789012:role/EKSClusterAdminRole" extraArgs: - "--profile=production" @@ -66,7 +67,8 @@ kubeconfig: # Staging cluster (different account) - name: "staging-us-east" clusterName: "company-staging" - region: "us-east-1" + region: "us-east-1" + assumeRoleArn: "arn:aws:iam::987654321098:role/OrganizationAccountAccessRole" roleArn: "arn:aws:iam::987654321098:role/EKSReadOnlyRole" extraArgs: - "--profile=staging" diff --git a/helm-chart/templates/configmap.yaml b/helm-chart/templates/configmap.yaml index 32fa995..0d1a28c 100644 --- a/helm-chart/templates/configmap.yaml +++ b/helm-chart/templates/configmap.yaml @@ -49,7 +49,7 @@ data: {{- range .Values.kubeconfig.aws.clusters }} # Fetch cluster: {{ .name }} - AWS_CMD="aws eks update-kubeconfig --name {{ .clusterName | quote }} --region {{ .region | quote }}{{- if .roleArn }} --role-arn {{ .roleArn | quote }}{{- end }} --kubeconfig $KUBECONFIG{{- range .extraArgs }} {{ . | quote }}{{- end }}" + AWS_CMD="aws eks update-kubeconfig --name {{ .clusterName | quote }} --region {{ .region | quote }}{{- if .roleArn }} --role-arn {{ .roleArn | quote }}{{- end }}{{- if .assumeRoleArn }} --assume-role-arn {{ .assumeRoleArn | quote }}{{- end }} --kubeconfig $KUBECONFIG{{- range .extraArgs }} {{ . 
| quote }}{{- end }}" retry_command "$AWS_CMD" "Fetching EKS cluster {{ .name }}" {{- end }} {{- end }} From c1a6e59568f1fb7263fad6e93d2739c6fa10d477 Mon Sep 17 00:00:00 2001 From: Krupesh Date: Mon, 6 Oct 2025 22:07:28 +0530 Subject: [PATCH 14/18] fix: example values files --- helm-chart/examples/aws-multi-cluster.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/helm-chart/examples/aws-multi-cluster.yaml b/helm-chart/examples/aws-multi-cluster.yaml index 95cd4db..243ae4d 100644 --- a/helm-chart/examples/aws-multi-cluster.yaml +++ b/helm-chart/examples/aws-multi-cluster.yaml @@ -19,7 +19,9 @@ transport: className: "nginx" annotations: cert-manager.io/cluster-issuer: "letsencrypt-prod" - nginx.ingress.kubernetes.io/ssl-redirect: "true" + alb.ingress.kubernetes.io/ssl-redirect: "443" + alb.ingress.kubernetes.io/healthcheck-path: /health + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP":80,"HTTPS":443}]' hosts: - host: mcp-server.company.com paths: From 8fc7d1460fabbf38a5271bcbe221e013736cba4a Mon Sep 17 00:00:00 2001 From: Paras Patel Date: Sun, 19 Oct 2025 16:39:36 -0700 Subject: [PATCH 15/18] fix: remove Azure support from kubeconfig and related configurations --- helm-chart/Chart.yaml | 1 - .../examples/azure-workload-identity.yaml | 136 ------------------ helm-chart/examples/secure-networkpolicy.yaml | 21 +-- helm-chart/templates/_helpers.tpl | 4 +- helm-chart/templates/configmap.yaml | 53 +------ helm-chart/templates/deployment.yaml | 3 - helm-chart/templates/networkpolicy-tests.yaml | 2 +- helm-chart/templates/networkpolicy.yaml | 21 +-- helm-chart/values.schema.json | 22 +-- helm-chart/values.yaml | 41 +----- 10 files changed, 11 insertions(+), 293 deletions(-) delete mode 100644 helm-chart/examples/azure-workload-identity.yaml diff --git a/helm-chart/Chart.yaml b/helm-chart/Chart.yaml index 577e091..86dd3de 100644 --- a/helm-chart/Chart.yaml +++ b/helm-chart/Chart.yaml @@ -16,6 +16,5 @@ keywords: - k8s - eks - gke - - aks annotations: category: Infrastructure \ No newline at end of file diff --git a/helm-chart/examples/azure-workload-identity.yaml b/helm-chart/examples/azure-workload-identity.yaml deleted file mode 100644 index 6708ac1..0000000 --- a/helm-chart/examples/azure-workload-identity.yaml +++ /dev/null @@ -1,136 +0,0 @@ -# Example: Azure Workload Identity Configuration -# This example shows how to use Azure Workload Identity for secure access to Azure resources -# without storing service principal credentials in the cluster. 
- -# Deploy with: helm install mcp-server ./helm-chart -f examples/azure-workload-identity.yaml - -image: - repository: flux159/mcp-server-kubernetes - tag: "latest" - -# HTTP transport -transport: - mode: "http" - service: - type: ClusterIP - port: 3001 - -# Use ServiceAccount mode with Workload Identity for secure Azure access -kubeconfig: - provider: "serviceaccount" - # No Azure service principal secrets needed - Workload Identity handles authentication - -# Service Account with Azure Workload Identity configuration -serviceAccount: - create: true - annotations: - # Azure Workload Identity annotations - azure.workload.identity/client-id: "12345678-1234-1234-1234-123456789012" - azure.workload.identity/tenant-id: "87654321-4321-4321-4321-210987654321" - azure.workload.identity/use: "true" - -# Pod labels required for Azure Workload Identity -podLabels: - azure.workload.identity/use: "true" - -# Security configuration -security: - allowOnlyNonDestructive: true - - podSecurityContext: - fsGroup: 1000 - runAsNonRoot: true - runAsUser: 1000 - - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - -# RBAC for the ServiceAccount -rbac: - create: true - annotations: - description: "MCP Server with Azure Workload Identity cross-cluster access" - rules: - # Full access to current cluster via ServiceAccount token - - apiGroups: ["*"] - resources: ["*"] - verbs: ["*"] - -# Resource configuration -resources: - limits: - cpu: 500m - memory: 512Mi - requests: - cpu: 100m - memory: 128Mi - -# Environment variables for Azure SDK -env: - # Azure SDK will automatically use Workload Identity credentials - AZURE_CLIENT_ID: "12345678-1234-1234-1234-123456789012" - AZURE_TENANT_ID: "87654321-4321-4321-4321-210987654321" - AZURE_AUTHORITY_HOST: "https://login.microsoftonline.com/" - AZURE_FEDERATED_TOKEN_FILE: "/var/run/secrets/azure/tokens/azure-identity-token" - -# Required setup steps for Azure Workload Identity: -# -# 1. Install Azure Workload Identity on the AKS cluster: -# helm repo add azure-workload-identity https://azure.github.io/azure-workload-identity/charts -# helm install workload-identity-webhook azure-workload-identity/workload-identity-webhook \ -# --namespace azure-workload-identity-system \ -# --create-namespace -# -# 2. Enable OIDC issuer on the AKS cluster: -# az aks update \ -# --resource-group myResourceGroup \ -# --name myAKSCluster \ -# --enable-oidc-issuer \ -# --enable-workload-identity -# -# 3. Create a Managed Identity: -# az identity create \ -# --name mcp-server-identity \ -# --resource-group myResourceGroup \ -# --location eastus -# -# 4. Create federated credential for the Managed Identity: -# az identity federated-credential create \ -# --name mcp-server-federated-credential \ -# --identity-name mcp-server-identity \ -# --resource-group myResourceGroup \ -# --issuer $(az aks show --resource-group myResourceGroup --name myAKSCluster --query "oidcIssuerProfile.issuerUrl" -o tsv) \ -# --subject system:serviceaccount:NAMESPACE:RELEASE_NAME-mcp-server-kubernetes -# -# 5. 
Assign necessary Azure RBAC roles to the Managed Identity: -# az role assignment create \ -# --assignee $(az identity show --resource-group myResourceGroup --name mcp-server-identity --query principalId -o tsv) \ -# --role "Azure Kubernetes Service Cluster User Role" \ -# --scope /subscriptions/SUBSCRIPTION_ID/resourceGroups/myResourceGroup - -# Example Azure role assignments for cross-cluster AKS access: -# - Azure Kubernetes Service Cluster Admin Role (for full cluster access) -# - Azure Kubernetes Service Cluster User Role (for basic access) -# - Azure Kubernetes Service RBAC Admin (for RBAC management) -# - Custom roles for specific resource access - -# Volume mounts for Azure Workload Identity token -volumeMounts: - - name: azure-identity-token - mountPath: "/var/run/secrets/azure/tokens" - readOnly: true - -volumes: - - name: azure-identity-token - projected: - sources: - - serviceAccountToken: - path: azure-identity-token - expirationSeconds: 3600 - audience: api://AzureADTokenExchange \ No newline at end of file diff --git a/helm-chart/examples/secure-networkpolicy.yaml b/helm-chart/examples/secure-networkpolicy.yaml index 4550bdf..c0ea8c6 100644 --- a/helm-chart/examples/secure-networkpolicy.yaml +++ b/helm-chart/examples/secure-networkpolicy.yaml @@ -173,25 +173,6 @@ networkPolicy: ports: - protocol: TCP port: 443 # GCP APIs - - # REQUIRED for Azure: Azure metadata and API access - - to: - - ipBlock: - cidr: 169.254.169.254/32 # Azure metadata service - ports: - - protocol: TCP - port: 80 - - - to: - - ipBlock: - cidr: 0.0.0.0/0 - except: - - 10.0.0.0/8 - - 172.16.0.0/12 - - 192.168.0.0/16 - ports: - - protocol: TCP - port: 443 # Azure APIs # Resource limits resources: @@ -208,7 +189,7 @@ resources: # 2. EGRESS REQUIREMENTS: You MUST allow egress traffic for: # - DNS resolution (kube-dns/coredns) # - Kubernetes API server access -# - Cloud provider APIs (AWS/GCP/Azure) +# - Cloud provider APIs (AWS/GCP) # - Metadata services (169.254.169.254) # 3. 
CLUSTER-SPECIFIC: Adjust CIDR blocks and selectors for your cluster: # - Service CIDR (usually 10.96.0.0/12 or 172.20.0.0/16) diff --git a/helm-chart/templates/_helpers.tpl b/helm-chart/templates/_helpers.tpl index ab5a4a8..ed8cd4e 100644 --- a/helm-chart/templates/_helpers.tpl +++ b/helm-chart/templates/_helpers.tpl @@ -82,8 +82,6 @@ Get the appropriate init container image based on provider with architecture sup {{- $baseImage = "amazon/aws-cli" }} {{- else if eq .Values.kubeconfig.provider "gcp" }} {{- $baseImage = "gcr.io/google.com/cloudsdktool/cloud-sdk" }} -{{- else if eq .Values.kubeconfig.provider "azure" }} -{{- $baseImage = "mcr.microsoft.com/azure-cli" }} {{- else if eq .Values.kubeconfig.provider "url" }} {{- $baseImage = "curlimages/curl" }} {{- else if eq .Values.kubeconfig.provider "custom" }} @@ -103,7 +101,7 @@ Get the appropriate init container image based on provider with architecture sup Determine if we need an init container */}} {{- define "mcp-server-kubernetes.needsInitContainer" -}} -{{- if or (eq .Values.kubeconfig.provider "aws") (eq .Values.kubeconfig.provider "gcp") (eq .Values.kubeconfig.provider "azure") (eq .Values.kubeconfig.provider "url") (eq .Values.kubeconfig.provider "custom") -}} +{{- if or (eq .Values.kubeconfig.provider "aws") (eq .Values.kubeconfig.provider "gcp") (eq .Values.kubeconfig.provider "url") (eq .Values.kubeconfig.provider "custom") -}} true {{- else -}} false diff --git a/helm-chart/templates/configmap.yaml b/helm-chart/templates/configmap.yaml index 0d1a28c..3c0d7d3 100644 --- a/helm-chart/templates/configmap.yaml +++ b/helm-chart/templates/configmap.yaml @@ -104,58 +104,7 @@ data: echo "GCP kubeconfig setup complete" kubectl config get-contexts --kubeconfig=$KUBECONFIG {{- end }} - - {{- if eq .Values.kubeconfig.provider "azure" }} - fetch-azure-kubeconfig.sh: | - #!/bin/sh - set -e - echo "Fetching Azure AKS kubeconfigs..." - - # Retry configuration - MAX_RETRIES=${MAX_RETRIES:-3} - RETRY_DELAY=${RETRY_DELAY:-10} - - export KUBECONFIG=/kubeconfig/kubeconfig - touch $KUBECONFIG - - # Function to retry commands - retry_command() { - local cmd="$1" - local description="$2" - local attempt=1 - - while [ $attempt -le $MAX_RETRIES ]; do - echo "Attempt $attempt/$MAX_RETRIES: $description" - if eval "$cmd"; then - echo "✓ Success: $description" - return 0 - else - if [ $attempt -eq $MAX_RETRIES ]; then - echo "✗ Failed after $MAX_RETRIES attempts: $description" - return 1 - fi - echo "⚠ Attempt $attempt failed, retrying in ${RETRY_DELAY}s..." - sleep $RETRY_DELAY - fi - attempt=$((attempt + 1)) - done - } - - {{- range .Values.kubeconfig.azure.clusters }} - # Fetch cluster: {{ .name }} - AZURE_CMD="az aks get-credentials --name {{ .clusterName | quote }} --resource-group {{ .resourceGroup | quote }}{{- if .subscription }} --subscription {{ .subscription | quote }}{{- end }} --file $KUBECONFIG{{- range .extraArgs }} {{ . 
| quote }}{{- end }}" - retry_command "$AZURE_CMD" "Fetching AKS cluster {{ .name }}" - {{- end }} - - {{- if .Values.kubeconfig.azure.defaultContext }} - echo "Setting default context: {{ .Values.kubeconfig.azure.defaultContext }}" - kubectl config use-context {{ .Values.kubeconfig.azure.defaultContext | quote }} --kubeconfig=$KUBECONFIG - {{- end }} - - echo "Azure kubeconfig setup complete" - kubectl config get-contexts --kubeconfig=$KUBECONFIG - {{- end }} - + {{- if eq .Values.kubeconfig.provider "url" }} fetch-url-kubeconfig.sh: | #!/bin/sh diff --git a/helm-chart/templates/deployment.yaml b/helm-chart/templates/deployment.yaml index 7161f9b..2db1ef3 100644 --- a/helm-chart/templates/deployment.yaml +++ b/helm-chart/templates/deployment.yaml @@ -52,9 +52,6 @@ spec: {{- else if eq .Values.kubeconfig.provider "gcp" }} command: ["/bin/sh"] args: ["/scripts/fetch-gcp-kubeconfig.sh"] - {{- else if eq .Values.kubeconfig.provider "azure" }} - command: ["/bin/sh"] - args: ["/scripts/fetch-azure-kubeconfig.sh"] {{- else if eq .Values.kubeconfig.provider "url" }} command: ["/bin/sh"] args: ["/scripts/fetch-url-kubeconfig.sh"] diff --git a/helm-chart/templates/networkpolicy-tests.yaml b/helm-chart/templates/networkpolicy-tests.yaml index 81d8a55..15365d8 100644 --- a/helm-chart/templates/networkpolicy-tests.yaml +++ b/helm-chart/templates/networkpolicy-tests.yaml @@ -159,7 +159,7 @@ spec: port: 443 # Allow cloud provider API access for kubeconfig tests - {{- if or (eq .Values.kubeconfig.provider "aws") (eq .Values.kubeconfig.provider "gcp") (eq .Values.kubeconfig.provider "azure") }} + {{- if or (eq .Values.kubeconfig.provider "aws") (eq .Values.kubeconfig.provider "gcp") }} - to: - ipBlock: cidr: 0.0.0.0/0 diff --git a/helm-chart/templates/networkpolicy.yaml b/helm-chart/templates/networkpolicy.yaml index 146c95b..f2ee943 100644 --- a/helm-chart/templates/networkpolicy.yaml +++ b/helm-chart/templates/networkpolicy.yaml @@ -139,35 +139,16 @@ spec: port: 443 {{- end }} {{- end }} - {{- if .Values.networkPolicy.cloudProviderApi.azureCidrs }} - # Azure API access - {{- range .Values.networkPolicy.cloudProviderApi.azureCidrs }} - - to: - - ipBlock: - cidr: {{ . 
}} - ports: - - protocol: TCP - port: 443 - {{- end }} - {{- end }} {{- end }} {{- end }} {{- if .Values.networkPolicy.metadata.enabled }} # Auto-generated metadata service egress rules - to: - ipBlock: - cidr: {{ .Values.networkPolicy.metadata.awsGcpMetadata }} + cidr: {{ .Values.networkPolicy.metadata.cloudMetadata }} ports: - protocol: TCP port: 80 - {{- if ne .Values.networkPolicy.metadata.awsGcpMetadata .Values.networkPolicy.metadata.azureMetadata }} - - to: - - ipBlock: - cidr: {{ .Values.networkPolicy.metadata.azureMetadata }} - ports: - - protocol: TCP - port: 80 - {{- end }} {{- end }} {{- if .Values.networkPolicy.egress }} # User-defined egress rules diff --git a/helm-chart/values.schema.json b/helm-chart/values.schema.json index cd8b625..82adc8c 100644 --- a/helm-chart/values.schema.json +++ b/helm-chart/values.schema.json @@ -74,7 +74,7 @@ "kubeconfig": { "type": "object", "properties": { - "provider": {"type": "string", "enum": ["aws", "gcp", "azure", "url", "serviceaccount", "custom", "content"]}, + "provider": {"type": "string", "enum": ["aws", "gcp", "url", "serviceaccount", "custom", "content"]}, "aws": { "type": "object", "properties": { @@ -120,26 +120,6 @@ "defaultContext": {"type": "string"} } }, - "azure": { - "type": "object", - "properties": { - "clusters": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": {"type": "string"}, - "clusterName": {"type": "string"}, - "resourceGroup": {"type": "string"}, - "subscription": {"type": "string"}, - "extraArgs": {"type": "array", "items": {"type": "string"}} - }, - "required": ["name", "clusterName", "resourceGroup"] - } - }, - "defaultContext": {"type": "string"} - } - }, "url": { "type": "object", "properties": { diff --git a/helm-chart/values.yaml b/helm-chart/values.yaml index 6ecc6e8..3944c81 100644 --- a/helm-chart/values.yaml +++ b/helm-chart/values.yaml @@ -83,9 +83,9 @@ transport: # Kubeconfig configuration kubeconfig: - # Provider type: aws, gcp, azure, url, serviceaccount, custom, content + # Provider type: aws, gcp, url, serviceaccount, custom, content provider: "serviceaccount" - + # AWS EKS configuration aws: clusters: [] @@ -105,7 +105,7 @@ kubeconfig: # extraArgs: # - "--profile=staging" defaultContext: "" - + # GCP GKE configuration gcp: clusters: [] @@ -123,23 +123,6 @@ kubeconfig: # project: "company-staging" defaultContext: "" - # Azure AKS configuration - azure: - clusters: [] - # Example: - # clusters: - # - name: "prod-cluster" - # clusterName: "prod-aks" - # resourceGroup: "prod-rg" - # subscription: "prod-sub-id" - # extraArgs: - # - "--admin" - # - name: "dev-cluster" - # clusterName: "dev-aks" - # resourceGroup: "dev-rg" - # subscription: "dev-sub-id" - defaultContext: "" - # URL-based configuration url: configs: [] @@ -245,11 +228,6 @@ serviceAccount: # annotations: # iam.gke.io/gcp-service-account: "mcp-server@my-project.iam.gserviceaccount.com" # - # Azure Workload Identity: - # annotations: - # azure.workload.identity/client-id: "12345678-1234-1234-1234-123456789012" - # azure.workload.identity/tenant-id: "87654321-4321-4321-4321-210987654321" - # azure.workload.identity/use: "true" # # The name of the service account to use. 
# If not set and create is true, a name is generated using the fullname template @@ -541,10 +519,6 @@ networkPolicy: # podSelector: # matchLabels: # k8s-app: kube-dns - # Azure AKS DNS: - # podSelector: - # matchLabels: - # k8s-app: kube-dns # Custom DNS: # podSelector: # matchLabels: @@ -559,7 +533,6 @@ networkPolicy: # Alternative CIDRs for different environments: # GKE default: "10.96.0.0/12" # EKS default: "10.100.0.0/16" or "172.20.0.0/16" - # AKS default: "10.0.0.0/16" # Custom: specify your cluster's service CIDR # Cloud provider API access (for kubeconfig providers) @@ -576,17 +549,13 @@ networkPolicy: # gcpCidrs: # - "35.199.0.0/16" # Google APIs # - "199.36.153.8/30" # metadata.google.internal - # azureCidrs: - # - "20.0.0.0/8" # Azure public cloud # Metadata services access metadata: # Enable access to cloud metadata services enabled: true - # AWS/GCP metadata service - awsGcpMetadata: "169.254.169.254/32" - # Azure metadata service - azureMetadata: "169.254.169.254/32" + # Cloud provider metadata service (AWS/GCP use same IP: 169.254.169.254) + cloudMetadata: "169.254.169.254/32" # Ingress rules - traffic coming TO the pod # By default, all ingress is DENIED. Add rules to allow specific traffic. From c767278ab641ce41c021b3dafe4ffe18de37332b Mon Sep 17 00:00:00 2001 From: Paras Patel Date: Sun, 19 Oct 2025 16:40:09 -0700 Subject: [PATCH 16/18] fix: remove Azure support from kubeconfig and related test scripts --- helm-chart/{HELM_INSTALL.md => README.md} | 8 ---- helm-chart/templates/NOTES.txt | 5 --- .../templates/tests/test-kubeconfig.yaml | 3 -- .../tests/test-scripts-configmap.yaml | 40 +------------------ 4 files changed, 1 insertion(+), 55 deletions(-) rename helm-chart/{HELM_INSTALL.md => README.md} (97%) diff --git a/helm-chart/HELM_INSTALL.md b/helm-chart/README.md similarity index 97% rename from helm-chart/HELM_INSTALL.md rename to helm-chart/README.md index 44bdac4..22addc6 100644 --- a/helm-chart/HELM_INSTALL.md +++ b/helm-chart/README.md @@ -111,14 +111,6 @@ helm install mcp-server-k8s ./helm-chart \ --set serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="mcp-server@my-project.iam.gserviceaccount.com" ``` -### Azure Workload Identity -```bash -helm install mcp-server-k8s ./helm-chart \ - --set serviceAccount.annotations."azure\.workload\.identity/client-id"="12345678-1234-1234-1234-123456789012" \ - --set serviceAccount.annotations."azure\.workload\.identity/tenant-id"="87654321-4321-4321-4321-210987654321" \ - --set serviceAccount.annotations."azure\.workload\.identity/use"="true" -``` - ## Security Configuration ### Non-Destructive Mode (Safe Operations Only) diff --git a/helm-chart/templates/NOTES.txt b/helm-chart/templates/NOTES.txt index 8eae4bf..3605f3f 100644 --- a/helm-chart/templates/NOTES.txt +++ b/helm-chart/templates/NOTES.txt @@ -84,11 +84,6 @@ {{- range .Values.kubeconfig.gcp.clusters }} - {{ .name }}: {{ .clusterName }} ({{ .zone | default .region }}) {{- end }} - {{- else if eq .Values.kubeconfig.provider "azure" }} - Azure Clusters: {{ len .Values.kubeconfig.azure.clusters }} - {{- range .Values.kubeconfig.azure.clusters }} - - {{ .name }}: {{ .clusterName }} ({{ .resourceGroup }}) - {{- end }} {{- else if eq .Values.kubeconfig.provider "url" }} URL Configs: {{ len .Values.kubeconfig.url.configs }} {{- range .Values.kubeconfig.url.configs }} diff --git a/helm-chart/templates/tests/test-kubeconfig.yaml b/helm-chart/templates/tests/test-kubeconfig.yaml index 3b26e0e..709e014 100644 --- a/helm-chart/templates/tests/test-kubeconfig.yaml 
+++ b/helm-chart/templates/tests/test-kubeconfig.yaml @@ -29,9 +29,6 @@ spec: {{- else if eq .Values.kubeconfig.provider "gcp" }} command: ["/bin/sh"] args: ["/scripts/test-gcp-kubeconfig.sh"] - {{- else if eq .Values.kubeconfig.provider "azure" }} - command: ["/bin/sh"] - args: ["/scripts/test-azure-kubeconfig.sh"] {{- else if eq .Values.kubeconfig.provider "url" }} command: ["/bin/sh"] args: ["/scripts/test-url-kubeconfig.sh"] diff --git a/helm-chart/templates/tests/test-scripts-configmap.yaml b/helm-chart/templates/tests/test-scripts-configmap.yaml index cacb2d9..242e083 100644 --- a/helm-chart/templates/tests/test-scripts-configmap.yaml +++ b/helm-chart/templates/tests/test-scripts-configmap.yaml @@ -89,45 +89,7 @@ data: echo "GCP kubeconfig test completed successfully" {{- end }} - - {{- if eq .Values.kubeconfig.provider "azure" }} - test-azure-kubeconfig.sh: | - #!/bin/sh - set -e - echo "Testing Azure AKS kubeconfig fetch..." - - export KUBECONFIG=/kubeconfig/kubeconfig - touch $KUBECONFIG - - {{- range .Values.kubeconfig.azure.clusters }} - echo "Testing cluster: {{ .name }}" - az aks show \ - --name {{ .clusterName | quote }} \ - --resource-group {{ .resourceGroup | quote }} \ - {{- if .subscription }} - --subscription {{ .subscription | quote }} \ - {{- end }} - >/dev/null || { - echo "ERROR: Cannot access AKS cluster {{ .name }}" - exit 1 - } - echo "✓ AKS cluster {{ .name }} is accessible" - - az aks get-credentials \ - --name {{ .clusterName | quote }} \ - --resource-group {{ .resourceGroup | quote }} \ - {{- if .subscription }} - --subscription {{ .subscription | quote }} \ - {{- end }} - --file $KUBECONFIG \ - {{- range .extraArgs }} - {{ . | quote }} \ - {{- end }} - {{- end }} - - echo "Azure kubeconfig test completed successfully" - {{- end }} - + {{- if eq .Values.kubeconfig.provider "url" }} test-url-kubeconfig.sh: | #!/bin/sh From e9814e404c18d8b3e2abfdfec497abbbcc97a39c Mon Sep 17 00:00:00 2001 From: Paras Patel Date: Sun, 19 Oct 2025 16:57:36 -0700 Subject: [PATCH 17/18] feat: add examples for custom, generic, and URL-based kubeconfig configurations --- helm-chart/examples/custom-kubeconfig.yaml | 372 ++++++++++++++++++++ helm-chart/examples/generic-kubeconfig.yaml | 215 +++++++++++ helm-chart/examples/url-kubeconfig.yaml | 243 +++++++++++++ 3 files changed, 830 insertions(+) create mode 100644 helm-chart/examples/custom-kubeconfig.yaml create mode 100644 helm-chart/examples/generic-kubeconfig.yaml create mode 100644 helm-chart/examples/url-kubeconfig.yaml diff --git a/helm-chart/examples/custom-kubeconfig.yaml b/helm-chart/examples/custom-kubeconfig.yaml new file mode 100644 index 0000000..9f29b68 --- /dev/null +++ b/helm-chart/examples/custom-kubeconfig.yaml @@ -0,0 +1,372 @@ +# Example: Custom Command Kubeconfig Configuration +# This example shows how to use a custom command/script to generate kubeconfig +# Useful for complex scenarios like: +# - Custom authentication workflows +# - Integration with proprietary credential systems +# - Dynamic kubeconfig generation based on runtime parameters +# - Multi-step authentication processes + +# Deploy with: helm install mcp-server ./helm-chart -f examples/custom-kubeconfig.yaml + +image: + repository: flux159/mcp-server-kubernetes + tag: "latest" + +# HTTP transport for web accessibility +transport: + mode: "http" + service: + type: ClusterIP + port: 3001 + ingress: + enabled: false + +# Custom command-based kubeconfig generation +kubeconfig: + provider: "custom" + + custom: + # Path to your custom script or binary + # 
This should be available in the container or mounted via volumeMounts + command: "/usr/local/bin/generate-kubeconfig.sh" + + # Arguments to pass to the command + args: + - "--output=/shared/kubeconfig" + - "--format=yaml" + - "--clusters=prod,staging,dev" + - "--context=prod" + + # Alternative examples: + # Example 1: Python script for custom authentication + # command: "/usr/bin/python3" + # args: + # - "/scripts/fetch_kubeconfig.py" + # - "--vault-addr=${VAULT_ADDR}" + # - "--vault-token=${VAULT_TOKEN}" + # - "--output=/shared/kubeconfig" + + # Example 2: Custom binary for proprietary system + # command: "/opt/company/bin/k8s-config-fetcher" + # args: + # - "generate" + # - "--user=${SERVICE_ACCOUNT}" + # - "--clusters=all" + # - "--output-path=/shared/kubeconfig" + + # Example 3: Shell script with multiple steps + # command: "/bin/bash" + # args: + # - "/scripts/multi-step-config.sh" + + # Environment variables available to the custom command + env: + # Vault integration example + VAULT_ADDR: "https://vault.example.com" + VAULT_TOKEN: "your-vault-token" + VAULT_NAMESPACE: "admin/kubernetes" + + # Custom system credentials + SERVICE_ACCOUNT: "mcp-server" + API_ENDPOINT: "https://config-api.example.com" + API_KEY: "your-api-key" + + # Cluster selection + CLUSTER_ENVIRONMENT: "production" + CLUSTER_REGION: "us-east-1" + + # Output configuration + KUBECONFIG_OUTPUT_PATH: "/shared/kubeconfig" + + # Init container retry configuration + initContainer: + maxRetries: 5 + retryDelay: 15 + resources: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 100m + memory: 128Mi + +# Security configuration +security: + allowOnlyNonDestructive: false + allowOnlyReadonly: false + + podSecurityContext: + fsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # Note: readOnlyRootFilesystem may need to be false if your script + # needs to write temporary files. Use /tmp for temp files. 
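+    # If readOnlyRootFilesystem stays true, /tmp is read-only as well unless a
+    # writable emptyDir is mounted there. A minimal sketch using this chart's
+    # volumes/volumeMounts values (defined further below in this example):
+    #   volumes:
+    #     - name: tmp
+    #       emptyDir: {}
+    #   volumeMounts:
+    #     - name: tmp
+    #       mountPath: /tmp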
+ readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# ServiceAccount configuration +serviceAccount: + create: true + annotations: {} + # If using cloud provider workload identity: + # annotations: + # eks.amazonaws.com/role-arn: "arn:aws:iam::123456789:role/custom-config-role" + # iam.gke.io/gcp-service-account: "config-fetcher@project.iam.gserviceaccount.com" + +# RBAC for local cluster (minimal permissions) +rbac: + create: true + annotations: + description: "MCP Server with custom kubeconfig command" + + rules: + # Minimal read access to local cluster + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch"] + +# Mount custom scripts via ConfigMap or Secret +volumes: + # Mount custom script from ConfigMap + - name: custom-scripts + configMap: + name: kubeconfig-generator-scripts + defaultMode: 0755 # Make scripts executable + + # Mount credentials from Secret + - name: custom-creds + secret: + secretName: custom-kubeconfig-credentials + defaultMode: 0400 # Read-only for security + +volumeMounts: + # Mount scripts directory + - name: custom-scripts + mountPath: /usr/local/bin/generate-kubeconfig.sh + subPath: generate-kubeconfig.sh + readOnly: true + + # Mount credentials + - name: custom-creds + mountPath: /var/secrets/custom + readOnly: true + +# Resource limits +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + +# Network Policy - Allow egress to custom services +networkPolicy: + enabled: true + + dns: + enabled: true + + kubernetesApi: + enabled: true + serviceCidr: "10.96.0.0/12" + + # Allow egress to custom authentication/config services + egress: + # Allow HTTPS to external services (Vault, custom APIs, etc.) + - to: + - ipBlock: + cidr: 0.0.0.0/0 + ports: + - protocol: TCP + port: 443 + - protocol: TCP + port: 8200 # Vault default port + + # Allow internal service access + - to: + - namespaceSelector: + matchLabels: + name: auth-system + ports: + - protocol: TCP + port: 443 + +# Health checks +livenessProbe: + enabled: true + tcpSocket: + port: 3001 + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + +readinessProbe: + enabled: true + tcpSocket: + port: 3001 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + +# Labels and annotations +podLabels: + app: mcp-server-kubernetes + kubeconfig-type: custom + +podAnnotations: + description: "MCP Server with custom kubeconfig command" + +commonLabels: + managed-by: helm + component: mcp-server + +# SETUP INSTRUCTIONS: +# +# 1. Create your custom script as a ConfigMap: +# kubectl create configmap kubeconfig-generator-scripts \ +# --from-file=generate-kubeconfig.sh=./path/to/your/script.sh +# +# 2. Create secrets for credentials: +# kubectl create secret generic custom-kubeconfig-credentials \ +# --from-literal=vault-token='your-token' \ +# --from-literal=api-key='your-key' +# +# 3. Deploy the chart: +# helm install mcp-server ./helm-chart -f examples/custom-kubeconfig.yaml + +--- +# Example ConfigMap with custom script +# Create this before deploying the Helm chart: +# kubectl apply -f this-section-below +apiVersion: v1 +kind: ConfigMap +metadata: + name: kubeconfig-generator-scripts +data: + generate-kubeconfig.sh: | + #!/bin/bash + set -euo pipefail + + # Example custom kubeconfig generation script + # This is a template - customize for your needs + + OUTPUT_PATH="${1:-/shared/kubeconfig}" + + echo "Generating kubeconfig..." 
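+
+    # Assumed contract for this example script: write a complete kubeconfig to
+    # ${OUTPUT_PATH} (defaults to /shared/kubeconfig, matching the --output arg and
+    # KUBECONFIG_OUTPUT_PATH env var set in this example) and exit non-zero on
+    # failure so the initContainer maxRetries/retryDelay settings can retry the fetch.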
+
+    # Example 1: Fetch from Vault
+    if [ -n "${VAULT_ADDR:-}" ] && [ -n "${VAULT_TOKEN:-}" ]; then
+      echo "Fetching kubeconfig from Vault..."
+      curl -s -H "X-Vault-Token: ${VAULT_TOKEN}" \
+        "${VAULT_ADDR}/v1/secret/data/kubernetes/config" \
+        | jq -r '.data.data.kubeconfig' > "${OUTPUT_PATH}"
+    fi
+
+    # Example 2: Call custom API
+    if [ -n "${API_ENDPOINT:-}" ] && [ -n "${API_KEY:-}" ]; then
+      echo "Fetching kubeconfig from custom API..."
+      curl -s -H "X-API-Key: ${API_KEY}" \
+        "${API_ENDPOINT}/api/v1/kubeconfig?cluster=${CLUSTER_ENVIRONMENT}" \
+        > "${OUTPUT_PATH}"
+    fi
+
+    # Example 3: Generate from templates and credentials
+    if [ -f "/var/secrets/custom/cluster-cert" ]; then
+      echo "Generating kubeconfig from credentials..."
+      cat > "${OUTPUT_PATH}" <<EOF
+    apiVersion: v1
+    kind: Config
+    # clusters, contexts, and users assembled from the files in /var/secrets/custom go here
+    EOF
+    fi
+
+# USAGE NOTES:
+#
+# 1. Troubleshooting:
+#    - Check init container logs: kubectl logs <pod-name> -c fetch-kubeconfig
+#    - Check script output and errors
+#    - Verify environment variables are set correctly
+#    - Ensure volumes are mounted properly
+#    - Test script locally before deploying
+#
+# 2. Security Best Practices:
+#    - Never hardcode credentials in scripts
+#    - Use Kubernetes Secrets for sensitive data
+#    - Minimize script privileges
+#    - Validate input parameters
+#    - Use HTTPS for external API calls
+#    - Implement proper error handling
+#    - Log securely (don't log secrets)
+#
+# 3. Advanced Examples:
+#    - Multi-cluster from service discovery
+#    - Credential rotation with CertManager
+#    - Integration with external CMDB
+#    - Dynamic RBAC policy generation
+#    - Cross-cloud authentication
diff --git a/helm-chart/examples/generic-kubeconfig.yaml b/helm-chart/examples/generic-kubeconfig.yaml
new file mode 100644
index 0000000..da6447d
--- /dev/null
+++ b/helm-chart/examples/generic-kubeconfig.yaml
@@ -0,0 +1,215 @@
+# Example: Generic Kubernetes Kubeconfig Configuration
+# This example shows how to use a standard kubeconfig file with the MCP server
+# Useful for on-premises clusters, custom Kubernetes distributions, or any cluster
+# where you have direct kubeconfig access.
+
+# Deploy with: helm install mcp-server ./helm-chart -f examples/generic-kubeconfig.yaml
+
+image:
+  repository: flux159/mcp-server-kubernetes
+  tag: "latest"
+
+# HTTP transport for web accessibility
+transport:
+  mode: "http"
+  service:
+    type: ClusterIP
+    port: 3001
+  ingress:
+    enabled: false
+    # If enabling ingress, use streaming-friendly annotations:
+    # annotations:
+    #   nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
+    #   nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+    #   nginx.ingress.kubernetes.io/proxy-buffering: "off"
+
+# Direct kubeconfig content
+kubeconfig:
+  provider: "content"
+
+  # Paste your kubeconfig content here
+  # This can be a single cluster or multiple clusters with contexts
+  content: |
+    apiVersion: v1
+    kind: Config
+    clusters:
+    - cluster:
+        certificate-authority-data: LS0tLS1CRUdJTi...
+        server: https://your-k8s-api-server:6443
+      name: my-cluster
+    contexts:
+    - context:
+        cluster: my-cluster
+        user: my-user
+        namespace: default
+      name: my-context
+    current-context: my-context
+    users:
+    - name: my-user
+      user:
+        client-certificate-data: LS0tLS1CRUdJTi...
+        client-key-data: LS0tLS1CRUdJTi...
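+  # The content above is a single-cluster example. A multi-cluster kubeconfig is
+  # pasted the same way; illustrative sketch only, every name below is a placeholder:
+  #
+  # content: |
+  #   apiVersion: v1
+  #   kind: Config
+  #   clusters:
+  #   - cluster: {server: "https://prod-api.example.com:6443", certificate-authority-data: LS0tLS1CRUdJTi...}
+  #     name: prod
+  #   - cluster: {server: "https://staging-api.example.com:6443", certificate-authority-data: LS0tLS1CRUdJTi...}
+  #     name: staging
+  #   contexts:
+  #   - context: {cluster: prod, user: admin}
+  #     name: prod
+  #   - context: {cluster: staging, user: admin}
+  #     name: staging
+  #   current-context: prod
+  #   users:
+  #   - name: admin
+  #     user: {token: "..."}
+  # The current-context is used by default; switch clusters at runtime with the kubectl_context tool.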
+ + # If your kubeconfig needs specific environment variables + env: {} + # Example: + # env: + # KUBERNETES_SERVICE_HOST: "api.k8s.example.com" + # KUBERNETES_SERVICE_PORT: "443" + +# Security configuration - adjust based on your needs +security: + # Non-destructive mode prevents deletion operations + allowOnlyNonDestructive: false + # Read-only mode for monitoring/observability use cases + allowOnlyReadonly: false + # Whitelist specific tools (optional) + allowedTools: "" + + podSecurityContext: + fsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# ServiceAccount configuration +# Note: When using content provider, the ServiceAccount only affects pod permissions +# in the current cluster, not the target cluster specified in kubeconfig +serviceAccount: + create: true + annotations: {} + +# RBAC for local cluster operations (if needed) +rbac: + create: true + annotations: + description: "MCP Server with generic kubeconfig" + + # Minimal RBAC rules for local cluster (if ServiceAccount is used for anything) + rules: + # Basic read access to local cluster resources + - apiGroups: [""] + resources: ["pods", "services", "configmaps"] + verbs: ["get", "list", "watch"] + + # Events read-only + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch"] + +# Resource limits +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + +# Network Policy (optional - enable for security) +networkPolicy: + enabled: false + + # DNS and Kubernetes API access + dns: + enabled: true + kubernetesApi: + enabled: true + # Adjust serviceCidr to match your cluster + serviceCidr: "10.96.0.0/12" + + # Allow egress to your Kubernetes API server + egress: + # Allow HTTPS to your API server + - to: + - ipBlock: + cidr: 0.0.0.0/0 + ports: + - protocol: TCP + port: 443 + - protocol: TCP + port: 6443 + +# Health checks +livenessProbe: + enabled: true + tcpSocket: + port: 3001 + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + +readinessProbe: + enabled: true + tcpSocket: + port: 3001 + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + +# Additional labels +podLabels: + app: mcp-server-kubernetes + kubeconfig-type: generic + +# Additional annotations +podAnnotations: + description: "MCP Server with generic kubeconfig" + +# Common labels for all resources +commonLabels: + managed-by: helm + component: mcp-server + +# Node selector (optional) +nodeSelector: {} + +# Tolerations (optional) +tolerations: [] + +# Affinity rules (optional) +affinity: {} + +# USAGE NOTES: +# +# 1. Obtaining your kubeconfig: +# - From existing file: cat ~/.kube/config +# - From managed cluster: Use cloud provider CLI to get credentials +# - From kubeadm: sudo cat /etc/kubernetes/admin.conf +# +# 2. Multi-cluster kubeconfig: +# You can include multiple clusters and contexts in the content field. +# The MCP server will respect the current-context setting, and you can +# switch contexts using the kubectl_context tool. +# +# 3. Security considerations: +# - Store sensitive kubeconfig in Kubernetes Secrets, not in values files +# - Use RBAC to limit what the kubeconfig user can do in the target cluster +# - Enable networkPolicy to restrict egress to only necessary endpoints +# - Consider using certificate-based auth over token-based auth +# +# 4. 
Alternative: Using Secrets +# Instead of putting kubeconfig in content field, you can mount a secret: +# a) Create secret: kubectl create secret generic kubeconfig --from-file=config=~/.kube/config +# b) Add volume mount in values: +# volumes: +# - name: kubeconfig +# secret: +# secretName: kubeconfig +# volumeMounts: +# - name: kubeconfig +# mountPath: /home/node/.kube +# readOnly: true +# env: +# KUBECONFIG: "/home/node/.kube/config" +# c) Set provider to: "serviceaccount" (it will use KUBECONFIG env var) diff --git a/helm-chart/examples/url-kubeconfig.yaml b/helm-chart/examples/url-kubeconfig.yaml new file mode 100644 index 0000000..90ec657 --- /dev/null +++ b/helm-chart/examples/url-kubeconfig.yaml @@ -0,0 +1,243 @@ +# Example: URL-based Kubeconfig Configuration +# This example shows how to fetch kubeconfig from a remote URL +# Useful for centralized configuration management, secret stores, or S3 buckets +# where kubeconfig files are stored and need to be retrieved dynamically. + +# Deploy with: helm install mcp-server ./helm-chart -f examples/url-kubeconfig.yaml + +image: + repository: flux159/mcp-server-kubernetes + tag: "latest" + +# HTTP transport for web accessibility +transport: + mode: "http" + service: + type: ClusterIP + port: 3001 + ingress: + enabled: false + +# URL-based kubeconfig fetching +kubeconfig: + provider: "url" + + url: + configs: + # Example 1: Fetch from S3 bucket with pre-signed URL + - name: "prod-cluster" + url: "https://my-bucket.s3.amazonaws.com/kubeconfigs/prod-cluster.yaml" + extraArgs: [] + + # Example 2: Fetch from authenticated HTTP server + - name: "staging-cluster" + url: "https://config-server.example.com/kubeconfig/staging.yaml" + extraArgs: + - "--header=Authorization: Bearer ${CONFIG_SERVER_TOKEN}" + + # Example 3: Fetch from internal service with custom headers + - name: "dev-cluster" + url: "http://internal-config-service.config-system.svc.cluster.local/kubeconfig" + extraArgs: + - "--header=X-API-Key: ${DEV_API_KEY}" + - "--header=X-Environment: development" + + # Environment variables for authentication and configuration + env: + # Tokens/credentials for authenticating to the config server + CONFIG_SERVER_TOKEN: "your-bearer-token-here" + DEV_API_KEY: "your-api-key-here" + # Additional environment variables as needed + HTTP_PROXY: "" + HTTPS_PROXY: "" + NO_PROXY: "" + + # Init container configuration for retrying failed fetches + initContainer: + maxRetries: 5 + retryDelay: 10 + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 50m + memory: 64Mi + +# Security configuration +security: + allowOnlyNonDestructive: false + allowOnlyReadonly: false + + podSecurityContext: + fsGroup: 1000 + runAsNonRoot: true + runAsUser: 1000 + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# ServiceAccount configuration +serviceAccount: + create: true + annotations: {} + +# RBAC for local cluster (minimal permissions) +rbac: + create: true + annotations: + description: "MCP Server with URL-based kubeconfig" + + rules: + # Minimal read access to local cluster + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch"] + +# Resource limits +resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + +# Network Policy - Allow egress to config servers +networkPolicy: + enabled: true + + dns: + enabled: true + + kubernetesApi: + enabled: true + serviceCidr: "10.96.0.0/12" + + # Allow 
egress to configuration servers and S3 + egress: + # Allow HTTPS to external config servers + - to: + - ipBlock: + cidr: 0.0.0.0/0 + ports: + - protocol: TCP + port: 443 + - protocol: TCP + port: 80 + + # Allow internal service access (if using in-cluster config service) + - to: + - namespaceSelector: + matchLabels: + name: config-system + ports: + - protocol: TCP + port: 80 + - protocol: TCP + port: 443 + +# Health checks +livenessProbe: + enabled: true + tcpSocket: + port: 3001 + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + +readinessProbe: + enabled: true + tcpSocket: + port: 3001 + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + +# Labels and annotations +podLabels: + app: mcp-server-kubernetes + kubeconfig-type: url + +podAnnotations: + description: "MCP Server with URL-based kubeconfig fetching" + +commonLabels: + managed-by: helm + component: mcp-server + +# USAGE NOTES: +# +# 1. URL Sources: +# - AWS S3: s3://bucket/path or pre-signed HTTPS URLs +# - Google Cloud Storage: https://storage.googleapis.com/bucket/path +# - Azure Blob Storage: https://account.blob.core.windows.net/container/path +# - HTTP/HTTPS servers: Any accessible web server +# - Internal Kubernetes services: http://service.namespace.svc.cluster.local/path +# +# 2. Authentication Methods: +# a) Pre-signed URLs (recommended for cloud storage) +# b) Bearer tokens in Authorization header +# c) API keys in custom headers +# d) Basic authentication (not recommended) +# e) IAM/Workload Identity (for cloud storage) +# +# 3. Using AWS S3 with IAM Roles: +# serviceAccount: +# annotations: +# eks.amazonaws.com/role-arn: "arn:aws:iam::123456789:role/s3-config-reader" +# kubeconfig: +# url: +# configs: +# - name: "cluster" +# url: "https://my-bucket.s3.amazonaws.com/kubeconfig.yaml" +# +# 4. Using GCS with Workload Identity: +# serviceAccount: +# annotations: +# iam.gke.io/gcp-service-account: "config-reader@project.iam.gserviceaccount.com" +# kubeconfig: +# url: +# configs: +# - name: "cluster" +# url: "https://storage.googleapis.com/my-bucket/kubeconfig.yaml" +# +# 5. Security Best Practices: +# - Use HTTPS for external URLs +# - Store credentials in Kubernetes Secrets, not in values files: +# kubectl create secret generic config-server-creds \ +# --from-literal=token='your-token' \ +# --from-literal=api-key='your-key' +# - Reference secrets in deployment via envFrom +# - Use short-lived credentials when possible +# - Enable NetworkPolicy to restrict egress +# - Rotate credentials regularly +# +# 6. Using Secrets for Credentials: +# Create secret first: +# kubectl create secret generic url-kubeconfig-creds \ +# --from-literal=CONFIG_SERVER_TOKEN='token-value' \ +# --from-literal=DEV_API_KEY='key-value' +# +# Then in your deployment, add envFrom to reference the secret +# (modify deployment.yaml or use extraEnvFrom if chart supports it) +# +# 7. Multi-cluster Configuration: +# The init container fetches all configured URLs and merges them into +# a single kubeconfig file. You can switch between clusters using the +# kubectl_context tool. +# +# 8. 
Troubleshooting:
#    - Check init container logs: kubectl logs <pod-name> -c fetch-kubeconfig
#    - Verify URL is accessible from cluster
#    - Check authentication headers and credentials
#    - Ensure NetworkPolicy allows egress to config server
#    - Verify retry settings if fetch is intermittent

From 79e6cf11beebe4ed0fd7b8a72c0a7537d8275581 Mon Sep 17 00:00:00 2001
From: Paras Patel
Date: Mon, 20 Oct 2025 20:08:52 -0700
Subject: [PATCH 18/18] fix: add architecture property to image schema in
 values.schema.json

---
 helm-chart/values.schema.json | 1 +
 1 file changed, 1 insertion(+)

diff --git a/helm-chart/values.schema.json b/helm-chart/values.schema.json
index 82adc8c..059090a 100644
--- a/helm-chart/values.schema.json
+++ b/helm-chart/values.schema.json
@@ -7,6 +7,7 @@
     "image": {
       "type": "object",
       "properties": {
+        "architecture": {"type": "string"},
         "repository": {"type": "string"},
         "pullPolicy": {"type": "string", "enum": ["Always", "IfNotPresent", "Never"]},
         "tag": {"type": "string"}
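
The new `image.architecture` value is only declared in the schema by this patch; whether
the templates use it to select a per-architecture image tag or a node selector is up to
the chart, so treat the following as a hypothetical override that assumes the deployment
template reads `.Values.image.architecture`:

```bash
helm install mcp-server ./helm-chart \
  --set image.repository=flux159/mcp-server-kubernetes \
  --set image.tag=latest \
  --set image.architecture=arm64
```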