Skip to content

Commit 0267145

Browse files
authored
[126] Kubectl: remove unnecessary shenanigans for MFA (#183)
* remove unnecessary shenanigans for MFA
* fix MFA mess
* create kube config directory if missing
* update tests
* restoring refresh_aws_credentials decorator approach
* no longer required
* fix decorator + add mfa/sso tests
* back to the context-manager approach
* get correct region from path rather than assuming it
* avoid cwd property by now, it breaks tf command
* unnecessary import
1 parent 1a3b82a commit 0267145

File tree

4 files changed

+118
-66
lines changed

4 files changed

+118
-66
lines changed

leverage/_utils.py

Lines changed: 16 additions & 45 deletions
Original file line number | Diff line number | Diff line change
@@ -1,14 +1,9 @@
11
"""
22
General use utilities.
33
"""
4-
import functools
54
from subprocess import run
65
from subprocess import PIPE
76

8-
from click.exceptions import Exit
9-
10-
from leverage import logger
11-
127

138
def clean_exception_traceback(exception):
149
""" Delete special local variables from all frames of an exception's traceback
@@ -67,7 +62,6 @@ class CustomEntryPoint:
6762
Set a custom entrypoint on the container while entering the context.
6863
Once outside, return it to its original value.
6964
"""
70-
7165
def __init__(self, container, entrypoint):
7266
self.container = container
7367
self.old_entrypoint = container.entrypoint
@@ -80,54 +74,31 @@ def __exit__(self, *args, **kwargs):
8074
self.container.entrypoint = self.old_entrypoint
8175

8276

83-
class EmptyEntryPoint(CustomEntryPoint):
77+
class AwsCredsEntryPoint(CustomEntryPoint):
8478
"""
85-
Force an empty entrypoint. This will let you execute any commands freely.
79+
Fetching AWS credentials by setting the SSO/MFA entrypoints.
80+
This works as a replacement of _prepare_container.
8681
"""
8782

8883
def __init__(self, container):
89-
super(EmptyEntryPoint, self).__init__(container, entrypoint="")
90-
91-
92-
def refresh_aws_credentials(func):
93-
"""
94-
Use this decorator in the case you want to make sure you will have fresh tokens to interact with AWS
95-
during the execution of your wrapped method.
96-
"""
97-
@functools.wraps(func)
98-
def wrapper(*args, **kwargs):
99-
container = args[0] # this is the "self" of the method you are decorating; a LeverageContainer instance
100-
10184
if container.sso_enabled:
10285
container._check_sso_token()
103-
auth_method = container.TF_SSO_ENTRYPOINT
86+
auth_method = f"{container.TF_SSO_ENTRYPOINT} -- "
10487
elif container.mfa_enabled:
105-
auth_method = container.TF_MFA_ENTRYPOINT
106-
# TODO: ask why this was necessary
88+
auth_method = f"{container.TF_MFA_ENTRYPOINT} -- "
10789
container.environment.update({
10890
"AWS_SHARED_CREDENTIALS_FILE": container.environment["AWS_SHARED_CREDENTIALS_FILE"].replace("tmp", ".aws"),
10991
"AWS_CONFIG_FILE": container.environment["AWS_CONFIG_FILE"].replace("tmp", ".aws"),
11092
})
11193
else:
112-
# no auth method found: skip the refresh
113-
return func(*args, **kwargs)
114-
115-
logger.info("Fetching AWS credentials...")
116-
with CustomEntryPoint(container, f"{auth_method} -- echo"):
117-
# this simple echo "Fetching..." will run the SSO/MFA entrypoints underneath
118-
# that takes care of the token refresh
119-
exit_code = container._start("Fetching done.")
120-
if exit_code:
121-
raise Exit(exit_code)
122-
if container.mfa_enabled:
123-
# we need to revert to the original values, otherwise other tools that rely on awscli, like kubectl
124-
# won't find the credentials
125-
container.environment.update({
126-
"AWS_SHARED_CREDENTIALS_FILE": container.environment["AWS_SHARED_CREDENTIALS_FILE"].replace(".aws", "tmp"),
127-
"AWS_CONFIG_FILE": container.environment["AWS_CONFIG_FILE"].replace(".aws", "tmp"),
128-
})
129-
130-
# we should have a valid token at this point, now execute the original method
131-
return func(*args, **kwargs)
132-
133-
return wrapper
94+
auth_method = ""
95+
96+
super(AwsCredsEntryPoint, self).__init__(container, entrypoint=auth_method)
97+
98+
def __exit__(self, *args, **kwargs):
99+
super(AwsCredsEntryPoint, self).__exit__(*args, **kwargs)
100+
if self.container.mfa_enabled:
101+
self.container.environment.update({
102+
"AWS_SHARED_CREDENTIALS_FILE": self.container.environment["AWS_SHARED_CREDENTIALS_FILE"].replace(".aws", "tmp"),
103+
"AWS_CONFIG_FILE": self.container.environment["AWS_CONFIG_FILE"].replace(".aws", "tmp"),
104+
})

leverage/container.py

Lines changed: 36 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,6 @@
11
import json
22
import os
3+
import re
34
from pathlib import Path
45
from datetime import datetime
56

@@ -21,6 +22,15 @@
2122
from leverage.path import NotARepositoryError
2223
from leverage.conf import load as load_env
2324

25+
REGION = (
26+
r"(.*)" # project folder
27+
# start region
28+
r"(global|(?:[a-z]{2}-(?:gov-)?"
29+
r"(?:central|north|south|east|west|northeast|northwest|southeast|southwest|secret|topsecret)-[1-4]))"
30+
# end region
31+
r"(.*)" # layer
32+
)
33+
2434

2535
def get_docker_client():
2636
""" Attempt to get a Docker client from the environment configuration. Halt application otherwise.
@@ -167,6 +177,18 @@ def backend_tfvars(self):
167177
def guest_aws_credentials_dir(self):
168178
return f"/root/tmp/{self.project}"
169179

180+
@property
181+
def region(self):
182+
"""
183+
Return the region of the layer.
184+
"""
185+
if matches := re.match(REGION, self.cwd.as_posix()):
186+
# the region (group 1) is between the projects folders (group 0) and the layers (group 2)
187+
return matches.groups()[1]
188+
189+
logger.exception(f"No valid region could be found at: {self.cwd.as_posix()}")
190+
raise Exit(1)
191+
170192
def ensure_image(self):
171193
""" Make sure the required Docker image is available in the system. If not, pull it from registry. """
172194
found_image = self.client.api.images(f"{self.image}:{self.image_tag}")
@@ -269,6 +291,20 @@ def run_func(client, container):
269291

270292
return self._run(container, run_func)
271293

294+
def _start_with_output(self, command="/bin/bash", *args):
295+
"""
296+
Same than _start but also returns the outputs (by dumping the logs) of the container.
297+
"""
298+
container = self._create_container(True, command, *args)
299+
300+
def run_func(client, container):
301+
dockerpty.start(client=client.api, container=container)
302+
exit_code = client.api.inspect_container(container)["State"]["ExitCode"]
303+
logs = client.api.logs(container).decode("utf-8")
304+
return exit_code, logs
305+
306+
return self._run(container, run_func)
307+
272308
def start(self, command="/bin/sh", *arguments):
273309
""" Run command with the given arguments in an interactive container.
274310

leverage/containers/kubectl.py

Lines changed: 15 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,7 @@
66
from docker.types import Mount
77

88
from leverage import logger
9-
from leverage._utils import chain_commands, EmptyEntryPoint, refresh_aws_credentials
9+
from leverage._utils import chain_commands, AwsCredsEntryPoint
1010
from leverage.container import TerraformContainer
1111

1212

@@ -22,52 +22,52 @@ def __init__(self, client):
2222

2323
self.entrypoint = self.KUBECTL_CLI_BINARY
2424

25-
host_config_path = str(Path.home() / Path(f".kube/{self.project}"))
25+
self.host_kubectl_config_dir = Path.home() / Path(f".kube/{self.project}")
26+
if not self.host_kubectl_config_dir.exists():
27+
# make sure the folder exists before mounting it
28+
self.host_kubectl_config_dir.mkdir(parents=True)
29+
2630
self.container_config["host_config"]["Mounts"].append(
2731
# the container is expecting a file named "config" here
2832
Mount(
29-
source=host_config_path,
33+
source=str(self.host_kubectl_config_dir),
3034
target=str(self.KUBECTL_CONFIG_PATH),
3135
type="bind",
3236
)
3337
)
3438

35-
@refresh_aws_credentials
3639
def start_shell(self):
37-
with EmptyEntryPoint(self):
38-
self._start()
40+
with AwsCredsEntryPoint(self):
41+
self._start("/bin/bash")
3942

40-
@refresh_aws_credentials
4143
def configure(self):
4244
# make sure we are on the cluster layer
4345
self.check_for_layer_location()
4446

4547
logger.info("Retrieving k8s cluster information...")
46-
with EmptyEntryPoint(self):
47-
# generate the command that will configure the new cluster
48+
# generate the command that will configure the new cluster
49+
with AwsCredsEntryPoint(self):
4850
add_eks_cluster_cmd = self._get_eks_kube_config()
4951
# and the command that will set the proper ownership on the config file (otherwise the owner will be "root")
5052
change_owner_cmd = self._change_kube_file_owner_cmd()
5153
full_cmd = chain_commands([add_eks_cluster_cmd, change_owner_cmd])
5254

5355
logger.info("Configuring context...")
54-
with EmptyEntryPoint(self):
55-
# we use _start here because in the case of MFA it will ask for the token
56+
with AwsCredsEntryPoint(self):
5657
exit_code = self._start(full_cmd)
5758
if exit_code:
5859
raise Exit(exit_code)
5960

6061
logger.info("Done.")
6162

6263
def _get_eks_kube_config(self) -> str:
63-
exit_code, output = self._exec(f"{self.TF_BINARY} output")
64+
exit_code, output = self._start_with_output(f"{self.TF_BINARY} output -no-color")
6465
if exit_code:
6566
logger.error(output)
6667
raise Exit(exit_code)
6768

68-
aws_eks_cmd = next(op for op in output.split("\n") if op.startswith("aws eks update-kubeconfig"))
69-
# assuming the cluster container is on the primary region
70-
return aws_eks_cmd + f" --region {self.common_conf['region_primary']}"
69+
aws_eks_cmd = next(op for op in output.split("\r\n") if op.startswith("aws eks update-kubeconfig"))
70+
return aws_eks_cmd + f" --region {self.region}"
7171

7272
def _get_user_group_id(self, user_id) -> int:
7373
user = pwd.getpwuid(user_id)

tests/test_containers/test_kubectl.py

Lines changed: 51 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -33,9 +33,9 @@ def kubectl_container(muted_click_context):
3333

3434

3535
def test_get_eks_kube_config(kubectl_container):
36-
tf_output = "\naws eks update-kubeconfig --name test-cluster --profile test-profile\n"
37-
with patch.object(kubectl_container, "_exec", return_value=(0, tf_output)):
38-
kubectl_container.common_conf["region_primary"] = "us-east-1"
36+
tf_output = "\r\naws eks update-kubeconfig --name test-cluster --profile test-profile\r\n"
37+
with patch.object(kubectl_container, "_start_with_output", return_value=(0, tf_output)):
38+
kubectl_container.cwd = Path("/project/account/us-east-1/cluster")
3939
cmd = kubectl_container._get_eks_kube_config()
4040

4141
assert cmd == AWS_EKS_UPDATE_KUBECONFIG
@@ -45,7 +45,7 @@ def test_get_eks_kube_config_tf_output_error(kubectl_container):
4545
"""
4646
Test that if the TF OUTPUT fails, we get an error back.
4747
"""
48-
with patch.object(kubectl_container, "_exec", return_value=(1, "ERROR!")):
48+
with patch.object(kubectl_container, "_start_with_output", return_value=(1, "ERROR!")):
4949
with pytest.raises(Exit):
5050
kubectl_container._get_eks_kube_config()
5151

@@ -85,7 +85,7 @@ def test_start_shell(kubectl_container):
8585
container_args = kubectl_container.client.api.create_container.call_args[1]
8686

8787
# we want a shell, so -> /bin/bash with no entrypoint
88-
assert container_args["command"] == "/bin/sh"
88+
assert container_args["command"] == "/bin/bash"
8989
assert container_args["entrypoint"] == ""
9090

9191
# make sure we are pointing to the AWS credentials
@@ -106,8 +106,53 @@ def test_start_shell(kubectl_container):
106106
@patch.object(KubeCtlContainer, "check_for_layer_location", Mock())
107107
# nor terraform
108108
@patch.object(KubeCtlContainer, "_get_eks_kube_config", Mock(return_value=AWS_EKS_UPDATE_KUBECONFIG))
109-
def test_configure(kubectl_container, caplog):
109+
def test_configure(kubectl_container):
110110
with patch.object(kubectl_container, "_start", return_value=0) as mock_start:
111111
kubectl_container.configure()
112112

113113
assert mock_start.call_args[0][0] == f'bash -c "{AWS_EKS_UPDATE_KUBECONFIG} && chown 1234:5678 /root/.kube/config"'
114+
115+
116+
#####################
117+
# test auth methods #
118+
#####################
119+
120+
def test_start_shell_mfa(kubectl_container):
121+
"""
122+
Make sure the command is executed through the proper MFA script.
123+
"""
124+
# container = KubeCtlContainer(docker_client, env_conf=dict(MFA_ENABLED="true", **FAKE_ENV))
125+
# container._run = Mock()
126+
127+
kubectl_container.enable_mfa()
128+
# mock the __exit__ of the context manager to avoid the restoration of the values
129+
# otherwise the asserts around /.aws/ wouldn't be possible
130+
with patch("leverage._utils.AwsCredsEntryPoint.__exit__"):
131+
kubectl_container.start_shell()
132+
container_args = kubectl_container.client.api.create_container.call_args[1]
133+
134+
# we want a shell, so -> /bin/bash with no entrypoint
135+
assert container_args["command"] == "/bin/bash"
136+
assert container_args["entrypoint"] == "/root/scripts/aws-mfa/aws-mfa-entrypoint.sh -- "
137+
138+
# make sure we are pointing to the right AWS credentials: /.aws/ folder for MFA
139+
assert container_args["environment"]["AWS_CONFIG_FILE"] == "/root/.aws/test/config"
140+
assert container_args["environment"]["AWS_SHARED_CREDENTIALS_FILE"] == "/root/.aws/test/credentials"
141+
142+
143+
def test_start_shell_sso(kubectl_container):
144+
"""
145+
Make sure the command is executed through the proper SSO script.
146+
"""
147+
kubectl_container.enable_sso()
148+
kubectl_container._check_sso_token = Mock(return_value=True)
149+
kubectl_container.start_shell()
150+
container_args = kubectl_container.client.api.create_container.call_args[1]
151+
152+
# we want a shell, so -> /bin/bash with no entrypoint
153+
assert container_args["command"] == "/bin/bash"
154+
assert container_args["entrypoint"] == "/root/scripts/aws-sso/aws-sso-entrypoint.sh -- "
155+
156+
# make sure we are pointing to the right AWS credentials: /tmp/ folder for SSO
157+
assert container_args["environment"]["AWS_CONFIG_FILE"] == "/root/tmp/test/config"
158+
assert container_args["environment"]["AWS_SHARED_CREDENTIALS_FILE"] == "/root/tmp/test/credentials"

0 commit comments

Comments (0)