Skip to content

OCPBUGS-44637: Mount proxy certificate to node-joiner pod #1965

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
71 changes: 71 additions & 0 deletions pkg/cli/admin/nodeimage/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -798,5 +798,76 @@ func (o *CreateOptions) configurePodProxySetting(ctx context.Context, pod *corev
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, proxyVars...)
}

// An external proxy may be configured with a self-signed certificate.
// In such cases, the cluster's proxy would be configured to contain
// the certificate by including it in the additionalTrustBundle in the
// install-config.
//
// If the certificate is configured, it is saved
// to the "user-ca-bundle" config map in the "openshift-config"
// namespace.
//
// The certificate must be made available to the node-joiner pod
// and in its namespace so that the certificate can be read
// when the proxy is accessed by node-joiner to pull container
// images.

if proxy.Status.HTTPProxy == "" && proxy.Status.HTTPSProxy == "" {
// proxy is not configured, the user-ca-bundle does not need
// to be copied to the node-joiner namespace.
return nil
}

klog.V(2).Infof("Copying user-ca-bundle to node-joiner pod")

userCABundle, err := o.Client.CoreV1().ConfigMaps("openshift-config").Get(ctx, "user-ca-bundle", metav1.GetOptions{})
if err != nil {
if kapierrors.IsNotFound(err) {
// The self-signed certificate could be missing if
// the external proxy was not configured with one.
klog.V(2).Infof("user-ca-bundle not found")
return nil
}
klog.V(2).Infof("Error reading user-ca-bundle from openshift-config namespace: %v", err)
return err
}

cm := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "user-ca-bundle",
Namespace: o.nodeJoinerNamespace.GetName(),
},
Data: userCABundle.Data,
}

_, err = o.Client.CoreV1().ConfigMaps(o.nodeJoinerNamespace.GetName()).Create(ctx, cm, metav1.CreateOptions{})

if err != nil {
klog.V(2).Infof("Error writing user-ca-bundle to %s namespace: %v", o.nodeJoinerNamespace, err)
return err
}

pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{
Name: "user-ca-bundle",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: "user-ca-bundle",
},
Items: []corev1.KeyToPath{
// The path must be tls-ca-bundle.pem or the container's
// CA trust flow will not be able to read the certificate.
{Key: "ca-bundle.crt", Path: "tls-ca-bundle.pem"},
},
},
},
})

pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
Name: "user-ca-bundle",
MountPath: "/etc/pki/ca-trust/extracted/pem",
})

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Given that this seems to be the reasonable motivation for the issue, there is one point that is not clear to me: why must the oc command be responsible for extracting a certificate and injecting it into the node-joiner container?
Couldn't this be managed directly by the node-joiner tool itself?
Personally, I'd prefer to avoid adding extra logic in the oc wrapper layer beyond what is required for the proper execution of the container (for the rest, the node-joiner must be able to work on its own as well)

return nil
}
86 changes: 71 additions & 15 deletions pkg/cli/admin/nodeimage/create_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,7 @@ func TestRun(t *testing.T) {
generatePXEFiles bool

objects func(string, string) []runtime.Object
configObjects func(string, string) []runtime.Object
remoteExecOutput string

expectedErrorCode int
Expand All @@ -148,23 +149,23 @@ func TestRun(t *testing.T) {
{
name: "default",
nodesConfig: defaultNodesConfigYaml,
objects: defaultClusterVersionObjectFn,
configObjects: defaultClusterVersionObjectFn,
assetsDir: "/my-working-dir",
generatePXEFiles: false,
expectedRsyncInclude: []string{"*.iso"},
},
{
name: "default pxe",
nodesConfig: defaultNodesConfigYaml,
objects: defaultClusterVersionObjectFn,
configObjects: defaultClusterVersionObjectFn,
assetsDir: "/my-working-dir",
generatePXEFiles: true,
expectedRsyncInclude: []string{"*.img", "*.*vmlinuz", "*.ipxe"},
},
{
name: "node-joiner tool failure",
nodesConfig: defaultNodesConfigYaml,
objects: defaultClusterVersionObjectFn,
name: "node-joiner tool failure",
nodesConfig: defaultNodesConfigYaml,
configObjects: defaultClusterVersionObjectFn,
remoteExecOutput: createCmdOutput(t, report{
stageHeader: stageHeader{
EndTime: time.Date(2024, 11, 14, 0, 0, 0, 0, time.UTC),
Expand All @@ -180,7 +181,7 @@ func TestRun(t *testing.T) {
{
name: "node-joiner unsupported prior to 4.17",
nodesConfig: defaultNodesConfigYaml,
objects: ClusterVersion_4_16_ObjectFn,
configObjects: ClusterVersion_4_16_ObjectFn,
expectedError: fmt.Sprintf("the 'oc adm node-image' command is only available for OpenShift versions %s and later", nodeJoinerMinimumSupportedVersion),
},
{
Expand All @@ -191,7 +192,7 @@ func TestRun(t *testing.T) {
{
name: "use proxy settings when defined",
nodesConfig: defaultNodesConfigYaml,
objects: func(repo, manifestDigest string) []runtime.Object {
configObjects: func(repo, manifestDigest string) []runtime.Object {
objs := defaultClusterVersionObjectFn(repo, manifestDigest)
return append(objs, &configv1.Proxy{
ObjectMeta: metav1.ObjectMeta{
Expand Down Expand Up @@ -229,16 +230,65 @@ func TestRun(t *testing.T) {
}
},
},
{
name: "node-joiner pod should mount user-ca-bundle as a volume if it is available and a proxy is configured",
nodesConfig: defaultNodesConfigYaml,
objects: func(repo, manifestDigest string) []runtime.Object {
return []runtime.Object{
&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "user-ca-bundle",
Namespace: "openshift-config",
},
Data: map[string]string{
"ca-bundle.crt": "certificate-contents",
},
}}
},
configObjects: func(repo, manifestDigest string) []runtime.Object {
objs := defaultClusterVersionObjectFn(repo, manifestDigest)
objs = append(objs, &configv1.Proxy{
ObjectMeta: metav1.ObjectMeta{
Name: "cluster",
},
Status: configv1.ProxyStatus{
HTTPProxy: "http://192.168.111.1:8215",
HTTPSProxy: "https://192.168.111.1:8215",
},
})
return objs
},
expectedPod: func(t *testing.T, pod *corev1.Pod) {
containsUserCABundleVolume := false
for _, vol := range pod.Spec.Volumes {
if vol.Name == "user-ca-bundle" {
containsUserCABundleVolume = true
}
}
if !containsUserCABundleVolume {
t.Error("expected pod to contain user-ca-bundle volume but it doesn't")
}
containsVolumeMount := false
for _, volMount := range pod.Spec.Containers[0].VolumeMounts {
if volMount.Name == "user-ca-bundle" {
containsVolumeMount = true
}
}
if !containsVolumeMount {
t.Error("expected pod to contain user-ca-bundle volume mount, but it doesn't")
}
},
},
{
name: "basic report for ocp < 4.18",
nodesConfig: defaultNodesConfigYaml,
objects: ClusterVersion_4_17_ObjectFn,
configObjects: ClusterVersion_4_17_ObjectFn,
remoteExecOutput: "0",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fakeReg, fakeClient, fakeRestConfig, fakeRemoteExec := createFakes(t, nodeJoinerContainer)
fakeReg, fakeClient, fakeRestConfig, fakeRemoteExec := createFakes(t, nodeJoinerContainer, tc.objects)
defer fakeReg.Close()

// Create the fake filesystem, required to provide the command input config file.
Expand All @@ -250,8 +300,8 @@ func TestRun(t *testing.T) {
}
// Allow the test case to use the right digest created by the fake registry.
objs := []runtime.Object{}
if tc.objects != nil {
objs = tc.objects(fakeReg.URL()[len("https://"):], fakeReg.fakeManifestDigest)
if tc.configObjects != nil {
objs = tc.configObjects(fakeReg.URL()[len("https://"):], fakeReg.fakeManifestDigest)
}

fakeRemoteExec.execOut = createCmdOutput(t, report{
Expand Down Expand Up @@ -539,14 +589,20 @@ func (f *fakeRemoteExecutor) Execute(url *url.URL, config *restclient.Config, st
return f.execErr
}

func createFakes(t *testing.T, podName string) (*fakeRegistry, *fake.Clientset, *restclient.Config, *fakeRemoteExecutor) {
func createFakes(t *testing.T, podName string, clientObjs func(string, string) []runtime.Object) (*fakeRegistry, *fake.Clientset, *restclient.Config, *fakeRemoteExecutor) {
// Create the fake registry. It will provide the required manifests and image data,
// when looking for the baremetal-installer image pullspec.
fakeReg := newFakeRegistry(t)

fakeClient := fake.NewSimpleClientset()
// When creating a pod, it's necessary to set a proper name. Also, to simulate the pod execution, its container status
// is moved to a terminal state.
// Allow the test case to use the right digest created by the fake registry.
objects := []runtime.Object{}
if clientObjs != nil {
objects = clientObjs(fakeReg.URL()[len("https://"):], fakeReg.fakeManifestDigest)
}

fakeClient := fake.NewSimpleClientset(objects...)
// When creating a pod, it's necessary to set a proper name. Also, to simulate the pod execution, its container status
// is moved to a terminal state.
fakeClient.PrependReactor("create", "pods", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
createAction, _ := action.(clientgotesting.CreateAction)
pod := createAction.GetObject().(*corev1.Pod)
Expand Down
2 changes: 1 addition & 1 deletion pkg/cli/admin/nodeimage/monitor_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ func TestMonitorRun(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fakeReg, fakeClient, fakeRestConfig, fakeRemoteExec := createFakes(t, nodeJoinerMonitorContainer)
fakeReg, fakeClient, fakeRestConfig, fakeRemoteExec := createFakes(t, nodeJoinerMonitorContainer, nil)
defer fakeReg.Close()

// Allow the test case to use the right digest created by the fake registry.
Expand Down