e2e/common.go (129 changes: 94 additions & 35 deletions)
@@ -5,6 +5,7 @@ import (
"io"
"os/exec"
"strings"
"sync"
"syscall"
"time"

@@ -13,7 +14,7 @@ import (
)

const (
StartCommandWait = 30 * time.Second
StartCommandWait = 15 * time.Second
RunCommandTimeout = 60 * time.Second
)

@@ -32,39 +33,56 @@ func StartCommand(log *logrus.Entry, commandName string, arg ...string) (string,
outPipe, _ := cmd.StdoutPipe()
errPipe, _ := cmd.StderrPipe()

var sb strings.Builder
var sbOut strings.Builder
var sbErr strings.Builder

go func(_ io.ReadCloser) {
reader := bufio.NewReader(errPipe)
line, err := reader.ReadString('\n')
for err == nil {
sb.WriteString(line)
line, err = reader.ReadString('\n')
for {
line, err := reader.ReadString('\n')
// Write line even if there's an error, as long as we got data
if len(line) > 0 {
sbErr.WriteString(line)
}
if err != nil {
break
}
}
}(errPipe)

go func(_ io.ReadCloser) {
reader := bufio.NewReader(outPipe)
line, err := reader.ReadString('\n')
for err == nil {
sb.WriteString(line)
line, err = reader.ReadString('\n')
for {
line, err := reader.ReadString('\n')
// Write line even if there's an error, as long as we got data
if len(line) > 0 {
sbOut.WriteString(line)
}
if err != nil {
break
}
}
}(outPipe)

// start async
go func() {
log.Debug("Starting async ...")
_, err := pty.Start(cmd)
ptmx, err := pty.Start(cmd)
if err != nil {
log.Errorf("Start returned error: %v", err)
return
}
// Note: PTY is intentionally NOT closed here as command continues running
// Keep the PTY file descriptor alive to prevent SIGHUP
_ = ptmx // Keep reference to prevent premature PTY closure
}()

log.Debugf("Waiting %v ...", StartCommandWait)
time.Sleep(StartCommandWait)

log.Debug("Returning result while command still running")
return sb.String(), nil
// Combine stderr first (errors more visible), then stdout
return sbErr.String() + sbOut.String(), nil
}
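The new read loops above replace the old `for err == nil` pattern, which dropped a final line that had no trailing newline; now any data returned alongside the error is still written. A minimal standalone sketch of that loop (the readInto helper name is hypothetical, not part of this PR):

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// readInto drains r line by line into sb and keeps any partial final
// line that arrives together with io.EOF or another read error.
func readInto(r io.Reader, sb *strings.Builder) {
	reader := bufio.NewReader(r)
	for {
		line, err := reader.ReadString('\n')
		if len(line) > 0 {
			sb.WriteString(line) // keep the data even on a partial read
		}
		if err != nil { // io.EOF or a closed pipe ends the loop
			return
		}
	}
}

func main() {
	var sb strings.Builder
	readInto(strings.NewReader("line one\nno trailing newline"), &sb)
	fmt.Print(sb.String()) // both lines are captured
}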

// run command with tty support and wait for stop
@@ -77,44 +95,65 @@ func RunCommand(log *logrus.Entry, commandName string, arg ...string) (string, e
outPipe, _ := cmd.StdoutPipe()
errPipe, _ := cmd.StderrPipe()

var sb strings.Builder
var sbOut strings.Builder
var sbErr strings.Builder
var wg sync.WaitGroup

wg.Add(2)
go func(_ io.ReadCloser) {
defer wg.Done()
reader := bufio.NewReader(errPipe)
line, err := reader.ReadString('\n')
for err == nil {
sb.WriteString(line)
line, err = reader.ReadString('\n')
for {
line, err := reader.ReadString('\n')
// Write line even if there's an error, as long as we got data
if len(line) > 0 {
sbErr.WriteString(line)
}
if err != nil {
break
}
}
}(errPipe)

go func(_ io.ReadCloser) {
defer wg.Done()
reader := bufio.NewReader(outPipe)
line, err := reader.ReadString('\n')
for err == nil {
sb.WriteString(line)
line, err = reader.ReadString('\n')
for {
line, err := reader.ReadString('\n')
// Write line even if there's an error, as long as we got data
if len(line) > 0 {
sbOut.WriteString(line)
}
if err != nil {
break
}
}
}(outPipe)

log.Debug("Starting ...")
_, err := pty.Start(cmd)
ptmx, err := pty.Start(cmd)
if err != nil {
log.Errorf("Start returned error: %v", err)
return "", err
}
defer ptmx.Close() // Ensure PTY is closed after command finishes

log.Debug("Waiting ...")
err = cmd.Wait()
if err != nil {
log.Errorf("Wait returned error: %v", err)
}

log.Debug("Waiting for output goroutines to finish...")
wg.Wait()

// TODO: find why this returns -1. That may be related to pty implementation
/*if cmd.ProcessState.ExitCode() != 0 {
return sb.String(), fmt.Errorf("Cmd returned code %d", cmd.ProcessState.ExitCode())
return sbErr.String() + sbOut.String(), fmt.Errorf("Cmd returned code %d", cmd.ProcessState.ExitCode())
}*/

return sb.String(), nil
// Combine stderr first (errors more visible), then stdout
return sbErr.String() + sbOut.String(), nil
}
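RunCommand now waits on a sync.WaitGroup before returning, so both pipe-reader goroutines are guaranteed to have drained their pipes before the builders are read; the Wait also provides the happens-before edge that makes reading sbOut and sbErr race-free. A minimal sketch of the pattern (the literal strings are placeholders):

package main

import (
	"fmt"
	"strings"
	"sync"
)

func main() {
	var sbOut, sbErr strings.Builder
	var wg sync.WaitGroup

	wg.Add(2) // one per output stream, added before the goroutines start
	go func() {
		defer wg.Done()
		sbErr.WriteString("stderr: something went wrong\n")
	}()
	go func() {
		defer wg.Done()
		sbOut.WriteString("stdout: regular output\n")
	}()

	wg.Wait() // both writers are done; reading the builders is now safe

	// stderr first so errors are more visible, then stdout
	fmt.Print(sbErr.String() + sbOut.String())
}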

// run command with tty support and terminate it after timeout
@@ -137,22 +176,38 @@ func RunCommandAndTerminate(log *logrus.Entry, commandName string, arg ...string
})
defer timer.Stop()

var sb strings.Builder
var sbOut strings.Builder
var sbErr strings.Builder
var wg sync.WaitGroup

wg.Add(2)
go func(_ io.ReadCloser) {
defer wg.Done()
reader := bufio.NewReader(errPipe)
line, err := reader.ReadString('\n')
for err == nil {
sb.WriteString(line)
line, err = reader.ReadString('\n')
for {
line, err := reader.ReadString('\n')
// Write line even if there's an error, as long as we got data
if len(line) > 0 {
sbErr.WriteString(line)
}
if err != nil {
break
}
}
}(errPipe)

go func(_ io.ReadCloser) {
defer wg.Done()
reader := bufio.NewReader(outPipe)
line, err := reader.ReadString('\n')
for err == nil {
sb.WriteString(line)
line, err = reader.ReadString('\n')
for {
line, err := reader.ReadString('\n')
// Write line even if there's an error, as long as we got data
if len(line) > 0 {
sbOut.WriteString(line)
}
if err != nil {
break
}
}
}(outPipe)

@@ -178,10 +233,14 @@ func RunCommandAndTerminate(log *logrus.Entry, commandName string, arg ...string
log.Errorf("Wait returned error: %v", err)
}

log.Debug("Waiting for output goroutines to finish...")
wg.Wait()

// TODO: find why this returns -1. That may be related to pty implementation
/*if cmd.ProcessState.ExitCode() != 0 {
return sb.String(), fmt.Errorf("Cmd returned code %d", cmd.ProcessState.ExitCode())
return sbErr.String() + sbOut.String(), fmt.Errorf("Cmd returned code %d", cmd.ProcessState.ExitCode())
}*/

return sb.String(), nil
// Combine stderr first (errors more visible), then stdout
return sbErr.String() + sbOut.String(), nil
}
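The PTY lifetime now differs between the helpers above: StartCommand keeps the returned master file referenced and never closes it, because closing the master would deliver SIGHUP to the still-running child, while RunCommand defers ptmx.Close() since it waits for the command to exit. A contrast sketch of the two lifecycles, assuming the github.com/creack/pty package (the exact pty import is collapsed in this diff):

package main

import (
	"log"
	"os/exec"
	"time"

	"github.com/creack/pty" // assumption: the collapsed import block uses this package
)

// fireAndForget mirrors StartCommand: the PTY master is kept referenced
// and NOT closed, because closing it would SIGHUP the running child.
func fireAndForget() {
	cmd := exec.Command("sleep", "5")
	ptmx, err := pty.Start(cmd)
	if err != nil {
		log.Fatalf("start: %v", err)
	}
	_ = ptmx // keep the master open for the lifetime of the child
}

// runToCompletion mirrors RunCommand: the command is waited on,
// so the master can safely be closed once Wait returns.
func runToCompletion() {
	cmd := exec.Command("echo", "done")
	ptmx, err := pty.Start(cmd)
	if err != nil {
		log.Fatalf("start: %v", err)
	}
	defer ptmx.Close()
	if err := cmd.Wait(); err != nil {
		log.Printf("wait: %v", err)
	}
}

func main() {
	fireAndForget()
	runToCompletion()
	time.Sleep(time.Second) // give the detached child a moment before exiting
}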
e2e/integration-tests/cli.go (30 changes: 20 additions & 10 deletions)
@@ -6,6 +6,7 @@ import (
"context"
"io/fs"
"os"
"path/filepath"
"time"

"k8s.io/apimachinery/pkg/api/errors"
@@ -19,15 +20,16 @@ import (
const (
PollInterval = 5 * time.Second
PollTimeout = 10 * time.Minute
outputDir = "./output/flow"
)

var (
clog = logrus.WithField("component", "cli")
)

func isNamespace(clientset *kubernetes.Clientset, cliNS string, exists bool) (bool, error) {
err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(context.Context) (done bool, err error) {
namespace, err := getNamespace(clientset, cliNS)
err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(ctx context.Context) (done bool, err error) {
namespace, err := getNamespace(ctx, clientset, cliNS)
if exists {
if err != nil {
return false, err
@@ -36,7 +38,7 @@ func isNamespace(clientset *kubernetes.Clientset, cliNS string, exists bool) (bo
} else if errors.IsNotFound(err) {
return true, nil
}
return false, err
return false, nil
})
if err != nil {
return false, err
@@ -45,8 +47,8 @@ func isNamespace(clientset *kubernetes.Clientset, cliNS string, exists bool) (bo
}

func isCollector(clientset *kubernetes.Clientset, cliNS string, ready bool) (bool, error) {
err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(context.Context) (done bool, err error) {
collectorPod, err := getNamespacePods(clientset, cliNS, &metav1.ListOptions{FieldSelector: "status.phase=Running", LabelSelector: "run=collector"})
err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(ctx context.Context) (done bool, err error) {
collectorPod, err := getNamespacePods(ctx, clientset, cliNS, &metav1.ListOptions{FieldSelector: "status.phase=Running", LabelSelector: "run=collector"})
if err != nil {
return false, err
}
@@ -62,9 +64,13 @@ func isCollector(clientset *kubernetes.Clientset, cliNS string, ready bool) (boo
}

func isDaemonsetReady(clientset *kubernetes.Clientset, daemonsetName string, cliNS string) (bool, error) {
err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(context.Context) (done bool, err error) {
cliDaemonset, err := getDaemonSet(clientset, daemonsetName, cliNS)
err := wait.PollUntilContextTimeout(context.Background(), PollInterval, PollTimeout, true, func(ctx context.Context) (done bool, err error) {
cliDaemonset, err := getDaemonSet(ctx, clientset, daemonsetName, cliNS)
if err != nil {
if errors.IsNotFound(err) {
clog.Infof("daemonset not found %v", err)
return false, nil
}
return false, err
}
return cliDaemonset.Status.DesiredNumberScheduled == cliDaemonset.Status.NumberReady, nil
@@ -110,22 +116,26 @@ func isCLIDone(clientset *kubernetes.Clientset, cliNS string) (bool, error) {
func getFlowsJSONFile() (string, error) {
// var files []fs.DirEntry
var files []string
outputDir := "./output/flow/"
dirFS := os.DirFS(outputDir)

files, err := fs.Glob(dirFS, "*.json")
if err != nil {
return "", err
}
// this could be problematic if two tests are running in parallel with --copy=true
var mostRecentFile fs.FileInfo
for _, file := range files {
fileInfo, err := os.Stat(outputDir + file)
fileInfo, err := os.Stat(filepath.Join(outputDir, file))
if err != nil {
return "", err
}
if mostRecentFile == nil || fileInfo.ModTime().After(mostRecentFile.ModTime()) {
mostRecentFile = fileInfo
}
}
return outputDir + mostRecentFile.Name(), nil
absPath, err := filepath.Abs(filepath.Join(outputDir, mostRecentFile.Name()))
if err != nil {
return "", err
}
return absPath, nil
}
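In cli.go the poll callbacks now accept and forward the ctx that PollUntilContextTimeout supplies, instead of issuing API calls with a fresh context, so a poll that times out also cancels the in-flight request. A minimal sketch of the helper's shape (the condition body is a placeholder for calls such as getNamespace(ctx, ...)):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Poll every second for up to five seconds. The ctx handed to the
	// condition is tied to the poll's timeout, so any API call made with
	// it is cancelled as soon as the poll gives up.
	err := wait.PollUntilContextTimeout(context.Background(), time.Second, 5*time.Second, true,
		func(ctx context.Context) (bool, error) {
			// Placeholder for e.g. getNamespace(ctx, clientset, cliNS):
			// return (false, nil) to keep polling, (true, nil) when the
			// condition is met, or a non-nil error to abort immediately.
			return time.Since(start) > 3*time.Second, nil
		})
	fmt.Println("poll finished, err =", err)
}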
e2e/integration-tests/cluster.go (24 changes: 12 additions & 12 deletions)
@@ -39,7 +39,7 @@ func getNewClient() (*kubernetes.Clientset, error) {
}

func getClusterNodes(clientset *kubernetes.Clientset, options *metav1.ListOptions) ([]string, error) {
nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), *options)
nodes, err := clientset.CoreV1().Nodes().List(context.Background(), *options)
var allNodes []string
if err != nil {
return allNodes, err
@@ -50,8 +50,8 @@ func getClusterNodes(clientset *kubernetes.Clientset, options *metav1.ListOption
return allNodes, nil
}

func getDaemonSet(clientset *kubernetes.Clientset, daemonset string, ns string) (*appsv1.DaemonSet, error) {
ds, err := clientset.AppsV1().DaemonSets(ns).Get(context.TODO(), daemonset, metav1.GetOptions{})
func getDaemonSet(ctx context.Context, clientset *kubernetes.Clientset, daemonset string, ns string) (*appsv1.DaemonSet, error) {
ds, err := clientset.AppsV1().DaemonSets(ns).Get(ctx, daemonset, metav1.GetOptions{})

if err != nil {
return nil, err
@@ -60,16 +60,16 @@ func getDaemonSet(clientset *kubernetes.Clientset, daemonset string, ns string)
return ds, nil
}

func getNamespace(clientset *kubernetes.Clientset, name string) (*corev1.Namespace, error) {
namespace, err := clientset.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
func getNamespace(ctx context.Context, clientset *kubernetes.Clientset, name string) (*corev1.Namespace, error) {
namespace, err := clientset.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return namespace, nil
}

func getNamespacePods(clientset *kubernetes.Clientset, namespace string, options *metav1.ListOptions) ([]string, error) {
pods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), *options)
func getNamespacePods(ctx context.Context, clientset *kubernetes.Clientset, namespace string, options *metav1.ListOptions) ([]string, error) {
pods, err := clientset.CoreV1().Pods(namespace).List(ctx, *options)
var allPods []string
if err != nil {
return allPods, err
@@ -80,8 +80,8 @@ func getNamespacePods(clientset *kubernetes.Clientset, namespace string, options
return allPods, nil
}

func getConfigMap(clientset *kubernetes.Clientset, name string, namespace string) (*corev1.ConfigMap, error) {
cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
func getConfigMap(ctx context.Context, clientset *kubernetes.Clientset, name string, namespace string) (*corev1.ConfigMap, error) {
cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return nil, err
}
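cluster.go applies the same idea one level down: the read helpers (getDaemonSet, getNamespace, getNamespacePods, getConfigMap) now take a context.Context as their first parameter instead of minting context.TODO() internally, so callers decide the deadline. A small sketch of the convention, with a hypothetical fetch helper standing in for a client-go Get call:

package main

import (
	"context"
	"fmt"
	"time"
)

// fetch stands in for a client-go read such as
// clientset.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{}).
// Taking ctx as the first argument lets the caller control cancellation
// instead of the helper hard-coding context.TODO().
func fetch(ctx context.Context, name string) (string, error) {
	select {
	case <-time.After(100 * time.Millisecond): // simulated API latency
		return "value-for-" + name, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	value, err := fetch(ctx, "example")
	fmt.Println(value, err)
}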
@@ -111,7 +111,7 @@ func queryPrometheusMetric(clientset *kubernetes.Clientset, query string) (float

// Get the Prometheus route from openshift-monitoring namespace
var routeGVR = schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}
unstructuredRoute, err := dynclient.Resource(routeGVR).Namespace("openshift-monitoring").Get(context.TODO(), "prometheus-k8s", metav1.GetOptions{})
unstructuredRoute, err := dynclient.Resource(routeGVR).Namespace("openshift-monitoring").Get(context.Background(), "prometheus-k8s", metav1.GetOptions{})
if err != nil {
return 0.0, fmt.Errorf("failed to get prometheus route: %w", err)
}
@@ -159,7 +159,7 @@ func queryPrometheusMetric(clientset *kubernetes.Clientset, query string) (float

var finalResult float64
// Poll for 5 minutes at 20-second intervals
err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 5*time.Minute, false, func(context.Context) (done bool, err error) {
err = wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 5*time.Minute, false, func(_ context.Context) (done bool, err error) {
// Execute the request
resp, err := httpClient.Do(req)
if err != nil {
@@ -235,7 +235,7 @@ func createServiceAccountToken(clientset *kubernetes.Clientset, serviceAccountNa
}

token, err := clientset.CoreV1().ServiceAccounts(namespace).CreateToken(
context.TODO(),
context.Background(),
serviceAccountName,
tokenRequest,
metav1.CreateOptions{},