podWatcher: Refactor

Rob Watson, 2022-06-14 21:11:06 +02:00
parent d8e57c3d0e
commit e33612cced
1 changed file with 49 additions and 45 deletions


@@ -20,36 +20,39 @@ import (
 const nl = "\n"
 
-type stream struct {
+// logStream represents the logStream from an individual pod.
+type logStream struct {
     podName   string
+    container string
     namespace string
     done      chan struct{}
 }
 
-func newStream(podName, namespace string) stream {
-    return stream{podName: podName, namespace: namespace, done: make(chan struct{})}
+func newStream(podName, container, namespace string) logStream {
+    return logStream{
+        podName:   podName,
+        container: container,
+        namespace: namespace,
+        done:      make(chan struct{}),
+    }
 }
 
-func (s stream) close() {
-    close(s.done)
-}
-
-type streamError struct {
+type logError struct {
     err         error
-    stream      stream
+    logStream   logStream
     recoverable bool
 }
 
-func (re *streamError) Error() string {
+func (re *logError) Error() string {
     return re.err.Error()
 }
 
-func newStreamError(err error, stream stream) *streamError {
-    return &streamError{err: err, stream: stream}
+func newLogError(err error, stream logStream) *logError {
+    return &logError{err: err, logStream: stream}
 }
 
-func newRecoverableError(err error, stream stream) *streamError {
-    return &streamError{err: err, stream: stream, recoverable: true}
+func newRecoverableLogError(err error, stream logStream) *logError {
+    return &logError{err: err, logStream: stream, recoverable: true}
 }
 
 // PodWatcher consumes and merges the logs for a specified set of pods.
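
Not part of this commit: the recoverable flag on the new logError type is what lets the watch loop further down decide whether to retry a pod or abort. A minimal, self-contained sketch of that pattern, with simplified stand-in types and a made-up pod name, illustrative only:

package main

import (
    "errors"
    "fmt"
)

// Simplified stand-ins for the types introduced in this commit.
type logStream struct{ podName string }

type logError struct {
    err         error
    logStream   logStream
    recoverable bool
}

func (e *logError) Error() string { return e.err.Error() }

func main() {
    var err error = &logError{
        err:         errors.New("container is in state ContainerCreating"),
        logStream:   logStream{podName: "web-0"},
        recoverable: true,
    }

    // Callers unwrap with errors.As and branch on the recoverable flag,
    // mirroring the select loop later in this commit.
    var logErr *logError
    if errors.As(err, &logErr) && logErr.recoverable {
        fmt.Printf("recoverable error for pod %s: %v\n", logErr.logStream.podName, logErr)
    } else {
        fmt.Printf("fatal: %v\n", err)
    }
}
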
@@ -59,7 +62,7 @@ type PodWatcher struct {
     container     string
     labelSelector labels.Selector
     spec          map[string]*corev1.Pod
-    status        map[string]stream
+    status        map[string]logStream
     streamResults chan error
     dst           io.Writer
     closeChan     chan struct{}
@@ -74,7 +77,7 @@ func NewPodWatcher(client KubernetesClient, namespace string, container string,
         container:     container,
         labelSelector: labelSelector,
         spec:          make(map[string]*corev1.Pod),
-        status:        make(map[string]stream),
+        status:        make(map[string]logStream),
         streamResults: make(chan error),
         dst:           dst,
         closeChan:     make(chan struct{}),
@@ -115,8 +118,8 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
     ticker := time.NewTicker(tickerInterval)
     defer ticker.Stop()
 
-    logStream := make(chan string)
-    streamErrors := make(chan error)
+    logLines := make(chan string)
+    logErrorChan := make(chan error)
 
     resultChan := watcher.ResultChan()
 
     for {
@@ -126,7 +129,7 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
        case <-pw.closeChan:
            for _, stream := range pw.status {
-               stream.close()
+               pw.removePod(stream)
            }
            return nil
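
Not part of this commit: the closeChan and per-stream done channels rely on the usual Go broadcast idiom, where closing a channel unblocks every receiver at once, so a single close() tears down all waiting goroutines. A standalone sketch of that idiom, with hypothetical worker goroutines rather than this project's code:

package main

import (
    "fmt"
    "sync"
    "time"
)

func main() {
    done := make(chan struct{}) // plays the role of closeChan / logStream.done

    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            for {
                select {
                case <-done:
                    // Receiving from a closed channel never blocks, so every
                    // worker observes the shutdown from a single close().
                    fmt.Printf("worker %d: shutting down\n", id)
                    return
                case <-time.After(50 * time.Millisecond):
                    fmt.Printf("worker %d: streaming...\n", id)
                }
            }
        }(i)
    }

    time.Sleep(120 * time.Millisecond)
    close(done) // broadcast shutdown to all workers at once
    wg.Wait()
}
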
@@ -135,14 +138,14 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
            // process the logs of any missing pods.
            for podName, pod := range pw.spec {
                if _, ok := pw.status[podName]; !ok {
-                   pw.logger.Printf("[PodWatcher] adding pod, name = %s", pod.Name)
-                   s := newStream(pod.Name, pod.Namespace)
-                   pw.status[pod.Name] = s
+                   pw.logger.Printf("[PodWatcher] adding pod, name = %s, container = %s, namespace = %s,", pod.Name, pw.container, pod.Namespace)
+                   stream := newStream(pod.Name, pw.container, pod.Namespace)
+                   pw.status[pod.Name] = stream
 
                    wg.Add(1)
                    go func() {
-                       if err := copyPodLogs(ctx, wg, pw.client, s, pw.container, logStream); err != nil {
-                           streamErrors <- err
+                       if err := copyLogStream(ctx, wg, pw.client, stream, logLines); err != nil {
+                           logErrorChan <- err
                        }
                    }()
                }
@ -154,27 +157,28 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
} }
} }
// logStream is never closed // logLines is never closed
case l := <-logStream: case line := <-logLines:
if _, err := pw.dst.Write([]byte(l)); err != nil { if _, err := pw.dst.Write([]byte(line)); err != nil {
return fmt.Errorf("error writing logs: %v", err) return fmt.Errorf("error writing logs: %v", err)
} }
// streamErrors is never closed // logErrorChan is never closed
case err := <-streamErrors: case err := <-logErrorChan:
var streamErr *streamError var logErr *logError
if errors.As(err, &streamErr) && streamErr.recoverable { if errors.As(err, &logErr) && logErr.recoverable {
// if the error is recoverable, we just remove the pod from the status // if the error is recoverable, we just remove the pod from the status
// map. It will be recreated and retried on the next iteration. // map. It will be recreated and retried on the next iteration.
pw.removePod(streamErr.stream) pw.removePod(logErr.logStream)
} else { } else {
return fmt.Errorf("error streaming logs: %w", streamErr) return fmt.Errorf("error streaming logs: %w", logErr)
} }
case evt, ok := <-resultChan: case evt, ok := <-resultChan:
if !ok { if !ok {
return nil return nil
} }
switch evt.Type { switch evt.Type {
case watch.Added, watch.Modified: case watch.Added, watch.Modified:
pod := evt.Object.(*corev1.Pod) pod := evt.Object.(*corev1.Pod)
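
Not part of this commit: the select loop above is a fan-in, where every per-pod goroutine sends prefixed lines into the single logLines channel and one consumer serialises the writes to dst. A runnable sketch of that shape, using made-up pod names and stdout in place of the watcher's dst, illustrative only:

package main

import (
    "fmt"
    "os"
    "sync"
)

func main() {
    logLines := make(chan string)

    // Producers: one goroutine per pod, each prefixing its own lines,
    // as copyLogStream does with the pod name.
    var wg sync.WaitGroup
    for _, pod := range []string{"api-1", "api-2", "worker-1"} {
        wg.Add(1)
        go func(pod string) {
            defer wg.Done()
            for i := 0; i < 3; i++ {
                logLines <- fmt.Sprintf("[%s] line %d\n", pod, i)
            }
        }(pod)
    }

    // Close the channel once all producers finish so the range loop ends.
    // (The real watchPods loop never closes logLines; it exits via other cases.)
    go func() {
        wg.Wait()
        close(logLines)
    }()

    // Single consumer: serialises all writes to one destination.
    for line := range logLines {
        if _, err := os.Stdout.WriteString(line); err != nil {
            fmt.Fprintf(os.Stderr, "error writing logs: %v\n", err)
            return
        }
    }
}
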
@@ -189,40 +193,40 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
    }
 }
 
-func (pw *PodWatcher) removePod(stream stream) {
+func (pw *PodWatcher) removePod(stream logStream) {
    pw.logger.Printf("[PodWatcher] removing pod, name = %s", stream.podName)
-   stream.close()
+   close(stream.done)
    delete(pw.status, stream.podName)
 }
 
-func copyPodLogs(ctx context.Context, wg *sync.WaitGroup, client KubernetesClient, stream stream, container string, logStream chan string) error {
+func copyLogStream(ctx context.Context, wg *sync.WaitGroup, client KubernetesClient, stream logStream, logsLines chan string) error {
    defer wg.Done()
 
-   req := client.Typed.CoreV1().Pods(stream.namespace).GetLogs(stream.podName, &corev1.PodLogOptions{Follow: true, Container: container})
-   logs, err := req.Stream(ctx)
+   req := client.Typed.CoreV1().Pods(stream.namespace).GetLogs(stream.podName, &corev1.PodLogOptions{Follow: true, Container: stream.container})
+   logReader, err := req.Stream(ctx)
 
    // If one container is still being created, do not treat this as a fatal error.
    var statusErr *apierrors.StatusError
    if errors.As(err, &statusErr) && statusErr.Status().Reason == metav1.StatusReasonBadRequest && strings.Contains(statusErr.Error(), "ContainerCreating") {
-       return newRecoverableError(err, stream)
+       return newRecoverableLogError(err, stream)
    } else if err != nil {
-       return newStreamError(err, stream)
+       return newLogError(err, stream)
    }
 
    // Closing the reader ensures that the goroutine below is not leaked.
-   defer func() { _ = logs.Close() }()
+   defer func() { _ = logReader.Close() }()
 
    done := make(chan error, 1)
 
    go func() {
-       scanner := bufio.NewScanner(logs)
+       defer close(done)
+       scanner := bufio.NewScanner(logReader)
        for scanner.Scan() {
-           logStream <- "[" + stream.podName + "] " + scanner.Text() + nl
+           logsLines <- "[" + stream.podName + "] " + scanner.Text() + nl
        }
        if err := scanner.Err(); err != nil {
-           done <- newStreamError(fmt.Errorf("error scanning logs: %v", err), stream)
+           done <- newLogError(fmt.Errorf("error scanning logs: %v", err), stream)
            return
        }
-       done <- nil
    }()
 
    for {
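
Not part of this commit: for reference, the goroutine pattern copyLogStream uses after this change is that the scanner goroutine forwards lines, reports a scan error on the buffered done channel, and signals completion by closing done (the deferred close replaces the old "done <- nil" send). A simplified, standalone sketch under those assumptions; scanLines and its signature are hypothetical, not this project's API:

package main

import (
    "bufio"
    "fmt"
    "io"
    "strings"
)

// scanLines is a hypothetical helper mirroring the shape of the goroutine in
// copyLogStream: it forwards each line to out, reports any scan error on the
// buffered done channel, and closes done when it finishes.
func scanLines(r io.Reader, out chan<- string) <-chan error {
    done := make(chan error, 1)
    go func() {
        defer close(done) // completion is signalled by closing done

        scanner := bufio.NewScanner(r)
        for scanner.Scan() {
            out <- scanner.Text() + "\n"
        }
        if err := scanner.Err(); err != nil {
            done <- fmt.Errorf("error scanning logs: %v", err)
        }
    }()
    return done
}

func main() {
    out := make(chan string)
    done := scanLines(strings.NewReader("one\ntwo\nthree\n"), out)

    for {
        select {
        case line := <-out:
            fmt.Print(line)
        case err := <-done:
            // A nil error here means the reader was fully drained.
            if err != nil {
                fmt.Println("scan failed:", err)
            }
            return
        }
    }
}
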