kubectl-persistent-logger/logs/pod_watcher.go

package logs

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"strings"
	"sync"
	"time"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/watch"
)
const nl = "\n"

// streamError represents an error streaming the logs of a single pod. If
// recoverable is true, the stream may be re-established and retried.
type streamError struct {
	err         error
	podName     string
	recoverable bool
}

func (e *streamError) Error() string {
	return e.err.Error()
}

// PodWatcher consumes and merges the logs for a specified set of pods.
type PodWatcher struct {
	clientset     KubernetesClient
	container     string
	labelSelector *metav1.LabelSelector
	spec          map[string]*corev1.Pod
	status        map[string]bool
	streamResults chan error
	dst           io.Writer
	closeChan     chan struct{}
}

// NewPodWatcher initializes a new PodWatcher.
func NewPodWatcher(clientset KubernetesClient, container string, labelSelector *metav1.LabelSelector, dst io.Writer) PodWatcherInterface {
	return &PodWatcher{
		clientset:     clientset,
		container:     container,
		labelSelector: labelSelector,
		spec:          make(map[string]*corev1.Pod),
		status:        make(map[string]bool),
		streamResults: make(chan error),
		dst:           dst,
		// closeChan must be initialized here, otherwise Close would block
		// forever sending on a nil channel.
		closeChan: make(chan struct{}),
	}
}
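
// A minimal usage sketch, assuming KubernetesClient is satisfied by a
// *kubernetes.Clientset and that the target deployment's selector is at hand
// (clientset, deployment and ctx are illustrative, not defined in this file):
//
//	pw := NewPodWatcher(clientset, "my-container", deployment.Spec.Selector, os.Stdout)
//	if err := pw.WatchPods(ctx); err != nil {
//		log.Fatal(err)
//	}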

// WatchPods blocks while it watches the pods which match the Selector of the
// provided deployment.
//
// Cancelling ctx will result in the event loop exiting immediately.
func (pw *PodWatcher) WatchPods(ctx context.Context) error {
	var wg sync.WaitGroup
	err := pw.watchPods(ctx, &wg)
	wg.Wait()
	log.Println("[PodWatcher] all goroutines exited, exiting")
	return err
}

// Close terminates the watcher, waiting for all logs to be consumed before
// exiting.
func (pw *PodWatcher) Close() {
	pw.closeChan <- struct{}{}
}
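
// One possible shutdown pattern, assuming the watcher is driven from the main
// goroutine and closed from elsewhere (sigCh is an illustrative signal
// channel, e.g. from signal.Notify):
//
//	go func() {
//		<-sigCh
//		pw.Close()
//	}()
//	err := pw.WatchPods(ctx)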

func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
	podsClient := pw.clientset.CoreV1().Pods(corev1.NamespaceDefault)

	labelsMap, err := metav1.LabelSelectorAsMap(pw.labelSelector)
	if err != nil {
		return err
	}
	watcher, err := podsClient.Watch(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labelsMap).String()})
	if err != nil {
		return err
	}
	defer watcher.Stop()

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	// streamErrors is never closed.
	streamErrors := make(chan error)

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-pw.closeChan:
			return nil
		case <-ticker.C:
			// Iterate through the desired state (pw.spec) and launch goroutines to
			// process the logs of any missing pods.
			for podName, pod := range pw.spec {
				pod := pod // capture the loop variable for the goroutine below
				if _, ok := pw.status[podName]; !ok {
					log.Printf("[PodWatcher] adding pod, name = %s", pod.Name)
					pw.status[pod.Name] = true
					wg.Add(1)
					go func() {
						if err := copyPodLogs(ctx, wg, pw.clientset, pod, pw.container, pw.dst); err != nil {
							streamErrors <- err
						}
					}()
				}
			}
			// For any pods which no longer exist, remove the pod.
			// TODO: check this is needed when a pod's labels change to no longer
			// match the deployment's selector.
			for podName := range pw.status {
				if _, ok := pw.spec[podName]; !ok {
					pw.removePod(podName)
				}
			}
		case err := <-streamErrors:
			var streamErr *streamError
			if errors.As(err, &streamErr) && streamErr.recoverable {
				// If the error is recoverable, just remove the pod from the
				// status map. It will be recreated and retried on the next tick.
				pw.removePod(streamErr.podName)
			} else {
				// Return the original error rather than streamErr, which is a
				// nil pointer whenever errors.As did not match.
				return err
			}
		case evt := <-watcher.ResultChan():
			switch evt.Type {
			case watch.Added, watch.Modified:
				pod := evt.Object.(*corev1.Pod)
				if pod.Status.Phase == corev1.PodRunning {
					pw.spec[pod.Name] = pod
				}
			case watch.Deleted:
				pod := evt.Object.(*corev1.Pod)
				delete(pw.spec, pod.Name)
			}
		}
	}
}

func (pw *PodWatcher) removePod(podName string) {
	log.Printf("[PodWatcher] removing pod, name = %s", podName)
	delete(pw.status, podName)
}
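
// copyPodLogs streams the logs of a single container in pod p to dst,
// prefixing each line with the pod name. Errors are returned as a
// *streamError; a container that is still being created is reported as
// recoverable so the caller can retry on a later tick.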
func copyPodLogs(ctx context.Context, wg *sync.WaitGroup, clientset KubernetesClient, p *corev1.Pod, container string, dst io.Writer) error {
	defer wg.Done()

	podLogOpts := corev1.PodLogOptions{
		Follow:    true,
		Container: container,
	}
	req := clientset.CoreV1().Pods(p.Namespace).GetLogs(p.Name, &podLogOpts)

	logs, err := req.Stream(ctx)
	// If the container is still being created, do not treat this as a fatal
	// error. We verify the error type as strictly as possible.
	var statusErr *apierrors.StatusError
	if errors.As(err, &statusErr) && statusErr.Status().Reason == metav1.StatusReasonBadRequest && strings.Contains(statusErr.Error(), "ContainerCreating") {
		return &streamError{err: err, podName: p.Name, recoverable: true}
	} else if err != nil {
		return &streamError{err: err, podName: p.Name}
	}
	defer func() { _ = logs.Close() }()

	scanner := bufio.NewScanner(logs)
	for scanner.Scan() {
		if _, err = dst.Write([]byte("[" + p.Name + "] " + scanner.Text() + nl)); err != nil {
			return &streamError{err: fmt.Errorf("error writing logs: %w", err), podName: p.Name}
		}
	}
	if err := scanner.Err(); err != nil {
		return &streamError{err: fmt.Errorf("error scanning logs: %w", err), podName: p.Name}
	}
	return nil
}