kubectl-persistent-logger/logs/pod_watcher.go

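// Package logs contains PodWatcher, which consumes and merges the logs for a
// specified set of Kubernetes pods.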
package logs

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"log"
	"strings"
	"sync"
	"time"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/watch"
)

const nl = "\n"
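
// streamError wraps an error from a single pod's log stream, recording which
// pod it came from and whether the stream may be retried.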
type streamError struct {
	err         error
	podName     string
	recoverable bool
}

func (re *streamError) Error() string {
	return re.err.Error()
}
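
// newStreamError returns a streamError which is treated as fatal.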
func newStreamError(err error, podName string) *streamError {
	return &streamError{err: err, podName: podName}
}
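
// newRecoverableError returns a streamError which the caller may retry.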
func newRecoverableError(err error, podName string) *streamError {
	return &streamError{err: err, podName: podName, recoverable: true}
}

// PodWatcher consumes and merges the logs for a specified set of pods.
type PodWatcher struct {
	clientset     KubernetesClient
	container     string
	labelSelector *metav1.LabelSelector
	spec          map[string]*corev1.Pod
	status        map[string]bool
	streamResults chan error
	dst           io.Writer
	closeChan     chan struct{}
}

// NewPodWatcher initializes a new PodWatcher.
func NewPodWatcher(clientset KubernetesClient, container string, labelSelector *metav1.LabelSelector, dst io.Writer) PodWatcherInterface {
	return &PodWatcher{
		clientset:     clientset,
		container:     container,
		labelSelector: labelSelector,
		spec:          make(map[string]*corev1.Pod),
		status:        make(map[string]bool),
		streamResults: make(chan error),
		dst:           dst,
		closeChan:     make(chan struct{}),
	}
}

// WatchPods blocks while it watches the pods which match the Selector of the
// provided deployment.
//
// Cancelling ctx will result in the event loop exiting immediately.
func (pw *PodWatcher) WatchPods(ctx context.Context) error {
	var wg sync.WaitGroup
	err := pw.watchPods(ctx, &wg)
	wg.Wait()
	log.Println("[PodWatcher] all goroutines exited, exiting")
	return err
}

// Close terminates the watcher, waiting for all logs to be consumed before
// exiting.
func (pw *PodWatcher) Close() {
	pw.closeChan <- struct{}{}
}

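// watchPods runs the watcher's event loop: it starts a log-streaming
// goroutine for each running pod that matches the label selector, and
// restarts streams which fail with a recoverable error.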
func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
	podsClient := pw.clientset.CoreV1().Pods(corev1.NamespaceDefault)

	labelsMap, err := metav1.LabelSelectorAsMap(pw.labelSelector)
	if err != nil {
		return err
	}
	watcher, err := podsClient.Watch(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labelsMap).String()})
	if err != nil {
		return err
	}
	defer watcher.Stop()

	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	// streamErrors is never closed.
	streamErrors := make(chan *streamError)
	resultChan := watcher.ResultChan()

	for {
		select {
		case <-ctx.Done():
			return ctx.Err()

		case <-pw.closeChan:
			return nil

		case <-ticker.C:
			// Iterate through the desired state (pw.spec) and launch goroutines to
			// process the logs of any missing pods.
			for podName, pod := range pw.spec {
				pod := pod
				if _, ok := pw.status[podName]; !ok {
					log.Printf("[PodWatcher] adding pod, name = %s", pod.Name)
					pw.status[pod.Name] = true
					wg.Add(1)
					go func() {
						if err := copyPodLogs(ctx, wg, pw.clientset, pod, pw.container, pw.dst); err != nil {
							streamErrors <- err
						}
					}()
				}
			}

			// For any pods which no longer exist, remove the pod.
			// TODO: check this is needed when a pod's labels change to no longer
			// match the deployment's selector.
			for podName := range pw.status {
				if _, ok := pw.spec[podName]; !ok {
					pw.removePod(podName)
				}
			}

		case streamErr := <-streamErrors:
			if streamErr.recoverable {
				// If the error is recoverable, we just remove the pod from the status
				// map. It will be recreated and retried on the next iteration.
				pw.removePod(streamErr.podName)
			} else {
				return fmt.Errorf("error streaming logs: %w", streamErr)
			}
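
		// resultChan delivers pod events from the watch. When the channel is
		// closed it is set to nil below, which disables this case while the
		// other cases keep being served.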
		case evt, ok := <-resultChan:
			if !ok {
				resultChan = nil
				continue
			}
			switch evt.Type {
			case watch.Added, watch.Modified:
				pod := evt.Object.(*corev1.Pod)
				if pod.Status.Phase == corev1.PodRunning {
					pw.spec[pod.Name] = pod
				}
			case watch.Deleted:
				pod := evt.Object.(*corev1.Pod)
				delete(pw.spec, pod.Name)
			}
		}
	}
}
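
// removePod clears a pod from the status map so that its logs will be
// streamed again if it reappears in the desired state.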
func (pw *PodWatcher) removePod(podName string) {
	log.Printf("[PodWatcher] removing pod, name = %s", podName)
	delete(pw.status, podName)
}
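
// copyPodLogs streams the logs of one container in the given pod and writes
// them to dst line by line, prefixing each line with the pod name. An error
// raised while the container is still being created is returned as recoverable.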
func copyPodLogs(ctx context.Context, wg *sync.WaitGroup, clientset KubernetesClient, pod *corev1.Pod, container string, dst io.Writer) *streamError {
	defer wg.Done()

	podLogOpts := corev1.PodLogOptions{
		Follow:    true,
		Container: container,
	}
	req := clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &podLogOpts)
	logs, err := req.Stream(ctx)

	// If one container is still being created, do not treat this as a fatal error.
	// We try to verify the error type as strictly as possible.
	var statusErr *apierrors.StatusError
	if errors.As(err, &statusErr) && statusErr.Status().Reason == metav1.StatusReasonBadRequest && strings.Contains(statusErr.Error(), "ContainerCreating") {
		return newRecoverableError(err, pod.Name)
	} else if err != nil {
		return newStreamError(err, pod.Name)
	}
	defer func() { _ = logs.Close() }()

	scanner := bufio.NewScanner(logs)
	for scanner.Scan() {
		if _, err = dst.Write([]byte("[" + pod.Name + "] " + scanner.Text() + nl)); err != nil {
			return newStreamError(fmt.Errorf("error writing logs: %v", err), pod.Name)
		}
	}
	if err := scanner.Err(); err != nil {
		return newStreamError(fmt.Errorf("error scanning logs: %v", err), pod.Name)
	}
	return nil
}