Support other resource types
Use the untyped k8s client to enable support for statefulsets and replicasets as well as deployments. The typed client is retained for use inside pod_watcher.go.
This commit is contained in: parent f8c59aeb15, commit ca822496b0
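For context, the distinction this commit relies on is between client-go's typed clientset (compile-time types such as CoreV1().Pods) and the dynamic, schema-less client, which addresses resources by GroupVersionResource and can therefore watch deployments, statefulsets and replicasets through a single code path. The sketch below shows how the two clients are typically constructed and how a named apps/v1 resource can be watched dynamically; the helper names (buildClients, watchResource) and the hard-coded "default" namespace are illustrative assumptions, not code from this repository.

package sketch

import (
    "context"
    "log"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

// buildClients constructs both flavours of client from the same rest.Config.
func buildClients(cfg *rest.Config) (kubernetes.Interface, dynamic.Interface, error) {
    typed, err := kubernetes.NewForConfig(cfg) // typed: CoreV1().Pods(...), AppsV1().Deployments(...), etc.
    if err != nil {
        return nil, nil, err
    }
    untyped, err := dynamic.NewForConfig(cfg) // dynamic: any GroupVersionResource
    if err != nil {
        return nil, nil, err
    }
    return typed, untyped, nil
}

// watchResource watches a single named apps/v1 resource (deployments,
// statefulsets or replicasets) via the dynamic client.
func watchResource(ctx context.Context, untyped dynamic.Interface, resource, name string) error {
    gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: resource}
    w, err := untyped.Resource(gvr).Namespace("default").Watch(ctx, metav1.ListOptions{
        FieldSelector: "metadata.name=" + name,
    })
    if err != nil {
        return err
    }
    defer w.Stop()
    for evt := range w.ResultChan() {
        log.Printf("event: %s", evt.Type)
    }
    return nil
}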
@@ -0,0 +1,18 @@
+package logs
+
+import "fmt"
+
+// ParseType returns an API resource type (pluralized) from a singular or
+// shortened name. If the resource is unsupported, an error will be returned.
+func ParseType(input string) (string, error) {
+    switch input {
+    case "deploy", "deployment", "deployments":
+        return "deployments", nil
+    case "sts", "statefulset", "statefulsets":
+        return "statefulsets", nil
+    case "rs", "replicaset", "replicasets":
+        return "replicasets", nil
+    default:
+        return "", fmt.Errorf(`unsupported resource: "%s". Supported resources are [deployment, statefulset, replicaset]`, input)
+    }
+}
@@ -0,0 +1,68 @@
+package logs_test
+
+import (
+    "testing"
+
+    "git.netflux.io/rob/kubectl-persistent-logger/logs"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+func TestParseKind(t *testing.T) {
+    testCases := []struct {
+        input   string
+        wantOut string
+        wantErr string
+    }{
+        {
+            input:   "deploy",
+            wantOut: "deployments",
+        },
+        {
+            input:   "deployment",
+            wantOut: "deployments",
+        },
+        {
+            input:   "deployments",
+            wantOut: "deployments",
+        },
+        {
+            input:   "sts",
+            wantOut: "statefulsets",
+        },
+        {
+            input:   "statefulset",
+            wantOut: "statefulsets",
+        },
+        {
+            input:   "statefulsets",
+            wantOut: "statefulsets",
+        },
+        {
+            input:   "rs",
+            wantOut: "replicasets",
+        },
+        {
+            input:   "replicaset",
+            wantOut: "replicasets",
+        },
+        {
+            input:   "replicasets",
+            wantOut: "replicasets",
+        },
+        {
+            input:   "foo",
+            wantErr: `unsupported resource: "foo". Supported resources are [deployment, statefulset, replicaset]`,
+        },
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.input, func(t *testing.T) {
+            result, err := logs.ParseType(tc.input)
+            if tc.wantErr != "" {
+                require.EqualError(t, err, tc.wantErr)
+            }
+            assert.Equal(t, tc.wantOut, result)
+        })
+    }
+}
@@ -40,9 +40,9 @@ func newRecoverableError(err error, podName string) *streamError {

// PodWatcher consumes and merges the logs for a specified set of pods.
type PodWatcher struct {
-    clientset     KubernetesClient
+    client        KubernetesClient
    container     string
-    labelSelector *metav1.LabelSelector
+    labelSelector labels.Selector
    spec          map[string]*corev1.Pod
    status        map[string]bool
    streamResults chan error
@@ -51,15 +51,16 @@ type PodWatcher struct {
}

// NewPodWatcher initializes a new PodWatcher.
-func NewPodWatcher(clientset KubernetesClient, container string, labelSelector *metav1.LabelSelector, dst io.Writer) PodWatcherInterface {
+func NewPodWatcher(client KubernetesClient, container string, labelSelector labels.Selector, dst io.Writer) PodWatcherInterface {
    return &PodWatcher{
-        clientset:     clientset,
+        client:        client,
        container:     container,
        labelSelector: labelSelector,
        spec:          make(map[string]*corev1.Pod),
        status:        make(map[string]bool),
        streamResults: make(chan error),
        dst:           dst,
+        closeChan:     make(chan struct{}),
    }
}
@@ -77,25 +78,25 @@ func (pw *PodWatcher) WatchPods(ctx context.Context) error {
    return err
}

-// Close terminates the watcher, waiting for all logs to be consumed before
-// exiting.
+const tickerInterval = time.Millisecond * 250
+
+// Close terminates the PodWatcher.
func (pw *PodWatcher) Close() {
    pw.closeChan <- struct{}{}
}

func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
-    podsClient := pw.clientset.CoreV1().Pods(corev1.NamespaceDefault)
-    labelsMap, err := metav1.LabelSelectorAsMap(pw.labelSelector)
-    if err != nil {
-        return err
-    }
-    watcher, err := podsClient.Watch(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labelsMap).String()})
-    if err != nil {
-        return err
-    }
-    defer watcher.Stop()
-
-    ticker := time.NewTicker(time.Second)
+    podsClient := pw.client.Typed.CoreV1().Pods(corev1.NamespaceDefault)
+
+    // Returns a watcher which notifies of changes in the relevant pods.
+    // We don't defer Stop() on the returned value because the sender is the
+    // Kubernetes SDK, and that may introduce a send-on-closed-channel panic.
+    watcher, err := podsClient.Watch(ctx, metav1.ListOptions{LabelSelector: pw.labelSelector.String()})
+    if err != nil {
+        return err
+    }
+
+    ticker := time.NewTicker(tickerInterval)
    defer ticker.Stop()

    // streamErrors is never closed.
@@ -120,15 +121,14 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {
                pw.status[pod.Name] = true
                wg.Add(1)
                go func() {
-                    if err := copyPodLogs(ctx, wg, pw.clientset, pod, pw.container, pw.dst); err != nil {
+                    if err := copyPodLogs(ctx, wg, pw.client, pod, pw.container, pw.dst); err != nil {
                        streamErrors <- err
                    }
                }()
            }
        }
        // For any pods which no longer exist, remove the pod.
-        // TODO: check this is needed when a pod's labels change to no longer
-        // match the deployment's selector.
+        // TODO: stop the log streaming.
        for podName := range pw.status {
            if _, ok := pw.spec[podName]; !ok {
                pw.removePod(podName)
@@ -146,8 +146,7 @@ func (pw *PodWatcher) watchPods(ctx context.Context, wg *sync.WaitGroup) error {

        case evt, ok := <-resultChan:
            if !ok {
-                resultChan = nil
-                continue
+                return nil
            }
            switch evt.Type {
            case watch.Added, watch.Modified:
@@ -168,14 +167,15 @@ func (pw *PodWatcher) removePod(podName string) {
    delete(pw.status, podName)
}

-func copyPodLogs(ctx context.Context, wg *sync.WaitGroup, clientset KubernetesClient, pod *corev1.Pod, container string, dst io.Writer) *streamError {
+func copyPodLogs(ctx context.Context, wg *sync.WaitGroup, client KubernetesClient, pod *corev1.Pod, container string, dst io.Writer) *streamError {
    defer wg.Done()

    podLogOpts := corev1.PodLogOptions{
        Follow:    true,
        Container: container,
    }
-    req := clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &podLogOpts)
+
+    req := client.Typed.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &podLogOpts)
    logs, err := req.Stream(ctx)

    // If one container is still being created, do not treat this as a fatal error.
@@ -6,6 +6,7 @@ import (
    "errors"
    "fmt"
    "io"
+    "log"
    "net/http"
    "strings"
    "testing"

@@ -17,6 +18,7 @@ import (
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/watch"
    testclient "k8s.io/client-go/kubernetes/fake"
    "k8s.io/client-go/kubernetes/scheme"
@@ -60,10 +62,7 @@ func (m *mockClientset) GetLogs(podName string, _ *corev1.PodLogOptions) *rest.Request {
    return fakeClient.Request()
}

-func TestPodWatcherClosedWatcher(t *testing.T) {
-    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
-    defer cancel()
-
+func TestPodWatcherClose(t *testing.T) {
    podsWatcher := watch.NewFake()

    clientset := mockClientset{
@@ -73,29 +72,29 @@ func TestPodWatcherClosedWatcher(t *testing.T) {
    }
    clientset.PrependWatchReactor("pods", k8stest.DefaultWatchReactor(podsWatcher, nil))

-    client := logs.KubernetesClient{Interface: &clientset}
-    selector := metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}
+    client := logs.KubernetesClient{Typed: &clientset}
+    selector := labels.SelectorFromSet(map[string]string{"foo": "bar"})

-    go func() {
-        defer podsWatcher.Stop()
-        podsWatcher.Add(&corev1.Pod{
-            ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
-            Status:     corev1.PodStatus{Phase: corev1.PodRunning},
-        })
-    }()
-
    var buf bytes.Buffer
-    pw := logs.NewPodWatcher(client, "mycontainer", &selector, &buf)
+    pw := logs.NewPodWatcher(client, "mycontainer", selector, &buf)

-    // WatchPods should wait for the logs and return cleanly.
-    err := pw.WatchPods(ctx)
-    require.Equal(t, context.DeadlineExceeded, err)
+    go func() {
+        podsWatcher.Add(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}})
+        time.Sleep(time.Second)
+        // Close() should cause the watcher to return cleanly:
+        pw.Close()
+    }()
+
+    err := pw.WatchPods(context.Background())
+    require.NoError(t, err)
+
    assert.Equal(t, "[foo] it worked\n", buf.String())
}

func TestPodWatcher(t *testing.T) {
    testCases := []struct {
        name              string
+        podEvents         []*corev1.Pod
        getLogsRespBody   string
        getLogsStatusCode int
        getLogsErr        error
@@ -103,33 +102,39 @@ func TestPodWatcher(t *testing.T) {
        wantErr           string
    }{
        {
            name: "unexpected error getting logs",
+            podEvents: []*corev1.Pod{
+                {ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}},
+            },
            getLogsErr: errors.New("nope"),
            wantOut:    nil,
            wantErr:    "nope",
        },
        {
            name: "recoverable error getting logs",
+            podEvents: []*corev1.Pod{
+                {ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}},
+            },
            getLogsErr: &apierrors.StatusError{ErrStatus: metav1.Status{Message: "is waiting to start: ContainerCreating", Reason: metav1.StatusReasonBadRequest}},
            wantOut:    nil,
-            wantErr:    context.DeadlineExceeded.Error(),
        },
        {
            name: "success",
+            podEvents: []*corev1.Pod{
+                {ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}},
+                {ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodPending}},
+            },
            getLogsRespBody:   "some logs",
            getLogsStatusCode: http.StatusOK,
            wantOut:           []string{"[foo] some logs", "[bar] some logs"},
-            wantErr:           context.DeadlineExceeded.Error(),
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
-            ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
-            defer cancel()
-
            podsWatcher := watch.NewFake()
-            defer podsWatcher.Stop()
+            defer log.Println("exiting test func")

            clientset := mockClientset{
                getLogsRespBody: tc.getLogsRespBody,
@@ -139,42 +144,33 @@ func TestPodWatcher(t *testing.T) {
            }
            clientset.PrependWatchReactor("pods", k8stest.DefaultWatchReactor(podsWatcher, nil))

-            client := logs.KubernetesClient{Interface: &clientset}
-            selector := metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}
-
-            go func() {
-                pods := []*corev1.Pod{
-                    {
-                        ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
-                        Status:     corev1.PodStatus{Phase: corev1.PodRunning},
-                    },
-                    {
-                        ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "default"},
-                        Status:     corev1.PodStatus{Phase: corev1.PodRunning},
-                    },
-                    {
-                        ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "default"},
-                        Status:     corev1.PodStatus{Phase: corev1.PodPending},
-                    },
-                }
-                for _, pod := range pods {
-                    podsWatcher.Add(pod)
-                    time.Sleep(time.Millisecond * 250)
-                }
-            }()
-
+            client := logs.KubernetesClient{Typed: &clientset}
+            selector := labels.SelectorFromSet(map[string]string{"foo": "bar"})
+
            var buf bytes.Buffer
-            pw := logs.NewPodWatcher(client, "mycontainer", &selector, &buf)
-
-            err := pw.WatchPods(ctx)
+            pw := logs.NewPodWatcher(client, "mycontainer", selector, &buf)
+
+            go func() {
+                for _, pod := range tc.podEvents {
+                    podsWatcher.Add(pod)
+                    time.Sleep(time.Millisecond * 500)
+                }
+                pw.Close()
+            }()
+
+            err := pw.WatchPods(context.Background())

            if tc.wantErr == "" {
                require.NoError(t, err)
            } else {
+                require.Error(t, err)
                require.Contains(t, err.Error(), tc.wantErr)
            }

            if tc.wantOut != nil {
-                assert.ElementsMatch(t, tc.wantOut, bufToLines(&buf))
+                lines := bufToLines(&buf)
+                require.Len(t, lines, len(tc.wantOut))
+                assert.ElementsMatch(t, tc.wantOut, lines)
            }
        })
    }
logs/watcher.go (120 changed lines)
@@ -8,17 +8,23 @@ import (
    "sync"
    "time"

-    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/watch"
+    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/kubernetes"
)

-// KubernetesClient wraps a Kubernetes clientset.
+// KubernetesClient provides both typed and untyped interfaces to the
+// Kubernetes API.
type KubernetesClient struct {
-    kubernetes.Interface
+    Typed   kubernetes.Interface
+    Untyped dynamic.Interface
}

// concurrentWriter implements an io.Writer that can be safely written to from
@@ -42,16 +48,24 @@ type PodWatcherInterface interface {
}

// PodWatcherFunc builds a PodWatcher.
-type PodWatcherFunc func(KubernetesClient, string, *metav1.LabelSelector, io.Writer) PodWatcherInterface
+type PodWatcherFunc func(KubernetesClient, string, labels.Selector, io.Writer) PodWatcherInterface
+
+// WatcherParams defines the input parameters of a Watcher.
+type WatcherParams struct {
+    Name        string
+    Type        string
+    Namespace   string
+    Container   string
+    StrictExist bool
+}

// Watcher watches a deployment and tails the logs for its currently active
// pods.
type Watcher struct {
-    deployName     string
-    container      string
-    strictExist    bool
-    clientset      KubernetesClient
-    deployment     *appsv1.Deployment
+    params         WatcherParams
+    client         KubernetesClient
+    resourceUID    types.UID
+    podSelector    labels.Selector
    podWatcher     PodWatcherInterface
    podWatcherFunc PodWatcherFunc
    errChan        chan error
@@ -59,12 +73,10 @@ type Watcher struct {
}

// NewWatcher creates a new Watcher.
-func NewWatcher(deployName string, container string, strictExist bool, clientset KubernetesClient, podWatcherFunc PodWatcherFunc, dst io.Writer) *Watcher {
+func NewWatcher(params WatcherParams, client KubernetesClient, podWatcherFunc PodWatcherFunc, dst io.Writer) *Watcher {
    return &Watcher{
-        deployName:     deployName,
-        container:      container,
-        strictExist:    strictExist,
-        clientset:      clientset,
+        params:         params,
+        client:         client,
        podWatcherFunc: podWatcherFunc,
        errChan:        make(chan error),
        dst:            &concurrentWriter{w: dst},
@@ -73,30 +85,35 @@ func NewWatcher(deployName string, container string, strictExist bool, clientset KubernetesClient, podWatcherFunc PodWatcherFunc, dst io.Writer) *Watcher {

// Watch watches a deployment.
func (w *Watcher) Watch(ctx context.Context) error {
-    deploymentsClient := w.clientset.AppsV1().Deployments(corev1.NamespaceDefault)
-
-    // Check if the deployment exists before we commence watching, to allow us to
-    // return an error if needed.
-    _, err := deploymentsClient.Get(ctx, w.deployName, metav1.GetOptions{})
-    var statusErr *apierrors.StatusError
-    if errors.As(err, &statusErr) && statusErr.Status().Reason == metav1.StatusReasonNotFound {
-        if w.strictExist {
-            return err
-        }
-        log.Printf(`deployment "%s" does not exist, waiting`, w.deployName)
+    ns := w.params.Namespace
+    if ns == "" {
+        ns = corev1.NamespaceDefault
    }

-    opts := metav1.ListOptions{Watch: true, FieldSelector: "metadata.name=" + w.deployName}
-    deploymentsWatcher, err := deploymentsClient.Watch(ctx, opts)
+    // Supported resource types are deployments, statefulsets and replicasets
+    // (all apps/v1).
+    resourceID := schema.GroupVersionResource{
+        Resource: w.params.Type,
+        Group:    "apps",
+        Version:  "v1",
+    }
+
+    if err := w.checkResourceExists(ctx, ns, resourceID); err != nil {
+        return err
+    }
+
+    opts := metav1.ListOptions{Watch: true, FieldSelector: "metadata.name=" + w.params.Name}
+    watcher, err := w.client.Untyped.Resource(resourceID).Namespace(ns).Watch(ctx, opts)
    if err != nil {
        return err
    }
-    defer deploymentsWatcher.Stop()
+    defer watcher.Stop()

    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()

-    resultChan := deploymentsWatcher.ResultChan()
+    resultChan := watcher.ResultChan()
    for {
        select {
        case evt, ok := <-resultChan:
@@ -106,8 +123,19 @@ func (w *Watcher) Watch(ctx context.Context) error {
            }
            switch evt.Type {
            case watch.Added, watch.Modified:
-                deployment := evt.Object.(*appsv1.Deployment)
-                w.addDeployment(ctx, deployment)
+                resource := evt.Object.(*unstructured.Unstructured)
+                uid := resource.GetUID()
+                // TODO: handle matchExpressions
+                selectorAsMap, ok, err := unstructured.NestedStringMap(resource.Object, "spec", "selector", "matchLabels")
+                if !ok || err != nil {
+                    // matchLabels don't exist or cannot be parsed.
+                    // Should this be fatal?
+                    log.Printf("warning: unable to parse matchLabels: ok = %t, err = %v", ok, err)
+                    continue
+                }
+                selector := labels.SelectorFromSet(selectorAsMap)
+                w.addDeployment(ctx, uid, selector)
+
            case watch.Deleted:
                w.removeDeployment()
            }
@@ -119,20 +147,31 @@ func (w *Watcher) Watch(ctx context.Context) error {
    }
}

-func (w *Watcher) addDeployment(ctx context.Context, deployment *appsv1.Deployment) {
-    if w.deployment != nil && w.deployment.UID == deployment.UID {
+func (w *Watcher) checkResourceExists(ctx context.Context, namespace string, resourceID schema.GroupVersionResource) error {
+    _, err := w.client.Untyped.Resource(resourceID).Namespace(namespace).Get(ctx, w.params.Name, metav1.GetOptions{})
+    var statusErr *apierrors.StatusError
+    if !w.params.StrictExist && errors.As(err, &statusErr) && statusErr.Status().Reason == metav1.StatusReasonNotFound {
+        log.Printf(`%s "%s" does not exist, waiting`, resourceID.Resource, w.params.Name)
+        return nil
+    }
+    return err
+}
+
+func (w *Watcher) addDeployment(ctx context.Context, resourceUID types.UID, podSelector labels.Selector) {
+    if w.resourceUID == resourceUID {
        return
    }

    w.removeDeployment()

-    log.Println("[DeploymentWatcher] add deployment")
+    log.Println("[DeploymentWatcher] add podWatcher")

-    w.deployment = deployment
+    w.resourceUID = resourceUID
+    w.podSelector = podSelector
    w.podWatcher = w.podWatcherFunc(
-        w.clientset,
-        w.container,
-        deployment.Spec.Selector,
+        w.client,
+        w.params.Container,
+        w.podSelector,
        w.dst,
    )
@@ -145,9 +184,10 @@ func (w *Watcher) addDeployment(ctx context.Context, deployment *appsv1.Deployment) {

func (w *Watcher) removeDeployment() {
    if w.podWatcher != nil {
-        log.Println("[DeploymentWatcher] remove deployment")
+        log.Println("[DeploymentWatcher] remove podWatcher")
        w.podWatcher.Close()
        w.podWatcher = nil
    }
-    w.deployment = nil
+    w.resourceUID = ""
+    w.podSelector = nil
}
@@ -12,10 +12,14 @@ import (
    "git.netflux.io/rob/kubectl-persistent-logger/logs"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
-    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apimachinery/pkg/labels"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/watch"
+    dynamicclient "k8s.io/client-go/dynamic/fake"
    testclient "k8s.io/client-go/kubernetes/fake"
    k8stest "k8s.io/client-go/testing"
)
@@ -26,17 +30,28 @@ func (m *mockPodWatcher) WatchPods(ctx context.Context) error { return m.err }
func (m *mockPodWatcher) Close() {}

func mockPodwatcherFunc(err error) logs.PodWatcherFunc {
-    return func(logs.KubernetesClient, string, *metav1.LabelSelector, io.Writer) logs.PodWatcherInterface {
+    return func(logs.KubernetesClient, string, labels.Selector, io.Writer) logs.PodWatcherInterface {
        return &mockPodWatcher{err: err}
    }
}

-func TestWatcherAllowNonExistent(t *testing.T) {
-    clientset := testclient.NewSimpleClientset()
+func buildDeployment(t *testing.T, name string) *unstructured.Unstructured {
+    deployment := new(unstructured.Unstructured)
+    deployment.SetAPIVersion("v1")
+    deployment.SetKind("deployment")
+    deployment.SetName("mydeployment")
+    deployment.SetNamespace("default")
+    deployment.SetUID(types.UID("foo"))
+    require.NoError(t, unstructured.SetNestedField(deployment.Object, map[string]any{"app": "myapp"}, "spec", "selector", "matchLabels"))
+    return deployment
+}
+
+func TestWatcherStrictExist(t *testing.T) {
+    client := logs.KubernetesClient{Untyped: dynamicclient.NewSimpleDynamicClient(runtime.NewScheme())}

    var buf bytes.Buffer
-    client := logs.KubernetesClient{Interface: clientset}
-    watcher := logs.NewWatcher("mydeployment", "mycontainer", true, client, mockPodwatcherFunc(nil), &buf)
+    params := logs.WatcherParams{Name: "mydeployment", Type: "deployments", Namespace: "default", StrictExist: true}
+    watcher := logs.NewWatcher(params, client, mockPodwatcherFunc(nil), &buf)

    err := watcher.Watch(context.Background())
    assert.EqualError(t, err, `deployments.apps "mydeployment" not found`)
@@ -44,19 +59,20 @@ func TestWatcherAllowNonExistent(t *testing.T) {

func TestWatcherPodWatcherError(t *testing.T) {
    deploymentsWatcher := watch.NewFake()
-    clientset := testclient.NewSimpleClientset(
-        &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "mydeployment", Namespace: "default"}},
-    )
-    clientset.PrependWatchReactor("deployments", k8stest.DefaultWatchReactor(deploymentsWatcher, nil))
+    untypedClient := dynamicclient.NewSimpleDynamicClient(runtime.NewScheme())
+    untypedClient.PrependWatchReactor("deployments", k8stest.DefaultWatchReactor(deploymentsWatcher, nil))
+    client := logs.KubernetesClient{Untyped: untypedClient}

    var buf bytes.Buffer
-    client := logs.KubernetesClient{Interface: clientset}
    wantErr := errors.New("foo")
-    watcher := logs.NewWatcher("mydeployment", "mycontainer", true, client, mockPodwatcherFunc(wantErr), &buf)
+    params := logs.WatcherParams{Name: "mydeployment", Type: "deployments", Namespace: "default"}
+    watcher := logs.NewWatcher(params, client, mockPodwatcherFunc(wantErr), &buf)

    go func() {
        defer deploymentsWatcher.Stop()
-        deployment := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "mydeployment", Namespace: "default"}}
+        deployment := buildDeployment(t, "mydeployment")
        deploymentsWatcher.Add(deployment)
    }()
@@ -66,15 +82,17 @@ func TestWatcherPodWatcherError(t *testing.T) {

func TestWatcherClosedChannel(t *testing.T) {
    deploymentsWatcher := watch.NewFake()
-    clientset := testclient.NewSimpleClientset(
-        &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "mydeployment", Namespace: "default"}},
-    )
-    clientset.PrependWatchReactor("deployments", k8stest.DefaultWatchReactor(deploymentsWatcher, nil))
+    untypedClient := dynamicclient.NewSimpleDynamicClient(runtime.NewScheme())
+    untypedClient.PrependWatchReactor("deployments", k8stest.DefaultWatchReactor(deploymentsWatcher, nil))
+    client := logs.KubernetesClient{Untyped: untypedClient}

    var buf bytes.Buffer
-    client := logs.KubernetesClient{Interface: clientset}
-    watcher := logs.NewWatcher("mydeployment", "mycontainer", true, client, nil, &buf)
-    go deploymentsWatcher.Stop()
+    params := logs.WatcherParams{Name: "mydeployment", Type: "deployments", Namespace: "default"}
+    watcher := logs.NewWatcher(params, client, nil, &buf)
+    // Immediately stop the watcher, which closes the ResultChan.
+    // This should be expected to be handled.
+    deploymentsWatcher.Stop()

    ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
    defer cancel()
@@ -90,31 +108,25 @@ func TestWatcherWithPodWatcher(t *testing.T) {

    deploymentsWatcher := watch.NewFake()
    defer deploymentsWatcher.Stop()

    podsWatcher := watch.NewFake()
    defer podsWatcher.Stop()

-    clientset := testclient.NewSimpleClientset()
-    clientset.PrependWatchReactor("pods", k8stest.DefaultWatchReactor(podsWatcher, nil))
-    clientset.PrependWatchReactor("deployments", k8stest.DefaultWatchReactor(deploymentsWatcher, nil))
+    typedClient := testclient.NewSimpleClientset()
+    typedClient.PrependWatchReactor("pods", k8stest.DefaultWatchReactor(podsWatcher, nil))
+    untypedClient := dynamicclient.NewSimpleDynamicClient(runtime.NewScheme())
+    untypedClient.PrependWatchReactor("deployments", k8stest.DefaultWatchReactor(deploymentsWatcher, nil))
+    client := logs.KubernetesClient{Typed: typedClient, Untyped: untypedClient}

    go func() {
-        deployment := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "mydeployment", Namespace: "default"}}
+        deployment := buildDeployment(t, "mydeployment")
        deploymentsWatcher.Add(deployment)
        time.Sleep(time.Millisecond * 250)

        pods := []*corev1.Pod{
-            {
-                ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
-                Status:     corev1.PodStatus{Phase: corev1.PodRunning},
-            },
-            {
-                ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "default"},
-                Status:     corev1.PodStatus{Phase: corev1.PodRunning},
-            },
-            {
-                ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "default"},
-                Status:     corev1.PodStatus{Phase: corev1.PodPending},
-            },
+            {ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}},
+            {ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodRunning}},
+            {ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "default"}, Status: corev1.PodStatus{Phase: corev1.PodPending}},
        }
        for _, pod := range pods {
            podsWatcher.Add(pod)
@@ -123,14 +135,14 @@ func TestWatcherWithPodWatcher(t *testing.T) {
    }()

    var buf bytes.Buffer
-    client := logs.KubernetesClient{Interface: clientset}
-    watcher := logs.NewWatcher("mydeployment", "mycontainer", false, client, logs.NewPodWatcher, &buf)
+    params := logs.WatcherParams{Name: "mydeployment", Type: "deployments", Namespace: "default"}
+    watcher := logs.NewWatcher(params, client, logs.NewPodWatcher, &buf)

    err := watcher.Watch(ctx)
    require.EqualError(t, err, context.DeadlineExceeded.Error())
    lines := bufToLines(&buf)
-    assert.Len(t, lines, 2)
-    assert.ElementsMatch(t, []string{"[foo] fake logs", "[bar] fake logs"}, bufToLines(&buf))
+    require.Len(t, lines, 2)
+    assert.ElementsMatch(t, []string{"[foo] fake logs", "[bar] fake logs"}, lines)
}

func bufToLines(buf *bytes.Buffer) []string {
main.go (45 changed lines)
@@ -7,21 +7,43 @@ import (
    "os"

    "git.netflux.io/rob/kubectl-persistent-logger/logs"
+    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/kubernetes"
    clientconfig "sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
    var (
-        deployName  *string
-        container   *string
-        strictExist *bool
+        container   string
+        strictExist bool
    )
-    deployName = flag.String("deployment", "", "name of a deployment to monitor")
-    container = flag.String("container", "", "name of a specific container")
-    strictExist = flag.Bool("strict-exist", false, "require deployment to exist on launch")
+    flag.StringVar(&container, "container", "", "name of a specific container")
+    flag.BoolVar(&strictExist, "strict", false, "require deployment to exist on launch")
    flag.Parse()

+    var params logs.WatcherParams
+
+    switch len(flag.Args()) {
+    case 1:
+        // TODO: handle type/name style
+        flag.Usage()
+        os.Exit(1)
+    case 2:
+        kind, err := logs.ParseType(flag.Arg(0))
+        if err != nil {
+            log.Fatal(err)
+        }
+        params = logs.WatcherParams{
+            Type:      kind,
+            Name:      flag.Arg(1),
+            Namespace: "default",
+        }
+    default:
+        flag.Usage()
+        os.Exit(1)
+    }
+
    cfg, err := clientconfig.GetConfig()
    if err != nil {
        log.Fatal(err)
@@ -32,12 +54,15 @@ func main() {
        log.Fatal(err)
    }

+    dynamicclient, err := dynamic.NewForConfig(cfg)
+    if err != nil {
+        log.Fatal(err)
+    }
+
    ctx := context.Background()
    watcher := logs.NewWatcher(
-        *deployName,
-        *container,
-        *strictExist,
-        logs.KubernetesClient{Interface: clientset},
+        params,
+        logs.KubernetesClient{Typed: clientset, Untyped: dynamicclient},
        logs.NewPodWatcher,
        os.Stdout,
    )
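With the new argument handling in main.go, the resource type and name are passed as positional arguments and normalized via ParseType, with -container and -strict as optional flags. Illustrative invocations (the binary name is assumed from the module path and is not stated in this diff):

kubectl-persistent-logger deployment mydeployment
kubectl-persistent-logger -container mycontainer sts mystatefulset
kubectl-persistent-logger -strict rs myreplicaset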