WIP: refactor/api #1
@@ -60,25 +60,6 @@ func (a *App) Run(ctx context.Context) error {
		return fmt.Errorf("create gRPC stream: %w", err)
	}

	g.Go(func() error {
		for {
			envelope, recErr := stream.Recv()
			if recErr != nil {
				return fmt.Errorf("receive envelope: %w", recErr)
			}

			pbEvt := envelope.GetEvent()
			if pbEvt == nil {
				a.logger.Error("Received envelope without event")
				continue
			}

			evt := protocol.EventFromProto(pbEvt)
			a.logger.Debug("Received event from gRPC stream", "event", evt)
			a.bus.Send(evt)
		}
	})

	ui, err := terminal.NewUI(ctx, terminal.Params{
		EventBus: a.bus,
		Dispatcher: func(cmd event.Command) {
@@ -99,9 +80,53 @@ func (a *App) Run(ctx context.Context) error {

	g.Go(func() error { return ui.Run(ctx) })

	// After the UI is available, perform a handshake with the server.
	// Ordering is important here. We want to ensure that the UI is ready to
	// react to events received from the server. Performing the handshake ensures
	// the client has received at least one event.
	if err := a.doHandshake(stream); err != nil {
		return fmt.Errorf("do handshake: %w", err)
	}

	g.Go(func() error {
		for {
			envelope, recErr := stream.Recv()
			if recErr != nil {
				return fmt.Errorf("receive envelope: %w", recErr)
			}

			pbEvt := envelope.GetEvent()
			if pbEvt == nil {
				a.logger.Error("Received envelope without event")
				continue
			}

			evt := protocol.EventFromProto(pbEvt)
			a.logger.Debug("Received event from gRPC stream", "event", evt.EventName(), "payload", evt)
			a.bus.Send(evt)
		}
	})

	if err := g.Wait(); err != nil && !errors.Is(err, terminal.ErrUserClosed) {
		return fmt.Errorf("errgroup.Wait: %w", err)
	}

	return nil
}

func (a *App) doHandshake(stream pb.InternalAPI_CommunicateClient) error {
	env, err := stream.Recv()
	if err != nil {
		return fmt.Errorf("receive ready event: %w", err)
	}

	if evt := env.GetEvent(); evt == nil || evt.GetInternalApiReady() == nil {
		return fmt.Errorf("expected ready event but got: %T", env)
	}

	if err = stream.Send(&pb.Envelope{Payload: &pb.Envelope_Command{Command: &pb.Command{CommandType: &pb.Command_StartInternalStream{}}}}); err != nil {
		return fmt.Errorf("send start command: %w", err)
	}

	return nil
}
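Note on the handshake ordering above: the exchange reduces to two envelopes, sent before any regular events flow. A minimal sketch using the pb types already shown in this diff (the package and helper names below are illustrative, not part of the change):

package sketch

import pb "git.netflux.io/rob/octoplex/internal/generated/grpc"

// serverReadyEnvelope is the first message the server sends on the stream.
func serverReadyEnvelope() *pb.Envelope {
	return &pb.Envelope{Payload: &pb.Envelope_Event{Event: &pb.Event{EventType: &pb.Event_InternalApiReady{}}}}
}

// clientStartStreamEnvelope is the client's reply that completes the handshake;
// only after this does the regular event stream begin.
func clientStartStreamEnvelope() *pb.Envelope {
	return &pb.Envelope{Payload: &pb.Envelope_Command{Command: &pb.Command{CommandType: &pb.Command_StartInternalStream{}}}}
}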
@@ -49,6 +49,7 @@ func buildClientServer(
	server := server.New(server.Params{
		ConfigService: configService,
		DockerClient:  dockerClient,
		WaitForClient: true,
		Logger:        logger,
	})

@@ -8,6 +8,7 @@ import (
	"crypto/tls"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
	"net"
	"os"
@@ -16,12 +17,15 @@ import (

	"git.netflux.io/rob/octoplex/internal/config"
	"git.netflux.io/rob/octoplex/internal/container"
	"git.netflux.io/rob/octoplex/internal/container/mocks"
	"git.netflux.io/rob/octoplex/internal/domain"
	"git.netflux.io/rob/octoplex/internal/testhelpers"
	"github.com/docker/docker/api/types/network"
	dockerclient "github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
	"github.com/gdamore/tcell/v2"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
@@ -861,6 +865,76 @@ func TestIntegrationMediaServerError(t *testing.T) {
	assert.ErrorContains(t, result.errServer, "media server exited")
}

func TestIntegrationDockerClientError(t *testing.T) {
	ctx, cancel := context.WithTimeout(t.Context(), 10*time.Minute)
	defer cancel()

	logger := testhelpers.NewTestLogger(t).With("component", "integration")

	var dockerClient mocks.DockerClient
	dockerClient.EXPECT().NetworkCreate(mock.Anything, mock.Anything, mock.Anything).Return(network.CreateResponse{}, errors.New("boom"))

	configService := setupConfigService(t, config.Config{Sources: config.Sources{MediaServer: config.MediaServerSource{RTMP: config.RTMPSource{Enabled: true}}}})
	screen, screenCaptureC, getContents := setupSimulationScreen(t)

	client, server := buildClientServer(configService, &dockerClient, screen, screenCaptureC, logger)
	ch := runClientServer(ctx, t, client, server)

	require.EventuallyWithT(
		t,
		func(c *assert.CollectT) {
			assert.True(c, contentsIncludes(getContents(), "An error occurred:"), "expected to see error message")
			assert.True(c, contentsIncludes(getContents(), "create container client: network create: boom"), "expected to see message")
		},
		waitTime,
		time.Second,
		"expected to see fatal error modal",
	)
	printScreen(t, getContents, "After displaying the fatal error modal")

	// Quit the app:
	sendKey(t, screen, tcell.KeyEnter, ' ')

	result := <-ch
	assert.ErrorContains(t, result.errClient, "context canceled")
	assert.EqualError(t, result.errServer, "create container client: network create: boom")
}

func TestIntegrationDockerConnectionError(t *testing.T) {
	ctx, cancel := context.WithTimeout(t.Context(), 10*time.Minute)
	defer cancel()

	logger := testhelpers.NewTestLogger(t).With("component", "integration")
	dockerClient, err := dockerclient.NewClientWithOpts(dockerclient.WithHost("http://docker.example.com"))
	require.NoError(t, err)

	configService := setupConfigService(t, config.Config{Sources: config.Sources{MediaServer: config.MediaServerSource{RTMP: config.RTMPSource{Enabled: true}}}})
	screen, screenCaptureC, getContents := setupSimulationScreen(t)

	client, server := buildClientServer(configService, dockerClient, screen, screenCaptureC, logger)
	ch := runClientServer(ctx, t, client, server)

	require.EventuallyWithT(
		t,
		func(c *assert.CollectT) {
			assert.True(c, contentsIncludes(getContents(), "An error occurred:"), "expected to see error message")
			assert.True(c, contentsIncludes(getContents(), "Could not connect to Docker. Is Docker installed"), "expected to see message")
		},
		waitTime,
		time.Second,
		"expected to see fatal error modal",
	)
	printScreen(t, getContents, "After displaying the fatal error modal")

	// Quit the app:
	sendKey(t, screen, tcell.KeyEnter, ' ')

	result := <-ch
	assert.ErrorContains(t, result.errClient, "context canceled")
	assert.ErrorContains(t, result.errServer, "dial tcp: lookup docker.example.com")
	assert.ErrorContains(t, result.errServer, "no such host")
}

func TestIntegrationCopyURLs(t *testing.T) {
	type binding struct {
		key tcell.Key
File diff suppressed because it is too large
@@ -7,6 +7,7 @@ import (
	"io"
	"log/slog"
	"sync"
	"time"

	"git.netflux.io/rob/octoplex/internal/event"
	pb "git.netflux.io/rob/octoplex/internal/generated/grpc"
@@ -29,6 +30,7 @@ type Server struct {

	mu          sync.Mutex
	clientCount int
	clientC     chan struct{}
}

// newServer creates a new gRPC server.
@@ -40,6 +42,7 @@ func newServer(
	return &Server{
		dispatcher: dispatcher,
		bus:        bus,
		clientC:    make(chan struct{}, 1),
		logger:     logger.With("component", "server"),
	}
}
@@ -47,6 +50,24 @@
func (s *Server) Communicate(stream pb.InternalAPI_CommunicateServer) error {
	g, ctx := errgroup.WithContext(stream.Context())

	// perform handshake:
	if err := stream.Send(&pb.Envelope{Payload: &pb.Envelope_Event{Event: &pb.Event{EventType: &pb.Event_InternalApiReady{}}}}); err != nil {
		return fmt.Errorf("send ready event: %w", err)
	}
	startStreamCmd, err := stream.Recv()
	if err != nil {
		return fmt.Errorf("receive start stream command: %w", err)
	}
	if startStreamCmd.GetCommand() == nil || startStreamCmd.GetCommand().GetStartInternalStream() == nil {
		return fmt.Errorf("expected start stream command but got: %T", startStreamCmd)
	}

	// Notify that a client has connected and completed the handshake.
	select {
	case s.clientC <- struct{}{}:
	default:
	}

	g.Go(func() error {
		eventsC := s.bus.Register()
		defer s.bus.Deregister(eventsC)
@@ -113,3 +134,18 @@ func (s *Server) GetClientCount() int {

	return s.clientCount
}

const waitForClientTimeout = 10 * time.Second

// WaitForClient waits for _any_ client to connect and complete the handshake.
// It times out if no client has connected after 10 seconds.
func (s *Server) WaitForClient(ctx context.Context) error {
	select {
	case <-s.clientC:
		return nil
	case <-time.After(waitForClientTimeout):
		return errors.New("timeout")
	case <-ctx.Done():
		return ctx.Err()
	}
}
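Aside on the clientC wiring: because clientC is created with a buffer of one and Communicate uses a non-blocking send, the "client connected" signal is retained even if WaitForClient only starts listening after the handshake has finished, and further clients connecting never block their Communicate calls. A self-contained sketch of the same pattern (names here are illustrative, not part of the diff):

package main

import (
	"errors"
	"fmt"
	"time"
)

// notifyOnce mirrors the non-blocking send in Communicate: it records that at
// least one client has connected, without blocking if nobody is waiting yet.
func notifyOnce(clientC chan struct{}) {
	select {
	case clientC <- struct{}{}:
	default: // a signal is already buffered; nothing to do
	}
}

// waitForClient mirrors Server.WaitForClient: it returns once a signal has
// been buffered, or errors out after the timeout.
func waitForClient(clientC chan struct{}, timeout time.Duration) error {
	select {
	case <-clientC:
		return nil
	case <-time.After(timeout):
		return errors.New("timeout")
	}
}

func main() {
	clientC := make(chan struct{}, 1) // buffer of 1, as in newServer

	notifyOnce(clientC) // "handshake" completes before anyone is waiting
	notifyOnce(clientC) // a second client does not block

	// The earlier signal is still buffered and is consumed here.
	fmt.Println(waitForClient(clientC, time.Second)) // <nil>
}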
@@ -1 +0,0 @@
package server
@@ -29,6 +29,7 @@ type App struct {
	eventBus      *event.Bus
	dispatchC     chan event.Command
	dockerClient  container.DockerClient
	waitForClient bool
	logger        *slog.Logger
}

@@ -38,6 +39,7 @@ type Params struct {
	DockerClient   container.DockerClient
	ChanSize       int
	ConfigFilePath string
	WaitForClient  bool
	Logger         *slog.Logger
}

@@ -52,6 +54,7 @@ func New(params Params) *App {
		eventBus:      event.NewBus(params.Logger.With("component", "event_bus")),
		dispatchC:     make(chan event.Command, cmp.Or(params.ChanSize, defaultChanSize)),
		dockerClient:  params.DockerClient,
		waitForClient: params.WaitForClient,
		logger:        params.Logger,
	}
}
@@ -83,6 +86,12 @@ func (a *App) Run(ctx context.Context) error {
		grpcDone <- grpcServer.Serve(lis)
	}()

	if a.waitForClient {
		if err = internalAPI.WaitForClient(ctx); err != nil {
			return fmt.Errorf("wait for client: %w", err)
		}
	}

// emptyUI is a dummy function that sets the UI state to an empty state, and
// re-renders the screen.
//
main.go
@@ -32,12 +32,16 @@ var (
	date    string
)

// errInterrupt is an error type that indicates an interrupt signal was
// received.
type errInterrupt struct{}

// Error implements the error interface.
func (e errInterrupt) Error() string {
	return "interrupt signal received"
}

// ExitCode implements the ExitCoder interface.
func (e errInterrupt) ExitCode() int {
	return 130
}
@@ -50,7 +54,6 @@ func main() {
		{
			Name:  "client",
			Usage: "Run the client",
			Flags: []cli.Flag{ /* client flags */ },
			Action: func(c *cli.Context) error {
				return runClient(c.Context, c)
			},
@@ -58,15 +61,17 @@
		{
			Name:  "server",
			Usage: "Run the server",
			Flags: []cli.Flag{ /* server flags */ },
			Action: func(c *cli.Context) error {
				return runServer(c.Context, c, serverConfig{stderrAvailable: true, handleSigInt: true})
				return runServer(c.Context, c, serverConfig{
					stderrAvailable: true,
					handleSigInt:    true,
					waitForClient:   false,
				})
			},
		},
		{
			Name:  "run",
			Usage: "Run server and client together (testing)",
			Flags: []cli.Flag{ /* optional combined flags */ },
			Action: func(c *cli.Context) error {
				return runClientAndServer(c)
			},
@@ -124,6 +129,7 @@ func runClient(ctx context.Context, _ *cli.Context) error {
type serverConfig struct {
	stderrAvailable bool
	handleSigInt    bool
	waitForClient   bool
}

func runServer(ctx context.Context, _ *cli.Context, serverCfg serverConfig) error {
@@ -187,6 +193,7 @@ func runServer(ctx context.Context, _ *cli.Context, serverCfg serverConfig) erro
		ConfigService:  configService,
		DockerClient:   dockerClient,
		ConfigFilePath: configService.Path(),
		WaitForClient:  serverCfg.waitForClient,
		Logger:         logger,
	})

@@ -226,7 +233,11 @@ func runClientAndServer(c *cli.Context) error {
	})

	g.Go(func() error {
		if err := runServer(ctx, c, serverConfig{stderrAvailable: false, handleSigInt: false}); err != nil {
		if err := runServer(ctx, c, serverConfig{
			stderrAvailable: false,
			handleSigInt:    false,
			waitForClient:   true,
		}); err != nil {
			return err
		}

@@ -25,6 +25,7 @@ message Command {
    StopDestinationCommand stop_destination = 4;
    CloseOtherInstancesCommand close_other_instances = 5;
    QuitCommand quit = 6;
    StartInternalStreamCommand start_internal_stream = 7;
  }
}

@@ -49,6 +50,8 @@ message CloseOtherInstancesCommand {}

message QuitCommand {}

message StartInternalStreamCommand {}

message Event {
  oneof event_type {
    AppStateChangedEvent app_state_changed = 1;
@@ -61,6 +64,7 @@ message Event {
    MediaServerStartedEvent media_server_started = 8;
    OtherInstanceDetectedEvent other_instance_detected = 9;
    FatalErrorEvent fatal_error = 10;
    InternalAPIReadyEvent internal_api_ready = 11;
  }
}

@@ -158,3 +162,5 @@ message OtherInstanceDetectedEvent {}
message FatalErrorEvent {
  string message = 1;
}

message InternalAPIReadyEvent {}