Add video and thumbnail display
parent ce3817dab8
commit b64ce1d424
@@ -1,6 +1,7 @@
 package main
 
 import (
+    "context"
     "encoding/json"
     "log"
     "net/http"
@@ -30,7 +31,7 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
     w.Header().Add("Content-Type", ContentTypeApplicationJSON)
     w.Header().Add("Access-Control-Allow-Origin", "*")
 
-    if strings.HasPrefix(r.URL.Path, "/api/download") {
+    if strings.HasPrefix(r.URL.Path, "/api/media_sets") {
         videoID := r.URL.Query().Get("video_id")
         if videoID == "" {
             w.WriteHeader(http.StatusBadRequest)
@@ -54,7 +55,8 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
         var err error
         var youtubeClient youtubev2.Client
         downloader := youtube.NewDownloader(&youtubeClient)
-        mediaSet, err = downloader.Download(videoID)
+        log.Printf("background context = %p, req context = %p", context.Background(), r.Context())
+        mediaSet, err = downloader.Download(r.Context(), videoID)
         if err != nil {
             log.Printf("error downloading MediaSet: %v", err)
             w.WriteHeader(http.StatusInternalServerError)
@@ -75,12 +77,6 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
     if strings.HasPrefix(r.URL.Path, "/api/audio") {
         log.Printf("got headers for audio request: %+v", r.Header)
         videoID := r.URL.Query().Get("video_id")
-        if videoID == "" {
-            w.WriteHeader(http.StatusBadRequest)
-            w.Write([]byte(`{"error": "no video ID provided"}`))
-            return
-        }
-
         mediaSet := media.MediaSet{ID: videoID}
         if err := mediaSet.Load(); err != nil {
             log.Printf("error loading MediaSet: %v", err)
@@ -89,11 +85,44 @@ func handleRequest(w http.ResponseWriter, r *http.Request) {
             return
         }
 
+        // TODO: ensure content-type matches the actual downloaded format.
+        w.Header().Set("Content-Type", "audio/webm")
         http.ServeFile(w, r, mediaSet.EncodedAudioPath())
 
         return
     }
 
+    if strings.HasPrefix(r.URL.Path, "/api/video") {
+        videoID := r.URL.Query().Get("video_id")
+        mediaSet := media.MediaSet{ID: videoID}
+        if err := mediaSet.Load(); err != nil {
+            log.Printf("error loading MediaSet: %v", err)
+            w.WriteHeader(http.StatusInternalServerError)
+            w.Write([]byte(`{"error": "could not fetch media"}`))
+            return
+        }
+
+        http.ServeFile(w, r, mediaSet.VideoPath())
+
+        return
+    }
+
+    if strings.HasPrefix(r.URL.Path, "/api/thumbnails") {
+        videoID := r.URL.Query().Get("video_id")
+        mediaSet := media.MediaSet{ID: videoID}
+        if err := mediaSet.Load(); err != nil {
+            log.Printf("error loading MediaSet: %v", err)
+            w.WriteHeader(http.StatusInternalServerError)
+            w.Write([]byte(`{"error": "could not fetch media"}`))
+            return
+        }
+
+        w.Header().Set("Content-Type", "image/jpeg")
+        http.ServeFile(w, r, mediaSet.ThumbnailPath())
+
+        return
+    }
+
     if strings.HasPrefix(r.URL.Path, "/api/peaks") {
         videoID := r.URL.Query().Get("video_id")
         if videoID == "" {
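
Note (illustration only, not part of the diff): with this change the handler exposes per-video routes for metadata (/api/media_sets), audio (/api/audio), the video file (/api/video), the thumbnail sheet (/api/thumbnails) and waveform peaks (/api/peaks). A rough sketch of how a client could consume the metadata endpoint, assuming the server listens on localhost:8888 (the port the frontend below fetches from); the struct tags mirror the JSON tags added to the media package, and "someVideoID" is a placeholder:

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

// mediaSetResponse mirrors the JSON shape of media.MediaSet as serialized by
// this commit; only the fields used here are declared.
type mediaSetResponse struct {
    ID    string `json:"id"`
    Audio struct {
        Channels   int   `json:"channels"`
        Frames     int64 `json:"frames"`
        SampleRate int   `json:"sample_rate"`
    } `json:"audio"`
    Video struct {
        Duration int64 `json:"duration"` // time.Duration: integer nanoseconds
    } `json:"video"`
}

func main() {
    resp, err := http.Get("http://localhost:8888/api/media_sets?video_id=someVideoID")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var ms mediaSetResponse
    if err := json.NewDecoder(resp.Body).Decode(&ms); err != nil {
        panic(err)
    }
    fmt.Printf("%d frames at %d Hz, duration %ds\n",
        ms.Audio.Frames, ms.Audio.SampleRate, ms.Video.Duration/1000000000)
}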

@@ -6,30 +6,50 @@ import (
     "errors"
     "fmt"
     "io"
+    "log"
     "os"
+    "time"
 )
 
 const SizeOfInt16 = 2
 
-// MediaSet represents the media and metadata associated with a single media
-// resource (for example, a YouTube video).
-type MediaSet struct {
-    ID         string `json:"id"`
-    Source     string `json:"source"`
+type Audio struct {
     Bytes      int64  `json:"bytes"`
     Channels   int    `json:"channels"`
     Frames     int64  `json:"frames"`
     SampleRate int    `json:"sample_rate"`
+}
+
+type Video struct {
+    Bytes           int64         `json:"bytes"`
+    Duration        time.Duration `json:"duration"`
+    ThumbnailWidth  int           `json:"thumbnail_width"`
+    ThumbnailHeight int           `json:"thumbnail_height"`
+}
+
+// MediaSet represents the media and metadata associated with a single media
+// resource (for example, a YouTube video).
+type MediaSet struct {
+    Audio  Audio  `json:"audio"`
+    Video  Video  `json:"video"`
+
+    ID     string `json:"id"`
+    Source string `json:"source"`
+
     exists bool
 }
 
+// TODO: pass io.Readers/Writers instead of strings.
 func (m *MediaSet) RawAudioPath() string     { return fmt.Sprintf("cache/%s.raw", m.ID) }
 func (m *MediaSet) EncodedAudioPath() string { return fmt.Sprintf("cache/%s.m4a", m.ID) }
-func (m *MediaSet) VideoPath() string        { return fmt.Sprintf("cache/%s.mp4", m.ID) }
+func (m *MediaSet) VideoPath() string        { return fmt.Sprintf("cache/%s.webm", m.ID) }
+func (m *MediaSet) ThumbnailPath() string    { return fmt.Sprintf("cache/%s.jpg", m.ID) }
 func (m *MediaSet) MetadataPath() string     { return fmt.Sprintf("cache/%s.json", m.ID) }
 
 func (m *MediaSet) Exists() bool {
+    if m.ID == "" {
+        return false
+    }
     if m.exists {
         return true
     }
@@ -41,6 +61,10 @@ func (m *MediaSet) Exists() bool {
 }
 
 func (m *MediaSet) Load() error {
+    if m.ID == "" {
+        return errors.New("error opening mediaset with blank ID")
+    }
+
     metadataFile, err := os.Open(m.MetadataPath())
     if err != nil {
         return fmt.Errorf("error opening metadata file: %v", err)
@@ -65,7 +89,7 @@ func (m *MediaSet) Peaks(start, end int64, numBins int) ([][]int16, error) {
     }
     defer fptr.Close()
 
-    startByte := start * int64(m.Channels) * SizeOfInt16
+    startByte := start * int64(m.Audio.Channels) * SizeOfInt16
     if _, err = fptr.Seek(startByte, io.SeekStart); err != nil {
         return nil, fmt.Errorf("audio seek error: %v", err)
     }
@@ -73,12 +97,12 @@ func (m *MediaSet) Peaks(start, end int64, numBins int) ([][]int16, error) {
     numFrames := end - start
    framesPerBin := numFrames / int64(numBins)
 
-    peaks := make([][]int16, m.Channels)
-    for i := 0; i < m.Channels; i++ {
+    peaks := make([][]int16, m.Audio.Channels)
+    for i := 0; i < m.Audio.Channels; i++ {
         peaks[i] = make([]int16, numBins)
     }
 
-    samples := make([]int16, framesPerBin*int64(m.Channels))
+    samples := make([]int16, framesPerBin*int64(m.Audio.Channels))
 
     for binNum := 0; binNum < numBins; binNum++ {
         if err := binary.Read(fptr, binary.LittleEndian, samples); err != nil {
@@ -88,12 +112,13 @@ func (m *MediaSet) Peaks(start, end int64, numBins int) ([][]int16, error) {
             if samp < 0 {
                samp = -samp
             }
-            chanIndex := i % m.Channels
+            chanIndex := i % m.Audio.Channels
             if samp > peaks[chanIndex][binNum] {
                peaks[chanIndex][binNum] = samp
             }
         }
     }
 
+    log.Println("finished generating peaks")
     return peaks, nil
 }
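
Note (illustration only, not part of the diff): the Peaks changes above are mechanical; the byte arithmetic is unchanged and simply reads the channel count from the new Audio sub-struct. The raw cache file is interleaved signed 16-bit PCM (SizeOfInt16 = 2), so seeking to frame start means seeking to start * channels * 2 bytes: for stereo audio, frame 48000 sits at 48000 * 2 * 2 = 192000 bytes. Each bin then reads framesPerBin * channels samples and keeps the per-channel maximum of the absolute sample values.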

@@ -2,14 +2,18 @@ package youtube
 
 import (
     "bytes"
+    "context"
     "encoding/json"
     "errors"
     "fmt"
     "io"
     "log"
+    "math"
     "os"
     "os/exec"
     "strconv"
+    "sync"
+    "time"
 
     "git.netflux.io/rob/clipper/media"
 
@@ -22,12 +26,16 @@ const (
     EncodedAudioCodec      = "pcm_s16le"
     EncodedAudioFormat     = "s16le"
     EncodedAudioSampleRate = 48000
+
+    thumbnailWidth  = 30
+    thumbnailHeight = 100
+    videoItag       = 18
 )
 
 // YoutubeClient wraps the youtube.Client client.
 type YoutubeClient interface {
-    GetVideo(string) (*youtubev2.Video, error)
-    GetStream(*youtubev2.Video, *youtubev2.Format) (io.ReadCloser, int64, error)
+    GetVideoContext(context.Context, string) (*youtubev2.Video, error)
+    GetStreamContext(context.Context, *youtubev2.Video, *youtubev2.Format) (io.ReadCloser, int64, error)
 }
 
 // Downloader downloads a set of Youtube media for a given video ID, including
@@ -41,85 +49,67 @@ func NewDownloader(youtubeClient YoutubeClient) *Downloader {
     return &Downloader{youtubeClient: youtubeClient}
 }
 
+type audioResult struct {
+    *media.Audio
+    err error
+}
+
+// videoMediaSet represents the video part of a media.MediaSet:
+type videoMediaSet struct {
+    bytes int64
+}
+
+type videoResult struct {
+    *media.Video
+    err error
+}
+
 // Download downloads the relevant audio and video files for the provided
 // Youtube video ID. If successful, a *media.MediaSet struct containing
 // metadata about the downloaded items is returned.
-func (d *Downloader) Download(videoID string) (*media.MediaSet, error) {
-    video, err := d.youtubeClient.GetVideo(videoID)
+func (d *Downloader) Download(ctx context.Context, videoID string) (*media.MediaSet, error) {
+    var video *youtubev2.Video
+    video, err := d.youtubeClient.GetVideoContext(ctx, videoID)
     if err != nil {
         return nil, fmt.Errorf("error fetching video: %v", err)
     }
 
-    // TODO: improve selection of audio and video format.
-    // Perhaps download both separately?
-    var format *youtubev2.Format
-    for i := range video.Formats {
-        candidate := video.Formats[i]
-        if candidate.FPS == 0 || candidate.AudioChannels == 0 {
-            continue
-        }
-        if format == nil || (candidate.ContentLength > 0 && candidate.ContentLength < format.ContentLength) {
-            format = &candidate
-        }
-    }
-    if format == nil {
-        return nil, errors.New("error selecting format: no format available")
-    }
-    log.Printf("selected format: %+v", format)
-
-    stream, _, err := d.youtubeClient.GetStream(video, format)
-    if err != nil {
-        return nil, fmt.Errorf("error fetching stream: %v", err)
-    }
-
     mediaSet := media.MediaSet{ID: videoID, Source: "youtube"}
 
-    rawAudioFile, err := os.Create(mediaSet.RawAudioPath())
-    if err != nil {
-        return nil, fmt.Errorf("error creating raw audio file: %v", err)
+    audioResultChan := make(chan audioResult, 1)
+    videoResultChan := make(chan videoResult, 1)
+
+    var wg sync.WaitGroup
+    wg.Add(2)
+    go func() {
+        defer close(audioResultChan)
+        audio, audioErr := d.downloadAudio(ctx, video, mediaSet.EncodedAudioPath(), mediaSet.RawAudioPath())
+        result := audioResult{audio, audioErr}
+        audioResultChan <- result
+        wg.Done()
+    }()
+    go func() {
+        defer close(videoResultChan)
+        video, videoErr := d.downloadVideo(ctx, video, mediaSet.VideoPath(), mediaSet.ThumbnailPath())
+        result := videoResult{video, videoErr}
+        videoResultChan <- result
+        wg.Done()
+    }()
+
+    wg.Wait()
+
+    audioResult := <-audioResultChan
+    videoResult := <-videoResultChan
+
+    if err = audioResult.err; err != nil {
+        return nil, fmt.Errorf("error downloading audio: %v", err)
+    }
+    if err = videoResult.err; err != nil {
+        return nil, fmt.Errorf("error downloading video: %v", err)
     }
 
-    encodedAudioFile, err := os.Create(mediaSet.EncodedAudioPath())
-    if err != nil {
-        return nil, fmt.Errorf("error creating encoded audio file: %v", err)
-    }
-    streamReader := io.TeeReader(stream, encodedAudioFile)
-
-    var errOut bytes.Buffer
-    cmd := exec.Command("ffmpeg", "-i", "-", "-f", EncodedAudioFormat, "-ar", strconv.Itoa(EncodedAudioSampleRate), "-acodec", EncodedAudioCodec, "-")
-    cmd.Stdin = streamReader
-    cmd.Stdout = rawAudioFile
-    cmd.Stderr = &errOut
-
-    if err = cmd.Run(); err != nil {
-        log.Println(errOut.String())
-        return nil, fmt.Errorf("error processing audio: %v", err)
-    }
-
-    if err = rawAudioFile.Close(); err != nil {
-        return nil, fmt.Errorf("error writing raw audio file: %v", err)
-    }
-
-    rawAudioFile, err = os.Open(mediaSet.RawAudioPath())
-    if err != nil {
-        return nil, fmt.Errorf("error reading file: %v", err)
-    }
-
-    fi, err := rawAudioFile.Stat()
-    if err != nil {
-        return nil, fmt.Errorf("error reading file info: %v", err)
-    }
-
-    numFrames := fi.Size() / int64(SizeOfInt16) / int64(format.AudioChannels)
-    sampleRate, err := strconv.Atoi(format.AudioSampleRate)
-    if err != nil {
-        return nil, fmt.Errorf("invalid samplerate: %s", format.AudioSampleRate)
-    }
-
-    mediaSet.Bytes = fi.Size()
-    mediaSet.Channels = format.AudioChannels
-    mediaSet.Frames = numFrames
-    mediaSet.SampleRate = sampleRate
+    mediaSet.Audio = *audioResult.Audio
+    mediaSet.Video = *videoResult.Video
 
     metadataFile, err := os.Create(mediaSet.MetadataPath())
     if err != nil {
@@ -134,5 +124,126 @@ func (d *Downloader) Download(videoID string) (*media.MediaSet, error) {
         return nil, fmt.Errorf("error writing metadata file: %v", err)
     }
 
+    log.Println("finished downloading mediaset")
+
     return &mediaSet, nil
 }
+
+func (d *Downloader) downloadAudio(ctx context.Context, video *youtubev2.Video, outPath, rawOutPath string) (*media.Audio, error) {
+    var format *youtubev2.Format
+    for _, candidate := range video.Formats.WithAudioChannels() {
+        if format == nil || (candidate.ContentLength > 0 && candidate.ContentLength < format.ContentLength) {
+            candidate := candidate
+            format = &candidate
+        }
+    }
+    if format == nil {
+        return nil, errors.New("error selecting audio format: no format available")
+    }
+    log.Printf("selected audio format: %+v", format)
+
+    stream, _, err := d.youtubeClient.GetStreamContext(ctx, video, format)
+    if err != nil {
+        return nil, fmt.Errorf("error fetching audio stream: %v", err)
+    }
+
+    rawAudioFile, err := os.Create(rawOutPath)
+    if err != nil {
+        return nil, fmt.Errorf("error creating raw audio file: %v", err)
+    }
+
+    encodedAudioFile, err := os.Create(outPath)
+    if err != nil {
+        return nil, fmt.Errorf("error creating encoded audio file: %v", err)
+    }
+    streamReader := io.TeeReader(stream, encodedAudioFile)
+
+    var errOut bytes.Buffer
+    cmd := exec.CommandContext(ctx, "ffmpeg", "-i", "-", "-f", EncodedAudioFormat, "-ar", strconv.Itoa(EncodedAudioSampleRate), "-acodec", EncodedAudioCodec, "-")
+    cmd.Stdin = streamReader
+    cmd.Stdout = rawAudioFile
+    cmd.Stderr = &errOut
+
+    if err = cmd.Run(); err != nil {
+        log.Println(errOut.String())
+        return nil, fmt.Errorf("error processing audio: %v", err)
+    }
+
+    if err = rawAudioFile.Close(); err != nil {
+        return nil, fmt.Errorf("error writing raw audio file: %v", err)
+    }
+
+    rawAudioFile, err = os.Open(rawOutPath)
+    if err != nil {
+        return nil, fmt.Errorf("error opening raw audio file: %v", err)
+    }
+
+    fi, err := rawAudioFile.Stat()
+    if err != nil {
+        return nil, fmt.Errorf("error reading raw audio file info: %v", err)
+    }
+
+    numFrames := fi.Size() / int64(SizeOfInt16) / int64(format.AudioChannels)
+    sampleRate, err := strconv.Atoi(format.AudioSampleRate)
+    if err != nil {
+        return nil, fmt.Errorf("invalid samplerate: %s", format.AudioSampleRate)
+    }
+
+    return &media.Audio{
+        Bytes:      fi.Size(),
+        Channels:   format.AudioChannels,
+        Frames:     numFrames,
+        SampleRate: sampleRate,
+    }, nil
+}
+
+func thumbnailGridSize(seconds int) (int, int) {
+    x := int(math.Floor(math.Sqrt(float64(seconds))))
+    if x*x < seconds {
+        return x + 1, x
+    }
+    return x, x
+}
+
+func (d *Downloader) downloadVideo(ctx context.Context, video *youtubev2.Video, outPath, thumbnailOutPath string) (*media.Video, error) {
+    // TODO: check if iTag 18 always exists, and works in a good variety of browsers.
+    format := video.Formats.FindByItag(videoItag)
+    log.Printf("selected video format: %+v", format)
+
+    stream, _, err := d.youtubeClient.GetStreamContext(ctx, video, format)
+    if err != nil {
+        return nil, fmt.Errorf("error fetching video stream: %v", err)
+    }
+
+    videoFile, err := os.Create(outPath)
+    if err != nil {
+        return nil, fmt.Errorf("error creating video file: %v", err)
+    }
+
+    streamReader := io.TeeReader(stream, videoFile)
+    durationMsecs, err := strconv.Atoi(format.ApproxDurationMs)
+    if err != nil {
+        return nil, fmt.Errorf("could not parse video duration: %s", err)
+    }
+    durationSecs := durationMsecs / 1000
+    gridSizeX, gridSizeY := thumbnailGridSize(durationSecs)
+
+    var errOut bytes.Buffer
+    cmd := exec.CommandContext(ctx, "ffmpeg", "-i", "-", "-vf", fmt.Sprintf("fps=1,scale=-1:110,crop=%d:%d,tile=%dx%d", thumbnailWidth, thumbnailHeight, gridSizeX, gridSizeY), "-f", "image2pipe", "-vsync", "0", thumbnailOutPath)
+    cmd.Stdin = streamReader
+    cmd.Stderr = &errOut
+
+    if err = cmd.Run(); err != nil {
+        log.Println(errOut.String())
+        return nil, fmt.Errorf("error processing video: %v", err)
+    }
+
+    duration := time.Duration(durationMsecs) * time.Millisecond
+
+    return &media.Video{
+        Bytes:           format.ContentLength,
+        ThumbnailWidth:  thumbnailWidth,
+        ThumbnailHeight: thumbnailHeight,
+        Duration:        duration,
+    }, nil
+}
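
Note (illustration only, not part of the diff): downloadVideo builds the thumbnail sheet by piping the video stream through ffmpeg's fps=1, scale, crop and tile filters, producing one thumbnailWidth x thumbnailHeight image per second of video packed into a gridSizeX x gridSizeY mosaic. The grid sizing is the thumbnailGridSize function from the hunk above; a standalone sketch of it, with a couple of worked examples:

package main

import (
    "fmt"
    "math"
)

// thumbnailGridSize is copied from the diff above: it picks a roughly square
// grid. For 90 seconds, sqrt(90) ~ 9.49 so x = 9, and since 9*9 < 90 the grid
// becomes 10x9 = 90 cells, one cell per fps=1 frame in that example.
func thumbnailGridSize(seconds int) (int, int) {
    x := int(math.Floor(math.Sqrt(float64(seconds))))
    if x*x < seconds {
        return x + 1, x
    }
    return x, x
}

func main() {
    for _, secs := range []int{30, 90, 100} {
        x, y := thumbnailGridSize(secs)
        fmt.Printf("%3d seconds -> %dx%d grid (%d cells)\n", secs, x, y, x*y)
    }
}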

@@ -1,5 +1,6 @@
 import { useEffect, useState, useRef, MouseEvent } from 'react';
 import { Waveform as WaveformOverview } from './Waveform/Overview';
+import { Thumbnails } from './Waveform/Thumbnails';
 import { Canvas as WaveformCanvas } from './Waveform/Canvas';
 import {
   secsToCanvasX,
@@ -11,13 +12,30 @@ type Props = {
   audioContext: AudioContext;
 };
 
-export type AudioFile = {
+// Audio corresponds to media.Audio.
+export type Audio = {
   bytes: number;
   channels: number;
   frames: number;
   sampleRate: number;
 };
 
+// Video corresponds to media.Video.
+export type Video = {
+  bytes: number;
+  thumbnailWidth: number;
+  thumbnailHeight: number;
+  durationMillis: number;
+};
+
+// MediaSet corresponds to media.MediaSet.
+export type MediaSet = {
+  id: string;
+  source: string;
+  audio: Audio;
+  video: Video;
+};
+
 export type Selection = {
   x1: number;
   x2: number;
@@ -34,14 +52,15 @@ export const CanvasLogicalWidth = 2000;
 export const CanvasLogicalHeight = 500;
 
 export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
-  const [audioFile, setAudioFile] = useState<AudioFile | null>(null);
+  const [mediaSet, setMediaSet] = useState<MediaSet | null>(null);
   const [currentTime, setCurrentTime] = useState(0);
-  // TODO: fix linter error
+  // TODO: extract to player component.
   const [audio, setAudio] = useState(new Audio());
   const [zoomSettings, setZoomSettings] = useState(defaultZoomSettings);
   const [waveformPeaks, setWaveformPeaks] = useState(null);
   const [overviewPeaks, setOverviewPeaks] = useState(null);
   const hudCanvasRef = useRef<HTMLCanvasElement>(null);
+  const videoRef = useRef<HTMLVideoElement>(null);
 
   // TODO: error handling
   const videoID = new URLSearchParams(window.location.search).get('video_id');
@@ -51,60 +70,88 @@ export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
   // setup player on page load:
   useEffect(() => {
     (async function () {
-      audio.addEventListener('timeupdate', () => {
-        setCurrentTime(audio.currentTime);
+      const video = videoRef.current;
+      if (video == null) {
+        return;
+      }
+
+      video.addEventListener('timeupdate', () => {
+        setCurrentTime(video.currentTime);
       });
     })();
-  }, [audio]);
+  });
 
-  // fetch audio data on page load:
+  // fetch mediaset on page load:
   useEffect(() => {
     (async function () {
-      console.log('fetching audio data...');
+      console.log('fetching media...');
 
       const resp = await fetch(
-        `http://localhost:8888/api/download?video_id=${videoID}`
+        `http://localhost:8888/api/media_sets?video_id=${videoID}`
      );
       const respBody = await resp.json();
 
       if (respBody.error) {
-        console.log('error fetching audio data:', respBody.error);
+        console.log('error fetching media set:', respBody.error);
         return;
       }
 
-      // TODO: safer deserialization?
-      const audioFile: AudioFile = {
-        bytes: respBody.bytes,
-        channels: respBody.channels,
-        frames: respBody.frames,
-        sampleRate: respBody.sample_rate,
+      const mediaSet: MediaSet = {
+        id: respBody.id,
+        source: respBody.source,
+        audio: {
+          sampleRate: respBody.audio.sample_rate,
+          bytes: respBody.audio.bytes,
+          frames: respBody.audio.frames,
+          channels: respBody.audio.channels,
+        },
+        video: {
+          bytes: respBody.video.bytes,
+          thumbnailWidth: respBody.video.thumbnail_width,
+          thumbnailHeight: respBody.video.thumbnail_height,
+          durationMillis: Math.floor(respBody.video.duration / 1000 / 1000),
+        },
      };
 
-      setAudioFile(audioFile);
-      setZoomSettings({ startFrame: 0, endFrame: audioFile.frames });
+      setMediaSet(mediaSet);
+      setZoomSettings({ startFrame: 0, endFrame: mediaSet.audio.frames });
     })();
   }, [audioContext]);
 
-  // render overview waveform to canvas when the audio file is loaded:
+  // load video when MediaSet is loaded:
+  useEffect(() => {
+    if (mediaSet == null) {
+      return;
+    }
+
+    const video = videoRef.current;
+    if (video == null) {
+      return;
+    }
+
+    const url = `http://localhost:8888/api/video?video_id=${videoID}`;
+    video.src = url;
+    video.muted = false;
+    video.volume = 1;
+    video.controls = true;
+  }, [mediaSet]);
+
   // fetch new waveform peaks when zoom settings are updated:
   useEffect(() => {
     (async function () {
-      if (audioFile == null) {
+      if (mediaSet == null) {
        return;
      }
 
       let endFrame = zoomSettings.endFrame;
       if (endFrame <= zoomSettings.startFrame) {
-        endFrame = audioFile.frames;
+        endFrame = mediaSet.audio.frames;
      }
 
       const resp = await fetch(
         `http://localhost:8888/api/peaks?video_id=${videoID}&start=${zoomSettings.startFrame}&end=${endFrame}&bins=${CanvasLogicalWidth}`
      );
       const peaks = await resp.json();
-      console.log('respBody from peaks =', peaks);
 
       setWaveformPeaks(peaks);
 
       if (overviewPeaks == null) {
@@ -118,7 +165,6 @@ export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
     (async function () {
       const canvas = hudCanvasRef.current;
       if (canvas == null) {
-        console.error('no hud canvas ref available');
        return;
      }
 
@@ -130,14 +176,14 @@ export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
 
       ctx.clearRect(0, 0, canvas.width, canvas.height);
 
-      if (audioFile == null) {
+      if (mediaSet == null) {
        return;
      }
 
       const x = secsToCanvasX(
        currentTime,
-        audioFile.sampleRate,
-        audioFile.frames
+        mediaSet.audio.sampleRate,
+        mediaSet.audio.frames
      );
 
       ctx.strokeStyle = 'red';
@@ -148,10 +194,16 @@ export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
     })();
   }, [currentTime]);
 
+  // end of hook configuration.
+  // TODO: render loading page here.
+  if (mediaSet == null) {
+    return null;
+  }
+
   // callbacks
 
   const handleMouseMove = (evt: MouseEvent<HTMLCanvasElement>) => {
-    if (audioFile == null) {
+    if (mediaSet == null) {
      return;
    }
     const canvasX = mouseEventToCanvasX(evt);
@@ -159,7 +211,7 @@ export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
       'mousemove, x =',
      canvasX,
       'frame =',
-      canvasXToFrame(canvasX, numFrames)
+      canvasXToFrame(canvasX, mediaSet.audio.frames)
    );
   };
 
@@ -172,19 +224,25 @@ export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
   };
 
   const handlePlay = async () => {
-    const url = `http://localhost:8888/api/audio?video_id=${videoID}`;
-    audio.src = url;
-    await audio.play();
-    console.log('playing audio from', url);
+    const video = videoRef.current;
+    if (video == null) {
+      return;
+    }
+    await video.play();
   };
 
   const handlePause = () => {
-    audio.pause();
-    console.log('paused audio');
+    const video = videoRef.current;
+    if (video == null) {
+      return;
+    }
+
+    video.pause();
+    console.log('paused video');
   };
 
   const handleZoomIn = () => {
-    if (audioFile == null) {
+    if (mediaSet == null) {
      return;
    }
     console.log('zoom in');
@@ -195,7 +253,7 @@ export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
   };
 
   const handleZoomOut = () => {
-    if (audioFile == null) {
+    if (mediaSet == null) {
      return;
    }
     console.log('zoom out');
@@ -203,19 +261,19 @@ export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
     const newDiff = diff * 2;
     const endFrame = Math.min(
      zoomSettings.endFrame + newDiff,
-      audioFile.frames
+      mediaSet.audio.frames
    );
     const settings = { ...zoomSettings, endFrame: endFrame };
     setZoomSettings(settings);
   };
 
   const handleSelectionChange = (selection: Selection) => {
-    if (audioFile == null) {
+    if (mediaSet == null) {
      return;
    }
     const settings: ZoomSettings = {
-      startFrame: canvasXToFrame(selection.x1, audioFile.frames),
-      endFrame: canvasXToFrame(selection.x2, audioFile.frames),
+      startFrame: canvasXToFrame(selection.x1, mediaSet.audio.frames),
+      endFrame: canvasXToFrame(selection.x2, mediaSet.audio.frames),
    };
     setZoomSettings(settings);
   };
@@ -224,7 +282,7 @@ export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
 
   const wrapperProps = {
     width: '90%',
-    height: '350px',
+    height: '250px',
     position: 'relative',
     margin: '0 auto',
   } as React.CSSProperties;
@@ -251,20 +309,34 @@ export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
     zIndex: 1,
   } as React.CSSProperties;
 
-  const overviewStyles = { ...wrapperProps, height: '90px' };
+  const overviewStyles = { ...wrapperProps, height: '60px' };
 
   // TODO: why is the margin needed?
   const controlPanelStyles = { margin: '1em' } as React.CSSProperties;
   const clockTextAreaProps = { color: '#999', width: '400px' };
-
-  let numFrames = 0;
-  if (audioFile != null) {
-    numFrames = audioFile.frames;
-  }
+  const videoStyles = {
+    width: '30%',
+    height: 'auto',
+    margin: '10px auto 0 auto',
+    zIndex: 2,
+  } as React.CSSProperties;
+  const thumbnailStyles = {
+    width: '90%',
+    height: '35px',
+    margin: '10px auto 0 auto',
+    display: 'block',
+  };
 
   return (
     <>
-      <h1>clipper</h1>
+      <video ref={videoRef} style={videoStyles}></video>
+      <Thumbnails mediaSet={mediaSet} style={thumbnailStyles} />
+      <WaveformOverview
+        peaks={overviewPeaks}
+        numFrames={mediaSet.audio.frames}
+        style={overviewStyles}
+        onSelectionChange={handleSelectionChange}
+      ></WaveformOverview>
       <div style={wrapperProps}>
         <WaveformCanvas
           peaks={waveformPeaks}
@@ -282,12 +354,6 @@ export const Waveform: React.FC<Props> = ({ audioContext }: Props) => {
           height={CanvasLogicalHeight}
         ></canvas>
       </div>
-      <WaveformOverview
-        peaks={overviewPeaks}
-        numFrames={numFrames}
-        style={overviewStyles}
-        onSelectionChange={handleSelectionChange}
-      ></WaveformOverview>
       <div style={controlPanelStyles}>
         <button onClick={handlePlay}>Play</button>
         <button onClick={handlePause}>Pause</button>
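
Note (illustration only, not part of the diff): the durationMillis conversion above (respBody.video.duration / 1000 / 1000) works because media.Video.Duration is a Go time.Duration, and encoding/json marshals a time.Duration as a plain integer count of nanoseconds. A minimal sketch of that round trip:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

func main() {
    // The same shape as media.Video's duration field.
    v := struct {
        Duration time.Duration `json:"duration"`
    }{Duration: 2 * time.Minute}

    b, _ := json.Marshal(v)
    fmt.Println(string(b))                  // {"duration":120000000000}
    fmt.Println(120000000000 / 1000 / 1000) // 120000 milliseconds
}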

@@ -16,6 +16,12 @@ export const canvasXToFrame = (x: number, numFrames: number): number => {
   return Math.floor((x / CanvasLogicalWidth) * numFrames);
 };
 
+// // TODO: add tests
+// export const canvasXToSecs = (x: number, numFrames: number): number => {
+//   const frame := canvasXToFrame(x, numFrames)
+
+// }
+
 // TODO: add tests
 export const secsToCanvasX = (
   secs: number,

@@ -0,0 +1,102 @@
+import { useState, useEffect, useRef } from 'react';
+import { CanvasLogicalWidth, CanvasLogicalHeight, MediaSet } from '../Waveform';
+
+interface Props {
+  mediaSet: MediaSet;
+  style: React.CSSProperties;
+}
+
+enum State {
+  Loading,
+  Ready,
+  Error,
+}
+
+export const Thumbnails: React.FC<Props> = ({ mediaSet, style }: Props) => {
+  const [image, setImage] = useState(new Image());
+  const [state, setState] = useState(State.Loading);
+  const canvasRef = useRef<HTMLCanvasElement>(null);
+
+  // load thumbnail image when available:
+  useEffect(() => {
+    if (mediaSet == null) return;
+
+    image.src = `http://localhost:8888/api/thumbnails?video_id=${mediaSet.id}`;
+    image.onload = () => {
+      setState(State.Ready);
+    };
+  }, []);
+
+  // render canvas if image has been loaded successfully:
+  useEffect(() => {
+    if (state != State.Ready) return;
+    if (mediaSet == null) return;
+
+    const canvas = canvasRef.current;
+    if (canvas == null) {
+      console.error('no canvas available');
+      return;
+    }
+
+    const ctx = canvas.getContext('2d');
+    if (ctx == null) {
+      console.error('no thumbnail 2d context available');
+      return;
+    }
+
+    const tw = mediaSet.video.thumbnailWidth;
+    const th = mediaSet.video.thumbnailHeight;
+    const iw = image.width;
+    const ih = image.height;
+    const { width: pw, height: ph } = canvas.getBoundingClientRect();
+
+    // set canvas logical width to suit the aspect ratio:
+    // TODO: confirm this is needed.
+    const ar = tw / th;
+    const par = pw / ph;
+    canvas.width = tw * (par / ar);
+
+    const durationSecs = mediaSet.video.durationMillis / 1000;
+
+    for (let dx = 0; dx < canvas.width; dx += tw) {
+      const secs = Math.floor((dx / canvas.width) * durationSecs);
+      const sx = (secs * tw) % iw;
+      const sy = Math.floor(secs / (iw / tw)) * th;
+      ctx.drawImage(image, sx, sy, tw, th, dx, 0, tw, th);
+    }
+  }, [state]);
+
+  // rendering
+
+  if (mediaSet == null || mediaSet.video == null) {
+    console.error('unexpected null video');
+    return null;
+  }
+
+  if (state == State.Loading) {
+    return (
+      <>
+        <div>Loading...</div>
+      </>
+    );
+  }
+
+  if (state == State.Error) {
+    return (
+      <>
+        <span>Something went wrong</span>
+      </>
+    );
+  }
+
+  return (
+    <>
+      <canvas
+        ref={canvasRef}
+        style={style}
+        width={CanvasLogicalWidth}
+        height={100}
+      ></canvas>
+    </>
+  );
+};
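
Note (illustration only, not part of the diff): the drawing loop above treats the fetched JPEG as a row-major sprite sheet. For a given second, the source column is sx = (secs * tw) % iw and the source row is sy = floor(secs / (iw / tw)) * th, where tw and th are the per-thumbnail dimensions from the MediaSet metadata and iw is the sheet width. With tw = 30 and a ten-tile-wide sheet (iw = 300), second 34 maps to sx = (34 * 30) % 300 = 120 (the fifth column) and sy = floor(34 / 10) * 100 = 300 (the fourth row).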