Compare commits


2 Commits

Luke Curley · 73151fbcfe · 2023-09-17 11:01:03 -07:00
Small improvements and comments.
I actually kind of like the simplicity of wake() over changed()

Luke Curley · ddf22012e0 · 2023-09-17 10:53:32 -07:00
Implement AsyncRead for segment::Subscriber
Untested.
93 changed files with 2242 additions and 4341 deletions
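The two commits above pair an (untested) AsyncRead implementation for segment::Subscriber with a preference for a plain wake() call over a changed() notification channel. As a rough illustration of that pattern — a minimal sketch, not the actual moq-transport code; the State/Subscriber shapes and names here are assumptions — the reader drains buffered chunks and parks its Waker when the buffer is empty, and the writer calls wake() after every mutation:

```rust
use std::collections::VecDeque;
use std::io;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll, Waker};

use tokio::io::{AsyncRead, ReadBuf};

// Shared state between the publisher side and the subscriber side (hypothetical).
struct State {
	chunks: VecDeque<Vec<u8>>, // data written by the publisher, not yet read
	closed: bool,              // set when the segment is finished
	waker: Option<Waker>,      // the reader task to wake on new data
}

impl State {
	// The wake() the commit message refers to: called after every mutation.
	fn wake(&mut self) {
		if let Some(waker) = self.waker.take() {
			waker.wake();
		}
	}
}

// Publisher side (sketched): push a chunk, then wake the reader.
fn publish_chunk(state: &Arc<Mutex<State>>, chunk: Vec<u8>) {
	let mut state = state.lock().unwrap();
	state.chunks.push_back(chunk);
	state.wake();
}

pub struct Subscriber {
	state: Arc<Mutex<State>>,
	partial: Vec<u8>, // remainder of a chunk that didn't fit the caller's buffer
}

impl AsyncRead for Subscriber {
	fn poll_read(
		self: Pin<&mut Self>,
		cx: &mut Context<'_>,
		buf: &mut ReadBuf<'_>,
	) -> Poll<io::Result<()>> {
		let this = self.get_mut();

		// Serve any leftover bytes from a previous chunk first.
		if this.partial.is_empty() {
			let mut state = this.state.lock().unwrap();
			match state.chunks.pop_front() {
				Some(chunk) => this.partial = chunk,
				None if state.closed => return Poll::Ready(Ok(())), // EOF
				None => {
					// Register our waker and sleep until the publisher calls wake().
					state.waker = Some(cx.waker().clone());
					return Poll::Pending;
				}
			}
		}

		let n = this.partial.len().min(buf.remaining());
		buf.put_slice(&this.partial[..n]);
		this.partial.drain(..n);
		Poll::Ready(Ok(()))
	}
}
```

The appeal over a changed() watch channel is that there is no separate subscription object to hold: whoever mutates the shared state simply wakes the most recent reader.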


@@ -1,3 +1,2 @@
 target
 dev
-*.mp4


@@ -8,10 +8,3 @@ insert_final_newline = true
 indent_style = tab
 indent_size = 4
 max_line_length = 120
-
-[*.md]
-trim_trailing_whitespace = false
-
-[*.yml]
-indent_style = space
-indent_size = 2

.github/workflows/check.yml (new file, 29 lines)

@@ -0,0 +1,29 @@
name: Test & Lint

on:
  pull_request:
    branches: ["main"]

env:
  CARGO_TERM_COLOR: always

jobs:
  check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: toolchain
        uses: actions-rust-lang/setup-rust-toolchain@v1
        with:
          components: clippy, rustfmt
      - name: test
        run: cargo test --verbose
      - name: clippy
        run: cargo clippy
      - name: fmt
        run: cargo fmt --check


@@ -1,65 +0,0 @@
name: main

on:
  push:
    branches: ["main"]

env:
  REGISTRY: docker.io
  IMAGE: kixelated/moq-rs
  IMAGE-PUB: kixelated/moq-pub
  SERVICE: api # Restart the API service TODO and relays

jobs:
  deploy:
    runs-on: ubuntu-latest

    permissions:
      contents: read
      packages: write
      id-token: write

    # Only one release at a time and cancel prior releases
    concurrency:
      group: release
      cancel-in-progress: true

    steps:
      - uses: actions/checkout@v3

      # I'm paying for Depot for faster ARM builds.
      - uses: depot/setup-action@v1

      - uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      # Build and push Docker image with Depot
      - uses: depot/build-push-action@v1
        with:
          project: r257ctfqm6
          context: .
          push: true
          tags: ${{env.REGISTRY}}/${{env.IMAGE}}
          platforms: linux/amd64,linux/arm64

      # Same, but include ffmpeg for publishing BBB
      - uses: depot/build-push-action@v1
        with:
          project: r257ctfqm6
          context: .
          push: true
          target: moq-pub # instead of the default target
          tags: ${{env.REGISTRY}}/${{env.IMAGE-PUB}}
          platforms: linux/amd64,linux/arm64

      # Log in to GCP
      - uses: google-github-actions/auth@v1
        with:
          credentials_json: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }}

      # Deploy to cloud run
      - uses: google-github-actions/deploy-cloudrun@v1
        with:
          service: ${{env.SERVICE}}
          image: ${{env.REGISTRY}}/${{env.IMAGE}}


@@ -1,28 +0,0 @@
name: pr

on:
  pull_request:
    branches: ["main"]

env:
  CARGO_TERM_COLOR: always

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      # Install Rust with clippy/rustfmt
      - uses: actions-rust-lang/setup-rust-toolchain@v1
        with:
          components: clippy, rustfmt

      # Make sure u guys don't write bad code
      - run: cargo test --verbose
      - run: cargo clippy --no-deps
      - run: cargo fmt --check

      # Check for unused dependencies
      - uses: bnjbvr/cargo-machete@main

.github/workflows/publish.yml (new file, 96 lines)

@@ -0,0 +1,96 @@
name: Publish Docker Image

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

on:
  schedule:
    - cron: "26 7 * * *"
  push:
    branches: ["main"]
    # Publish semver tags as releases.
    tags: ["v*.*.*"]
  pull_request:
    branches: ["main"]

env:
  # Use docker.io for Docker Hub if empty
  REGISTRY: ghcr.io
  # github.repository as <account>/<repo>
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
      # This is used to complete the identity challenge
      # with sigstore/fulcio when running outside of PRs.
      id-token: write

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      # Install the cosign tool except on PR
      # https://github.com/sigstore/cosign-installer
      - name: Install cosign
        if: github.event_name != 'pull_request'
        uses: sigstore/cosign-installer@6e04d228eb30da1757ee4e1dd75a0ec73a653e06 #v3.1.1
        with:
          cosign-release: "v2.1.1"

      # Set up BuildKit Docker container builder to be able to build
      # multi-platform images and export cache
      # https://github.com/docker/setup-buildx-action
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0

      # Login against a Docker registry except on PR
      # https://github.com/docker/login-action
      - name: Log into registry ${{ env.REGISTRY }}
        if: github.event_name != 'pull_request'
        uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # Extract metadata (tags, labels) for Docker
      # https://github.com/docker/metadata-action
      - name: Extract Docker metadata
        id: meta
        uses: docker/metadata-action@96383f45573cb7f253c731d3b3ab81c87ef81934 # v5.0.0
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}

      # Build and push Docker image with Buildx (don't push on PR)
      # https://github.com/docker/build-push-action
      - name: Build and push Docker image
        id: build-and-push
        uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0
        with:
          context: .
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      # Sign the resulting Docker image digest except on PRs.
      # This will only write to the public Rekor transparency log when the Docker
      # repository is public to avoid leaking data. If you would like to publish
      # transparency data even for private images, pass --force to cosign below.
      # https://github.com/sigstore/cosign
      - name: Sign the published Docker image
        if: ${{ github.event_name != 'pull_request' }}
        env:
          # https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable
          TAGS: ${{ steps.meta.outputs.tags }}
          DIGEST: ${{ steps.build-and-push.outputs.digest }}
        # This step uses the identity token to provision an ephemeral certificate
        # against the sigstore community Fulcio instance.
        run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST}

.gitignore (1 change)

@@ -1,4 +1,3 @@
 .DS_Store
 target/
 logs/
-*.mp4

Cargo.lock (805 changes)

File diff suppressed because it is too large.


@@ -1,3 +1,3 @@
 [workspace]
-members = ["moq-transport", "moq-relay", "moq-pub", "moq-api"]
+members = ["moq-transport", "moq-relay", "moq-pub"]
 resolver = "2"


@@ -12,28 +12,14 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
 	--mount=type=cache,target=/build/target \
 	cargo build --release && cp /build/target/release/moq-* /usr/local/cargo/bin

-# Special image for moq-pub with ffmpeg and a publish script included.
-FROM rust:latest as moq-pub
-
-# Install required utilities and ffmpeg
-RUN apt-get update && \
-	apt-get install -y ffmpeg wget
-
-# Copy the publish script into the image
-COPY deploy/publish.sh /usr/local/bin/publish
-
-# Copy the compiled binary
-COPY --from=builder /usr/local/cargo/bin/moq-pub /usr/local/cargo/bin/moq-pub
-CMD [ "publish" ]
-
-# moq-rs image with just the binaries
-FROM rust:latest as moq-rs
+# Runtime image
+FROM rust:latest

 LABEL org.opencontainers.image.source=https://github.com/kixelated/moq-rs
 LABEL org.opencontainers.image.licenses="MIT OR Apache-2.0"

 # Fly.io entrypoint
-ADD deploy/fly-relay.sh .
+ADD fly-relay.sh .

 # Copy the compiled binaries
 COPY --from=builder /usr/local/cargo/bin /usr/local/cargo/bin


@@ -1,53 +0,0 @@
# Hackathon

IETF Prague 118

## MoqTransport

Reference libraries are available at [moq-rs](https://github.com/kixelated/moq-rs) and [moq-js](https://github.com/kixelated/moq-js). The Rust library is [well documented](https://docs.rs/moq-transport/latest/moq_transport/) but the web library, not so much.

**TODO** Update both to draft-01.
**TODO** Switch any remaining forks over to extensions. ex: track_id in SUBSCRIBE

The stream mapping right now is quite rigid: `stream == group == object`.

**TODO** Support multiple objects per group. They MUST NOT use different priorities, different tracks, or out-of-order sequences.

The API and cache aren't designed to send/receive arbitrary objects over arbitrary streams as specified in the draft. I don't think it should, and it wouldn't be possible to implement in time for the hackathon anyway.

**TODO** Make an extension to enforce this stream mapping?

## Generic Relay

I'm hosting a simple CDN at: `relay.quic.video`

The traffic is sharded based on the WebTransport path to avoid namespace collisions. Think of it like a customer ID, although it's completely unauthenticated for now. Use your username or whatever string you want: `CONNECT https://relay.quic.video/alan`.

**TODO** Currently, it performs an implicit `ANNOUNCE ""` when `role=publisher`. This means there can only be a single publisher per shard and `role=both` is not supported. I should have explicit `ANNOUNCE` messages supported before the hackathon to remove this limitation.

**TODO** I don't know if I will have subscribe hints fully working in time. They will be parsed but might be ignored.

## CMAF Media

You can [publish](https://quic.video/publish) and [watch](https://quic.video/watch) broadcasts.

There's a [24/7 bunny stream](https://quic.video/watch/bbb) or you can publish your own using [moq-pub](https://github.com/kixelated/moq-rs/tree/main/moq-pub).

If you want to fetch from the relay directly, the name of the broadcast is the path. For example, `https://quic.video/watch/bbb` can be accessed at `relay.quic.video/bbb`.

The namespace is empty and the catalog track is `.catalog`. I'm currently using a simple JSON catalog with no support for delta updates.

**TODO** Update to the proposed [Warp catalog](https://datatracker.ietf.org/doc/draft-wilaw-moq-catalogformat/).

The media tracks use a single (unbounded) object per group. Video groups are per GoP, while audio groups are per frame. There's also an init track containing information required to initialize the decoder.

**TODO** Base64 encode the init track in the catalog.

## Clock

**TODO** Host a clock demo that sends a group per minute, with an object per second:

```
GROUP: YYYY-MM-DD HH:MM
OBJECT: SS
```

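The deleted hackathon notes above end with a TODO for a clock broadcast: one group per minute whose objects are the seconds. A rough sketch of such a publisher, reusing the create_track/create_segment/write_chunk calls visible in the media code later in this diff; the chrono dependency, VarInt::from_u32, and the exact segment::Info fields are assumptions rather than confirmed API:

```rust
use chrono::{Timelike, Utc};
use moq_transport::model::{broadcast, segment};
use moq_transport::VarInt;

// One group (segment) per minute, one object (chunk) per second.
async fn serve_clock(mut broadcast: broadcast::Publisher) -> anyhow::Result<()> {
	let mut track = broadcast.create_track("clock")?;

	for sequence in 0u32.. {
		let now = Utc::now();

		// The group header carries the coarse timestamp: YYYY-MM-DD HH:MM
		let mut segment = track.create_segment(segment::Info {
			sequence: VarInt::from_u32(sequence),
			priority: 0,
			expires: Some(std::time::Duration::from_secs(120)),
		})?;
		segment.write_chunk(now.format("%Y-%m-%d %H:%M").to_string().into())?;

		// Each subsequent object is just the seconds field: SS
		for second in now.second()..60 {
			segment.write_chunk(format!("{:02}", second).into())?;
			tokio::time::sleep(std::time::Duration::from_secs(1)).await;
		}
	}

	Ok(())
}
```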

@@ -1,22 +1,44 @@
+# Media over QUIC
+
 <p align="center">
-	<img height="128px" src="https://github.com/kixelated/moq-rs/blob/main/.github/logo.svg" alt="Media over QUIC">
+	<img height="256" src="https://github.com/kixelated/moq-rs/blob/main/.github/logo.svg">
 </p>

 Media over QUIC (MoQ) is a live media delivery protocol utilizing QUIC streams.
-See [quic.video](https://quic.video) for more information.
+See the [MoQ working group](https://datatracker.ietf.org/wg/moq/about/) for more information.

-This repository contains a few crates:
-
-- **moq-relay**: A relay server, accepting content from publishers and fanning it out to subscribers.
-- **moq-pub**: A publish client, accepting media from stdin (ex. via ffmpeg) and sending it to a remote server.
-- **moq-transport**: An async implementation of the underlying MoQ protocol.
-- **moq-api**: A HTTP API server that stores the origin for each broadcast, backed by redis.
-
-There's currently no way to view media with this repo; you'll need to use [moq-js](https://github.com/kixelated/moq-js) for that.
+This repository contains reusable libraries and a relay server.
+It requires a client to actually publish/view content, such as [moq-js](https://github.com/kixelated/moq-js).

-## Development
+Join the [Discord](https://discord.gg/FCYF3p99mr) for updates and discussion.

-Use the [dev helper scripts](dev/README.md) for local development.
+## Setup
+
+### Certificates
+
+Unfortunately, QUIC mandates TLS and makes local development difficult.
+If you have a valid certificate you can use it instead of self-signing.
+
+Use [mkcert](https://github.com/FiloSottile/mkcert) to generate a self-signed certificate.
+Unfortunately, this currently requires Go in order to [fork](https://github.com/FiloSottile/mkcert/pull/513) the tool.
+
+```bash
+./dev/cert
+```
+
+Unfortunately, WebTransport in Chrome currently (May 2023) doesn't verify certificates using the root CA.
+The workaround is to use the `serverFingerprints` options, which requires the certificate MUST be only valid for at most **14 days**.
+This is also why we're using a fork of mkcert, because it generates certificates valid for years by default.
+This limitation will be removed once Chrome uses the system CA for WebTransport.
+
+### Media
+
+If you're using `moq-pub` then you'll want some test footage to broadcast.
+
+```bash
+mkdir media
+wget http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4 -O dev/source.mp4
+```
@@ -24,41 +46,53 @@ Use the [dev helper scripts](dev/README.md) for local development.
 ## Usage

 **moq-relay** is a server that forwards subscriptions from publishers to subscribers, caching and deduplicating along the way.
 It's designed to be run in a datacenter, relaying media across multiple hops to deduplicate and improve QoS.

-The relays register themselves via the [moq-api](moq-api) endpoints, which is used to discover other relays and share broadcasts.
+You can run the development server with the following command, automatically using the self-signed certificate generated earlier:
+
+```bash
+./dev/relay
+```

 Notable arguments:

-- `--listen <ADDR>` Listen on this address, default: `[::]:4443`
-- `--tls-cert <CERT>` Use the certificate file at this path
-- `--tls-key <KEY>` Use the private key at this path
-- `--dev` Listen via HTTPS as well, serving the `/fingerprint` of the self-signed certificate. (dev only)
+- `--bind <ADDR>` Listen on this address [default: [::]:4443]
+- `--cert <CERT>` Use the certificate file at this path
+- `--key <KEY>` Use the private key at this path

 This listens for WebTransport connections on `UDP https://localhost:4443` by default.
 You need a client to connect to that address, to both publish and consume media.

+The server also listens on `TCP localhost:4443` when in development mode.
+This is exclusively to serve a `/fingerprint` endpoint via HTTPS for self-signed certificates, which are not needed in production.
+
 ### moq-pub

 This is a client that publishes a fMP4 stream from stdin over MoQ.
 This can be combined with ffmpeg (and other tools) to produce a live stream.

+The following command runs a development instance, broadcasting `dev/source.mp4` to `localhost:4443`:
+
+```bash
+./dev/pub
+```
+
 Notable arguments:

-- `<URL>` connect to the given address, which must start with `https://` for WebTransport.
+- `<URI>` connect to the given address, which must start with moq://.

-**NOTE**: We're very particular about the fMP4 ingested. See [this script](dev/pub) for the required ffmpeg flags.
+### moq-js

-### moq-transport
+There's currently no way to consume broadcasts with `moq-rs`, at least until somebody writes `moq-sub`.
+Until then, you can use [moq.js](https://github.com/kixelated/moq-js) to both watch and publish broadcasts.

-A media-agnostic library used by [moq-relay](moq-relay) and [moq-pub](moq-pub) to serve the underlying subscriptions.
-It has caching/deduplication built-in, so your application is oblivious to the number of connections under the hood.
-See the published [crate](https://crates.io/crates/moq-transport) and [documentation](https://docs.rs/moq-transport/latest/moq_transport/).
+There's a hosted version available at [quic.video](https://quic.video/).
+There's a secret `?server` parameter that can be used to connect to a different address.

-### moq-api
+- Publish to localhost: `https://quic.video/publish/?server=localhost:4443`
+- Watch from localhost: `https://quic.video/watch/<name>/?server=localhost:4443`

-This is an API server that exposes a REST API.
-It's used by relays to insert themselves as origins when publishing, and to find the origin when subscribing.
-It's basically just a thin wrapper around redis that is only needed to run multiple relays in a (simple) cluster.
+Note that self-signed certificates are ONLY supported if the server name starts with `localhost`.
+You'll need to add an entry to `/etc/hosts` if you want to use self-signed certs and an IP address.

 ## License


@@ -1,20 +0,0 @@
app = "englishm-moq-relay"
kill_signal = "SIGINT"
kill_timeout = 5

[env]
  PORT = "4443"

[experimental]
  cmd = "./fly-relay.sh"

[[services]]
  internal_port = 4443
  protocol = "udp"

  [services.concurrency]
    hard_limit = 25
    soft_limit = 20

  [[services.ports]]
    port = "4443"


@@ -1,41 +0,0 @@
#!/bin/bash
set -euo pipefail

ADDR=${ADDR:-"https://relay.quic.video"}
NAME=${NAME:-"bbb"}
URL=${URL:-"http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4"}

# Download the funny bunny
wget -nv "${URL}" -O "${NAME}.mp4"

# ffmpeg
#   -hide_banner: Hide the banner
#   -v quiet: and any other output
#   -stats: But we still want some stats on stderr
#   -stream_loop -1: Loop the broadcast an infinite number of times
#   -re: Output in real-time
#   -i "${INPUT}": Read from a file on disk
#   -vf "drawtext": Render the current time in the corner of the video
#   -an: Disable audio for now
#   -b:v 3M: Output video at 3Mbps
#   -preset ultrafast: Don't use much CPU at the cost of quality
#   -tune zerolatency: Optimize for latency at the cost of quality
#   -f mp4: Output to mp4 format
#   -movflags: Build a fMP4 file with a frame per fragment
#   - | moq-pub: Output to stdout and moq-pub to publish

# Run ffmpeg
ffmpeg \
	-stream_loop -1 \
	-hide_banner \
	-v quiet \
	-re \
	-i "${NAME}.mp4" \
	-vf "drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf:text='%{gmtime\: %H\\\\\:%M\\\\\:%S.%3N}':x=(W-tw)-24:y=24:fontsize=48:fontcolor=white:box=1:boxcolor=black@0.5" \
	-an \
	-b:v 3M \
	-preset ultrafast \
	-tune zerolatency \
	-f mp4 \
	-movflags empty_moov+frag_every_frame+separate_moof+omit_tfhd_offset \
	- | moq-pub "${ADDR}/${NAME}"


@@ -1,118 +0,0 @@
# Local Development

This is a collection of helpful scripts for local development.

## Setup

### moq-relay

Unfortunately, QUIC mandates TLS and makes local development difficult.
If you have a valid certificate you can use it instead of self-signing.

Use [mkcert](https://github.com/FiloSottile/mkcert) to generate a self-signed certificate.
Unfortunately, this currently requires [Go](https://golang.org/) to be installed in order to [fork](https://github.com/FiloSottile/mkcert/pull/513) the tool.
Somebody should get that merged or make something similar in Rust...

```bash
./dev/cert
```

Unfortunately, WebTransport in Chrome currently (May 2023) doesn't verify certificates using the root CA.
The workaround is to use the `serverFingerprints` options, which requires the certificate MUST be only valid for at most **14 days**.
This is also why we're using a fork of mkcert, because it generates certificates valid for years by default.
This limitation will be removed once Chrome uses the system CA for WebTransport.

### moq-pub

You'll want some test footage to broadcast.
Anything works, but make sure the codec is supported by the player since `moq-pub` does not re-encode.

Here's a critically acclaimed short film:

```bash
mkdir media
wget http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4 -O dev/source.mp4
```

`moq-pub` uses [ffmpeg](https://ffmpeg.org/) to convert the media to fMP4.
You should have it installed already if you're a video nerd, otherwise:

```bash
brew install ffmpeg
```

### moq-api

`moq-api` uses a redis instance to store active origins for clustering.
This is not relevant for most local development and the code path is skipped by default.

However, if you want to test the clustering, you'll need either [Docker](https://www.docker.com/) or [Podman](https://podman.io/) installed.
We run the redis instance via a container automatically as part of `dev/api`.

## Development

**tl;dr** run these commands in separate terminals:

```bash
./dev/cert
./dev/relay
./dev/pub
```

They will each print out a URL you can use to publish/watch broadcasts.

### moq-relay

You can run the relay with the following command, automatically using the self-signed certificates generated earlier.
This listens for WebTransport connections on `https://localhost:4443` by default.

```bash
./dev/relay
```

It will print out a URL you can use to publish. Alternatively, you can use `dev/pub` instead.

> Publish URL: https://quic.video/publish/?server=localhost:4443

### moq-pub

The following command runs a development instance, broadcasting `dev/source.mp4` to WebTransport `https://localhost:4443`:

```bash
./dev/pub
```

It will print out a URL you can use to watch.
By default, the broadcast name is `dev` but you can overwrite it with the `NAME` env.

> Watch URL: https://quic.video/watch/dev?server=localhost:4443

If you're debugging encoding issues, you can use this script to dump the file to disk instead, defaulting to `dev/output.mp4`.

```bash
./dev/pub-file
```

### moq-api

The following command runs an API server, listening for HTTP requests on `http://localhost:4442` by default.

```bash
./dev/api
```

Nodes can now register themselves via the API, which means you can run multiple interconnected relays.

There are two separate `dev/relay-0` and `dev/relay-1` scripts to test clustering locally:

```bash
./dev/relay-0
./dev/relay-1
```

These listen on `:4443` and `:4444` respectively, inserting themselves into the origin database as `localhost:$PORT`.

There's also a separate `dev/pub-1` script to publish to the `:4444` instance.
You can use the existing `dev/pub` script to publish to the `:4443` instance.

If all goes well, you would be able to publish to one relay and watch from the other.

dev/api (45 changes)

@@ -1,45 +0,0 @@
#!/bin/bash
set -euo pipefail

# Change directory to the root of the project
cd "$(dirname "$0")/.."

# Use debug logging by default
export RUST_LOG="${RUST_LOG:-debug}"

# Run the API server on port 4442 by default
HOST="${HOST:-[::]}"
PORT="${PORT:-4442}"
LISTEN="${LISTEN:-$HOST:$PORT}"

# Check for Podman/Docker and set runtime accordingly
if command -v podman &> /dev/null; then
	RUNTIME=podman
elif command -v docker &> /dev/null; then
	RUNTIME=docker
else
	echo "Neither podman nor docker found in PATH. Exiting."
	exit 1
fi

REDIS_PORT=${REDIS_PORT:-6400} # The default is 6379, but we'll use 6400 to avoid conflicts

# Cleanup function to stop Redis when the script exits
cleanup() {
	$RUNTIME rm -f moq-redis || true
}

# Stop the redis instance if it's still running
cleanup

# Run a Redis instance
REDIS_CONTAINER=$($RUNTIME run --rm --name moq-redis -d -p "$REDIS_PORT:6379" redis:latest)

# Stop Redis when the script exits
trap cleanup EXIT

# Default to a sqlite database in memory
DATABASE="${DATABASE-sqlite::memory:}"

# Run the API server and forward any arguments
cargo run --bin moq-api -- --listen "$LISTEN" --redis "redis://localhost:$REDIS_PORT" "$@"

dev/pub (31 changes)

@@ -4,37 +4,22 @@ set -euo pipefail
 # Change directory to the root of the project
 cd "$(dirname "$0")/.."

-# Use debug logging by default
-export RUST_LOG="${RUST_LOG:-debug}"
-
 # Connect to localhost by default.
-HOST="${HOST:-localhost}"
-PORT="${PORT:-4443}"
-ADDR="${ADDR:-$HOST:$PORT}"
+HOST="${HOST:-localhost:4443}"

 # Generate a random 16 character name by default.
-#NAME="${NAME:-$(head /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | head -c 16)}"
-
-# JK use the name "dev" instead
-# TODO use that random name if the host is not localhost
-NAME="${NAME:-dev}"
+NAME="${NAME:-$(head /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | head -c 16)}"

-# Combine the host and name into a URL.
-URL="${URL:-"https://$ADDR/$NAME"}"
+# Combine the host and name into a URI.
+URI="${URI:-"moq://$HOST/$NAME"}"

 # Default to a source video
-INPUT="${INPUT:-dev/source.mp4}"
-
-# Print out the watch URL
-echo "Watch URL: https://quic.video/watch/$NAME?server=$ADDR"
+MEDIA="${MEDIA:-dev/source.mp4}"

 # Run ffmpeg and pipe the output to moq-pub
-# TODO enable audio again once fixed.
 ffmpeg -hide_banner -v quiet \
 	-stream_loop -1 -re \
-	-i "$INPUT" \
-	-c copy \
+	-i "$MEDIA" \
 	-an \
-	-f mp4 -movflags cmaf+separate_moof+delay_moov+skip_trailer \
-	-frag_duration 1 \
-	- | cargo run --bin moq-pub -- "$URL" "$@"
+	-f mp4 -movflags empty_moov+frag_every_frame+separate_moof+omit_tfhd_offset - \
+	| RUST_LOG=info cargo run --bin moq-pub -- "$URI" "$@"


@@ -1,10 +0,0 @@
#!/bin/bash
set -euo pipefail

# Change directory to the root of the project
cd "$(dirname "$0")/.."

# Connect to the 2nd relay by default.
export PORT="${PORT:-4444}"

./dev/pub


@@ -1,90 +0,0 @@
#!/bin/bash
set -euo pipefail

# Change directory to the root of the project
cd "$(dirname "$0")/.."

# Default to a source video
INPUT="${INPUT:-dev/source.mp4}"

# Output the fragmented MP4 to disk for testing.
OUTPUT="${OUTPUT:-dev/output.mp4}"

# Run ffmpeg the same as dev/pub, but:
# - print any errors/warnings
# - only loop twice
#
# Note this is artificially slowed down to real-time using the -re flag; you can remove it.
ffmpeg \
	-re \
	-y \
	-i "$INPUT" \
	-c copy \
	-fps_mode passthrough \
	-f mp4 -movflags cmaf+separate_moof+delay_moov+skip_trailer \
	-frag_duration 1 \
	"${OUTPUT}"

# % ffmpeg -f mp4 --ffmpeg -h muxer=mov
#
# ffmpeg version 6.0 Copyright (c) 2000-2023 the FFmpeg developers
# Muxer mov [QuickTime / MOV]:
#     Common extensions: mov.
#     Default video codec: h264.
#     Default audio codec: aac.
# mov/mp4/tgp/psp/tg2/ipod/ismv/f4v muxer AVOptions:
#   -movflags          <flags>     E.......... MOV muxer flags (default 0)
#      rtphint                     E.......... Add RTP hint tracks
#      empty_moov                  E.......... Make the initial moov atom empty
#      frag_keyframe               E.......... Fragment at video keyframes
#      frag_every_frame            E.......... Fragment at every frame
#      separate_moof               E.......... Write separate moof/mdat atoms for each track
#      frag_custom                 E.......... Flush fragments on caller requests
#      isml                        E.......... Create a live smooth streaming feed (for pushing to a publishing point)
#      faststart                   E.......... Run a second pass to put the index (moov atom) at the beginning of the file
#      omit_tfhd_offset            E.......... Omit the base data offset in tfhd atoms
#      disable_chpl                E.......... Disable Nero chapter atom
#      default_base_moof           E.......... Set the default-base-is-moof flag in tfhd atoms
#      dash                        E.......... Write DASH compatible fragmented MP4
#      cmaf                        E.......... Write CMAF compatible fragmented MP4
#      frag_discont                E.......... Signal that the next fragment is discontinuous from earlier ones
#      delay_moov                  E.......... Delay writing the initial moov until the first fragment is cut, or until the first fragment flush
#      global_sidx                 E.......... Write a global sidx index at the start of the file
#      skip_sidx                   E.......... Skip writing of sidx atom
#      write_colr                  E.......... Write colr atom even if the color info is unspecified (Experimental, may be renamed or changed, do not use from scripts)
#      prefer_icc                  E.......... If writing colr atom prioritise usage of ICC profile if it exists in stream packet side data
#      write_gama                  E.......... Write deprecated gama atom
#      use_metadata_tags           E.......... Use mdta atom for metadata.
#      skip_trailer                E.......... Skip writing the mfra/tfra/mfro trailer for fragmented files
#      negative_cts_offsets        E.......... Use negative CTS offsets (reducing the need for edit lists)
#   -moov_size         <int>       E.......... maximum moov size so it can be placed at the begin (from 0 to INT_MAX) (default 0)
#   -rtpflags          <flags>     E.......... RTP muxer flags (default 0)
#      latm                        E.......... Use MP4A-LATM packetization instead of MPEG4-GENERIC for AAC
#      rfc2190                     E.......... Use RFC 2190 packetization instead of RFC 4629 for H.263
#      skip_rtcp                   E.......... Don't send RTCP sender reports
#      h264_mode0                  E.......... Use mode 0 for H.264 in RTP
#      send_bye                    E.......... Send RTCP BYE packets when finishing
#   -skip_iods         <boolean>   E.......... Skip writing iods atom. (default true)
#   -iods_audio_profile <int>      E.......... iods audio profile atom. (from -1 to 255) (default -1)
#   -iods_video_profile <int>      E.......... iods video profile atom. (from -1 to 255) (default -1)
#   -frag_duration     <int>       E.......... Maximum fragment duration (from 0 to INT_MAX) (default 0)
#   -min_frag_duration <int>       E.......... Minimum fragment duration (from 0 to INT_MAX) (default 0)
#   -frag_size         <int>       E.......... Maximum fragment size (from 0 to INT_MAX) (default 0)
#   -ism_lookahead     <int>       E.......... Number of lookahead entries for ISM files (from 0 to 255) (default 0)
#   -video_track_timescale <int>   E.......... set timescale of all video tracks (from 0 to INT_MAX) (default 0)
#   -brand             <string>    E.......... Override major brand
#   -use_editlist      <boolean>   E.......... use edit list (default auto)
#   -fragment_index    <int>       E.......... Fragment number of the next fragment (from 1 to INT_MAX) (default 1)
#   -mov_gamma         <float>     E.......... gamma value for gama atom (from 0 to 10) (default 0)
#   -frag_interleave   <int>       E.......... Interleave samples within fragments (max number of consecutive samples, lower is tighter interleaving, but with more overhead) (from 0 to INT_MAX) (default 0)
#   -encryption_scheme <string>    E.......... Configures the encryption scheme, allowed values are none, cenc-aes-ctr
#   -encryption_key    <binary>    E.......... The media encryption key (hex)
#   -encryption_kid    <binary>    E.......... The media encryption key identifier (hex)
#   -use_stream_ids_as_track_ids <boolean> E.......... use stream ids as track ids (default false)
#   -write_btrt        <boolean>   E.......... force or disable writing btrt (default auto)
#   -write_tmcd        <boolean>   E.......... force or disable writing tmcd (default auto)
#   -write_prft        <int>       E.......... Write producer reference time box with specified time source (from 0 to 2) (default 0)
#      wallclock       1           E..........
#      pts             2           E..........
#   -empty_hdlr_name   <boolean>   E.......... write zero-length name string in hdlr atoms within mdia and minf atoms (default false)
#   -movie_timescale   <int>       E.......... set movie timescale (from 1 to INT_MAX) (default 1000)


@@ -4,34 +4,10 @@ set -euo pipefail
 # Change directory to the root of the project
 cd "$(dirname "$0")/.."

-# Use debug logging by default
-export RUST_LOG="${RUST_LOG:-debug}"
-
 # Default to a self-signed certificate
 # TODO automatically generate if it doesn't exist.
 CERT="${CERT:-dev/localhost.crt}"
 KEY="${KEY:-dev/localhost.key}"

-# Default to listening on localhost:4443
-HOST="${HOST:-[::]}"
-PORT="${PORT:-4443}"
-LISTEN="${LISTEN:-$HOST:$PORT}"
-
-# A list of optional args
-ARGS=""
-
-# Connect to the given URL to get origins.
-# TODO default to a public instance?
-if [ -n "${API-}" ]; then
-	ARGS="$ARGS --api $API"
-fi
-
-# Provide our node URL when registering origins.
-if [ -n "${NODE-}" ]; then
-	ARGS="$ARGS --api-node $NODE"
-fi
-
-echo "Publish URL: https://quic.video/publish/?server=localhost:${PORT}"
-
 # Run the relay and forward any arguments
-cargo run --bin moq-relay -- --listen "$LISTEN" --tls-cert "$CERT" --tls-key "$KEY" --dev $ARGS -- "$@"
+RUST_LOG=info cargo run --bin moq-relay -- --cert "$CERT" --key "$KEY" --fingerprint "$@"


@@ -1,12 +0,0 @@
#!/bin/bash
set -euo pipefail

# Change directory to the root of the project
cd "$(dirname "$0")/.."

# Run an instance that advertises itself to the origin API.
export PORT="${PORT:-4443}"
export API="${API:-http://localhost:4442}" # TODO support HTTPS
export NODE="${NODE:-https://localhost:$PORT}"

./dev/relay


@@ -1,12 +0,0 @@
#!/bin/bash
set -euo pipefail

# Change directory to the root of the project
cd "$(dirname "$0")/.."

# Run an instance that advertises itself to the origin API.
export PORT="${PORT:-4444}"
export API="${API:-http://localhost:4442}" # TODO support HTTPS
export NODE="${NODE:-https://localhost:$PORT}"

./dev/relay


@@ -1,2 +0,0 @@
#!/bin/bash
set -euo pipefail


@@ -5,4 +5,4 @@ mkdir cert
 echo "$MOQ_CRT" | base64 -d > dev/moq-demo.crt
 echo "$MOQ_KEY" | base64 -d > dev/moq-demo.key

-RUST_LOG=info /usr/local/cargo/bin/moq-relay --tls-cert dev/moq-demo.crt --tls-key dev/moq-demo.key
+RUST_LOG=info /usr/local/cargo/bin/moq-relay --cert dev/moq-demo.crt --key dev/moq-demo.key

fly.toml (new file, 19 lines)

@@ -0,0 +1,19 @@
app = "englishm-moq-relay"
kill_signal = "SIGINT"
kill_timeout = 5

[env]
  PORT = "4443"

[experimental]
  cmd = "./fly-relay.sh"

[[services]]
  internal_port = 4443
  protocol = "udp"
  [services.concurrency]
    hard_limit = 25
    soft_limit = 20

  [[services.ports]]
    port = "4443"


@@ -1,43 +0,0 @@
[package]
name = "moq-api"
description = "Media over QUIC"
authors = ["Luke Curley"]
repository = "https://github.com/kixelated/moq-rs"
license = "MIT OR Apache-2.0"

version = "0.0.1"
edition = "2021"

keywords = ["quic", "http3", "webtransport", "media", "live"]
categories = ["multimedia", "network-programming", "web-programming"]

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]

# HTTP server
axum = "0.6"
hyper = { version = "0.14", features = ["full"] }
tokio = { version = "1", features = ["full"] }

# HTTP client
reqwest = { version = "0.11", features = ["json", "rustls-tls"] }

# JSON encoding
serde = "1"
serde_json = "1"

# CLI
clap = { version = "4", features = ["derive"] }

# Database
redis = { version = "0.23", features = [
	"tokio-rustls-comp",
	"connection-manager",
] }
url = { version = "2", features = ["serde"] }

# Error handling
log = "0.4"
env_logger = "0.9"
thiserror = "1"


@@ -1,4 +0,0 @@
# moq-api

A thin HTTP API that wraps Redis.
Basically I didn't want the relays connecting to Redis directly.


@@ -1,56 +0,0 @@
use url::Url;

use crate::{ApiError, Origin};

#[derive(Clone)]
pub struct Client {
	// The address of the moq-api server
	url: Url,

	client: reqwest::Client,
}

impl Client {
	pub fn new(url: Url) -> Self {
		let client = reqwest::Client::new();
		Self { url, client }
	}

	pub async fn get_origin(&self, id: &str) -> Result<Option<Origin>, ApiError> {
		let url = self.url.join("origin/")?.join(id)?;
		let resp = self.client.get(url).send().await?;
		if resp.status() == reqwest::StatusCode::NOT_FOUND {
			return Ok(None);
		}

		let origin: Origin = resp.json().await?;
		Ok(Some(origin))
	}

	pub async fn set_origin(&mut self, id: &str, origin: &Origin) -> Result<(), ApiError> {
		let url = self.url.join("origin/")?.join(id)?;
		let resp = self.client.post(url).json(origin).send().await?;
		resp.error_for_status()?;

		Ok(())
	}

	pub async fn delete_origin(&mut self, id: &str) -> Result<(), ApiError> {
		let url = self.url.join("origin/")?.join(id)?;
		let resp = self.client.delete(url).send().await?;
		resp.error_for_status()?;

		Ok(())
	}

	pub async fn patch_origin(&mut self, id: &str, origin: &Origin) -> Result<(), ApiError> {
		let url = self.url.join("origin/")?.join(id)?;
		let resp = self.client.patch(url).json(origin).send().await?;
		resp.error_for_status()?;

		Ok(())
	}
}


@@ -1,16 +0,0 @@
use thiserror::Error;

#[derive(Error, Debug)]
pub enum ApiError {
	#[error("redis error: {0}")]
	Redis(#[from] redis::RedisError),

	#[error("reqwest error: {0}")]
	Request(#[from] reqwest::Error),

	#[error("hyper error: {0}")]
	Hyper(#[from] hyper::Error),

	#[error("url error: {0}")]
	Url(#[from] url::ParseError),
}


@@ -1,7 +0,0 @@
mod client;
mod error;
mod model;

pub use client::*;
pub use error::*;
pub use model::*;


@@ -1,14 +0,0 @@
use clap::Parser;

mod server;

use moq_api::ApiError;
use server::{Server, ServerConfig};

#[tokio::main]
async fn main() -> Result<(), ApiError> {
	env_logger::init();

	let config = ServerConfig::parse();
	let server = Server::new(config);
	server.run().await
}


@@ -1,8 +0,0 @@
use serde::{Deserialize, Serialize};
use url::Url;

#[derive(Serialize, Deserialize, PartialEq, Eq)]
pub struct Origin {
	pub url: Url,
}


@@ -1,171 +0,0 @@
use std::net;

use axum::{
	extract::{Path, State},
	http::StatusCode,
	response::{IntoResponse, Response},
	routing::get,
	Json, Router,
};

use clap::Parser;
use redis::{aio::ConnectionManager, AsyncCommands};

use moq_api::{ApiError, Origin};

/// Runs a HTTP API to create/get origins for broadcasts.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct ServerConfig {
	/// Listen for HTTP requests on the given address
	#[arg(long)]
	pub listen: net::SocketAddr,

	/// Connect to the given redis instance
	#[arg(long)]
	pub redis: url::Url,
}

pub struct Server {
	config: ServerConfig,
}

impl Server {
	pub fn new(config: ServerConfig) -> Self {
		Self { config }
	}

	pub async fn run(self) -> Result<(), ApiError> {
		log::info!("connecting to redis: url={}", self.config.redis);

		// Create the redis client.
		let redis = redis::Client::open(self.config.redis)?;
		let redis = redis
			.get_tokio_connection_manager() // TODO get_tokio_connection_manager_with_backoff?
			.await?;

		let app = Router::new()
			.route(
				"/origin/:id",
				get(get_origin)
					.post(set_origin)
					.delete(delete_origin)
					.patch(patch_origin),
			)
			.with_state(redis);

		log::info!("serving requests: bind={}", self.config.listen);

		axum::Server::bind(&self.config.listen)
			.serve(app.into_make_service())
			.await?;

		Ok(())
	}
}

async fn get_origin(
	Path(id): Path<String>,
	State(mut redis): State<ConnectionManager>,
) -> Result<Json<Origin>, AppError> {
	let key = origin_key(&id);

	let payload: Option<String> = redis.get(&key).await?;
	let payload = payload.ok_or(AppError::NotFound)?;
	let origin: Origin = serde_json::from_str(&payload)?;

	Ok(Json(origin))
}

async fn set_origin(
	State(mut redis): State<ConnectionManager>,
	Path(id): Path<String>,
	Json(origin): Json<Origin>,
) -> Result<(), AppError> {
	// TODO validate origin

	let key = origin_key(&id);

	// Convert the input back to JSON after validating it and adding any fields (TODO)
	let payload = serde_json::to_string(&origin)?;

	let res: Option<String> = redis::cmd("SET")
		.arg(key)
		.arg(payload)
		.arg("NX")
		.arg("EX")
		.arg(600) // Set the key to expire in 10 minutes; the origin needs to keep refreshing it.
		.query_async(&mut redis)
		.await?;

	if res.is_none() {
		return Err(AppError::Duplicate);
	}

	Ok(())
}

async fn delete_origin(Path(id): Path<String>, State(mut redis): State<ConnectionManager>) -> Result<(), AppError> {
	let key = origin_key(&id);
	match redis.del(key).await? {
		0 => Err(AppError::NotFound),
		_ => Ok(()),
	}
}

// Update the expiration deadline.
async fn patch_origin(
	Path(id): Path<String>,
	State(mut redis): State<ConnectionManager>,
	Json(origin): Json<Origin>,
) -> Result<(), AppError> {
	let key = origin_key(&id);

	// Make sure the contents haven't changed
	// TODO make a LUA script to do this all in one operation.
	let payload: Option<String> = redis.get(&key).await?;
	let payload = payload.ok_or(AppError::NotFound)?;
	let expected: Origin = serde_json::from_str(&payload)?;

	if expected != origin {
		return Err(AppError::Duplicate);
	}

	// Reset the timeout to 10 minutes.
	match redis.expire(key, 600).await? {
		0 => Err(AppError::NotFound),
		_ => Ok(()),
	}
}

fn origin_key(id: &str) -> String {
	format!("origin.{}", id)
}

#[derive(thiserror::Error, Debug)]
enum AppError {
	#[error("redis error")]
	Redis(#[from] redis::RedisError),

	#[error("json error")]
	Json(#[from] serde_json::Error),

	#[error("not found")]
	NotFound,

	#[error("duplicate ID")]
	Duplicate,
}

// Tell axum how to convert `AppError` into a response.
impl IntoResponse for AppError {
	fn into_response(self) -> Response {
		match self {
			AppError::Redis(e) => (StatusCode::INTERNAL_SERVER_ERROR, format!("redis error: {}", e)).into_response(),
			AppError::Json(e) => (StatusCode::INTERNAL_SERVER_ERROR, format!("json error: {}", e)).into_response(),
			AppError::NotFound => StatusCode::NOT_FOUND.into_response(),
			AppError::Duplicate => StatusCode::CONFLICT.into_response(),
		}
	}
}

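The patch_origin handler above carries a TODO: the GET / compare / EXPIRE sequence is racy and should be a single Lua script. A sketch of what that could look like with the same redis crate — a hypothetical helper, not part of moq-api, and it assumes the payload is stored verbatim so byte equality stands in for comparing deserialized Origins:

```rust
use redis::aio::ConnectionManager;

// Compare-and-refresh in one atomic round trip.
// Returns 1 on success, 0 if the key is missing, -1 if the stored payload differs.
async fn patch_origin_atomic(
	redis: &mut ConnectionManager,
	key: &str,
	payload: &str,
) -> Result<i32, redis::RedisError> {
	// KEYS[1] = origin key, ARGV[1] = expected JSON payload, ARGV[2] = TTL seconds.
	let script = redis::Script::new(
		r#"
		local current = redis.call('GET', KEYS[1])
		if not current then return 0 end
		if current ~= ARGV[1] then return -1 end
		redis.call('EXPIRE', KEYS[1], ARGV[2])
		return 1
		"#,
	);

	script.key(key).arg(payload).arg(600).invoke_async(redis).await
}
```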

@@ -1,7 +1,7 @@
 [package]
 name = "moq-pub"
 description = "Media over QUIC"
-authors = ["Mike English", "Luke Curley"]
+authors = ["Mike English"]
 repository = "https://github.com/kixelated/moq-rs"
 license = "MIT OR Apache-2.0"

@@ -18,30 +18,29 @@ moq-transport = { path = "../moq-transport" }

 # QUIC
 quinn = "0.10"
-webtransport-quinn = "0.6"
-#webtransport-quinn = { path = "../../webtransport-rs/webtransport-quinn" }
-url = "2"
+webtransport-quinn = "0.5"
+webtransport-generic = "0.5"
+http = "0.2.9"

 # Crypto
-rustls = { version = "0.21", features = ["dangerous_configuration"] }
-rustls-native-certs = "0.6"
-rustls-pemfile = "1"
+ring = "0.16.20"
+rustls = "0.21.2"
+rustls-pemfile = "1.0.2"

 # Async stuff
-tokio = { version = "1", features = ["full"] }
+tokio = { version = "1.27", features = ["full"] }

 # CLI, logging, error handling
-clap = { version = "4", features = ["derive"] }
+clap = { version = "4.0", features = ["derive"] }
 log = { version = "0.4", features = ["std"] }
-env_logger = "0.9"
-mp4 = "0.13"
-anyhow = { version = "1", features = ["backtrace"] }
-serde_json = "1"
-rfc6381-codec = "0.1"
-tracing = "0.1"
-tracing-subscriber = "0.3"
+env_logger = "0.9.3"
+mp4 = "0.13.0"
+rustls-native-certs = "0.6.3"
+anyhow = { version = "1.0.70", features = ["backtrace"] }
+serde_json = "1.0.105"
+rfc6381-codec = "0.1.0"

 [build-dependencies]
-clap = { version = "4", features = ["derive"] }
-clap_mangen = "0.2"
-url = "2"
+http = "0.2.9"
+clap = { version = "4.0", features = ["derive"] }
+clap_mangen = "0.2.12"


@@ -5,7 +5,7 @@ A command line tool for publishing media via Media over QUIC (MoQ).
 Expects to receive fragmented MP4 via standard input and connect to a MOQT relay.

 ```
-ffmpeg ... - | moq-pub https://localhost:4443
+ffmpeg ... - | moq-pub -i - --host localhost:4443
 ```

 ### Invoking `moq-pub`:
@@ -13,7 +13,7 @@ ffmpeg ... - | moq-pub https://localhost:4443
 Here's how I'm currently testing things, with a local copy of Big Buck Bunny named `bbb_source.mp4`:

 ```
-$ ffmpeg -hide_banner -v quiet -stream_loop -1 -re -i bbb_source.mp4 -an -f mp4 -movflags empty_moov+frag_every_frame+separate_moof+omit_tfhd_offset - | RUST_LOG=moq_pub=info moq-pub https://localhost:4443
+$ ffmpeg -hide_banner -v quiet -stream_loop -1 -re -i bbb_source.mp4 -an -f mp4 -movflags empty_moov+frag_every_frame+separate_moof+omit_tfhd_offset - | RUST_LOG=moq_pub=info moq-pub -i -
 ```

 This relies on having `moq-relay` (the relay server) already running locally in another shell.


@@ -1,6 +1,5 @@
 use clap::Parser;
-use std::{net, path};
-use url::Url;
+use std::net;

 #[derive(Parser, Clone, Debug)]
 pub struct Config {
@@ -18,31 +17,18 @@ pub struct Config {
 	#[arg(long, default_value = "1500000")]
 	pub bitrate: u32,

-	/// Connect to the given URL starting with https://
-	#[arg(value_parser = moq_url)]
-	pub url: Url,
-
-	/// Use the TLS root CA at this path, encoded as PEM.
-	///
-	/// This value can be provided multiple times for multiple roots.
-	/// If this is empty, system roots will be used instead
-	#[arg(long)]
-	pub tls_root: Vec<path::PathBuf>,
-
-	/// Danger: Disable TLS certificate verification.
-	///
-	/// Fine for local development, but should be used with caution in production.
-	#[arg(long)]
-	pub tls_disable_verify: bool,
+	/// Connect to the given URI starting with moq://
+	#[arg(value_parser = moq_uri)]
+	pub uri: http::Uri,
 }

-fn moq_url(s: &str) -> Result<Url, String> {
-	let url = Url::try_from(s).map_err(|e| e.to_string())?;
+fn moq_uri(s: &str) -> Result<http::Uri, String> {
+	let uri = http::Uri::try_from(s).map_err(|e| e.to_string())?;

 	// Make sure the scheme is moq
-	if url.scheme() != "https" {
-		return Err("url scheme must be https:// for WebTransport".to_string());
+	if uri.scheme_str() != Some("moq") {
+		return Err("uri scheme must be moq".to_string());
 	}

-	Ok(url)
+	Ok(uri)
 }


@@ -1,5 +1,3 @@
-use std::{fs, io, sync::Arc, time};
-
 use anyhow::Context;
 use clap::Parser;

@@ -9,7 +7,7 @@ use cli::*;
 mod media;
 use media::*;

-use moq_transport::cache::broadcast;
+use moq_transport::model::broadcast;

 // TODO: clap complete

@@ -17,39 +15,15 @@ use moq_transport::cache::broadcast;
 async fn main() -> anyhow::Result<()> {
 	env_logger::init();

-	// Disable tracing so we don't get a bunch of Quinn spam.
-	let tracer = tracing_subscriber::FmtSubscriber::builder()
-		.with_max_level(tracing::Level::WARN)
-		.finish();
-	tracing::subscriber::set_global_default(tracer).unwrap();
-
 	let config = Config::parse();

-	let (publisher, subscriber) = broadcast::new("");
+	let (publisher, subscriber) = broadcast::new();
 	let mut media = Media::new(&config, publisher).await?;

-	// Create a list of acceptable root certificates.
+	// Ugh, just let me use my native root certs already
 	let mut roots = rustls::RootCertStore::empty();
-
-	if config.tls_root.is_empty() {
-		// Add the platform's native root certificates.
-		for cert in rustls_native_certs::load_native_certs().context("could not load platform certs")? {
-			roots
-				.add(&rustls::Certificate(cert.0))
-				.context("failed to add root cert")?;
-		}
-	} else {
-		// Add the specified root certificates.
-		for root in &config.tls_root {
-			let root = fs::File::open(root).context("failed to open root cert file")?;
-			let mut root = io::BufReader::new(root);
-			let root = rustls_pemfile::certs(&mut root).context("failed to read root cert")?;
-			anyhow::ensure!(root.len() == 1, "expected a single root cert");
-			let root = rustls::Certificate(root[0].to_owned());
-			roots.add(&root).context("failed to add root cert")?;
-		}
-	}
+	for cert in rustls_native_certs::load_native_certs().expect("could not load platform certs") {
+		roots.add(&rustls::Certificate(cert.0)).unwrap();
+	}

 	let mut tls_config = rustls::ClientConfig::builder()
@@ -57,12 +31,6 @@ async fn main() -> anyhow::Result<()> {
 		.with_root_certificates(roots)
 		.with_no_client_auth();

-	// Allow disabling TLS verification altogether.
-	if config.tls_disable_verify {
-		let noop = NoCertificateVerification {};
-		tls_config.dangerous().set_certificate_verifier(Arc::new(noop));
-	}
-
 	tls_config.alpn_protocols = vec![webtransport_quinn::ALPN.to_vec()]; // this one is important

 	let arc_tls_config = std::sync::Arc::new(tls_config);
@@ -71,9 +39,14 @@ async fn main() -> anyhow::Result<()> {
 	let mut endpoint = quinn::Endpoint::client(config.bind)?;
 	endpoint.set_default_client_config(quinn_client_config);

-	log::info!("connecting to relay: url={}", config.url);
+	log::info!("connecting to {}", config.uri);

-	let session = webtransport_quinn::connect(&endpoint, &config.url)
+	// Change the uri scheme to "https" for WebTransport
+	let mut parts = config.uri.into_parts();
+	parts.scheme = Some(http::uri::Scheme::HTTPS);
+	let uri = http::Uri::from_parts(parts)?;
+
+	let session = webtransport_quinn::connect(&endpoint, &uri)
 		.await
 		.context("failed to create WebTransport session")?;
@@ -89,19 +62,3 @@ async fn main() -> anyhow::Result<()> {

 	Ok(())
 }
-
-pub struct NoCertificateVerification {}
-
-impl rustls::client::ServerCertVerifier for NoCertificateVerification {
-	fn verify_server_cert(
-		&self,
-		_end_entity: &rustls::Certificate,
-		_intermediates: &[rustls::Certificate],
-		_server_name: &rustls::ServerName,
-		_scts: &mut dyn Iterator<Item = &[u8]>,
-		_ocsp_response: &[u8],
-		_now: time::SystemTime,
-	) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
-		Ok(rustls::client::ServerCertVerified::assertion())
-	}
-}


@ -1,10 +1,9 @@
use crate::cli::Config; use crate::cli::Config;
use anyhow::{self, Context}; use anyhow::{self, Context};
use moq_transport::cache::{broadcast, fragment, segment, track}; use moq_transport::model::{broadcast, segment, track};
use moq_transport::VarInt; use moq_transport::VarInt;
use mp4::{self, ReadBox}; use mp4::{self, ReadBox};
use serde_json::json; use serde_json::json;
use std::cmp::max;
use std::collections::HashMap; use std::collections::HashMap;
use std::io::Cursor; use std::io::Cursor;
use std::time; use std::time;
@ -16,12 +15,11 @@ pub struct Media {
_catalog: track::Publisher, _catalog: track::Publisher,
_init: track::Publisher, _init: track::Publisher,
// Tracks based on their track ID. tracks: HashMap<String, Track>,
tracks: HashMap<u32, Track>,
} }
impl Media { impl Media {
pub async fn new(_config: &Config, mut broadcast: broadcast::Publisher) -> anyhow::Result<Self> { pub async fn new(config: &Config, mut broadcast: broadcast::Publisher) -> anyhow::Result<Self> {
let mut stdin = tokio::io::stdin(); let mut stdin = tokio::io::stdin();
let ftyp = read_atom(&mut stdin).await?; let ftyp = read_atom(&mut stdin).await?;
anyhow::ensure!(&ftyp[4..8] == b"ftyp", "expected ftyp atom"); anyhow::ensure!(&ftyp[4..8] == b"ftyp", "expected ftyp atom");
@ -41,39 +39,33 @@ impl Media {
let moov = mp4::MoovBox::read_box(&mut moov_reader, moov_header.size)?; let moov = mp4::MoovBox::read_box(&mut moov_reader, moov_header.size)?;
// Create the catalog track with a single segment. // Create the catalog track with a single segment.
let mut init_track = broadcast.create_track("0.mp4")?; let mut init_track = broadcast.create_track("1.mp4")?;
let mut init_segment = init_track.create_segment(segment::Info { let mut init_segment = init_track.create_segment(segment::Info {
sequence: VarInt::ZERO, sequence: VarInt::ZERO,
priority: 0, priority: i32::MAX,
expires: None, expires: None,
})?; })?;
// Create a single fragment, optionally setting the size init_segment.write_chunk(init.into())?;
let mut init_fragment = init_segment.create_fragment(fragment::Info {
sequence: VarInt::ZERO,
size: None, // size is only needed when we have multiple fragments.
})?;
init_fragment.write_chunk(init.into())?;
let mut tracks = HashMap::new(); let mut tracks = HashMap::new();
for trak in &moov.traks { for trak in &moov.traks {
let id = trak.tkhd.track_id; let id = trak.tkhd.track_id;
let name = format!("{}.m4s", id); let name = id.to_string();
let timescale = track_timescale(&moov, id); let timescale = track_timescale(&moov, id);
// Store the track publisher in a map so we can update it later. // Store the track publisher in a map so we can update it later.
let track = broadcast.create_track(&name)?; let track = broadcast.create_track(&name)?;
let track = Track::new(track, timescale); let track = Track::new(track, timescale);
tracks.insert(id, track); tracks.insert(name, track);
} }
let mut catalog = broadcast.create_track(".catalog")?; let mut catalog = broadcast.create_track(".catalog")?;
// Create the catalog track // Create the catalog track
Self::serve_catalog(&mut catalog, &init_track.name, &moov)?; Self::serve_catalog(&mut catalog, config, init_track.name.to_string(), &moov, &tracks)?;
Ok(Media { Ok(Media {
_broadcast: broadcast, _broadcast: broadcast,
@ -86,7 +78,7 @@ impl Media {
pub async fn run(&mut self) -> anyhow::Result<()> { pub async fn run(&mut self) -> anyhow::Result<()> {
let mut stdin = tokio::io::stdin(); let mut stdin = tokio::io::stdin();
// The current track name // The current track name
let mut current = None; let mut track_name = None;
loop { loop {
let atom = read_atom(&mut stdin).await?; let atom = read_atom(&mut stdin).await?;
@ -100,21 +92,22 @@ impl Media {
// Process the moof. // Process the moof.
let fragment = Fragment::new(moof)?; let fragment = Fragment::new(moof)?;
let name = fragment.track.to_string();
// Get the track for this moof. // Get the track for this moof.
let track = self.tracks.get_mut(&fragment.track).context("failed to find track")?; let track = self.tracks.get_mut(&name).context("failed to find track")?;
// Save the track ID for the next iteration, which must be a mdat. // Save the track ID for the next iteration, which must be a mdat.
anyhow::ensure!(current.is_none(), "multiple moof atoms"); anyhow::ensure!(track_name.is_none(), "multiple moof atoms");
current.replace(fragment.track); track_name.replace(name);
// Publish the moof header, creating a new segment if it's a keyframe. // Publish the moof header, creating a new segment if it's a keyframe.
track.header(atom, fragment).context("failed to publish moof")?; track.header(atom, fragment).context("failed to publish moof")?;
} }
mp4::BoxType::MdatBox => { mp4::BoxType::MdatBox => {
// Get the track ID from the previous moof. // Get the track ID from the previous moof.
let track = current.take().context("missing moof")?; let name = track_name.take().context("missing moof")?;
let track = self.tracks.get_mut(&track).context("failed to find track")?; let track = self.tracks.get_mut(&name).context("failed to find track")?;
// Publish the mdat atom. // Publish the mdat atom.
track.data(atom).context("failed to publish mdat")?; track.data(atom).context("failed to publish mdat")?;
@ -129,102 +122,63 @@ impl Media {
fn serve_catalog( fn serve_catalog(
track: &mut track::Publisher, track: &mut track::Publisher,
init_track_name: &str, config: &Config,
init_track_name: String,
moov: &mp4::MoovBox, moov: &mp4::MoovBox,
_tracks: &HashMap<String, Track>,
) -> Result<(), anyhow::Error> { ) -> Result<(), anyhow::Error> {
let mut segment = track.create_segment(segment::Info { let mut segment = track.create_segment(segment::Info {
sequence: VarInt::ZERO, sequence: VarInt::ZERO,
priority: 0, priority: i32::MAX,
expires: None, expires: None,
})?; })?;
let mut tracks = Vec::new(); // avc1[.PPCCLL]
//
// let profile = 0x64;
// let constraints = 0x00;
// let level = 0x1f;
for trak in &moov.traks { // TODO: do build multi-track catalog by looping through moov.traks
let mut track = json!({ let trak = moov.traks[0].clone();
"container": "mp4", let avc1 = trak
"init_track": init_track_name, .mdia
"data_track": format!("{}.m4s", trak.tkhd.track_id), .minf
}); .stbl
.stsd
.avc1
.ok_or(anyhow::anyhow!("avc1 atom not found"))?;
let stsd = &trak.mdia.minf.stbl.stsd; let profile = avc1.avcc.avc_profile_indication;
if let Some(avc1) = &stsd.avc1 { let constraints = avc1.avcc.profile_compatibility; // Not 100% certain here, but it's 0x00 on my current test video
// avc1[.PPCCLL] let level = avc1.avcc.avc_level_indication;
//
// let profile = 0x64;
// let constraints = 0x00;
// let level = 0x1f;
let profile = avc1.avcc.avc_profile_indication;
let constraints = avc1.avcc.profile_compatibility; // Not 100% certain here, but it's 0x00 on my current test video
let level = avc1.avcc.avc_level_indication;
let width = avc1.width; let width = avc1.width;
let height = avc1.height; let height = avc1.height;
let codec = rfc6381_codec::Codec::avc1(profile, constraints, level); let codec = rfc6381_codec::Codec::avc1(profile, constraints, level);
let codec_str = codec.to_string(); let codec_str = codec.to_string();
track["kind"] = json!("video");
track["codec"] = json!(codec_str);
track["width"] = json!(width);
track["height"] = json!(height);
} else if let Some(_hev1) = &stsd.hev1 {
// TODO https://github.com/gpac/mp4box.js/blob/325741b592d910297bf609bc7c400fc76101077b/src/box-codecs.js#L106
anyhow::bail!("HEVC not yet supported")
} else if let Some(mp4a) = &stsd.mp4a {
let desc = &mp4a
.esds
.as_ref()
.context("missing esds box for MP4a")?
.es_desc
.dec_config;
let codec_str = format!("mp4a.{:02x}.{}", desc.object_type_indication, desc.dec_specific.profile);
track["kind"] = json!("audio");
track["codec"] = json!(codec_str);
track["channel_count"] = json!(mp4a.channelcount);
track["sample_rate"] = json!(mp4a.samplerate.value());
track["sample_size"] = json!(mp4a.samplesize);
let bitrate = max(desc.max_bitrate, desc.avg_bitrate);
if bitrate > 0 {
track["bit_rate"] = json!(bitrate);
}
} else if let Some(vp09) = &stsd.vp09 {
// https://github.com/gpac/mp4box.js/blob/325741b592d910297bf609bc7c400fc76101077b/src/box-codecs.js#L238
let vpcc = &vp09.vpcc;
let codec_str = format!("vp09.0.{:02x}.{:02x}.{:02x}", vpcc.profile, vpcc.level, vpcc.bit_depth);
track["kind"] = json!("video");
track["codec"] = json!(codec_str);
track["width"] = json!(vp09.width); // no idea if this needs to be multiplied
track["height"] = json!(vp09.height); // no idea if this needs to be multiplied
// TODO Test if this actually works; I'm just guessing based on mp4box.js
anyhow::bail!("VP9 not yet supported")
} else {
// TODO add av01 support: https://github.com/gpac/mp4box.js/blob/325741b592d910297bf609bc7c400fc76101077b/src/box-codecs.js#L251
anyhow::bail!("unknown codec for track: {}", trak.tkhd.track_id);
}
tracks.push(track);
}
let catalog = json!({ let catalog = json!({
"tracks": tracks "tracks": [
{
"container": "mp4",
"kind": "video",
"init_track": init_track_name,
"data_track": "1", // assume just one track for now
"codec": codec_str,
"width": width,
"height": height,
"frame_rate": config.fps,
"bit_rate": config.bitrate,
}
]
}); });
let catalog_str = serde_json::to_string_pretty(&catalog)?; let catalog_str = serde_json::to_string_pretty(&catalog)?;
log::info!("catalog: {}", catalog_str); log::info!("catalog: {}", catalog_str);
// Create a single fragment for the segment.
let mut fragment = segment.create_fragment(fragment::Info {
sequence: VarInt::ZERO,
size: None, // Size is only needed when we have multiple fragments.
})?;
// Add the segment and add the fragment. // Add the segment and add the fragment.
fragment.write_chunk(catalog_str.into())?; segment.write_chunk(catalog_str.into())?;
Ok(()) Ok(())
} }
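
The codec strings assembled here follow RFC 6381. For H.264, the avc1 entry is just the three AVCC bytes (profile, compatibility/constraints, level) rendered as hex, which is what the avc1[.PPCCLL] comment refers to; roughly what rfc6381_codec::Codec::avc1(...).to_string() produces:

// Hypothetical standalone equivalent, e.g. High profile, level 3.1:
// avc1_codec_string(0x64, 0x00, 0x1f) == "avc1.64001f"
fn avc1_codec_string(profile: u8, constraints: u8, level: u8) -> String {
    format!("avc1.{:02x}{:02x}{:02x}", profile, constraints, level)
}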
@ -272,7 +226,7 @@ struct Track {
track: track::Publisher, track: track::Publisher,
// The current segment // The current segment
current: Option<fragment::Publisher>, segment: Option<segment::Publisher>,
// The number of units per second. // The number of units per second.
timescale: u64, timescale: u64,
@ -286,16 +240,16 @@ impl Track {
Self { Self {
track, track,
sequence: 0, sequence: 0,
current: None, segment: None,
timescale, timescale,
} }
} }
pub fn header(&mut self, raw: Vec<u8>, fragment: Fragment) -> anyhow::Result<()> { pub fn header(&mut self, raw: Vec<u8>, fragment: Fragment) -> anyhow::Result<()> {
if let Some(current) = self.current.as_mut() { if let Some(segment) = self.segment.as_mut() {
if !fragment.keyframe { if !fragment.keyframe {
// Use the existing segment // Use the existing segment
current.write_chunk(raw.into())?; segment.write_chunk(raw.into())?;
return Ok(()); return Ok(());
} }
} }
@ -304,7 +258,7 @@ impl Track {
// Compute the timestamp in milliseconds. // Compute the timestamp in milliseconds.
// Overflows after 583 million years, so we're fine. // Overflows after 583 million years, so we're fine.
let timestamp: u32 = fragment let _timestamp: i32 = fragment
.timestamp(self.timescale) .timestamp(self.timescale)
.as_millis() .as_millis()
.try_into() .try_into()
@ -313,34 +267,26 @@ impl Track {
// Create a new segment. // Create a new segment.
let mut segment = self.track.create_segment(segment::Info { let mut segment = self.track.create_segment(segment::Info {
sequence: VarInt::try_from(self.sequence).context("sequence too large")?, sequence: VarInt::try_from(self.sequence).context("sequence too large")?,
priority: i32::MAX, // TODO
// Newer segments are higher priority
priority: u32::MAX.checked_sub(timestamp).context("priority too large")?,
// Delete segments after 10s. // Delete segments after 10s.
expires: Some(time::Duration::from_secs(10)), expires: Some(time::Duration::from_secs(10)),
})?; })?;
// Create a single fragment for the segment that we will keep appending.
let mut fragment = segment.create_fragment(fragment::Info {
sequence: VarInt::ZERO,
size: None,
})?;
self.sequence += 1; self.sequence += 1;
// Insert the raw atom into the segment. // Insert the raw atom into the segment.
fragment.write_chunk(raw.into())?; segment.write_chunk(raw.into())?;
// Save for the next iteration // Save for the next iteration
self.current = Some(fragment); self.segment = Some(segment);
Ok(()) Ok(())
} }
pub fn data(&mut self, raw: Vec<u8>) -> anyhow::Result<()> { pub fn data(&mut self, raw: Vec<u8>) -> anyhow::Result<()> {
let fragment = self.current.as_mut().context("missing current fragment")?; let segment = self.segment.as_mut().context("missing segment")?;
fragment.write_chunk(raw.into())?; segment.write_chunk(raw.into())?;
Ok(()) Ok(())
} }
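
One side of the header() hunk above derives the segment priority as u32::MAX minus the millisecond timestamp, so a newer segment yields a smaller wire value; for the "newer segments are higher priority" comment to hold, smaller values must be treated as more urgent. A small check of the arithmetic (helper name hypothetical):

use anyhow::Context;

// Mirrors the checked subtraction above; it never underflows for a u32
// input, but keeps the same error-handling shape as the original.
fn segment_priority(timestamp_ms: u32) -> anyhow::Result<u32> {
    u32::MAX.checked_sub(timestamp_ms).context("priority too large")
}
// segment_priority(2_000) < segment_priority(1_000): the newer segment wins.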

View File

@ -13,39 +13,28 @@ categories = ["multimedia", "network-programming", "web-programming"]
[dependencies] [dependencies]
moq-transport = { path = "../moq-transport" } moq-transport = { path = "../moq-transport" }
moq-api = { path = "../moq-api" }
# QUIC # QUIC
quinn = "0.10" quinn = "0.10"
webtransport-quinn = "0.6" webtransport-generic = "0.5"
#webtransport-quinn = { path = "../../webtransport-rs/webtransport-quinn" } webtransport-quinn = "0.5"
url = "2"
# Crypto # Crypto
ring = "0.16" ring = "0.16.20"
rustls = { version = "0.21", features = ["dangerous_configuration"] } rustls = "0.21.2"
rustls-pemfile = "1" rustls-pemfile = "1.0.2"
rustls-native-certs = "0.6"
webpki = "0.22"
# Async stuff # Async stuff
tokio = { version = "1", features = ["full"] } tokio = { version = "1.27", features = ["full"] }
# Web server to serve the fingerprint # Web server to serve the fingerprint
axum = { version = "0.6", features = ["tokio"] } warp = { version = "0.3.3", features = ["tls"] }
axum-server = { version = "0.5", features = ["tls-rustls"] } hex = "0.4.3"
hex = "0.4"
tower-http = { version = "0.4", features = ["cors"] }
# Error handling
anyhow = { version = "1", features = ["backtrace"] }
thiserror = "1"
# CLI
clap = { version = "4", features = ["derive"] }
# Logging # Logging
clap = { version = "4.0", features = ["derive"] }
log = { version = "0.4", features = ["std"] } log = { version = "0.4", features = ["std"] }
env_logger = "0.9" env_logger = "0.9.3"
anyhow = "1.0.70"
tracing = "0.1" tracing = "0.1"
tracing-subscriber = "0.3" tracing-subscriber = "0.3.0"

View File

@ -1,5 +1,4 @@
use std::{net, path}; use std::{net, path};
use url::Url;
use clap::Parser; use clap::Parser;
@ -8,48 +7,17 @@ use clap::Parser;
pub struct Config { pub struct Config {
/// Listen on this address /// Listen on this address
#[arg(long, default_value = "[::]:4443")] #[arg(long, default_value = "[::]:4443")]
pub listen: net::SocketAddr, pub bind: net::SocketAddr,
/// Use the certificates at this path, encoded as PEM. /// Use the certificate file at this path
///
/// You can use this option multiple times for multiple certificates.
/// The first match for the provided SNI will be used, otherwise the last cert will be used.
/// You also need to provide the private key multiple times via `key``.
#[arg(long)] #[arg(long)]
pub tls_cert: Vec<path::PathBuf>, pub cert: path::PathBuf,
/// Use the private key at this path, encoded as PEM. /// Use the private key at this path
///
/// There must be a key for every certificate provided via `cert`.
#[arg(long)] #[arg(long)]
pub tls_key: Vec<path::PathBuf>, pub key: path::PathBuf,
/// Use the TLS root at this path, encoded as PEM. /// Listen on HTTPS and serve /fingerprint, for self-signed certificates
///
/// This value can be provided multiple times for multiple roots.
/// If this is empty, system roots will be used instead
#[arg(long)]
pub tls_root: Vec<path::PathBuf>,
/// Danger: Disable TLS certificate verification.
///
/// Fine for local development and between relays, but should be used in caution in production.
#[arg(long)]
pub tls_disable_verify: bool,
/// Optional: Use the moq-api via HTTP to store origin information.
#[arg(long)]
pub api: Option<Url>,
/// Our internal address which we advertise to other origins.
/// We use QUIC, so the certificate must be valid for this address.
/// This needs to be prefixed with https:// to use WebTransport.
/// This is only used when --api is set and only for publishing broadcasts.
#[arg(long)]
pub api_node: Option<Url>,
/// Enable development mode.
/// Currently, this only listens on HTTPS and serves /fingerprint, for self-signed certificates
#[arg(long, action)] #[arg(long, action)]
pub dev: bool, pub fingerprint: bool,
} }
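
Since clap derives kebab-case flags from these field names, the multi-certificate side of this Config can be exercised as below; parse_from is convenient in tests, while parse() reads the real argv. Paths here are placeholders:

use clap::Parser;

// Hypothetical invocation: every --tls-cert needs a matching --tls-key.
let config = Config::parse_from([
    "moq-relay",
    "--listen", "[::]:4443",
    "--tls-cert", "primary.pem", "--tls-key", "primary.key",
    "--tls-cert", "backup.pem", "--tls-key", "backup.key",
    "--dev",
]);
assert_eq!(config.tls_cert.len(), config.tls_key.len());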

View File

@ -1,51 +0,0 @@
use thiserror::Error;
#[derive(Error, Debug)]
pub enum RelayError {
#[error("transport error: {0}")]
Transport(#[from] moq_transport::session::SessionError),
#[error("cache error: {0}")]
Cache(#[from] moq_transport::cache::CacheError),
#[error("api error: {0}")]
MoqApi(#[from] moq_api::ApiError),
#[error("url error: {0}")]
Url(#[from] url::ParseError),
#[error("webtransport client error: {0}")]
WebTransportClient(#[from] webtransport_quinn::ClientError),
#[error("webtransport server error: {0}")]
WebTransportServer(#[from] webtransport_quinn::ServerError),
#[error("missing node")]
MissingNode,
}
impl moq_transport::MoqError for RelayError {
fn code(&self) -> u32 {
match self {
Self::Transport(err) => err.code(),
Self::Cache(err) => err.code(),
Self::MoqApi(_err) => 504,
Self::Url(_) => 500,
Self::MissingNode => 500,
Self::WebTransportClient(_) => 504,
Self::WebTransportServer(_) => 500,
}
}
fn reason(&self) -> String {
match self {
Self::Transport(err) => format!("transport error: {}", err.reason()),
Self::Cache(err) => format!("cache error: {}", err.reason()),
Self::MoqApi(err) => format!("api error: {}", err),
Self::Url(err) => format!("url error: {}", err),
Self::MissingNode => "missing node".to_owned(),
Self::WebTransportServer(err) => format!("upstream server error: {}", err),
Self::WebTransportClient(err) => format!("upstream client error: {}", err),
}
}
}

View File

@ -1,21 +1,17 @@
use std::{fs, io, sync};
use anyhow::Context; use anyhow::Context;
use clap::Parser; use clap::Parser;
use ring::digest::{digest, SHA256};
use warp::Filter;
mod config; mod config;
mod error; mod server;
mod origin;
mod quic;
mod session; mod session;
mod tls;
mod web;
pub use config::*; pub use config::*;
pub use error::*; pub use server::*;
pub use origin::*;
pub use quic::*;
pub use session::*; pub use session::*;
pub use tls::*;
pub use web::*;
#[tokio::main] #[tokio::main]
async fn main() -> anyhow::Result<()> { async fn main() -> anyhow::Result<()> {
@ -28,24 +24,47 @@ async fn main() -> anyhow::Result<()> {
tracing::subscriber::set_global_default(tracer).unwrap(); tracing::subscriber::set_global_default(tracer).unwrap();
let config = Config::parse(); let config = Config::parse();
let tls = Tls::load(&config)?;
// Create a QUIC server for media. // Create a server to actually serve the media
let quic = Quic::new(config.clone(), tls.clone()) let server = Server::new(config.clone()).context("failed to create server")?;
.await
.context("failed to create server")?;
// Create the web server if the --dev flag was set. // Run all of the above
// This is currently only useful in local development so it's not enabled by default. tokio::select! {
if config.dev { res = server.run() => res.context("failed to run server"),
let web = Web::new(config, tls); res = serve_http(config), if config.fingerprint => res.context("failed to run HTTP server"),
// Unfortunately we can't use preconditions because Tokio still executes the branch; just ignore the result
tokio::select! {
res = quic.serve() => res.context("failed to run quic server"),
res = web.serve() => res.context("failed to run web server"),
}
} else {
quic.serve().await.context("failed to run quic server")
} }
} }
// Run a HTTP server using Warp
// TODO remove this when Chrome adds support for self-signed certificates using WebTransport
async fn serve_http(config: Config) -> anyhow::Result<()> {
// Read the PEM certificate file
let crt = fs::File::open(&config.cert)?;
let mut crt = io::BufReader::new(crt);
// Parse the DER certificate
let certs = rustls_pemfile::certs(&mut crt)?;
let cert = certs.first().expect("no certificate found");
// Compute the SHA-256 digest
let fingerprint = digest(&SHA256, cert.as_ref());
let fingerprint = hex::encode(fingerprint.as_ref());
let fingerprint = sync::Arc::new(fingerprint);
let cors = warp::cors().allow_any_origin();
// What an annoyingly complicated way to serve a static String
// I spent a long time trying to find the exact way of cloning and dereferencing the Arc.
let routes = warp::path!("fingerprint")
.map(move || (*(fingerprint.clone())).clone())
.with(cors);
warp::serve(routes)
.tls()
.cert_path(config.cert)
.key_path(config.key)
.run(config.bind)
.await;
Ok(())
}
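
Distilled, /fingerprint returns the hex-encoded SHA-256 of the first DER certificate in the chain; a browser can pass that value to WebTransport (serverCertificateHashes) to trust the self-signed certificate, which is the whole reason this endpoint exists. A standalone sketch of the same computation:

use ring::digest::{digest, SHA256};

// Sketch: compute the fingerprint served above, straight from PEM bytes.
fn cert_fingerprint(pem: &[u8]) -> anyhow::Result<String> {
    let mut reader = std::io::BufReader::new(pem);
    let certs = rustls_pemfile::certs(&mut reader)?;
    let der = certs.first().ok_or_else(|| anyhow::anyhow!("no certificate found"))?;
    Ok(hex::encode(digest(&SHA256, der)))
}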

View File

@ -1,216 +0,0 @@
use std::ops::{Deref, DerefMut};
use std::{
collections::HashMap,
sync::{Arc, Mutex, Weak},
};
use moq_api::ApiError;
use moq_transport::cache::{broadcast, CacheError};
use url::Url;
use tokio::time;
use crate::RelayError;
#[derive(Clone)]
pub struct Origin {
// An API client used to get/set broadcasts.
// If None then we never use a remote origin.
// TODO: Stub this out instead.
api: Option<moq_api::Client>,
// The internal address of our node.
// If None then we can never advertise ourselves as an origin.
// TODO: Stub this out instead.
node: Option<Url>,
// A map of active broadcasts by ID.
cache: Arc<Mutex<HashMap<String, Weak<Subscriber>>>>,
// A QUIC endpoint we'll use to fetch from other origins.
quic: quinn::Endpoint,
}
impl Origin {
pub fn new(api: Option<moq_api::Client>, node: Option<Url>, quic: quinn::Endpoint) -> Self {
Self {
api,
node,
cache: Default::default(),
quic,
}
}
/// Create a new broadcast with the given ID.
///
/// Publisher::run needs to be called to periodically refresh the origin cache.
pub async fn publish(&mut self, id: &str) -> Result<Publisher, RelayError> {
let (publisher, subscriber) = broadcast::new(id);
let subscriber = {
let mut cache = self.cache.lock().unwrap();
// Check if the broadcast already exists.
// TODO This is racy, because a new publisher could be created while existing subscribers are still active.
if cache.contains_key(id) {
return Err(CacheError::Duplicate.into());
}
// Create subscriber that will remove from the cache when dropped.
let subscriber = Arc::new(Subscriber {
broadcast: subscriber,
origin: self.clone(),
});
cache.insert(id.to_string(), Arc::downgrade(&subscriber));
subscriber
};
// Create a publisher that constantly updates itself as the origin in moq-api.
// It holds a reference to the subscriber to prevent dropping early.
let mut publisher = Publisher {
broadcast: publisher,
subscriber,
api: None,
};
// Insert the publisher into the database.
if let Some(api) = self.api.as_mut() {
// Make a URL for the broadcast.
let url = self.node.as_ref().ok_or(RelayError::MissingNode)?.clone().join(id)?;
let origin = moq_api::Origin { url };
api.set_origin(id, &origin).await?;
// Refresh every 5 minutes
publisher.api = Some((api.clone(), origin));
}
Ok(publisher)
}
pub fn subscribe(&self, id: &str) -> Arc<Subscriber> {
let mut cache = self.cache.lock().unwrap();
if let Some(broadcast) = cache.get(id) {
if let Some(broadcast) = broadcast.upgrade() {
return broadcast;
}
}
let (publisher, subscriber) = broadcast::new(id);
let subscriber = Arc::new(Subscriber {
broadcast: subscriber,
origin: self.clone(),
});
cache.insert(id.to_string(), Arc::downgrade(&subscriber));
let mut this = self.clone();
let id = id.to_string();
// Rather than fetching from the API and connecting via QUIC inline, we'll spawn a task to do it.
// This way we could stop polling this session and it won't impact other sessions.
// It also means we'll only connect the API and QUIC once if N subscribers suddenly show up.
// However, the downside is that we don't return an error immediately.
// If that's important, it can be done but it gets a bit racy.
tokio::spawn(async move {
if let Err(err) = this.serve(&id, publisher).await {
log::warn!("failed to serve remote broadcast: id={} err={}", id, err);
}
});
subscriber
}
async fn serve(&mut self, id: &str, publisher: broadcast::Publisher) -> Result<(), RelayError> {
log::debug!("finding origin: id={}", id);
// Fetch the origin from the API.
let origin = self
.api
.as_mut()
.ok_or(CacheError::NotFound)?
.get_origin(id)
.await?
.ok_or(CacheError::NotFound)?;
log::debug!("fetching from origin: id={} url={}", id, origin.url);
// Establish the webtransport session.
let session = webtransport_quinn::connect(&self.quic, &origin.url).await?;
let session = moq_transport::session::Client::subscriber(session, publisher).await?;
session.run().await?;
Ok(())
}
}
pub struct Subscriber {
pub broadcast: broadcast::Subscriber,
origin: Origin,
}
impl Drop for Subscriber {
fn drop(&mut self) {
self.origin.cache.lock().unwrap().remove(&self.broadcast.id);
}
}
impl Deref for Subscriber {
type Target = broadcast::Subscriber;
fn deref(&self) -> &Self::Target {
&self.broadcast
}
}
pub struct Publisher {
pub broadcast: broadcast::Publisher,
api: Option<(moq_api::Client, moq_api::Origin)>,
#[allow(dead_code)]
subscriber: Arc<Subscriber>,
}
impl Publisher {
pub async fn run(&mut self) -> Result<(), ApiError> {
// Every 5m tell the API we're still alive.
// TODO don't hard-code these values
let mut interval = time::interval(time::Duration::from_secs(60 * 5));
loop {
if let Some((api, origin)) = self.api.as_mut() {
api.patch_origin(&self.broadcast.id, origin).await?;
}
// TODO move to start of loop; this is just for testing
interval.tick().await;
}
}
pub async fn close(&mut self) -> Result<(), ApiError> {
if let Some((api, _)) = self.api.as_mut() {
api.delete_origin(&self.broadcast.id).await?;
}
Ok(())
}
}
impl Deref for Publisher {
type Target = broadcast::Publisher;
fn deref(&self) -> &Self::Target {
&self.broadcast
}
}
impl DerefMut for Publisher {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.broadcast
}
}
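
The caching trick in Origin deserves a callout: the map stores Weak handles, every Subscriber scrubs itself from the map on Drop, and subscribe() either upgrades a live entry or creates a fresh one, so N simultaneous viewers trigger at most one API lookup and one upstream QUIC connection. The pattern in isolation (names hypothetical):

use std::collections::HashMap;
use std::sync::{Arc, Mutex, Weak};

type Map = Arc<Mutex<HashMap<String, Weak<Entry>>>>;

struct Cache {
    inner: Map,
}

struct Entry {
    id: String,
    cache: Map,
}

impl Drop for Entry {
    // Remove ourselves from the map once the last Arc is gone.
    fn drop(&mut self) {
        self.cache.lock().unwrap().remove(&self.id);
    }
}

impl Cache {
    fn get_or_insert(&self, id: &str) -> Arc<Entry> {
        let mut map = self.inner.lock().unwrap();
        if let Some(existing) = map.get(id).and_then(Weak::upgrade) {
            return existing; // someone is already serving this id
        }
        let entry = Arc::new(Entry {
            id: id.to_string(),
            cache: self.inner.clone(),
        });
        map.insert(id.to_string(), Arc::downgrade(&entry));
        entry
    }
}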

View File

@ -1,85 +0,0 @@
use std::{sync::Arc, time};
use anyhow::Context;
use tokio::task::JoinSet;
use crate::{Config, Origin, Session, Tls};
pub struct Quic {
quic: quinn::Endpoint,
// The active connections.
conns: JoinSet<anyhow::Result<()>>,
// The map of active broadcasts by path.
origin: Origin,
}
impl Quic {
// Create a QUIC endpoint that can be used for both clients and servers.
pub async fn new(config: Config, tls: Tls) -> anyhow::Result<Self> {
let mut client_config = tls.client.clone();
let mut server_config = tls.server.clone();
client_config.alpn_protocols = vec![webtransport_quinn::ALPN.to_vec()];
server_config.alpn_protocols = vec![webtransport_quinn::ALPN.to_vec()];
// Enable BBR congestion control
// TODO validate the implementation
let mut transport_config = quinn::TransportConfig::default();
transport_config.max_idle_timeout(Some(time::Duration::from_secs(10).try_into().unwrap()));
transport_config.keep_alive_interval(Some(time::Duration::from_secs(4))); // TODO make this smarter
transport_config.congestion_controller_factory(Arc::new(quinn::congestion::BbrConfig::default()));
transport_config.mtu_discovery_config(None); // Disable MTU discovery
let transport_config = Arc::new(transport_config);
let mut client_config = quinn::ClientConfig::new(Arc::new(client_config));
let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(server_config));
server_config.transport_config(transport_config.clone());
client_config.transport_config(transport_config);
// There's a bit more boilerplate to make a generic endpoint.
let runtime = quinn::default_runtime().context("no async runtime")?;
let endpoint_config = quinn::EndpointConfig::default();
let socket = std::net::UdpSocket::bind(config.listen).context("failed to bind UDP socket")?;
// Create the generic QUIC endpoint.
let mut quic = quinn::Endpoint::new(endpoint_config, Some(server_config), socket, runtime)
.context("failed to create QUIC endpoint")?;
quic.set_default_client_config(client_config);
let api = config.api.map(|url| {
log::info!("using moq-api: url={}", url);
moq_api::Client::new(url)
});
if let Some(ref node) = config.api_node {
log::info!("advertising origin: url={}", node);
}
let origin = Origin::new(api, config.api_node, quic.clone());
let conns = JoinSet::new();
Ok(Self { quic, origin, conns })
}
pub async fn serve(mut self) -> anyhow::Result<()> {
log::info!("listening on {}", self.quic.local_addr()?);
loop {
tokio::select! {
res = self.quic.accept() => {
let conn = res.context("failed to accept QUIC connection")?;
let mut session = Session::new(self.origin.clone());
self.conns.spawn(async move { session.run(conn).await });
},
res = self.conns.join_next(), if !self.conns.is_empty() => {
let res = res.expect("no tasks").expect("task aborted");
if let Err(err) = res {
log::warn!("connection terminated: {:?}", err);
}
},
}
}
}
}

93
moq-relay/src/server.rs Normal file
View File

@ -0,0 +1,93 @@
use std::{
collections::HashMap,
fs, io,
sync::{Arc, Mutex},
time,
};
use anyhow::Context;
use moq_transport::model::broadcast;
use tokio::task::JoinSet;
use crate::{Config, Session};
pub struct Server {
server: quinn::Endpoint,
// The active connections.
conns: JoinSet<anyhow::Result<()>>,
// The map of active broadcasts by path.
broadcasts: Arc<Mutex<HashMap<String, broadcast::Subscriber>>>,
}
impl Server {
// Create a new server
pub fn new(config: Config) -> anyhow::Result<Self> {
// Read the PEM certificate chain
let certs = fs::File::open(config.cert).context("failed to open cert file")?;
let mut certs = io::BufReader::new(certs);
let certs = rustls_pemfile::certs(&mut certs)?
.into_iter()
.map(rustls::Certificate)
.collect();
// Read the PEM private key
let keys = fs::File::open(config.key).context("failed to open key file")?;
let mut keys = io::BufReader::new(keys);
let mut keys = rustls_pemfile::pkcs8_private_keys(&mut keys)?;
anyhow::ensure!(keys.len() == 1, "expected a single key");
let key = rustls::PrivateKey(keys.remove(0));
let mut tls_config = rustls::ServerConfig::builder()
.with_safe_default_cipher_suites()
.with_safe_default_kx_groups()
.with_protocol_versions(&[&rustls::version::TLS13])
.unwrap()
.with_no_client_auth()
.with_single_cert(certs, key)?;
tls_config.max_early_data_size = u32::MAX;
tls_config.alpn_protocols = vec![webtransport_quinn::ALPN.to_vec()];
let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(tls_config));
// Enable BBR congestion control
// TODO validate the implementation
let mut transport_config = quinn::TransportConfig::default();
transport_config.keep_alive_interval(Some(time::Duration::from_secs(2)));
transport_config.congestion_controller_factory(Arc::new(quinn::congestion::BbrConfig::default()));
server_config.transport = Arc::new(transport_config);
let server = quinn::Endpoint::server(server_config, config.bind)?;
let broadcasts = Default::default();
let conns = JoinSet::new();
Ok(Self {
server,
broadcasts,
conns,
})
}
pub async fn run(mut self) -> anyhow::Result<()> {
loop {
tokio::select! {
res = self.server.accept() => {
let conn = res.context("failed to accept QUIC connection")?;
let mut session = Session::new(self.broadcasts.clone());
self.conns.spawn(async move { session.run(conn).await });
},
res = self.conns.join_next(), if !self.conns.is_empty() => {
let res = res.expect("no tasks").expect("task aborted");
if let Err(err) = res {
log::warn!("connection terminated: {:?}", err);
}
},
}
}
}
}

View File

@ -1,41 +1,32 @@
use std::{
collections::{hash_map, HashMap},
sync::{Arc, Mutex},
};
use anyhow::Context; use anyhow::Context;
use moq_transport::{session::Request, setup::Role, MoqError}; use moq_transport::{model::broadcast, session::Request, setup::Role};
use crate::Origin;
#[derive(Clone)] #[derive(Clone)]
pub struct Session { pub struct Session {
origin: Origin, broadcasts: Arc<Mutex<HashMap<String, broadcast::Subscriber>>>,
} }
impl Session { impl Session {
pub fn new(origin: Origin) -> Self { pub fn new(broadcasts: Arc<Mutex<HashMap<String, broadcast::Subscriber>>>) -> Self {
Self { origin } Self { broadcasts }
} }
pub async fn run(&mut self, conn: quinn::Connecting) -> anyhow::Result<()> { pub async fn run(&mut self, conn: quinn::Connecting) -> anyhow::Result<()> {
log::debug!("received QUIC handshake: ip={:?}", conn.remote_address());
// Wait for the QUIC connection to be established. // Wait for the QUIC connection to be established.
let conn = conn.await.context("failed to establish QUIC connection")?; let conn = conn.await.context("failed to establish QUIC connection")?;
log::debug!(
"established QUIC connection: ip={:?} id={}",
conn.remote_address(),
conn.stable_id()
);
let id = conn.stable_id();
// Wait for the CONNECT request. // Wait for the CONNECT request.
let request = webtransport_quinn::accept(conn) let request = webtransport_quinn::accept(conn)
.await .await
.context("failed to receive WebTransport request")?; .context("failed to receive WebTransport request")?;
// Strip any leading and trailing slashes to get the broadcast name. let path = request.uri().path().to_string();
let path = request.url().path().trim_matches('/').to_string();
log::debug!("received WebTransport CONNECT: id={} path={}", id, path);
// Accept the CONNECT request. // Accept the CONNECT request.
let session = request let session = request
@ -48,64 +39,58 @@ impl Session {
.await .await
.context("failed to accept handshake")?; .context("failed to accept handshake")?;
log::debug!("received MoQ SETUP: id={} role={:?}", id, request.role());
let role = request.role(); let role = request.role();
match role { match role {
Role::Publisher => { Role::Publisher => self.serve_publisher(request, &path).await,
if let Err(err) = self.serve_publisher(id, request, &path).await { Role::Subscriber => self.serve_subscriber(request, &path).await,
log::warn!("error serving publisher: id={} path={} err={:#?}", id, path, err); Role::Both => request.reject(300),
}
}
Role::Subscriber => {
if let Err(err) = self.serve_subscriber(id, request, &path).await {
log::warn!("error serving subscriber: id={} path={} err={:#?}", id, path, err);
}
}
Role::Both => {
log::warn!("role both not supported: id={}", id);
request.reject(300);
}
};
log::debug!("closing connection: id={}", id);
Ok(())
}
async fn serve_publisher(&mut self, id: usize, request: Request, path: &str) -> anyhow::Result<()> {
log::info!("serving publisher: id={}, path={}", id, path);
let mut origin = match self.origin.publish(path).await {
Ok(origin) => origin,
Err(err) => {
request.reject(err.code());
return Err(err.into());
}
};
let session = request.subscriber(origin.broadcast.clone()).await?;
tokio::select! {
_ = session.run() => origin.close().await?,
_ = origin.run() => (), // TODO send error to session
}; };
Ok(()) Ok(())
} }
async fn serve_subscriber(&mut self, id: usize, request: Request, path: &str) -> anyhow::Result<()> { async fn serve_publisher(&mut self, request: Request, path: &str) {
log::info!("serving subscriber: id={} path={}", id, path); log::info!("publisher: path={}", path);
let subscriber = self.origin.subscribe(path); let (publisher, subscriber) = broadcast::new();
let session = request.publisher(subscriber.broadcast.clone()).await?; match self.broadcasts.lock().unwrap().entry(path.to_string()) {
hash_map::Entry::Occupied(_) => return request.reject(409),
hash_map::Entry::Vacant(entry) => entry.insert(subscriber),
};
if let Err(err) = self.run_publisher(request, publisher).await {
log::warn!("pubisher error: path={} err={:?}", path, err);
}
self.broadcasts.lock().unwrap().remove(path);
}
async fn run_publisher(&mut self, request: Request, publisher: broadcast::Publisher) -> anyhow::Result<()> {
let session = request.subscriber(publisher).await?;
session.run().await?; session.run().await?;
Ok(())
}
// Make sure this doesn't get dropped too early async fn serve_subscriber(&mut self, request: Request, path: &str) {
drop(subscriber); log::info!("subscriber: path={}", path);
let broadcast = match self.broadcasts.lock().unwrap().get(path) {
Some(broadcast) => broadcast.clone(),
None => {
return request.reject(404);
}
};
if let Err(err) = self.run_subscriber(request, broadcast).await {
log::warn!("subscriber error: path={} err={:?}", path, err);
}
}
async fn run_subscriber(&mut self, request: Request, broadcast: broadcast::Subscriber) -> anyhow::Result<()> {
let session = request.publisher(broadcast).await?;
session.run().await?;
Ok(()) Ok(())
} }
} }
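
For completeness, the dialing side of this handshake is symmetric: the removed origin.rs connects out with the same building blocks when fetching a broadcast from another relay. A hedged sketch reusing only calls that appear in this compare:

// Sketch: connect to a remote relay and run a subscriber session,
// feeding everything it receives into a local broadcast publisher.
async fn subscribe_remote(
    quic: &quinn::Endpoint,
    url: &url::Url,
    publisher: moq_transport::cache::broadcast::Publisher,
) -> anyhow::Result<()> {
    let session = webtransport_quinn::connect(quic, url).await?;
    let session = moq_transport::session::Client::subscriber(session, publisher).await?;
    session.run().await?;
    Ok(())
}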

View File

@ -1,182 +0,0 @@
use anyhow::Context;
use ring::digest::{digest, SHA256};
use rustls::server::{ClientHello, ResolvesServerCert};
use rustls::sign::CertifiedKey;
use rustls::{Certificate, PrivateKey, RootCertStore};
use std::io::{self, Cursor, Read};
use std::path;
use std::sync::Arc;
use std::{fs, time};
use webpki::{DnsNameRef, EndEntityCert};
use crate::Config;
#[derive(Clone)]
pub struct Tls {
pub server: rustls::ServerConfig,
pub client: rustls::ClientConfig,
pub fingerprints: Vec<String>,
}
impl Tls {
pub fn load(config: &Config) -> anyhow::Result<Self> {
let mut serve = ServeCerts::default();
// Load the certificate and key files based on their index.
anyhow::ensure!(
config.tls_cert.len() == config.tls_key.len(),
"--tls-cert and --tls-key counts differ"
);
for (chain, key) in config.tls_cert.iter().zip(config.tls_key.iter()) {
serve.load(chain, key)?;
}
// Create a list of acceptable root certificates.
let mut roots = RootCertStore::empty();
if config.tls_root.is_empty() {
// Add the platform's native root certificates.
for cert in rustls_native_certs::load_native_certs().context("could not load platform certs")? {
roots.add(&Certificate(cert.0)).context("failed to add root cert")?;
}
} else {
// Add the specified root certificates.
for root in &config.tls_root {
let root = fs::File::open(root).context("failed to open root cert file")?;
let mut root = io::BufReader::new(root);
let root = rustls_pemfile::certs(&mut root).context("failed to read root cert")?;
anyhow::ensure!(root.len() == 1, "expected a single root cert");
let root = Certificate(root[0].to_owned());
roots.add(&root).context("failed to add root cert")?;
}
}
// Create the TLS configuration we'll use as a client (relay -> relay)
let mut client = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(roots)
.with_no_client_auth();
// Allow disabling TLS verification altogether.
if config.tls_disable_verify {
let noop = NoCertificateVerification {};
client.dangerous().set_certificate_verifier(Arc::new(noop));
}
let fingerprints = serve.fingerprints();
// Create the TLS configuration we'll use as a server (relay <- browser)
let server = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_cert_resolver(Arc::new(serve));
let certs = Self {
server,
client,
fingerprints,
};
Ok(certs)
}
}
#[derive(Default)]
struct ServeCerts {
list: Vec<Arc<CertifiedKey>>,
}
impl ServeCerts {
// Load a certificate and corresponding key from a file
pub fn load(&mut self, chain: &path::PathBuf, key: &path::PathBuf) -> anyhow::Result<()> {
// Read the PEM certificate chain
let chain = fs::File::open(chain).context("failed to open cert file")?;
let mut chain = io::BufReader::new(chain);
let chain: Vec<Certificate> = rustls_pemfile::certs(&mut chain)?
.into_iter()
.map(Certificate)
.collect();
anyhow::ensure!(!chain.is_empty(), "could not find certificate");
// Read the PEM private key
let mut keys = fs::File::open(key).context("failed to open key file")?;
// Read the keys into a Vec so we can parse it twice.
let mut buf = Vec::new();
keys.read_to_end(&mut buf)?;
// Try to parse a PKCS#8 key
// -----BEGIN PRIVATE KEY-----
let mut keys = rustls_pemfile::pkcs8_private_keys(&mut Cursor::new(&buf))?;
// Try again but with EC keys this time
// -----BEGIN EC PRIVATE KEY-----
if keys.is_empty() {
keys = rustls_pemfile::ec_private_keys(&mut Cursor::new(&buf))?
};
anyhow::ensure!(!keys.is_empty(), "could not find private key");
anyhow::ensure!(keys.len() < 2, "expected a single key");
let key = PrivateKey(keys.remove(0));
let key = rustls::sign::any_supported_type(&key)?;
let certified = Arc::new(CertifiedKey::new(chain, key));
self.list.push(certified);
Ok(())
}
// Return the SHA256 fingerprint of our certificates.
pub fn fingerprints(&self) -> Vec<String> {
self.list
.iter()
.map(|ck| {
let fingerprint = digest(&SHA256, ck.cert[0].as_ref());
let fingerprint = hex::encode(fingerprint.as_ref());
fingerprint
})
.collect()
}
}
impl ResolvesServerCert for ServeCerts {
fn resolve(&self, client_hello: ClientHello<'_>) -> Option<Arc<CertifiedKey>> {
if let Some(name) = client_hello.server_name() {
if let Ok(dns_name) = DnsNameRef::try_from_ascii_str(name) {
for ck in &self.list {
// TODO I gave up on caching the parsed result because of lifetime hell.
// If this shows up on benchmarks, somebody should fix it.
let leaf = ck.cert.first().expect("missing certificate");
let parsed = EndEntityCert::try_from(leaf.0.as_ref()).expect("failed to parse certificate");
if parsed.verify_is_valid_for_dns_name(dns_name).is_ok() {
return Some(ck.clone());
}
}
}
}
// Default to the last certificate if we couldn't find one.
self.list.last().cloned()
}
}
pub struct NoCertificateVerification {}
impl rustls::client::ServerCertVerifier for NoCertificateVerification {
fn verify_server_cert(
&self,
_end_entity: &rustls::Certificate,
_intermediates: &[rustls::Certificate],
_server_name: &rustls::ServerName,
_scts: &mut dyn Iterator<Item = &[u8]>,
_ocsp_response: &[u8],
_now: time::SystemTime,
) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
Ok(rustls::client::ServerCertVerified::assertion())
}
}

View File

@ -1,44 +0,0 @@
use std::sync::Arc;
use axum::{extract::State, http::Method, response::IntoResponse, routing::get, Router};
use axum_server::{tls_rustls::RustlsAcceptor, Server};
use tower_http::cors::{Any, CorsLayer};
use crate::{Config, Tls};
// Run a HTTP server using Axum
// TODO remove this when Chrome adds support for self-signed certificates using WebTransport
pub struct Web {
app: Router,
server: Server<RustlsAcceptor>,
}
impl Web {
pub fn new(config: Config, tls: Tls) -> Self {
// Get the first certificate's fingerprint.
// TODO serve all of them so we can support multiple signature algorithms.
let fingerprint = tls.fingerprints.first().expect("missing certificate").clone();
let mut tls_config = tls.server.clone();
tls_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
let tls_config = axum_server::tls_rustls::RustlsConfig::from_config(Arc::new(tls_config));
let app = Router::new()
.route("/fingerprint", get(serve_fingerprint))
.layer(CorsLayer::new().allow_origin(Any).allow_methods([Method::GET]))
.with_state(fingerprint);
let server = axum_server::bind_rustls(config.listen, tls_config);
Self { app, server }
}
pub async fn serve(self) -> anyhow::Result<()> {
self.server.serve(self.app.into_make_service()).await?;
Ok(())
}
}
async fn serve_fingerprint(State(fingerprint): State<String>) -> impl IntoResponse {
fingerprint
}

1162
moq-transport/Cargo.lock generated

File diff suppressed because it is too large

View File

@ -15,15 +15,12 @@ categories = ["multimedia", "network-programming", "web-programming"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
bytes = "1" bytes = "1.4"
thiserror = "1" thiserror = "1"
tokio = { version = "1", features = ["macros", "io-util", "sync"] } anyhow = "1"
tokio = { version = "1.27", features = ["macros", "io-util", "sync"] }
log = "0.4" log = "0.4"
indexmap = "2" indexmap = "2"
quinn = "0.10" quinn = "0.10"
webtransport-quinn = "0.6" webtransport-quinn = "0.5.2"
#webtransport-quinn = { path = "../../webtransport-rs/webtransport-quinn" }
async-trait = "0.1"
paste = "1"

View File

@ -1,51 +0,0 @@
use thiserror::Error;
use crate::MoqError;
#[derive(Clone, Debug, Error)]
pub enum CacheError {
/// A clean termination, represented as error code 0.
/// This error is automatically used when publishers or subscribers are dropped without calling close.
#[error("closed")]
Closed,
/// An ANNOUNCE_RESET or SUBSCRIBE_RESET was sent by the publisher.
#[error("reset code={0:?}")]
Reset(u32),
/// An ANNOUNCE_STOP or SUBSCRIBE_STOP was sent by the subscriber.
#[error("stop")]
Stop,
/// The requested resource was not found.
#[error("not found")]
NotFound,
/// A resource already exists with that ID.
#[error("duplicate")]
Duplicate,
}
impl MoqError for CacheError {
/// An integer code that is sent over the wire.
fn code(&self) -> u32 {
match self {
Self::Closed => 0,
Self::Reset(code) => *code,
Self::Stop => 206,
Self::NotFound => 404,
Self::Duplicate => 409,
}
}
/// A reason that is sent over the wire.
fn reason(&self) -> String {
match self {
Self::Closed => "closed".to_owned(),
Self::Reset(code) => format!("reset code: {}", code),
Self::Stop => "stop".to_owned(),
Self::NotFound => "not found".to_owned(),
Self::Duplicate => "duplicate".to_owned(),
}
}
}

View File

@ -1,21 +0,0 @@
//! Allows a publisher to push updates, automatically caching and fanning them out to any subscribers.
//!
//! The hierarchy is: [broadcast] -> [track] -> [segment] -> [fragment] -> [Bytes](bytes::Bytes)
//!
//! The naming scheme doesn't match the spec because it's stricter (and because of bikeshedding, of course):
//!
//! - [broadcast] is kinda like "track namespace"
//! - [track] is "track"
//! - [segment] is "group" but MUST use a single stream.
//! - [fragment] is "object" but MUST have the same properties as the segment.
pub mod broadcast;
mod error;
pub mod fragment;
pub mod segment;
pub mod track;
pub(crate) mod watch;
pub(crate) use watch::*;
pub use error::*;
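
Concretely, pushing one chunk down this hierarchy with the handles used throughout this diff looks like the sketch below (module paths from the cache side of the compare; the Info fields match segment.rs and moq-pub above):

use std::time;

use moq_transport::cache::{fragment, segment, track};
use moq_transport::VarInt;

// Sketch: one segment containing one fragment containing one chunk.
fn publish_chunk(track: &mut track::Publisher, payload: bytes::Bytes) -> anyhow::Result<()> {
    let mut segment = track.create_segment(segment::Info {
        sequence: VarInt::ZERO,
        priority: 0,
        expires: Some(time::Duration::from_secs(10)), // evict after 10s
    })?;
    let mut fragment = segment.create_fragment(fragment::Info {
        sequence: VarInt::ZERO,
        size: None, // only needed when a segment carries multiple fragments
    })?;
    fragment.write_chunk(payload)?;
    Ok(())
}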

View File

@ -1,216 +0,0 @@
//! A segment is a stream of fragments with a header, split into a [Publisher] and [Subscriber] handle.
//!
//! A [Publisher] writes an ordered stream of fragments.
//! Each fragment can have a sequence number, allowing the subscriber to detect gaps between fragments.
//!
//! A [Subscriber] reads an ordered stream of fragments.
//! The subscriber can be cloned, in which case each subscriber receives a copy of each fragment. (fanout)
//!
//! The segment is closed with [CacheError::Closed] when all publishers or subscribers are dropped.
use core::fmt;
use std::{ops::Deref, sync::Arc, time};
use crate::VarInt;
use super::{fragment, CacheError, Watch};
/// Create a new segment with the given info.
pub fn new(info: Info) -> (Publisher, Subscriber) {
let state = Watch::new(State::default());
let info = Arc::new(info);
let publisher = Publisher::new(state.clone(), info.clone());
let subscriber = Subscriber::new(state, info);
(publisher, subscriber)
}
/// Static information about the segment.
#[derive(Debug)]
pub struct Info {
// The sequence number of the segment within the track.
// NOTE: These may be received out of order or with gaps.
pub sequence: VarInt,
// The priority of the segment within the BROADCAST.
pub priority: u32,
// Cache the segment for at most this long.
pub expires: Option<time::Duration>,
}
struct State {
// The data that has been received thus far.
fragments: Vec<fragment::Subscriber>,
// Set when the publisher is dropped.
closed: Result<(), CacheError>,
}
impl State {
pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> {
self.closed.clone()?;
self.closed = Err(err);
Ok(())
}
}
impl Default for State {
fn default() -> Self {
Self {
fragments: Vec::new(),
closed: Ok(()),
}
}
}
impl fmt::Debug for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("State")
.field("fragments", &self.fragments)
.field("closed", &self.closed)
.finish()
}
}
/// Used to write data to a segment and notify subscribers.
pub struct Publisher {
// Mutable segment state.
state: Watch<State>,
// Immutable segment state.
info: Arc<Info>,
// Closes the segment when all Publishers are dropped.
_dropped: Arc<Dropped>,
}
impl Publisher {
fn new(state: Watch<State>, info: Arc<Info>) -> Self {
let _dropped = Arc::new(Dropped::new(state.clone()));
Self { state, info, _dropped }
}
/// Write a fragment
pub fn push_fragment(&mut self, fragment: fragment::Subscriber) -> Result<(), CacheError> {
let mut state = self.state.lock_mut();
state.closed.clone()?;
state.fragments.push(fragment);
Ok(())
}
pub fn create_fragment(&mut self, fragment: fragment::Info) -> Result<fragment::Publisher, CacheError> {
let (publisher, subscriber) = fragment::new(fragment);
self.push_fragment(subscriber)?;
Ok(publisher)
}
/// Close the segment with an error.
pub fn close(self, err: CacheError) -> Result<(), CacheError> {
self.state.lock_mut().close(err)
}
}
impl Deref for Publisher {
type Target = Info;
fn deref(&self) -> &Self::Target {
&self.info
}
}
impl fmt::Debug for Publisher {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Publisher")
.field("state", &self.state)
.field("info", &self.info)
.finish()
}
}
/// Notified when a segment has new data available.
#[derive(Clone)]
pub struct Subscriber {
// Modify the segment state.
state: Watch<State>,
// Immutable segment state.
info: Arc<Info>,
// The number of chunks that we've read.
// NOTE: Cloned subscribers inherit this index, but then run in parallel.
index: usize,
// Dropped when all Subscribers are dropped.
_dropped: Arc<Dropped>,
}
impl Subscriber {
fn new(state: Watch<State>, info: Arc<Info>) -> Self {
let _dropped = Arc::new(Dropped::new(state.clone()));
Self {
state,
info,
index: 0,
_dropped,
}
}
/// Block until the next chunk of bytes is available.
pub async fn next_fragment(&mut self) -> Result<Option<fragment::Subscriber>, CacheError> {
loop {
let notify = {
let state = self.state.lock();
if self.index < state.fragments.len() {
let fragment = state.fragments[self.index].clone();
self.index += 1;
return Ok(Some(fragment));
}
match &state.closed {
Err(CacheError::Closed) => return Ok(None),
Err(err) => return Err(err.clone()),
Ok(()) => state.changed(),
}
};
notify.await; // Try again when the state changes
}
}
}
impl Deref for Subscriber {
type Target = Info;
fn deref(&self) -> &Self::Target {
&self.info
}
}
impl fmt::Debug for Subscriber {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Subscriber")
.field("state", &self.state)
.field("info", &self.info)
.field("index", &self.index)
.finish()
}
}
struct Dropped {
// Modify the segment state.
state: Watch<State>,
}
impl Dropped {
fn new(state: Watch<State>) -> Self {
Self { state }
}
}
impl Drop for Dropped {
fn drop(&mut self) {
self.state.lock_mut().close(CacheError::Closed).ok();
}
}
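
Reading back mirrors the Watch loop above: next_fragment() re-checks the shared state, awaits state.changed() when nothing new has arrived, and resolves Ok(None) once the publisher drops (the internal CacheError::Closed). A consumer sketch:

// Sketch: drain a segment until the publisher closes it cleanly.
async fn drain(mut segment: Subscriber) -> Result<usize, CacheError> {
    let mut count = 0;
    while segment.next_fragment().await?.is_some() {
        count += 1;
    }
    Ok(count)
}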

View File

@ -1,5 +1,5 @@
use super::{BoundsExceeded, VarInt}; use super::{BoundsExceeded, VarInt};
use std::{io, str}; use std::str;
use thiserror::Error; use thiserror::Error;
@ -7,13 +7,6 @@ use thiserror::Error;
// TODO Use trait aliases when they're stable, or add these bounds to every method. // TODO Use trait aliases when they're stable, or add these bounds to every method.
pub trait AsyncRead: tokio::io::AsyncRead + Unpin + Send {} pub trait AsyncRead: tokio::io::AsyncRead + Unpin + Send {}
impl AsyncRead for webtransport_quinn::RecvStream {} impl AsyncRead for webtransport_quinn::RecvStream {}
impl<T> AsyncRead for tokio::io::Take<&mut T> where T: AsyncRead {}
impl<T: AsRef<[u8]> + Unpin + Send> AsyncRead for io::Cursor<T> {}
#[async_trait::async_trait]
pub trait Decode: Sized {
async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError>;
}
/// A decode error. /// A decode error.
#[derive(Error, Debug)] #[derive(Error, Debug)]
@ -24,32 +17,12 @@ pub enum DecodeError {
#[error("invalid string")] #[error("invalid string")]
InvalidString(#[from] str::Utf8Error), InvalidString(#[from] str::Utf8Error),
#[error("invalid message: {0:?}")] #[error("invalid type: {0:?}")]
InvalidMessage(VarInt), InvalidType(VarInt),
#[error("invalid role: {0:?}")]
InvalidRole(VarInt),
#[error("invalid subscribe location")]
InvalidSubscribeLocation,
#[error("varint bounds exceeded")] #[error("varint bounds exceeded")]
BoundsExceeded(#[from] BoundsExceeded), BoundsExceeded(#[from] BoundsExceeded),
// TODO move these to ParamError
#[error("duplicate parameter")]
DuplicateParameter,
#[error("missing parameter")]
MissingParameter,
#[error("invalid parameter")]
InvalidParameter,
#[error("io error: {0}")] #[error("io error: {0}")]
IoError(#[from] std::io::Error), IoError(#[from] std::io::Error),
// Used to signal that the stream has ended.
#[error("no more messages")]
Final,
} }

View File

@ -6,12 +6,6 @@ use thiserror::Error;
// TODO Use trait aliases when they're stable, or add these bounds to every method. // TODO Use trait aliases when they're stable, or add these bounds to every method.
pub trait AsyncWrite: tokio::io::AsyncWrite + Unpin + Send {} pub trait AsyncWrite: tokio::io::AsyncWrite + Unpin + Send {}
impl AsyncWrite for webtransport_quinn::SendStream {} impl AsyncWrite for webtransport_quinn::SendStream {}
impl AsyncWrite for Vec<u8> {}
#[async_trait::async_trait]
pub trait Encode: Sized {
async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError>;
}
/// An encode error. /// An encode error.
#[derive(Error, Debug)] #[derive(Error, Debug)]

View File

@ -1,11 +1,9 @@
mod decode; mod decode;
mod encode; mod encode;
mod params;
mod string; mod string;
mod varint; mod varint;
pub use decode::*; pub use decode::*;
pub use encode::*; pub use encode::*;
pub use params::*;
pub use string::*; pub use string::*;
pub use varint::*; pub use varint::*;

View File

@ -1,85 +0,0 @@
use std::io::Cursor;
use std::{cmp::min, collections::HashMap};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use crate::coding::{AsyncRead, AsyncWrite, Decode, Encode};
use crate::{
coding::{DecodeError, EncodeError},
VarInt,
};
#[derive(Default, Debug, Clone)]
pub struct Params(pub HashMap<VarInt, Vec<u8>>);
#[async_trait::async_trait]
impl Decode for Params {
async fn decode<R: AsyncRead>(mut r: &mut R) -> Result<Self, DecodeError> {
let mut params = HashMap::new();
// I hate this shit so much; let me encode my role and get on with my life.
let count = VarInt::decode(r).await?;
for _ in 0..count.into_inner() {
let kind = VarInt::decode(r).await?;
if params.contains_key(&kind) {
return Err(DecodeError::DuplicateParameter);
}
let size = VarInt::decode(r).await?;
// Don't allocate the entire requested size to avoid a possible attack
// Instead, we allocate up to 1024 and keep appending as we read further.
let mut pr = r.take(size.into_inner());
let mut buf = Vec::with_capacity(min(1024, pr.limit() as usize));
pr.read_to_end(&mut buf).await?;
params.insert(kind, buf);
r = pr.into_inner();
}
Ok(Params(params))
}
}
#[async_trait::async_trait]
impl Encode for Params {
async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
VarInt::try_from(self.0.len())?.encode(w).await?;
for (kind, value) in self.0.iter() {
kind.encode(w).await?;
VarInt::try_from(value.len())?.encode(w).await?;
w.write_all(value).await?;
}
Ok(())
}
}
impl Params {
pub fn new() -> Self {
Self::default()
}
pub async fn set<P: Encode>(&mut self, kind: VarInt, p: P) -> Result<(), EncodeError> {
let mut value = Vec::new();
p.encode(&mut value).await?;
self.0.insert(kind, value);
Ok(())
}
pub fn has(&self, kind: VarInt) -> bool {
self.0.contains_key(&kind)
}
pub async fn get<P: Decode>(&mut self, kind: VarInt) -> Result<Option<P>, DecodeError> {
if let Some(value) = self.0.remove(&kind) {
let mut cursor = Cursor::new(value);
Ok(Some(P::decode(&mut cursor).await?))
} else {
Ok(None)
}
}
}
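
The take()-based read above is the defensive pattern for length-prefixed values: honor the declared size as a hard read limit, but cap the upfront allocation so a hostile peer declaring a multi-gigabyte parameter cannot force a matching pre-allocation. In isolation:

use tokio::io::{AsyncRead, AsyncReadExt};

// Sketch: read at most `size` bytes, pre-allocating no more than 1 KiB.
async fn read_value<R: AsyncRead + Unpin>(r: &mut R, size: u64) -> std::io::Result<Vec<u8>> {
    let mut limited = r.take(size);
    let mut buf = Vec::with_capacity(1024.min(size as usize));
    limited.read_to_end(&mut buf).await?;
    Ok(buf)
}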

View File

@ -5,25 +5,20 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt};
use crate::VarInt; use crate::VarInt;
use super::{Decode, DecodeError, Encode, EncodeError}; use super::{DecodeError, EncodeError};
#[async_trait::async_trait] /// Encode a string with a varint length prefix.
impl Encode for String { pub async fn encode_string<W: AsyncWrite>(s: &str, w: &mut W) -> Result<(), EncodeError> {
async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> { let size = VarInt::try_from(s.len())?;
let size = VarInt::try_from(self.len())?; size.encode(w).await?;
size.encode(w).await?; w.write_all(s.as_ref()).await?;
w.write_all(self.as_ref()).await?; Ok(())
Ok(())
}
} }
#[async_trait::async_trait] /// Decode a string with a varint length prefix.
impl Decode for String { pub async fn decode_string<R: AsyncRead>(r: &mut R) -> Result<String, DecodeError> {
/// Decode a string with a varint length prefix. let size = VarInt::decode(r).await?.into_inner();
async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> { let mut str = String::with_capacity(min(1024, size) as usize);
let size = VarInt::decode(r).await?.into_inner(); r.take(size).read_to_string(&mut str).await?;
let mut str = String::with_capacity(min(1024, size) as usize); Ok(str)
r.take(size).read_to_string(&mut str).await?;
Ok(str)
}
} }

View File

@ -9,7 +9,7 @@ use crate::coding::{AsyncRead, AsyncWrite};
use thiserror::Error; use thiserror::Error;
use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::io::{AsyncReadExt, AsyncWriteExt};
use super::{Decode, DecodeError, Encode, EncodeError}; use super::{DecodeError, EncodeError};
#[derive(Debug, Copy, Clone, Eq, PartialEq, Error)] #[derive(Debug, Copy, Clone, Eq, PartialEq, Error)]
#[error("value out of range")] #[error("value out of range")]
@ -164,23 +164,14 @@ impl fmt::Display for VarInt {
} }
} }
#[async_trait::async_trait]
impl Decode for VarInt {
/// Decode a varint from the given reader.
async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let b = r.read_u8().await?;
Self::decode_byte(b, r).await
}
}
impl VarInt { impl VarInt {
/// Decode a varint given the first byte, reading the rest as needed. /// Decode a varint from the given reader.
/// This is silly but useful for determining if the stream has ended. pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
pub async fn decode_byte<R: AsyncRead>(b: u8, r: &mut R) -> Result<Self, DecodeError> {
let tag = b >> 6;
let mut buf = [0u8; 8]; let mut buf = [0u8; 8];
buf[0] = b & 0b0011_1111; r.read_exact(buf[0..1].as_mut()).await?;
let tag = buf[0] >> 6;
buf[0] &= 0b0011_1111;
let x = match tag { let x = match tag {
0b00 => u64::from(buf[0]), 0b00 => u64::from(buf[0]),
@ -201,12 +192,9 @@ impl VarInt {
Ok(Self(x)) Ok(Self(x))
} }
}
#[async_trait::async_trait]
impl Encode for VarInt {
/// Encode a varint to the given writer. /// Encode a varint to the given writer.
async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
let x = self.0; let x = self.0;
if x < 2u64.pow(6) { if x < 2u64.pow(6) {
w.write_u8(x as u8).await?; w.write_u8(x as u8).await?;
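
These are RFC 9000 varints: the top two bits of the first byte select the width (00 is 1 byte, 01 is 2, 10 is 4, 11 is 8) and the remaining six bits are the most significant bits of the value. A worked two-byte example:

// 300 doesn't fit in 6 bits, so it takes the 2-byte form 0x41 0x2C:
// tag 0b01 selects two bytes; the low 6 bits of 0x41 carry the high value bits.
let bytes = [0x41u8, 0x2C];
let value = (u64::from(bytes[0] & 0b0011_1111) << 8) | u64::from(bytes[1]);
assert_eq!(value, 300);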

View File

@ -1,7 +1,93 @@
pub trait MoqError { use std::io;
/// An integer code that is sent over the wire.
fn code(&self) -> u32;
/// An optional reason sometimes sent over the wire. use thiserror::Error;
fn reason(&self) -> String;
use crate::VarInt;
/// A MoQTransport error with an associated error code.
#[derive(Copy, Clone, Debug, Error)]
pub enum Error {
/// A clean termination, represented as error code 0.
/// This error is automatically used when publishers or subscribers are dropped without calling close.
#[error("closed")]
Closed,
/// An ANNOUNCE_RESET or SUBSCRIBE_RESET was sent by the publisher.
#[error("reset code={0:?}")]
Reset(u32),
/// An ANNOUNCE_STOP or SUBSCRIBE_STOP was sent by the subscriber.
#[error("stop")]
Stop,
/// The requested resource was not found.
#[error("not found")]
NotFound,
/// A resource already exists with that ID.
#[error("duplicate")]
Duplicate,
/// The role negotiated in the handshake was violated. For example, a publisher sent a SUBSCRIBE, or a subscriber sent an OBJECT.
#[error("role violation: msg={0}")]
Role(VarInt),
/// An error occurred while reading from the QUIC stream.
#[error("failed to read from stream")]
Read,
/// An error occurred while writing to the QUIC stream.
#[error("failed to write to stream")]
Write,
/// An unclassified error because I'm lazy. TODO classify these errors
#[error("unknown error")]
Unknown,
}
impl Error {
/// An integer code that is sent over the wire.
pub fn code(&self) -> u32 {
match self {
Self::Closed => 0,
Self::Reset(code) => *code,
Self::Stop => 206,
Self::NotFound => 404,
Self::Role(_) => 405,
Self::Duplicate => 409,
Self::Unknown => 500,
Self::Write => 501,
Self::Read => 502,
}
}
/// A reason that is sent over the wire.
pub fn reason(&self) -> &str {
match self {
Self::Closed => "closed",
Self::Reset(_) => "reset",
Self::Stop => "stop",
Self::NotFound => "not found",
Self::Duplicate => "duplicate",
Self::Role(_msg) => "role violation",
Self::Unknown => "unknown",
Self::Read => "read error",
Self::Write => "write error",
}
}
/// Crudely tries to convert the Error into an io::Error.
pub fn as_io(&self) -> io::Error {
match self {
Self::Closed => io::ErrorKind::ConnectionAborted.into(),
Self::Reset(_) => io::ErrorKind::ConnectionReset.into(),
Self::Stop => io::ErrorKind::ConnectionAborted.into(),
Self::NotFound => io::ErrorKind::NotFound.into(),
Self::Duplicate => io::ErrorKind::AlreadyExists.into(),
Self::Role(_) => io::ErrorKind::PermissionDenied.into(),
Self::Unknown => io::ErrorKind::Other.into(),
Self::Read => io::ErrorKind::BrokenPipe.into(),
Self::Write => io::ErrorKind::BrokenPipe.into(),
}
}
} }

View File

@ -5,14 +5,16 @@
//! The specification is a work in progress and will change. //! The specification is a work in progress and will change.
//! See the [specification](https://datatracker.ietf.org/doc/draft-ietf-moq-transport/) and [github](https://github.com/moq-wg/moq-transport) for any updates. //! See the [specification](https://datatracker.ietf.org/doc/draft-ietf-moq-transport/) and [github](https://github.com/moq-wg/moq-transport) for any updates.
//! //!
//! This implementation has some required extensions until the draft stabilizes. See: [Extensions](crate::setup::Extensions) //! **FORKED**: This implementation makes extensive changes to the protocol.
//! See [KIXEL_00](crate::setup::Version::KIXEL_00) for a list of differences.
//! Many of these will get merged into the specification, so don't panic.
mod coding; mod coding;
mod error; mod error;
pub mod cache;
pub mod message; pub mod message;
pub mod model;
pub mod session; pub mod session;
pub mod setup; pub mod setup;
pub use coding::VarInt; pub use coding::VarInt;
pub use error::MoqError; pub use error::*;

View File

@ -1,30 +1,22 @@
use crate::coding::{Decode, DecodeError, Encode, EncodeError, Params}; use crate::coding::{decode_string, encode_string, DecodeError, EncodeError};
use crate::coding::{AsyncRead, AsyncWrite}; use crate::coding::{AsyncRead, AsyncWrite};
use crate::setup::Extensions;
/// Sent by the publisher to announce the availability of a group of tracks. /// Sent by the publisher to announce the availability of a group of tracks.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Announce { pub struct Announce {
/// The track namespace // The track namespace
pub namespace: String, pub namespace: String,
/// Optional parameters
pub params: Params,
} }
impl Announce { impl Announce {
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let namespace = String::decode(r).await?; let namespace = decode_string(r).await?;
let params = Params::decode(r).await?; Ok(Self { namespace })
Ok(Self { namespace, params })
} }
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
self.namespace.encode(w).await?; encode_string(&self.namespace, w).await?;
self.params.encode(w).await?;
Ok(()) Ok(())
} }
} }

View File

@ -1,7 +1,4 @@
use crate::{ use crate::coding::{decode_string, encode_string, AsyncRead, AsyncWrite, DecodeError, EncodeError};
coding::{AsyncRead, AsyncWrite, Decode, DecodeError, Encode, EncodeError},
setup::Extensions,
};
/// Sent by the subscriber to accept an Announce. /// Sent by the subscriber to accept an Announce.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
@ -12,12 +9,12 @@ pub struct AnnounceOk {
} }
impl AnnounceOk { impl AnnounceOk {
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let namespace = String::decode(r).await?; let namespace = decode_string(r).await?;
Ok(Self { namespace }) Ok(Self { namespace })
} }
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
self.namespace.encode(w).await encode_string(&self.namespace, w).await
} }
} }

View File

@ -1,11 +1,10 @@
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; use crate::coding::{decode_string, encode_string, DecodeError, EncodeError, VarInt};
use crate::coding::{AsyncRead, AsyncWrite}; use crate::coding::{AsyncRead, AsyncWrite};
use crate::setup::Extensions;
/// Sent by the subscriber to reject an Announce. /// Sent by the subscriber to reject an Announce.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct AnnounceError { pub struct AnnounceReset {
// Echo back the namespace that was reset // Echo back the namespace that was reset
pub namespace: String, pub namespace: String,
@ -16,11 +15,11 @@ pub struct AnnounceError {
pub reason: String, pub reason: String,
} }
impl AnnounceError { impl AnnounceReset {
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let namespace = String::decode(r).await?; let namespace = decode_string(r).await?;
let code = VarInt::decode(r).await?.try_into()?; let code = VarInt::decode(r).await?.try_into()?;
let reason = String::decode(r).await?; let reason = decode_string(r).await?;
Ok(Self { Ok(Self {
namespace, namespace,
@ -29,10 +28,10 @@ impl AnnounceError {
}) })
} }
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
self.namespace.encode(w).await?; encode_string(&self.namespace, w).await?;
VarInt::from_u32(self.code).encode(w).await?; VarInt::from_u32(self.code).encode(w).await?;
self.reason.encode(w).await?; encode_string(&self.reason, w).await?;
Ok(()) Ok(())
} }

View File

@ -0,0 +1,24 @@
use crate::coding::{decode_string, encode_string, DecodeError, EncodeError};
use crate::coding::{AsyncRead, AsyncWrite};
/// Sent by the publisher to terminate an Announce.
#[derive(Clone, Debug)]
pub struct AnnounceStop {
// Echo back the namespace that was reset
pub namespace: String,
}
impl AnnounceStop {
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let namespace = decode_string(r).await?;
Ok(Self { namespace })
}
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
encode_string(&self.namespace, w).await?;
Ok(())
}
}

View File

@ -1,7 +1,6 @@
use crate::coding::{Decode, DecodeError, Encode, EncodeError}; use crate::coding::{decode_string, encode_string, DecodeError, EncodeError};
use crate::coding::{AsyncRead, AsyncWrite}; use crate::coding::{AsyncRead, AsyncWrite};
use crate::setup::Extensions;
/// Sent by the server to indicate that the client should connect to a different server. /// Sent by the server to indicate that the client should connect to a different server.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
@ -10,12 +9,12 @@ pub struct GoAway {
} }
impl GoAway { impl GoAway {
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let url = String::decode(r).await?; let url = decode_string(r).await?;
Ok(Self { url }) Ok(Self { url })
} }
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
self.url.encode(w).await encode_string(&self.url, w).await
} }
} }

View File

@ -6,17 +6,16 @@
//! //!
//! Messages sent by the publisher: //! Messages sent by the publisher:
//! - [Announce] //! - [Announce]
//! - [Unannounce] //! - [AnnounceReset]
//! - [SubscribeOk] //! - [SubscribeOk]
//! - [SubscribeError]
//! - [SubscribeReset] //! - [SubscribeReset]
//! - [Object] //! - [Object]
//! //!
//! Messages sent by the subscriber: //! Messages sent by the subscriber:
//! - [Subscribe] //! - [Subscribe]
//! - [Unsubscribe] //! - [SubscribeStop]
//! - [AnnounceOk] //! - [AnnounceOk]
//! - [AnnounceError] //! - [AnnounceStop]
//! //!
//! Example flow: //! Example flow:
//! ```test //! ```test
@ -33,35 +32,30 @@
mod announce; mod announce;
mod announce_ok; mod announce_ok;
mod announce_reset; mod announce_reset;
mod announce_stop;
mod go_away; mod go_away;
mod object; mod object;
mod subscribe; mod subscribe;
mod subscribe_error;
mod subscribe_fin;
mod subscribe_ok; mod subscribe_ok;
mod subscribe_reset; mod subscribe_reset;
mod unannounce; mod subscribe_stop;
mod unsubscribe;
pub use announce::*; pub use announce::*;
pub use announce_ok::*; pub use announce_ok::*;
pub use announce_reset::*; pub use announce_reset::*;
pub use announce_stop::*;
pub use go_away::*; pub use go_away::*;
pub use object::*; pub use object::*;
pub use subscribe::*; pub use subscribe::*;
pub use subscribe_error::*;
pub use subscribe_fin::*;
pub use subscribe_ok::*; pub use subscribe_ok::*;
pub use subscribe_reset::*; pub use subscribe_reset::*;
pub use unannounce::*; pub use subscribe_stop::*;
pub use unsubscribe::*;
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; use crate::coding::{DecodeError, EncodeError, VarInt};
use std::fmt; use std::fmt;
use crate::coding::{AsyncRead, AsyncWrite}; use crate::coding::{AsyncRead, AsyncWrite};
use crate::setup::Extensions;
// Use a macro to generate the message types rather than copy-paste. // Use a macro to generate the message types rather than copy-paste.
// This implements a decode/encode method that uses the specified type. // This implements a decode/encode method that uses the specified type.
@ -74,23 +68,23 @@ macro_rules! message_types {
} }
impl Message { impl Message {
pub async fn decode<R: AsyncRead>(r: &mut R, ext: &Extensions) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let t = VarInt::decode(r).await?; let t = VarInt::decode(r).await?;
match t.into_inner() { match t.into_inner() {
$($val => { $($val => {
let msg = $name::decode(r, ext).await?; let msg = $name::decode(r).await?;
Ok(Self::$name(msg)) Ok(Self::$name(msg))
})* })*
_ => Err(DecodeError::InvalidMessage(t)), _ => Err(DecodeError::InvalidType(t)),
} }
} }
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, ext: &Extensions) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
match self { match self {
$(Self::$name(ref m) => { $(Self::$name(ref m) => {
VarInt::from_u32($val).encode(w).await?; VarInt::from_u32($val).encode(w).await?;
m.encode(w, ext).await m.encode(w).await
},)* },)*
} }
} }
@ -133,28 +127,15 @@ macro_rules! message_types {
message_types! { message_types! {
// NOTE: Object and Setup are in other modules. // NOTE: Object and Setup are in other modules.
// Object = 0x0 // Object = 0x0
// ObjectUnbounded = 0x2 // SetupClient = 0x1
// SetupClient = 0x40 // SetupServer = 0x2
// SetupServer = 0x41
// SUBSCRIBE family, sent by subscriber
Subscribe = 0x3, Subscribe = 0x3,
Unsubscribe = 0xa,
// SUBSCRIBE family, sent by publisher
SubscribeOk = 0x4, SubscribeOk = 0x4,
SubscribeError = 0x5, SubscribeReset = 0x5,
SubscribeFin = 0xb, SubscribeStop = 0x15,
SubscribeReset = 0xc,
// ANNOUNCE family, sent by publisher
Announce = 0x6, Announce = 0x6,
Unannounce = 0x9,
// ANNOUNCE family, sent by subscriber
AnnounceOk = 0x7, AnnounceOk = 0x7,
AnnounceError = 0x8, AnnounceReset = 0x8,
AnnounceStop = 0x18,
// Misc
GoAway = 0x10, GoAway = 0x10,
} }
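The generated `decode` reads the varint type ID and dispatches to the matching struct, so `0x7` on the wire routes to `AnnounceOk::decode`. A sketch under the same in-memory-duplex assumption as earlier:

```rust
use moq_transport::message::{AnnounceOk, Message};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
	let (mut send, mut recv) = tokio::io::duplex(64);

	// encode() writes the type ID (0x7) before the payload...
	let msg = Message::AnnounceOk(AnnounceOk { namespace: "demo".to_string() });
	msg.encode(&mut send).await?;

	// ...and decode() reads it back to select the variant.
	match Message::decode(&mut recv).await? {
		Message::AnnounceOk(ok) => assert_eq!(ok.namespace, "demo"),
		other => anyhow::bail!("unexpected message: {:?}", other),
	}
	Ok(())
}
```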

View File

@ -1,10 +1,9 @@
use std::{io, time}; use std::time;
use tokio::io::AsyncReadExt; use crate::coding::{DecodeError, EncodeError, VarInt};
use crate::coding::{AsyncRead, AsyncWrite}; use crate::coding::{AsyncRead, AsyncWrite};
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; use tokio::io::{AsyncReadExt, AsyncWriteExt};
use crate::setup;
/// Sent by the publisher as the header of each data stream. /// Sent by the publisher as the header of each data stream.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
@ -14,78 +13,47 @@ pub struct Object {
pub track: VarInt, pub track: VarInt,
// The sequence number within the track. // The sequence number within the track.
pub group: VarInt,
// The sequence number within the group.
pub sequence: VarInt, pub sequence: VarInt,
// The priority, where **smaller** values are sent first. // The priority, where **larger** values are sent first.
pub priority: u32, // Proposal: int32 instead of a varint.
pub priority: i32,
// Cache the object for at most this many seconds. // Cache the object for at most this many seconds.
// Zero means never expire. // Zero means never expire.
pub expires: Option<time::Duration>, pub expires: Option<time::Duration>,
/// An optional size, allowing multiple OBJECTs on the same stream.
pub size: Option<VarInt>,
} }
impl Object { impl Object {
pub async fn decode<R: AsyncRead>(r: &mut R, extensions: &setup::Extensions) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
// Try reading the first byte, returning a special error if the stream naturally ended. let typ = VarInt::decode(r).await?;
let typ = match r.read_u8().await { if typ.into_inner() != 0 {
Ok(b) => VarInt::decode_byte(b, r).await?, return Err(DecodeError::InvalidType(typ));
Err(e) if e.kind() == io::ErrorKind::UnexpectedEof => return Err(DecodeError::Final), }
Err(e) => return Err(e.into()),
};
let size_present = match typ.into_inner() { // NOTE: size has been omitted
0 => false,
2 => true,
_ => return Err(DecodeError::InvalidMessage(typ)),
};
let track = VarInt::decode(r).await?; let track = VarInt::decode(r).await?;
let group = VarInt::decode(r).await?;
let sequence = VarInt::decode(r).await?; let sequence = VarInt::decode(r).await?;
let priority = VarInt::decode(r).await?.try_into()?; let priority = r.read_i32().await?; // big-endian
let expires = match VarInt::decode(r).await?.into_inner() {
let expires = match extensions.object_expires { 0 => None,
true => match VarInt::decode(r).await?.into_inner() { secs => Some(time::Duration::from_secs(secs)),
0 => None,
secs => Some(time::Duration::from_secs(secs)),
},
false => None,
};
// The presence of the size field depends on the type.
let size = match size_present {
true => Some(VarInt::decode(r).await?),
false => None,
}; };
Ok(Self { Ok(Self {
track, track,
group,
sequence, sequence,
priority, priority,
expires, expires,
size,
}) })
} }
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, extensions: &setup::Extensions) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
// The kind changes based on the presence of the size. VarInt::ZERO.encode(w).await?;
let kind = match self.size {
Some(_) => VarInt::from_u32(2),
None => VarInt::ZERO,
};
kind.encode(w).await?;
self.track.encode(w).await?; self.track.encode(w).await?;
self.group.encode(w).await?;
self.sequence.encode(w).await?; self.sequence.encode(w).await?;
VarInt::from_u32(self.priority).encode(w).await?; w.write_i32(self.priority).await?;
// Round up if there's any decimal points. // Round up if there's any decimal points.
let expires = match self.expires { let expires = match self.expires {
@ -95,13 +63,7 @@ impl Object {
Some(expires) => expires.as_secs(), Some(expires) => expires.as_secs(),
}; };
if extensions.object_expires { VarInt::try_from(expires)?.encode(w).await?;
VarInt::try_from(expires)?.encode(w).await?;
}
if let Some(size) = self.size {
size.encode(w).await?;
}
Ok(()) Ok(())
} }
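The trimmed-down header is a zero type ID, two varints, a big-endian `i32` priority, then the expiry in whole seconds (zero meaning never). A hedged round-trip sketch:

```rust
use std::time;
use moq_transport::{message::Object, VarInt};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
	let (mut send, mut recv) = tokio::io::duplex(64);

	let header = Object {
		track: VarInt::from_u32(1),
		sequence: VarInt::from_u32(42),
		priority: 7, // larger values are sent first
		expires: Some(time::Duration::from_secs(10)),
	};
	header.encode(&mut send).await?;

	let decoded = Object::decode(&mut recv).await?;
	assert_eq!(decoded.priority, 7);
	Ok(())
}
```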

View File

@ -1,141 +1,38 @@
use crate::coding::{Decode, DecodeError, Encode, EncodeError, Params, VarInt}; use crate::coding::{decode_string, encode_string, DecodeError, EncodeError, VarInt};
use crate::coding::{AsyncRead, AsyncWrite}; use crate::coding::{AsyncRead, AsyncWrite};
use crate::setup::Extensions;
/// Sent by the subscriber to request all future objects for the given track. /// Sent by the subscriber to request all future objects for the given track.
/// ///
/// Objects will use the provided ID instead of the full track name, to save bytes. /// Objects will use the provided ID instead of the full track name, to save bytes.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Subscribe { pub struct Subscribe {
/// An ID we choose so we can map to the track_name. // An ID we choose so we can map to the track_name.
// Proposal: https://github.com/moq-wg/moq-transport/issues/209 // Proposal: https://github.com/moq-wg/moq-transport/issues/209
pub id: VarInt, pub id: VarInt,
/// The track namespace. // The track namespace.
/// pub namespace: String,
/// Must be None if `extensions.subscribe_split` is false.
pub namespace: Option<String>,
/// The track name. // The track name.
pub name: String, pub name: String,
/// The start/end group/object.
pub start_group: SubscribeLocation,
pub start_object: SubscribeLocation,
pub end_group: SubscribeLocation,
pub end_object: SubscribeLocation,
/// Optional parameters
pub params: Params,
} }
impl Subscribe { impl Subscribe {
pub async fn decode<R: AsyncRead>(r: &mut R, ext: &Extensions) -> Result<Self, DecodeError> {
let id = VarInt::decode(r).await?;
let namespace = match ext.subscribe_split {
true => Some(String::decode(r).await?),
false => None,
};
let name = String::decode(r).await?;
let start_group = SubscribeLocation::decode(r).await?;
let start_object = SubscribeLocation::decode(r).await?;
let end_group = SubscribeLocation::decode(r).await?;
let end_object = SubscribeLocation::decode(r).await?;
// You can't have a start object without a start group.
if start_group == SubscribeLocation::None && start_object != SubscribeLocation::None {
return Err(DecodeError::InvalidSubscribeLocation);
}
// You can't have an end object without an end group.
if end_group == SubscribeLocation::None && end_object != SubscribeLocation::None {
return Err(DecodeError::InvalidSubscribeLocation);
}
// NOTE: There's some more location restrictions in the draft, but they're enforced at a higher level.
let params = Params::decode(r).await?;
Ok(Self {
id,
namespace,
name,
start_group,
start_object,
end_group,
end_object,
params,
})
}
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, ext: &Extensions) -> Result<(), EncodeError> {
self.id.encode(w).await?;
if self.namespace.is_some() != ext.subscribe_split {
panic!("namespace must be None if subscribe_split is false");
}
if ext.subscribe_split {
self.namespace.as_ref().unwrap().encode(w).await?;
}
self.name.encode(w).await?;
self.start_group.encode(w).await?;
self.start_object.encode(w).await?;
self.end_group.encode(w).await?;
self.end_object.encode(w).await?;
self.params.encode(w).await?;
Ok(())
}
}
/// Signal where the subscription should begin, relative to the current cache.
#[derive(Clone, Debug, PartialEq)]
pub enum SubscribeLocation {
None,
Absolute(VarInt),
Latest(VarInt),
Future(VarInt),
}
impl SubscribeLocation {
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let kind = VarInt::decode(r).await?; let id = VarInt::decode(r).await?;
let namespace = decode_string(r).await?;
let name = decode_string(r).await?;
match kind.into_inner() { Ok(Self { id, namespace, name })
0 => Ok(Self::None),
1 => Ok(Self::Absolute(VarInt::decode(r).await?)),
2 => Ok(Self::Latest(VarInt::decode(r).await?)),
3 => Ok(Self::Future(VarInt::decode(r).await?)),
_ => Err(DecodeError::InvalidSubscribeLocation),
}
} }
}
impl Subscribe {
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
match self { self.id.encode(w).await?;
Self::None => { encode_string(&self.namespace, w).await?;
VarInt::from_u32(0).encode(w).await?; encode_string(&self.name, w).await?;
}
Self::Absolute(val) => {
VarInt::from_u32(1).encode(w).await?;
val.encode(w).await?;
}
Self::Latest(val) => {
VarInt::from_u32(2).encode(w).await?;
val.encode(w).await?;
}
Self::Future(val) => {
VarInt::from_u32(3).encode(w).await?;
val.encode(w).await?;
}
}
Ok(()) Ok(())
} }
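The subscriber chooses `id` itself as a short alias for the namespace/name pair (per the issue 209 proposal), so later headers only carry the varint. A hedged round-trip of the slimmed-down message:

```rust
use moq_transport::{message::Subscribe, VarInt};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
	let (mut send, mut recv) = tokio::io::duplex(64);

	let msg = Subscribe {
		id: VarInt::from_u32(1), // our alias for namespace + name
		namespace: "demo".to_string(),
		name: "video".to_string(),
	};
	msg.encode(&mut send).await?;

	let decoded = Subscribe::decode(&mut recv).await?;
	assert_eq!(decoded.id, msg.id);
	Ok(())
}
```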

View File

@ -1,36 +0,0 @@
use crate::coding::{AsyncRead, AsyncWrite};
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};
use crate::setup::Extensions;
/// Sent by the publisher to reject a Subscribe.
#[derive(Clone, Debug)]
pub struct SubscribeError {
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
// The ID for this subscription.
pub id: VarInt,
// An error code.
pub code: u32,
// An optional, human-readable reason.
pub reason: String,
}
impl SubscribeError {
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
let id = VarInt::decode(r).await?;
let code = VarInt::decode(r).await?.try_into()?;
let reason = String::decode(r).await?;
Ok(Self { id, code, reason })
}
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
self.id.encode(w).await?;
VarInt::from_u32(self.code).encode(w).await?;
self.reason.encode(w).await?;
Ok(())
}
}

View File

@ -1,37 +0,0 @@
use crate::coding::{AsyncRead, AsyncWrite};
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};
use crate::setup::Extensions;
/// Sent by the publisher to cleanly terminate a Subscribe.
#[derive(Clone, Debug)]
pub struct SubscribeFin {
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
/// The ID for this subscription.
pub id: VarInt,
/// The final group/object sent on this subscription.
pub final_group: VarInt,
pub final_object: VarInt,
}
impl SubscribeFin {
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
let id = VarInt::decode(r).await?;
let final_group = VarInt::decode(r).await?;
let final_object = VarInt::decode(r).await?;
Ok(Self {
id,
final_group,
final_object,
})
}
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
self.id.encode(w).await?;
self.final_group.encode(w).await?;
self.final_object.encode(w).await?;
Ok(())
}
}

View File

@ -1,31 +1,26 @@
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; use crate::coding::{DecodeError, EncodeError, VarInt};
use crate::coding::{AsyncRead, AsyncWrite}; use crate::coding::{AsyncRead, AsyncWrite};
use crate::setup::Extensions;
/// Sent by the publisher to accept a Subscribe. /// Sent by the publisher to accept a Subscribe.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct SubscribeOk { pub struct SubscribeOk {
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209 // NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
/// The ID for this track.
pub id: VarInt,
/// The subscription will expire in this many milliseconds. // The ID for this track.
pub expires: VarInt, pub id: VarInt,
} }
impl SubscribeOk { impl SubscribeOk {
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let id = VarInt::decode(r).await?; let id = VarInt::decode(r).await?;
let expires = VarInt::decode(r).await?; Ok(Self { id })
Ok(Self { id, expires })
} }
} }
impl SubscribeOk { impl SubscribeOk {
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
self.id.encode(w).await?; self.id.encode(w).await?;
self.expires.encode(w).await?;
Ok(()) Ok(())
} }
} }

View File

@ -1,49 +1,35 @@
use crate::coding::{AsyncRead, AsyncWrite}; use crate::coding::{decode_string, encode_string, DecodeError, EncodeError, VarInt};
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt};
use crate::setup::Extensions;
/// Sent by the publisher to terminate a Subscribe. use crate::coding::{AsyncRead, AsyncWrite};
/// Sent by the publisher to reject a Subscribe.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct SubscribeReset { pub struct SubscribeReset {
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209 // NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
/// The ID for this subscription.
// The ID for this subscription.
pub id: VarInt, pub id: VarInt,
/// An error code. // An error code.
pub code: u32, pub code: u32,
/// An optional, human-readable reason. // An optional, human-readable reason.
pub reason: String, pub reason: String,
/// The final group/object sent on this subscription.
pub final_group: VarInt,
pub final_object: VarInt,
} }
impl SubscribeReset { impl SubscribeReset {
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let id = VarInt::decode(r).await?; let id = VarInt::decode(r).await?;
let code = VarInt::decode(r).await?.try_into()?; let code = VarInt::decode(r).await?.try_into()?;
let reason = String::decode(r).await?; let reason = decode_string(r).await?;
let final_group = VarInt::decode(r).await?;
let final_object = VarInt::decode(r).await?;
Ok(Self { Ok(Self { id, code, reason })
id,
code,
reason,
final_group,
final_object,
})
} }
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
self.id.encode(w).await?; self.id.encode(w).await?;
VarInt::from_u32(self.code).encode(w).await?; VarInt::from_u32(self.code).encode(w).await?;
self.reason.encode(w).await?; encode_string(&self.reason, w).await?;
self.final_group.encode(w).await?;
self.final_object.encode(w).await?;
Ok(()) Ok(())
} }

View File

@ -1,26 +1,25 @@
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; use crate::coding::{DecodeError, EncodeError, VarInt};
use crate::coding::{AsyncRead, AsyncWrite}; use crate::coding::{AsyncRead, AsyncWrite};
use crate::setup::Extensions;
/// Sent by the subscriber to terminate a Subscribe. /// Sent by the subscriber to terminate a Subscribe.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct Unsubscribe { pub struct SubscribeStop {
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209 // NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
// The ID for this subscription. // The ID for this subscription.
pub id: VarInt, pub id: VarInt,
} }
impl Unsubscribe { impl SubscribeStop {
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let id = VarInt::decode(r).await?; let id = VarInt::decode(r).await?;
Ok(Self { id }) Ok(Self { id })
} }
} }
impl Unsubscribe { impl SubscribeStop {
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
self.id.encode(w).await?; self.id.encode(w).await?;
Ok(()) Ok(())
} }

View File

@ -1,25 +0,0 @@
use crate::coding::{Decode, DecodeError, Encode, EncodeError};
use crate::coding::{AsyncRead, AsyncWrite};
use crate::setup::Extensions;
/// Sent by the publisher to terminate an Announce.
#[derive(Clone, Debug)]
pub struct Unannounce {
// Echo back the namespace that was reset
pub namespace: String,
}
impl Unannounce {
pub async fn decode<R: AsyncRead>(r: &mut R, _ext: &Extensions) -> Result<Self, DecodeError> {
let namespace = String::decode(r).await?;
Ok(Self { namespace })
}
pub async fn encode<W: AsyncWrite>(&self, w: &mut W, _ext: &Extensions) -> Result<(), EncodeError> {
self.namespace.encode(w).await?;
Ok(())
}
}

View File

@ -2,67 +2,61 @@
//! //!
//! The [Publisher] can create tracks, either manually or on request. //! The [Publisher] can create tracks, either manually or on request.
//! It receives all requests by a [Subscriber] for tracks that don't exist. //! It receives all requests by a [Subscriber] for tracks that don't exist.
//! The simplest implementation is to close every unknown track with [CacheError::NotFound]. //! The simplest implementation is to close every unknown track with [Error::NotFound].
//! //!
//! A [Subscriber] can request tracks by name. //! A [Subscriber] can request tracks by name.
//! If the track already exists, it will be returned. //! If the track already exists, it will be returned.
//! If the track doesn't exist, it will be sent to [Unknown] to be handled. //! If the track doesn't exist, it will be sent to [Unknown] to be handled.
//! A [Subscriber] can be cloned to create multiple subscriptions. //! A [Subscriber] can be cloned to create multiple subscriptions.
//! //!
//! The broadcast is automatically closed with [CacheError::Closed] when [Publisher] is dropped, or all [Subscriber]s are dropped. //! The broadcast is automatically closed with [Error::Closed] when [Publisher] is dropped, or all [Subscriber]s are dropped.
use std::{ use std::{
collections::{hash_map, HashMap, VecDeque}, collections::{hash_map, HashMap, VecDeque},
fmt, fmt,
ops::Deref,
sync::Arc, sync::Arc,
}; };
use super::{track, CacheError, Watch}; use crate::Error;
use super::{track, Watch};
/// Create a new broadcast. /// Create a new broadcast.
pub fn new(id: &str) -> (Publisher, Subscriber) { pub fn new() -> (Publisher, Subscriber) {
let state = Watch::new(State::default()); let state = Watch::new(State::default());
let info = Arc::new(Info { id: id.to_string() });
let publisher = Publisher::new(state.clone(), info.clone()); let publisher = Publisher::new(state.clone());
let subscriber = Subscriber::new(state, info); let subscriber = Subscriber::new(state);
(publisher, subscriber) (publisher, subscriber)
} }
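A minimal sketch of the handle pair under the post-change API; a track registered on the publisher side becomes visible to every subscriber clone:

```rust
use moq_transport::model::broadcast;

fn main() -> Result<(), moq_transport::Error> {
	let (mut publisher, subscriber) = broadcast::new();

	// Register a track; a second insert under the same name is Error::Duplicate.
	let _track_pub = publisher.create_track("video")?;

	// Any subscriber (or clone of it) can now look the track up by name.
	let track = subscriber.get_track("video")?;
	assert_eq!(track.name, "video");
	Ok(())
}
```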
/// Static information about a broadcast.
#[derive(Debug)]
pub struct Info {
pub id: String,
}
/// Dynamic information about the broadcast. /// Dynamic information about the broadcast.
#[derive(Debug)] #[derive(Debug)]
struct State { struct State {
tracks: HashMap<String, track::Subscriber>, tracks: HashMap<String, track::Subscriber>,
requested: VecDeque<track::Publisher>, requested: VecDeque<track::Publisher>,
closed: Result<(), CacheError>, closed: Result<(), Error>,
} }
impl State { impl State {
pub fn get(&self, name: &str) -> Result<Option<track::Subscriber>, CacheError> { pub fn get(&self, name: &str) -> Result<Option<track::Subscriber>, Error> {
// Don't check closed, so we can return from cache. // Don't check closed, so we can return from cache.
Ok(self.tracks.get(name).cloned()) Ok(self.tracks.get(name).cloned())
} }
pub fn insert(&mut self, track: track::Subscriber) -> Result<(), CacheError> { pub fn insert(&mut self, track: track::Subscriber) -> Result<(), Error> {
self.closed.clone()?; self.closed?;
match self.tracks.entry(track.name.clone()) { match self.tracks.entry(track.name.clone()) {
hash_map::Entry::Occupied(_) => return Err(CacheError::Duplicate), hash_map::Entry::Occupied(_) => return Err(Error::Duplicate),
hash_map::Entry::Vacant(v) => v.insert(track), hash_map::Entry::Vacant(v) => v.insert(track),
}; };
Ok(()) Ok(())
} }
pub fn request(&mut self, name: &str) -> Result<track::Subscriber, CacheError> { pub fn request(&mut self, name: &str) -> Result<track::Subscriber, Error> {
self.closed.clone()?; self.closed?;
// Create a new track. // Create a new track.
let (publisher, subscriber) = track::new(name); let (publisher, subscriber) = track::new(name);
@ -76,13 +70,13 @@ impl State {
Ok(subscriber) Ok(subscriber)
} }
pub fn has_next(&self) -> Result<bool, CacheError> { pub fn has_next(&self) -> Result<bool, Error> {
// Check if there are any elements in the queue before checking closed. // Check if there are any elements in the queue before checking closed.
if !self.requested.is_empty() { if !self.requested.is_empty() {
return Ok(true); return Ok(true);
} }
self.closed.clone()?; self.closed?;
Ok(false) Ok(false)
} }
@ -91,8 +85,8 @@ impl State {
self.requested.pop_front().expect("no entry in queue") self.requested.pop_front().expect("no entry in queue")
} }
pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> { pub fn close(&mut self, err: Error) -> Result<(), Error> {
self.closed.clone()?; self.closed?;
self.closed = Err(err); self.closed = Err(err);
Ok(()) Ok(())
} }
@ -113,35 +107,34 @@ impl Default for State {
#[derive(Clone)] #[derive(Clone)]
pub struct Publisher { pub struct Publisher {
state: Watch<State>, state: Watch<State>,
info: Arc<Info>,
_dropped: Arc<Dropped>, _dropped: Arc<Dropped>,
} }
impl Publisher { impl Publisher {
fn new(state: Watch<State>, info: Arc<Info>) -> Self { fn new(state: Watch<State>) -> Self {
let _dropped = Arc::new(Dropped::new(state.clone())); let _dropped = Arc::new(Dropped::new(state.clone()));
Self { state, info, _dropped } Self { state, _dropped }
} }
/// Create a new track with the given name, inserting it into the broadcast. /// Create a new track with the given name, inserting it into the broadcast.
pub fn create_track(&mut self, name: &str) -> Result<track::Publisher, CacheError> { pub fn create_track(&mut self, name: &str) -> Result<track::Publisher, Error> {
let (publisher, subscriber) = track::new(name); let (publisher, subscriber) = track::new(name);
self.state.lock_mut().insert(subscriber)?; self.state.lock_mut().insert(subscriber)?;
Ok(publisher) Ok(publisher)
} }
/// Insert a track into the broadcast. /// Insert a track into the broadcast.
pub fn insert_track(&mut self, track: track::Subscriber) -> Result<(), CacheError> { pub fn insert_track(&mut self, track: track::Subscriber) -> Result<(), Error> {
self.state.lock_mut().insert(track) self.state.lock_mut().insert(track)
} }
/// Block until the next track requested by a subscriber. /// Block until the next track requested by a subscriber.
pub async fn next_track(&mut self) -> Result<track::Publisher, CacheError> { pub async fn next_track(&mut self) -> Result<Option<track::Publisher>, Error> {
loop { loop {
let notify = { let notify = {
let state = self.state.lock(); let state = self.state.lock();
if state.has_next()? { if state.has_next()? {
return Ok(state.into_mut().next()); return Ok(Some(state.into_mut().next()));
} }
state.changed() state.changed()
@ -152,25 +145,14 @@ impl Publisher {
} }
/// Close the broadcast with an error. /// Close the broadcast with an error.
pub fn close(self, err: CacheError) -> Result<(), CacheError> { pub fn close(self, err: Error) -> Result<(), Error> {
self.state.lock_mut().close(err) self.state.lock_mut().close(err)
} }
} }
impl Deref for Publisher {
type Target = Info;
fn deref(&self) -> &Self::Target {
&self.info
}
}
impl fmt::Debug for Publisher { impl fmt::Debug for Publisher {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Publisher") f.debug_struct("Publisher").field("state", &self.state).finish()
.field("state", &self.state)
.field("info", &self.info)
.finish()
} }
} }
@ -180,20 +162,19 @@ impl fmt::Debug for Publisher {
#[derive(Clone)] #[derive(Clone)]
pub struct Subscriber { pub struct Subscriber {
state: Watch<State>, state: Watch<State>,
info: Arc<Info>,
_dropped: Arc<Dropped>, _dropped: Arc<Dropped>,
} }
impl Subscriber { impl Subscriber {
fn new(state: Watch<State>, info: Arc<Info>) -> Self { fn new(state: Watch<State>) -> Self {
let _dropped = Arc::new(Dropped::new(state.clone())); let _dropped = Arc::new(Dropped::new(state.clone()));
Self { state, info, _dropped } Self { state, _dropped }
} }
/// Get a track from the broadcast by name. /// Get a track from the broadcast by name.
/// If the track does not exist, it will be created and potentially fulfilled by the publisher (via Unknown). /// If the track does not exist, it will be created and potentially fulfilled by the publisher (via Unknown).
/// Otherwise, it will return [CacheError::NotFound]. /// Otherwise, it will return [Error::NotFound].
pub fn get_track(&self, name: &str) -> Result<track::Subscriber, CacheError> { pub fn get_track(&self, name: &str) -> Result<track::Subscriber, Error> {
let state = self.state.lock(); let state = self.state.lock();
if let Some(track) = state.get(name)? { if let Some(track) = state.get(name)? {
return Ok(track); return Ok(track);
@ -202,43 +183,11 @@ impl Subscriber {
// Request a new track if it does not exist. // Request a new track if it does not exist.
state.into_mut().request(name) state.into_mut().request(name)
} }
/// Check if the broadcast is closed, either because the publisher was dropped or called [Publisher::close].
pub fn is_closed(&self) -> Option<CacheError> {
self.state.lock().closed.as_ref().err().cloned()
}
/// Wait until if the broadcast is closed, either because the publisher was dropped or called [Publisher::close].
pub async fn closed(&self) -> CacheError {
loop {
let notify = {
let state = self.state.lock();
if let Some(err) = state.closed.as_ref().err() {
return err.clone();
}
state.changed()
};
notify.await;
}
}
}
impl Deref for Subscriber {
type Target = Info;
fn deref(&self) -> &Self::Target {
&self.info
}
} }
impl fmt::Debug for Subscriber { impl fmt::Debug for Subscriber {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Subscriber") f.debug_struct("Subscriber").field("state", &self.state).finish()
.field("state", &self.state)
.field("info", &self.info)
.finish()
} }
} }
@ -257,6 +206,6 @@ impl Dropped {
impl Drop for Dropped { impl Drop for Dropped {
fn drop(&mut self) { fn drop(&mut self) {
self.state.lock_mut().close(CacheError::Closed).ok(); self.state.lock_mut().close(Error::Closed).ok();
} }
} }

View File

@ -0,0 +1,11 @@
//! Allows a publisher to push updates, automatically caching and fanning it out to any subscribers.
//!
//! The naming scheme doesn't match the spec because it's vague and confusing.
//! The hierarchy is: [broadcast] -> [track] -> [segment] -> [Bytes](bytes::Bytes)
pub mod broadcast;
pub mod segment;
pub mod track;
pub(crate) mod watch;
pub(crate) use watch::*;

View File

@ -1,20 +1,28 @@
//! A fragment is a stream of bytes with a header, split into a [Publisher] and [Subscriber] handle. //! A segment is a stream of bytes with a header, split into a [Publisher] and [Subscriber] handle.
//! //!
//! A [Publisher] writes an ordered stream of bytes in chunks. //! A [Publisher] writes an ordered stream of bytes in chunks.
//! There's no framing, so these chunks can be of any size or position, and won't be maintained over the network. //! There's no framing, so these chunks can be of any size or position, and won't be maintained over the network.
//! //!
//! A [Subscriber] reads an ordered stream of bytes in chunks. //! A [Subscriber] reads an ordered stream of bytes in chunks.
//! These chunks are returned directly from the QUIC connection, so they may be of any size or position. //! These chunks are returned directly from the QUIC connection, so they may be of any size or position.
//! You can clone the [Subscriber] and each will read a copy of all future chunks. (fanout) //! A cloned [Subscriber] will receive a copy of all future chunks. (fanout)
//! //!
//! The fragment is closed with [CacheError::Closed] when all publishers or subscribers are dropped. //! The segment is closed with [Error::Closed] when all publishers or subscribers are dropped.
use core::fmt; use core::fmt;
use std::{ops::Deref, sync::Arc}; use std::{
future::poll_fn,
io,
ops::Deref,
pin::Pin,
sync::Arc,
task::{ready, Context, Poll},
time,
};
use crate::VarInt; use crate::{Error, VarInt};
use bytes::Bytes; use bytes::{Bytes, BytesMut};
use super::{CacheError, Watch}; use super::Watch;
/// Create a new segment with the given info. /// Create a new segment with the given info.
pub fn new(info: Info) -> (Publisher, Subscriber) { pub fn new(info: Info) -> (Publisher, Subscriber) {
@ -30,39 +38,36 @@ pub fn new(info: Info) -> (Publisher, Subscriber) {
/// Static information about the segment. /// Static information about the segment.
#[derive(Debug)] #[derive(Debug)]
pub struct Info { pub struct Info {
// The sequence number of the fragment within the segment. // The sequence number of the segment within the track.
// NOTE: These may be received out of order or with gaps.
pub sequence: VarInt, pub sequence: VarInt,
// The size of the fragment, optionally None if this is the last fragment in a segment. // The priority of the segment within the BROADCAST.
// TODO enforce this size. pub priority: i32,
pub size: Option<VarInt>,
// Cache the segment for at most this long.
pub expires: Option<time::Duration>,
} }
struct State { struct State {
// The data that has been received thus far. // The data that has been received thus far.
chunks: Vec<Bytes>, data: Vec<Bytes>,
// Set when the publisher is dropped. // Set when the publisher is dropped.
closed: Result<(), CacheError>, closed: Result<(), Error>,
} }
impl State { impl State {
pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> { pub fn close(&mut self, err: Error) -> Result<(), Error> {
self.closed.clone()?; self.closed?;
self.closed = Err(err); self.closed = Err(err);
Ok(()) Ok(())
} }
pub fn bytes(&self) -> usize {
self.chunks.iter().map(|f| f.len()).sum::<usize>()
}
} }
impl Default for State { impl Default for State {
fn default() -> Self { fn default() -> Self {
Self { Self {
chunks: Vec::new(), data: Vec::new(),
closed: Ok(()), closed: Ok(()),
} }
} }
@ -71,9 +76,11 @@ impl Default for State {
impl fmt::Debug for State { impl fmt::Debug for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// We don't want to print out the contents, so summarize. // We don't want to print out the contents, so summarize.
let size = self.data.iter().map(|chunk| chunk.len()).sum::<usize>();
let data = format!("size={} chunks={}", size, self.data.len());
f.debug_struct("State") f.debug_struct("State")
.field("chunks", &self.chunks.len().to_string()) .field("data", &data)
.field("bytes", &self.bytes().to_string())
.field("closed", &self.closed) .field("closed", &self.closed)
.finish() .finish()
} }
@ -98,15 +105,15 @@ impl Publisher {
} }
/// Write a new chunk of bytes. /// Write a new chunk of bytes.
pub fn write_chunk(&mut self, chunk: Bytes) -> Result<(), CacheError> { pub fn write_chunk(&mut self, data: Bytes) -> Result<(), Error> {
let mut state = self.state.lock_mut(); let mut state = self.state.lock_mut();
state.closed.clone()?; state.closed?;
state.chunks.push(chunk); state.data.push(data);
Ok(()) Ok(())
} }
/// Close the segment with an error. /// Close the segment with an error.
pub fn close(self, err: CacheError) -> Result<(), CacheError> { pub fn close(self, err: Error) -> Result<(), Error> {
self.state.lock_mut().close(err) self.state.lock_mut().close(err)
} }
} }
@ -141,6 +148,9 @@ pub struct Subscriber {
// NOTE: Cloned subscribers inherit this index, but then run in parallel. // NOTE: Cloned subscribers inherit this index, but then run in parallel.
index: usize, index: usize,
// A temporary buffer when using AsyncRead.
buffer: BytesMut,
// Dropped when all Subscribers are dropped. // Dropped when all Subscribers are dropped.
_dropped: Arc<Dropped>, _dropped: Arc<Dropped>,
} }
@ -153,30 +163,83 @@ impl Subscriber {
state, state,
info, info,
index: 0, index: 0,
buffer: BytesMut::new(),
_dropped, _dropped,
} }
} }
/// Block until the next chunk of bytes is available. /// Check if there is a chunk available.
pub async fn read_chunk(&mut self) -> Result<Option<Bytes>, CacheError> { pub fn poll_chunk(&mut self, cx: &mut Context<'_>) -> Poll<Result<Option<Bytes>, Error>> {
loop { // If there's already buffered data, return it.
let notify = { if !self.buffer.is_empty() {
let state = self.state.lock(); let chunk = self.buffer.split().freeze();
if self.index < state.chunks.len() { return Poll::Ready(Ok(Some(chunk)));
let chunk = state.chunks[self.index].clone();
self.index += 1;
return Ok(Some(chunk));
}
match &state.closed {
Err(CacheError::Closed) => return Ok(None),
Err(err) => return Err(err.clone()),
Ok(()) => state.changed(),
}
};
notify.await; // Try again when the state changes
} }
// Grab the lock and check if there's a new chunk available.
let state = self.state.lock();
if self.index < state.data.len() {
// Yep, clone and return it.
let chunk = state.data[self.index].clone();
self.index += 1;
return Poll::Ready(Ok(Some(chunk)));
}
// Otherwise we wait until the state changes and try again.
match state.closed {
Err(Error::Closed) => return Poll::Ready(Ok(None)),
Err(err) => return Poll::Ready(Err(err)),
Ok(()) => state.waker(cx), // Wake us up when the state changes.
};
Poll::Pending
}
/// Block until the next chunk of bytes is available.
pub async fn read_chunk(&mut self) -> Result<Option<Bytes>, Error> {
poll_fn(|cx| self.poll_chunk(cx)).await
}
}
impl tokio::io::AsyncRead for Subscriber {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<io::Result<()>> {
if !self.buffer.is_empty() {
// Read from the existing buffer
let size = std::cmp::min(buf.remaining(), self.buffer.len());
let data = self.buffer.split_to(size).freeze();
buf.put_slice(&data);
return Poll::Ready(Ok(()));
}
// Check if there's a new chunk available
let chunk = ready!(self.poll_chunk(cx));
let chunk = match chunk {
// We'll read as much of it as we can, and buffer the rest.
Ok(Some(chunk)) => chunk,
// No more data.
Ok(None) => return Poll::Ready(Ok(())),
// Crudely cast to io::Error
Err(err) => return Poll::Ready(Err(err.as_io())),
};
// Determine how much of the chunk we can return vs buffer.
let size = std::cmp::min(buf.remaining(), chunk.len());
// Return this much.
buf.put_slice(chunk[..size].as_ref());
// Buffer this much.
self.buffer.extend_from_slice(chunk[size..].as_ref());
Poll::Ready(Ok(()))
} }
} }
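With `AsyncRead` implemented, a segment subscriber plugs into anything that consumes a byte stream. A sketch using the `Info` fields shown above; dropping the publisher closes the segment, which `poll_chunk` surfaces as EOF:

```rust
use bytes::Bytes;
use moq_transport::{model::segment, VarInt};
use tokio::io::AsyncReadExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
	let (mut publisher, mut subscriber) = segment::new(segment::Info {
		sequence: VarInt::from_u32(0),
		priority: 0,
		expires: None,
	});

	publisher.write_chunk(Bytes::from_static(b"hello ")).unwrap();
	publisher.write_chunk(Bytes::from_static(b"world")).unwrap();
	drop(publisher); // closes with Error::Closed, read as a clean EOF

	// The chunks come back as one ordered byte stream.
	let mut data = Vec::new();
	subscriber.read_to_end(&mut data).await?;
	assert_eq!(data, b"hello world");
	Ok(())
}
```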
@ -211,6 +274,6 @@ impl Dropped {
impl Drop for Dropped { impl Drop for Dropped {
fn drop(&mut self) { fn drop(&mut self) {
self.state.lock_mut().close(CacheError::Closed).ok(); self.state.lock_mut().close(Error::Closed).ok();
} }
} }

View File

@ -10,14 +10,14 @@
//! Segments will be cached for a potentially limited duration, adding to the unreliable nature. //! Segments will be cached for a potentially limited duration, adding to the unreliable nature.
//! A cloned [Subscriber] will receive a copy of all new segments going forward (fanout). //! A cloned [Subscriber] will receive a copy of all new segments going forward (fanout).
//! //!
//! The track is closed with [CacheError::Closed] when all publishers or subscribers are dropped. //! The track is closed with [Error::Closed] when all publishers or subscribers are dropped.
use std::{collections::BinaryHeap, fmt, ops::Deref, sync::Arc, time}; use std::{collections::BinaryHeap, fmt, ops::Deref, sync::Arc, time};
use indexmap::IndexMap; use indexmap::IndexMap;
use super::{segment, CacheError, Watch}; use super::{segment, Watch};
use crate::VarInt; use crate::{Error, VarInt};
/// Create a track with the given name. /// Create a track with the given name.
pub fn new(name: &str) -> (Publisher, Subscriber) { pub fn new(name: &str) -> (Publisher, Subscriber) {
@ -49,21 +49,21 @@ struct State {
pruned: usize, pruned: usize,
// Set when the publisher is closed/dropped, or all subscribers are dropped. // Set when the publisher is closed/dropped, or all subscribers are dropped.
closed: Result<(), CacheError>, closed: Result<(), Error>,
} }
impl State { impl State {
pub fn close(&mut self, err: CacheError) -> Result<(), CacheError> { pub fn close(&mut self, err: Error) -> Result<(), Error> {
self.closed.clone()?; self.closed?;
self.closed = Err(err); self.closed = Err(err);
Ok(()) Ok(())
} }
pub fn insert(&mut self, segment: segment::Subscriber) -> Result<(), CacheError> { pub fn insert(&mut self, segment: segment::Subscriber) -> Result<(), Error> {
self.closed.clone()?; self.closed?;
let entry = match self.lookup.entry(segment.sequence) { let entry = match self.lookup.entry(segment.sequence) {
indexmap::map::Entry::Occupied(_entry) => return Err(CacheError::Duplicate), indexmap::map::Entry::Occupied(_entry) => return Err(Error::Duplicate),
indexmap::map::Entry::Vacant(entry) => entry, indexmap::map::Entry::Vacant(entry) => entry,
}; };
@ -144,19 +144,19 @@ impl Publisher {
} }
/// Insert a new segment. /// Insert a new segment.
pub fn insert_segment(&mut self, segment: segment::Subscriber) -> Result<(), CacheError> { pub fn insert_segment(&mut self, segment: segment::Subscriber) -> Result<(), Error> {
self.state.lock_mut().insert(segment) self.state.lock_mut().insert(segment)
} }
	/// Create and insert a segment with the given info. /// Create and insert a segment with the given info.
pub fn create_segment(&mut self, info: segment::Info) -> Result<segment::Publisher, CacheError> { pub fn create_segment(&mut self, info: segment::Info) -> Result<segment::Publisher, Error> {
let (publisher, subscriber) = segment::new(info); let (publisher, subscriber) = segment::new(info);
self.insert_segment(subscriber)?; self.insert_segment(subscriber)?;
Ok(publisher) Ok(publisher)
} }
/// Close the segment with an error. /// Close the segment with an error.
pub fn close(self, err: CacheError) -> Result<(), CacheError> { pub fn close(self, err: Error) -> Result<(), Error> {
self.state.lock_mut().close(err) self.state.lock_mut().close(err)
} }
} }
@ -206,8 +206,8 @@ impl Subscriber {
} }
} }
/// Block until the next segment arrives /// Block until the next segment arrives, or return None if the track is [Error::Closed].
pub async fn next_segment(&mut self) -> Result<Option<segment::Subscriber>, CacheError> { pub async fn next_segment(&mut self) -> Result<Option<segment::Subscriber>, Error> {
loop { loop {
let notify = { let notify = {
let state = self.state.lock(); let state = self.state.lock();
@ -236,9 +236,9 @@ impl Subscriber {
} }
// Otherwise check if we need to return an error. // Otherwise check if we need to return an error.
match &state.closed { match state.closed {
Err(CacheError::Closed) => return Ok(None), Err(Error::Closed) => return Ok(None),
Err(err) => return Err(err.clone()), Err(err) => return Err(err),
Ok(()) => state.changed(), Ok(()) => state.changed(),
} }
}; };
@ -279,7 +279,7 @@ impl Dropped {
impl Drop for Dropped { impl Drop for Dropped {
fn drop(&mut self) { fn drop(&mut self) {
self.state.lock_mut().close(CacheError::Closed).ok(); self.state.lock_mut().close(Error::Closed).ok();
} }
} }
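Putting the layers together, a hedged consumption loop: `next_segment()` yields `None` on a clean close, and each segment is drained chunk by chunk:

```rust
use moq_transport::{model::track, Error};

async fn consume(mut track: track::Subscriber) -> Result<(), Error> {
	while let Some(mut segment) = track.next_segment().await? {
		let mut total = 0;
		while let Some(chunk) = segment.read_chunk().await? {
			total += chunk.len();
		}
		log::info!("segment {} done: {} bytes", segment.sequence, total);
	}
	Ok(())
}
```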

View File

@ -108,6 +108,11 @@ impl<'a, T> WatchRef<'a, T> {
} }
} }
// Release the lock and provide a context to wake when next updated.
pub fn waker(mut self, cx: &mut task::Context<'_>) {
self.lock.register(cx.waker());
}
// Upgrade to a mutable references that automatically calls notify on drop. // Upgrade to a mutable references that automatically calls notify on drop.
pub fn into_mut(self) -> WatchMut<'a, T> { pub fn into_mut(self) -> WatchMut<'a, T> {
WatchMut { lock: self.lock } WatchMut { lock: self.lock }
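This is what lets `poll_chunk` register interest without holding the lock across an await. A sketch of the lock/check/register shape; `Watch` is crate-internal, and the poll function here is hypothetical:

```rust
use std::task::{Context, Poll};

// Hypothetical: poll until the watched Vec is non-empty (crate-internal Watch).
fn poll_non_empty(state: &Watch<Vec<u8>>, cx: &mut Context<'_>) -> Poll<usize> {
	let guard = state.lock();
	if !guard.is_empty() {
		return Poll::Ready(guard.len());
	}
	// Nothing yet: register the waker, releasing the lock in the same step.
	guard.waker(cx);
	Poll::Pending
}
```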

View File

@ -1,21 +1,25 @@
use super::{Control, Publisher, SessionError, Subscriber}; use super::{Publisher, Subscriber};
use crate::{cache::broadcast, setup}; use crate::{model::broadcast, setup};
use webtransport_quinn::Session; use webtransport_quinn::{RecvStream, SendStream, Session};
use anyhow::Context;
/// An endpoint that connects to a URL to publish and/or consume live streams. /// An endpoint that connects to a URL to publish and/or consume live streams.
pub struct Client {} pub struct Client {}
impl Client { impl Client {
/// Connect using an established WebTransport session, performing the MoQ handshake as a publisher. /// Connect using an established WebTransport session, performing the MoQ handshake as a publisher.
pub async fn publisher(session: Session, source: broadcast::Subscriber) -> Result<Publisher, SessionError> { pub async fn publisher(session: Session, source: broadcast::Subscriber) -> anyhow::Result<Publisher> {
let control = Self::send_setup(&session, setup::Role::Publisher).await?; let control = Self::send_setup(&session, setup::Role::Publisher).await?;
let publisher = Publisher::new(session, control, source); let publisher = Publisher::new(session, control, source);
Ok(publisher) Ok(publisher)
} }
/// Connect using an established WebTransport session, performing the MoQ handshake as a subscriber. /// Connect using an established WebTransport session, performing the MoQ handshake as a subscriber.
pub async fn subscriber(session: Session, source: broadcast::Publisher) -> Result<Subscriber, SessionError> { pub async fn subscriber(session: Session, source: broadcast::Publisher) -> anyhow::Result<Subscriber> {
let control = Self::send_setup(&session, setup::Role::Subscriber).await?; let control = Self::send_setup(&session, setup::Role::Subscriber).await?;
let subscriber = Subscriber::new(session, control, source); let subscriber = Subscriber::new(session, control, source);
Ok(subscriber) Ok(subscriber)
} }
@ -27,46 +31,31 @@ impl Client {
} }
*/ */
async fn send_setup(session: &Session, role: setup::Role) -> Result<Control, SessionError> { async fn send_setup(session: &Session, role: setup::Role) -> anyhow::Result<(SendStream, RecvStream)> {
	let mut control = session.open_bi().await?; let mut control = session.open_bi().await.context("failed to open bidi stream")?;
let versions: setup::Versions = [setup::Version::DRAFT_01, setup::Version::KIXEL_01].into();
let client = setup::Client { let client = setup::Client {
role, role,
versions: versions.clone(), versions: vec![setup::Version::KIXEL_00].into(),
params: Default::default(),
// Offer all extensions
extensions: setup::Extensions {
object_expires: true,
subscriber_id: true,
subscribe_split: true,
},
}; };
client.encode(&mut control.0).await?; client
.encode(&mut control.0)
.await
.context("failed to send SETUP CLIENT")?;
let mut server = setup::Server::decode(&mut control.1).await?; let server = setup::Server::decode(&mut control.1)
.await
.context("failed to read SETUP")?;
match server.version { if server.version != setup::Version::KIXEL_00 {
setup::Version::DRAFT_01 => { anyhow::bail!("unsupported version: {:?}", server.version);
// We always require this extension
server.extensions.require_subscriber_id()?;
if server.role.is_publisher() {
// We only require object expires if we're a subscriber, so we don't cache objects indefinitely.
server.extensions.require_object_expires()?;
}
}
setup::Version::KIXEL_01 => {
// KIXEL_01 didn't support extensions; all were enabled.
server.extensions = client.extensions.clone()
}
_ => return Err(SessionError::Version(versions, [server.version].into())),
} }
	let control = Control::new(control.0, control.1, server.extensions); // Make sure the server replied with a compatible role.
if !client.role.is_compatible(server.role) {
anyhow::bail!("incompatible roles: client={:?} server={:?}", client.role, server.role);
}
Ok(control) Ok(control)
} }
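End to end, the simplified handshake is two SETUP messages on one bidi stream. A hedged sketch of a subscriber connecting, assuming an already-established `webtransport_quinn::Session` (quinn/TLS setup elided) and that `Subscriber` exposes a `run()` loop mirroring `Publisher`'s below:

```rust
use moq_transport::{model::broadcast, session::Client};

async fn run_subscriber(session: webtransport_quinn::Session) -> anyhow::Result<()> {
	// The remote publisher fills this broadcast; keep the subscriber half
	// (ignored here) to read tracks locally.
	let (publisher, _local) = broadcast::new();

	// Offers KIXEL_00, then checks the server's version and role.
	let subscriber = Client::subscriber(session, publisher).await?;

	// Drive the session until it ends.
	subscriber
		.run()
		.await
		.map_err(|err| anyhow::anyhow!("session error: {:?}", err))
}
```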

View File

@ -5,41 +5,31 @@ use std::{fmt, sync::Arc};
use tokio::sync::Mutex; use tokio::sync::Mutex;
use webtransport_quinn::{RecvStream, SendStream}; use webtransport_quinn::{RecvStream, SendStream};
use super::SessionError; use crate::{message::Message, Error};
use crate::{message::Message, setup::Extensions};
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub(crate) struct Control { pub(crate) struct Control {
send: Arc<Mutex<SendStream>>, send: Arc<Mutex<SendStream>>,
recv: Arc<Mutex<RecvStream>>, recv: Arc<Mutex<RecvStream>>,
pub ext: Extensions,
} }
impl Control { impl Control {
pub fn new(send: SendStream, recv: RecvStream, ext: Extensions) -> Self { pub fn new(send: SendStream, recv: RecvStream) -> Self {
Self { Self {
send: Arc::new(Mutex::new(send)), send: Arc::new(Mutex::new(send)),
recv: Arc::new(Mutex::new(recv)), recv: Arc::new(Mutex::new(recv)),
ext,
} }
} }
pub async fn send<T: Into<Message> + fmt::Debug>(&self, msg: T) -> Result<(), SessionError> { pub async fn send<T: Into<Message> + fmt::Debug>(&self, msg: T) -> Result<(), Error> {
let mut stream = self.send.lock().await; let mut stream = self.send.lock().await;
log::info!("sending message: {:?}", msg); log::info!("sending message: {:?}", msg);
msg.into() msg.into().encode(&mut *stream).await.map_err(|_e| Error::Write)
.encode(&mut *stream, &self.ext)
.await
.map_err(|e| SessionError::Unknown(e.to_string()))?;
Ok(())
} }
// It's likely a mistake to call this from two different tasks, but it's easier to just support it. // It's likely a mistake to call this from two different tasks, but it's easier to just support it.
pub async fn recv(&self) -> Result<Message, SessionError> { pub async fn recv(&self) -> Result<Message, Error> {
let mut stream = self.recv.lock().await; let mut stream = self.recv.lock().await;
let msg = Message::decode(&mut *stream, &self.ext) Message::decode(&mut *stream).await.map_err(|_e| Error::Read)
.await
.map_err(|e| SessionError::Unknown(e.to_string()))?;
Ok(msg)
} }
} }

View File

@ -1,101 +0,0 @@
use crate::{cache, coding, setup, MoqError, VarInt};
#[derive(thiserror::Error, Debug)]
pub enum SessionError {
#[error("webtransport error: {0}")]
Session(#[from] webtransport_quinn::SessionError),
#[error("cache error: {0}")]
Cache(#[from] cache::CacheError),
#[error("encode error: {0}")]
Encode(#[from] coding::EncodeError),
#[error("decode error: {0}")]
Decode(#[from] coding::DecodeError),
#[error("unsupported versions: client={0:?} server={1:?}")]
Version(setup::Versions, setup::Versions),
#[error("incompatible roles: client={0:?} server={1:?}")]
RoleIncompatible(setup::Role, setup::Role),
	/// An error occurred while reading from the QUIC stream.
#[error("failed to read from stream: {0}")]
Read(#[from] webtransport_quinn::ReadError),
	/// An error occurred while writing to the QUIC stream.
#[error("failed to write to stream: {0}")]
Write(#[from] webtransport_quinn::WriteError),
	/// The role negotiated in the handshake was violated. For example, a publisher sent a SUBSCRIBE, or a subscriber sent an OBJECT.
#[error("role violation: msg={0}")]
RoleViolation(VarInt),
/// Our enforced stream mapping was disrespected.
#[error("stream mapping conflict")]
StreamMapping,
/// The priority was invalid.
#[error("invalid priority: {0}")]
InvalidPriority(VarInt),
/// The size was invalid.
#[error("invalid size: {0}")]
InvalidSize(VarInt),
/// A required extension was not offered.
#[error("required extension not offered: {0:?}")]
RequiredExtension(VarInt),
/// An unclassified error because I'm lazy. TODO classify these errors
#[error("unknown error: {0}")]
Unknown(String),
}
impl MoqError for SessionError {
/// An integer code that is sent over the wire.
fn code(&self) -> u32 {
match self {
Self::Cache(err) => err.code(),
Self::RoleIncompatible(..) => 406,
Self::RoleViolation(..) => 405,
Self::StreamMapping => 409,
Self::Unknown(_) => 500,
Self::Write(_) => 501,
Self::Read(_) => 502,
Self::Session(_) => 503,
Self::Version(..) => 406,
Self::Encode(_) => 500,
Self::Decode(_) => 500,
Self::InvalidPriority(_) => 400,
Self::InvalidSize(_) => 400,
Self::RequiredExtension(_) => 426,
}
}
/// A reason that is sent over the wire.
fn reason(&self) -> String {
match self {
Self::Cache(err) => err.reason(),
Self::RoleViolation(kind) => format!("role violation for message type {:?}", kind),
Self::RoleIncompatible(client, server) => {
format!(
"role incompatible: client wanted {:?} but server wanted {:?}",
client, server
)
}
Self::Read(err) => format!("read error: {}", err),
Self::Write(err) => format!("write error: {}", err),
Self::Session(err) => format!("session error: {}", err),
Self::Unknown(err) => format!("unknown error: {}", err),
Self::Version(client, server) => format!("unsupported versions: client={:?} server={:?}", client, server),
Self::Encode(err) => format!("encode error: {}", err),
Self::Decode(err) => format!("decode error: {}", err),
Self::StreamMapping => "streaming mapping conflict".to_owned(),
Self::InvalidPriority(priority) => format!("invalid priority: {}", priority),
Self::InvalidSize(size) => format!("invalid size: {}", size),
Self::RequiredExtension(id) => format!("required extension was missing: {:?}", id),
}
}
}
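For reference, a minimal sketch of implementing the removed MoqError contract for a new error type; the trait shape is taken from the file above, while the AppError variants and codes are hypothetical:

// A numeric code and a human-readable reason, both sent over the wire
// when closing a stream or session.
trait MoqError {
    fn code(&self) -> u32;
    fn reason(&self) -> String;
}

#[derive(Debug)]
enum AppError {
    NotFound, // nothing matched the subscription
    Stopped,  // the peer asked us to stop
}

impl MoqError for AppError {
    fn code(&self) -> u32 {
        match self {
            AppError::NotFound => 404,
            AppError::Stopped => 409,
        }
    }

    fn reason(&self) -> String {
        match self {
            AppError::NotFound => "not found".to_string(),
            AppError::Stopped => "stopped".to_string(),
        }
    }
}

fn main() {
    let err = AppError::NotFound;
    println!("closing with code={} reason={}", err.code(), err.reason());
}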

View File

@ -14,14 +14,12 @@
mod client; mod client;
mod control; mod control;
mod error;
mod publisher; mod publisher;
mod server; mod server;
mod subscriber; mod subscriber;
pub use client::*; pub use client::*;
pub(crate) use control::*; pub(crate) use control::*;
pub use error::*;
pub use publisher::*; pub use publisher::*;
pub use server::*; pub use server::*;
pub use subscriber::*; pub use subscriber::*;

View File

@ -4,16 +4,16 @@ use std::{
}; };
use tokio::task::AbortHandle; use tokio::task::AbortHandle;
use webtransport_quinn::Session; use webtransport_quinn::{RecvStream, SendStream, Session};
use crate::{ use crate::{
cache::{broadcast, segment, track, CacheError},
message, message,
message::Message, message::Message,
MoqError, VarInt, model::{broadcast, segment, track},
Error, VarInt,
}; };
use super::{Control, SessionError}; use super::Control;
/// Serves broadcasts over the network, automatically handling subscriptions and caching. /// Serves broadcasts over the network, automatically handling subscriptions and caching.
// TODO Clone specific fields when a task actually needs it. // TODO Clone specific fields when a task actually needs it.
@ -27,80 +27,63 @@ pub struct Publisher {
} }
impl Publisher { impl Publisher {
pub(crate) fn new(webtransport: Session, control: Control, source: broadcast::Subscriber) -> Self { pub(crate) fn new(webtransport: Session, control: (SendStream, RecvStream), source: broadcast::Subscriber) -> Self {
let control = Control::new(control.0, control.1);
Self { Self {
webtransport, webtransport,
control,
subscribes: Default::default(), subscribes: Default::default(),
control,
source, source,
} }
} }
// TODO Serve a broadcast without sending an ANNOUNCE. // TODO Serve a broadcast without sending an ANNOUNCE.
// fn serve(&mut self, broadcast: broadcast::Subscriber) -> Result<(), SessionError> { // fn serve(&mut self, broadcast: broadcast::Subscriber) -> Result<(), Error> {
// TODO Wait until the next subscribe that doesn't route to an ANNOUNCE. // TODO Wait until the next subscribe that doesn't route to an ANNOUNCE.
// pub async fn subscribed(&mut self) -> Result<track::Producer, SessionError> { // pub async fn subscribed(&mut self) -> Result<track::Producer, Error> {
pub async fn run(mut self) -> Result<(), SessionError> { pub async fn run(mut self) -> Result<(), Error> {
let res = self.run_inner().await;
// Terminate all active subscribes on error.
self.subscribes
.lock()
.unwrap()
.drain()
.for_each(|(_, abort)| abort.abort());
res
}
pub async fn run_inner(&mut self) -> Result<(), SessionError> {
loop { loop {
tokio::select! { tokio::select! {
stream = self.webtransport.accept_uni() => { _stream = self.webtransport.accept_uni() => {
stream?; return Err(Error::Role(VarInt::ZERO));
return Err(SessionError::RoleViolation(VarInt::ZERO));
} }
// NOTE: this is not cancel safe, but it's fine since the other branches are fatal. // NOTE: this is not cancel safe, but it's fine since the other branch is a fatal error.
msg = self.control.recv() => { msg = self.control.recv() => {
let msg = msg?; let msg = msg.map_err(|_x| Error::Read)?;
log::info!("message received: {:?}", msg); log::info!("message received: {:?}", msg);
if let Err(err) = self.recv_message(&msg).await { if let Err(err) = self.recv_message(&msg).await {
log::warn!("message error: {:?} {:?}", err, msg); log::warn!("message error: {:?} {:?}", err, msg);
} }
}, }
// No more broadcasts are available.
err = self.source.closed() => {
self.webtransport.close(err.code(), err.reason().as_bytes());
return Ok(());
},
} }
} }
} }
async fn recv_message(&mut self, msg: &Message) -> Result<(), SessionError> { async fn recv_message(&mut self, msg: &Message) -> Result<(), Error> {
match msg { match msg {
Message::AnnounceOk(msg) => self.recv_announce_ok(msg).await, Message::AnnounceOk(msg) => self.recv_announce_ok(msg).await,
Message::AnnounceError(msg) => self.recv_announce_error(msg).await, Message::AnnounceStop(msg) => self.recv_announce_stop(msg).await,
Message::Subscribe(msg) => self.recv_subscribe(msg).await, Message::Subscribe(msg) => self.recv_subscribe(msg).await,
Message::Unsubscribe(msg) => self.recv_unsubscribe(msg).await, Message::SubscribeStop(msg) => self.recv_subscribe_stop(msg).await,
_ => Err(SessionError::RoleViolation(msg.id())), _ => Err(Error::Role(msg.id())),
} }
} }
async fn recv_announce_ok(&mut self, _msg: &message::AnnounceOk) -> Result<(), SessionError> { async fn recv_announce_ok(&mut self, _msg: &message::AnnounceOk) -> Result<(), Error> {
// We didn't send an announce. // We didn't send an announce.
Err(CacheError::NotFound.into()) Err(Error::NotFound)
} }
async fn recv_announce_error(&mut self, _msg: &message::AnnounceError) -> Result<(), SessionError> { async fn recv_announce_stop(&mut self, _msg: &message::AnnounceStop) -> Result<(), Error> {
// We didn't send an announce. // We didn't send an announce.
Err(CacheError::NotFound.into()) Err(Error::NotFound)
} }
async fn recv_subscribe(&mut self, msg: &message::Subscribe) -> Result<(), SessionError> { async fn recv_subscribe(&mut self, msg: &message::Subscribe) -> Result<(), Error> {
// Assume that the subscribe ID is unique for now. // Assume that the subscribe ID is unique for now.
let abort = match self.start_subscribe(msg.clone()) { let abort = match self.start_subscribe(msg.clone()) {
Ok(abort) => abort, Ok(abort) => abort,
@ -109,38 +92,27 @@ impl Publisher {
// Insert the abort handle into the lookup table. // Insert the abort handle into the lookup table.
match self.subscribes.lock().unwrap().entry(msg.id) { match self.subscribes.lock().unwrap().entry(msg.id) {
hash_map::Entry::Occupied(_) => return Err(CacheError::Duplicate.into()), // TODO fatal, because we already started the task hash_map::Entry::Occupied(_) => return Err(Error::Duplicate), // TODO fatal, because we already started the task
hash_map::Entry::Vacant(entry) => entry.insert(abort), hash_map::Entry::Vacant(entry) => entry.insert(abort),
}; };
self.control self.control.send(message::SubscribeOk { id: msg.id }).await
.send(message::SubscribeOk {
id: msg.id,
expires: VarInt::ZERO,
})
.await
} }
async fn reset_subscribe<E: MoqError>(&mut self, id: VarInt, err: E) -> Result<(), SessionError> { async fn reset_subscribe(&mut self, id: VarInt, err: Error) -> Result<(), Error> {
let msg = message::SubscribeReset { let msg = message::SubscribeReset {
id, id,
code: err.code(), code: err.code(),
reason: err.reason(), reason: err.reason().to_string(),
// TODO properly populate these
// But first: https://github.com/moq-wg/moq-transport/issues/313
final_group: VarInt::ZERO,
final_object: VarInt::ZERO,
}; };
self.control.send(msg).await self.control.send(msg).await
} }
fn start_subscribe(&mut self, msg: message::Subscribe) -> Result<AbortHandle, SessionError> { fn start_subscribe(&mut self, msg: message::Subscribe) -> Result<AbortHandle, Error> {
// We currently don't use the namespace field in SUBSCRIBE // We currently don't use the namespace field in SUBSCRIBE
// Make sure the namespace is empty if it's provided. if !msg.namespace.is_empty() {
if msg.namespace.as_ref().map_or(false, |namespace| !namespace.is_empty()) { return Err(Error::NotFound);
return Err(CacheError::NotFound.into());
} }
let mut track = self.source.get_track(&msg.name)?; let mut track = self.source.get_track(&msg.name)?;
@ -153,11 +125,11 @@ impl Publisher {
let res = this.run_subscribe(msg.id, &mut track).await; let res = this.run_subscribe(msg.id, &mut track).await;
if let Err(err) = &res { if let Err(err) = &res {
log::warn!("failed to serve track: name={} err={:#?}", track.name, err); log::warn!("failed to serve track: name={} err={:?}", track.name, err);
} }
// Make sure we send a reset at the end. // Make sure we send a reset at the end.
let err = res.err().unwrap_or(CacheError::Closed.into()); let err = res.err().unwrap_or(Error::Closed);
this.reset_subscribe(msg.id, err).await.ok(); this.reset_subscribe(msg.id, err).await.ok();
// We're all done, so clean up the abort handle. // We're all done, so clean up the abort handle.
@ -167,7 +139,7 @@ impl Publisher {
Ok(handle.abort_handle()) Ok(handle.abort_handle())
} }
async fn run_subscribe(&self, id: VarInt, track: &mut track::Subscriber) -> Result<(), SessionError> { async fn run_subscribe(&self, id: VarInt, track: &mut track::Subscriber) -> Result<(), Error> {
// TODO add an Ok method to track::Publisher so we can send SUBSCRIBE_OK // TODO add an Ok method to track::Publisher so we can send SUBSCRIBE_OK
while let Some(mut segment) = track.next_segment().await? { while let Some(mut segment) = track.next_segment().await? {
@ -184,51 +156,34 @@ impl Publisher {
Ok(()) Ok(())
} }
async fn run_segment(&self, id: VarInt, segment: &mut segment::Subscriber) -> Result<(), SessionError> { async fn run_segment(&self, id: VarInt, segment: &mut segment::Subscriber) -> Result<(), Error> {
log::trace!("serving group: {:?}", segment); let object = message::Object {
track: id,
sequence: segment.sequence,
priority: segment.priority,
expires: segment.expires,
};
let mut stream = self.webtransport.open_uni().await?; log::debug!("serving object: {:?}", object);
// Convert the u32 to an i32, since the Quinn set_priority is signed. let mut stream = self.webtransport.open_uni().await.map_err(|_e| Error::Write)?;
let priority = (segment.priority as i64 - i32::MAX as i64) as i32;
stream.set_priority(priority).ok();
while let Some(mut fragment) = segment.next_fragment().await? { stream.set_priority(object.priority).ok();
let object = message::Object {
track: id,
// Properties of the segment // TODO better handle the error.
group: segment.sequence, object.encode(&mut stream).await.map_err(|_e| Error::Write)?;
priority: segment.priority,
expires: segment.expires,
// Properties of the fragment while let Some(data) = segment.read_chunk().await? {
sequence: fragment.sequence, stream.write_chunk(data).await.map_err(|_e| Error::Write)?;
size: fragment.size,
};
object
.encode(&mut stream, &self.control.ext)
.await
.map_err(|e| SessionError::Unknown(e.to_string()))?;
while let Some(chunk) = fragment.read_chunk().await? {
stream.write_all(&chunk).await?;
}
} }
Ok(()) Ok(())
} }
async fn recv_unsubscribe(&mut self, msg: &message::Unsubscribe) -> Result<(), SessionError> { async fn recv_subscribe_stop(&mut self, msg: &message::SubscribeStop) -> Result<(), Error> {
let abort = self let abort = self.subscribes.lock().unwrap().remove(&msg.id).ok_or(Error::NotFound)?;
.subscribes
.lock()
.unwrap()
.remove(&msg.id)
.ok_or(CacheError::NotFound)?;
abort.abort(); abort.abort();
self.reset_subscribe(msg.id, CacheError::Stop).await self.reset_subscribe(msg.id, Error::Stop).await
} }
} }
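An aside on the left column's run_segment: it recenters the unsigned u32 priority onto Quinn's signed i32 scale before calling set_priority. A standalone sketch of that conversion (function name hypothetical):

// Shift a u32 priority into the i32 range expected by Quinn.
fn quinn_priority(priority: u32) -> i32 {
    (priority as i64 - i32::MAX as i64) as i32
}

fn main() {
    assert_eq!(quinn_priority(0), i32::MIN + 1);    // lowest
    assert_eq!(quinn_priority(i32::MAX as u32), 0); // midpoint
    // The final `as i32` cast wraps for the single top value, so
    // u32::MAX lands on i32::MIN rather than i32::MAX.
    assert_eq!(quinn_priority(u32::MAX), i32::MIN);
}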

View File

@ -1,8 +1,10 @@
use super::{Control, Publisher, SessionError, Subscriber}; use super::{Publisher, Subscriber};
use crate::{cache::broadcast, setup}; use crate::{model::broadcast, setup};
use webtransport_quinn::{RecvStream, SendStream, Session}; use webtransport_quinn::{RecvStream, SendStream, Session};
use anyhow::Context;
/// An endpoint that accepts connections, publishing and/or consuming live streams. /// An endpoint that accepts connections, publishing and/or consuming live streams.
pub struct Server {} pub struct Server {}
@ -10,35 +12,18 @@ impl Server {
/// Accept an established Webtransport session, performing the MoQ handshake. /// Accept an established Webtransport session, performing the MoQ handshake.
/// ///
/// This returns a [Request] half-way through the handshake that allows the application to accept or deny the session. /// This returns a [Request] half-way through the handshake that allows the application to accept or deny the session.
pub async fn accept(session: Session) -> Result<Request, SessionError> { pub async fn accept(session: Session) -> anyhow::Result<Request> {
let mut control = session.accept_bi().await?; let mut control = session.accept_bi().await.context("failed to accept bidi stream")?;
let mut client = setup::Client::decode(&mut control.1).await?; let client = setup::Client::decode(&mut control.1)
.await
.context("failed to read CLIENT SETUP")?;
if client.versions.contains(&setup::Version::DRAFT_01) { client
// We always require subscriber ID. .versions
client.extensions.require_subscriber_id()?; .iter()
.find(|version| **version == setup::Version::KIXEL_00)
// We require OBJECT_EXPIRES for publishers only. .context("no supported versions")?;
if client.role.is_publisher() {
client.extensions.require_object_expires()?;
}
// We don't require SUBSCRIBE_SPLIT since it's easy enough to support, but it's clearly an oversight.
// client.extensions.require(&Extension::SUBSCRIBE_SPLIT)?;
} else if client.versions.contains(&setup::Version::KIXEL_01) {
// Extensions didn't exist in KIXEL_01, so we set them manually.
client.extensions = setup::Extensions {
object_expires: true,
subscriber_id: true,
subscribe_split: true,
};
} else {
return Err(SessionError::Version(
client.versions,
[setup::Version::DRAFT_01, setup::Version::KIXEL_01].into(),
));
}
Ok(Request { Ok(Request {
session, session,
@ -57,22 +42,18 @@ pub struct Request {
impl Request { impl Request {
/// Accept the session as a publisher, using the provided broadcast to serve subscriptions. /// Accept the session as a publisher, using the provided broadcast to serve subscriptions.
pub async fn publisher(mut self, source: broadcast::Subscriber) -> Result<Publisher, SessionError> { pub async fn publisher(mut self, source: broadcast::Subscriber) -> anyhow::Result<Publisher> {
let setup = self.setup(setup::Role::Publisher)?; self.send_setup(setup::Role::Publisher).await?;
setup.encode(&mut self.control.0).await?;
let control = Control::new(self.control.0, self.control.1, setup.extensions); let publisher = Publisher::new(self.session, self.control, source);
let publisher = Publisher::new(self.session, control, source);
Ok(publisher) Ok(publisher)
} }
/// Accept the session as a subscriber only. /// Accept the session as a subscriber only.
pub async fn subscriber(mut self, source: broadcast::Publisher) -> Result<Subscriber, SessionError> { pub async fn subscriber(mut self, source: broadcast::Publisher) -> anyhow::Result<Subscriber> {
let setup = self.setup(setup::Role::Subscriber)?; self.send_setup(setup::Role::Subscriber).await?;
setup.encode(&mut self.control.0).await?;
let control = Control::new(self.control.0, self.control.1, setup.extensions); let subscriber = Subscriber::new(self.session, self.control, source);
let subscriber = Subscriber::new(self.session, control, source);
Ok(subscriber) Ok(subscriber)
} }
@ -83,21 +64,28 @@ impl Request {
} }
*/ */
fn setup(&mut self, role: setup::Role) -> Result<setup::Server, SessionError> { async fn send_setup(&mut self, role: setup::Role) -> anyhow::Result<()> {
let server = setup::Server { let server = setup::Server {
role, role,
version: setup::Version::DRAFT_01, version: setup::Version::KIXEL_00,
extensions: self.client.extensions.clone(),
params: Default::default(),
}; };
// We need to make sure we support the opposite of the client's role. // We need to make sure we support the opposite of the client's role.
// ex. if the client is a publisher, we must be a subscriber ONLY. // ex. if the client is a publisher, we must be a subscriber ONLY.
if !self.client.role.is_compatible(server.role) { if !self.client.role.is_compatible(server.role) {
return Err(SessionError::RoleIncompatible(self.client.role, server.role)); anyhow::bail!(
"incompatible roles: client={:?} server={:?}",
self.client.role,
server.role
);
} }
Ok(server) server
.encode(&mut self.control.0)
.await
.context("failed to send setup server")?;
Ok(())
} }
/// Reject the request, closing the Webtransport session. /// Reject the request, closing the Webtransport session.
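The new accept() reduces version negotiation to a single scan of the client's offered list. A self-contained sketch of the shape of that check, using the version constants from the version.rs diff below (helper name hypothetical):

// Pick the first client-offered version the server supports; the
// handshake fails if none match.
fn select_version(offered: &[u64], supported: &[u64]) -> Option<u64> {
    offered.iter().copied().find(|v| supported.contains(v))
}

fn main() {
    let offered = [0xff00, 0xbad00]; // DRAFT_00, KIXEL_00
    let supported = [0xbad00];       // the right column only keeps KIXEL_00
    assert_eq!(select_version(&offered, &supported), Some(0xbad00));
}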

View File

@ -1,4 +1,4 @@
use webtransport_quinn::{RecvStream, Session}; use webtransport_quinn::{RecvStream, SendStream, Session};
use std::{ use std::{
collections::HashMap, collections::HashMap,
@ -6,14 +6,14 @@ use std::{
}; };
use crate::{ use crate::{
cache::{broadcast, fragment, segment, track, CacheError},
coding::DecodeError,
message, message,
message::Message, message::Message,
session::{Control, SessionError}, model::{broadcast, segment, track},
VarInt, Error, VarInt,
}; };
use super::Control;
/// Receives broadcasts over the network, automatically handling subscriptions and caching. /// Receives broadcasts over the network, automatically handling subscriptions and caching.
// TODO Clone specific fields when a task actually needs it. // TODO Clone specific fields when a task actually needs it.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
@ -35,7 +35,9 @@ pub struct Subscriber {
} }
impl Subscriber { impl Subscriber {
pub(crate) fn new(webtransport: Session, control: Control, source: broadcast::Publisher) -> Self { pub(crate) fn new(webtransport: Session, control: (SendStream, RecvStream), source: broadcast::Publisher) -> Self {
let control = Control::new(control.0, control.1);
Self { Self {
webtransport, webtransport,
subscribes: Default::default(), subscribes: Default::default(),
@ -45,7 +47,7 @@ impl Subscriber {
} }
} }
pub async fn run(self) -> Result<(), SessionError> { pub async fn run(self) -> Result<(), Error> {
let inbound = self.clone().run_inbound(); let inbound = self.clone().run_inbound();
let streams = self.clone().run_streams(); let streams = self.clone().run_streams();
let source = self.clone().run_source(); let source = self.clone().run_source();
@ -58,130 +60,79 @@ impl Subscriber {
} }
} }
async fn run_inbound(mut self) -> Result<(), SessionError> { async fn run_inbound(mut self) -> Result<(), Error> {
loop { loop {
let msg = self.control.recv().await?; let msg = self.control.recv().await.map_err(|_e| Error::Read)?;
log::info!("message received: {:?}", msg); log::info!("message received: {:?}", msg);
if let Err(err) = self.recv_message(&msg) { if let Err(err) = self.recv_message(&msg).await {
log::warn!("message error: {:?} {:?}", err, msg); log::warn!("message error: {:?} {:?}", err, msg);
} }
} }
} }
fn recv_message(&mut self, msg: &Message) -> Result<(), SessionError> { async fn recv_message(&mut self, msg: &Message) -> Result<(), Error> {
match msg { match msg {
Message::Announce(_) => Ok(()), // don't care Message::Announce(_) => Ok(()), // don't care
Message::Unannounce(_) => Ok(()), // also don't care Message::AnnounceReset(_) => Ok(()), // also don't care
Message::SubscribeOk(_msg) => Ok(()), // don't care Message::SubscribeOk(_) => Ok(()), // guess what, don't care
Message::SubscribeReset(msg) => self.recv_subscribe_error(msg.id, CacheError::Reset(msg.code)), Message::SubscribeReset(msg) => self.recv_subscribe_reset(msg).await,
Message::SubscribeFin(msg) => self.recv_subscribe_error(msg.id, CacheError::Closed),
Message::SubscribeError(msg) => self.recv_subscribe_error(msg.id, CacheError::Reset(msg.code)),
Message::GoAway(_msg) => unimplemented!("GOAWAY"), Message::GoAway(_msg) => unimplemented!("GOAWAY"),
_ => Err(SessionError::RoleViolation(msg.id())), _ => Err(Error::Role(msg.id())),
} }
} }
fn recv_subscribe_error(&mut self, id: VarInt, err: CacheError) -> Result<(), SessionError> { async fn recv_subscribe_reset(&mut self, msg: &message::SubscribeReset) -> Result<(), Error> {
let err = Error::Reset(msg.code);
let mut subscribes = self.subscribes.lock().unwrap(); let mut subscribes = self.subscribes.lock().unwrap();
let subscribe = subscribes.remove(&id).ok_or(CacheError::NotFound)?; let subscribe = subscribes.remove(&msg.id).ok_or(Error::NotFound)?;
subscribe.close(err)?; subscribe.close(err)?;
Ok(()) Ok(())
} }
async fn run_streams(self) -> Result<(), SessionError> { async fn run_streams(self) -> Result<(), Error> {
loop { loop {
// Accept all incoming unidirectional streams. // Accept all incoming unidirectional streams.
let stream = self.webtransport.accept_uni().await?; let stream = self.webtransport.accept_uni().await.map_err(|_| Error::Read)?;
let this = self.clone(); let this = self.clone();
tokio::spawn(async move { tokio::spawn(async move {
if let Err(err) = this.run_stream(stream).await { if let Err(err) = this.run_stream(stream).await {
log::warn!("failed to receive stream: err={:#?}", err); log::warn!("failed to receive stream: err={:?}", err);
} }
}); });
} }
} }
async fn run_stream(self, mut stream: RecvStream) -> Result<(), SessionError> { async fn run_stream(self, mut stream: RecvStream) -> Result<(), Error> {
// Decode the object on the data stream. // Decode the object on the data stream.
let mut object = message::Object::decode(&mut stream, &self.control.ext) let object = message::Object::decode(&mut stream).await.map_err(|_| Error::Read)?;
.await
.map_err(|e| SessionError::Unknown(e.to_string()))?;
log::trace!("received object: {:?}", object); log::debug!("received object: {:?}", object);
// A new scope is needed because the async compiler is dumb // A new scope is needed because the async compiler is dumb
let mut segment = { let mut publisher = {
let mut subscribes = self.subscribes.lock().unwrap(); let mut subscribes = self.subscribes.lock().unwrap();
let track = subscribes.get_mut(&object.track).ok_or(CacheError::NotFound)?; let track = subscribes.get_mut(&object.track).ok_or(Error::NotFound)?;
track.create_segment(segment::Info { track.create_segment(segment::Info {
sequence: object.group, sequence: object.sequence,
priority: object.priority, priority: object.priority,
expires: object.expires, expires: object.expires,
})? })?
}; };
// Create the first fragment while let Some(data) = stream.read_chunk(usize::MAX, true).await.map_err(|_| Error::Read)? {
let mut fragment = segment.create_fragment(fragment::Info { publisher.write_chunk(data.bytes)?;
sequence: object.sequence,
size: object.size,
})?;
let mut remain = object.size.map(usize::from);
loop {
if let Some(0) = remain {
// Decode the next object from the stream.
let next = match message::Object::decode(&mut stream, &self.control.ext).await {
Ok(next) => next,
// No more objects
Err(DecodeError::Final) => break,
// Unknown error
Err(err) => return Err(err.into()),
};
// NOTE: This is a custom restriction; not part of the moq-transport draft.
// We require every OBJECT to contain the same priority since prioritization is done per-stream.
// We also require every OBJECT to contain the same group so we know when the group ends, and can detect gaps.
if next.priority != object.priority && next.group != object.group {
return Err(SessionError::StreamMapping);
}
// Create a new object.
fragment = segment.create_fragment(fragment::Info {
sequence: object.sequence,
size: object.size,
})?;
object = next;
remain = object.size.map(usize::from);
}
match stream.read_chunk(remain.unwrap_or(usize::MAX), true).await? {
// Unbounded object has ended
None if remain.is_none() => break,
// Bounded object ended early, oops.
None => return Err(DecodeError::UnexpectedEnd.into()),
// NOTE: This does not make a copy!
// Bytes are immutable and ref counted.
Some(data) => fragment.write_chunk(data.bytes)?,
}
} }
Ok(()) Ok(())
} }
async fn run_source(mut self) -> Result<(), SessionError> { async fn run_source(mut self) -> Result<(), Error> {
loop { while let Some(track) = self.source.next_track().await? {
// NOTE: This returns Closed when the source is closed.
let track = self.source.next_track().await?;
let name = track.name.clone(); let name = track.name.clone();
let id = VarInt::from_u32(self.next.fetch_add(1, atomic::Ordering::SeqCst)); let id = VarInt::from_u32(self.next.fetch_add(1, atomic::Ordering::SeqCst));
@ -189,19 +140,13 @@ impl Subscriber {
let msg = message::Subscribe { let msg = message::Subscribe {
id, id,
namespace: self.control.ext.subscribe_split.then(|| "".to_string()), namespace: "".to_string(),
name, name,
// TODO correctly support these
start_group: message::SubscribeLocation::Latest(VarInt::ZERO),
start_object: message::SubscribeLocation::Absolute(VarInt::ZERO),
end_group: message::SubscribeLocation::None,
end_object: message::SubscribeLocation::None,
params: Default::default(),
}; };
self.control.send(msg).await?; self.control.send(msg).await?;
} }
Ok(())
} }
} }
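One detail that survives the rewrite of run_source: subscribe IDs come from a shared atomic counter, so every SUBSCRIBE gets a unique, monotonically increasing ID even across cloned Subscriber handles. A minimal sketch of that allocation:

use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;

fn main() {
    // Mirrors `self.next.fetch_add(1, atomic::Ordering::SeqCst)` above.
    let next = Arc::new(AtomicU32::new(0));

    let clone = next.clone();
    let id0 = clone.fetch_add(1, Ordering::SeqCst);
    let id1 = next.fetch_add(1, Ordering::SeqCst);

    assert_eq!((id0, id1), (0, 1)); // IDs never repeat, even across clones
}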

View File

@ -1,6 +1,6 @@
use super::{Extensions, Role, Versions}; use super::{Role, Versions};
use crate::{ use crate::{
coding::{Decode, DecodeError, Encode, EncodeError, Params}, coding::{DecodeError, EncodeError},
VarInt, VarInt,
}; };
@ -15,57 +15,29 @@ pub struct Client {
pub versions: Versions, pub versions: Versions,
/// Indicate if the client is a publisher, a subscriber, or both. /// Indicate if the client is a publisher, a subscriber, or both.
// Proposal: moq-wg/moq-transport#151
pub role: Role, pub role: Role,
/// A list of known/offered extensions.
pub extensions: Extensions,
/// Unknown parameters.
pub params: Params,
} }
impl Client { impl Client {
/// Decode a client setup message. /// Decode a client setup message.
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let typ = VarInt::decode(r).await?; let typ = VarInt::decode(r).await?;
if typ.into_inner() != 0x40 { if typ.into_inner() != 1 {
return Err(DecodeError::InvalidMessage(typ)); return Err(DecodeError::InvalidType(typ));
} }
let versions = Versions::decode(r).await?; let versions = Versions::decode(r).await?;
let mut params = Params::decode(r).await?; let role = Role::decode(r).await?;
let role = params Ok(Self { versions, role })
.get::<Role>(VarInt::from_u32(0))
.await?
.ok_or(DecodeError::MissingParameter)?;
// Make sure the PATH parameter isn't used
// TODO: This assumes WebTransport support only
if params.has(VarInt::from_u32(1)) {
return Err(DecodeError::InvalidParameter);
}
let extensions = Extensions::load(&mut params).await?;
Ok(Self {
versions,
role,
extensions,
params,
})
} }
/// Encode a client setup message. /// Encode a client setup message.
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
VarInt::from_u32(0x40).encode(w).await?; VarInt::from_u32(1).encode(w).await?;
self.versions.encode(w).await?; self.versions.encode(w).await?;
self.role.encode(w).await?;
let mut params = self.params.clone();
params.set(VarInt::from_u32(0), self.role).await?;
self.extensions.store(&mut params).await?;
params.encode(w).await?;
Ok(()) Ok(())
} }
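Both columns frame everything with VarInt: the message type, the version list, and (on the left) the params. For reference, a minimal encoder for QUIC variable-length integers, the scheme VarInt implements; this is the generic wire format, not this crate's exact API:

// The two high bits of the first byte give the total length
// (00 = 1 byte, 01 = 2, 10 = 4, 11 = 8).
fn encode_varint(v: u64, out: &mut Vec<u8>) {
    if v < 1 << 6 {
        out.push(v as u8);
    } else if v < 1 << 14 {
        out.extend_from_slice(&((v as u16) | 0x4000).to_be_bytes());
    } else if v < 1 << 30 {
        out.extend_from_slice(&((v as u32) | 0x8000_0000).to_be_bytes());
    } else {
        assert!(v < 1 << 62, "value too large for a varint");
        out.extend_from_slice(&(v | 0xc000_0000_0000_0000).to_be_bytes());
    }
}

fn main() {
    let mut buf = Vec::new();
    encode_varint(0x40, &mut buf); // the left column's CLIENT SETUP type
    assert_eq!(buf, [0x40, 0x40]); // 0x40 = 64 needs the two-byte form
}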

View File

@ -1,84 +0,0 @@
use tokio::io::{AsyncRead, AsyncWrite};
use crate::coding::{Decode, DecodeError, Encode, EncodeError, Params};
use crate::session::SessionError;
use crate::VarInt;
use paste::paste;
/// This is a custom extension scheme to allow/require draft PRs.
///
/// By convention, the extension number is the PR number + 0xe0000.
macro_rules! extensions {
{$($name:ident = $val:expr,)*} => {
#[derive(Clone, Default, Debug)]
pub struct Extensions {
$(
pub $name: bool,
)*
}
impl Extensions {
pub async fn load(params: &mut Params) -> Result<Self, DecodeError> {
let mut extensions = Self::default();
$(
if let Some(_) = params.get::<ExtensionExists>(VarInt::from_u32($val)).await? {
extensions.$name = true
}
)*
Ok(extensions)
}
pub async fn store(&self, params: &mut Params) -> Result<(), EncodeError> {
$(
if self.$name {
params.set(VarInt::from_u32($val), ExtensionExists{}).await?;
}
)*
Ok(())
}
paste! {
$(
pub fn [<require_ $name>](&self) -> Result<(), SessionError> {
match self.$name {
true => Ok(()),
false => Err(SessionError::RequiredExtension(VarInt::from_u32($val))),
}
}
)*
}
}
}
}
struct ExtensionExists;
#[async_trait::async_trait]
impl Decode for ExtensionExists {
async fn decode<R: AsyncRead>(_r: &mut R) -> Result<Self, DecodeError> {
Ok(ExtensionExists {})
}
}
#[async_trait::async_trait]
impl Encode for ExtensionExists {
async fn encode<W: AsyncWrite>(&self, _w: &mut W) -> Result<(), EncodeError> {
Ok(())
}
}
extensions! {
// required for publishers: OBJECT contains expires VarInt in seconds: https://github.com/moq-wg/moq-transport/issues/249
// TODO write up a PR
object_expires = 0xe00f9,
// required: SUBSCRIBE chooses track ID: https://github.com/moq-wg/moq-transport/pull/258
subscriber_id = 0xe0102,
// optional: SUBSCRIBE contains namespace/name tuple: https://github.com/moq-wg/moq-transport/pull/277
subscribe_split = 0xe0115,
}
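To make the macro concrete, this is roughly what a single generated flag looks like after expansion, with the error type simplified to a String rather than the real SessionError:

// Approximate expansion of `extensions! { subscriber_id = 0xe0102, }`:
// one bool per extension plus a generated require_* helper.
#[derive(Clone, Default, Debug)]
pub struct Extensions {
    pub subscriber_id: bool,
}

impl Extensions {
    pub fn require_subscriber_id(&self) -> Result<(), String> {
        match self.subscriber_id {
            true => Ok(()),
            false => Err(format!("required extension not offered: {:#x}", 0xe0102)),
        }
    }
}

fn main() {
    let offered = Extensions { subscriber_id: true };
    assert!(offered.require_subscriber_id().is_ok());
    assert!(Extensions::default().require_subscriber_id().is_err());
}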

View File

@ -5,13 +5,11 @@
//! Both sides negotiate the [Version] and [Role]. //! Both sides negotiate the [Version] and [Role].
mod client; mod client;
mod extension;
mod role; mod role;
mod server; mod server;
mod version; mod version;
pub use client::*; pub use client::*;
pub use extension::*;
pub use role::*; pub use role::*;
pub use server::*; pub use server::*;
pub use version::*; pub use version::*;

View File

@ -1,6 +1,6 @@
use crate::coding::{AsyncRead, AsyncWrite}; use crate::coding::{AsyncRead, AsyncWrite};
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; use crate::coding::{DecodeError, EncodeError, VarInt};
/// Indicates the endpoint is a publisher, subscriber, or both. /// Indicates the endpoint is a publisher, subscriber, or both.
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
@ -36,9 +36,9 @@ impl Role {
impl From<Role> for VarInt { impl From<Role> for VarInt {
fn from(r: Role) -> Self { fn from(r: Role) -> Self {
VarInt::from_u32(match r { VarInt::from_u32(match r {
Role::Publisher => 0x1, Role::Publisher => 0x0,
Role::Subscriber => 0x2, Role::Subscriber => 0x1,
Role::Both => 0x3, Role::Both => 0x2,
}) })
} }
} }
@ -48,27 +48,23 @@ impl TryFrom<VarInt> for Role {
fn try_from(v: VarInt) -> Result<Self, Self::Error> { fn try_from(v: VarInt) -> Result<Self, Self::Error> {
match v.into_inner() { match v.into_inner() {
0x1 => Ok(Self::Publisher), 0x0 => Ok(Self::Publisher),
0x2 => Ok(Self::Subscriber), 0x1 => Ok(Self::Subscriber),
0x3 => Ok(Self::Both), 0x2 => Ok(Self::Both),
_ => Err(DecodeError::InvalidRole(v)), _ => Err(DecodeError::InvalidType(v)),
} }
} }
} }
#[async_trait::async_trait] impl Role {
impl Decode for Role {
/// Decode the role. /// Decode the role.
async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let v = VarInt::decode(r).await?; let v = VarInt::decode(r).await?;
v.try_into() v.try_into()
} }
}
#[async_trait::async_trait]
impl Encode for Role {
/// Encode the role. /// Encode the role.
async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
VarInt::from(*self).encode(w).await VarInt::from(*self).encode(w).await
} }
} }
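The handshake above leans on Role::is_compatible, which this diff references but never shows. A plausible reconstruction, under the assumption that each side must be able to play the opposite of its peer's role:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Role {
    Publisher,
    Subscriber,
    Both,
}

// Hypothetical: Both pairs with anything; otherwise the roles must be
// opposites.
fn is_compatible(client: Role, server: Role) -> bool {
    matches!(
        (client, server),
        (Role::Both, _)
            | (_, Role::Both)
            | (Role::Publisher, Role::Subscriber)
            | (Role::Subscriber, Role::Publisher)
    )
}

fn main() {
    assert!(is_compatible(Role::Publisher, Role::Subscriber));
    assert!(!is_compatible(Role::Publisher, Role::Publisher));
}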

View File

@ -1,6 +1,6 @@
use super::{Extensions, Role, Version}; use super::{Role, Version};
use crate::{ use crate::{
coding::{Decode, DecodeError, Encode, EncodeError, Params}, coding::{DecodeError, EncodeError},
VarInt, VarInt,
}; };
@ -17,54 +17,27 @@ pub struct Server {
/// Indicate if the server is a publisher, a subscriber, or both. /// Indicate if the server is a publisher, a subscriber, or both.
// Proposal: moq-wg/moq-transport#151 // Proposal: moq-wg/moq-transport#151
pub role: Role, pub role: Role,
/// Custom extensions.
pub extensions: Extensions,
/// Unknown parameters.
pub params: Params,
} }
impl Server { impl Server {
/// Decode the server setup. /// Decode the server setup.
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let typ = VarInt::decode(r).await?; let typ = VarInt::decode(r).await?;
if typ.into_inner() != 0x41 { if typ.into_inner() != 2 {
return Err(DecodeError::InvalidMessage(typ)); return Err(DecodeError::InvalidType(typ));
} }
let version = Version::decode(r).await?; let version = Version::decode(r).await?;
let mut params = Params::decode(r).await?; let role = Role::decode(r).await?;
let role = params Ok(Self { version, role })
.get::<Role>(VarInt::from_u32(0))
.await?
.ok_or(DecodeError::MissingParameter)?;
// Make sure the PATH parameter isn't used
if params.has(VarInt::from_u32(1)) {
return Err(DecodeError::InvalidParameter);
}
let extensions = Extensions::load(&mut params).await?;
Ok(Self {
version,
role,
extensions,
params,
})
} }
/// Encode the server setup. /// Encode the server setup.
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
VarInt::from_u32(0x41).encode(w).await?; VarInt::from_u32(2).encode(w).await?;
self.version.encode(w).await?; self.version.encode(w).await?;
self.role.encode(w).await?;
let mut params = self.params.clone();
params.set(VarInt::from_u32(0), self.role).await?;
self.extensions.store(&mut params).await?;
params.encode(w).await?;
Ok(()) Ok(())
} }

View File

@ -1,4 +1,4 @@
use crate::coding::{Decode, DecodeError, Encode, EncodeError, VarInt}; use crate::coding::{DecodeError, EncodeError, VarInt};
use crate::coding::{AsyncRead, AsyncWrite}; use crate::coding::{AsyncRead, AsyncWrite};
@ -6,15 +6,12 @@ use std::ops::Deref;
/// A version number negotiated during the setup. /// A version number negotiated during the setup.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Version(pub VarInt); pub struct Version(VarInt);
impl Version { impl Version {
/// https://www.ietf.org/archive/id/draft-ietf-moq-transport-00.html /// <https://www.ietf.org/archive/id/draft-ietf-moq-transport-00.html>
pub const DRAFT_00: Version = Version(VarInt::from_u32(0xff00)); pub const DRAFT_00: Version = Version(VarInt::from_u32(0xff00));
/// https://www.ietf.org/archive/id/draft-ietf-moq-transport-01.html
pub const DRAFT_01: Version = Version(VarInt::from_u32(0xff01));
/// Fork of draft-ietf-moq-transport-00. /// Fork of draft-ietf-moq-transport-00.
/// ///
/// Rough list of differences: /// Rough list of differences:
@ -59,18 +56,6 @@ impl Version {
/// # GROUP /// # GROUP
/// - GROUP concept was removed, replaced with OBJECT as a QUIC stream. /// - GROUP concept was removed, replaced with OBJECT as a QUIC stream.
pub const KIXEL_00: Version = Version(VarInt::from_u32(0xbad00)); pub const KIXEL_00: Version = Version(VarInt::from_u32(0xbad00));
/// Fork of draft-ietf-moq-transport-01.
///
/// Most of the KIXEL_00 changes made it into the draft, or were reverted.
/// This was only used for a short time until extensions were created.
///
/// - SUBSCRIBE contains a separate track namespace and track name field (accidental revert). [#277](https://github.com/moq-wg/moq-transport/pull/277)
/// - SUBSCRIBE contains the `track_id` instead of SUBSCRIBE_OK. [#145](https://github.com/moq-wg/moq-transport/issues/145)
/// - SUBSCRIBE_* reference the `track_id` instead of the `track_full_name`. [#145](https://github.com/moq-wg/moq-transport/issues/145)
/// - OBJECT `priority` is still a VarInt, but the max value is a u32 (implementation reasons)
/// - OBJECT messages within the same `group` MUST be on the same QUIC stream.
pub const KIXEL_01: Version = Version(VarInt::from_u32(0xbad01));
} }
impl From<VarInt> for Version { impl From<VarInt> for Version {
@ -103,10 +88,9 @@ impl Version {
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Versions(Vec<Version>); pub struct Versions(Vec<Version>);
#[async_trait::async_trait] impl Versions {
impl Decode for Versions {
/// Decode the version list. /// Decode the version list.
async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> { pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let count = VarInt::decode(r).await?.into_inner(); let count = VarInt::decode(r).await?.into_inner();
let mut vs = Vec::new(); let mut vs = Vec::new();
@ -117,12 +101,9 @@ impl Decode for Versions {
Ok(Self(vs)) Ok(Self(vs))
} }
}
#[async_trait::async_trait]
impl Encode for Versions {
/// Encode the version list. /// Encode the version list.
async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> { pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
let size: VarInt = self.0.len().try_into()?; let size: VarInt = self.0.len().try_into()?;
size.encode(w).await?; size.encode(w).await?;
@ -147,9 +128,3 @@ impl From<Vec<Version>> for Versions {
Self(vs) Self(vs)
} }
} }
impl<const N: usize> From<[Version; N]> for Versions {
fn from(vs: [Version; N]) -> Self {
Self(vs.to_vec())
}
}
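The removed From<[Version; N]> impl existed so call sites could write an array literal with .into(), as the left-hand SessionError::Version construction does; with only From<Vec<Version>> left, callers need a Vec. A toy illustration of the two conversions side by side:

// Newtype over a Vec, mirroring Versions(Vec<Version>).
#[derive(Debug, PartialEq)]
struct Versions(Vec<u64>);

impl From<Vec<u64>> for Versions {
    fn from(vs: Vec<u64>) -> Self {
        Self(vs)
    }
}

// The impl this diff removes: converts a fixed-size array directly.
impl<const N: usize> From<[u64; N]> for Versions {
    fn from(vs: [u64; N]) -> Self {
        Self(vs.to_vec())
    }
}

fn main() {
    let a: Versions = [0xff00, 0xbad00].into();     // array impl
    let b: Versions = vec![0xff00, 0xbad00].into(); // Vec impl
    assert_eq!(a, b);
}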