Major moq-transport API simplification (#68)

Exponentially easier to use moq-transport as there's no message handling required. This is a BREAKING CHANGE.
This commit is contained in:
kixelated 2023-09-15 12:06:28 -07:00 committed by GitHub
parent 35c2127683
commit 88542e266c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
84 changed files with 3172 additions and 2603 deletions

View File

@ -1,3 +1,2 @@
media/*.mp4
target/*
cert/*
target
dev

348
.github/logo.svg vendored Normal file
View File

@ -0,0 +1,348 @@
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="100%" viewBox="0 0 1600 350" enable-background="new 0 0 1600 350" xml:space="preserve">
<path fill="#00BF2D" opacity="1.000000" stroke="none"
d="
M629.000000,251.175552
C696.998962,251.217285 764.499023,251.043793 831.996460,251.381424
C872.647339,251.584763 913.298706,252.386017 953.941528,253.292007
C982.745422,253.934082 1011.545044,254.980118 1040.328979,256.247833
C1072.095703,257.646881 1103.870239,259.095795 1135.590210,261.269958
C1172.600830,263.806732 1209.631958,266.431824 1246.516602,270.323273
C1295.623901,275.504242 1344.692993,281.265167 1393.597412,288.076874
C1427.318604,292.773712 1460.732422,299.676239 1494.286621,305.576050
C1495.344238,305.762024 1496.434937,305.760956 1497.510132,305.847107
C1497.821045,305.462891 1498.131958,305.078674 1498.442871,304.694458
C1488.217285,293.633759 1477.855469,282.693481 1467.882812,271.409302
C1465.925903,269.195099 1464.219971,265.425110 1464.695190,262.786316
C1465.133423,260.352997 1468.427979,257.132629 1470.885132,256.711182
C1473.629028,256.240570 1477.574585,257.756165 1479.722534,259.768066
C1486.266846,265.897827 1492.221802,272.660034 1498.360718,279.218506
C1505.749878,287.112610 1513.088989,295.053375 1520.459839,302.964661
C1521.367432,303.938873 1522.438965,304.774780 1523.260864,305.812347
C1527.773071,311.508759 1525.604370,317.245026 1518.569824,318.799347
C1514.398804,319.720978 1510.317749,321.054749 1506.204834,322.233215
C1492.317871,326.212219 1478.444458,330.239075 1464.535034,334.137756
C1463.163330,334.522247 1461.333008,334.640198 1460.143799,334.037231
C1457.149780,332.519318 1454.400635,330.518280 1451.553345,328.710907
C1453.454956,326.675415 1454.985962,323.813782 1457.329102,322.762604
C1463.820068,319.850586 1470.642944,317.674500 1477.350464,315.251526
C1478.867554,314.703461 1480.448853,314.333069 1481.939453,313.325989
C1472.812378,312.558197 1463.662720,311.992706 1454.562866,310.980316
C1435.890259,308.902893 1417.238159,306.635834 1398.588989,304.354828
C1376.142456,301.609375 1353.720581,298.658081 1331.263916,295.999207
C1308.310791,293.281616 1285.343750,290.661316 1262.354004,288.277039
C1243.162964,286.286713 1223.942993,284.541443 1204.712646,282.972260
C1179.338867,280.901733 1153.949097,279.014832 1128.553101,277.235474
C1115.102783,276.293091 1101.631714,275.571259 1088.158325,275.050995
C1040.065796,273.194092 991.970093,271.413574 943.871704,269.715393
C924.729553,269.039551 905.579163,268.302917 886.429138,268.194153
C777.940491,267.577850 669.451111,267.074097 560.961487,266.667938
C464.478882,266.306732 367.991150,266.582184 271.514618,265.650635
C228.399628,265.234344 185.303329,262.628845 142.205185,260.855164
C126.569107,260.211700 110.952034,259.103943 95.327812,258.182037
C94.409203,258.127808 93.504318,257.841095 92.610275,256.814972
C93.799576,256.692017 94.988075,256.470398 96.178314,256.460632
C124.171005,256.231537 152.166306,256.187103 180.156113,255.771332
C210.804214,255.316101 241.445053,254.377655 272.093018,253.907425
C324.735779,253.099701 377.379822,252.239914 430.026611,251.863953
C496.182678,251.391495 562.342041,251.383850 629.000000,251.175552
z"/>
<path fill="#FBFBFC" opacity="1.000000" stroke="none"
d="
M1136.531250,75.468430
C1149.502686,89.464615 1158.312866,105.204132 1161.071167,123.944977
C1164.256714,145.588089 1155.058838,161.365723 1137.358032,172.860504
C1132.915649,175.745377 1128.063721,177.999420 1123.526367,180.474442
C1126.513184,187.666153 1129.579590,195.049347 1132.855957,202.938431
C1123.931519,204.148102 1115.508179,202.346771 1110.577148,195.110870
C1105.483032,187.635696 1100.227417,187.258179 1092.027954,188.385681
C1070.494629,191.346680 1049.619141,188.256668 1031.786011,174.742630
C999.943604,150.612198 998.185486,104.367447 1027.705688,76.309776
C1039.444458,65.152596 1052.978394,57.629692 1069.411377,55.724426
C1075.942505,54.967213 1082.346558,52.011589 1088.812012,51.999477
C1108.218750,51.963127 1123.192627,61.870239 1136.531250,75.468430
M1081.841064,145.525299
C1080.419556,139.547165 1081.585205,135.960693 1085.617065,133.906250
C1089.598022,131.877777 1093.365967,133.214371 1097.057617,138.187378
C1098.537598,140.181168 1099.774536,142.367310 1101.005127,144.532379
C1104.915039,151.411697 1108.770020,158.322296 1113.107422,166.041336
C1120.483521,162.215363 1127.704468,159.163391 1134.170044,154.951767
C1143.576294,148.824509 1148.202881,139.659027 1146.197388,128.397034
C1141.447144,101.723297 1125.503296,83.583786 1101.300049,72.604919
C1092.501587,68.613853 1082.749512,66.282639 1073.111206,68.667519
C1058.141846,72.371498 1045.922119,80.847488 1036.339478,92.972885
C1019.910889,113.760803 1022.760925,144.436249 1042.867920,159.496338
C1057.771973,170.659393 1075.039429,171.245407 1092.828979,169.646286
C1089.109741,161.592758 1085.579102,153.947586 1081.841064,145.525299
z"/>
<path fill="#FAFAFB" opacity="1.000000" stroke="none"
d="
M163.409271,97.601189
C159.556641,82.961777 155.850479,68.708122 152.144302,54.454468
C151.539719,54.494175 150.935135,54.533886 150.330551,54.573593
C150.238388,56.162678 150.003922,57.758244 150.075943,59.339851
C150.856064,76.471855 152.018768,93.594795 152.371094,110.734718
C152.574844,120.646263 152.286118,130.707367 150.768463,140.475784
C149.239716,150.315628 139.963211,153.317551 133.007675,146.116348
C126.618286,139.501297 120.986832,131.806183 116.531418,123.751656
C109.635605,111.285385 104.017014,98.112022 97.877350,85.228714
C97.052658,83.498199 96.347321,81.710808 95.109352,78.844490
C93.638824,92.596100 92.969818,105.222824 90.824066,117.593391
C86.344818,143.416916 81.103470,169.108627 76.107986,194.841431
C75.765762,196.604324 75.335861,198.819595 74.120293,199.838791
C72.050568,201.574173 68.985298,203.912735 66.967407,203.420258
C64.912872,202.918823 62.141987,199.226456 62.258987,197.071014
C62.652668,189.818619 63.983761,182.589264 65.329239,175.421097
C72.090233,139.401337 76.428711,103.108971 78.109642,66.492729
C78.467834,58.690132 79.265854,50.906723 79.913078,43.118782
C80.302116,38.437618 82.696655,35.136398 87.391167,34.368179
C92.028061,33.609390 95.074997,36.559422 96.790001,40.362938
C101.086494,49.891605 105.364403,59.453991 109.039825,69.232117
C116.894081,90.127693 125.350204,110.678284 138.704605,128.815002
C139.493042,129.885788 140.304932,130.941299 141.151642,131.966141
C141.447189,132.323883 141.880798,132.567551 143.146240,133.577560
C143.830902,128.312241 145.004654,123.667191 144.932312,119.041618
C144.690186,103.559769 143.727631,88.087433 143.584473,72.606430
C143.492386,62.647972 144.171692,52.666691 144.934692,42.728611
C145.096863,40.616306 147.122131,38.647049 148.289017,36.611893
C150.504868,38.044456 153.744263,38.964149 154.762741,40.996700
C159.663269,50.776634 164.578201,60.630810 168.352448,70.876930
C176.551086,93.134140 184.218704,115.592049 191.787308,138.074326
C196.154129,151.045807 197.230148,164.420029 194.373154,178.764542
C193.094727,177.356552 191.899033,176.219727 190.911789,174.924072
C182.707550,164.156830 177.968872,151.777100 174.438080,138.884979
C170.711868,125.279350 167.175201,111.621811 163.409271,97.601189
z"/>
<path fill="#FBFBFB" opacity="1.000000" stroke="none"
d="
M1318.623291,213.559036
C1326.593018,211.154846 1334.216919,208.927917 1340.603882,207.062286
C1344.129639,171.296021 1347.595703,136.135696 1351.028809,101.308739
C1342.915527,102.577072 1334.153809,104.093391 1325.344238,105.241043
C1322.425903,105.621231 1319.333374,105.347610 1316.410278,104.847801
C1312.129639,104.115845 1309.018921,100.919800 1309.508179,96.984612
C1309.856445,94.183403 1312.863647,90.051605 1315.391113,89.441330
C1338.757202,83.799500 1362.226318,78.544655 1385.787476,73.779846
C1391.750977,72.573822 1398.291504,72.711571 1404.852295,76.730232
C1392.971924,90.214096 1376.451050,92.201271 1360.660645,98.128868
C1369.758423,132.311966 1362.319824,166.812546 1360.679443,201.668655
C1369.330933,200.256683 1377.375122,198.362335 1385.510742,197.808289
C1391.530029,197.398361 1397.741577,198.331177 1403.728638,199.425430
C1408.042236,200.213837 1408.640625,202.562515 1405.111938,204.918213
C1399.963013,208.355545 1394.470825,212.122604 1388.599731,213.484131
C1366.290161,218.657791 1343.783691,222.992584 1321.318115,227.476761
C1315.322754,228.673447 1310.937744,226.695831 1309.865845,223.001495
C1308.736206,219.107834 1311.269165,216.317764 1318.623291,213.559036
z"/>
<path fill="#FBFBFB" opacity="1.000000" stroke="none"
d="
M1287.844727,130.118118
C1294.262207,144.747940 1300.462524,159.051559 1306.968994,173.214539
C1310.164185,180.170013 1310.593384,186.843002 1306.737915,194.156265
C1300.050781,192.227844 1294.365967,189.112549 1290.990601,182.952011
C1286.840210,175.377365 1283.038086,167.611832 1279.091431,159.925705
C1278.517090,158.807190 1277.975586,157.671844 1277.343994,156.393036
C1274.793579,161.344620 1272.824829,166.260513 1269.945923,170.568115
C1256.856689,190.153320 1232.661377,197.443680 1213.129883,187.786453
C1199.905762,181.247833 1192.978516,169.579681 1189.343994,156.023926
C1183.894531,135.699097 1183.905884,114.917877 1185.908691,94.103676
C1186.309937,89.935753 1188.117554,86.130157 1192.624146,86.199341
C1197.307983,86.271255 1199.271851,90.129578 1199.567261,94.530823
C1200.413452,107.143066 1200.622437,119.827904 1202.148315,132.354294
C1203.181030,140.831161 1205.151001,149.424347 1208.237061,157.369156
C1213.500000,170.918060 1223.648682,176.521271 1238.000244,173.992706
C1253.198120,171.315033 1265.452393,163.931900 1270.945557,148.483078
C1271.809204,146.054733 1271.923828,142.860641 1271.133911,140.426361
C1266.598022,126.445602 1263.218018,112.255936 1262.750122,97.529289
C1262.626465,93.635056 1262.824219,89.307877 1264.435791,85.922951
C1265.601440,83.474945 1269.326782,80.929306 1271.949707,80.887558
C1274.252197,80.850899 1277.810791,83.795891 1278.690063,86.200768
C1280.482788,91.103928 1281.747070,96.555244 1281.603394,101.735649
C1281.326904,111.707245 1284.505249,120.671661 1287.844727,130.118118
z"/>
<path fill="#FBFBFB" opacity="1.000000" stroke="none"
d="
M342.167328,116.560349
C342.607605,128.632126 346.551636,139.397873 349.744049,150.408463
C353.743408,164.202164 356.755859,178.281998 360.194305,192.238327
C359.493927,192.600998 358.793518,192.963654 358.093140,193.326324
C353.282837,188.887039 348.472504,184.447754 343.396149,179.762924
C342.712463,181.693939 342.026672,183.621872 341.347015,185.551987
C336.777466,198.529419 325.178802,206.497086 313.291534,204.823074
C301.280579,203.131653 292.968079,193.438995 292.280273,179.400070
C291.788818,169.369019 294.304321,159.899078 300.247253,151.620850
C307.615692,141.356949 316.363403,140.147446 328.292542,147.976822
C327.980743,145.991440 327.800507,144.420532 327.481873,142.878204
C322.446899,118.508255 316.997528,94.213493 312.583099,69.732010
C311.228119,62.217468 312.484894,54.181606 312.943695,46.407955
C313.253174,41.164368 317.112122,38.454212 321.743195,37.473629
C326.174896,36.535255 329.411072,39.629780 330.726501,43.192245
C333.578522,50.916084 336.623993,58.818966 337.689606,66.899292
C339.840698,83.211403 340.724609,99.690636 342.167328,116.560349
M309.993713,172.914719
C309.980682,175.578445 309.777679,178.257812 309.995667,180.902496
C310.400360,185.813385 313.078979,188.762299 318.044159,189.620300
C326.737244,191.122452 338.776581,180.273026 338.102356,171.451462
C338.053619,170.813690 337.791229,170.173080 337.535583,169.573257
C335.144562,163.962906 332.733246,158.361206 330.440308,153.019058
C328.357971,153.635178 326.326569,154.664383 324.247040,154.773209
C317.316254,155.135880 313.925201,159.362122 311.976410,165.337982
C311.257599,167.542145 310.705719,169.800751 309.993713,172.914719
z"/>
<path fill="#FBFBFB" opacity="1.000000" stroke="none"
d="
M1513.002930,170.004578
C1494.165283,176.457748 1475.004761,181.331024 1454.884521,179.858139
C1440.411255,178.798630 1427.254639,173.872086 1416.528076,163.787704
C1401.059082,149.244965 1397.860962,127.149376 1408.758911,108.019783
C1421.130615,86.303116 1439.984131,73.373810 1464.666260,69.665749
C1469.809692,68.893051 1475.527588,70.309135 1480.654785,71.821388
C1482.884399,72.479012 1484.290283,75.929276 1486.070312,78.111168
C1483.802979,79.326584 1481.623413,81.378937 1479.254517,81.627686
C1456.195435,84.049095 1439.207275,96.295998 1426.711914,114.875160
C1416.032837,130.753922 1421.064209,147.970795 1438.437866,155.968704
C1453.955200,163.112061 1470.665405,166.288895 1487.626831,167.681854
C1495.733643,168.347626 1503.915161,168.104095 1512.499268,168.560028
C1512.958008,169.233292 1512.980469,169.618927 1513.002930,170.004578
z"/>
<path fill="#FBFBFB" opacity="1.000000" stroke="none"
d="
M239.201996,130.145264
C248.979431,126.278221 258.399506,125.809898 267.565765,130.571045
C279.664764,136.855499 281.621063,148.989212 271.858185,158.454910
C261.869293,168.139725 249.079178,173.241089 236.618393,178.784348
C235.295547,179.372803 233.914032,179.829407 231.761765,180.652298
C242.173447,194.611206 255.143845,202.371750 273.385498,202.476227
C265.802246,207.568604 258.215027,210.398682 249.902466,210.877014
C231.864685,211.914963 216.718536,199.375671 212.102386,179.896667
C207.268417,159.498505 217.625748,140.263992 239.201996,130.145264
M242.810928,159.339478
C250.042633,155.146423 257.274323,150.953354 265.286041,146.308029
C252.233353,139.755737 233.610580,150.564499 231.300339,164.740128
C235.050735,163.017868 238.613678,161.381699 242.810928,159.339478
z"/>
<path fill="#FBFBFB" opacity="1.000000" stroke="none"
d="
M624.167725,142.316818
C627.976257,136.513885 630.969482,130.369003 635.563232,125.851997
C641.728455,119.789795 648.274597,120.355034 655.143066,125.560020
C655.977478,126.192337 657.476501,126.451340 658.519653,126.219101
C663.589783,125.090317 668.150696,126.073051 672.096252,129.341476
C682.107605,137.634766 686.472229,148.416443 684.693054,161.209671
C683.076294,172.835236 674.802002,179.983856 661.441101,182.456970
C644.284973,185.632584 627.357971,176.376709 623.780945,160.850235
C622.462158,155.125778 623.890076,148.768494 624.167725,142.316818
M649.053528,141.777878
C647.990417,141.277863 646.927307,140.777863 645.846741,140.269623
C635.844177,152.499466 640.121277,164.773209 655.520142,167.670135
C657.451111,168.033401 659.509094,168.111038 661.461914,167.897369
C668.356323,167.143036 671.814758,162.153915 670.620911,155.275330
C669.067078,146.321915 662.716980,141.557297 655.674438,137.376404
C653.544861,138.765686 651.604553,140.031464 649.053528,141.777878
z"/>
<path fill="#FAFAFB" opacity="1.000000" stroke="none"
d="
M772.803223,161.083405
C770.535583,148.230927 774.162292,137.747620 784.392395,130.386093
C794.918396,122.811592 806.447388,122.673988 817.790405,128.736755
C825.595703,132.908661 827.257202,141.721008 821.407837,148.616196
C813.783691,157.603333 803.725098,160.429779 792.228149,158.507355
C791.281921,158.349136 790.339722,158.167206 788.353943,157.807709
C797.509705,171.729874 810.399170,176.276154 824.957275,177.567291
C820.938843,183.615295 807.955444,188.047073 798.095459,186.049118
C784.516785,183.297653 776.255554,174.859680 772.803223,161.083405
M809.558899,135.862030
C798.588806,134.269241 792.872375,137.437286 789.616760,147.077301
C798.363342,144.511520 808.040649,144.867493 815.703674,137.563873
C813.464600,136.878754 811.919861,136.406067 809.558899,135.862030
z"/>
<path fill="#FAFAFA" opacity="1.000000" stroke="none"
d="
M738.921509,145.878479
C743.540833,140.195419 747.723938,134.589798 752.464417,129.503799
C755.873108,125.846649 758.645569,126.472183 759.393555,131.253906
C760.107605,135.818985 760.240601,141.074524 758.657593,145.285431
C754.134583,157.316803 746.475342,167.422684 736.250244,175.312180
C726.499817,182.835358 719.246155,181.199112 712.776489,170.630692
C705.711975,159.090546 703.272278,146.248688 702.279846,133.068970
C702.142761,131.247955 704.191101,129.262344 705.227661,127.352921
C706.951050,128.577072 709.394531,129.419556 710.279236,131.088379
C715.386841,140.722626 720.126099,150.552109 725.727478,161.795441
C730.797424,155.703156 734.760620,150.940750 738.921509,145.878479
z"/>
<path fill="#F9F9FA" opacity="1.000000" stroke="none"
d="
M467.165802,147.065536
C472.151550,142.035843 476.259644,141.605576 479.298157,147.278534
C485.218536,158.331879 490.264221,169.873840 495.226379,181.405243
C496.375275,184.075195 495.731476,187.516586 495.914032,190.627243
C485.614532,190.666885 480.080963,186.271896 472.886597,173.218597
C468.179077,176.379196 463.730988,180.169205 458.632111,182.586426
C452.203400,185.634048 445.446136,185.023941 440.131805,179.805389
C434.910126,174.677826 434.783051,168.194519 437.191345,161.708786
C441.004791,151.438919 449.126953,146.544067 459.485138,145.024109
C461.408630,144.741852 463.548248,145.932449 465.833313,147.078125
C465.015228,148.906235 464.032562,150.185135 462.873444,151.276627
C459.494141,154.458847 455.682281,157.263580 452.731049,160.793991
C450.146057,163.886307 447.220856,167.825180 450.510193,171.991028
C453.720581,176.056946 458.457642,175.628967 462.928589,174.324921
C464.630066,173.828644 466.167450,172.678284 467.693420,171.692612
C470.238068,170.048920 470.982910,168.047455 469.733948,164.947800
C467.439240,159.252930 465.345367,153.405151 467.165802,147.065536
z"/>
<path fill="#FAFAFA" opacity="1.000000" stroke="none"
d="
M857.086792,179.194427
C850.600952,181.854446 846.328918,179.966370 844.737183,173.525055
C843.466309,168.382309 842.729065,163.088470 842.050903,157.823563
C841.268188,151.747162 840.628784,145.639175 840.283630,139.524780
C840.096436,136.210342 838.479553,131.607040 844.893616,131.885284
C845.673340,131.919128 846.687500,130.233536 847.330750,129.184052
C859.965393,108.569351 884.028992,113.000961 896.941345,124.514656
C891.695740,125.275612 886.481995,125.892799 881.320496,126.805115
C860.490051,130.486984 855.529175,139.071472 858.402710,160.841064
C858.792725,163.796036 859.318726,166.771271 860.180298,169.616455
C861.346863,173.468765 860.345154,176.458069 857.086792,179.194427
z"/>
<path fill="#FAFBFB" opacity="1.000000" stroke="none"
d="
M380.792236,102.062325
C380.235046,99.406120 379.549133,97.142342 379.399231,94.843613
C378.919006,87.477249 380.686188,84.940620 388.542725,81.606430
C387.097687,80.077728 385.776917,78.680450 384.315948,77.134903
C388.208435,72.881897 393.210083,73.350266 398.035309,74.871124
C409.622528,78.523277 416.637024,95.479935 411.179871,106.306816
C408.505524,111.612694 404.068512,114.634636 398.109192,114.996391
C391.277008,115.411140 386.330261,112.096550 382.769348,106.511284
C381.974762,105.264984 381.520081,103.801987 380.792236,102.062325
z"/>
<path fill="#F9F9FA" opacity="1.000000" stroke="none"
d="
M391.399902,139.195312
C393.872406,142.425400 396.737183,145.267319 397.940125,148.692810
C402.384827,161.349411 406.280365,174.198837 410.381775,186.975983
C410.483490,187.292831 410.608765,187.605957 410.672058,187.930756
C411.964386,194.566360 409.668182,199.769592 404.835266,201.174149
C400.390320,202.465942 395.465637,199.427383 393.409119,193.040955
C391.333191,186.594208 389.840393,179.908524 388.701935,173.223679
C387.197479,164.389755 386.045258,155.477432 385.207855,146.555038
C384.677216,140.901108 386.365387,139.208511 391.399902,139.195312
z"/>
<path fill="#058B2C" opacity="1.000000" stroke="none"
d="
M80.124115,257.917877
C73.365936,257.403900 67.078743,256.905029 60.791542,256.406158
C60.831306,256.228943 60.871075,256.051727 60.910839,255.874512
C70.181015,255.874512 79.451187,255.874512 88.721367,255.874512
C88.736588,256.560669 88.751801,257.246826 88.767021,257.932983
C86.043045,257.932983 83.319061,257.932983 80.124115,257.917877
z"/>
<path fill="#A4A7AE" opacity="1.000000" stroke="none"
d="
M1513.363892,170.057541
C1512.980469,169.618927 1512.958008,169.233292 1512.905029,168.553741
C1514.441284,168.074600 1516.007935,167.889343 1517.574707,167.704102
C1517.666748,168.004990 1517.758911,168.305893 1517.851074,168.606781
C1516.475708,169.108032 1515.100220,169.609268 1513.363892,170.057541
z"/>
</svg>

After

Width:  |  Height:  |  Size: 20 KiB

597
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,3 @@
[workspace]
members = [
"moq-transport",
"moq-quinn",
"moq-pub",
"moq-warp",
]
members = ["moq-transport", "moq-relay", "moq-pub"]
resolver = "2"

View File

@ -1,44 +1,19 @@
FROM rust:latest as builder
# Make a fake Rust app to keep a cached layer of compiled crates
RUN USER=root cargo new app
WORKDIR /usr/src/app
COPY Cargo.toml Cargo.lock ./
RUN mkdir -p moq-transport/src moq-quinn/src moq-warp/src moq-pub/src
COPY moq-transport/Cargo.toml moq-transport/Cargo.toml
COPY moq-quinn/Cargo.toml moq-quinn/Cargo.toml
COPY moq-pub/Cargo.toml moq-pub/Cargo.toml
COPY moq-warp/Cargo.toml moq-warp/Cargo.toml
RUN touch moq-transport/src/lib.rs
RUN touch moq-warp/src/lib.rs
RUN touch moq-pub/src/lib.rs
RUN touch moq-quinn/src/lib.rs
RUN sed -i '/default-run.*/d' moq-quinn/Cargo.toml
# Will build all dependent crates in release mode
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/src/app/target \
cargo build --release
# Copy the rest
# Create a build directory and copy over all of the files
WORKDIR /build
COPY . .
# Build (install) the actual binaries
RUN cargo install --path moq-quinn
# Reuse a cache between builds.
# I tried to `cargo install`, but it doesn't seem to work with workspaces.
# There's also issues with the cache mount since it builds into /usr/local/cargo/bin, and we can't mount that without clobbering cargo itself.
# We instead we build the binaries and copy them to the cargo bin directory.
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/build/target \
cargo build --release && cp /build/target/release/moq-* /usr/local/cargo/bin
# Runtime image
FROM rust:latest
# Run as "app" user
RUN useradd -ms /bin/bash app
USER app
WORKDIR /app
# Get compiled binaries from builder's cargo install directory
COPY --from=builder /usr/local/cargo/bin/moq-quinn /app/moq-quinn
ADD entrypoint.sh .
# No CMD or ENTRYPOINT, see fly.toml with `cmd` override.
# Copy the compiled binaries
COPY --from=builder /usr/local/cargo/bin /usr/local/cargo/bin

View File

@ -1,5 +1,9 @@
# Media over QUIC
<p align="center">
<img height="256" src="https://github.com/kixelated/moq-rs/blob/main/.github/logo.svg">
</p>
Media over QUIC (MoQ) is a live media delivery protocol utilizing QUIC streams.
See the [MoQ working group](https://datatracker.ietf.org/wg/moq/about/) for more information.
@ -8,7 +12,6 @@ It requires a client to actually publish/view content, such as [moq-js](https://
Join the [Discord](https://discord.gg/FCYF3p99mr) for updates and discussion.
## Setup
### Certificates
@ -19,8 +22,8 @@ If you have a valid certificate you can use it instead of self-signing.
Use [mkcert](https://github.com/FiloSottile/mkcert) to generate a self-signed certificate.
Unfortunately, this currently requires Go in order to [fork](https://github.com/FiloSottile/mkcert/pull/513) the tool.
```
./cert/generate
```bash
./dev/cert
```
Unfortunately, WebTransport in Chrome currently (May 2023) doesn't verify certificates using the root CA.
@ -28,16 +31,68 @@ The workaround is to use the `serverFingerprints` options, which requires the ce
This is also why we're using a fork of mkcert, because it generates certificates valid for years by default.
This limitation will be removed once Chrome uses the system CA for WebTransport.
### Media
If you're using `moq-pub` then you'll want some test footage to broadcast.
```bash
mkdir media
wget http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4 -O dev/source.mp4
```
## Usage
Run the server:
### moq-relay
```
cargo run
**moq-relay** is a server that forwards subscriptions from publishers to subscribers, caching and deduplicating along the way.
It's designed to be run in a datacenter, relaying media across multiple hops to deduplicate and improve QoS.
You can run the development server with the following command, automatically using the self-signed certificate generated earlier:
```bash
./dev/relay
```
This listens for WebTransport connections on `https://localhost:4443` by default.
Use a [MoQ client](https://github.com/kixelated/moq-js) to connect to the server.
Notable arguments:
- `--bind <ADDR>` Listen on this address [default: [::]:4443]
- `--cert <CERT>` Use the certificate file at this path
- `--key <KEY>` Use the private key at this path
This listens for WebTransport connections on `UDP https://localhost:4443` by default.
You need a client to connect to that address, to both publish and consume media.
The server also listens on `TCP localhost:4443` when in development mode.
This is exclusively to serve a `/fingerprint` endpoint via HTTPS for self-signed certificates, which are not needed in production.
### moq-pub
This is a client that publishes a fMP4 stream from stdin over MoQ.
This can be combined with ffmpeg (and other tools) to produce a live stream.
The following command runs a development instance, broadcasting `dev/source.mp4` to `localhost:4443`:
```bash
./dev/pub
```
Notable arguments:
- `<URI>` connects to the given address, which must start with `moq://`.
### moq-js
There's currently no way to consume broadcasts with `moq-rs`, at least until somebody writes `moq-sub`.
Until then, you can use [moq.js](https://github.com/kixelated/moq-js) to both watch and publish broadcasts.
There's a hosted version available at [quic.video](https://quic.video/).
There's a secret `?server` parameter that can be used to connect to a different address.
- Publish to localhost: `https://quic.video/publish/?server=localhost:4443`
- Watch from localhost: `https://quic.video/watch/<name>/?server=localhost:4443`
Note that self-signed certificates are ONLY supported if the server name starts with `localhost`.
You'll need to add an entry to `/etc/hosts` if you want to use a self-signed cert and an IP address.
## License

View File

@ -1,3 +1,4 @@
*.crt
*.key
*.hex
*.hex
*.mp4

25
dev/pub Executable file
View File

@ -0,0 +1,25 @@
#!/bin/bash
set -euo pipefail

# Publish a test broadcast to a local moq-relay instance.
# Pipes an fMP4 stream produced by ffmpeg into moq-pub.
# Overridable via environment: HOST, NAME, URI, MEDIA.
# Any extra command line arguments are forwarded to moq-pub.

# Change directory to the root of the project
cd "$(dirname "$0")/.."

# Connect to localhost by default.
HOST="${HOST:-localhost:4443}"

# Generate a random 16 character name by default.
# NOTE: the trailing `head -c 16` closes the pipe early, which can make `tr`
# exit with SIGPIPE (status 141); under `set -euo pipefail` that status is
# inherited by the command substitution and would intermittently abort the
# script. Bound the read (256 random bytes yield ~60 alphanumerics, far more
# than the 16 we keep) and explicitly ignore the pipeline status.
NAME="${NAME:-$(head -c 256 /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | head -c 16 || true)}"

# Combine the host and name into a URI.
URI="${URI:-"moq://$HOST/$NAME"}"

# Default to a source video
MEDIA="${MEDIA:-dev/source.mp4}"

# Run ffmpeg and pipe the output to moq-pub.
# -an drops the audio track; the movflags produce a fragmented MP4 suitable
# for live streaming (one moof/mdat pair per frame, no tfhd base offset).
ffmpeg -hide_banner -v quiet \
	-stream_loop -1 -re \
	-i "$MEDIA" \
	-an \
	-f mp4 -movflags empty_moov+frag_every_frame+separate_moof+omit_tfhd_offset - \
	| RUST_LOG=info cargo run --bin moq-pub -- "$URI" "$@"

13
dev/relay Executable file
View File

@ -0,0 +1,13 @@
#!/bin/bash
set -euo pipefail

# Run the moq-relay development server.
# The certificate/key paths can be overridden via the CERT and KEY
# environment variables; any extra command line arguments are forwarded
# to the relay binary.

# Change directory to the root of the project
cd "$(dirname "$0")/.."

# Default to a self-signed certificate
# (generated by ./dev/cert — see the README's Certificates section).
# TODO automatically generate if it doesn't exist.
CERT="${CERT:-dev/localhost.crt}"
KEY="${KEY:-dev/localhost.key}"

# Run the relay and forward any arguments
# --fingerprint enables the HTTP /fingerprint endpoint so clients can
# trust the self-signed certificate during development.
RUST_LOG=info cargo run --bin moq-relay -- --cert "$CERT" --key "$KEY" --fingerprint "$@"

View File

@ -1,8 +0,0 @@
#!/usr/bin/env sh
mkdir cert
# Nothing to see here...
echo "$MOQ_CRT" | base64 -d > cert/moq-demo.crt
echo "$MOQ_KEY" | base64 -d > cert/moq-demo.key
RUST_LOG=info ./moq-quinn --cert cert/moq-demo.crt --key cert/moq-demo.key

View File

@ -15,8 +15,6 @@ categories = ["multimedia", "network-programming", "web-programming"]
[dependencies]
moq-transport = { path = "../moq-transport" }
#moq-transport-quinn = { path = "../moq-transport-quinn" }
moq-warp = { path = "../moq-warp" }
# QUIC
quinn = "0.10"
@ -36,9 +34,9 @@ tokio = { version = "1.27", features = ["full"] }
clap = { version = "4.0", features = ["derive"] }
log = { version = "0.4", features = ["std"] }
env_logger = "0.9.3"
anyhow = { version = "1.0.70", features = ["backtrace"]}
mp4 = "0.13.0"
rustls-native-certs = "0.6.3"
anyhow = { version = "1.0.70", features = ["backtrace"] }
serde_json = "1.0.105"
rfc6381-codec = "0.1.0"
@ -46,11 +44,3 @@ rfc6381-codec = "0.1.0"
http = "0.2.9"
clap = { version = "4.0", features = ["derive"] }
clap_mangen = "0.2.12"
[dependencies.uuid]
version = "1.4.1"
features = [
"v4", # Lets you generate random UUIDs
"fast-rng", # Use a faster (but still sufficiently random) RNG
"macro-diagnostics", # Enable better diagnostics for compile-time UUIDs
]

View File

@ -5,22 +5,9 @@ A command line tool for publishing media via Media over QUIC (MoQ).
Expects to receive fragmented MP4 via standard input and connect to a MOQT relay.
```
ffmpeg ... - | moq-pub -i - -u https://localhost:4443
ffmpeg ... - | moq-pub -i - --host localhost:4443
```
### A note on the `moq-pub` code organization
- `Media` is responsible for reading from stdin and parsing MP4 boxes. It populates a `MapSource` of `Track`s for which it holds the producer side, pushing segments of video/audio into them and notifying consumers via tokio watch async primitives.
- `SessionRunner` is where we create and hold the MOQT Session from the `moq_transport` library. We currently hard-code our implementation to use `quinn` as the underlying WebTransport implementation. We use a series of `mpsc` and `broadcast` channels to make it possible for other parts of our code to send/receive control messages via that Session. Sending Objects is handled a little differently because we are able to clone the MOQT Session's sender wherever we need to do that.
- `MediaRunner` is responsible for consuming the `Track`s that `Media` produces and populates. `MediaRunner` spawns tasks for each `Track` to `.await` new segments and then put the media data into Objects and onto the wire (via channels into `SessionRunner`). Note that these tasks are created, but block waiting on the reception of a MOQT SUBSCRIBE message before they actually send any segments on the wire. `MediaRunner` is also responsible for sending the initial MOQT ANNOUNCE message announcing the namespace for the tracks we will send.
- `LogViewer` as the name implies is responsible for logging. It snoops on some channels going in/out of `SessionRunner` and logs MOQT control messages.
Longer term, I think it'd be interesting to refactor everything such that the `Media` + `MediaRunner` bits consume an interface that's _closer_ to what we'd like to eventually expose as a C FFI for consumption by external tools. That probably means greatly reducing the use of async Rust in the parts of this code that make up both sides of that interface boundary.
### Invoking `moq-pub`:
Here's how I'm currently testing things, with a local copy of Big Buck Bunny named `bbb_source.mp4`:
@ -29,13 +16,13 @@ Here's how I'm currently testing things, with a local copy of Big Buck Bunny nam
$ ffmpeg -hide_banner -v quiet -stream_loop -1 -re -i bbb_source.mp4 -an -f mp4 -movflags empty_moov+frag_every_frame+separate_moof+omit_tfhd_offset - | RUST_LOG=moq_pub=info moq-pub -i -
```
This relies on having `moq-quinn` (the relay server) already running locally in another shell.
This relies on having `moq-relay` (the relay server) already running locally in another shell.
Note also that we're dropping the audio track (`-an`) above until audio playback is stabilized on the `moq-js` side.
### Known issues
- Expects only one H.264/AVC1-encoded video track (catalog generation doesn't support audio tracks yet)
- Doesn't yet gracefully handle EOF - workaround: never stop sending it media (`-stream_loop -1`)
- Probably still full of lots of bugs
- Various other TODOs you can find in the code
- Expects only one H.264/AVC1-encoded video track (catalog generation doesn't support audio tracks yet)
- Doesn't yet gracefully handle EOF - workaround: never stop sending it media (`-stream_loop -1`)
- Probably still full of lots of bugs
- Various other TODOs you can find in the code

View File

@ -1,36 +1,34 @@
use clap::{Parser, ValueEnum};
use clap::Parser;
use std::net;
#[derive(Parser, Clone)]
#[command(arg_required_else_help(true))]
#[derive(Parser, Clone, Debug)]
pub struct Config {
#[arg(long, hide_short_help = true, default_value = "[::]:0")]
pub bind_address: net::SocketAddr,
/// Listen for UDP packets on the given address.
#[arg(long, default_value = "[::]:0")]
pub bind: net::SocketAddr,
#[arg(short, long, default_value = "https://localhost:4443")]
pub uri: http::uri::Uri,
/// Advertise this frame rate in the catalog (informational)
// TODO auto-detect this from the input when not provided
#[arg(long, default_value = "24")]
pub fps: u8,
#[arg(short, long, required = true, value_parser=input_parser)]
input: InputValues,
/// Advertise this bit rate in the catalog (informational)
// TODO auto-detect this from the input when not provided
#[arg(long, default_value = "1500000")]
pub bitrate: u32,
#[arg(long, hide_short_help = true, default_value = "24")]
pub catalog_fps: u8,
#[arg(long, hide_short_help = true, default_value = "1500000")]
pub catalog_bit_rate: u32,
#[arg(short, long, required = false, default_value = "")]
pub namespace: String,
/// Connect to the given URI starting with moq://
#[arg(value_parser = moq_uri)]
pub uri: http::Uri,
}
fn input_parser(s: &str) -> Result<InputValues, String> {
if s == "-" {
return Ok(InputValues::Stdin);
fn moq_uri(s: &str) -> Result<http::Uri, String> {
let uri = http::Uri::try_from(s).map_err(|e| e.to_string())?;
// Make sure the scheme is moq
if uri.scheme_str() != Some("moq") {
return Err("uri scheme must be moq".to_string());
}
Err("The only currently supported input value is: '-' (stdin)".to_string())
}
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum)]
pub enum InputValues {
Stdin,
Ok(uri)
}

View File

@ -1,39 +0,0 @@
use log::{debug, info};
use tokio::{select, sync::broadcast};
/// Passive debug logger for MOQT session traffic.
///
/// Holds broadcast `Receiver` ends for the session's incoming control
/// messages and object headers (handed out by `SessionRunner`), so it
/// observes copies of the traffic without consuming the primary channels.
pub struct LogViewer {
// Incoming MOQT control messages (ANNOUNCE, SUBSCRIBE, ...).
incoming_ctl_receiver: broadcast::Receiver<moq_transport::Message>,
// Incoming MOQT object headers; payload streams are not exposed here.
incoming_obj_receiver: broadcast::Receiver<moq_transport::Object>,
}
impl LogViewer {
/// Builds a viewer from the `(control, object)` receiver pair returned by
/// `SessionRunner::get_incoming_receivers`.
pub async fn new(
incoming: (
broadcast::Receiver<moq_transport::Message>,
broadcast::Receiver<moq_transport::Object>,
),
) -> anyhow::Result<Self> {
Ok(Self {
incoming_ctl_receiver: incoming.0,
incoming_obj_receiver: incoming.1,
})
}
/// Loops forever, logging whichever channel yields next at `info` level.
///
/// Returns an error (ending the loop) if either broadcast `recv()` fails,
/// e.g. when the sending side is dropped.
pub async fn run(&mut self) -> anyhow::Result<()> {
debug!("log_viewer.run()");
loop {
select! {
msg = self.incoming_ctl_receiver.recv() => {
info!(
"Received incoming MOQT Control message: {:?}",
&msg?
);}
obj = self.incoming_obj_receiver.recv() => {
info!(
"Received incoming MOQT Object with header: {:?}",
&obj?
);}
}
}
}
}

View File

@ -1,23 +1,13 @@
use anyhow::Context;
use clap::Parser;
use tokio::task::JoinSet;
mod session_runner;
use session_runner::*;
mod media_runner;
use media_runner::*;
mod log_viewer;
use log_viewer::*;
mod media;
use media::*;
mod cli;
use cli::*;
use uuid::Uuid;
mod media;
use media::*;
use moq_transport::model::broadcast;
// TODO: clap complete
@ -25,35 +15,49 @@ use uuid::Uuid;
async fn main() -> anyhow::Result<()> {
env_logger::init();
let mut config = Config::parse();
let config = Config::parse();
if config.namespace.is_empty() {
config.namespace = format!("quic.video/{}", Uuid::new_v4());
let (publisher, subscriber) = broadcast::new();
let mut media = Media::new(&config, publisher).await?;
// Ugh, just let me use my native root certs already
let mut roots = rustls::RootCertStore::empty();
for cert in rustls_native_certs::load_native_certs().expect("could not load platform certs") {
roots.add(&rustls::Certificate(cert.0)).unwrap();
}
let mut media = Media::new(&config).await?;
let session_runner = SessionRunner::new(&config).await?;
let mut log_viewer = LogViewer::new(session_runner.get_incoming_receivers().await).await?;
let mut media_runner = MediaRunner::new(
session_runner.get_send_objects().await,
session_runner.get_outgoing_senders().await,
session_runner.get_incoming_receivers().await,
)
.await?;
let mut tls_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(roots)
.with_no_client_auth();
let mut join_set: JoinSet<anyhow::Result<()>> = tokio::task::JoinSet::new();
tls_config.alpn_protocols = vec![webtransport_quinn::ALPN.to_vec()]; // this one is important
join_set.spawn(async { session_runner.run().await.context("failed to run session runner") });
join_set.spawn(async move { log_viewer.run().await.context("failed to run media source") });
let arc_tls_config = std::sync::Arc::new(tls_config);
let quinn_client_config = quinn::ClientConfig::new(arc_tls_config);
media_runner.announce(&config.namespace, media.source()).await?;
let mut endpoint = quinn::Endpoint::client(config.bind)?;
endpoint.set_default_client_config(quinn_client_config);
join_set.spawn(async move { media.run().await.context("failed to run media source") });
join_set.spawn(async move { media_runner.run().await.context("failed to run client") });
log::info!("connecting to {}", config.uri);
while let Some(res) = join_set.join_next().await {
dbg!(&res);
res??;
// Change the uri scheme to "https" for WebTransport
let mut parts = config.uri.into_parts();
parts.scheme = Some(http::uri::Scheme::HTTPS);
let uri = http::Uri::from_parts(parts)?;
let session = webtransport_quinn::connect(&endpoint, &uri)
.await
.context("failed to create WebTransport session")?;
let session = moq_transport::session::Client::publisher(session, subscriber)
.await
.context("failed to create MoQ Transport session")?;
// TODO run a task that returns a 404 for all unknown subscriptions.
tokio::select! {
res = session.run() => res.context("session error")?,
res = media.run() => res.context("media error")?,
}
Ok(())

View File

@ -1,25 +1,25 @@
use crate::cli::Config;
use anyhow::{self, Context};
use log::{debug, info};
use moq_transport::model::{broadcast, segment, track};
use moq_transport::VarInt;
use moq_warp::model::{segment, track};
use mp4::{self, ReadBox};
use serde_json::json;
use std::collections::HashMap;
use std::io::Cursor;
use std::sync::Arc;
use std::time;
use tokio::io::AsyncReadExt;
pub struct Media {
// The tracks we're producing.
tracks: HashMap<String, Track>,
// We hold on to publisher so we don't close then while media is still being published.
_broadcast: broadcast::Publisher,
_catalog: track::Publisher,
_init: track::Publisher,
source: Arc<MapSource>,
tracks: HashMap<String, Track>,
}
impl Media {
pub async fn new(config: &Config) -> anyhow::Result<Self> {
pub async fn new(config: &Config, mut broadcast: broadcast::Publisher) -> anyhow::Result<Self> {
let mut stdin = tokio::io::stdin();
let ftyp = read_atom(&mut stdin).await?;
anyhow::ensure!(&ftyp[4..8] == b"ftyp", "expected ftyp atom");
@ -38,45 +38,43 @@ impl Media {
// Parse the moov box so we can detect the timescales for each track.
let moov = mp4::MoovBox::read_box(&mut moov_reader, moov_header.size)?;
// Create a source that can be subscribed to.
let mut source = HashMap::default();
// Create the catalog track with a single segment.
let mut init_track = broadcast.create_track("1.mp4")?;
let mut init_segment = init_track.create_segment(segment::Info {
sequence: VarInt::ZERO,
priority: i32::MAX,
expires: None,
})?;
init_segment.write_chunk(init.into())?;
let mut tracks = HashMap::new();
// Create the init track
let init_track_name = "1.mp4";
let (_init, subscriber) = Self::create_init(init);
source.insert(init_track_name.to_string(), subscriber);
for trak in &moov.traks {
let id = trak.tkhd.track_id;
let name = id.to_string();
//let name = "2".to_string();
//dbg!("trak name: {}", &name);
let timescale = track_timescale(&moov, id);
// Store the track publisher in a map so we can update it later.
let track = Track::new(&name, timescale);
source.insert(name.to_string(), track.subscribe());
let track = broadcast.create_track(&name)?;
let track = Track::new(track, timescale);
tracks.insert(name, track);
}
let mut catalog = broadcast.create_track(".catalog")?;
// Create the catalog track
let (_catalog, subscriber) = Self::create_catalog(
config,
config.namespace.to_string(),
init_track_name.to_string(),
&moov,
&tracks,
)?;
source.insert(".catalog".to_string(), subscriber);
Self::serve_catalog(&mut catalog, config, init_track.name.to_string(), &moov, &tracks)?;
let source = Arc::new(MapSource(source));
Ok(Media { tracks, source })
Ok(Media {
_broadcast: broadcast,
_catalog: catalog,
_init: init_track,
tracks,
})
}
pub async fn run(&mut self) -> anyhow::Result<()> {
let mut stdin = tokio::io::stdin();
// The current track name
@ -122,45 +120,18 @@ impl Media {
}
}
fn create_init(raw: Vec<u8>) -> (track::Publisher, track::Subscriber) {
// Create a track with a single segment containing the init data.
let mut init_track = track::Publisher::new("1.mp4");
// Subscribe to the init track before we push the segment.
let subscriber = init_track.subscribe();
let mut segment = segment::Publisher::new(segment::Info {
sequence: VarInt::from_u32(0), // first and only segment
send_order: i32::MIN, // highest priority
expires: None, // never delete from the cache
});
// Add the segment and add the fragment.
init_track.push_segment(segment.subscribe());
segment.fragments.push(raw.into());
// Return the catalog
(init_track, subscriber)
}
fn create_catalog(
fn serve_catalog(
track: &mut track::Publisher,
config: &Config,
namespace: String,
init_track_name: String,
moov: &mp4::MoovBox,
_tracks: &HashMap<String, Track>,
) -> Result<(track::Publisher, track::Subscriber), anyhow::Error> {
// Create a track with a single segment containing the init data.
let mut catalog_track = track::Publisher::new(".catalog");
// Subscribe to the catalog before we push the segment.
let catalog_subscriber = catalog_track.subscribe();
let mut segment = segment::Publisher::new(segment::Info {
sequence: VarInt::from_u32(0), // first and only segment
send_order: i32::MIN, // highest priority
expires: None, // never delete from the cache
});
) -> Result<(), anyhow::Error> {
let mut segment = track.create_segment(segment::Info {
sequence: VarInt::ZERO,
priority: i32::MAX,
expires: None,
})?;
// avc1[.PPCCLL]
//
@ -192,30 +163,24 @@ impl Media {
"tracks": [
{
"container": "mp4",
"namespace": namespace,
"kind": "video",
"init_track": init_track_name,
"data_track": "1", // assume just one track for now
"codec": codec_str,
"width": width,
"height": height,
"frame_rate": config.catalog_fps,
"bit_rate": config.catalog_bit_rate,
"frame_rate": config.fps,
"bit_rate": config.bitrate,
}
]
});
let catalog_str = serde_json::to_string_pretty(&catalog)?;
info!("catalog: {}", catalog_str);
log::info!("catalog: {}", catalog_str);
// Add the segment and add the fragment.
catalog_track.push_segment(segment.subscribe());
segment.fragments.push(catalog_str.into());
segment.write_chunk(catalog_str.into())?;
// Return the catalog
Ok((catalog_track, catalog_subscriber))
}
pub fn source(&self) -> Arc<MapSource> {
self.source.clone()
Ok(())
}
}
@ -230,8 +195,6 @@ async fn read_atom<R: AsyncReadExt + Unpin>(reader: &mut R) -> anyhow::Result<Ve
let mut raw = buf.to_vec();
debug!("size: {}", &size);
let mut limit = match size {
// Runs until the end of the file.
0 => reader.take(u64::MAX),
@ -249,13 +212,11 @@ async fn read_atom<R: AsyncReadExt + Unpin>(reader: &mut R) -> anyhow::Result<Ve
anyhow::bail!("impossible box size: {}", size)
}
// Otherwise read based on the size.
size => reader.take(size - 8),
};
// Append to the vector and return it.
let read_bytes = limit.read_to_end(&mut raw).await?;
debug!("read_bytes: {}", read_bytes);
let _read_bytes = limit.read_to_end(&mut raw).await?;
Ok(raw)
}
@ -275,9 +236,7 @@ struct Track {
}
impl Track {
fn new(name: &str, timescale: u64) -> Self {
let track = track::Publisher::new(name);
fn new(track: track::Publisher, timescale: u64) -> Self {
Self {
track,
sequence: 0,
@ -290,13 +249,12 @@ impl Track {
if let Some(segment) = self.segment.as_mut() {
if !fragment.keyframe {
// Use the existing segment
segment.fragments.push(raw.into());
segment.write_chunk(raw.into())?;
return Ok(());
}
}
// Otherwise make a new segment
let now = time::Instant::now();
// Compute the timestamp in milliseconds.
// Overflows after 583 million years, so we're fine.
@ -306,50 +264,32 @@ impl Track {
.try_into()
.context("timestamp too large")?;
// The send order is simple; newer timestamps should be higher priority.
// TODO give audio a boost?
// TODO Use timestamps for prioritization again after quinn priority bug fixed
let send_order = i32::MIN;
// Create a new segment.
let mut segment = self.track.create_segment(segment::Info {
sequence: VarInt::try_from(self.sequence).context("sequence too large")?,
priority: i32::MAX, // TODO
// Delete segments after 10s.
let expires = Some(now + time::Duration::from_secs(10)); // TODO increase this once send order is implemented
let sequence = self.sequence.try_into().context("sequence too large")?;
// Delete segments after 10s.
expires: Some(time::Duration::from_secs(10)),
})?;
self.sequence += 1;
// Create a new segment.
let segment = segment::Info {
sequence,
expires,
send_order,
};
let mut segment = segment::Publisher::new(segment);
self.track.push_segment(segment.subscribe());
// Insert the raw atom into the segment.
segment.fragments.push(raw.into());
segment.write_chunk(raw.into())?;
// Save for the next iteration
self.segment = Some(segment);
// Remove any segments older than 10s.
// TODO This can only drain from the FRONT of the queue, so don't get clever with expirations.
self.track.drain_segments(now);
Ok(())
}
pub fn data(&mut self, raw: Vec<u8>) -> anyhow::Result<()> {
let segment = self.segment.as_mut().context("missing segment")?;
segment.fragments.push(raw.into());
segment.write_chunk(raw.into())?;
Ok(())
}
pub fn subscribe(&self) -> track::Subscriber {
self.track.subscribe()
}
}
struct Fragment {
@ -434,16 +374,3 @@ fn track_timescale(moov: &mp4::MoovBox, track_id: u32) -> u64 {
trak.mdia.mdhd.timescale as u64
}
pub trait Source {
fn subscribe(&self, name: &str) -> Option<track::Subscriber>;
}
#[derive(Clone, Default, Debug)]
pub struct MapSource(pub HashMap<String, track::Subscriber>);
impl Source for MapSource {
fn subscribe(&self, name: &str) -> Option<track::Subscriber> {
self.0.get(name).cloned()
}
}

View File

@ -1,159 +0,0 @@
use crate::media::{self, MapSource};
use anyhow::bail;
use log::{debug, error};
use moq_transport::message::Message;
use moq_transport::message::{Announce, SubscribeError, SubscribeOk};
use moq_transport::{object, Object, VarInt};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::io::AsyncWriteExt;
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::task::JoinSet;
use webtransport_generic::Session as WTSession;
/// Publishes `Media`-produced tracks over a MOQT session.
///
/// Announces a namespace, then serves each track to subscribers: one task
/// per track waits for a SUBSCRIBE before writing segments onto the wire.
/// Generic over the WebTransport session type `S`.
pub struct MediaRunner<S: WTSession> {
// Cloneable handle for opening outgoing object streams on the session.
send_objects: object::Sender<S>,
// Outgoing MOQT control messages, routed through `SessionRunner`.
outgoing_ctl_sender: mpsc::Sender<Message>,
// Incoming MOQT control messages (broadcast copy from `SessionRunner`).
incoming_ctl_receiver: broadcast::Receiver<Message>,
// The tracks to serve; empty until `announce()` installs a source.
source: Arc<MapSource>,
}
impl<S: WTSession> MediaRunner<S> {
/// Creates a runner from the session handles provided by `SessionRunner`.
/// The incoming object receiver is discarded: a publisher does not expect
/// to receive objects. The source starts empty until `announce()` is called.
pub async fn new(
send_objects: object::Sender<S>,
outgoing: mpsc::Sender<Message>,
incoming: (broadcast::Receiver<Message>, broadcast::Receiver<Object>),
) -> anyhow::Result<Self> {
let outgoing_ctl_sender = outgoing;
let (incoming_ctl_receiver, _incoming_obj_receiver) = incoming;
Ok(Self {
send_objects,
outgoing_ctl_sender,
incoming_ctl_receiver,
source: Arc::new(MapSource::default()),
})
}
/// Sends ANNOUNCE for `namespace` and installs `source` as the track map,
/// then blocks until the peer replies with AnnounceOk (success) or
/// AnnounceError (returns an error). Unrelated messages are skipped.
pub async fn announce(&mut self, namespace: &str, source: Arc<media::MapSource>) -> anyhow::Result<()> {
debug!("media_runner.announce()");
// Only allow one source at a time for now?
self.source = source;
// ANNOUNCE the namespace
self.outgoing_ctl_sender
.send(Message::Announce(Announce {
track_namespace: namespace.to_string(),
}))
.await?;
// wait for the go ahead
loop {
match self.incoming_ctl_receiver.recv().await? {
Message::AnnounceOk(_) => {
break;
}
Message::AnnounceError(announce_error) => {
error!(
"Failed to announce namespace '{}' with error code '{}' and reason '{}'",
&namespace, &announce_error.code, &announce_error.reason
);
// TODO: Think about how to recover here? Retry?
bail!("Failed to announce namespace");
}
_ => {
// TODO: work out how to ignore unknown/unrelated messages here without consuming them prematurely
}
}
}
Ok(())
}
/// Serves subscriptions until a task fails or all tasks finish.
///
/// Spawns one sender task per track (each blocked until it receives a
/// track_id over its dispatch channel) plus one dispatcher task that
/// answers SUBSCRIBE messages with SubscribeOk/SubscribeError and unblocks
/// the matching track task.
pub async fn run(&mut self) -> anyhow::Result<()> {
debug!("media_runner.run()");
let source = self.source.clone();
let mut join_set: JoinSet<anyhow::Result<()>> = tokio::task::JoinSet::new();
// Maps track name -> channel used to hand the subscriber's track_id
// to that track's pre-spawned sender task.
let mut track_dispatcher: HashMap<String, tokio::sync::mpsc::Sender<VarInt>> = HashMap::new();
let mut incoming_ctl_receiver = self.incoming_ctl_receiver.resubscribe();
let outgoing_ctl_sender = self.outgoing_ctl_sender.clone();
// Pre-spawn tasks for each track we have
// and let them .await on receiving the go ahead via a channel
for (track_name, track) in source.0.iter() {
let (sender, mut receiver) = tokio::sync::mpsc::channel(1);
track_dispatcher.insert(track_name.to_string(), sender);
let mut objects = self.send_objects.clone();
let mut track = track.clone();
join_set.spawn(async move {
// Block here until the dispatcher task forwards a SUBSCRIBE's track_id.
let track_id = receiver.recv().await.ok_or(anyhow::anyhow!("channel closed"))?;
// TODO: validate track_id is valid (not already in use), for now just trust subscribers are correct
loop {
let mut segment = track.next_segment().await?;
debug!("segment: {:?}", &segment);
let object = Object {
track: track_id,
group: segment.sequence,
sequence: VarInt::from_u32(0), // Always zero since we send an entire group as an object
send_order: segment.send_order,
};
debug!("object: {:?}", &object);
let mut stream = objects.open(object).await?;
// Write each fragment as they are available.
while let Some(fragment) = segment.fragments.next().await {
stream.write_all(&fragment).await?;
}
}
});
}
// Dispatcher: answer SUBSCRIBE requests and wake the matching track task.
join_set.spawn(async move {
loop {
if let Message::Subscribe(subscribe) = incoming_ctl_receiver.recv().await? {
debug!("Received a subscription request");
let track_id = subscribe.track_id;
let track_name = subscribe.track_name;
debug!("Looking up track_name: {} (track_id: {})", &track_name, &track_id);
// Look up track in source
match source.0.get(&track_name.to_string()) {
None => {
// if track !exist, send subscribe error
outgoing_ctl_sender
.send(Message::SubscribeError(SubscribeError {
track_id: subscribe.track_id,
code: moq_transport::VarInt::from_u32(1),
reason: "Only bad reasons (don't know what that track is)".to_string(),
}))
.await?;
}
// if track exists, send go-ahead signal to unblock task to send data to subscriber
Some(track) => {
debug!("We have the track! (Good news everyone)");
track_dispatcher
.get(&track.name)
.ok_or(anyhow::anyhow!("missing task for track"))?
.send(track_id)
.await?;
outgoing_ctl_sender
.send(Message::SubscribeOk(SubscribeOk {
track_id: subscribe.track_id,
expires: Some(VarInt::from_u32(0)), // valid until unsubscribed
}))
.await?;
}
};
}
}
});
// Task errors are logged but not propagated here; we simply drain the set.
while let Some(res) = join_set.join_next().await {
debug!("MediaRunner task finished with result: {:?}", &res);
}
Ok(())
}
}

View File

@ -1,122 +0,0 @@
use crate::cli::Config;
use anyhow::Context;
use log::debug;
use moq_transport::{object, Object};
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::task::JoinSet;
/// Owns the MOQT session over WebTransport/QUIC and shuttles messages
/// between the session and the rest of the application via channels.
///
/// Outgoing control messages arrive on an `mpsc` channel; incoming control
/// messages and object headers are fanned out on `broadcast` channels so
/// multiple consumers (e.g. `MediaRunner`, `LogViewer`) can observe them.
pub struct SessionRunner {
moq_transport_session: moq_transport::Session<webtransport_quinn::Session>,
// Handed to producers of outgoing control messages (see `get_outgoing_senders`).
outgoing_ctl_sender: mpsc::Sender<moq_transport::Message>,
// Drained by `run()` and written to the session's control stream.
outgoing_ctl_receiver: mpsc::Receiver<moq_transport::Message>,
// Fan-out of control messages read from the session.
incoming_ctl_sender: broadcast::Sender<moq_transport::Message>,
// Fan-out of incoming object headers (not their payload streams).
incoming_obj_sender: broadcast::Sender<Object>,
}
impl SessionRunner {
/// Dials `config.uri` over QUIC + WebTransport using the platform's native
/// root certificates, performs the MoQ handshake with `Role::Both`, and
/// sets up the internal channels (all with a small fixed capacity of 5).
///
/// Panics if the platform certificate store cannot be loaded or a root
/// certificate cannot be added.
pub async fn new(config: &Config) -> anyhow::Result<Self> {
let mut roots = rustls::RootCertStore::empty();
for cert in rustls_native_certs::load_native_certs().expect("could not load platform certs") {
roots.add(&rustls::Certificate(cert.0)).unwrap();
}
let mut tls_config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(roots)
.with_no_client_auth();
tls_config.alpn_protocols = vec![webtransport_quinn::ALPN.to_vec()]; // this one is important
let arc_tls_config = std::sync::Arc::new(tls_config);
let quinn_client_config = quinn::ClientConfig::new(arc_tls_config);
let mut endpoint = quinn::Endpoint::client(config.bind_address)?;
endpoint.set_default_client_config(quinn_client_config);
let webtransport_session = webtransport_quinn::connect(&endpoint, &config.uri)
.await
.context("failed to create WebTransport session")?;
let moq_transport_session =
moq_transport::Session::connect(webtransport_session, moq_transport::setup::Role::Both)
.await
.context("failed to create MoQ Transport session")?;
// outgoing ctl msgs
let (outgoing_ctl_sender, outgoing_ctl_receiver) = mpsc::channel(5);
// incoming ctl msg
let (incoming_ctl_sender, _incoming_ctl_receiver) = broadcast::channel(5);
// incoming objs
let (incoming_obj_sender, _incoming_obj_receiver) = broadcast::channel(5);
Ok(SessionRunner {
moq_transport_session,
outgoing_ctl_sender,
outgoing_ctl_receiver,
incoming_ctl_sender,
incoming_obj_sender,
})
}
/// Returns a clone of the sender for outgoing control messages.
pub async fn get_outgoing_senders(&self) -> mpsc::Sender<moq_transport::Message> {
self.outgoing_ctl_sender.clone()
}
/// Returns fresh broadcast receivers for (control messages, object headers).
/// Each caller gets its own independent copy of the streams.
pub async fn get_incoming_receivers(
&self,
) -> (
broadcast::Receiver<moq_transport::Message>,
broadcast::Receiver<moq_transport::Object>,
) {
(
self.incoming_ctl_sender.subscribe(),
self.incoming_obj_sender.subscribe(),
)
}
/// Consumes the runner and pumps messages until any pump task errors.
///
/// Three tasks: send outgoing control messages, fan out incoming control
/// messages, and fan out incoming object headers. The first task to fail
/// ends the whole runner with its error.
pub async fn run(mut self) -> anyhow::Result<()> {
debug!("session_runner.run()");
let mut join_set: JoinSet<anyhow::Result<()>> = tokio::task::JoinSet::new();
// Send outgoing control messages
join_set.spawn(async move {
loop {
let msg = self
.outgoing_ctl_receiver
.recv()
.await
.ok_or(anyhow::anyhow!("error receiving outbound control message"))?;
debug!("Sending outgoing MOQT Control Message: {:?}", &msg);
self.moq_transport_session.send_control.send(msg).await?;
}
});
// Route incoming Control messages
join_set.spawn(async move {
loop {
let msg = self.moq_transport_session.recv_control.recv().await?;
self.incoming_ctl_sender.send(msg)?;
}
});
// Route incoming Objects headers
// NOTE: Only sends the headers for incoming objects, not the associated streams
// We don't currently expose any way to read incoming bytestreams because we don't expect any
join_set.spawn(async move {
loop {
let receive_stream = self.moq_transport_session.recv_objects.recv().await?;
self.incoming_obj_sender.send(receive_stream.0)?;
}
});
while let Some(res) = join_set.join_next().await {
debug!("SessionRunner task finished with result: {:?}", &res);
let _ = res?; // if we finish, it'll be with an error, which we can return
}
Ok(())
}
/// Returns a clone of the handle used to open outgoing object streams.
pub async fn get_send_objects(&self) -> object::Sender<webtransport_quinn::Session> {
self.moq_transport_session.send_objects.clone()
}
}

View File

@ -1,5 +1,5 @@
[package]
name = "moq-quinn"
name = "moq-relay"
description = "Media over QUIC"
authors = ["Luke Curley"]
repository = "https://github.com/kixelated/moq-rs"
@ -11,14 +11,8 @@ edition = "2021"
keywords = ["quic", "http3", "webtransport", "media", "live"]
categories = ["multimedia", "network-programming", "web-programming"]
default-run = "moq-quinn"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
moq-transport = { path = "../moq-transport" }
moq-warp = { path = "../moq-warp" }
# QUIC
quinn = "0.10"
@ -42,3 +36,5 @@ clap = { version = "4.0", features = ["derive"] }
log = { version = "0.4", features = ["std"] }
env_logger = "0.9.3"
anyhow = "1.0.70"
tracing = "0.1"
tracing-subscriber = "0.3.0"

17
moq-relay/README.md Normal file
View File

@ -0,0 +1,17 @@
# moq-relay
A server that connects publishing clients to subscribing clients.
All subscriptions are deduplicated and cached, so that a single publisher can serve many subscribers.
## Usage
The publisher must choose a unique name for their broadcast, sent as the WebTransport path when connecting to the server.
We currently do a dumb string comparison, so capitalization matters as do slashes.
For example: `CONNECT https://relay.quic.video/BigBuckBunny`
The MoqTransport handshake includes a `role` parameter, which must be `publisher` or `subscriber`.
The specification allows a `both` role but you'll get an error.
You can have one publisher and any number of subscribers connected to the same path.
If the publisher disconnects, then all subscribers receive an error and will not get updates, even if a new publisher reuses the path.

23
moq-relay/src/config.rs Normal file
View File

@ -0,0 +1,23 @@
use std::{net, path};
use clap::Parser;
/// Command-line configuration for the moq-relay server.
// NOTE(review): the previous doc comment ("Search for a pattern in a file…")
// was a leftover clap template and did not describe this struct.
#[derive(Parser, Clone)]
pub struct Config {
/// Listen on this address
#[arg(long, default_value = "[::]:4443")]
pub bind: net::SocketAddr,
/// Use the certificate file at this path
#[arg(long)]
pub cert: path::PathBuf,
/// Use the private key at this path
#[arg(long)]
pub key: path::PathBuf,
/// Listen on HTTPS and serve /fingerprint, for self-signed certificates
#[arg(long, action)]
pub fingerprint: bool,
}

View File

@ -1,59 +1,45 @@
use std::{fs, io, net, path, sync};
use std::{fs, io, sync};
use anyhow::Context;
use clap::Parser;
use ring::digest::{digest, SHA256};
use warp::Filter;
mod config;
mod server;
use server::*;
mod session;
/// Search for a pattern in a file and display the lines that contain it.
#[derive(Parser, Clone)]
struct Cli {
/// Listen on this address
#[arg(short, long, default_value = "[::]:4443")]
addr: net::SocketAddr,
/// Use the certificate file at this path
#[arg(short, long, default_value = "cert/localhost.crt")]
cert: path::PathBuf,
/// Use the private key at this path
#[arg(short, long, default_value = "cert/localhost.key")]
key: path::PathBuf,
}
pub use config::*;
pub use server::*;
pub use session::*;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
env_logger::init();
let args = Cli::parse();
// Disable tracing so we don't get a bunch of Quinn spam.
let tracer = tracing_subscriber::FmtSubscriber::builder()
.with_max_level(tracing::Level::WARN)
.finish();
tracing::subscriber::set_global_default(tracer).unwrap();
// Create a web server to serve the fingerprint
let serve = serve_http(args.clone());
let config = Config::parse();
// Create a server to actually serve the media
let config = ServerConfig {
addr: args.addr,
cert: args.cert,
key: args.key,
};
let server = Server::new(config).context("failed to create server")?;
let server = Server::new(config.clone()).context("failed to create server")?;
// Run all of the above
tokio::select! {
res = server.run() => res.context("failed to run server"),
res = serve => res.context("failed to run HTTP server"),
res = serve_http(config), if config.fingerprint => res.context("failed to run HTTP server"),
}
}
// Run a HTTP server using Warp
// TODO remove this when Chrome adds support for self-signed certificates using WebTransport
async fn serve_http(args: Cli) -> anyhow::Result<()> {
async fn serve_http(config: Config) -> anyhow::Result<()> {
// Read the PEM certificate file
let crt = fs::File::open(&args.cert)?;
let crt = fs::File::open(&config.cert)?;
let mut crt = io::BufReader::new(crt);
// Parse the DER certificate
@ -75,9 +61,9 @@ async fn serve_http(args: Cli) -> anyhow::Result<()> {
warp::serve(routes)
.tls()
.cert_path(args.cert)
.key_path(args.key)
.run(args.addr)
.cert_path(config.cert)
.key_path(config.key)
.run(config.bind)
.await;
Ok(())

View File

@ -1,29 +1,30 @@
use std::{fs, io, net, path, sync, time};
use std::{
collections::HashMap,
fs, io,
sync::{Arc, Mutex},
time,
};
use anyhow::Context;
use moq_warp::relay;
use moq_transport::model::broadcast;
use tokio::task::JoinSet;
use crate::{Config, Session};
pub struct Server {
server: quinn::Endpoint,
// The media sources.
broker: relay::Broker,
// The active connections.
conns: JoinSet<anyhow::Result<()>>,
}
pub struct ServerConfig {
pub addr: net::SocketAddr,
pub cert: path::PathBuf,
pub key: path::PathBuf,
// The map of active broadcasts by path.
broadcasts: Arc<Mutex<HashMap<String, broadcast::Subscriber>>>,
}
impl Server {
// Create a new server
pub fn new(config: ServerConfig) -> anyhow::Result<Self> {
pub fn new(config: Config) -> anyhow::Result<Self> {
// Read the PEM certificate chain
let certs = fs::File::open(config.cert).context("failed to open cert file")?;
let mut certs = io::BufReader::new(certs);
@ -51,21 +52,25 @@ impl Server {
tls_config.max_early_data_size = u32::MAX;
tls_config.alpn_protocols = vec![webtransport_quinn::ALPN.to_vec()];
let mut server_config = quinn::ServerConfig::with_crypto(sync::Arc::new(tls_config));
let mut server_config = quinn::ServerConfig::with_crypto(Arc::new(tls_config));
// Enable BBR congestion control
// TODO validate the implementation
let mut transport_config = quinn::TransportConfig::default();
transport_config.keep_alive_interval(Some(time::Duration::from_secs(2)));
transport_config.congestion_controller_factory(sync::Arc::new(quinn::congestion::BbrConfig::default()));
transport_config.congestion_controller_factory(Arc::new(quinn::congestion::BbrConfig::default()));
server_config.transport = sync::Arc::new(transport_config);
let server = quinn::Endpoint::server(server_config, config.addr)?;
let broker = relay::Broker::new();
server_config.transport = Arc::new(transport_config);
let server = quinn::Endpoint::server(server_config, config.bind)?;
let broadcasts = Default::default();
let conns = JoinSet::new();
Ok(Self { server, broker, conns })
Ok(Self {
server,
broadcasts,
conns,
})
}
pub async fn run(mut self) -> anyhow::Result<()> {
@ -73,44 +78,16 @@ impl Server {
tokio::select! {
res = self.server.accept() => {
let conn = res.context("failed to accept QUIC connection")?;
let broker = self.broker.clone();
self.conns.spawn(async move { Self::handle(conn, broker).await });
let mut session = Session::new(self.broadcasts.clone());
self.conns.spawn(async move { session.run(conn).await });
},
res = self.conns.join_next(), if !self.conns.is_empty() => {
let res = res.expect("no tasks").expect("task aborted");
if let Err(err) = res {
log::error!("connection terminated: {:?}", err);
log::warn!("connection terminated: {:?}", err);
}
},
}
}
}
async fn handle(conn: quinn::Connecting, broker: relay::Broker) -> anyhow::Result<()> {
// Wait for the QUIC connection to be established.
let conn = conn.await.context("failed to establish QUIC connection")?;
// Wait for the CONNECT request.
let request = webtransport_quinn::accept(conn)
.await
.context("failed to receive WebTransport request")?;
// TODO parse the request URI
// Accept the CONNECT request.
let session = request
.ok()
.await
.context("failed to respond to WebTransport request")?;
// Perform the MoQ handshake.
let session = moq_transport::Session::accept(session, moq_transport::setup::Role::Both)
.await
.context("failed to perform MoQ handshake")?;
// Run the relay code.
let session = relay::Session::new(session, broker);
session.run().await
}
}

96
moq-relay/src/session.rs Normal file
View File

@ -0,0 +1,96 @@
use std::{
collections::{hash_map, HashMap},
sync::{Arc, Mutex},
};
use anyhow::Context;
use moq_transport::{model::broadcast, session::Request, setup::Role};
#[derive(Clone)]
pub struct Session {
broadcasts: Arc<Mutex<HashMap<String, broadcast::Subscriber>>>,
}
impl Session {
pub fn new(broadcasts: Arc<Mutex<HashMap<String, broadcast::Subscriber>>>) -> Self {
Self { broadcasts }
}
pub async fn run(&mut self, conn: quinn::Connecting) -> anyhow::Result<()> {
// Wait for the QUIC connection to be established.
let conn = conn.await.context("failed to establish QUIC connection")?;
// Wait for the CONNECT request.
let request = webtransport_quinn::accept(conn)
.await
.context("failed to receive WebTransport request")?;
let path = request.uri().path().to_string();
// Accept the CONNECT request.
let session = request
.ok()
.await
.context("failed to respond to WebTransport request")?;
// Perform the MoQ handshake.
let request = moq_transport::session::Server::accept(session)
.await
.context("failed to accept handshake")?;
let role = request.role();
match role {
Role::Publisher => self.serve_publisher(request, &path).await,
Role::Subscriber => self.serve_subscriber(request, &path).await,
Role::Both => request.reject(300),
};
Ok(())
}
async fn serve_publisher(&mut self, request: Request, path: &str) {
log::info!("publisher: path={}", path);
let (publisher, subscriber) = broadcast::new();
match self.broadcasts.lock().unwrap().entry(path.to_string()) {
hash_map::Entry::Occupied(_) => return request.reject(409),
hash_map::Entry::Vacant(entry) => entry.insert(subscriber),
};
if let Err(err) = self.run_publisher(request, publisher).await {
log::warn!("pubisher error: path={} err={:?}", path, err);
}
self.broadcasts.lock().unwrap().remove(path);
}
async fn run_publisher(&mut self, request: Request, publisher: broadcast::Publisher) -> anyhow::Result<()> {
let session = request.subscriber(publisher).await?;
session.run().await?;
Ok(())
}
async fn serve_subscriber(&mut self, request: Request, path: &str) {
log::info!("subscriber: path={}", path);
let broadcast = match self.broadcasts.lock().unwrap().get(path) {
Some(broadcast) => broadcast.clone(),
None => {
return request.reject(404);
}
};
if let Err(err) = self.run_subscriber(request, broadcast).await {
log::warn!("subscriber error: path={} err={:?}", path, err);
}
}
async fn run_subscriber(&mut self, request: Request, broadcast: broadcast::Subscriber) -> anyhow::Result<()> {
let session = request.publisher(broadcast).await?;
session.run().await?;
Ok(())
}
}

View File

@ -5,7 +5,7 @@ authors = ["Luke Curley"]
repository = "https://github.com/kixelated/moq-rs"
license = "MIT OR Apache-2.0"
version = "0.1.0"
version = "0.2.0"
edition = "2021"
keywords = ["quic", "http3", "webtransport", "media", "live"]
@ -18,5 +18,9 @@ categories = ["multimedia", "network-programming", "web-programming"]
bytes = "1.4"
thiserror = "1"
anyhow = "1"
webtransport-generic = "0.5"
tokio = { version = "1.27", features = ["macros", "io-util", "rt", "sync"] }
tokio = { version = "1.27", features = ["macros", "io-util", "sync"] }
log = "0.4"
indexmap = "2"
quinn = "0.10"
webtransport-quinn = "0.5.2"

10
moq-transport/README.md Normal file
View File

@ -0,0 +1,10 @@
[![Documentation](https://docs.rs/moq-transport/badge.svg)](https://docs.rs/moq-transport/)
[![Crates.io](https://img.shields.io/crates/v/moq-transport.svg)](https://crates.io/crates/moq-transport)
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE-MIT)
# moq-transport
A Rust implementation of the proposed IETF standard.
[Specification](https://datatracker.ietf.org/doc/draft-ietf-moq-transport/)
[Github](https://github.com/moq-wg/moq-transport)

View File

@ -1,8 +1,14 @@
use super::VarInt;
use super::{BoundsExceeded, VarInt};
use std::str;
use thiserror::Error;
// I'm too lazy to add these trait bounds to every message type.
// TODO Use trait aliases when they're stable, or add these bounds to every method.
pub trait AsyncRead: tokio::io::AsyncRead + Unpin + Send {}
impl AsyncRead for webtransport_quinn::RecvStream {}
/// A decode error.
#[derive(Error, Debug)]
pub enum DecodeError {
#[error("unexpected end of buffer")]
@ -14,6 +20,9 @@ pub enum DecodeError {
#[error("invalid type: {0:?}")]
InvalidType(VarInt),
#[error("varint bounds exceeded")]
BoundsExceeded(#[from] BoundsExceeded),
#[error("io error: {0}")]
IoError(#[from] std::io::Error),
}

View File

@ -2,11 +2,20 @@ use super::BoundsExceeded;
use thiserror::Error;
// I'm too lazy to add these trait bounds to every message type.
// TODO Use trait aliases when they're stable, or add these bounds to every method.
pub trait AsyncWrite: tokio::io::AsyncWrite + Unpin + Send {}
impl AsyncWrite for webtransport_quinn::SendStream {}
/// An encode error.
#[derive(Error, Debug)]
pub enum EncodeError {
#[error("varint too large")]
BoundsExceeded(#[from] BoundsExceeded),
#[error("invalid value")]
InvalidValue,
#[error("i/o error: {0}")]
IoError(#[from] std::io::Error),
}

View File

@ -1,20 +1,22 @@
use std::cmp::min;
use crate::coding::{AsyncRead, AsyncWrite};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use webtransport_generic::{RecvStream, SendStream};
use crate::VarInt;
use super::{DecodeError, EncodeError};
pub async fn encode_string<W: SendStream>(s: &str, w: &mut W) -> Result<(), EncodeError> {
/// Encode a string with a varint length prefix.
pub async fn encode_string<W: AsyncWrite>(s: &str, w: &mut W) -> Result<(), EncodeError> {
let size = VarInt::try_from(s.len())?;
size.encode(w).await?;
w.write_all(s.as_ref()).await?;
Ok(())
}
pub async fn decode_string<R: RecvStream>(r: &mut R) -> Result<String, DecodeError> {
/// Decode a string with a varint length prefix.
pub async fn decode_string<R: AsyncRead>(r: &mut R) -> Result<String, DecodeError> {
let size = VarInt::decode(r).await?.into_inner();
let mut str = String::with_capacity(min(1024, size) as usize);
r.take(size).read_to_string(&mut str).await?;

View File

@ -5,14 +5,14 @@
use std::convert::{TryFrom, TryInto};
use std::fmt;
use crate::coding::{AsyncRead, AsyncWrite};
use thiserror::Error;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use webtransport_generic::{RecvStream, SendStream};
use super::{DecodeError, EncodeError};
#[derive(Debug, Copy, Clone, Eq, PartialEq, Error)]
#[error("value too large for varint encoding")]
#[error("value out of range")]
pub struct BoundsExceeded;
/// An integer less than 2^62
@ -24,8 +24,12 @@ pub struct BoundsExceeded;
pub struct VarInt(u64);
impl VarInt {
/// The largest possible value.
pub const MAX: Self = Self((1 << 62) - 1);
/// The smallest possible value.
pub const ZERO: Self = Self(0);
/// Construct a `VarInt` infallibly using the largest available type.
/// Larger values need to use `try_from` instead.
pub const fn from_u32(x: u32) -> Self {
@ -109,6 +113,45 @@ impl TryFrom<usize> for VarInt {
}
}
impl TryFrom<VarInt> for u32 {
	type Error = BoundsExceeded;

	/// Succeeds iff `x` < 2^32
	fn try_from(x: VarInt) -> Result<Self, BoundsExceeded> {
		// Delegate to the standard narrowing conversion, translating its error.
		u32::try_from(x.0).map_err(|_| BoundsExceeded)
	}
}
impl TryFrom<VarInt> for u16 {
	type Error = BoundsExceeded;

	/// Succeeds iff `x` < 2^16
	fn try_from(x: VarInt) -> Result<Self, BoundsExceeded> {
		// Guard clause: anything above u16::MAX is out of range.
		if x.0 > u16::MAX as u64 {
			return Err(BoundsExceeded);
		}

		Ok(x.0 as u16)
	}
}
impl TryFrom<VarInt> for u8 {
	type Error = BoundsExceeded;

	/// Succeeds iff `x` < 2^8
	fn try_from(x: VarInt) -> Result<Self, BoundsExceeded> {
		// Delegate to the standard narrowing conversion, translating its error.
		u8::try_from(x.0).map_err(|_| BoundsExceeded)
	}
}
impl fmt::Debug for VarInt {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
@ -122,7 +165,8 @@ impl fmt::Display for VarInt {
}
impl VarInt {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
/// Decode a varint from the given reader.
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let mut buf = [0u8; 8];
r.read_exact(buf[0..1].as_mut()).await?;
@ -149,7 +193,8 @@ impl VarInt {
Ok(Self(x))
}
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
/// Encode a varint to the given writer.
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
let x = self.0;
if x < 2u64.pow(6) {
w.write_u8(x as u8).await?;
@ -166,3 +211,10 @@ impl VarInt {
Ok(())
}
}
// This is a fork of quinn::VarInt.
impl From<quinn::VarInt> for VarInt {
fn from(v: quinn::VarInt) -> Self {
Self(v.into_inner())
}
}

View File

@ -0,0 +1,76 @@
use thiserror::Error;

use crate::VarInt;

/// A MoQTransport error with an associated error code.
#[derive(Copy, Clone, Debug, Error)]
pub enum Error {
	/// A clean termination, represented as error code 0.
	/// This error is automatically used when publishers or subscribers are dropped without calling close.
	#[error("closed")]
	Closed,

	/// An ANNOUNCE_RESET or SUBSCRIBE_RESET was sent by the publisher.
	#[error("reset code={0:?}")]
	Reset(u32),

	/// An ANNOUNCE_STOP or SUBSCRIBE_STOP was sent by the subscriber.
	#[error("stop")]
	Stop,

	/// The requested resource was not found.
	#[error("not found")]
	NotFound,

	/// A resource already exists with that ID.
	#[error("duplicate")]
	Duplicate,

	/// The role negotiated in the handshake was violated. For example, a publisher sent a SUBSCRIBE, or a subscriber sent an OBJECT.
	#[error("role violation: msg={0}")]
	Role(VarInt),

	/// An error occurred while reading from the QUIC stream.
	#[error("failed to read from stream")]
	Read,

	/// An error occurred while writing to the QUIC stream.
	#[error("failed to write to stream")]
	Write,

	/// An unclassified error because I'm lazy. TODO classify these errors
	#[error("unknown error")]
	Unknown,
}
impl Error {
/// An integer code that is sent over the wire.
pub fn code(&self) -> u32 {
match self {
Self::Closed => 0,
Self::Reset(code) => *code,
Self::Stop => 206,
Self::NotFound => 404,
Self::Role(_) => 405,
Self::Duplicate => 409,
Self::Unknown => 500,
Self::Write => 501,
Self::Read => 502,
}
}
/// A reason that is sent over the wire.
pub fn reason(&self) -> &str {
match self {
Self::Closed => "closed",
Self::Reset(_) => "reset",
Self::Stop => "stop",
Self::NotFound => "not found",
Self::Duplicate => "duplicate",
Self::Role(_msg) => "role violation",
Self::Unknown => "unknown",
Self::Read => "read error",
Self::Write => "write error",
}
}
}

View File

@ -1,10 +1,20 @@
//! An implementation of the MoQ Transport protocol.
//!
//! MoQ Transport is a pub/sub protocol over QUIC.
//! While originally designed for live media, MoQ Transport is generic and can be used for other live applications.
//! The specification is a work in progress and will change.
//! See the [specification](https://datatracker.ietf.org/doc/draft-ietf-moq-transport/) and [github](https://github.com/moq-wg/moq-transport) for any updates.
//!
//! **FORKED**: This implementation makes extensive changes to the protocol.
//! See [KIXEL_00](crate::setup::Version::KIXEL_00) for a list of differences.
//! Many of these will get merged into the specification, so don't panic.
mod coding;
mod error;
pub mod message;
pub mod object;
pub mod model;
pub mod session;
pub mod setup;
pub use coding::VarInt;
pub use message::Message;
pub use object::Object;
pub use session::Session;
pub use error::*;

View File

@ -1,21 +1,22 @@
use crate::coding::{decode_string, encode_string, DecodeError, EncodeError};
use webtransport_generic::{RecvStream, SendStream};
use crate::coding::{AsyncRead, AsyncWrite};
/// Sent by the publisher to announce the availability of a group of tracks.
#[derive(Clone, Debug)]
pub struct Announce {
// The track namespace
pub track_namespace: String,
pub namespace: String,
}
impl Announce {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
let track_namespace = decode_string(r).await?;
Ok(Self { track_namespace })
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let namespace = decode_string(r).await?;
Ok(Self { namespace })
}
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
encode_string(&self.track_namespace, w).await?;
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
encode_string(&self.namespace, w).await?;
Ok(())
}
}

View File

@ -1,38 +0,0 @@
use crate::coding::{decode_string, encode_string, DecodeError, EncodeError, VarInt};
use webtransport_generic::{RecvStream, SendStream};
#[derive(Clone, Debug)]
pub struct AnnounceError {
// Echo back the namespace that was announced.
// TODO Propose using an ID to save bytes.
pub track_namespace: String,
// An error code.
pub code: VarInt,
// An optional, human-readable reason.
pub reason: String,
}
impl AnnounceError {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
let track_namespace = decode_string(r).await?;
let code = VarInt::decode(r).await?;
let reason = decode_string(r).await?;
Ok(Self {
track_namespace,
code,
reason,
})
}
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
encode_string(&self.track_namespace, w).await?;
self.code.encode(w).await?;
encode_string(&self.reason, w).await?;
Ok(())
}
}

View File

@ -1,21 +1,20 @@
use crate::coding::{decode_string, encode_string, DecodeError, EncodeError};
use webtransport_generic::{RecvStream, SendStream};
use crate::coding::{decode_string, encode_string, AsyncRead, AsyncWrite, DecodeError, EncodeError};
/// Sent by the subscriber to accept an Announce.
#[derive(Clone, Debug)]
pub struct AnnounceOk {
// Echo back the namespace that was announced.
// TODO Propose using an ID to save bytes.
pub track_namespace: String,
pub namespace: String,
}
impl AnnounceOk {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
let track_namespace = decode_string(r).await?;
Ok(Self { track_namespace })
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let namespace = decode_string(r).await?;
Ok(Self { namespace })
}
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
encode_string(&self.track_namespace, w).await
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
encode_string(&self.namespace, w).await
}
}

View File

@ -0,0 +1,38 @@
use crate::coding::{decode_string, encode_string, DecodeError, EncodeError, VarInt};
use crate::coding::{AsyncRead, AsyncWrite};
/// Sent by the subscriber to reject an Announce.
#[derive(Clone, Debug)]
pub struct AnnounceReset {
	// Echo back the namespace that was reset
	pub namespace: String,

	// An error code.
	pub code: u32,

	// An optional, human-readable reason.
	pub reason: String,
}

impl AnnounceReset {
	/// Read an ANNOUNCE_RESET payload: namespace, varint code, reason.
	pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		let namespace = decode_string(r).await?;
		// The code is a varint on the wire; narrowing it to u32 may fail.
		let code = VarInt::decode(r).await?.try_into()?;
		let reason = decode_string(r).await?;

		Ok(Self { namespace, code, reason })
	}

	/// Write an ANNOUNCE_RESET payload in the same field order as decode.
	pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		encode_string(&self.namespace, w).await?;
		VarInt::from_u32(self.code).encode(w).await?;
		encode_string(&self.reason, w).await
	}
}

View File

@ -0,0 +1,24 @@
use crate::coding::{decode_string, encode_string, DecodeError, EncodeError};
use crate::coding::{AsyncRead, AsyncWrite};
/// Sent by the publisher to terminate an Announce.
#[derive(Clone, Debug)]
pub struct AnnounceStop {
	// Echo back the namespace that was reset
	pub namespace: String,
}

impl AnnounceStop {
	/// Parse an ANNOUNCE_STOP payload, which is just the namespace string.
	pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		Ok(Self {
			namespace: decode_string(r).await?,
		})
	}

	/// Serialize this message as the namespace string.
	pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		encode_string(&self.namespace, w).await
	}
}

View File

@ -1,19 +1,20 @@
use crate::coding::{decode_string, encode_string, DecodeError, EncodeError};
use webtransport_generic::{RecvStream, SendStream};
use crate::coding::{AsyncRead, AsyncWrite};
/// Sent by the server to indicate that the client should connect to a different server.
#[derive(Clone, Debug)]
pub struct GoAway {
pub url: String,
}
impl GoAway {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let url = decode_string(r).await?;
Ok(Self { url })
}
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
encode_string(&self.url, w).await
}
}

View File

@ -1,48 +1,74 @@
//! Low-level message sent over the wire, as defined in the specification.
//!
//! All of these messages are sent over a bidirectional QUIC stream.
//! This introduces some head-of-line blocking but preserves ordering.
//! The only exception are OBJECT "messages", which are sent over dedicated QUIC streams.
//!
//! Messages sent by the publisher:
//! - [Announce]
//! - [AnnounceReset]
//! - [SubscribeOk]
//! - [SubscribeReset]
//! - [Object]
//!
//! Messages sent by the subscriber:
//! - [Subscribe]
//! - [SubscribeStop]
//! - [AnnounceOk]
//! - [AnnounceStop]
//!
//! Example flow:
//! ```test
//! -> ANNOUNCE namespace="foo"
//! <- ANNOUNCE_OK namespace="foo"
//! <- SUBSCRIBE id=0 namespace="foo" name="bar"
//! -> SUBSCRIBE_OK id=0
//! -> OBJECT id=0 sequence=69 priority=4 expires=30
//! -> OBJECT id=0 sequence=70 priority=4 expires=30
//! -> OBJECT id=0 sequence=70 priority=4 expires=30
//! <- SUBSCRIBE_STOP id=0
//! -> SUBSCRIBE_RESET id=0 code=206 reason="closed by peer"
//! ```
mod announce;
mod announce_error;
mod announce_ok;
mod announce_reset;
mod announce_stop;
mod go_away;
mod receiver;
mod sender;
mod object;
mod subscribe;
mod subscribe_error;
mod subscribe_ok;
mod subscribe_reset;
mod subscribe_stop;
pub use announce::*;
pub use announce_error::*;
pub use announce_ok::*;
pub use announce_reset::*;
pub use announce_stop::*;
pub use go_away::*;
pub use receiver::*;
pub use sender::*;
pub use object::*;
pub use subscribe::*;
pub use subscribe_error::*;
pub use subscribe_ok::*;
pub use subscribe_reset::*;
pub use subscribe_stop::*;
use crate::coding::{DecodeError, EncodeError, VarInt};
use std::fmt;
use webtransport_generic::{RecvStream, SendStream};
// NOTE: This is forked from moq-transport-00.
// 1. SETUP role indicates local support ("I can subscribe"), not remote support ("server must publish")
// 2. SETUP_SERVER is id=2 to disambiguate
// 3. messages do not have a specified length.
// 4. messages are sent over a single bidirectional stream (after SETUP), not unidirectional streams.
// 5. SUBSCRIBE specifies the track_id, not SUBSCRIBE_OK
// 6. optional parameters are written in order, and zero when unset (setup, announce, subscribe)
use crate::coding::{AsyncRead, AsyncWrite};
// Use a macro to generate the message types rather than copy-paste.
// This implements a decode/encode method that uses the specified type.
macro_rules! message_types {
{$($name:ident = $val:expr,)*} => {
/// All supported message types.
#[derive(Clone)]
pub enum Message {
$($name($name)),*
}
impl Message {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let t = VarInt::decode(r).await?;
match t.into_inner() {
@ -54,7 +80,7 @@ macro_rules! message_types {
}
}
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
match self {
$(Self::$name(ref m) => {
VarInt::from_u32($val).encode(w).await?;
@ -62,6 +88,22 @@ macro_rules! message_types {
},)*
}
}
pub fn id(&self) -> VarInt {
match self {
$(Self::$name(_) => {
VarInt::from_u32($val)
},)*
}
}
pub fn name(&self) -> &'static str {
match self {
$(Self::$name(_) => {
stringify!($name)
},)*
}
}
}
$(impl From<$name> for Message {
@ -89,9 +131,11 @@ message_types! {
// SetupServer = 0x2
Subscribe = 0x3,
SubscribeOk = 0x4,
SubscribeError = 0x5,
SubscribeReset = 0x5,
SubscribeStop = 0x15,
Announce = 0x6,
AnnounceOk = 0x7,
AnnounceError = 0x8,
AnnounceReset = 0x8,
AnnounceStop = 0x18,
GoAway = 0x10,
}

View File

@ -0,0 +1,70 @@
use std::time;
use crate::coding::{DecodeError, EncodeError, VarInt};
use crate::coding::{AsyncRead, AsyncWrite};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
/// Sent by the publisher as the header of each data stream.
#[derive(Clone, Debug)]
pub struct Object {
	// An ID for this track.
	// Proposal: https://github.com/moq-wg/moq-transport/issues/209
	pub track: VarInt,

	// The sequence number within the track.
	pub sequence: VarInt,

	// The priority, where **larger** values are sent first.
	// Proposal: int32 instead of a varint.
	pub priority: i32,

	// Cache the object for at most this many seconds.
	// None is encoded as zero on the wire, meaning never expire;
	// Some(Duration::ZERO) is rejected by encode as unrepresentable.
	pub expires: Option<time::Duration>,
}
impl Object {
	/// Decode an OBJECT header from the stream.
	///
	/// Wire format: a type varint (must be 0), track ID, sequence number,
	/// a big-endian i32 priority, then the expiry in seconds (0 = never).
	pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		let typ = VarInt::decode(r).await?;
		if typ.into_inner() != 0 {
			return Err(DecodeError::InvalidType(typ));
		}

		// NOTE: size has been omitted

		let track = VarInt::decode(r).await?;
		let sequence = VarInt::decode(r).await?;
		let priority = r.read_i32().await?; // big-endian

		// A zero expiry on the wire maps to None (never expire).
		let expires = match VarInt::decode(r).await?.into_inner() {
			0 => None,
			secs => Some(time::Duration::from_secs(secs)),
		};

		Ok(Self {
			track,
			sequence,
			priority,
			expires,
		})
	}

	/// Encode an OBJECT header to the stream, mirroring [`Self::decode`].
	///
	/// Fails with [`EncodeError::InvalidValue`] for `Some(Duration::ZERO)`,
	/// because zero on the wire already means "never expire".
	pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		VarInt::ZERO.encode(w).await?;
		self.track.encode(w).await?;
		self.sequence.encode(w).await?;
		w.write_i32(self.priority).await?;

		// Round up if there's any decimal points.
		// NOTE: arm order matters — the ZERO check must precede the subsec guard.
		let expires = match self.expires {
			None => 0,
			Some(time::Duration::ZERO) => return Err(EncodeError::InvalidValue), // there's no way of expressing zero currently.
			Some(expires) if expires.subsec_nanos() > 0 => expires.as_secs() + 1,
			Some(expires) => expires.as_secs(),
		};

		VarInt::try_from(expires)?.encode(w).await?;

		Ok(())
	}
}

View File

@ -1,19 +0,0 @@
use crate::{coding::DecodeError, message::Message};
use webtransport_generic::RecvStream;
pub struct Receiver<R: RecvStream> {
stream: R,
}
impl<R: RecvStream> Receiver<R> {
pub fn new(stream: R) -> Self {
Self { stream }
}
// Read the next full message from the stream.
// NOTE: This is not cancellable; you must poll the future to completion.
pub async fn recv(&mut self) -> Result<Message, DecodeError> {
Message::decode(&mut self.stream).await
}
}

View File

@ -1,21 +0,0 @@
use crate::message::Message;
use webtransport_generic::SendStream;
pub struct Sender<S: SendStream> {
stream: S,
}
impl<S: SendStream> Sender<S> {
pub fn new(stream: S) -> Self {
Self { stream }
}
// Read the next full message from the stream.
// NOTE: This is not cancellable; you must poll the future to completion.
pub async fn send<T: Into<Message>>(&mut self, msg: T) -> anyhow::Result<()> {
let msg = msg.into();
msg.encode(&mut self.stream).await?;
Ok(())
}
}

View File

@ -1,39 +1,38 @@
use crate::coding::{decode_string, encode_string, DecodeError, EncodeError, VarInt};
use webtransport_generic::{RecvStream, SendStream};
use crate::coding::{AsyncRead, AsyncWrite};
/// Sent by the subscriber to request all future objects for the given track.
///
/// Objects will use the provided ID instead of the full track name, to save bytes.
#[derive(Clone, Debug)]
pub struct Subscribe {
// An ID we choose so we can map to the track_name.
// Proposal: https://github.com/moq-wg/moq-transport/issues/209
pub track_id: VarInt,
pub id: VarInt,
// The track namespace.
pub track_namespace: String,
pub namespace: String,
// The track name.
pub track_name: String,
pub name: String,
}
impl Subscribe {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
let track_id = VarInt::decode(r).await?;
let track_namespace = decode_string(r).await?;
let track_name = decode_string(r).await?;
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let id = VarInt::decode(r).await?;
let namespace = decode_string(r).await?;
let name = decode_string(r).await?;
Ok(Self {
track_id,
track_namespace,
track_name,
})
Ok(Self { id, namespace, name })
}
}
impl Subscribe {
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
self.track_id.encode(w).await?;
encode_string(&self.track_namespace, w).await?;
encode_string(&self.track_name, w).await?;
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
self.id.encode(w).await?;
encode_string(&self.namespace, w).await?;
encode_string(&self.name, w).await?;
Ok(())
}

View File

@ -1,37 +0,0 @@
use crate::coding::{decode_string, encode_string, DecodeError, EncodeError, VarInt};
use webtransport_generic::{RecvStream, SendStream};
#[derive(Clone, Debug)]
pub struct SubscribeError {
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
// The ID for this track.
pub track_id: VarInt,
// An error code.
pub code: VarInt,
// An optional, human-readable reason.
pub reason: String,
}
impl SubscribeError {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
let track_id = VarInt::decode(r).await?;
let code = VarInt::decode(r).await?;
let reason = decode_string(r).await?;
Ok(Self { track_id, code, reason })
}
}
impl SubscribeError {
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
self.track_id.encode(w).await?;
self.code.encode(w).await?;
encode_string(&self.reason, w).await?;
Ok(())
}
}

View File

@ -1,34 +1,26 @@
use crate::coding::{DecodeError, EncodeError, VarInt};
use webtransport_generic::{RecvStream, SendStream};
use crate::coding::{AsyncRead, AsyncWrite};
/// Sent by the publisher to accept a Subscribe.
#[derive(Clone, Debug)]
pub struct SubscribeOk {
// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209
// The ID for this track.
pub track_id: VarInt,
// The subscription will end after this duration has elapsed.
// A value of zero is invalid.
pub expires: Option<VarInt>,
pub id: VarInt,
}
impl SubscribeOk {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
let track_id = VarInt::decode(r).await?;
let expires = VarInt::decode(r).await?;
let expires = if expires.into_inner() == 0 { None } else { Some(expires) };
Ok(Self { track_id, expires })
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let id = VarInt::decode(r).await?;
Ok(Self { id })
}
}
impl SubscribeOk {
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
self.track_id.encode(w).await?;
self.expires.unwrap_or_default().encode(w).await?;
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
self.id.encode(w).await?;
Ok(())
}
}

View File

@ -0,0 +1,36 @@
use crate::coding::{decode_string, encode_string, DecodeError, EncodeError, VarInt};
use crate::coding::{AsyncRead, AsyncWrite};
/// Sent by the publisher to reject a Subscribe.
#[derive(Clone, Debug)]
pub struct SubscribeReset {
	// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209

	// The ID for this subscription.
	pub id: VarInt,

	// An error code.
	pub code: u32,

	// An optional, human-readable reason.
	pub reason: String,
}

impl SubscribeReset {
	/// Read a SUBSCRIBE_RESET payload: subscription ID, varint code, reason.
	pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		let id = VarInt::decode(r).await?;
		// The code is a varint on the wire; narrowing it to u32 may fail.
		let code = VarInt::decode(r).await?.try_into()?;
		let reason = decode_string(r).await?;

		Ok(Self { id, code, reason })
	}

	/// Write a SUBSCRIBE_RESET payload in the same field order as decode.
	pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		self.id.encode(w).await?;
		VarInt::from_u32(self.code).encode(w).await?;
		encode_string(&self.reason, w).await
	}
}

View File

@ -0,0 +1,26 @@
use crate::coding::{DecodeError, EncodeError, VarInt};
use crate::coding::{AsyncRead, AsyncWrite};
/// Sent by the subscriber to terminate a Subscribe.
#[derive(Clone, Debug)]
pub struct SubscribeStop {
	// NOTE: No full track name because of this proposal: https://github.com/moq-wg/moq-transport/issues/209

	// The ID for this subscription.
	pub id: VarInt,
}

// Merged into a single impl block for consistency with the sibling message
// types (AnnounceStop, SubscribeReset), which keep decode and encode together.
impl SubscribeStop {
	/// Decode a SUBSCRIBE_STOP payload, which is just the subscription ID.
	pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
		let id = VarInt::decode(r).await?;
		Ok(Self { id })
	}

	/// Encode this message as the subscription ID.
	pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
		self.id.encode(w).await?;
		Ok(())
	}
}

View File

@ -0,0 +1,211 @@
//! A broadcast is a collection of tracks, split into two handles: [Publisher] and [Subscriber].
//!
//! The [Publisher] can create tracks, either manually or on request.
//! It receives all requests by a [Subscriber] for tracks that don't exist.
//! The simplest implementation is to close every unknown track with [Error::NotFound].
//!
//! A [Subscriber] can request tracks by name.
//! If the track already exists, it will be returned.
//! If the track doesn't exist, it will be sent to [Unknown] to be handled.
//! A [Subscriber] can be cloned to create multiple subscriptions.
//!
//! The broadcast is automatically closed with [Error::Closed] when [Publisher] is dropped, or all [Subscriber]s are dropped.
use std::{
collections::{hash_map, HashMap, VecDeque},
fmt,
sync::Arc,
};
use crate::Error;
use super::{track, Watch};
/// Create a new broadcast, returning the publisher and subscriber halves.
pub fn new() -> (Publisher, Subscriber) {
	// Both halves share the same watched state.
	let state = Watch::new(State::default());
	(Publisher::new(state.clone()), Subscriber::new(state))
}
/// Dynamic information about the broadcast.
#[derive(Debug)]
struct State {
	// Tracks already published or requested, keyed by track name.
	tracks: HashMap<String, track::Subscriber>,
	// Tracks requested by subscribers, queued for the publisher to fulfill.
	requested: VecDeque<track::Publisher>,
	// Ok while the broadcast is open; holds the terminal error once closed.
	closed: Result<(), Error>,
}
impl State {
	/// Look up a track by name, returning a cached handle if one exists.
	pub fn get(&self, name: &str) -> Result<Option<track::Subscriber>, Error> {
		// Don't check closed, so we can return from cache.
		Ok(self.tracks.get(name).cloned())
	}

	/// Insert a track, failing if the broadcast is closed or the name is taken.
	pub fn insert(&mut self, track: track::Subscriber) -> Result<(), Error> {
		self.closed?;

		match self.tracks.entry(track.name.clone()) {
			hash_map::Entry::Occupied(_) => return Err(Error::Duplicate),
			hash_map::Entry::Vacant(v) => v.insert(track),
		};

		Ok(())
	}

	/// Create a track on demand and queue it for the publisher to fulfill.
	pub fn request(&mut self, name: &str) -> Result<track::Subscriber, Error> {
		self.closed?;

		// Create a new track.
		let (publisher, subscriber) = track::new(name);

		// Insert the track into our Map so we deduplicate future requests.
		self.tracks.insert(name.to_string(), subscriber.clone());

		// Send the track to the Publisher to handle.
		self.requested.push_back(publisher);

		Ok(subscriber)
	}

	/// Whether a requested track is waiting; errors only once closed AND drained.
	pub fn has_next(&self) -> Result<bool, Error> {
		// Check if there's any elements in the queue before checking closed.
		if !self.requested.is_empty() {
			return Ok(true);
		}

		self.closed?;
		Ok(false)
	}

	/// Pop the next requested track; callers must check [`Self::has_next`] first.
	pub fn next(&mut self) -> track::Publisher {
		// We panic instead of erroring to avoid a nasty wakeup loop if you don't call has_next first.
		self.requested.pop_front().expect("no entry in queue")
	}

	/// Mark the broadcast closed with `err`; fails if it was already closed.
	pub fn close(&mut self, err: Error) -> Result<(), Error> {
		self.closed?;
		self.closed = Err(err);
		Ok(())
	}
}
impl Default for State {
	/// An open broadcast with no tracks and no pending requests.
	fn default() -> Self {
		Self {
			tracks: HashMap::new(),
			requested: VecDeque::new(),
			closed: Ok(()),
		}
	}
}
/// Publish new tracks for a broadcast by name.
// TODO remove Clone
#[derive(Clone)]
pub struct Publisher {
	// Shared broadcast state, accessed through the Watch lock.
	state: Watch<State>,
	// Closes the broadcast when the last clone of this handle is dropped.
	_dropped: Arc<Dropped>,
}
impl Publisher {
	// Wrap the shared state and register the drop guard.
	fn new(state: Watch<State>) -> Self {
		let _dropped = Arc::new(Dropped::new(state.clone()));
		Self { state, _dropped }
	}

	/// Create a new track with the given name, inserting it into the broadcast.
	pub fn create_track(&mut self, name: &str) -> Result<track::Publisher, Error> {
		let (publisher, subscriber) = track::new(name);
		self.state.lock_mut().insert(subscriber)?;
		Ok(publisher)
	}

	/// Insert a track into the broadcast.
	pub fn insert_track(&mut self, track: track::Subscriber) -> Result<(), Error> {
		self.state.lock_mut().insert(track)
	}

	/// Block until the next track requested by a subscriber.
	// NOTE(review): as written, Ok(None) is never produced — once the broadcast
	// is closed and the queue drained, has_next() surfaces the close as Err.
	pub async fn next_track(&mut self) -> Result<Option<track::Publisher>, Error> {
		loop {
			// Scope the lock so it is released before awaiting the notify.
			let notify = {
				let state = self.state.lock();
				if state.has_next()? {
					// into_mut() gives mutable access to pop the queued request.
					return Ok(Some(state.into_mut().next()));
				}

				state.changed()
			};

			// Wake when the state changes and re-check the queue.
			notify.await;
		}
	}

	/// Close the broadcast with an error.
	pub fn close(self, err: Error) -> Result<(), Error> {
		self.state.lock_mut().close(err)
	}
}
impl fmt::Debug for Publisher {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Publisher").field("state", &self.state).finish()
}
}
/// Subscribe to a broadcast by requesting tracks.
///
/// This can be cloned to create handles.
#[derive(Clone)]
pub struct Subscriber {
    // Shared broadcast state; waiters are notified when it changes.
    state: Watch<State>,

    // Closes the broadcast when the last handle is dropped.
    _dropped: Arc<Dropped>,
}

impl Subscriber {
    fn new(state: Watch<State>) -> Self {
        let _dropped = Arc::new(Dropped::new(state.clone()));
        Self { state, _dropped }
    }

    /// Get a track from the broadcast by name.
    ///
    /// If the track does not exist, it will be created and potentially fulfilled
    /// by the publisher (via next_track).
    /// NOTE(review): the previous doc claimed [Error::NotFound] is returned for
    /// a missing track, but this code always requests one instead; errors only
    /// surface from the underlying state (e.g. a closed broadcast).
    pub fn get_track(&self, name: &str) -> Result<track::Subscriber, Error> {
        let state = self.state.lock();
        if let Some(track) = state.get(name)? {
            return Ok(track);
        }

        // Request a new track if it does not exist.
        state.into_mut().request(name)
    }
}

impl fmt::Debug for Subscriber {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Subscriber").field("state", &self.state).finish()
    }
}
// A handle that closes the broadcast when dropped:
// - when all Subscribers are dropped or
// - when Publisher and Unknown are dropped.
struct Dropped {
state: Watch<State>,
}
impl Dropped {
fn new(state: Watch<State>) -> Self {
Self { state }
}
}
impl Drop for Dropped {
fn drop(&mut self) {
self.state.lock_mut().close(Error::Closed).ok();
}
}

View File

@ -0,0 +1,11 @@
//! Allows a publisher to push updates, automatically caching and fanning it out to any subscribers.
//!
//! The naming scheme doesn't match the spec because it's vague and confusing.
//! The hierarchy is: [broadcast] -> [track] -> [segment] -> [Bytes](bytes::Bytes)
pub mod broadcast;
pub mod segment;
pub mod track;
pub(crate) mod watch;
pub(crate) use watch::*;

View File

@ -0,0 +1,215 @@
//! A segment is a stream of bytes with a header, split into a [Publisher] and [Subscriber] handle.
//!
//! A [Publisher] writes an ordered stream of bytes in chunks.
//! There's no framing, so these chunks can be of any size or position, and won't be maintained over the network.
//!
//! A [Subscriber] reads an ordered stream of bytes in chunks.
//! These chunks are returned directly from the QUIC connection, so they may be of any size or position.
//! A cloned [Subscriber] will receive a copy of all future chunks. (fanout)
//!
//! The segment is closed with [Error::Closed] when all publishers or subscribers are dropped.
use core::fmt;
use std::{ops::Deref, sync::Arc, time};
use crate::{Error, VarInt};
use bytes::Bytes;
use super::Watch;
/// Create a new segment with the given info, returning both halves.
pub fn new(info: Info) -> (Publisher, Subscriber) {
    let info = Arc::new(info);
    let state = Watch::new(State::default());

    (
        Publisher::new(state.clone(), info.clone()),
        Subscriber::new(state, info),
    )
}
/// Static information about the segment.
#[derive(Debug)]
pub struct Info {
    // The sequence number of the segment within the track.
    pub sequence: VarInt,

    // The priority of the segment within the broadcast.
    // NOTE(review): used downstream as the stream send priority — confirm
    // whether higher or lower values are transmitted first.
    pub priority: i32,

    // Cache the segment for at most this long after insertion.
    pub expires: Option<time::Duration>,
}
struct State {
    // The data that has been received thus far.
    data: Vec<Bytes>,

    // Set when the publisher is dropped.
    closed: Result<(), Error>,
}

impl State {
    // Mark the segment as closed; only the first error is kept.
    pub fn close(&mut self, err: Error) -> Result<(), Error> {
        self.closed?;
        self.closed = Err(err);
        Ok(())
    }
}

impl Default for State {
    fn default() -> Self {
        Self {
            data: Default::default(),
            closed: Ok(()),
        }
    }
}

impl fmt::Debug for State {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Summarize the buffered chunks instead of dumping their bytes.
        let total: usize = self.data.iter().map(|chunk| chunk.len()).sum();
        let data = format!("size={} chunks={}", total, self.data.len());

        f.debug_struct("State")
            .field("data", &data)
            .field("closed", &self.closed)
            .finish()
    }
}
/// Used to write data to a segment and notify subscribers.
pub struct Publisher {
    // Mutable segment state.
    state: Watch<State>,

    // Immutable segment state.
    info: Arc<Info>,

    // Closes the segment when all Publishers are dropped.
    _dropped: Arc<Dropped>,
}

impl Publisher {
    fn new(state: Watch<State>, info: Arc<Info>) -> Self {
        Self {
            state: state.clone(),
            info,
            _dropped: Arc::new(Dropped::new(state)),
        }
    }

    /// Append a new chunk of bytes, waking any waiting subscribers.
    pub fn write_chunk(&mut self, data: Bytes) -> Result<(), Error> {
        let mut state = self.state.lock_mut();
        state.closed?;
        state.data.push(data);
        Ok(())
    }

    /// Close the segment with an error.
    pub fn close(self, err: Error) -> Result<(), Error> {
        self.state.lock_mut().close(err)
    }
}

impl Deref for Publisher {
    type Target = Info;

    // Expose the immutable info directly.
    fn deref(&self) -> &Self::Target {
        &self.info
    }
}

impl fmt::Debug for Publisher {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Publisher")
            .field("state", &self.state)
            .field("info", &self.info)
            .finish()
    }
}
/// Notified when a segment has new data available.
#[derive(Clone)]
pub struct Subscriber {
    // Modify the segment state.
    state: Watch<State>,

    // Immutable segment state.
    info: Arc<Info>,

    // The number of chunks that we've read.
    // NOTE: Cloned subscribers inherit this index, but then run in parallel.
    index: usize,

    // Dropped when all Subscribers are dropped.
    _dropped: Arc<Dropped>,
}

impl Subscriber {
    fn new(state: Watch<State>, info: Arc<Info>) -> Self {
        let _dropped = Arc::new(Dropped::new(state.clone()));
        Self {
            state,
            info,
            index: 0,
            _dropped,
        }
    }

    /// Block until the next chunk of bytes is available.
    ///
    /// Returns Ok(None) once the segment is cleanly closed ([Error::Closed]) and
    /// every buffered chunk has been read; other close errors are propagated.
    pub async fn read_chunk(&mut self) -> Result<Option<Bytes>, Error> {
        loop {
            // Scope the lock so it is released before awaiting.
            let notify = {
                let state = self.state.lock();

                // Serve any chunk we haven't returned yet (Bytes clone is cheap).
                if self.index < state.data.len() {
                    let chunk = state.data[self.index].clone();
                    self.index += 1;
                    return Ok(Some(chunk));
                }

                match state.closed {
                    // Closed is the clean end-of-segment signal.
                    Err(Error::Closed) => return Ok(None),
                    Err(err) => return Err(err),
                    // Still open: wait for the publisher to write or close.
                    Ok(()) => state.changed(),
                }
            };

            notify.await; // Try again when the state changes
        }
    }
}

impl Deref for Subscriber {
    type Target = Info;

    fn deref(&self) -> &Self::Target {
        &self.info
    }
}

impl fmt::Debug for Subscriber {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Subscriber")
            .field("state", &self.state)
            .field("info", &self.info)
            .field("index", &self.index)
            .finish()
    }
}
struct Dropped {
// Modify the segment state.
state: Watch<State>,
}
impl Dropped {
fn new(state: Watch<State>) -> Self {
Self { state }
}
}
impl Drop for Dropped {
fn drop(&mut self) {
self.state.lock_mut().close(Error::Closed).ok();
}
}

View File

@ -0,0 +1,337 @@
//! A track is a collection of semi-reliable and semi-ordered segments, split into a [Publisher] and [Subscriber] handle.
//!
//! A [Publisher] creates segments with a sequence number and priority.
//! The sequence number is used to determine the order of segments, while the priority is used to determine which segment to transmit first.
//! This may seem counter-intuitive, but is designed for live streaming where the newest segments may be higher priority.
//! A cloned [Publisher] can be used to create segments in parallel, but will error if a duplicate sequence number is used.
//!
//! A [Subscriber] may not receive all segments in order or at all.
//! These segments are meant to be transmitted over congested networks, and the key to MoQ Transport is to not block on them.
//! Segments may also be cached for only a limited duration, adding to the unreliable nature.
//! A cloned [Subscriber] will receive a copy of all new segments going forward (fanout).
//!
//! The track is closed with [Error::Closed] when all publishers or subscribers are dropped.
use std::{collections::BinaryHeap, fmt, ops::Deref, sync::Arc, time};
use indexmap::IndexMap;
use super::{segment, Watch};
use crate::{Error, VarInt};
/// Create a track with the given name, returning both halves.
pub fn new(name: &str) -> (Publisher, Subscriber) {
    let info = Arc::new(Info { name: name.to_string() });
    let state = Watch::new(State::default());

    (
        Publisher::new(state.clone(), info.clone()),
        Subscriber::new(state, info),
    )
}
/// Static information about a track.
#[derive(Debug)]
pub struct Info {
    // The name of the track, used as its key within the broadcast.
    pub name: String,
}
struct State {
    // Store segments in received order so subscribers can detect changes.
    // The key is the segment sequence, which could have gaps.
    // A None value means the segment has expired.
    lookup: IndexMap<VarInt, Option<segment::Subscriber>>,

    // Store when segments will expire in a priority queue (soonest first; see
    // SegmentExpiration's reversed Ord).
    expires: BinaryHeap<SegmentExpiration>,

    // The number of None entries removed from the start of the lookup.
    // Lets subscriber indices stay stable across pruning.
    pruned: usize,

    // Set when the publisher is closed/dropped, or all subscribers are dropped.
    closed: Result<(), Error>,
}

impl State {
    // Mark the track as closed; only the first error is kept.
    pub fn close(&mut self, err: Error) -> Result<(), Error> {
        self.closed?;
        self.closed = Err(err);
        Ok(())
    }

    // Insert a new segment, scheduling its expiration.
    // Errors with [Error::Duplicate] if the sequence number is already taken.
    pub fn insert(&mut self, segment: segment::Subscriber) -> Result<(), Error> {
        self.closed?;

        let entry = match self.lookup.entry(segment.sequence) {
            indexmap::map::Entry::Occupied(_entry) => return Err(Error::Duplicate),
            indexmap::map::Entry::Vacant(entry) => entry,
        };

        // Schedule removal if the segment carries a TTL.
        if let Some(expires) = segment.expires {
            self.expires.push(SegmentExpiration {
                sequence: segment.sequence,
                expires: time::Instant::now() + expires,
            });
        }

        entry.insert(Some(segment));

        // Expire any existing segments on insert.
        // This means if you don't insert then you won't expire... but it's probably fine since the cache won't grow.
        // TODO Use a timer to expire segments at the correct time instead
        self.expire();

        Ok(())
    }

    // Try expiring any segments
    pub fn expire(&mut self) {
        let now = time::Instant::now();
        while let Some(segment) = self.expires.peek() {
            // The heap is soonest-first, so stop at the first future deadline.
            if segment.expires > now {
                break;
            }

            // Update the entry to None while preserving the index.
            match self.lookup.entry(segment.sequence) {
                indexmap::map::Entry::Occupied(mut entry) => entry.insert(None),
                indexmap::map::Entry::Vacant(_) => panic!("expired segment not found"),
            };

            self.expires.pop();
        }

        // Remove None entries from the start of the lookup, counting them in
        // `pruned` so subscribers can adjust their indices.
        while let Some((_, None)) = self.lookup.get_index(0) {
            self.lookup.shift_remove_index(0);
            self.pruned += 1;
        }
    }
}

impl Default for State {
    fn default() -> Self {
        Self {
            lookup: Default::default(),
            expires: Default::default(),
            pruned: 0,
            closed: Ok(()),
        }
    }
}

impl fmt::Debug for State {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The expiration heap is omitted from the summary.
        f.debug_struct("State")
            .field("lookup", &self.lookup)
            .field("pruned", &self.pruned)
            .field("closed", &self.closed)
            .finish()
    }
}
/// Creates new segments for a track.
pub struct Publisher {
    // Shared mutable track state.
    state: Watch<State>,

    // Static track info (the name).
    info: Arc<Info>,

    // Closes the track once every Publisher handle is gone.
    _dropped: Arc<Dropped>,
}

impl Publisher {
    fn new(state: Watch<State>, info: Arc<Info>) -> Self {
        Self {
            state: state.clone(),
            info,
            _dropped: Arc::new(Dropped::new(state)),
        }
    }

    /// Insert an existing segment into the track.
    pub fn insert_segment(&mut self, segment: segment::Subscriber) -> Result<(), Error> {
        self.state.lock_mut().insert(segment)
    }

    /// Create a segment from the given info, insert it, and return its publisher half.
    pub fn create_segment(&mut self, info: segment::Info) -> Result<segment::Publisher, Error> {
        let (send, recv) = segment::new(info);
        self.insert_segment(recv)?;
        Ok(send)
    }

    /// Close the track with an error.
    pub fn close(self, err: Error) -> Result<(), Error> {
        self.state.lock_mut().close(err)
    }
}

impl Deref for Publisher {
    type Target = Info;

    fn deref(&self) -> &Self::Target {
        &self.info
    }
}

impl fmt::Debug for Publisher {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Publisher")
            .field("state", &self.state)
            .field("info", &self.info)
            .finish()
    }
}
/// Receives new segments for a track.
#[derive(Clone)]
pub struct Subscriber {
    state: Watch<State>,
    info: Arc<Info>,

    // The index of the next segment to return.
    // This is an absolute position: it includes entries already pruned.
    index: usize,

    // If there are multiple segments to return, we put them in here to return them in priority order.
    pending: BinaryHeap<SegmentPriority>,

    // Dropped when all subscribers are dropped.
    _dropped: Arc<Dropped>,
}

impl Subscriber {
    fn new(state: Watch<State>, info: Arc<Info>) -> Self {
        let _dropped = Arc::new(Dropped::new(state.clone()));
        Self {
            state,
            info,
            index: 0,
            pending: Default::default(),
            _dropped,
        }
    }

    /// Block until the next segment arrives, or return None if the track is [Error::Closed].
    pub async fn next_segment(&mut self) -> Result<Option<segment::Subscriber>, Error> {
        loop {
            // Scope the lock so it is released before awaiting.
            let notify = {
                let state = self.state.lock();

                // Adjust for pruned entries; saturating_sub clamps to 0 when
                // more entries were pruned than this handle has read.
                let mut index = self.index.saturating_sub(state.pruned);

                // Push all new segments into a priority queue.
                while index < state.lookup.len() {
                    let (_, segment) = state.lookup.get_index(index).unwrap();

                    // Skip None values (expired segments).
                    // TODO These might actually be expired, so we should check the expiration time.
                    if let Some(segment) = segment {
                        self.pending.push(SegmentPriority(segment.clone()));
                    }

                    index += 1;
                }

                // Store the absolute position again (pruned + relative index).
                self.index = state.pruned + index;

                // Return the higher priority segment.
                // NOTE(review): the heap pops the entry with the LOWEST numeric
                // `priority` first (see SegmentPriority's Ord) — confirm intended.
                if let Some(segment) = self.pending.pop() {
                    return Ok(Some(segment.0));
                }

                // Otherwise check if we need to return an error.
                match state.closed {
                    Err(Error::Closed) => return Ok(None),
                    Err(err) => return Err(err),
                    Ok(()) => state.changed(),
                }
            };

            notify.await
        }
    }
}

impl Deref for Subscriber {
    type Target = Info;

    fn deref(&self) -> &Self::Target {
        &self.info
    }
}

impl fmt::Debug for Subscriber {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Subscriber")
            .field("state", &self.state)
            .field("info", &self.info)
            .field("index", &self.index)
            .finish()
    }
}
// Closes the track on Drop.
struct Dropped {
state: Watch<State>,
}
impl Dropped {
fn new(state: Watch<State>) -> Self {
Self { state }
}
}
impl Drop for Dropped {
fn drop(&mut self) {
self.state.lock_mut().close(Error::Closed).ok();
}
}
// Used to order segments by expiration time.
struct SegmentExpiration {
    // The sequence number of the segment to expire.
    sequence: VarInt,

    // The instant after which the segment is stale.
    expires: time::Instant,
}

impl Ord for SegmentExpiration {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        // Reverse order so the earliest expiration is at the top of the heap.
        other.expires.cmp(&self.expires)
    }
}

impl PartialOrd for SegmentExpiration {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for SegmentExpiration {
    // NOTE: equality ignores `sequence`, which is consistent with Ord above.
    fn eq(&self, other: &Self) -> bool {
        self.expires == other.expires
    }
}

impl Eq for SegmentExpiration {}
// Used to order segments by priority
#[derive(Clone)]
struct SegmentPriority(pub segment::Subscriber);

impl Ord for SegmentPriority {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        // Reversed comparison: in BinaryHeap (a max-heap) this makes pop()
        // yield the entry with the LOWEST numeric `priority` first.
        // NOTE(review): the original comment claimed "highest priority at the
        // top" — confirm which numeric direction is intended.
        other.0.priority.cmp(&self.0.priority)
    }
}

impl PartialOrd for SegmentPriority {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for SegmentPriority {
    // Equality considers only the priority, matching Ord.
    fn eq(&self, other: &Self) -> bool {
        self.0.priority == other.0.priority
    }
}

impl Eq for SegmentPriority {}

View File

@ -0,0 +1,180 @@
use std::{
fmt,
future::Future,
ops::{Deref, DerefMut},
pin::Pin,
sync::{Arc, Mutex, MutexGuard},
task,
};
// The value guarded by a Watch, plus the bookkeeping needed to wake waiters.
struct State<T> {
    value: T,
    wakers: Vec<task::Waker>,
    epoch: usize,
}

impl<T> State<T> {
    pub fn new(value: T) -> Self {
        Self {
            value,
            epoch: 0,
            wakers: Vec::new(),
        }
    }

    /// Remember a waker to wake on the next notify, replacing any stale clone of it.
    pub fn register(&mut self, waker: &task::Waker) {
        // Drop any prior copy of this waker before storing the fresh one.
        self.wakers.retain(|existing| !existing.will_wake(waker));
        self.wakers.push(waker.clone());
    }

    /// Bump the change counter and wake every registered task.
    pub fn notify(&mut self) {
        self.epoch += 1;

        for waker in self.wakers.drain(..) {
            waker.wake();
        }
    }
}

impl<T: Default> Default for State<T> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T: fmt::Debug> fmt::Debug for State<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the wrapped value; wakers and epoch are internal noise.
        self.value.fmt(f)
    }
}
/// A mutex-guarded value whose writers notify readers on every unlock.
pub struct Watch<T> {
    state: Arc<Mutex<State<T>>>,
}

impl<T> Watch<T> {
    pub fn new(initial: T) -> Self {
        Self {
            state: Arc::new(Mutex::new(State::new(initial))),
        }
    }

    /// Acquire a read lock that can also wait for the next change.
    pub fn lock(&self) -> WatchRef<T> {
        let lock = self.state.lock().unwrap();

        WatchRef {
            state: self.state.clone(),
            lock,
        }
    }

    /// Acquire a write lock; waiters are notified when it drops.
    pub fn lock_mut(&self) -> WatchMut<T> {
        let lock = self.state.lock().unwrap();
        WatchMut { lock }
    }
}

impl<T> Clone for Watch<T> {
    fn clone(&self) -> Self {
        // Clone the Arc, not the inner state: all handles share one watch.
        Self {
            state: Arc::clone(&self.state),
        }
    }
}

impl<T: Default> Default for Watch<T> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T: fmt::Debug> fmt::Debug for Watch<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Avoid blocking inside Debug: bail out if the lock is currently held.
        match self.state.try_lock() {
            Ok(lock) => lock.value.fmt(f),
            Err(_) => write!(f, "<locked>"),
        }
    }
}
/// A read lock over a Watch that can be converted into a wait or a write lock.
pub struct WatchRef<'a, T> {
    // An owned handle so `changed()` can outlive the guard.
    state: Arc<Mutex<State<T>>>,

    // The held lock; released on drop or conversion.
    lock: MutexGuard<'a, State<T>>,
}

impl<'a, T> WatchRef<'a, T> {
    // Release the lock and wait for a notification when next updated.
    pub fn changed(self) -> WatchChanged<T> {
        let epoch = self.lock.epoch;

        WatchChanged {
            state: self.state,
            epoch,
        }
    }

    // Upgrade to a mutable references that automatically calls notify on drop.
    pub fn into_mut(self) -> WatchMut<'a, T> {
        WatchMut { lock: self.lock }
    }
}

impl<'a, T> Deref for WatchRef<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.lock.value
    }
}

impl<'a, T: fmt::Debug> fmt::Debug for WatchRef<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.lock.fmt(f)
    }
}
// A write lock over a Watch; wakes all waiters when released.
pub struct WatchMut<'a, T> {
    lock: MutexGuard<'a, State<T>>,
}

impl<'a, T> Deref for WatchMut<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.lock.value
    }
}

impl<'a, T> DerefMut for WatchMut<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.lock.value
    }
}

impl<'a, T> Drop for WatchMut<'a, T> {
    fn drop(&mut self) {
        // Notify unconditionally on drop, even if nothing was actually mutated.
        self.lock.notify();
    }
}

impl<'a, T: fmt::Debug> fmt::Debug for WatchMut<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.lock.fmt(f)
    }
}
// A future that resolves once the watched state is notified after `epoch`.
pub struct WatchChanged<T> {
    // The shared state to poll.
    state: Arc<Mutex<State<T>>>,

    // The epoch snapshotted when `changed()` was called.
    epoch: usize,
}

impl<T> Future for WatchChanged<T> {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> task::Poll<Self::Output> {
        // TODO is there an API we can make that doesn't drop this lock?
        let mut state = self.state.lock().unwrap();

        // Ready if a notify happened since the snapshot; otherwise park this
        // task until the next notify wakes it.
        if state.epoch > self.epoch {
            task::Poll::Ready(())
        } else {
            state.register(cx.waker());
            task::Poll::Pending
        }
    }
}

View File

@ -1,60 +0,0 @@
mod receiver;
mod sender;
pub use receiver::*;
pub use sender::*;
use crate::coding::{DecodeError, EncodeError, VarInt};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use webtransport_generic::{RecvStream, SendStream};
#[derive(Clone, Debug)]
pub struct Object {
    // An ID for this track.
    // Proposal: https://github.com/moq-wg/moq-transport/issues/209
    pub track: VarInt,

    // The group sequence number.
    pub group: VarInt,

    // The object sequence number.
    pub sequence: VarInt,

    // The priority/send order.
    // Proposal: int32 instead of a varint.
    pub send_order: i32,
}

impl Object {
    // Decode an object header: a type varint that must be 0, then the
    // track/group/sequence varints and a big-endian i32 send order.
    pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
        let typ = VarInt::decode(r).await?;
        if typ.into_inner() != 0 {
            return Err(DecodeError::InvalidType(typ));
        }

        // NOTE: size has been omitted
        let track = VarInt::decode(r).await?;
        let group = VarInt::decode(r).await?;
        let sequence = VarInt::decode(r).await?;
        let send_order = r.read_i32().await?; // big-endian

        Ok(Self {
            track,
            group,
            sequence,
            send_order,
        })
    }

    // Encode the header in the same field order decode expects.
    pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
        VarInt::from_u32(0).encode(w).await?;
        self.track.encode(w).await?;
        self.group.encode(w).await?;
        self.sequence.encode(w).await?;
        w.write_i32(self.send_order).await?;

        Ok(())
    }
}

View File

@ -1,42 +0,0 @@
use crate::Object;
use anyhow::Context;
use tokio::task::JoinSet;
use webtransport_generic::Session;
// Accepts incoming uni streams and parses each Object header concurrently.
pub struct Receiver<S: Session> {
    session: S,

    // Streams that we've accepted but haven't read the header from yet.
    streams: JoinSet<anyhow::Result<(Object, S::RecvStream)>>,
}

impl<S: Session> Receiver<S> {
    pub fn new(session: S) -> Self {
        Self {
            session,
            streams: JoinSet::new(),
        }
    }

    // Yield the next (header, stream) pair. Header parsing runs in spawned
    // tasks so a slow stream does not block accepting new ones.
    pub async fn recv(&mut self) -> anyhow::Result<(Object, S::RecvStream)> {
        loop {
            tokio::select! {
                res = self.session.accept_uni() => {
                    let stream = res.context("failed to accept stream")?;
                    self.streams.spawn(async move { Self::read(stream).await });
                },
                // Guard against polling join_next() on an empty JoinSet.
                res = self.streams.join_next(), if !self.streams.is_empty() => {
                    return res.unwrap().context("failed to run join set")?;
                }
            }
        }
    }

    // Read the Object header, returning it alongside the remaining stream.
    async fn read(mut stream: S::RecvStream) -> anyhow::Result<(Object, S::RecvStream)> {
        let header = Object::decode(&mut stream).await?;
        Ok((header, stream))
    }
}

View File

@ -1,29 +0,0 @@
use anyhow::Context;
use crate::Object;
use webtransport_generic::{SendStream, Session};
// Allow this to be cloned so we can have multiple senders.
#[derive(Clone)]
pub struct Sender<S: Session> {
    // The session.
    session: S,
}

impl<S: Session> Sender<S> {
    pub fn new(session: S) -> Self {
        Self { session }
    }

    // Open a new uni stream, set its priority from the object's send order,
    // and write the object header; the caller then writes the payload.
    pub async fn open(&mut self, object: Object) -> anyhow::Result<S::SendStream> {
        let mut stream = self.session.open_uni().await.context("failed to open uni stream")?;
        stream.set_priority(object.send_order);
        object.encode(&mut stream).await.context("failed to write header")?;
        Ok(stream)
    }
}

View File

@ -1,87 +0,0 @@
use anyhow::Context;
use crate::{message, object, setup};
use webtransport_generic::Session as WTSession;
// A MoQ session over a generic WebTransport session: control message
// sender/receiver plus object sender/receiver halves.
pub struct Session<S: WTSession> {
    pub send_control: message::Sender<S::SendStream>,
    pub recv_control: message::Receiver<S::RecvStream>,
    pub send_objects: object::Sender<S>,
    pub recv_objects: object::Receiver<S>,
}

impl<S: WTSession> Session<S> {
    /// Called by a server with an established WebTransport session.
    // TODO close the session with an error code
    pub async fn accept(session: S, role: setup::Role) -> anyhow::Result<Self> {
        let (mut send, mut recv) = session.accept_bi().await.context("failed to accept bidi stream")?;

        let setup_client = setup::Client::decode(&mut recv)
            .await
            .context("failed to read CLIENT SETUP")?;

        // We only speak DRAFT_00; reject clients that don't offer it.
        setup_client
            .versions
            .iter()
            .find(|version| **version == setup::Version::DRAFT_00)
            .context("no supported versions")?;

        let setup_server = setup::Server {
            role,
            version: setup::Version::DRAFT_00,
        };

        setup_server
            .encode(&mut send)
            .await
            .context("failed to send setup server")?;

        let send_control = message::Sender::new(send);
        let recv_control = message::Receiver::new(recv);

        let send_objects = object::Sender::new(session.clone());
        let recv_objects = object::Receiver::new(session.clone());

        Ok(Session {
            send_control,
            recv_control,
            send_objects,
            recv_objects,
        })
    }

    /// Called by a client with an established WebTransport session.
    pub async fn connect(session: S, role: setup::Role) -> anyhow::Result<Self> {
        // Fixed typo in the error message: "oen" -> "open".
        let (mut send, mut recv) = session.open_bi().await.context("failed to open bidi stream")?;

        let setup_client = setup::Client {
            role,
            versions: vec![setup::Version::DRAFT_00].into(),
            path: "".to_string(),
        };

        setup_client
            .encode(&mut send)
            .await
            .context("failed to send SETUP CLIENT")?;

        let setup_server = setup::Server::decode(&mut recv).await.context("failed to read SETUP")?;

        if setup_server.version != setup::Version::DRAFT_00 {
            anyhow::bail!("unsupported version: {:?}", setup_server.version);
        }

        let send_control = message::Sender::new(send);
        let recv_control = message::Receiver::new(recv);

        let send_objects = object::Sender::new(session.clone());
        let recv_objects = object::Receiver::new(session.clone());

        Ok(Session {
            send_control,
            recv_control,
            send_objects,
            recv_objects,
        })
    }
}

View File

@ -0,0 +1,62 @@
use super::{Publisher, Subscriber};
use crate::{model::broadcast, setup};
use webtransport_quinn::{RecvStream, SendStream, Session};
use anyhow::Context;
/// An endpoint that connects to a URL to publish and/or consume live streams.
pub struct Client {}
impl Client {
/// Connect using an established WebTransport session, performing the MoQ handshake as a publisher.
pub async fn publisher(session: Session, source: broadcast::Subscriber) -> anyhow::Result<Publisher> {
let control = Self::send_setup(&session, setup::Role::Publisher).await?;
let publisher = Publisher::new(session, control, source);
Ok(publisher)
}
/// Connect using an established WebTransport session, performing the MoQ handshake as a subscriber.
pub async fn subscriber(session: Session, source: broadcast::Publisher) -> anyhow::Result<Subscriber> {
let control = Self::send_setup(&session, setup::Role::Subscriber).await?;
let subscriber = Subscriber::new(session, control, source);
Ok(subscriber)
}
// TODO support performing both roles
/*
pub async fn connect(self) -> anyhow::Result<(Publisher, Subscriber)> {
self.connect_role(setup::Role::Both).await
}
*/
async fn send_setup(session: &Session, role: setup::Role) -> anyhow::Result<(SendStream, RecvStream)> {
let mut control = session.open_bi().await.context("failed to oen bidi stream")?;
let client = setup::Client {
role,
versions: vec![setup::Version::KIXEL_00].into(),
};
client
.encode(&mut control.0)
.await
.context("failed to send SETUP CLIENT")?;
let server = setup::Server::decode(&mut control.1)
.await
.context("failed to read SETUP")?;
if server.version != setup::Version::KIXEL_00 {
anyhow::bail!("unsupported version: {:?}", server.version);
}
// Make sure the server replied with the
if !client.role.is_compatible(server.role) {
anyhow::bail!("incompatible roles: client={:?} server={:?}", client.role, server.role);
}
Ok(control)
}
}

View File

@ -0,0 +1,35 @@
// A helper class to guard sending control messages behind a Mutex.
use std::{fmt, sync::Arc};
use tokio::sync::Mutex;
use webtransport_quinn::{RecvStream, SendStream};
use crate::{message::Message, Error};
#[derive(Debug, Clone)]
pub(crate) struct Control {
send: Arc<Mutex<SendStream>>,
recv: Arc<Mutex<RecvStream>>,
}
impl Control {
pub fn new(send: SendStream, recv: RecvStream) -> Self {
Self {
send: Arc::new(Mutex::new(send)),
recv: Arc::new(Mutex::new(recv)),
}
}
pub async fn send<T: Into<Message> + fmt::Debug>(&self, msg: T) -> Result<(), Error> {
let mut stream = self.send.lock().await;
log::info!("sending message: {:?}", msg);
msg.into().encode(&mut *stream).await.map_err(|_e| Error::Write)
}
// It's likely a mistake to call this from two different tasks, but it's easier to just support it.
pub async fn recv(&self) -> Result<Message, Error> {
let mut stream = self.recv.lock().await;
Message::decode(&mut *stream).await.map_err(|_e| Error::Read)
}
}

View File

@ -0,0 +1,25 @@
//! A MoQ Transport session, on top of a WebTransport session, on top of a QUIC connection.
//!
//! The handshake is relatively simple but split into different steps.
//! All of these handshakes slightly differ depending on if the endpoint is a client or server.
//! 1. Complete the QUIC handshake.
//! 2. Complete the WebTransport handshake.
//! 3. Complete the MoQ handshake.
//!
//! Use [Client] or [Server] for the MoQ handshake depending on the endpoint.
//! Then, decide if you want to create a [Publisher] or [Subscriber], or both (TODO).
//!
//! A [Publisher] can announce broadcasts, which will automatically be served over the network.
//! A [Subscriber] can subscribe to broadcasts, which will automatically be served over the network.
mod client;
mod control;
mod publisher;
mod server;
mod subscriber;
pub use client::*;
pub(crate) use control::*;
pub use publisher::*;
pub use server::*;
pub use subscriber::*;

View File

@ -0,0 +1,189 @@
use std::{
collections::{hash_map, HashMap},
sync::{Arc, Mutex},
};
use tokio::task::AbortHandle;
use webtransport_quinn::{RecvStream, SendStream, Session};
use crate::{
message,
message::Message,
model::{broadcast, segment, track},
Error, VarInt,
};
use super::Control;
/// Serves broadcasts over the network, automatically handling subscriptions and caching.
// TODO Clone specific fields when a task actually needs it.
#[derive(Clone, Debug)]
pub struct Publisher {
    // A map of active subscriptions, containing an abort handle to cancel them.
    subscribes: Arc<Mutex<HashMap<VarInt, AbortHandle>>>,
    webtransport: Session,
    control: Control,
    source: broadcast::Subscriber,
}

impl Publisher {
    pub(crate) fn new(webtransport: Session, control: (SendStream, RecvStream), source: broadcast::Subscriber) -> Self {
        let control = Control::new(control.0, control.1);

        Self {
            webtransport,
            subscribes: Default::default(),
            control,
            source,
        }
    }

    // TODO Serve a broadcast without sending an ANNOUNCE.
    // fn serve(&mut self, broadcast: broadcast::Subscriber) -> Result<(), Error> {

    // TODO Wait until the next subscribe that doesn't route to an ANNOUNCE.
    // pub async fn subscribed(&mut self) -> Result<track::Producer, Error> {

    /// Run the control loop until the session fails.
    ///
    /// A publisher never receives uni streams, so an incoming one is a role violation.
    pub async fn run(mut self) -> Result<(), Error> {
        loop {
            tokio::select! {
                _stream = self.webtransport.accept_uni() => {
                    return Err(Error::Role(VarInt::ZERO));
                }
                // NOTE: this is not cancel safe, but it's fine since the other branch is a fatal error.
                msg = self.control.recv() => {
                    let msg = msg.map_err(|_x| Error::Read)?;

                    log::info!("message received: {:?}", msg);
                    if let Err(err) = self.recv_message(&msg).await {
                        log::warn!("message error: {:?} {:?}", err, msg);
                    }
                }
            }
        }
    }

    // Dispatch a control message; anything else is invalid for our role.
    async fn recv_message(&mut self, msg: &Message) -> Result<(), Error> {
        match msg {
            Message::AnnounceOk(msg) => self.recv_announce_ok(msg).await,
            Message::AnnounceStop(msg) => self.recv_announce_stop(msg).await,
            Message::Subscribe(msg) => self.recv_subscribe(msg).await,
            Message::SubscribeStop(msg) => self.recv_subscribe_stop(msg).await,
            _ => Err(Error::Role(msg.id())),
        }
    }

    async fn recv_announce_ok(&mut self, _msg: &message::AnnounceOk) -> Result<(), Error> {
        // We didn't send an announce.
        Err(Error::NotFound)
    }

    async fn recv_announce_stop(&mut self, _msg: &message::AnnounceStop) -> Result<(), Error> {
        // We didn't send an announce.
        Err(Error::NotFound)
    }

    // Start serving a subscription, replying SUBSCRIBE_OK on success or
    // SUBSCRIBE_RESET on failure.
    async fn recv_subscribe(&mut self, msg: &message::Subscribe) -> Result<(), Error> {
        // Assume that the subscribe ID is unique for now.
        let abort = match self.start_subscribe(msg.clone()) {
            Ok(abort) => abort,
            Err(err) => return self.reset_subscribe(msg.id, err).await,
        };

        // Insert the abort handle into the lookup table.
        match self.subscribes.lock().unwrap().entry(msg.id) {
            hash_map::Entry::Occupied(_) => return Err(Error::Duplicate), // TODO fatal, because we already started the task
            hash_map::Entry::Vacant(entry) => entry.insert(abort),
        };

        self.control.send(message::SubscribeOk { id: msg.id }).await
    }

    // Send a SUBSCRIBE_RESET carrying the error's code and reason.
    async fn reset_subscribe(&mut self, id: VarInt, err: Error) -> Result<(), Error> {
        let msg = message::SubscribeReset {
            id,
            code: err.code(),
            reason: err.reason().to_string(),
        };

        self.control.send(msg).await
    }

    // Spawn a task serving the requested track, returning its abort handle.
    fn start_subscribe(&mut self, msg: message::Subscribe) -> Result<AbortHandle, Error> {
        // We currently don't use the namespace field in SUBSCRIBE
        if !msg.namespace.is_empty() {
            return Err(Error::NotFound);
        }

        let mut track = self.source.get_track(&msg.name)?;

        // TODO only clone the fields we need
        let mut this = self.clone();

        let handle = tokio::spawn(async move {
            log::info!("serving track: name={}", track.name);

            let res = this.run_subscribe(msg.id, &mut track).await;
            if let Err(err) = &res {
                log::warn!("failed to serve track: name={} err={:?}", track.name, err);
            }

            // Make sure we send a reset at the end.
            let err = res.err().unwrap_or(Error::Closed);
            this.reset_subscribe(msg.id, err).await.ok();

            // We're all done, so clean up the abort handle.
            this.subscribes.lock().unwrap().remove(&msg.id);
        });

        Ok(handle.abort_handle())
    }

    // Forward each segment of the track, each on its own spawned task.
    async fn run_subscribe(&self, id: VarInt, track: &mut track::Subscriber) -> Result<(), Error> {
        // TODO add an Ok method to track::Publisher so we can send SUBSCRIBE_OK

        while let Some(mut segment) = track.next_segment().await? {
            // TODO only clone the fields we need
            let this = self.clone();

            tokio::spawn(async move {
                if let Err(err) = this.run_segment(id, &mut segment).await {
                    log::warn!("failed to serve segment: {:?}", err)
                }
            });
        }

        Ok(())
    }

    // Write one segment: open a uni stream, send the object header, then
    // relay each chunk as it arrives.
    async fn run_segment(&self, id: VarInt, segment: &mut segment::Subscriber) -> Result<(), Error> {
        let object = message::Object {
            track: id,
            sequence: segment.sequence,
            priority: segment.priority,
            expires: segment.expires,
        };

        log::debug!("serving object: {:?}", object);

        let mut stream = self.webtransport.open_uni().await.map_err(|_e| Error::Write)?;

        // Best-effort: ignore a failure to set the stream priority.
        stream.set_priority(object.priority).ok();

        // TODO better handle the error.
        object.encode(&mut stream).await.map_err(|_e| Error::Write)?;

        while let Some(data) = segment.read_chunk().await? {
            stream.write_chunk(data).await.map_err(|_e| Error::Write)?;
        }

        Ok(())
    }

    // Abort the serving task and acknowledge with a SUBSCRIBE_RESET(Stop).
    async fn recv_subscribe_stop(&mut self, msg: &message::SubscribeStop) -> Result<(), Error> {
        let abort = self.subscribes.lock().unwrap().remove(&msg.id).ok_or(Error::NotFound)?;
        abort.abort();

        self.reset_subscribe(msg.id, Error::Stop).await
    }
}

View File

@ -0,0 +1,100 @@
use super::{Publisher, Subscriber};
use crate::{model::broadcast, setup};
use webtransport_quinn::{RecvStream, SendStream, Session};
use anyhow::Context;
/// An endpoint that accepts connections, publishing and/or consuming live streams.
pub struct Server {}

impl Server {
    /// Accept an established WebTransport session, performing the MoQ handshake.
    ///
    /// Returns a [Request] half-way through the handshake so the application
    /// can accept or deny the session.
    pub async fn accept(session: Session) -> anyhow::Result<Request> {
        let (send, mut recv) = session.accept_bi().await.context("failed to accept bidi stream")?;

        let client = setup::Client::decode(&mut recv)
            .await
            .context("failed to read CLIENT SETUP")?;

        // Reject clients that don't offer our (forked) draft version.
        let supported = client
            .versions
            .iter()
            .any(|version| *version == setup::Version::KIXEL_00);
        if !supported {
            anyhow::bail!("no supported versions");
        }

        Ok(Request {
            session,
            client,
            control: (send, recv),
        })
    }
}
/// A partially complete MoQ Transport handshake.
pub struct Request {
    // The established WebTransport session.
    session: Session,
    // The CLIENT SETUP message already read from the control stream.
    client: setup::Client,
    // The control stream halves (send, recv) used to finish the handshake.
    control: (SendStream, RecvStream),
}
impl Request {
    /// Accept the session as a publisher, using the provided broadcast to serve subscriptions.
    pub async fn publisher(mut self, source: broadcast::Subscriber) -> anyhow::Result<Publisher> {
        self.send_setup(setup::Role::Publisher).await?;
        Ok(Publisher::new(self.session, self.control, source))
    }

    /// Accept the session as a subscriber only.
    pub async fn subscriber(mut self, source: broadcast::Publisher) -> anyhow::Result<Subscriber> {
        self.send_setup(setup::Role::Subscriber).await?;
        Ok(Subscriber::new(self.session, self.control, source))
    }

    // TODO Accept the session and perform both roles.
    /*
    pub async fn accept(self) -> anyhow::Result<(Publisher, Subscriber)> {
        self.ok(setup::Role::Both).await
    }
    */

    /// Verify role compatibility, then reply with a SERVER SETUP advertising `role`.
    async fn send_setup(&mut self, role: setup::Role) -> anyhow::Result<()> {
        // We need to make sure we support the opposite of the client's role.
        // ex. if the client is a publisher, we must be a subscriber ONLY.
        if !self.client.role.is_compatible(role) {
            anyhow::bail!(
                "incompatible roles: client={:?} server={:?}",
                self.client.role,
                role
            );
        }

        let setup = setup::Server {
            role,
            version: setup::Version::KIXEL_00,
        };

        setup
            .encode(&mut self.control.0)
            .await
            .context("failed to send setup server")?;

        Ok(())
    }

    /// Reject the request, closing the WebTransport session.
    pub fn reject(self, code: u32) {
        self.session.close(code, b"")
    }

    /// The role advertised by the client.
    pub fn role(&self) -> setup::Role {
        self.client.role
    }
}

View File

@ -0,0 +1,152 @@
use webtransport_quinn::{RecvStream, SendStream, Session};
use std::{
collections::HashMap,
sync::{atomic, Arc, Mutex},
};
use crate::{
message,
message::Message,
model::{broadcast, segment, track},
Error, VarInt,
};
use super::Control;
/// Receives broadcasts over the network, automatically handling subscriptions and caching.
// TODO Clone specific fields when a task actually needs it.
#[derive(Clone, Debug)]
pub struct Subscriber {
    // The WebTransport session, used to accept incoming data streams.
    webtransport: Session,

    // Active subscriptions keyed by subscribe ID; shared with spawned stream
    // tasks, hence the Arc<Mutex<..>>.
    subscribes: Arc<Mutex<HashMap<VarInt, track::Publisher>>>,

    // The sequence number for the next subscription.
    next: Arc<atomic::AtomicU32>,

    // A channel for sending control messages.
    control: Control,

    // All unknown subscribes come here.
    source: broadcast::Publisher,
}
impl Subscriber {
    /// Wrap an established session, turning the control stream halves into a Control channel.
    pub(crate) fn new(webtransport: Session, control: (SendStream, RecvStream), source: broadcast::Publisher) -> Self {
        let control = Control::new(control.0, control.1);

        Self {
            webtransport,
            subscribes: Default::default(),
            next: Default::default(),
            control,
            source,
        }
    }

    /// Drive the session: run the control loop, stream acceptor, and
    /// subscription issuer concurrently until the first one fails.
    pub async fn run(self) -> Result<(), Error> {
        let inbound = self.clone().run_inbound();
        let streams = self.clone().run_streams();
        let source = self.clone().run_source();

        // Return the first error.
        tokio::select! {
            res = inbound => res,
            res = streams => res,
            res = source => res,
        }
    }

    /// Receive control messages forever; per-message errors are logged, not fatal.
    async fn run_inbound(mut self) -> Result<(), Error> {
        loop {
            let msg = self.control.recv().await.map_err(|_e| Error::Read)?;

            log::info!("message received: {:?}", msg);
            if let Err(err) = self.recv_message(&msg).await {
                log::warn!("message error: {:?} {:?}", err, msg);
            }
        }
    }

    /// Dispatch a single control message; anything not valid for our role is an error.
    async fn recv_message(&mut self, msg: &Message) -> Result<(), Error> {
        match msg {
            Message::Announce(_) => Ok(()),      // don't care
            Message::AnnounceReset(_) => Ok(()), // also don't care
            Message::SubscribeOk(_) => Ok(()),   // guess what, don't care
            Message::SubscribeReset(msg) => self.recv_subscribe_reset(msg).await,
            Message::GoAway(_msg) => unimplemented!("GOAWAY"),
            _ => Err(Error::Role(msg.id())),
        }
    }

    /// The publisher terminated our subscription: close the matching track with the error.
    async fn recv_subscribe_reset(&mut self, msg: &message::SubscribeReset) -> Result<(), Error> {
        let err = Error::Reset(msg.code);

        // close() is synchronous, so the mutex is never held across an await.
        let mut subscribes = self.subscribes.lock().unwrap();
        let subscribe = subscribes.remove(&msg.id).ok_or(Error::NotFound)?;
        subscribe.close(err)?;

        Ok(())
    }

    /// Accept incoming unidirectional (data) streams, spawning a task per stream.
    async fn run_streams(self) -> Result<(), Error> {
        loop {
            // Accept all incoming unidirectional streams.
            let stream = self.webtransport.accept_uni().await.map_err(|_| Error::Read)?;
            let this = self.clone();

            tokio::spawn(async move {
                if let Err(err) = this.run_stream(stream).await {
                    log::warn!("failed to receive stream: err={:?}", err);
                }
            });
        }
    }

    /// Receive one OBJECT: decode its header, create a segment on the matching
    /// track, then copy payload chunks until the stream finishes.
    async fn run_stream(self, mut stream: RecvStream) -> Result<(), Error> {
        // Decode the object on the data stream.
        let object = message::Object::decode(&mut stream).await.map_err(|_| Error::Read)?;

        log::debug!("received object: {:?}", object);

        // A new scope is needed because the async compiler is dumb; the mutex
        // guard must be dropped before the awaits below.
        let mut publisher = {
            let mut subscribes = self.subscribes.lock().unwrap();
            let track = subscribes.get_mut(&object.track).ok_or(Error::NotFound)?;

            track.create_segment(segment::Info {
                sequence: object.sequence,
                priority: object.priority,
                expires: object.expires,
            })?
        };

        while let Some(data) = stream.read_chunk(usize::MAX, true).await.map_err(|_| Error::Read)? {
            publisher.write_chunk(data.bytes)?;
        }

        Ok(())
    }

    /// Issue a SUBSCRIBE for each track requested via the source broadcast.
    async fn run_source(mut self) -> Result<(), Error> {
        while let Some(track) = self.source.next_track().await? {
            let name = track.name.clone();

            // Allocate the next subscribe ID and register the track BEFORE
            // sending, so an early OBJECT can already find it.
            let id = VarInt::from_u32(self.next.fetch_add(1, atomic::Ordering::SeqCst));
            self.subscribes.lock().unwrap().insert(id, track);

            let msg = message::Subscribe {
                id,
                namespace: "".to_string(),
                name,
            };

            self.control.send(msg).await?;
        }

        Ok(())
    }
}

View File

@ -1,30 +1,27 @@
use super::{Role, Versions};
use crate::{
coding::{decode_string, encode_string, DecodeError, EncodeError},
coding::{DecodeError, EncodeError},
VarInt,
};
use webtransport_generic::{RecvStream, SendStream};
use crate::coding::{AsyncRead, AsyncWrite};
// Sent by the client to setup up the session.
/// Sent by the client to setup the session.
// NOTE: This is not a message type, but rather the control stream header.
// Proposal: https://github.com/moq-wg/moq-transport/issues/138
#[derive(Debug)]
pub struct Client {
// NOTE: This is not a message type, but rather the control stream header.
// Proposal: https://github.com/moq-wg/moq-transport/issues/138
// The list of supported versions in preferred order.
/// The list of supported versions in preferred order.
pub versions: Versions,
// Indicate if the client is a publisher, a subscriber, or both.
/// Indicate if the client is a publisher, a subscriber, or both.
// Proposal: moq-wg/moq-transport#151
pub role: Role,
// The path, non-empty ONLY when not using WebTransport.
pub path: String,
}
impl Client {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
/// Decode a client setup message.
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let typ = VarInt::decode(r).await?;
if typ.into_inner() != 1 {
return Err(DecodeError::InvalidType(typ));
@ -32,16 +29,15 @@ impl Client {
let versions = Versions::decode(r).await?;
let role = Role::decode(r).await?;
let path = decode_string(r).await?;
Ok(Self { versions, role, path })
Ok(Self { versions, role })
}
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
/// Encode a server setup message.
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
VarInt::from_u32(1).encode(w).await?;
self.versions.encode(w).await?;
self.role.encode(w).await?;
encode_string(&self.path, w).await?;
Ok(())
}

View File

@ -1,3 +1,9 @@
//! Messages used for the MoQ Transport handshake.
//!
//! After establishing the WebTransport session, the client creates a bidirectional QUIC stream.
//! The client sends the [Client] message and the server responds with the [Server] message.
//! Both sides negotate the [Version] and [Role].
mod client;
mod role;
mod server;

View File

@ -1,7 +1,8 @@
use webtransport_generic::{RecvStream, SendStream};
use crate::coding::{AsyncRead, AsyncWrite};
use crate::coding::{DecodeError, EncodeError, VarInt};
/// Indicates the endpoint is a publisher, subscriber, or both.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Role {
Publisher,
@ -10,6 +11,7 @@ pub enum Role {
}
impl Role {
/// Returns true if the role is publisher.
pub fn is_publisher(&self) -> bool {
match self {
Self::Publisher | Self::Both => true,
@ -17,12 +19,18 @@ impl Role {
}
}
/// Returns true if the role is a subscriber.
pub fn is_subscriber(&self) -> bool {
match self {
Self::Subscriber | Self::Both => true,
Self::Publisher => false,
}
}
/// Returns true if two endpoints are compatible.
pub fn is_compatible(&self, other: Role) -> bool {
self.is_publisher() == other.is_subscriber() && self.is_subscriber() == other.is_publisher()
}
}
impl From<Role> for VarInt {
@ -49,12 +57,14 @@ impl TryFrom<VarInt> for Role {
}
impl Role {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
/// Decode the role.
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let v = VarInt::decode(r).await?;
v.try_into()
}
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
/// Encode the role.
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
VarInt::from(*self).encode(w).await
}
}

View File

@ -4,23 +4,24 @@ use crate::{
VarInt,
};
use webtransport_generic::{RecvStream, SendStream};
use crate::coding::{AsyncRead, AsyncWrite};
// Sent by the server in response to a client.
/// Sent by the server in response to a client setup.
// NOTE: This is not a message type, but rather the control stream header.
// Proposal: https://github.com/moq-wg/moq-transport/issues/138
#[derive(Debug)]
pub struct Server {
// The list of supported versions in preferred order.
/// The list of supported versions in preferred order.
pub version: Version,
// param: 0x0: Indicate if the server is a publisher, a subscriber, or both.
/// Indicate if the server is a publisher, a subscriber, or both.
// Proposal: moq-wg/moq-transport#151
pub role: Role,
}
impl Server {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
/// Decode the server setup.
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let typ = VarInt::decode(r).await?;
if typ.into_inner() != 2 {
return Err(DecodeError::InvalidType(typ));
@ -32,7 +33,8 @@ impl Server {
Ok(Self { version, role })
}
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
/// Encode the server setup.
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
VarInt::from_u32(2).encode(w).await?;
self.version.encode(w).await?;
self.role.encode(w).await?;

View File

@ -1,14 +1,61 @@
use crate::coding::{DecodeError, EncodeError, VarInt};
use webtransport_generic::{RecvStream, SendStream};
use crate::coding::{AsyncRead, AsyncWrite};
use std::ops::Deref;
/// A version number negotiated during the setup.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Version(pub VarInt);
pub struct Version(VarInt);
impl Version {
/// <https://www.ietf.org/archive/id/draft-ietf-moq-transport-00.html>
pub const DRAFT_00: Version = Version(VarInt::from_u32(0xff00));
/// Fork of draft-ietf-moq-transport-00.
///
/// Rough list of differences:
///
/// # Messages
/// - Messages are sent over a control stream or a data stream.
/// - Data streams: each unidirectional stream contains a single OBJECT message.
/// - Control stream: a (client-initiated) bidirectional stream containing SETUP and then all other messages.
/// - Messages do not contain a length; unknown messages are fatal.
///
/// # SETUP
/// - SETUP is split into SETUP_CLIENT and SETUP_SERVER with separate IDs.
/// - SETUP uses version `0xff00` for draft-00.
/// - SETUP no longer contains optional parameters; all are encoded in order and possibly zero.
/// - SETUP `role` indicates the role of the sender, not the role of the server.
/// - SETUP `path` field removed; use WebTransport for path.
///
/// # SUBSCRIBE
/// - SUBSCRIBE `full_name` is split into separate `namespace` and `name` fields.
/// - SUBSCRIBE no longer contains optional parameters; all are encoded in order and possibly zero.
/// - SUBSCRIBE no longer contains the `auth` parameter; use WebTransport for auth.
/// - SUBSCRIBE no longer contains the `group` parameter; concept no longer exists.
/// - SUBSCRIBE contains the `id` instead of SUBSCRIBE_OK.
/// - SUBSCRIBE_OK and SUBSCRIBE_ERROR reference the subscription `id` instead of the track `full_name`.
/// - SUBSCRIBE_ERROR was renamed to SUBSCRIBE_RESET, sent by publisher to terminate a SUBSCRIBE.
/// - SUBSCRIBE_STOP was added, sent by the subscriber to terminate a SUBSCRIBE.
/// - SUBSCRIBE_OK no longer has `expires`.
///
/// # ANNOUNCE
/// - ANNOUNCE no longer contains optional parameters; all are encoded in order and possibly zero.
/// - ANNOUNCE no longer contains the `auth` field; use WebTransport for auth.
/// - ANNOUNCE_ERROR was renamed to ANNOUNCE_RESET, sent by publisher to terminate an ANNOUNCE.
/// - ANNOUNCE_STOP was added, sent by the subscriber to terminate an ANNOUNCE.
///
/// # OBJECT
/// - OBJECT uses a dedicated QUIC stream.
/// - OBJECT has no size and continues until stream FIN.
/// - OBJECT `priority` is a i32 instead of a varint. (for practical reasons)
/// - OBJECT `expires` was added, a varint in seconds.
/// - OBJECT `group` was removed.
///
/// # GROUP
/// - GROUP concept was removed, replaced with OBJECT as a QUIC stream.
pub const KIXEL_00: Version = Version(VarInt::from_u32(0xbad00));
}
impl From<VarInt> for Version {
@ -24,22 +71,26 @@ impl From<Version> for VarInt {
}
impl Version {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
/// Decode the version number.
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let v = VarInt::decode(r).await?;
Ok(Self(v))
}
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
/// Encode the version number.
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
self.0.encode(w).await?;
Ok(())
}
}
/// A list of versions in arbitrary order.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Versions(pub Vec<Version>);
pub struct Versions(Vec<Version>);
impl Versions {
pub async fn decode<R: RecvStream>(r: &mut R) -> Result<Self, DecodeError> {
/// Decode the version list.
pub async fn decode<R: AsyncRead>(r: &mut R) -> Result<Self, DecodeError> {
let count = VarInt::decode(r).await?.into_inner();
let mut vs = Vec::new();
@ -51,7 +102,8 @@ impl Versions {
Ok(Self(vs))
}
pub async fn encode<W: SendStream>(&self, w: &mut W) -> Result<(), EncodeError> {
/// Encode the version list.
pub async fn encode<W: AsyncWrite>(&self, w: &mut W) -> Result<(), EncodeError> {
let size: VarInt = self.0.len().try_into()?;
size.encode(w).await?;

View File

@ -1,24 +0,0 @@
[package]
name = "moq-warp"
description = "Media over QUIC"
authors = ["Luke Curley"]
repository = "https://github.com/kixelated/moq-rs"
license = "MIT OR Apache-2.0"
version = "0.1.0"
edition = "2021"
keywords = ["quic", "http3", "webtransport", "media", "live"]
categories = ["multimedia", "network-programming", "web-programming"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
moq-transport = { path = "../moq-transport" }
webtransport-generic = "0.5"
tokio = "1.27"
anyhow = "1.0.70"
log = "0.4" # TODO remove
bytes = "1.4"

View File

@ -1,2 +0,0 @@
pub mod model;
pub mod relay;

View File

@ -1,64 +0,0 @@
use std::{error, fmt};
use moq_transport::VarInt;
// TODO generialize broker::Broadcasts and source::Source into this module.
/*
pub struct Publisher {
pub namespace: String,
pub tracks: watch::Publisher<track::Subscriber>,
}
impl Publisher {
pub fn new(namespace: &str) -> Self {
Self {
namespace: namespace.to_string(),
tracks: watch::Publisher::new(),
}
}
pub fn subscribe(&self) -> Subscriber {
Subscriber {
namespace: self.namespace.clone(),
tracks: self.tracks.subscribe(),
}
}
}
#[derive(Clone)]
pub struct Subscriber {
pub namespace: String,
pub tracks: watch::Subscriber<track::Subscriber>,
}
*/
/// An error used to terminate a broadcast, carried over the wire.
#[derive(Clone)]
pub struct Error {
    // The numeric error code.
    pub code: VarInt,
    // A human-readable reason; may be empty.
    pub reason: String,
}

impl error::Error for Error {}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if !self.reason.is_empty() {
            write!(f, "broadcast error ({}): {}", self.code, self.reason)
        } else {
            write!(f, "broadcast error ({})", self.code)
        }
    }
}

// The Debug output was a byte-for-byte duplicate of Display; delegate instead
// of maintaining the format strings twice.
impl fmt::Debug for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

View File

@ -1,5 +0,0 @@
use super::watch;

use bytes::Bytes;

// A fragment producer: pushes the raw byte chunks of a segment, in order.
pub type Publisher = watch::Publisher<Bytes>;

// A fragment consumer: yields the raw byte chunks of a segment, in order.
pub type Subscriber = watch::Subscriber<Bytes>;

View File

@ -1,5 +0,0 @@
pub mod broadcast;
pub mod fragment;
pub mod segment;
pub mod track;
pub mod watch;

View File

@ -1,66 +0,0 @@
use super::watch;
use bytes::Bytes;
use moq_transport::VarInt;
use std::ops::Deref;
use std::sync::Arc;
use std::time;
/// Immutable metadata for a segment, shared between publisher and subscribers.
#[derive(Clone, Debug)]
pub struct Info {
    // The sequence number of the segment within the track.
    pub sequence: VarInt,

    // The priority of the segment within the BROADCAST.
    pub send_order: i32,

    // The time at which the segment expires for cache purposes.
    // None: never drained by track::Publisher::drain_segments.
    pub expires: Option<time::Instant>,
}

/// Produces a segment: the shared Info header plus a growing fragment list.
pub struct Publisher {
    pub info: Arc<Info>,

    // A list of fragments that make up the segment.
    pub fragments: watch::Publisher<Bytes>,
}

impl Publisher {
    /// Create a publisher for a segment described by `info`.
    pub fn new(info: Info) -> Self {
        Self {
            info: Arc::new(info),
            fragments: watch::Publisher::new(),
        }
    }

    /// Create a consumer sharing the same Info and fragment queue.
    pub fn subscribe(&self) -> Subscriber {
        Subscriber {
            info: self.info.clone(),
            fragments: self.fragments.subscribe(),
        }
    }
}

// Deref so `publisher.sequence` etc. read through to the shared Info.
impl Deref for Publisher {
    type Target = Info;

    fn deref(&self) -> &Self::Target {
        &self.info
    }
}

/// Consumes a segment: the shared Info header plus the fragment stream.
#[derive(Clone, Debug)]
pub struct Subscriber {
    pub info: Arc<Info>,

    // A list of fragments that make up the segment.
    pub fragments: watch::Subscriber<Bytes>,
}

// Deref so `subscriber.sequence` etc. read through to the shared Info.
impl Deref for Subscriber {
    type Target = Info;

    fn deref(&self) -> &Self::Target {
        &self.info
    }
}

View File

@ -1,101 +0,0 @@
use super::{segment, watch};
use std::{error, fmt, time};
use moq_transport::VarInt;
/// Produces a track: a named, ordered stream of segments (or a terminal error).
pub struct Publisher {
    pub name: String,

    segments: watch::Publisher<Result<segment::Subscriber, Error>>,
}

impl Publisher {
    /// Create a publisher for a track with the given name.
    pub fn new(name: &str) -> Publisher {
        Self {
            name: name.to_string(),
            segments: watch::Publisher::new(),
        }
    }

    /// Append a segment to the track.
    pub fn push_segment(&mut self, segment: segment::Subscriber) {
        self.segments.push(Ok(segment))
    }

    /// Drop cached segments whose expiration time is earlier than `before`.
    /// Segments without an expiration (or terminal errors) are kept.
    pub fn drain_segments(&mut self, before: time::Instant) {
        self.segments.drain(|segment| match segment {
            Ok(segment) => segment.expires.map_or(false, |expires| expires < before),
            Err(_) => false,
        })
    }

    /// Terminate the track with an error, consuming the publisher.
    pub fn close(mut self, err: Error) {
        self.segments.push(Err(err))
    }

    /// Create a new consumer of this track.
    pub fn subscribe(&self) -> Subscriber {
        Subscriber {
            name: self.name.clone(),
            segments: self.segments.subscribe(),
        }
    }
}

impl fmt::Debug for Publisher {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "track publisher: {:?}", self.name)
    }
}
#[derive(Clone, Debug)]
pub struct Subscriber {
pub name: String,
// A list of segments, which are independently decodable.
segments: watch::Subscriber<Result<segment::Subscriber, Error>>,
}
impl Subscriber {
pub async fn next_segment(&mut self) -> Result<segment::Subscriber, Error> {
let res = self.segments.next().await;
match res {
None => Err(Error {
code: VarInt::from_u32(0),
reason: String::from("closed"),
}),
Some(res) => res,
}
}
}
/// An error used to terminate a track, carried over the wire.
#[derive(Clone)]
pub struct Error {
    // The numeric error code.
    pub code: VarInt,
    // A human-readable reason; may be empty.
    pub reason: String,
}

impl error::Error for Error {}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if !self.reason.is_empty() {
            write!(f, "track error ({}): {}", self.code, self.reason)
        } else {
            write!(f, "track error ({})", self.code)
        }
    }
}

// The Debug output was a byte-for-byte duplicate of Display; delegate instead
// of maintaining the format strings twice.
impl fmt::Debug for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

View File

@ -1,135 +0,0 @@
use core::fmt;
use std::collections::VecDeque;
use tokio::sync::watch;
// Shared queue state: elements still buffered, plus a running count of
// elements removed ("drained") from the front over the queue's lifetime.
// Subscribers use `drained` to translate absolute positions into queue indices.
#[derive(Default)]
struct State<T> {
    queue: VecDeque<T>,
    drained: usize,
}

impl<T> fmt::Debug for State<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "State<{}> ( queue.len(): {}, drained: {} )",
            std::any::type_name::<T>(),
            self.queue.len(),
            self.drained
        )
    }
}

impl<T> State<T> {
    // Create an empty state.
    fn new() -> Self {
        Self {
            queue: VecDeque::new(),
            drained: 0,
        }
    }

    // Append an element at the tail of the queue.
    fn push(&mut self, item: T) {
        self.queue.push_back(item)
    }

    // Remove leading elements while the predicate matches, returning the
    // number removed by this call.
    fn drain<F>(&mut self, pred: F) -> usize
    where
        F: Fn(&T) -> bool,
    {
        let before = self.drained;

        loop {
            match self.queue.front() {
                Some(head) if pred(head) => {
                    self.queue.pop_front();
                    self.drained += 1;
                }
                _ => break,
            }
        }

        self.drained - before
    }
}
/// Publishes a stream of clonable elements to any number of subscribers.
pub struct Publisher<T: Clone> {
    sender: watch::Sender<State<T>>,
}

impl<T: Clone> Publisher<T> {
    /// Create a publisher with an empty queue.
    pub fn new() -> Self {
        let state = State::new();
        let (sender, _) = watch::channel(state);
        Self { sender }
    }

    // Push a new element to the end of the queue, waking subscribers.
    pub fn push(&mut self, value: T) {
        self.sender.send_modify(|state| state.push(value));
    }

    // Remove any elements from the front of the queue that match the condition.
    pub fn drain<F>(&mut self, f: F)
    where
        F: Fn(&T) -> bool,
    {
        // Use send_if_modified to never notify with the updated state:
        // draining only discards history, so waking subscribers is pointless.
        self.sender.send_if_modified(|state| {
            state.drain(f);
            false
        });
    }

    // Subscribe for all NEW updates.
    pub fn subscribe(&self) -> Subscriber<T> {
        // FIX: the starting cursor must be the ABSOLUTE stream position
        // (drained + queue.len()), because Subscriber::next compares it
        // against `state.drained + state.queue.len()`. Starting at
        // queue.len() alone replayed up to `drained` stale elements to a
        // new subscriber whenever elements had already been drained.
        let index = {
            let state = self.sender.borrow();
            state.drained + state.queue.len()
        };

        Subscriber {
            state: self.sender.subscribe(),
            index,
        }
    }
}

impl<T: Clone> Default for Publisher<T> {
    fn default() -> Self {
        Self::new()
    }
}
/// Consumes elements from a Publisher, starting at its subscription point.
#[derive(Clone, Debug)]
pub struct Subscriber<T: Clone> {
    // Watch receiver holding the shared queue state.
    state: watch::Receiver<State<T>>,

    // Position of the next element to yield, measured from the start of the
    // stream (i.e. including elements already drained from the queue).
    index: usize,
}

impl<T: Clone> Subscriber<T> {
    /// Wait for and clone the next element; returns None once the publisher
    /// has been dropped.
    pub async fn next(&mut self) -> Option<T> {
        // Wait until the queue has a new element or if it's closed.
        let state = self
            .state
            .wait_for(|state| self.index < state.drained + state.queue.len())
            .await;

        let state = match state {
            Ok(state) => state,
            Err(_) => return None, // publisher was dropped
        };

        // If our index is smaller than drained, skip past those elements we missed.
        let index = self.index.saturating_sub(state.drained);

        if index < state.queue.len() {
            // Clone the next element in the queue.
            let element = state.queue[index].clone();

            // Increment our index, relative to drained so we can skip ahead if needed.
            self.index = index + state.drained + 1;

            Some(element)
        } else {
            // wait_for guaranteed index < drained + queue.len(), so the
            // in-queue position computed above must be valid.
            unreachable!("impossible subscriber state")
        }
    }
}

View File

@ -1,76 +0,0 @@
use crate::model::{broadcast, track, watch};
use crate::relay::contribute;
use std::collections::hash_map::HashMap;
use std::sync::{Arc, Mutex};
use anyhow::Context;
/// A shared, clonable registry of announced broadcasts.
#[derive(Clone, Default)]
pub struct Broker {
    // Operate on the inner struct so we can share/clone the outer struct.
    inner: Arc<Mutex<BrokerInner>>,
}

#[derive(Default)]
struct BrokerInner {
    // TODO Automatically reclaim dropped sources.
    lookup: HashMap<String, Arc<contribute::Broadcast>>,

    // Notifies watchers of announce/unannounce events.
    updates: watch::Publisher<BrokerUpdate>,
}

/// A change to the set of available broadcasts.
#[derive(Clone)]
pub enum BrokerUpdate {
    // Broadcast was announced
    Insert(String), // TODO include source?

    // Broadcast was unannounced
    Remove(String, broadcast::Error),
}
impl Broker {
    /// Create an empty broker.
    pub fn new() -> Self {
        Default::default()
    }

    // Return the list of available broadcasts, and a subscriber that will return updates (add/remove).
    pub fn available(&self) -> (Vec<String>, watch::Subscriber<BrokerUpdate>) {
        // Grab the lock.
        let this = self.inner.lock().unwrap();

        // Get the list of all available tracks.
        let keys = this.lookup.keys().cloned().collect();

        // Get a subscriber that will return future updates.
        let updates = this.updates.subscribe();

        (keys, updates)
    }

    /// Register a broadcast under a namespace, notifying watchers.
    /// Fails if the namespace is already registered.
    pub fn announce(&self, namespace: &str, source: Arc<contribute::Broadcast>) -> anyhow::Result<()> {
        let mut this = self.inner.lock().unwrap();

        // Entry API: a single hash lookup instead of contains-then-insert.
        match this.lookup.entry(namespace.to_string()) {
            std::collections::hash_map::Entry::Occupied(_) => {
                anyhow::bail!("namespace already registered");
            }
            std::collections::hash_map::Entry::Vacant(entry) => {
                entry.insert(source);
            }
        }

        this.updates.push(BrokerUpdate::Insert(namespace.to_string()));

        Ok(())
    }

    /// Unregister a broadcast, notifying watchers with the error cause.
    pub fn unannounce(&self, namespace: &str, error: broadcast::Error) -> anyhow::Result<()> {
        let mut this = self.inner.lock().unwrap();

        this.lookup.remove(namespace).context("namespace was not published")?;
        this.updates.push(BrokerUpdate::Remove(namespace.to_string(), error));

        Ok(())
    }

    /// Subscribe to a track by namespace and name, if the namespace is announced.
    pub fn subscribe(&self, namespace: &str, name: &str) -> Option<track::Subscriber> {
        let this = self.inner.lock().unwrap();
        this.lookup.get(namespace).and_then(|v| v.subscribe(name))
    }
}

View File

@ -1,308 +0,0 @@
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time;
use tokio::io::AsyncReadExt;
use tokio::sync::mpsc;
use tokio::task::JoinSet; // lock across await boundaries
use moq_transport::message::{Announce, AnnounceError, AnnounceOk, Subscribe, SubscribeError, SubscribeOk};
use moq_transport::{object, Object, VarInt};
use webtransport_generic::Session as WTSession;
use bytes::BytesMut;
use anyhow::Context;
use crate::model::{broadcast, segment, track};
use crate::relay::{
message::{Component, Contribute},
Broker,
};
// TODO experiment with making this Clone, so every task can have its own copy.
/// Per-connection state for a contributing (publishing) session.
pub struct Session<S: WTSession> {
    // Used to receive objects.
    objects: object::Receiver<S>,

    // Used to send and receive control messages.
    control: Component<Contribute>,

    // Globally announced namespaces, which we can add ourselves to.
    broker: Broker,

    // The names of active broadcasts being produced.
    broadcasts: HashMap<String, Arc<Broadcast>>,

    // Active tracks being produced by this session.
    publishers: Publishers,

    // Tasks we are currently serving.
    run_segments: JoinSet<anyhow::Result<()>>, // receiving objects
}
impl<S: WTSession> Session<S> {
    /// Bundle the object receiver, control channel, and shared broker into a session.
    pub fn new(objects: object::Receiver<S>, control: Component<Contribute>, broker: Broker) -> Self {
        Self {
            objects,
            control,
            broker,
            broadcasts: HashMap::new(),
            publishers: Publishers::new(),
            run_segments: JoinSet::new(),
        }
    }

    /// Event loop: multiplex finished segment tasks, incoming objects,
    /// outgoing subscriptions, and inbound control messages until an error.
    pub async fn run(mut self) -> anyhow::Result<()> {
        loop {
            tokio::select! {
                // Reap finished segment tasks; a per-segment failure is logged, not fatal.
                res = self.run_segments.join_next(), if !self.run_segments.is_empty() => {
                    let res = res.expect("no tasks").expect("task aborted");

                    if let Err(err) = res {
                        log::warn!("failed to produce segment: {:?}", err);
                    }
                },
                object = self.objects.recv() => {
                    let (object, stream) = object.context("failed to receive object")?;
                    let res = self.receive_object(object, stream).await;

                    if let Err(err) = res {
                        log::warn!("failed to receive object: {:?}", err);
                    }
                },
                // Forward SUBSCRIBE requests issued on our behalf to the peer.
                subscribe = self.publishers.incoming() => {
                    let msg = subscribe.context("failed to receive subscription")?;
                    self.control.send(msg).await?;
                },
                msg = self.control.recv() => {
                    let msg = msg.context("failed to receive control message")?;
                    self.receive_message(msg).await?;
                },
            }
        }
    }

    /// Dispatch a single contribution-side control message.
    async fn receive_message(&mut self, msg: Contribute) -> anyhow::Result<()> {
        match msg {
            Contribute::Announce(msg) => self.receive_announce(msg).await,
            Contribute::SubscribeOk(msg) => self.receive_subscribe_ok(msg),
            Contribute::SubscribeError(msg) => self.receive_subscribe_error(msg),
        }
    }

    /// Start a new segment for the object's track and spawn a task to read its payload.
    async fn receive_object(&mut self, obj: Object, stream: S::RecvStream) -> anyhow::Result<()> {
        let track = obj.track;

        // Keep objects in memory for 10s
        let expires = time::Instant::now() + time::Duration::from_secs(10);

        let segment = segment::Info {
            sequence: obj.sequence,
            send_order: obj.send_order,
            expires: Some(expires),
        };

        let segment = segment::Publisher::new(segment);

        self.publishers
            .push_segment(track, segment.subscribe())
            .context("failed to publish segment")?;

        // TODO implement a timeout
        self.run_segments
            .spawn(async move { Self::run_segment(segment, stream).await });

        Ok(())
    }

    /// Copy payload chunks from the QUIC stream into the segment until EOF.
    async fn run_segment(mut segment: segment::Publisher, mut stream: S::RecvStream) -> anyhow::Result<()> {
        let mut buf = BytesMut::new();

        while stream.read_buf(&mut buf).await? > 0 {
            // Split off the data we read into the buffer, freezing it so multiple threads can read simultaneously.
            let data = buf.split().freeze();
            segment.fragments.push(data);
        }

        Ok(())
    }

    /// Handle ANNOUNCE, replying with ANNOUNCE_OK or ANNOUNCE_ERROR.
    async fn receive_announce(&mut self, msg: Announce) -> anyhow::Result<()> {
        match self.receive_announce_inner(&msg).await {
            Ok(()) => {
                let msg = AnnounceOk {
                    track_namespace: msg.track_namespace,
                };
                self.control.send(msg).await
            }
            Err(e) => {
                let msg = AnnounceError {
                    track_namespace: msg.track_namespace,
                    code: VarInt::from_u32(1),
                    reason: e.to_string(),
                };
                self.control.send(msg).await
            }
        }
    }

    /// Register the namespace with the broker; production starts lazily on first subscribe.
    async fn receive_announce_inner(&mut self, msg: &Announce) -> anyhow::Result<()> {
        // Create a broadcast and announce it.
        // We don't actually start producing the broadcast until we receive a subscription.
        let broadcast = Arc::new(Broadcast::new(&msg.track_namespace, &self.publishers));

        self.broker.announce(&msg.track_namespace, broadcast.clone())?;
        self.broadcasts.insert(msg.track_namespace.clone(), broadcast);

        Ok(())
    }

    /// Acknowledgement of a SUBSCRIBE we sent; currently ignored.
    fn receive_subscribe_ok(&mut self, _msg: SubscribeOk) -> anyhow::Result<()> {
        // TODO make sure this is for a track we are subscribed to
        Ok(())
    }

    /// The peer rejected our SUBSCRIBE: stop producing the corresponding track.
    fn receive_subscribe_error(&mut self, msg: SubscribeError) -> anyhow::Result<()> {
        let error = track::Error {
            code: msg.code,
            reason: msg.reason,
        };

        // Stop producing the track.
        self.publishers
            .close(msg.track_id, error)
            .context("failed to close track")?;

        Ok(())
    }
}
impl<S: WTSession> Drop for Session<S> {
    fn drop(&mut self) {
        // Unannounce all broadcasts we have announced.
        // TODO make this automatic so we can't screw up?
        // TODO Implement UNANNOUNCE so we can return good errors.
        for broadcast in self.broadcasts.keys() {
            let error = broadcast::Error {
                code: VarInt::from_u32(1),
                reason: "connection closed".to_string(),
            };

            // NOTE(review): this unwrap panics inside drop if the broker no
            // longer knows the namespace — assumes announce/unannounce stay
            // strictly paired; confirm.
            self.broker.unannounce(broadcast, error).unwrap();
        }
    }
}
// A list of subscriptions for a broadcast.
//
// Clones share state: the subscription map sits behind an Arc and the queue
// sender can be cloned freely.
#[derive(Clone)]
pub struct Broadcast {
// Our namespace
namespace: String,
// A lookup from name to a subscription (duplicate subscribers)
subscriptions: Arc<Mutex<HashMap<String, track::Subscriber>>>,
// Issue a SUBSCRIBE message for a new subscription (new subscriber)
queue: mpsc::UnboundedSender<(String, track::Publisher)>,
}
impl Broadcast {
    // Creates a broadcast for the given namespace that issues new
    // subscriptions through the publishers' queue.
    pub fn new(namespace: &str, publishers: &Publishers) -> Self {
        Self {
            namespace: namespace.to_string(),
            subscriptions: Default::default(),
            queue: publishers.sender.clone(),
        }
    }

    // Returns a subscriber for the named track, creating the track on demand.
    pub fn subscribe(&self, name: &str) -> Option<track::Subscriber> {
        let mut lookup = self.subscriptions.lock().unwrap();

        // Reuse the existing subscription if somebody already asked for this track.
        let existing = lookup.get(name).cloned();
        if existing.is_some() {
            return existing;
        }

        // Otherwise create a fresh track and grab a handle for the new subscriber.
        let publisher = track::Publisher::new(name);
        let subscriber = publisher.subscribe();

        // Remember the subscriber so later requests are deduplicated.
        lookup.insert(name.to_string(), subscriber.clone());

        // Hand the publisher half to another thread to actually fulfill the subscription.
        self.queue.send((self.namespace.clone(), publisher)).unwrap();

        Some(subscriber)
    }
}
// The set of tracks this session is producing, keyed by subscription ID,
// fed by subscription requests queued from Broadcast::subscribe.
pub struct Publishers {
// A lookup from subscription ID to a track being produced, or none if it's been closed.
tracks: HashMap<VarInt, Option<track::Publisher>>,
// The next subscription ID
next: u64,
// A queue of subscriptions that we need to fulfill
receiver: mpsc::UnboundedReceiver<(String, track::Publisher)>,
// A clonable queue, so other threads can issue subscriptions.
sender: mpsc::UnboundedSender<(String, track::Publisher)>,
}
impl Default for Publishers {
fn default() -> Self {
let (sender, receiver) = mpsc::unbounded_channel();
Self {
tracks: Default::default(),
next: 0,
sender,
receiver,
}
}
}
impl Publishers {
    pub fn new() -> Self {
        Default::default()
    }

    // Appends a segment to the track with the given subscription ID,
    // evicting any expired segments afterwards.
    pub fn push_segment(&mut self, id: VarInt, segment: segment::Subscriber) -> anyhow::Result<()> {
        let entry = self.tracks.get_mut(&id).context("no track with that ID")?;
        let track = entry.as_mut().context("track closed")?; // TODO don't make fatal

        track.push_segment(segment);
        track.drain_segments(time::Instant::now());

        Ok(())
    }

    // Stops producing the track with the given subscription ID, leaving a
    // tombstone (None) so the ID is remembered as closed.
    pub fn close(&mut self, id: VarInt, err: track::Error) -> anyhow::Result<()> {
        let entry = self.tracks.get_mut(&id).context("no track with that ID")?;
        let track = entry.take().context("track closed")?;

        track.close(err);

        Ok(())
    }

    // Waits for the next queued subscription and returns the SUBSCRIBE message to issue.
    pub async fn incoming(&mut self) -> anyhow::Result<Subscribe> {
        let (namespace, track) = self.receiver.recv().await.context("no more subscriptions")?;

        // Allocate the next subscription ID.
        let id = VarInt::try_from(self.next)?;
        self.next += 1;

        let msg = Subscribe {
            track_id: id,
            track_namespace: namespace,
            track_name: track.name.clone(),
        };

        // Keep the publisher so segments and errors can be routed to it by ID.
        self.tracks.insert(id, Some(track));

        Ok(msg)
    }
}

View File

@ -1,205 +0,0 @@
use anyhow::Context;
use tokio::io::AsyncWriteExt;
use tokio::task::JoinSet; // allows locking across await
use moq_transport::message::{Announce, AnnounceError, AnnounceOk, Subscribe, SubscribeError, SubscribeOk};
use moq_transport::{object, Object, VarInt};
use webtransport_generic::Session as WTSession;
use crate::model::{segment, track};
use crate::relay::{
message::{Component, Distribute},
Broker, BrokerUpdate,
};
// Distribution half of a relay session: serves announced broadcasts to a client.
pub struct Session<S: WTSession> {
// Objects are sent to the client
objects: object::Sender<S>,
// Used to send and receive control messages.
control: Component<Distribute>,
// Globally announced namespaces, which can be subscribed to.
broker: Broker,
// A list of tasks that are currently running.
run_subscribes: JoinSet<SubscribeError>, // run subscriptions, sending the returned error if they fail
}
impl<S: WTSession> Session<S> {
// Creates a distribution session that serves objects over `objects`, exchanges
// control messages via `control`, and discovers broadcasts via `broker`.
pub fn new(objects: object::Sender<S>, control: Component<Distribute>, broker: Broker) -> Self {
Self {
objects,
control,
broker,
run_subscribes: JoinSet::new(),
}
}
// Main loop: announces every available broadcast, then services finished
// subscription tasks, broker updates, and incoming control messages forever
// (returns only on error).
pub async fn run(mut self) -> anyhow::Result<()> {
// Announce all available tracks and get a stream of updates.
let (available, mut updates) = self.broker.available();
for namespace in available {
self.on_available(BrokerUpdate::Insert(namespace)).await?;
}
loop {
tokio::select! {
// A subscription task finished; forward its SUBSCRIBE_ERROR to the client.
res = self.run_subscribes.join_next(), if !self.run_subscribes.is_empty() => {
let res = res.expect("no tasks").expect("task aborted");
self.control.send(res).await?;
},
// The broker reported a broadcast being inserted or removed.
delta = updates.next() => {
let delta = delta.expect("no more broadcasts");
self.on_available(delta).await?;
},
// The client sent us a control message.
msg = self.control.recv() => {
let msg = msg.context("failed to receive control message")?;
self.receive_message(msg).await?;
},
}
}
}
// Dispatches a distribution control message to its handler.
async fn receive_message(&mut self, msg: Distribute) -> anyhow::Result<()> {
match msg {
Distribute::AnnounceOk(msg) => self.receive_announce_ok(msg),
Distribute::AnnounceError(msg) => self.receive_announce_error(msg),
Distribute::Subscribe(msg) => self.receive_subscribe(msg).await,
}
}
// Handles ANNOUNCE_OK; currently a no-op acknowledgement.
fn receive_announce_ok(&mut self, _msg: AnnounceOk) -> anyhow::Result<()> {
// TODO make sure we sent this announce
Ok(())
}
// Handles ANNOUNCE_ERROR; only logs it for now.
fn receive_announce_error(&mut self, msg: AnnounceError) -> anyhow::Result<()> {
// TODO make sure we sent this announce
// TODO remove this from the list of subscribable broadcasts.
log::warn!("received error {:?}", msg);
Ok(())
}
// Handles SUBSCRIBE, replying SUBSCRIBE_OK on success or SUBSCRIBE_ERROR
// (generic code 1) on failure.
async fn receive_subscribe(&mut self, msg: Subscribe) -> anyhow::Result<()> {
match self.receive_subscribe_inner(&msg).await {
Ok(()) => {
self.control
.send(SubscribeOk {
track_id: msg.track_id,
expires: None,
})
.await
}
Err(e) => {
self.control
.send(SubscribeError {
track_id: msg.track_id,
code: VarInt::from_u32(1),
reason: e.to_string(),
})
.await
}
}
}
// Looks up the requested track via the broker and spawns a task to serve it.
async fn receive_subscribe_inner(&mut self, msg: &Subscribe) -> anyhow::Result<()> {
let track = self
.broker
.subscribe(&msg.track_namespace, &msg.track_name)
.context("could not find broadcast")?;
// TODO can we just clone self?
let objects = self.objects.clone();
let track_id = msg.track_id;
self.run_subscribes
.spawn(async move { Self::run_subscribe(objects, track_id, track).await });
Ok(())
}
// Serves every segment of a track to the client, returning the
// SUBSCRIBE_ERROR to send once the track ends or fails.
async fn run_subscribe(
objects: object::Sender<S>,
track_id: VarInt,
mut track: track::Subscriber,
) -> SubscribeError {
let mut tasks = JoinSet::new();
let mut result = None;
loop {
tokio::select! {
// Accept new segments added to the track.
segment = track.next_segment(), if result.is_none() => {
match segment {
Ok(segment) => {
let objects = objects.clone();
tasks.spawn(async move { Self::serve_group(objects, track_id, segment).await });
},
Err(e) => {
// Remember the terminal error; in-flight segments are
// drained (branch above is now disabled) before returning.
result = Some(SubscribeError {
track_id,
code: e.code,
reason: e.reason,
})
},
}
},
// Poll any pending segments until they exit.
res = tasks.join_next(), if !tasks.is_empty() => {
let res = res.expect("no tasks").expect("task aborted");
if let Err(err) = res {
log::error!("failed to serve segment: {:?}", err);
}
},
// Both branches disabled: the track errored and all segment tasks finished.
else => return result.unwrap()
}
}
}
// Sends one segment to the client as a single object, writing fragments as they arrive.
async fn serve_group(
mut objects: object::Sender<S>,
track_id: VarInt,
mut segment: segment::Subscriber,
) -> anyhow::Result<()> {
let object = Object {
track: track_id,
group: segment.sequence,
sequence: VarInt::from_u32(0), // Always zero since we send an entire group as an object
send_order: segment.send_order,
};
let mut stream = objects.open(object).await?;
// Write each fragment as they are available.
while let Some(fragment) = segment.fragments.next().await {
stream.write_all(&fragment).await?;
}
// NOTE: stream is automatically closed when dropped
Ok(())
}
// Forwards a broker update to the client as an ANNOUNCE (insert) or
// ANNOUNCE_ERROR (remove).
async fn on_available(&mut self, delta: BrokerUpdate) -> anyhow::Result<()> {
match delta {
BrokerUpdate::Insert(name) => {
self.control
.send(Announce {
track_namespace: name.clone(),
})
.await
}
BrokerUpdate::Remove(name, error) => {
self.control
.send(AnnounceError {
track_namespace: name,
code: error.code,
reason: error.reason,
})
.await
}
}
}
}

View File

@ -1,127 +0,0 @@
use tokio::sync::mpsc;
use moq_transport::message::{
self, Announce, AnnounceError, AnnounceOk, Message, Subscribe, SubscribeError, SubscribeOk,
};
use webtransport_generic::Session;
// Owns the control stream: writes queued outgoing messages and routes each
// incoming message to the contribute or distribute component.
pub struct Main<S: Session> {
send_control: message::Sender<S::SendStream>,
recv_control: message::Receiver<S::RecvStream>,
// Messages queued by components, waiting to be written to the control stream.
outgoing: mpsc::Receiver<Message>,
// Incoming messages relevant to contribution.
contribute: mpsc::Sender<Contribute>,
// Incoming messages relevant to distribution.
distribute: mpsc::Sender<Distribute>,
}
impl<S: Session> Main<S> {
// Pumps the control stream: sends queued outgoing messages and routes
// incoming messages to the components. Runs until a send/handle error.
pub async fn run(mut self) -> anyhow::Result<()> {
loop {
tokio::select! {
Some(msg) = self.outgoing.recv() => self.send_control.send(msg).await?,
// NOTE(review): an Err from recv() disables this branch for that poll
// instead of surfacing the error — confirm that's intended.
Ok(msg) = self.recv_control.recv() => self.handle(msg).await?,
}
}
}
// Routes a control message: try contribution first, then distribution;
// anything neither accepts is a fatal error.
pub async fn handle(&mut self, msg: Message) -> anyhow::Result<()> {
match msg.try_into() {
Ok(msg) => self.contribute.send(msg).await?,
Err(msg) => match msg.try_into() {
Ok(msg) => self.distribute.send(msg).await?,
Err(msg) => anyhow::bail!("unsupported control message: {:?}", msg),
},
}
Ok(())
}
}
// One half of the control stream: receives only its subset of messages (T)
// but can queue any control message for sending.
pub struct Component<T> {
incoming: mpsc::Receiver<T>,
outgoing: mpsc::Sender<Message>,
}
impl<T> Component<T> {
    // Converts the given message into a generic control Message and queues it
    // for the control stream.
    pub async fn send<M: Into<Message>>(&mut self, msg: M) -> anyhow::Result<()> {
        let msg = msg.into();
        self.outgoing.send(msg).await?;

        Ok(())
    }

    // Receives the next message routed to this component, or None when the
    // channel is closed.
    pub async fn recv(&mut self) -> Option<T> {
        self.incoming.recv().await
    }
}
// Splits a control stream into two components, based on whether a message is
// for contribution or distribution.
pub fn split<S: Session>(
    send_control: message::Sender<S::SendStream>,
    recv_control: message::Receiver<S::RecvStream>,
) -> (Main<S>, Component<Contribute>, Component<Distribute>) {
    // Small bounded channels: one shared outgoing queue, one inbound queue per component.
    let (outgoing_tx, outgoing_rx) = mpsc::channel(1);
    let (contribute_tx, contribute_rx) = mpsc::channel(1);
    let (distribute_tx, distribute_rx) = mpsc::channel(1);

    let contribute = Component {
        incoming: contribute_rx,
        outgoing: outgoing_tx.clone(),
    };

    let distribute = Component {
        incoming: distribute_rx,
        outgoing: outgoing_tx,
    };

    let control = Main {
        send_control,
        recv_control,
        outgoing: outgoing_rx,
        contribute: contribute_tx,
        distribute: distribute_tx,
    };

    (control, contribute, distribute)
}
// Messages we expect to receive from the client for contribution.
// The TryFrom<Message> impl below performs the routing.
#[derive(Debug)]
pub enum Contribute {
Announce(Announce),
SubscribeOk(SubscribeOk),
SubscribeError(SubscribeError),
}
impl TryFrom<Message> for Contribute {
    // Non-contribution messages are handed back so the caller can route them elsewhere.
    type Error = Message;

    fn try_from(msg: Message) -> Result<Self, Self::Error> {
        Ok(match msg {
            Message::Announce(msg) => Self::Announce(msg),
            Message::SubscribeOk(msg) => Self::SubscribeOk(msg),
            Message::SubscribeError(msg) => Self::SubscribeError(msg),
            other => return Err(other),
        })
    }
}
// Messages we expect to receive from the client for distribution.
// The TryFrom<Message> impl below performs the routing.
#[derive(Debug)]
pub enum Distribute {
AnnounceOk(AnnounceOk),
AnnounceError(AnnounceError),
Subscribe(Subscribe),
}
impl TryFrom<Message> for Distribute {
type Error = Message;
fn try_from(value: Message) -> Result<Self, Self::Error> {
match value {
Message::AnnounceOk(msg) => Ok(Self::AnnounceOk(msg)),
Message::AnnounceError(msg) => Ok(Self::AnnounceError(msg)),
Message::Subscribe(msg) => Ok(Self::Subscribe(msg)),
_ => Err(value),
}
}
}

View File

@ -1,8 +0,0 @@
mod broker;
mod contribute;
mod distribute;
mod message;
mod session;
pub use broker::*;
pub use session::*;

View File

@ -1,37 +0,0 @@
use crate::relay::{contribute, distribute, message, Broker};
use webtransport_generic::Session as WTSession;
// A relay session composed of a contribution half and a distribution half
// that share a single control stream.
pub struct Session<S: WTSession> {
// Split logic into contribution/distribution to reduce the problem space.
contribute: contribute::Session<S>,
distribute: distribute::Session<S>,
// Used to receive control messages and forward to contribute/distribute.
control: message::Main<S>,
}
impl<S: WTSession> Session<S> {
    // Wires up a relay session: splits the control stream and hands each half,
    // plus the matching object stream, to the contribute/distribute sessions.
    pub fn new(session: moq_transport::Session<S>, broker: Broker) -> Self {
        let (control, contribute, distribute) = message::split(session.send_control, session.recv_control);

        Self {
            control,
            contribute: contribute::Session::new(session.recv_objects, contribute, broker.clone()),
            distribute: distribute::Session::new(session.send_objects, distribute, broker),
        }
    }

    // Drives all three halves concurrently, returning the first error any of them hits.
    pub async fn run(self) -> anyhow::Result<()> {
        let run_control = self.control.run();
        let run_contribute = self.contribute.run();
        let run_distribute = self.distribute.run();

        tokio::try_join!(run_control, run_contribute, run_distribute)?;

        Ok(())
    }
}