Driver/Input: Migrate audio backend to Symphonia (#89)

This extensive PR rewrites the internal mixing logic of the driver to use symphonia for parsing and decoding audio data, and rubato to resample audio. Existing logic to decode DCA and Opus formats/data has been reworked as plugins for symphonia. The main benefit is that we no longer need to keep yt-dlp and ffmpeg processes alive, saving a lot of memory and CPU: all decoding can be done in Rust! In exchange, we now need to do a lot of the HTTP handling and resumption ourselves, but this is still a huge net positive.

`Input`s have been completely reworked such that all default (non-cached) sources are lazy by default, and are no longer covered by a special-case `Restartable`. These now span a gamut from a `Compose` (lazy), to a live source, to a fully `Parsed` source. As mixing is still sync, this includes adapters for `AsyncRead`/`AsyncSeek`, and HTTP streams.

`Track`s have been reworked so that they only contain initialisation state for each track. `TrackHandles` are only created once a `Track`/`Input` has been handed over to the driver, replacing `create_player` and related functions. `TrackHandle::action` now acts on a `View` of (im)mutable state, and can request seeks/readying via `Action`.

Per-track event handling has also been improved -- we can now determine and propagate the reason behind individual track errors due to the new backend. Some `TrackHandle` commands (seek etc.) benefit from this, and now use internal callbacks to signal completion.

Due to associated PRs on felixmcfelix/songbird from avid testers, this includes general clippy tweaks, API additions, and other repo-wide cleanup. Thanks go out to the below co-authors.

Co-authored-by: Gnome! <45660393+GnomedDev@users.noreply.github.com>
Co-authored-by: Alakh <36898190+alakhpc@users.noreply.github.com>
This commit is contained in:
Kyle Simpson
2022-07-23 23:29:02 +01:00
parent 6c6ffa7ca8
commit 8cc7a22b0b
136 changed files with 9761 additions and 4891 deletions

View File

@@ -0,0 +1,332 @@
use crate::input::AudioStreamError;
use async_trait::async_trait;
use flume::{Receiver, RecvError, Sender, TryRecvError};
use futures::{future::Either, stream::FuturesUnordered, FutureExt, StreamExt};
use ringbuf::*;
use std::{
io::{
Error as IoError,
ErrorKind as IoErrorKind,
Read,
Result as IoResult,
Seek,
SeekFrom,
Write,
},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
use symphonia_core::io::MediaSource;
use tokio::{
io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt},
sync::Notify,
};
/// Async half of the adapter: drives the wrapped [`AsyncMediaSource`] and
/// pushes its bytes into a ring buffer for the sync half to consume.
struct AsyncAdapterSink {
    /// Producer half of the ring buffer shared with [`AsyncAdapterStream`].
    bytes_in: Producer<u8>,
    /// Requests (wake, seek, byte-len) sent by the sync half.
    req_rx: Receiver<AdapterRequest>,
    /// Responses back to the sync half.
    resp_tx: Sender<AdapterResponse>,
    /// The underlying async byte source being adapted.
    stream: Box<dyn AsyncMediaSource>,
    /// Signalled by the sync half when ring-buffer space has been freed.
    notify_rx: Arc<Notify>,
}
impl AsyncAdapterSink {
    /// Main task loop: pulls bytes from the async source into the ring buffer
    /// and services requests from the sync half, exiting once either channel
    /// endpoint is dropped or an unrecoverable read error occurs.
    async fn launch(mut self) {
        // Scratch buffer for reads from the async source.
        let mut inner_buf = [0u8; 10 * 1024];
        // Span of `inner_buf` holding bytes not yet copied into the ring buffer.
        let mut read_region = 0..0;
        // The source returned a zero-length read (end of stream).
        let mut hit_end = false;
        // The ring buffer is full; wait for a wake-up from the sync half.
        let mut blocked = false;
        // Set while servicing a seek so stale bytes are not forwarded.
        let mut pause_buf_moves = false;
        // Seek result held until the sync half confirms it cleared its buffer.
        let mut seek_res = None;
        // Total bytes read so far; used as the resume offset after a read error.
        let mut seen_bytes = 0;

        loop {
            // if read_region is empty, refill from src.
            // if that read is zero, tell other half.
            // if WouldBlock, block on msg acquire,
            //  else non_block msg acquire.
            if !pause_buf_moves {
                if !hit_end && read_region.is_empty() {
                    if let Ok(n) = self.stream.read(&mut inner_buf).await {
                        read_region = 0..n;
                        if n == 0 {
                            // Tell the sync half that the stream has ended.
                            drop(self.resp_tx.send_async(AdapterResponse::ReadZero).await);
                            hit_end = true;
                        }
                        seen_bytes += n as u64;
                    } else {
                        // Read failed: try to recreate the stream at the current
                        // offset; give up entirely if the source can't resume.
                        match self.stream.try_resume(seen_bytes).await {
                            Ok(s) => {
                                self.stream = s;
                            },
                            Err(_e) => break,
                        }
                    }
                }

                // Copy as much as possible into the ring buffer; a failed write
                // means it is full, so mark ourselves blocked.
                while !read_region.is_empty() && !blocked {
                    if let Ok(n_moved) = self
                        .bytes_in
                        .write(&inner_buf[read_region.start..read_region.end])
                    {
                        read_region.start += n_moved;
                    } else {
                        blocked = true;
                    }
                }
            }

            // When blocked or at end-of-stream there is nothing to pump, so wait
            // on either a request or a wake-up notification. Otherwise, poll for
            // requests without blocking and keep moving bytes.
            let msg = if blocked || hit_end {
                let mut fs = FuturesUnordered::new();
                fs.push(Either::Left(self.req_rx.recv_async()));
                fs.push(Either::Right(self.notify_rx.notified().map(|_| {
                    let o: Result<AdapterRequest, RecvError> = Ok(AdapterRequest::Wake);
                    o
                })));
                match fs.next().await {
                    Some(Ok(a)) => a,
                    _ => break,
                }
            } else {
                match self.req_rx.try_recv() {
                    Ok(a) => a,
                    Err(TryRecvError::Empty) => continue,
                    _ => break,
                }
            };

            match msg {
                AdapterRequest::Wake => blocked = false,
                AdapterRequest::ByteLen => {
                    drop(
                        self.resp_tx
                            .send_async(AdapterResponse::ByteLen(self.stream.byte_len().await))
                            .await,
                    );
                },
                AdapterRequest::Seek(pos) => {
                    // Stop forwarding buffered bytes until the sync half has
                    // discarded its stale data and replied with `SeekCleared`.
                    pause_buf_moves = true;
                    drop(self.resp_tx.send_async(AdapterResponse::SeekClear).await);
                    seek_res = Some(self.stream.seek(pos).await);
                },
                AdapterRequest::SeekCleared => {
                    if let Some(res) = seek_res.take() {
                        drop(
                            self.resp_tx
                                .send_async(AdapterResponse::SeekResult(res))
                                .await,
                        );
                    }
                    pause_buf_moves = false;
                },
            }
        }
    }
}
/// An adapter for converting an async media source into a synchronous one
/// usable by symphonia.
///
/// This adapter takes a source implementing `AsyncRead`, and allows the receive side to
/// pass along seek requests needed. This allows for passing bytes from exclusively `AsyncRead`
/// streams (e.g., hyper HTTP sessions) to Songbird.
pub struct AsyncAdapterStream {
    /// Consumer half of the ring buffer filled by the async task.
    bytes_out: Consumer<u8>,
    /// Whether the wrapped source reported itself seekable at creation time.
    can_seek: bool,
    // Note: this is Atomic just to work around the need for
    // check_messages to take &self rather than &mut.
    /// Set once the async half reports a zero-length read (end of stream).
    finalised: AtomicBool,
    /// Requests (wake, seek, byte-len) sent to the async half.
    req_tx: Sender<AdapterRequest>,
    /// Responses received from the async half.
    resp_rx: Receiver<AdapterResponse>,
    /// Wakes the async half when ring-buffer space has been freed.
    notify_tx: Arc<Notify>,
}
impl AsyncAdapterStream {
    /// Wrap and pull from an async file stream, with an intermediate ring-buffer of size `buf_len`
    /// between the async and sync halves.
    ///
    /// The async half is spawned onto the tokio runtime immediately; the
    /// returned stream is the sync half.
    #[must_use]
    pub fn new(stream: Box<dyn AsyncMediaSource>, buf_len: usize) -> AsyncAdapterStream {
        let (bytes_in, bytes_out) = RingBuffer::new(buf_len).split();
        let (resp_tx, resp_rx) = flume::unbounded();
        let (req_tx, req_rx) = flume::unbounded();
        // Seekability is captured once, before the stream moves to the sink task.
        let can_seek = stream.is_seekable();
        let notify_rx = Arc::new(Notify::new());
        let notify_tx = notify_rx.clone();

        let sink = AsyncAdapterSink {
            bytes_in,
            req_rx,
            resp_tx,
            stream,
            notify_rx,
        };
        let stream = AsyncAdapterStream {
            bytes_out,
            can_seek,
            finalised: false.into(),
            req_tx,
            resp_rx,
            notify_tx,
        };

        tokio::spawn(async move {
            sink.launch().await;
        });

        stream
    }

    /// Drains pending responses from the async half.
    ///
    /// `ReadZero` responses are folded into the `finalised` flag rather than
    /// returned. If `block` is set, spins until a returnable response arrives
    /// or the channel disconnects; otherwise returns `None` when empty.
    fn handle_messages(&self, block: bool) -> Option<AdapterResponse> {
        loop {
            match self.resp_rx.try_recv() {
                Ok(AdapterResponse::ReadZero) => {
                    self.finalised.store(true, Ordering::Relaxed);
                },
                Ok(a) => break Some(a),
                Err(TryRecvError::Empty) if !block => break None,
                Err(TryRecvError::Disconnected) => break None,
                Err(TryRecvError::Empty) => {},
            }
        }
    }

    /// True when the async half has hung up AND no responses remain queued.
    fn is_dropped_and_clear(&self) -> bool {
        self.resp_rx.is_empty() && self.resp_rx.is_disconnected()
    }

    /// Converts a dropped async half into an `UnexpectedEof` I/O error.
    fn check_dropped(&self) -> IoResult<()> {
        if self.is_dropped_and_clear() {
            Err(IoError::new(
                IoErrorKind::UnexpectedEof,
                "Async half was dropped.",
            ))
        } else {
            Ok(())
        }
    }
}
impl Read for AsyncAdapterStream {
    /// Pulls bytes from the ring buffer filled by the async half.
    ///
    /// Spins (yielding the thread) while the buffer is empty, returning
    /// `Ok(0)` once the async half has signalled end-of-stream, or an
    /// `UnexpectedEof` error if it was dropped.
    fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
        // TODO: make this run via condvar instead?
        // This needs to remain blocking or spin loopy
        // Mainly because this is at odds with "keep CPU low."
        loop {
            drop(self.handle_messages(false));

            match self.bytes_out.read(buf) {
                Ok(n) => {
                    // Ring-buffer space may have freed up: wake the async half.
                    self.notify_tx.notify_one();
                    return Ok(n);
                },
                Err(e) if e.kind() == IoErrorKind::WouldBlock => {
                    // receive side must ABSOLUTELY be unblocked here.
                    self.notify_tx.notify_one();
                    if self.finalised.load(Ordering::Relaxed) {
                        return Ok(0);
                    }
                    self.check_dropped()?;
                    std::thread::yield_now();
                },
                // Forward any other I/O error untouched. (A leftover debug
                // `println!` here has been removed: library code must not
                // write diagnostics to stdout.)
                a => return a,
            }
        }
    }
}
impl Seek for AsyncAdapterStream {
    /// Forwards a seek to the async half and blocks until it completes.
    ///
    /// Protocol: send `Seek` -> await `SeekClear` (async half has paused
    /// writing) -> discard all buffered bytes -> send `SeekCleared` -> await
    /// the final `SeekResult`.
    fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
        if !self.can_seek {
            return Err(IoError::new(
                IoErrorKind::Unsupported,
                "Async half does not support seek operations.",
            ));
        }
        self.check_dropped()?;
        let _ = self.req_tx.send(AdapterRequest::Seek(pos));

        // wait for async to tell us that it has stopped writing,
        // then clear buf and allow async to write again.
        // A seek also invalidates any previously observed end-of-stream.
        self.finalised.store(false, Ordering::Relaxed);

        match self.handle_messages(true) {
            Some(AdapterResponse::SeekClear) => {},
            // `None` implies disconnection, so `check_dropped` must return
            // `Err` here; the success path is unreachable.
            None => self.check_dropped().map(|_| unreachable!())?,
            _ => unreachable!(),
        }

        self.bytes_out.discard(self.bytes_out.capacity());
        let _ = self.req_tx.send(AdapterRequest::SeekCleared);

        match self.handle_messages(true) {
            Some(AdapterResponse::SeekResult(a)) => a,
            None => self.check_dropped().map(|_| unreachable!()),
            _ => unreachable!(),
        }
    }
}
impl MediaSource for AsyncAdapterStream {
    fn is_seekable(&self) -> bool {
        self.can_seek
    }

    /// Queries the async half for the stream's byte length, blocking on the reply.
    fn byte_len(&self) -> Option<u64> {
        self.check_dropped().ok()?;
        let _ = self.req_tx.send(AdapterRequest::ByteLen);

        match self.handle_messages(true) {
            Some(AdapterResponse::ByteLen(a)) => a,
            // `None` implies disconnection, so `check_dropped().ok()` is `None`
            // and the `map` closure never runs.
            None => self.check_dropped().ok().map(|_| unreachable!()),
            _ => unreachable!(),
        }
    }
}
/// Requests sent from the sync half ([`AsyncAdapterStream`]) to the async half.
enum AdapterRequest {
    /// Ring-buffer space has been freed; resume writing.
    Wake,
    /// Seek the underlying stream to the given position.
    Seek(SeekFrom),
    /// The sync half has discarded its buffered bytes after a seek.
    SeekCleared,
    /// Query the underlying stream's byte length.
    ByteLen,
}
/// Responses sent from the async half back to the sync half.
enum AdapterResponse {
    /// Result of a completed seek on the underlying stream.
    SeekResult(IoResult<u64>),
    /// The async half has paused writing in response to a `Seek` request.
    SeekClear,
    /// Byte length of the underlying stream, if known.
    ByteLen(Option<u64>),
    /// The underlying stream returned a zero-length read (end of stream).
    ReadZero,
}
/// An async port of symphonia's [`MediaSource`].
///
/// Streams which are not seekable should implement `AsyncSeek` such that all operations
/// fail with `Unsupported`, and implement `fn is_seekable(&self) -> bool { false }`.
///
/// [`MediaSource`]: MediaSource
#[async_trait]
pub trait AsyncMediaSource: AsyncRead + AsyncSeek + Send + Sync + Unpin {
    /// Returns if the source is seekable. This may be an expensive operation.
    fn is_seekable(&self) -> bool;

    /// Returns the length in bytes, if available. This may be an expensive operation.
    async fn byte_len(&self) -> Option<u64>;

    /// Tries to recreate this stream in event of an error, resuming from the given offset.
    ///
    /// The default implementation does not support resumption and always fails
    /// with [`AudioStreamError::Unsupported`].
    async fn try_resume(
        &mut self,
        _offset: u64,
    ) -> Result<Box<dyn AsyncMediaSource>, AudioStreamError> {
        Err(AudioStreamError::Unsupported)
    }
}

View File

@@ -0,0 +1,537 @@
use super::{compressed_cost_per_sec, default_config, CodecCacheError, ToAudioBytes};
use crate::{
constants::*,
input::{
codecs::{dca::*, CODEC_REGISTRY, PROBE},
AudioStream,
Input,
LiveInput,
},
};
use audiopus::{
coder::{Encoder as OpusEncoder, GenericCtl},
Application,
Bitrate,
Channels,
Error as OpusError,
ErrorCode as OpusErrorCode,
SampleRate,
};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::{
convert::TryInto,
io::{
Cursor,
Error as IoError,
ErrorKind as IoErrorKind,
Read,
Result as IoResult,
Seek,
SeekFrom,
},
mem,
sync::atomic::{AtomicUsize, Ordering},
};
use streamcatcher::{
Config as ScConfig,
NeedsBytes,
Stateful,
Transform,
TransformPosition,
TxCatcher,
};
use symphonia_core::{
audio::Channels as SChannels,
codecs::CodecRegistry,
io::MediaSource,
meta::{MetadataRevision, StandardTagKey, Value},
probe::{Probe, ProbedMetadata},
};
use tracing::{debug, trace};
/// Configuration controlling how a cached source is probed, decoded, and stored.
pub struct Config {
    /// Registry of audio codecs supported by the driver.
    ///
    /// Defaults to [`CODEC_REGISTRY`], which adds audiopus-based Opus codec support
    /// to all of Symphonia's default codecs.
    ///
    /// [`CODEC_REGISTRY`]: static@CODEC_REGISTRY
    pub codec_registry: &'static CodecRegistry,
    /// Registry of the muxers and container formats supported by the driver.
    ///
    /// Defaults to [`PROBE`], which includes all of Symphonia's default format handlers
    /// and DCA format support.
    ///
    /// [`PROBE`]: static@PROBE
    pub format_registry: &'static Probe,
    /// Configuration for the inner streamcatcher instance.
    ///
    /// Notably, this governs size hints and resize logic.
    pub streamcatcher: ScConfig,
}
impl Default for Config {
fn default() -> Self {
Self {
codec_registry: &CODEC_REGISTRY,
format_registry: &PROBE,
streamcatcher: ScConfig::default(),
}
}
}
impl Config {
    /// Creates a default `Config` whose streamcatcher settings are sized using
    /// the given estimated storage cost (in bytes) per second of audio.
    pub fn default_from_cost(cost_per_sec: usize) -> Self {
        let streamcatcher = default_config(cost_per_sec);
        Self {
            streamcatcher,
            ..Default::default()
        }
    }
}
/// A wrapper around an existing [`Input`] which compresses
/// the input using the Opus codec before storing it in memory.
///
/// The main purpose of this wrapper is to enable seeking on
/// incompatible sources and to ease resource consumption for
/// commonly reused/shared tracks. If only one Opus-compressed track
/// is playing at a time, then this removes the runtime decode cost
/// from the driver.
///
/// This is intended for use with larger, repeatedly used audio
/// tracks shared between sources, and stores the sound data
/// retrieved as **compressed Opus audio**.
///
/// Internally, this stores the stream and its metadata as a DCA1 file,
/// which can be written out to disk for later use.
///
/// [`Input`]: crate::input::Input
#[derive(Clone)]
pub struct Compressed {
    /// Inner shared bytestore.
    ///
    /// Cloned handles share this store; use [`Self::new_handle`] to read from
    /// the beginning.
    pub raw: TxCatcher<ToAudioBytes, OpusCompressor>,
}
impl Compressed {
    /// Wrap an existing [`Input`] with an in-memory store, compressed using Opus.
    ///
    /// [`Input`]: Input
    pub async fn new(source: Input, bitrate: Bitrate) -> Result<Self, CodecCacheError> {
        Self::with_config(source, bitrate, None).await
    }

    /// Wrap an existing [`Input`] with an in-memory store, compressed using Opus, with
    /// custom configuration for both Symphonia and the backing store.
    ///
    /// [`Input`]: Input
    pub async fn with_config(
        source: Input,
        bitrate: Bitrate,
        config: Option<Config>,
    ) -> Result<Self, CodecCacheError> {
        // Bring the source into (at most) `Raw` state: lazy sources are created
        // here, live-but-unparsed sources pass through, and already-parsed
        // sources are rejected since their header bytes have been consumed.
        let input = match source {
            Input::Lazy(mut r) => {
                let created = if r.should_create_async() {
                    r.create_async().await.map_err(CodecCacheError::from)
                } else {
                    // Sync creation may block, so keep it off the async runtime.
                    tokio::task::spawn_blocking(move || r.create().map_err(CodecCacheError::from))
                        .await
                        .map_err(CodecCacheError::from)
                        .and_then(|v| v)
                };

                created.map(LiveInput::Raw)
            },
            Input::Live(LiveInput::Parsed(_), _) => Err(CodecCacheError::StreamNotAtStart),
            Input::Live(a, _rec) => Ok(a),
        }?;

        let cost_per_sec = compressed_cost_per_sec(bitrate);
        let config = config.unwrap_or_else(|| Config::default_from_cost(cost_per_sec));

        // Probing/parsing is sync and potentially expensive: run it blocking.
        let promoted = tokio::task::spawn_blocking(move || {
            input.promote(config.codec_registry, config.format_registry)
        })
        .await??;

        // If success, guaranteed to be Parsed
        let mut parsed = if let LiveInput::Parsed(parsed) = promoted {
            parsed
        } else {
            unreachable!()
        };

        // TODO: apply length hint.
        // if config.length_hint.is_none() {
        //     if let Some(dur) = metadata.duration {
        //         apply_length_hint(&mut config, dur, cost_per_sec);
        //     }
        // }

        // Encoder output is 48 kHz Opus; stereo whenever the source has two or
        // more channels (an unknown channel count defaults to stereo).
        let track_info = parsed.decoder.codec_params();
        let chan_count = track_info.channels.map_or(2, SChannels::count);
        let (channels, stereo) = if chan_count >= 2 {
            (Channels::Stereo, true)
        } else {
            (Channels::Mono, false)
        };

        let mut encoder = OpusEncoder::new(SampleRate::Hz48000, channels, Application::Audio)?;
        encoder.set_bitrate(bitrate)?;

        let codec_type = parsed.decoder.codec_params().codec;
        let encoding = config
            .codec_registry
            .get_codec(codec_type)
            .map(|v| v.short_name.to_string());

        let format_meta_hold = parsed.format.metadata();
        let format_meta = format_meta_hold.current();

        let metadata = create_metadata(
            &mut parsed.meta,
            format_meta,
            &encoder,
            chan_count as u8,
            encoding,
        )?;

        // DCA1 header: 4-byte magic, 4-byte placeholder for the little-endian
        // i32 JSON length, then the JSON metadata itself.
        let mut metabytes = b"DCA1\0\0\0\0".to_vec();
        let orig_len = metabytes.len();
        serde_json::to_writer(&mut metabytes, &metadata)?;
        let meta_len = (metabytes.len() - orig_len)
            .try_into()
            .map_err(|_| CodecCacheError::MetadataTooLarge)?;

        // Backfill the JSON length into bytes 4..8 of the header.
        (&mut metabytes[4..][..mem::size_of::<i32>()])
            .write_i32::<LittleEndian>(meta_len)
            .expect("Magic byte writing location guaranteed to be well-founded.");

        let source = ToAudioBytes::new(parsed, Some(2));

        let raw = config
            .streamcatcher
            .build_tx(source, OpusCompressor::new(encoder, stereo, metabytes))?;

        Ok(Self { raw })
    }

    /// Acquire a new handle to this object, creating a new
    /// view of the existing cached data from the beginning.
    #[must_use]
    pub fn new_handle(&self) -> Self {
        Self {
            raw: self.raw.new_handle(),
        }
    }
}
/// Builds the DCA1 metadata block describing an Opus encode.
///
/// Combines tool name/version info, the encoder's actual settings, and any
/// tags found via the probe and track metadata.
fn create_metadata(
    probe_metadata: &mut ProbedMetadata,
    track_metadata: Option<&MetadataRevision>,
    opus: &OpusEncoder,
    channels: u8,
    encoding: Option<String>,
) -> Result<DcaMetadata, CodecCacheError> {
    let dca = DcaInfo {
        version: 1,
        tool: Tool {
            name: env!("CARGO_PKG_NAME").into(),
            version: env!("CARGO_PKG_VERSION").into(),
            url: Some(env!("CARGO_PKG_HOMEPAGE").into()),
            author: Some(env!("CARGO_PKG_AUTHORS").into()),
        },
    };

    // Advertised average bitrate: `Max` maps to Opus's 510 kb/s ceiling,
    // `Auto` leaves the field unset.
    let abr = match opus.bitrate()? {
        Bitrate::BitsPerSecond(i) => Some(i as u64),
        Bitrate::Auto => None,
        Bitrate::Max => Some(510_000),
    };

    let mode = match opus.application()? {
        Application::Voip => "voip",
        Application::Audio => "music",
        Application::LowDelay => "lowdelay",
    }
    .to_string();

    let sample_rate = opus.sample_rate()? as u32;

    let opus = Opus {
        mode,
        sample_rate,
        frame_size: MONO_FRAME_BYTE_SIZE as u64,
        abr,
        vbr: opus.vbr()?,
        // The stored Opus stream is at most stereo.
        channels: channels.min(2),
    };

    let mut origin = Origin {
        source: Some("file".into()),
        abr: None,
        channels: Some(channels),
        encoding,
        url: None,
    };

    let mut info = Info {
        title: None,
        artist: None,
        album: None,
        genre: None,
        cover: None,
        comments: None,
    };

    // Apply probe-phase tags first, then track metadata (later application can
    // overwrite matching fields).
    if let Some(meta) = probe_metadata.get() {
        apply_meta_to_dca(&mut info, &mut origin, meta.current());
    }

    apply_meta_to_dca(&mut info, &mut origin, track_metadata);

    Ok(DcaMetadata {
        dca,
        opus,
        info: Some(info),
        origin: Some(origin),
        extra: None,
    })
}
/// Copies string-valued standard tags from a symphonia metadata revision into
/// the DCA `Info`/`Origin` blocks, overwriting any previously set fields.
fn apply_meta_to_dca(info: &mut Info, origin: &mut Origin, src_meta: Option<&MetadataRevision>) {
    let meta = match src_meta {
        Some(m) => m,
        None => return,
    };

    for tag in meta.tags() {
        // Only string-valued tags are mapped; all other value types are ignored.
        let text = match &tag.value {
            Value::String(s) => s,
            _ => continue,
        };

        match tag.std_key {
            Some(StandardTagKey::Album) => info.album = Some(text.clone()),
            Some(StandardTagKey::Artist) => info.artist = Some(text.clone()),
            Some(StandardTagKey::Comment) => info.comments = Some(text.clone()),
            Some(StandardTagKey::Genre) => info.genre = Some(text.clone()),
            Some(StandardTagKey::TrackTitle) => info.title = Some(text.clone()),
            Some(StandardTagKey::Url | StandardTagKey::UrlSource) =>
                origin.url = Some(text.clone()),
            _ => {},
        }
    }

    for _visual in meta.visuals() {
        // FIXME: will require MIME type inspection and Base64 conversion.
    }
}
/// Transform applied inside [`Compressed`], converting a floating-point PCM
/// input stream into a DCA-framed Opus stream.
///
/// Created and managed by [`Compressed`].
///
/// [`Compressed`]: Compressed
#[derive(Debug)]
pub struct OpusCompressor {
    /// DCA1 header bytes, emitted before any audio; `None` once fully drained.
    prepend: Option<Cursor<Vec<u8>>>,
    /// Opus encoder used for each PCM frame.
    encoder: OpusEncoder,
    /// Most recently encoded Opus packet.
    last_frame: Vec<u8>,
    /// Whether the PCM input is interleaved stereo (vs. mono).
    stereo_input: bool,
    /// Bytes of the current frame (length header + packet) already written out.
    frame_pos: usize,
    /// Running count of raw PCM bytes consumed, exposed via the `Stateful` impl.
    audio_bytes: AtomicUsize,
}
impl OpusCompressor {
fn new(encoder: OpusEncoder, stereo_input: bool, prepend: Vec<u8>) -> Self {
Self {
prepend: Some(Cursor::new(prepend)),
encoder,
last_frame: Vec::with_capacity(4000),
stereo_input,
frame_pos: 0,
audio_bytes: AtomicUsize::default(),
}
}
}
impl<T> Transform<T> for OpusCompressor
where
    T: Read,
{
    /// Reads little-endian `f32` PCM from `src`, Opus-encodes it, and writes
    /// DCA-framed output into `buf`: a little-endian `i16` packet-length
    /// header followed by the packet bytes, split across calls as `buf` allows.
    ///
    /// Any unread portion of the prepended DCA1 metadata is emitted first.
    fn transform_read(&mut self, src: &mut T, buf: &mut [u8]) -> IoResult<TransformPosition> {
        // Drain the DCA1 header before any audio bytes.
        if let Some(prepend) = self.prepend.as_mut() {
            match prepend.read(buf)? {
                0 => {},
                n => return Ok(TransformPosition::Read(n)),
            }
        }
        self.prepend = None;

        // Each encoded packet is preceded by a 2-byte length header.
        let output_start = mem::size_of::<u16>();
        let mut eof = false;
        let mut raw_len = 0;
        let mut out = None;
        let mut sample_buf = [0f32; STEREO_FRAME_SIZE];
        let (samples_in_frame, interleaved_count) = if self.stereo_input {
            (STEREO_FRAME_SIZE, 2)
        } else {
            (MONO_FRAME_SIZE, 1)
        };

        // Purge old frame and read new, if needed.
        if self.frame_pos == self.last_frame.len() + output_start || self.last_frame.is_empty() {
            self.last_frame.resize(self.last_frame.capacity(), 0);

            // We can't use `read_f32_into` because we can't guarantee the buffer will be filled.
            // However, we can guarantee that reads will be channel aligned at least!
            for el in sample_buf[..samples_in_frame].chunks_mut(interleaved_count) {
                match src.read_f32_into::<LittleEndian>(el) {
                    Ok(_) => {
                        raw_len += interleaved_count;
                    },
                    Err(e) if e.kind() == IoErrorKind::UnexpectedEof => {
                        eof = true;
                        break;
                    },
                    Err(e) => {
                        out = Some(Err(e));
                        break;
                    },
                }
            }

            if out.is_none() && raw_len > 0 {
                loop {
                    // NOTE: we don't index by raw_len because the last frame can be too small
                    // to occupy a "whole packet". Zero-padding is the correct behaviour.
                    match self
                        .encoder
                        .encode_float(&sample_buf[..samples_in_frame], &mut self.last_frame[..])
                    {
                        Ok(pkt_len) => {
                            trace!("Next packet to write has {:?}", pkt_len);
                            self.frame_pos = 0;
                            self.last_frame.truncate(pkt_len);
                            break;
                        },
                        Err(OpusError::Opus(OpusErrorCode::BufferTooSmall)) => {
                            // If we need more capacity to encode this frame, then take it.
                            trace!("Resizing inner buffer (+256).");
                            self.last_frame.resize(self.last_frame.len() + 256, 0);
                        },
                        Err(e) => {
                            debug!("Read error {:?} {:?} {:?}.", e, out, raw_len);
                            out = Some(Err(IoError::new(IoErrorKind::Other, e)));
                            break;
                        },
                    }
                }
            }
        }

        if out.is_none() {
            // Write from frame we have.
            // Emit the 2-byte length header exactly once per frame, then stream
            // the packet bytes from wherever the previous call left off.
            let start = if self.frame_pos < output_start {
                (&mut buf[..output_start])
                    .write_i16::<LittleEndian>(self.last_frame.len() as i16)
                    .expect(
                        "Minimum bytes requirement for Opus (2) should mean that an i16 \
                        may always be written.",
                    );
                self.frame_pos += output_start;
                trace!("Wrote frame header: {}.", self.last_frame.len());
                output_start
            } else {
                0
            };

            let out_pos = self.frame_pos - output_start;
            let remaining = self.last_frame.len() - out_pos;
            let write_len = remaining.min(buf.len() - start);
            buf[start..start + write_len]
                .copy_from_slice(&self.last_frame[out_pos..out_pos + write_len]);
            self.frame_pos += write_len;
            trace!("Appended {} to inner store", write_len);
            out = Some(Ok(write_len + start));
        }

        // NOTE: use of raw_len here preserves true sample length even if
        // stream is extended to 20ms boundary.
        out.unwrap_or_else(|| Err(IoError::new(IoErrorKind::Other, "Unclear.")))
            .map(|compressed_sz| {
                self.audio_bytes
                    .fetch_add(raw_len * mem::size_of::<f32>(), Ordering::Release);

                if eof {
                    TransformPosition::Finished
                } else {
                    TransformPosition::Read(compressed_sz)
                }
            })
    }
}
impl NeedsBytes for OpusCompressor {
    /// At least 2 output bytes are always required: enough for the `i16`
    /// DCA frame-length header written before each packet.
    fn min_bytes_required(&self) -> usize {
        2
    }
}
impl Stateful for OpusCompressor {
    /// Number of raw PCM bytes consumed so far.
    type State = usize;

    // `Acquire` pairs with the `Release` `fetch_add` in `transform_read`.
    fn state(&self) -> Self::State {
        self.audio_bytes.load(Ordering::Acquire)
    }
}
impl Read for Compressed {
    // Delegates to the shared streamcatcher store.
    fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
        self.raw.read(buf)
    }
}

impl Seek for Compressed {
    // Delegates to the shared streamcatcher store.
    fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
        self.raw.seek(pos)
    }
}
impl MediaSource for Compressed {
    fn is_seekable(&self) -> bool {
        true
    }

    /// The total length is only known once the backing store has finished
    /// reading (and encoding) the whole source.
    fn byte_len(&self) -> Option<u64> {
        if self.raw.is_finished() {
            Some(self.raw.len() as u64)
        } else {
            None
        }
    }
}
impl From<Compressed> for Input {
    /// Converts the cache into a live, unparsed input with no format hint.
    fn from(val: Compressed) -> Input {
        Input::Live(
            LiveInput::Raw(AudioStream {
                input: Box::new(val),
                hint: None,
            }),
            None,
        )
    }
}

View File

@@ -0,0 +1,142 @@
use super::{compressed::Config, CodecCacheError, ToAudioBytes};
use crate::{
constants::SAMPLE_RATE_RAW,
input::{AudioStream, Input, LiveInput, RawAdapter},
};
use std::io::{Read, Result as IoResult, Seek, SeekFrom};
use streamcatcher::Catcher;
use symphonia_core::{audio::Channels, io::MediaSource};
/// A wrapper around an existing [`Input`] which caches
/// the decoded and converted audio data locally in memory
/// as `f32`-format PCM data.
///
/// The main purpose of this wrapper is to enable seeking on
/// incompatible sources (i.e., ffmpeg output) and to ease resource
/// consumption for commonly reused/shared tracks. [`Compressed`]
/// offers similar functionality with different
/// tradeoffs.
///
/// This is intended for use with small, repeatedly used audio
/// tracks shared between sources, and stores the sound data
/// retrieved in **uncompressed floating point** form to minimise the
/// cost of audio processing when mixing several tracks together.
/// This must be used sparingly: these cost a significant
/// *3 Mbps (375 kiB/s)*, or 131 MiB of RAM for a 6 minute song.
///
/// [`Input`]: crate::input::Input
/// [`Compressed`]: super::Compressed
#[derive(Clone)]
pub struct Decompressed {
    /// Inner shared bytestore.
    ///
    /// Cloned handles share this store; use [`Self::new_handle`] to read from
    /// the beginning.
    pub raw: Catcher<RawAdapter<ToAudioBytes>>,
}
impl Decompressed {
    /// Wrap an existing [`Input`] with an in-memory store, decompressed into `f32` PCM audio.
    ///
    /// [`Input`]: Input
    pub async fn new(source: Input) -> Result<Self, CodecCacheError> {
        Self::with_config(source, None).await
    }

    /// Wrap an existing [`Input`] with an in-memory store, decompressed into `f32` PCM audio,
    /// with custom configuration for both Symphonia and the backing store.
    ///
    /// [`Input`]: Input
    pub async fn with_config(
        source: Input,
        config: Option<Config>,
    ) -> Result<Self, CodecCacheError> {
        // Bring the source into (at most) `Raw` state: lazy sources are created
        // here (on a blocking thread when their creation is sync), and
        // already-parsed sources are rejected since their header bytes have
        // been consumed.
        let input = match source {
            Input::Lazy(mut r) => {
                let created = if r.should_create_async() {
                    r.create_async().await.map_err(CodecCacheError::from)
                } else {
                    tokio::task::spawn_blocking(move || r.create().map_err(CodecCacheError::from))
                        .await
                        .map_err(CodecCacheError::from)
                        .and_then(|v| v)
                };

                created.map(LiveInput::Raw)
            },
            Input::Live(LiveInput::Parsed(_), _) => Err(CodecCacheError::StreamNotAtStart),
            Input::Live(a, _rec) => Ok(a),
        }?;

        let cost_per_sec = super::raw_cost_per_sec(true);
        let config = config.unwrap_or_else(|| Config::default_from_cost(cost_per_sec));

        // Probing/parsing is sync and potentially expensive: run it blocking.
        let promoted = tokio::task::spawn_blocking(move || {
            input.promote(config.codec_registry, config.format_registry)
        })
        .await??;

        // If success, guaranteed to be Parsed
        let parsed = if let LiveInput::Parsed(parsed) = promoted {
            parsed
        } else {
            unreachable!()
        };

        // Unlike `Compressed`, an unknown channel count is an error here rather
        // than defaulting to stereo.
        let track_info = parsed.decoder.codec_params();
        let chan_count = track_info
            .channels
            .map(Channels::count)
            .ok_or(CodecCacheError::UnknownChannelCount)?;

        let sample_rate = SAMPLE_RATE_RAW as u32;

        let source = RawAdapter::new(
            ToAudioBytes::new(parsed, Some(chan_count)),
            sample_rate,
            chan_count as u32,
        );

        let raw = config.streamcatcher.build(source)?;

        Ok(Self { raw })
    }

    /// Acquire a new handle to this object, creating a new
    /// view of the existing cached data from the beginning.
    #[must_use]
    pub fn new_handle(&self) -> Self {
        Self {
            raw: self.raw.new_handle(),
        }
    }
}
impl Read for Decompressed {
    // Delegates to the shared streamcatcher store.
    fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
        self.raw.read(buf)
    }
}

impl Seek for Decompressed {
    // Delegates to the shared streamcatcher store.
    fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
        self.raw.seek(pos)
    }
}
impl MediaSource for Decompressed {
    fn is_seekable(&self) -> bool {
        true
    }

    /// The total length is only known once the backing store has finished
    /// reading (and decoding) the whole source.
    fn byte_len(&self) -> Option<u64> {
        if self.raw.is_finished() {
            Some(self.raw.len() as u64)
        } else {
            None
        }
    }
}
impl From<Decompressed> for Input {
    /// Converts the cache into a live, unparsed input with no format hint.
    fn from(val: Decompressed) -> Input {
        Input::Live(
            LiveInput::Raw(AudioStream {
                input: Box::new(val),
                hint: None,
            }),
            None,
        )
    }
}

View File

@@ -0,0 +1,146 @@
use crate::input::AudioStreamError;
use audiopus::error::Error as OpusError;
use serde_json::Error as JsonError;
use std::{
error::Error as StdError,
fmt::{Display, Formatter, Result as FmtResult},
};
use streamcatcher::CatcherError;
use symphonia_core::errors::Error as SymphError;
use tokio::task::JoinError;
/// Errors encountered using a [`Memory`] cached source.
///
/// Most variants are produced via the `From` conversions defined in this module.
///
/// [`Memory`]: super::Memory
#[derive(Debug)]
pub enum Error {
    /// The audio stream could not be created.
    Create(AudioStreamError),
    /// The audio stream failed to be created due to a panic in `spawn_blocking`.
    CreatePanicked,
    /// Streamcatcher's configuration was illegal, and the cache could not be created.
    Streamcatcher(CatcherError),
    /// The input stream had already been read (i.e., `Parsed`) and so the whole stream
    /// could not be used.
    StreamNotAtStart,
}
impl Display for Error {
    /// Human-readable message for each failure mode.
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        match self {
            Self::Create(c) => write!(f, "failed to create audio stream: {}", c),
            Self::CreatePanicked => write!(f, "sync thread panicked while creating stream"),
            Self::Streamcatcher(s) => write!(f, "illegal streamcatcher config: {}", s),
            Self::StreamNotAtStart =>
                write!(f, "stream cannot have been pre-read/parsed, missing headers"),
        }
    }
}

impl StdError for Error {}
impl From<AudioStreamError> for Error {
fn from(val: AudioStreamError) -> Self {
Self::Create(val)
}
}
impl From<CatcherError> for Error {
fn from(val: CatcherError) -> Self {
Self::Streamcatcher(val)
}
}
impl From<JoinError> for Error {
fn from(_val: JoinError) -> Self {
Self::CreatePanicked
}
}
/// Errors encountered using a [`Compressed`] or [`Decompressed`] cached source.
///
/// Most variants are produced via the `From` conversions defined in this module.
///
/// [`Compressed`]: super::Compressed
/// [`Decompressed`]: super::Decompressed
#[derive(Debug)]
pub enum CodecCacheError {
    /// The audio stream could not be created.
    Create(AudioStreamError),
    /// Symphonia failed to parse the container or decode the default stream.
    Parse(SymphError),
    /// The Opus encoder could not be created.
    Opus(OpusError),
    /// The file's metadata could not be converted to JSON.
    MetadataEncoding(JsonError),
    /// The input's metadata was too large after conversion to JSON to fit in a DCA file.
    MetadataTooLarge,
    /// The audio stream failed to be created due to a panic in `spawn_blocking`.
    CreatePanicked,
    /// The audio stream's channel count could not be determined.
    UnknownChannelCount,
    /// Streamcatcher's configuration was illegal, and the cache could not be created.
    Streamcatcher(CatcherError),
    /// The input stream had already been read (i.e., `Parsed`) and so the whole stream
    /// could not be used.
    StreamNotAtStart,
}
impl Display for CodecCacheError {
    /// Human-readable message for each failure mode.
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        match self {
            Self::Create(c) => write!(f, "failed to create audio stream: {}", c),
            Self::Parse(p) => write!(f, "failed to parse audio format: {}", p),
            Self::Opus(o) => write!(f, "failed to create Opus encoder: {}", o),
            Self::MetadataEncoding(m) =>
                write!(f, "failed to convert track metadata to JSON: {}", m),
            Self::MetadataTooLarge => write!(f, "track metadata was too large, >= 32kiB"),
            Self::CreatePanicked => write!(f, "sync thread panicked while creating stream"),
            Self::UnknownChannelCount =>
                write!(f, "audio stream's channel count could not be determined"),
            Self::Streamcatcher(s) => write!(f, "illegal streamcatcher config: {}", s),
            Self::StreamNotAtStart =>
                write!(f, "stream cannot have been pre-read/parsed, missing headers"),
        }
    }
}

impl StdError for CodecCacheError {}
impl From<AudioStreamError> for CodecCacheError {
fn from(val: AudioStreamError) -> Self {
Self::Create(val)
}
}
impl From<CatcherError> for CodecCacheError {
fn from(val: CatcherError) -> Self {
Self::Streamcatcher(val)
}
}
impl From<JoinError> for CodecCacheError {
fn from(_val: JoinError) -> Self {
Self::CreatePanicked
}
}
impl From<JsonError> for CodecCacheError {
fn from(val: JsonError) -> Self {
Self::MetadataEncoding(val)
}
}
impl From<OpusError> for CodecCacheError {
fn from(val: OpusError) -> Self {
Self::Opus(val)
}
}
impl From<SymphError> for CodecCacheError {
fn from(val: SymphError) -> Self {
Self::Parse(val)
}
}

View File

@@ -0,0 +1,40 @@
use std::time::Duration;
use streamcatcher::Config;
/// Expected amount of time that an input should last.
#[derive(Copy, Clone, Debug)]
pub enum LengthHint {
    /// Estimate of a source's length in bytes.
    Bytes(usize),
    /// Estimate of a source's length in time.
    ///
    /// This will be converted to a bytecount at setup.
    Time(Duration),
}

impl From<usize> for LengthHint {
    /// A plain byte count becomes a [`LengthHint::Bytes`].
    fn from(bytes: usize) -> Self {
        Self::Bytes(bytes)
    }
}

impl From<Duration> for LengthHint {
    /// A duration becomes a [`LengthHint::Time`].
    fn from(time: Duration) -> Self {
        Self::Time(time)
    }
}
/// Modify the given cache configuration to initially allocate
/// enough bytes to store a length of audio at the given bitrate.
///
/// Time-based hints are rounded *up* to the next whole second so the initial
/// allocation never underestimates the source.
pub fn apply_length_hint<H>(config: &mut Config, hint: H, cost_per_sec: usize)
where
    H: Into<LengthHint>,
{
    config.length_hint = Some(match hint.into() {
        LengthHint::Bytes(a) => a,
        LengthHint::Time(t) => {
            // Round up on ANY fractional remainder. The previous check used
            // `subsec_millis() > 0`, which failed to round up durations with a
            // sub-millisecond remainder (e.g. 2s + 500µs stayed at 2s).
            let s = t.as_secs() + u64::from(t.subsec_nanos() > 0);
            (s as usize) * cost_per_sec
        },
    });
}

View File

@@ -0,0 +1,111 @@
use super::{default_config, raw_cost_per_sec, Error};
use crate::input::{AudioStream, Input, LiveInput};
use std::io::{Read, Result as IoResult, Seek};
use streamcatcher::{Catcher, Config};
use symphonia_core::io::MediaSource;
/// A wrapper around an existing [`Input`] which caches its data
/// in memory.
///
/// The main purpose of this wrapper is to enable fast seeking on
/// incompatible sources (i.e., HTTP streams) and to ease resource
/// consumption for commonly reused/shared tracks.
///
/// This consumes exactly as many bytes of memory as the input stream contains.
///
/// [`Input`]: Input
#[derive(Clone)]
pub struct Memory {
    /// Inner shared bytestore.
    ///
    /// Cloned handles share this store; use [`Self::new_handle`] to read from
    /// the beginning.
    pub raw: Catcher<Box<dyn MediaSource>>,
}
impl Memory {
/// Wrap an existing [`Input`] with an in-memory store with the same codec and framing.
///
/// [`Input`]: Input
pub async fn new(source: Input) -> Result<Self, Error> {
Self::with_config(source, None).await
}
/// Wrap an existing [`Input`] with an in-memory store with the same codec and framing.
///
/// `length_hint` may be used to control the size of the initial chunk, preventing
/// needless allocations and copies.
///
/// [`Input`]: Input
pub async fn with_config(source: Input, config: Option<Config>) -> Result<Self, Error> {
let input = match source {
Input::Lazy(mut r) => {
let created = if r.should_create_async() {
r.create_async().await
} else {
tokio::task::spawn_blocking(move || r.create()).await?
};
created.map(|v| v.input).map_err(Error::from)
},
Input::Live(LiveInput::Raw(a), _rec) => Ok(a.input),
Input::Live(LiveInput::Wrapped(a), _rec) =>
Ok(Box::new(a.input) as Box<dyn MediaSource>),
Input::Live(LiveInput::Parsed(_), _) => Err(Error::StreamNotAtStart),
}?;
let cost_per_sec = raw_cost_per_sec(true);
let config = config.unwrap_or_else(|| default_config(cost_per_sec));
// TODO: apply length hint.
// if config.length_hint.is_none() {
// if let Some(dur) = metadata.duration {
// apply_length_hint(&mut config, dur, cost_per_sec);
// }
// }
let raw = config.build(input)?;
Ok(Self { raw })
}
/// Acquire a new handle to this object, creating a new
/// view of the existing cached data from the beginning.
#[must_use]
pub fn new_handle(&self) -> Self {
Self {
raw: self.raw.new_handle(),
}
}
}
impl Read for Memory {
    /// Reads pass straight through to the shared byte store.
    fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
        Read::read(&mut self.raw, buf)
    }
}
impl Seek for Memory {
    /// Seeks are delegated to the shared byte store.
    fn seek(&mut self, pos: std::io::SeekFrom) -> IoResult<u64> {
        Seek::seek(&mut self.raw, pos)
    }
}
impl MediaSource for Memory {
    fn is_seekable(&self) -> bool {
        // The in-memory store supports seeking over everything cached so far.
        true
    }

    fn byte_len(&self) -> Option<u64> {
        // The total length is only known once the source has been
        // read to completion.
        self.raw
            .is_finished()
            .then(|| self.raw.len() as u64)
    }
}
impl From<Memory> for Input {
fn from(val: Memory) -> Input {
let input = Box::new(val);
Input::Live(LiveInput::Raw(AudioStream { input, hint: None }), None)
}
}

View File

@@ -0,0 +1,49 @@
//! In-memory, shared input sources for reuse between calls, fast seeking, and
//! direct Opus frame passthrough.
mod compressed;
mod decompressed;
mod error;
mod hint;
mod memory;
mod util;
pub(crate) use self::util::*;
pub use self::{compressed::*, decompressed::*, error::*, hint::*, memory::*};
use crate::constants::*;
use crate::input::utils;
use audiopus::Bitrate;
use std::{mem, time::Duration};
use streamcatcher::{Config, GrowthStrategy};
/// Estimates the cost, in B/s, of audio data compressed at the given bitrate.
#[must_use]
pub fn compressed_cost_per_sec(bitrate: Bitrate) -> usize {
    // Framing overhead: one `u16` stored per audio frame.
    let framing_cost_per_sec = AUDIO_FRAME_RATE * mem::size_of::<u16>();

    // Flat estimates are used for the encoder's adaptive/max modes.
    let bits_per_sec = match bitrate {
        Bitrate::BitsPerSecond(i) => i as usize,
        Bitrate::Auto => 64_000,
        Bitrate::Max => 512_000,
    };

    bits_per_sec / 8 + framing_cost_per_sec
}
/// Calculates the cost, in B/s, of raw floating-point audio data.
#[must_use]
pub fn raw_cost_per_sec(stereo: bool) -> usize {
    // One second's worth of samples via the shared duration -> byte-count helper.
    utils::timestamp_to_byte_count(Duration::from_secs(1), stereo)
}
/// Provides the default config used by a cached source.
///
/// This maps to the default configuration in [`streamcatcher`], using
/// a constant chunk size of 5s worth of audio at the given bitrate estimate.
///
/// [`streamcatcher`]: https://docs.rs/streamcatcher/0.1.0/streamcatcher/struct.Config.html
#[must_use]
pub fn default_config(cost_per_sec: usize) -> Config {
    // Constant growth: every allocated chunk holds ~5 seconds of audio.
    Config::new().chunk_size(GrowthStrategy::Constant(5 * cost_per_sec))
}

View File

@@ -0,0 +1,458 @@
use crate::{constants::*, driver::tasks::mixer::mix_logic, input::Parsed};
use byteorder::{LittleEndian, WriteBytesExt};
use rubato::{FftFixedOut, Resampler};
use std::{
io::{ErrorKind as IoErrorKind, Read, Result as IoResult, Seek, Write},
mem,
ops::Range,
};
use symphonia_core::{
audio::{AudioBuffer, AudioBufferRef, Layout, Signal, SignalSpec},
conv::IntoSample,
io::MediaSource,
sample::Sample,
};
/// Width in bytes of one output sample (`f32`).
const SAMPLE_LEN: usize = mem::size_of::<f32>();
/// Adapter for Symphonia sources into an interleaved f32 bytestream.
///
/// This will output `f32`s in LE byte order, matching the channel count
/// of the input.
pub struct ToAudioBytes {
    /// Channel count of the decoded source.
    chan_count: usize,
    /// Maximum number of channels to emit (defaults to `chan_count`).
    chan_limit: usize,
    /// The decoder/format-reader pair this adapter drains.
    parsed: Parsed,
    /// Position within parsed's last decoded frame.
    inner_pos: Range<usize>,
    /// Resampler state; present only when the source's sample rate
    /// differs from the driver's native rate.
    resample: Option<ResampleState>,
    /// Set once the format reader reports end-of-stream.
    done: bool,
    /// Samples (one per channel) held over when an output buffer ended
    /// partway through an interleaved sample-group.
    interrupted_samples: Vec<f32>,
    /// Byte range of `interrupted_samples` not yet written out.
    interrupted_byte_pos: Range<usize>,
}
/// State needed to convert a source's sample rate to the driver's.
struct ResampleState {
    /// Used to hold outputs from resampling, *ready to be used*.
    resampled_data: Vec<Vec<f32>>,
    /// The actual resampler.
    resampler: FftFixedOut<f32>,
    /// Used to hold inputs to resampler across packet boundaries.
    scratch: AudioBuffer<f32>,
    /// The range of floats in `resampled_data` which have not yet
    /// been read.
    resample_pos: Range<usize>,
}
impl ToAudioBytes {
    /// Wraps a parsed source, emitting at most `chan_limit` channels.
    ///
    /// A resampler is constructed only when the source's sample rate
    /// differs from the driver's native rate.
    pub fn new(parsed: Parsed, chan_limit: Option<usize>) -> Self {
        let track_info = parsed.decoder.codec_params();
        let sample_rate = track_info.sample_rate.unwrap_or(SAMPLE_RATE_RAW as u32);
        let maybe_layout = track_info.channel_layout;
        let maybe_chans = track_info.channels;
        // Prefer an explicit channel set, then a named layout; fall back to stereo.
        let chan_count = if let Some(chans) = maybe_chans {
            chans.count()
        } else if let Some(layout) = maybe_layout {
            match layout {
                Layout::Mono => 1,
                Layout::Stereo => 2,
                Layout::TwoPointOne => 3,
                Layout::FivePointOne => 6,
            }
        } else {
            2
        };
        let chan_limit = chan_limit.unwrap_or(chan_count);
        let resample = (sample_rate != SAMPLE_RATE_RAW as u32).then(|| {
            let spec = if let Some(chans) = maybe_chans {
                SignalSpec::new(SAMPLE_RATE_RAW as u32, chans)
            } else if let Some(layout) = maybe_layout {
                SignalSpec::new_with_layout(SAMPLE_RATE_RAW as u32, layout)
            } else {
                SignalSpec::new_with_layout(SAMPLE_RATE_RAW as u32, Layout::Stereo)
            };
            // Accumulates resampler input across packet boundaries.
            let scratch = AudioBuffer::<f32>::new(MONO_FRAME_SIZE as u64, spec);
            // TODO: integ. error handling here.
            let resampler = FftFixedOut::new(
                sample_rate as usize,
                SAMPLE_RATE_RAW,
                RESAMPLE_OUTPUT_FRAME_SIZE,
                4,
                chan_count,
            )
            .expect("Failed to create resampler.");
            let resampled_data = resampler.output_buffer_allocate();
            ResampleState {
                resampled_data,
                resampler,
                scratch,
                resample_pos: 0..0,
            }
        });
        Self {
            chan_count,
            chan_limit,
            parsed,
            inner_pos: 0..0,
            resample,
            done: false,
            interrupted_samples: Vec::with_capacity(chan_count),
            interrupted_byte_pos: 0..0,
        }
    }
    /// Number of channels this adapter will emit: the source's channel
    /// count, capped by the requested limit.
    pub fn num_channels(&self) -> usize {
        self.chan_count.min(self.chan_limit)
    }
    /// True once the source has hit EOF *and* every buffered sample
    /// (decoded frame remainder, resampler scratch/output, spilled bytes)
    /// has been fully written out.
    fn is_done(&self) -> bool {
        self.done
            && self.inner_pos.is_empty()
            && self.resample.as_ref().map_or(true, |v| {
                v.scratch.frames() == 0 && v.resample_pos.is_empty()
            })
            && self.interrupted_byte_pos.is_empty()
    }
}
impl Read for ToAudioBytes {
    /// Decodes (and, if needed, resamples) packets on demand, writing
    /// interleaved little-endian `f32` bytes into `buf`.
    fn read(&mut self, mut buf: &mut [u8]) -> IoResult<usize> {
        // NOTE: this is disturbingly similar to the mixer code, but different enough that we can't
        // just reuse it freely.
        let orig_sz = buf.len();
        let num_chans = self.num_channels();
        while !buf.is_empty() && !self.is_done() {
            // Phase 1: drain any partially written sample left over from a
            // previous read, byte by byte.
            // Work to clear interrupted channel floats.
            while !buf.is_empty() && !self.interrupted_byte_pos.is_empty() {
                let index_of_first_f32 = self.interrupted_byte_pos.start / SAMPLE_LEN;
                let f32_inner_pos = self.interrupted_byte_pos.start % SAMPLE_LEN;
                let f32_bytes_remaining = SAMPLE_LEN - f32_inner_pos;
                let to_write = f32_bytes_remaining.min(buf.len());
                let bytes = self.interrupted_samples[index_of_first_f32].to_le_bytes();
                // `Write` on `&mut [u8]` advances `buf` past the written bytes.
                let written = buf.write(&bytes[f32_inner_pos..][..to_write])?;
                self.interrupted_byte_pos.start += written;
            }
            // Phase 2: drain floats the resampler has already produced.
            // Clear out already produced resampled floats.
            if let Some(resample) = self.resample.as_mut() {
                if !buf.is_empty() && !resample.resample_pos.is_empty() {
                    let bytes_advanced = write_resample_buffer(
                        &resample.resampled_data,
                        buf,
                        &mut resample.resample_pos,
                        &mut self.interrupted_samples,
                        &mut self.interrupted_byte_pos,
                        num_chans,
                    );
                    buf = &mut buf[bytes_advanced..];
                }
                // If resampled output remains, the output buffer must be
                // exhausted (or mid-sample): loop rather than decode more.
                if !resample.resample_pos.is_empty() {
                    continue;
                }
            }
            // Phase 3: pull in fresh audio -- either the rest of the last
            // decoded packet, or the next matching packet from the container.
            // Now work with new packets.
            let source_packet = if !self.inner_pos.is_empty() {
                Some(self.parsed.decoder.last_decoded())
            } else if let Ok(pkt) = self.parsed.format.next_packet() {
                // Skip packets belonging to other tracks in the container.
                if pkt.track_id() != self.parsed.track_id {
                    continue;
                }
                self.parsed
                    .decoder
                    .decode(&pkt)
                    .map(|pkt| {
                        self.inner_pos = 0..pkt.frames();
                        pkt
                    })
                    .ok()
            } else {
                // EOF.
                None
            };
            if source_packet.is_none() {
                self.done = true;
                // Flush the resampler: zero-pad the scratch buffer up to a
                // full input window, then keep only the output samples which
                // correspond to real (non-padding) input.
                if let Some(resample) = self.resample.as_mut() {
                    if resample.scratch.frames() != 0 {
                        let data = &mut resample.resampled_data;
                        let resampler = &mut resample.resampler;
                        let in_len = resample.scratch.frames();
                        let to_render = resampler.input_frames_next().saturating_sub(in_len);
                        if to_render != 0 {
                            resample.scratch.render_reserved(Some(to_render));
                            for plane in resample.scratch.planes_mut().planes() {
                                for val in &mut plane[in_len..] {
                                    *val = 0.0f32;
                                }
                            }
                        }
                        // Luckily, we make use of the WHOLE input buffer here.
                        resampler
                            .process_into_buffer(resample.scratch.planes().planes(), data, None)
                            .unwrap();
                        // Calculate true end position using sample rate math
                        let ratio = (data[0].len() as f32) / (resample.scratch.frames() as f32);
                        let out_samples = (ratio * (in_len as f32)).round() as usize;
                        resample.scratch.clear();
                        resample.resample_pos = 0..out_samples;
                    }
                }
                // Now go back and make use of the buffer.
                // We have to do this here because we can't make any guarantees about
                // the read site having enough space to hold all samples etc.
                continue;
            }
            let source_packet = source_packet.unwrap();
            if let Some(resample) = self.resample.as_mut() {
                // Do a resample using the newest packet.
                let pkt_frames = source_packet.frames();
                if pkt_frames == 0 {
                    continue;
                }
                let needed_in_frames = resample.resampler.input_frames_next();
                let available_frames = self.inner_pos.len();
                // Scratch is required if samples were carried over from a
                // prior packet, or this packet alone can't fill the resampler.
                let force_copy =
                    resample.scratch.frames() != 0 || needed_in_frames > available_frames;
                if (!force_copy) && matches!(source_packet, AudioBufferRef::F32(_)) {
                    // This is the only case where we can pull off a straight resample...
                    // I.e., skip scratch.
                    // NOTE: if let needed as if-let && {bool} is nightly only.
                    if let AudioBufferRef::F32(s_pkt) = source_packet {
                        let refs: Vec<&[f32]> = s_pkt
                            .planes()
                            .planes()
                            .iter()
                            .map(|s| &s[self.inner_pos.start..][..needed_in_frames])
                            .collect();
                        self.inner_pos.start += needed_in_frames;
                        resample
                            .resampler
                            .process_into_buffer(&refs, &mut resample.resampled_data, None)
                            .unwrap();
                    } else {
                        unreachable!()
                    }
                } else {
                    // We either lack enough samples, or have the wrong data format, forcing
                    // a conversion/copy into scratch.
                    let old_scratch_len = resample.scratch.frames();
                    let missing_frames = needed_in_frames - old_scratch_len;
                    let frames_to_take = available_frames.min(missing_frames);
                    resample.scratch.render_reserved(Some(frames_to_take));
                    mix_logic::copy_into_resampler(
                        &source_packet,
                        &mut resample.scratch,
                        self.inner_pos.start,
                        old_scratch_len,
                        frames_to_take,
                    );
                    self.inner_pos.start += frames_to_take;
                    if resample.scratch.frames() == needed_in_frames {
                        resample
                            .resampler
                            .process_into_buffer(
                                resample.scratch.planes().planes(),
                                &mut resample.resampled_data,
                                None,
                            )
                            .unwrap();
                        resample.scratch.clear();
                    } else {
                        // Still not enough input buffered: fetch another packet.
                        continue;
                    }
                }
                resample.resample_pos = 0..resample.resampled_data[0].len();
            } else {
                // Newest packet may be used straight away: just convert format
                // to ensure it's f32.
                let bytes_advanced = write_out(
                    &source_packet,
                    buf,
                    &mut self.inner_pos,
                    &mut self.interrupted_samples,
                    &mut self.interrupted_byte_pos,
                    num_chans,
                );
                buf = &mut buf[bytes_advanced..];
            }
        }
        Ok(orig_sz - buf.len())
    }
}
impl Seek for ToAudioBytes {
    /// Seeking is not supported: this adapter is a forward-only decode stream.
    fn seek(&mut self, _pos: std::io::SeekFrom) -> IoResult<u64> {
        Err(IoErrorKind::Unsupported.into())
    }
}
impl MediaSource for ToAudioBytes {
    // Never seekable: decoding proceeds strictly forwards.
    fn is_seekable(&self) -> bool {
        false
    }
    // Unknown: the decoded/resampled byte length cannot be predicted upfront.
    fn byte_len(&self) -> Option<u64> {
        None
    }
}
#[inline]
fn write_out(
source: &AudioBufferRef,
target: &mut [u8],
source_pos: &mut Range<usize>,
spillover: &mut Vec<f32>,
spill_range: &mut Range<usize>,
num_chans: usize,
) -> usize {
match source {
AudioBufferRef::U8(v) =>
write_symph_buffer(v, target, source_pos, spillover, spill_range, num_chans),
AudioBufferRef::U16(v) =>
write_symph_buffer(v, target, source_pos, spillover, spill_range, num_chans),
AudioBufferRef::U24(v) =>
write_symph_buffer(v, target, source_pos, spillover, spill_range, num_chans),
AudioBufferRef::U32(v) =>
write_symph_buffer(v, target, source_pos, spillover, spill_range, num_chans),
AudioBufferRef::S8(v) =>
write_symph_buffer(v, target, source_pos, spillover, spill_range, num_chans),
AudioBufferRef::S16(v) =>
write_symph_buffer(v, target, source_pos, spillover, spill_range, num_chans),
AudioBufferRef::S24(v) =>
write_symph_buffer(v, target, source_pos, spillover, spill_range, num_chans),
AudioBufferRef::S32(v) =>
write_symph_buffer(v, target, source_pos, spillover, spill_range, num_chans),
AudioBufferRef::F32(v) =>
write_symph_buffer(v, target, source_pos, spillover, spill_range, num_chans),
AudioBufferRef::F64(v) =>
write_symph_buffer(v, target, source_pos, spillover, spill_range, num_chans),
}
}
/// Writes decoded planar audio into `buf` as interleaved LE `f32` bytes,
/// converting samples from format `S` on the way out.
///
/// Advances `source_pos` by the frames consumed. If `buf` ends partway
/// through an interleaved sample-group, that group is parked in
/// `spillover`/`spill_range` for later byte-wise draining.
///
/// Returns the number of bytes written to `buf`.
#[inline]
fn write_symph_buffer<S>(
    source: &AudioBuffer<S>,
    buf: &mut [u8],
    source_pos: &mut Range<usize>,
    spillover: &mut Vec<f32>,
    spill_range: &mut Range<usize>,
    num_chans: usize,
) -> usize
where
    S: Sample + IntoSample<f32>,
{
    // How many whole interleaved sample-groups fit into `buf`?
    let float_space = buf.len() / SAMPLE_LEN;
    let whole_groups = float_space / num_chans;
    let ragged_end = float_space % num_chans != 0;

    let remaining = source_pos.len();
    let to_write = remaining.min(whole_groups);

    // Buffer ends inside a group: stage one extra frame's samples so no
    // audio is dropped between reads.
    let need_spill = ragged_end && to_write < remaining;
    let consumed = if need_spill { to_write + 1 } else { to_write };
    let spill_src = source_pos.start + to_write;

    if need_spill {
        spillover.clear();
        *spill_range = 0..num_chans * SAMPLE_LEN;
    }

    for (chan, plane) in source.planes().planes()[..num_chans].iter().enumerate() {
        for (frame, sample) in plane[source_pos.start..][..to_write].iter().enumerate() {
            // Interleave: frame-major, channel-minor.
            let addr = (frame * num_chans + chan) * SAMPLE_LEN;
            let converted: f32 = (*sample).into_sample();
            buf[addr..addr + SAMPLE_LEN].copy_from_slice(&converted.to_le_bytes());
        }

        if need_spill {
            spillover.push(plane[spill_src].into_sample());
        }
    }

    source_pos.start += consumed;

    to_write * num_chans * SAMPLE_LEN
}
/// Writes resampled (already-`f32`) planar audio into `buf` as interleaved
/// little-endian bytes.
///
/// Consumes up to `source_pos.len()` frames from `source`, advancing
/// `source_pos`. If `buf` ends partway through an interleaved sample-group,
/// that group's samples are parked in `spillover` (with `spill_range`
/// marking the unread bytes) so the caller can drain them byte-wise later.
///
/// Returns the number of bytes written to `buf`.
#[inline]
fn write_resample_buffer(
    source: &[Vec<f32>],
    buf: &mut [u8],
    source_pos: &mut Range<usize>,
    spillover: &mut Vec<f32>,
    spill_range: &mut Range<usize>,
    num_chans: usize,
) -> usize {
    let sample_len = mem::size_of::<f32>();
    let float_space = buf.len() / sample_len;
    let interleaved_space = float_space / num_chans;
    let non_contiguous_end = (float_space % num_chans) != 0;

    let remaining = source_pos.len();
    let to_write = remaining.min(interleaved_space);

    // The output buffer ends inside an interleaved sample-group: stage one
    // extra frame's samples so no audio is dropped between reads.
    let need_spill = non_contiguous_end && to_write < remaining;
    let samples_used = to_write + usize::from(need_spill);
    let last_sample = source_pos.start + to_write;

    if need_spill {
        spillover.clear();
        *spill_range = 0..num_chans * sample_len;
    }

    for (i, plane) in source[..num_chans].iter().enumerate() {
        for (j, sample) in plane[source_pos.start..][..to_write].iter().enumerate() {
            // Interleave: frame-major, channel-minor.
            let addr = ((j * num_chans) + i) * sample_len;
            // Infallible by the space calculation above, so plain
            // `to_le_bytes` + `copy_from_slice` replaces the old fallible
            // byteorder write and its `expect`.
            buf[addr..addr + sample_len].copy_from_slice(&sample.to_le_bytes());
        }

        if need_spill {
            spillover.push(plane[last_sample]);
        }
    }

    source_pos.start += samples_used;

    to_write * num_chans * sample_len
}

View File

@@ -0,0 +1,91 @@
use crate::input::{AudioStream, Input, LiveInput};
use std::{
io::{Read, Result as IoResult},
mem,
process::Child,
};
use symphonia_core::io::{MediaSource, ReadOnlySource};
use tokio::runtime::Handle;
use tracing::debug;
/// Handle for a child process which ensures that any subprocesses are properly closed
/// on drop.
///
/// Reads are serviced from the stdout of the *last* child in the chain.
///
/// # Warning
/// To allow proper cleanup of child processes, if you create a process chain you must
/// make sure to use `From<Vec<Child>>`. Here, the *last* process in the `Vec` will be
/// used as the audio byte source.
#[derive(Debug)]
pub struct ChildContainer(pub Vec<Child>);
impl Read for ChildContainer {
fn read(&mut self, buffer: &mut [u8]) -> IoResult<usize> {
match self.0.last_mut() {
Some(ref mut child) => child.stdout.as_mut().unwrap().read(buffer),
None => Ok(0),
}
}
}
impl ChildContainer {
    /// Create a new [`ChildContainer`] from a chain of child processes.
    ///
    /// The last element of `children` acts as the audio byte source.
    #[must_use]
    pub fn new(children: Vec<Child>) -> Self {
        Self(children)
    }
}
/// Wraps a single child process as a one-element chain.
impl From<Child> for ChildContainer {
    fn from(child: Child) -> Self {
        Self(vec![child])
    }
}

/// Wraps a full process chain; the last element is used as the byte source.
impl From<Vec<Child>> for ChildContainer {
    fn from(children: Vec<Child>) -> Self {
        Self(children)
    }
}
impl From<ChildContainer> for Input {
    fn from(val: ChildContainer) -> Self {
        // Child stdout cannot seek, so present it as a read-only raw stream.
        let source = ReadOnlySource::new(val);
        let audio_stream = AudioStream {
            input: Box::new(source) as Box<dyn MediaSource>,
            hint: None,
        };
        Input::Live(LiveInput::Raw(audio_stream), None)
    }
}
impl Drop for ChildContainer {
    // Kill and reap the whole chain. `Child::kill`/`wait` block, so when
    // dropped from inside a tokio runtime the cleanup is shifted onto the
    // blocking thread pool rather than stalling an async worker.
    fn drop(&mut self) {
        // Take ownership of the chain so it can be moved into the task.
        let children = mem::take(&mut self.0);
        if let Ok(handle) = Handle::try_current() {
            handle.spawn_blocking(move || {
                cleanup_child_processes(children);
            });
        } else {
            cleanup_child_processes(children);
        }
    }
}
/// Kills the audio-source process (the last in the chain), then reaps
/// every process in reverse order, logging any failure at debug level.
fn cleanup_child_processes(mut children: Vec<Child>) {
    // Nothing to do for an empty chain.
    let kill_result = match children.last_mut() {
        Some(child) => child.kill(),
        None => return,
    };

    let result = kill_result.and_then(|()| {
        // Wait on each process, last-to-first, so none are left as zombies.
        children
            .iter_mut()
            .rev()
            .try_for_each(|child| child.wait().map(|_| ()))
    });

    if let Err(e) = result {
        debug!("Error awaiting child process: {:?}", e);
    }
}

View File

@@ -0,0 +1,6 @@
mod async_adapter;
pub mod cached;
mod child;
mod raw_adapter;
pub use self::{async_adapter::*, child::*, raw_adapter::*};

View File

@@ -0,0 +1,114 @@
use crate::input::{AudioStream, Input, LiveInput};
use byteorder::{LittleEndian, WriteBytesExt};
use std::io::{ErrorKind as IoErrorKind, Read, Result as IoResult, Seek, SeekFrom, Write};
use symphonia::core::io::MediaSource;
// Format header: an 8-byte magic string followed by two LE u32s
// (sample rate, channel count). The trailing NULs are placeholders
// overwritten by `RawAdapter::new`.
const FMT_HEADER: &[u8; 16] = b"SbirdRaw\0\0\0\0\0\0\0\0";
/// Adapter around a raw, interleaved, `f32` PCM byte stream.
///
/// This may be used to port legacy songbird audio sources to be compatible with
/// the symphonia backend, particularly those with unknown length (making WAV
/// unsuitable).
///
/// The format is described in [`RawReader`].
///
/// [`RawReader`]: crate::input::codecs::RawReader
pub struct RawAdapter<A> {
    /// 16-byte synthetic header (magic + sample rate + channel count)
    /// served before any bytes of `inner`.
    prepend: [u8; 16],
    /// The wrapped PCM byte source.
    inner: A,
    /// Read position over the combined header + inner stream.
    pos: u64,
}
impl<A: MediaSource> RawAdapter<A> {
    /// Wrap an input PCM byte source to be readable by symphonia.
    pub fn new(audio_source: A, sample_rate: u32, channel_count: u32) -> Self {
        // Start from the magic bytes, then patch in the stream parameters
        // as little-endian u32s at offsets 8 and 12.
        let mut prepend = *FMT_HEADER;
        prepend[8..12].copy_from_slice(&sample_rate.to_le_bytes());
        prepend[12..16].copy_from_slice(&channel_count.to_le_bytes());

        Self {
            prepend,
            inner: audio_source,
            pos: 0,
        }
    }
}
impl<A: MediaSource> Read for RawAdapter<A> {
    /// Serves the synthetic 16-byte header first, then passes reads
    /// straight through to the wrapped source.
    fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
        let header_len = self.prepend.len();

        let out = if self.pos >= header_len as u64 {
            self.inner.read(buf)
        } else {
            // Still inside the header: copy as much of it as fits.
            let start = self.pos as usize;
            let to_copy = buf.len().min(header_len - start);
            buf[..to_copy].copy_from_slice(&self.prepend[start..start + to_copy]);
            Ok(to_copy)
        };

        if let Ok(n) = out {
            self.pos += n as u64;
        }

        out
    }
}
impl<A: MediaSource> Seek for RawAdapter<A> {
    /// Seeks within the combined (header + inner) stream.
    ///
    /// Positions `>= 16` map onto the inner stream shifted by the header
    /// length; positions inside the header rewind the inner source so the
    /// header bytes are re-served by `read`.
    fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> {
        if !self.is_seekable() {
            return Err(IoErrorKind::Unsupported.into());
        }

        let target_pos = match pos {
            SeekFrom::Start(p) => p,
            // Total stream length is not tracked here, so end-relative
            // seeks cannot be honoured.
            SeekFrom::End(_) => return Err(IoErrorKind::Unsupported.into()),
            // Only *backward* seeks landing before the stream start are
            // invalid; the old check rejected any positive offset larger
            // than the current position too.
            SeekFrom::Current(p) if p < 0 && p.unsigned_abs() > self.pos =>
                return Err(IoErrorKind::InvalidInput.into()),
            SeekFrom::Current(p) => (self.pos as i64 + p) as u64,
        };

        let prepend_len = self.prepend.len() as u64;
        let out = if target_pos <= prepend_len {
            // Target lies inside the synthetic header: restart the inner
            // source, and let `read` replay header bytes from `target_pos`.
            self.inner.rewind().map(|_| 0)
        } else {
            // Adapter offset x corresponds to inner offset x - 16, matching
            // the mapping used by `read` (the old code forgot to subtract).
            self.inner.seek(SeekFrom::Start(target_pos - prepend_len))
        };

        match out {
            Ok(0) => self.pos = target_pos,
            Ok(a) => self.pos = a + prepend_len,
            _ => {},
        }

        out.map(|_| self.pos)
    }
}
impl<A: MediaSource> MediaSource for RawAdapter<A> {
    /// Seekability is inherited from the wrapped source.
    fn is_seekable(&self) -> bool {
        self.inner.is_seekable()
    }

    /// Total length is the inner length plus the synthetic header, when known.
    fn byte_len(&self) -> Option<u64> {
        let header = self.prepend.len() as u64;
        self.inner.byte_len().map(|len| len + header)
    }
}
impl<A: MediaSource + Send + Sync + 'static> From<RawAdapter<A>> for Input {
    fn from(val: RawAdapter<A>) -> Self {
        // A fresh adapter is an unparsed byte stream with no format hint.
        let input: Box<dyn MediaSource> = Box::new(val);
        Input::Live(LiveInput::Raw(AudioStream { input, hint: None }), None)
    }
}