elapsed_time: int
stream_title: str | None
content_type: str | None
- group_childs_count: int
+ output_formats: list[str] | None
class PlayerQueuesController(CoreController):
next_item_id=None,
elapsed_time=0,
stream_title=None,
- group_childs_count=0,
+ output_formats=None,
),
)
+        # Comparing the collected output formats is enough to detect any change
+        # in the DSPDetails (i.e. the child count changed, or any output format changed)
+ output_formats = []
+ if player.output_format:
+ output_formats.append(player.output_format.output_format_str)
+ for child_id in player.group_childs:
+ if (child := self.mass.players.get(child_id)) and child.output_format:
+ output_formats.append(child.output_format.output_format_str)
+
# basic throttle: do not send state changed events if queue did not actually change
new_state = CompareState(
queue_id=queue_id,
content_type=queue.current_item.streamdetails.audio_format.output_format_str
if queue.current_item and queue.current_item.streamdetails
else None,
- group_childs_count=len(player.group_childs),
+ output_formats=output_formats,
)
changed_keys = get_changed_keys(prev_state, new_state)
# return early if nothing changed
else:
self._prev_states.pop(queue_id, None)
- if "group_childs_count" in changed_keys:
- # refresh DSP details since a player has been added/removed from the group
+ if "output_formats" in changed_keys:
+ # refresh DSP details since they may have changed
dsp = get_stream_dsp_details(self.mass, queue_id)
if queue.current_item and queue.current_item.streamdetails:
queue.current_item.streamdetails.dsp = dsp
),
input_format=pcm_format,
output_format=output_format,
- filter_params=get_player_filter_params(self.mass, queue_player.player_id, pcm_format),
+ filter_params=get_player_filter_params(
+ self.mass, queue_player.player_id, pcm_format, output_format
+ ),
):
try:
await resp.write(chunk)
input_format=flow_pcm_format,
output_format=output_format,
filter_params=get_player_filter_params(
- self.mass, queue_player.player_id, flow_pcm_format
+ self.mass, queue_player.player_id, flow_pcm_format, output_format
),
chunk_size=icy_meta_interval if enable_icy else None,
):
filters=dsp_config.filters,
output_gain=dsp_config.output_gain,
output_limiter=dsp_config.output_limiter,
+ output_format=player.output_format,
)
) -> dict[str, DSPDetails]:
"""Return DSP details of all players playing this queue, keyed by player_id."""
player = mass.players.get(queue_id)
- dsp = {}
+ dsp: dict[str, DSPDetails] = {}
group_preventing_dsp = is_grouping_preventing_dsp(player)
+ output_format = None
- # We skip the PlayerGroups as they don't provide an audio output
- # by themselves, but only sync other players.
- if not player.provider.startswith("player_group"):
+ if player.provider.startswith("player_group"):
+ if group_preventing_dsp:
+ try:
+ # We need a bit of a hack here since only the leader knows the correct output format
+ provider = mass.get_provider(player.provider)
+ if provider:
+ output_format = provider._get_sync_leader(player).output_format
+ except RuntimeError:
+ # _get_sync_leader will raise a RuntimeError if this group has no players
+                # just log a warning and continue without output_format
+ LOGGER.warning("Unable to get the sync group leader for %s", queue_id)
+ else:
+        # We only add real players (PlayerGroups are skipped since they merely sync their member players)
details = get_player_dsp_details(mass, player)
details.is_leader = True
dsp[player.player_id] = details
+ if group_preventing_dsp:
+ # The leader is responsible for sending the (combined) audio stream, so get
+ # the output format from the leader.
+ output_format = player.output_format
if player and player.group_childs:
# grouped playback, get DSP details for each player in the group
dsp[child_id] = get_player_dsp_details(
mass, child_player, group_preventing_dsp=group_preventing_dsp
)
+ if group_preventing_dsp:
+                    # Use the correct format from the group leader, since
+                    # this player is part of a group that does not support
+                    # multi-device DSP processing.
+ dsp[child_id].output_format = output_format
return dsp
mass: MusicAssistant,
player_id: str,
input_format: AudioFormat,
+ output_format: AudioFormat,
) -> list[str]:
"""Get player specific filter parameters for ffmpeg (if any)."""
filter_params = []
# This should normally never happen, but if it does, we disable DSP.
dsp.enabled = False
+    # At this point we implicitly know which output format is used for the
+    # player in the audio processing chain; store it so it can later be
+    # shown to the user in the UI.
+ player.output_format = output_format
+
if dsp.enabled:
# Apply input gain
if dsp.input_gain != 0:
audio_input="-",
input_format=self.session.input_format,
output_format=AIRPLAY_PCM_FORMAT,
- filter_params=get_player_filter_params(self.mass, player_id, self.session.input_format),
+ filter_params=get_player_filter_params(
+ self.mass, player_id, self.session.input_format, AIRPLAY_PCM_FORMAT
+ ),
audio_output=write,
)
await self._ffmpeg_proc.start()
filter_params = None
if child_player_id:
filter_params = get_player_filter_params(
- self.mass, child_player_id, stream.input_format
+ self.mass, child_player_id, stream.input_format, output_format
)
async for chunk in stream.get_stream(
"Start serving multi-client flow audio stream to %s",
child_player.display_name,
)
-
+ output_format = AudioFormat(content_type=ContentType.try_parse(fmt))
async for chunk in stream.get_stream(
- output_format=AudioFormat(content_type=ContentType.try_parse(fmt)),
- filter_params=get_player_filter_params(self.mass, child_player_id, stream.audio_format)
+ output_format=output_format,
+ filter_params=get_player_filter_params(
+ self.mass, child_player_id, stream.audio_format, output_format
+ )
if child_player_id
else None,
):
audio_input=audio_source,
input_format=input_format,
output_format=DEFAULT_SNAPCAST_FORMAT,
- filter_params=get_player_filter_params(self.mass, player_id, input_format),
+ filter_params=get_player_filter_params(
+ self.mass, player_id, input_format, DEFAULT_SNAPCAST_FORMAT
+ ),
audio_output=stream_path,
) as ffmpeg_proc:
player.state = PlayerState.PLAYING