# NOTE(review): this span is a diff/patch fragment ("+"/"-" line markers) of
# Python with its indentation stripped -- it is not runnable as-is. Comments
# below describe the patched (post-"+") code.
# calculate active group and active source
player.active_group = self._get_active_player_group(player)
player.active_source = self._get_active_source(player)
+ player.volume_level = player.volume_level or 0 # guard for None volume
# calculate group volume
player.group_volume = self._get_group_volume_level(player)
# For (sync) group players, recompute group volume as the mean of the
# members' volume levels, iterating active members only
# (second arg True to iter_group_members -- presumably "active only";
# TODO confirm against the helper's signature).
if player.type in (PlayerType.GROUP, PlayerType.SYNC_GROUP):
group_volume = 0
active_players = 0
for child_player in self.iter_group_members(player, True):
# patch makes the sum None-safe, matching the guard added above
- group_volume += child_player.volume_level
+ group_volume += child_player.volume_level or 0
active_players += 1
# avoid ZeroDivisionError when the group has no active members
if active_players:
group_volume = group_volume / active_players
)
# NOTE(review): diff/patch fragment ("+"/"-" markers), indentation stripped;
# lines from the surrounding (async generator) function are missing, e.g. the
# bare "):" below closes a call whose opening is outside this view.
# crossfading only applies between regular tracks
if start_queue_item.media_type != MediaType.TRACK:
use_crossfade = False
# pcm_sample_size = bytes per second of PCM audio:
# sample_rate * bytes-per-sample * channel count. The patch replaces the
# hardcoded stereo factor (2) with the actual channel count from pcm_format.
- pcm_sample_size = int(pcm_format.sample_rate * (pcm_format.bit_depth / 8) * 2)
+ pcm_sample_size = int(
+ pcm_format.sample_rate * (pcm_format.bit_depth / 8) * pcm_format.channels
+ )
self.logger.info(
"Start Queue Flow stream for Queue %s - crossfade: %s",
queue.display_name,
use_crossfade,
)
total_bytes_sent = 0
# patch records the stream start time; used below to switch to the larger
# buffer only after 120 seconds of streaming (replacing the byte-counter
# heuristic that the "-" lines remove)
+ started = time.time()
while True:
# get (next) queue item to stream
):
# buffer size needs to be big enough to include the crossfade part
# allow it to be a bit smaller when playback just starts
- if not use_crossfade or (total_bytes_sent + bytes_written == 0):
+ if not use_crossfade:
req_buffer_size = pcm_sample_size * 2
- elif (total_bytes_sent + bytes_written) < (crossfade_size * 2):
- req_buffer_size = pcm_sample_size * 5
- else:
+ elif (time.time() - started) > 120:
# additional 5 seconds to strip silence from last part
# (pcm_sample_size is one second of PCM, so * 5 ~= 5 seconds)
req_buffer_size = crossfade_size + pcm_sample_size * 5
+ else:
+ req_buffer_size = crossfade_size
# ALWAYS APPEND CHUNK TO BUFFER
buffer += chunk
#### OTHER: enough data in buffer, feed to output
# drain the buffer one second of PCM at a time while it exceeds the
# required size; patch also drops the per-chunk eventloop yield
while len(buffer) > req_buffer_size:
yield buffer[:pcm_sample_size]
- await asyncio.sleep(0) # yield to eventloop
bytes_written += pcm_sample_size
buffer = buffer[pcm_sample_size:]