from music_assistant.common.models.errors import ERROR_MAP
from music_assistant.common.models.event import MassEvent
from music_assistant.common.models.media_items import MediaItemImage
-from music_assistant.constants import SCHEMA_VERSION
+from music_assistant.constants import API_SCHEMA_VERSION
from .connection import WebsocketsConnection
from .music import Music
info = ServerInfoMessage.from_dict(result)
# basic check for server schema version compatibility
- if info.min_supported_schema_version > SCHEMA_VERSION:
+ if info.min_supported_schema_version > API_SCHEMA_VERSION:
# our schema version is too low and can't be handled by the server anymore.
await self.connection.disconnect()
raise InvalidServerVersion(
import pathlib
from typing import Final
-SCHEMA_VERSION: Final[int] = 22
+API_SCHEMA_VERSION: Final[int] = 22
MIN_SCHEMA_VERSION = 22
ROOT_LOGGER_NAME: Final[str] = "music_assistant"
import time
from collections import OrderedDict
from collections.abc import Iterator, MutableMapping
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, Final
from music_assistant.common.helpers.json import json_dumps, json_loads
from music_assistant.common.models.config_entries import ConfigEntry, ConfigValueType
from music_assistant.common.models.enums import ConfigEntryType
-from music_assistant.constants import (
- DB_TABLE_CACHE,
- DB_TABLE_SETTINGS,
- ROOT_LOGGER_NAME,
- SCHEMA_VERSION,
-)
+from music_assistant.constants import DB_TABLE_CACHE, DB_TABLE_SETTINGS, ROOT_LOGGER_NAME
from music_assistant.server.helpers.database import DatabaseConnection
from music_assistant.server.models.core_controller import CoreController
LOGGER = logging.getLogger(f"{ROOT_LOGGER_NAME}.cache")
CONF_CLEAR_CACHE = "clear_cache"
+DB_SCHEMA_VERSION: Final[int] = 22
class CacheController(CoreController):
except (KeyError, ValueError):
prev_version = 0
- if prev_version not in (0, SCHEMA_VERSION):
+ if prev_version not in (0, DB_SCHEMA_VERSION):
LOGGER.info(
"Performing database migration from %s to %s",
prev_version,
- SCHEMA_VERSION,
+ DB_SCHEMA_VERSION,
)
- if prev_version < SCHEMA_VERSION:
+ if prev_version < DB_SCHEMA_VERSION:
# for now just keep it simple and just recreate the table(s)
await self.database.execute(f"DROP TABLE IF EXISTS {DB_TABLE_CACHE}")
# store current schema version
await self.database.insert_or_replace(
DB_TABLE_SETTINGS,
- {"key": "version", "value": str(SCHEMA_VERSION), "type": "str"},
+ {"key": "version", "value": str(DB_SCHEMA_VERSION), "type": "str"},
)
# compact db
await self.database.execute("VACUUM")
import os
import statistics
from itertools import zip_longest
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Final
from music_assistant.common.helpers.datetime import utc_timestamp
+from music_assistant.common.helpers.json import json_dumps, json_loads
from music_assistant.common.helpers.uri import parse_uri
from music_assistant.common.models.config_entries import ConfigEntry, ConfigValueType
from music_assistant.common.models.enums import (
DB_TABLE_TRACK_LOUDNESS,
DB_TABLE_TRACKS,
ROOT_LOGGER_NAME,
- SCHEMA_VERSION,
)
from music_assistant.server.helpers.api import api_command
from music_assistant.server.helpers.database import DatabaseConnection
LOGGER = logging.getLogger(f"{ROOT_LOGGER_NAME}.music")
DEFAULT_SYNC_INTERVAL = 3 * 60 # default sync interval in minutes
CONF_SYNC_INTERVAL = "sync_interval"
+DB_SCHEMA_VERSION: Final[int] = 23
class MusicController(CoreController):
except (KeyError, ValueError):
prev_version = 0
- if prev_version not in (0, SCHEMA_VERSION):
+ if prev_version not in (0, DB_SCHEMA_VERSION):
LOGGER.info(
"Performing database migration from %s to %s",
prev_version,
- SCHEMA_VERSION,
+ DB_SCHEMA_VERSION,
)
- if prev_version < 22:
+ if prev_version == 22:
+ # migrate provider_mapping column (audio_format)
+ for table in ("tracks", "albums"):
+ async for item in self.database.iter_items(table):
+ prov_mappings = json_loads(item["provider_mappings"])
+ needs_update = False
+ for mapping in prov_mappings:
+ if "content_type" in mapping:
+ needs_update = True
+ mapping["audio_format"] = {
+ "content_type": mapping.pop("content_type"),
+ "sample_rate": mapping.pop("sample_rate"),
+ "bit_depth": mapping.pop("bit_depth"),
+ "channels": mapping.pop("channels", 2),
+ "bit_rate": mapping.pop("bit_rate", 320),
+ }
+ if needs_update:
+ await self.database.update(
+ table,
+ {
+ "item_id": item["item_id"],
+ },
+ {
+ "provider_mappings": json_dumps(prov_mappings),
+ },
+ )
+ elif prev_version < 22:
# for now just keep it simple and just recreate the tables if the schema is too old
await self.database.execute(f"DROP TABLE IF EXISTS {DB_TABLE_ARTISTS}")
await self.database.execute(f"DROP TABLE IF EXISTS {DB_TABLE_ALBUMS}")
# store current schema version
await self.database.insert_or_replace(
DB_TABLE_SETTINGS,
- {"key": "version", "value": str(SCHEMA_VERSION), "type": "str"},
+ {"key": "version", "value": str(DB_SCHEMA_VERSION), "type": "str"},
)
# create indexes if needed
await self.__create_database_indexes()
base_url=f"http://{self.publish_ip}:{self.publish_port}",
static_routes=[
(
- "GET",
+ "*",
"/{queue_id}/multi/{job_id}/{player_id}/{queue_item_id}.{fmt}",
self.serve_multi_subscriber_stream,
),
(
- "GET",
+ "*",
"/{queue_id}/flow/{queue_item_id}.{fmt}",
self.serve_queue_flow_stream,
),
(
- "GET",
+ "*",
"/{queue_id}/single/{queue_item_id}.{fmt}",
self.serve_queue_item_stream,
),
input_args += ["-metadata", 'title="Music Assistant"']
# select output args
if output_format.content_type == ContentType.FLAC:
- output_args = ["-f", "flac", "-compression_level", "3"]
+ # set compression level to 0 to prevent issues with cast players
+ output_args = ["-f", "flac", "-compression_level", "0"]
elif output_format.content_type == ContentType.AAC:
output_args = ["-f", "adts", "-c:a", "aac", "-b:a", "320k"]
elif output_format.content_type == ContentType.MP3:
else:
output_args = ["-f", output_format.content_type.value]
- output_args += [
- # append channels
- "-ac",
- str(output_format.channels),
- # append sample rate
- "-ar",
- str(output_format.sample_rate),
- # output = pipe
- "-",
- ]
+ # append channels
+ output_args += ["-ac", str(output_format.channels)]
+ # append sample rate (if codec is lossless)
+ if output_format.content_type.is_lossless():
+ output_args += ["-ar", str(output_format.sample_rate)]
+ # append output = pipe
+ output_args += ["-"]
+
# collect extra and filter args
# TODO: add convolution/DSP/roomcorrections here!
extra_args = []
pass
-API_SCHEMA_VERSION = 1
-
LOGGER = logging.getLogger(__name__)
_F = TypeVar("_F", bound=Callable[..., Any])
"""Database helpers and logic."""
from __future__ import annotations
-from collections.abc import Mapping
+from collections.abc import AsyncGenerator, Mapping
from typing import Any
import aiosqlite
async def execute(self, query: str | str, values: dict = None) -> Any:
"""Execute command on the database."""
return await self._db.execute(query, values)
+
+ async def iter_items(
+ self,
+ table: str,
+ match: dict = None,
+ ) -> AsyncGenerator[Mapping, None]:
+ """Iterate all items within a table."""
+ limit: int = 500
+ offset: int = 0
+ while True:
+ next_items = await self.get_rows(
+ table=table,
+ match=match,
+ offset=offset,
+ limit=limit,
+ )
+ for item in next_items:
+ yield item
+ if len(next_items) < limit:
+ break
+ offset += limit
if handler := self._dynamic_routes.get(key):
return await handler(request)
# deny all other requests
- self.logger.debug(
+ self.logger.warning(
"Received unhandled %s request to %s from %s\nheaders: %s\n",
request.method,
request.path,
content_type=f'audio/{url.split(".")[-1].split("?")[0]}',
title="Music Assistant",
thumb=MASS_LOGO_ONLINE,
- media_info={
- "customData": {
- "queue_item_id": "flow",
- }
- },
)
return
StreamDetails,
Track,
)
-from music_assistant.constants import SCHEMA_VERSION, VARIOUS_ARTISTS, VARIOUS_ARTISTS_ID
+from music_assistant.constants import VARIOUS_ARTISTS, VARIOUS_ARTISTS_ID
from music_assistant.server.controllers.cache import use_cache
+from music_assistant.server.controllers.music import DB_SCHEMA_VERSION
from music_assistant.server.helpers.compare import compare_strings
from music_assistant.server.helpers.playlists import parse_m3u, parse_pls
from music_assistant.server.helpers.tags import parse_tags, split_items
if MediaType.TRACK not in media_types or MediaType.PLAYLIST not in media_types:
return
cache_key = f"{self.instance_id}.checksums"
- prev_checksums = await self.mass.cache.get(cache_key, SCHEMA_VERSION)
+ prev_checksums = await self.mass.cache.get(cache_key, DB_SCHEMA_VERSION)
save_checksum_interval = 0
if prev_checksums is None:
prev_checksums = {}
# save checksums every 100 processed items
        # this allows us to pick up where we left off when the initial scan gets interrupted
if save_checksum_interval == 100:
- await self.mass.cache.set(cache_key, cur_checksums, SCHEMA_VERSION)
+ await self.mass.cache.set(cache_key, cur_checksums, DB_SCHEMA_VERSION)
save_checksum_interval = 0
else:
save_checksum_interval += 1
# store (final) checksums in cache
- await self.mass.cache.set(cache_key, cur_checksums, SCHEMA_VERSION)
+ await self.mass.cache.set(cache_key, cur_checksums, DB_SCHEMA_VERSION)
async def _process_deletions(self, deleted_files: set[str]) -> None:
"""Process all deletions."""
)
)
playlist.owner = self.name
- checksum = f"{SCHEMA_VERSION}.{file_item.checksum}"
+ checksum = f"{DB_SCHEMA_VERSION}.{file_item.checksum}"
playlist.metadata.checksum = checksum
return playlist
from music_assistant.common.models.event import MassEvent
from music_assistant.common.models.provider import ProviderManifest
from music_assistant.constants import (
+ API_SCHEMA_VERSION,
CONF_PROVIDERS,
CONF_SERVER_ID,
MIN_SCHEMA_VERSION,
ROOT_LOGGER_NAME,
- SCHEMA_VERSION,
)
from music_assistant.server.controllers.cache import CacheController
from music_assistant.server.controllers.config import ConfigController
return ServerInfoMessage(
server_id=self.server_id,
server_version=self.version,
- schema_version=SCHEMA_VERSION,
+ schema_version=API_SCHEMA_VERSION,
min_supported_schema_version=MIN_SCHEMA_VERSION,
base_url=self.webserver.base_url,
homeassistant_addon=self.running_as_hass_addon,