# Logger for the cache subsystem, nested under the main Music Assistant logger.
LOGGER = logging.getLogger(f"{MASS_LOGGER_NAME}.cache")
# Config entry key for the "clear cache" option.
CONF_CLEAR_CACHE = "clear_cache"
DEFAULT_CACHE_EXPIRATION = 86400 * 30  # 30 days
# Bump this whenever the cache DB layout (or required cleanup) changes;
# on startup a lower stored version triggers a migration (or a full
# drop/recreate of the cache table if the migration fails).
DB_SCHEMA_VERSION = 7
# Context variable callers can set to skip cache reads in the current context.
BYPASS_CACHE: ContextVar[bool] = ContextVar("BYPASS_CACHE", default=False)
prev_version,
DB_SCHEMA_VERSION,
)
-
- if prev_version < DB_SCHEMA_VERSION:
- # for now just keep it simple and just recreate the table(s)
+ try:
+ await self.__migrate_database(prev_version)
+ except Exception as err:
+ LOGGER.warning("Cache database migration failed: %s, resetting cache", err)
await self.database.execute(f"DROP TABLE IF EXISTS {DB_TABLE_CACHE}")
-
- # recreate missing table(s)
await self.__create_database_tables()
# store current schema version
)
await self.database.commit()
+ async def __migrate_database(self, prev_version: int) -> None:
+ """Perform a database migration."""
+ assert self.database is not None
+ if prev_version <= 6:
+ # clear spotify cache entries to fix bloated cache from playlist pagination bug
+ await self.database.delete(DB_TABLE_CACHE, query="WHERE provider LIKE '%spotify%'")
+ await self.database.commit()
+
def __schedule_cleanup_task(self) -> None:
"""Schedule the cleanup task."""
self.mass.create_task(self.auto_cleanup())
spotify_result = await self._get_data_with_caching(
uri, cache_checksum, limit=page_size, offset=offset, use_global_session=use_global
)
+ total = spotify_result.get("total", 0)
for index, item in enumerate(spotify_result["items"], 1):
+ # Spotify wraps/recycles items for offsets beyond the playlist size,
+ # so we need to break when we've reached the total.
+ if (offset + index) > total:
+ break
if not (item and item["track"] and item["track"]["id"]):
continue
track = parse_track(item["track"], self)