"""Client classes for accessing the Beets web API from Mopidy-Beets."""
import functools
import logging
import re
import time
import urllib.error
import urllib.parse
import urllib.request

import requests
from mopidy import httpclient
from requests.exceptions import RequestException

import mopidy_beets
from mopidy_beets.translator import parse_album, parse_track
logger = logging.getLogger(__name__)
class cache(object): # noqa: N801
# TODO: merge this to util library
def __init__(self, ctl=8, ttl=3600):
self.cache = {}
self.ctl = ctl
self.ttl = ttl
self._call_count = 1
def __call__(self, func):
def _memoized(*args):
self.func = func
now = time.time()
try:
value, last_update = self.cache[args]
age = now - last_update
if self._call_count >= self.ctl or age > self.ttl:
self._call_count = 1
raise AttributeError
self._call_count += 1
return value
except (KeyError, AttributeError):
value = self.func(*args)
self.cache[args] = (value, now)
return value
except TypeError:
return self.func(*args)
return _memoized
class BeetsRemoteClient(object):
    """Client for the Beets web API (the ``beets web`` plugin).

    Read-only queries are memoized via the ``cache`` decorator defined in
    this module.
    """

    def __init__(self, endpoint, proxy_config):
        """Create a session and probe the API endpoint once.

        @param endpoint: base URL of the Beets web API (no trailing slash)
        @param proxy_config: Mopidy proxy configuration mapping
        """
        super().__init__()
        self.api = self._get_session(proxy_config)
        self.api_endpoint = endpoint
        logger.info("Connecting to Beets remote library %s", endpoint)
        try:
            self.api.get(self.api_endpoint)
            self.has_connection = True
        except RequestException as e:
            logger.error("Beets error - connection failed: %s", e)
            self.has_connection = False

    def _get_session(self, proxy_config):
        """Return a requests session with proxy and user-agent configured."""
        proxy = httpclient.format_proxy(proxy_config)
        full_user_agent = httpclient.format_user_agent(
            "/".join(
                (
                    mopidy_beets.BeetsExtension.dist_name,
                    mopidy_beets.__version__,
                )
            )
        )
        session = requests.Session()
        session.proxies.update({"http": proxy, "https": proxy})
        session.headers.update({"user-agent": full_user_agent})
        return session

    @cache()
    def get_tracks(self):
        """Return all tracks of the library (one request per track)."""
        # tolerate a failed request: _get returns None on errors
        response = self._get("/item/") or {}
        track_ids = response.get("item_ids") or []
        tracks = [self.get_track(track_id) for track_id in track_ids]
        return tracks

    @cache(ctl=16)
    def get_track(self, track_id):
        """Return the parsed track for the given beets item ID."""
        return parse_track(self._get("/item/%s" % track_id), self)

    @cache(ctl=16)
    def get_album(self, album_id):
        """Return the parsed album for the given beets album ID."""
        return parse_album(self._get("/album/%s" % album_id), self)

    @cache()
    def get_tracks_by(self, attributes, exact_text, sort_fields):
        """Return parsed tracks matching the given attribute query."""
        tracks = self._get_objects_by_attribute(
            "/item", attributes, exact_text, sort_fields
        )
        return self._parse_multiple_tracks(tracks)

    @cache()
    def get_albums_by(self, attributes, exact_text, sort_fields):
        """Return parsed albums matching the given attribute query."""
        albums = self._get_objects_by_attribute(
            "/album", attributes, exact_text, sort_fields
        )
        return self._parse_multiple_albums(albums)

    def _get_objects_by_attribute(
        self, base_path, attributes, exact_text, sort_fields
    ):
        """The beets web-api accepts queries like:
        /item/query/album_id:183/track:2
        /item/query/album:Foo
        /album/query/track_no:12/year+/month+
        Text-based matches (e.g. 'album' or 'artist') are case-independent
        'is in' matches. Thus we need to filter the result, since we want
        exact matches.
        @param attributes: attributes to be matched
        @type attribute: list of key/value pairs or strings
        @param exact_text: True for exact matches, False for
                           case-insensitive 'is in' matches (only relevant
                           for text values - not integers)
        @type exact_text: bool
        @param sort_fields: fieldnames, each followed by '+' or '-'
        @type sort_fields: list of strings
        @rtype: list of json datasets describing tracks or albums
        """
        # assemble the query string
        query_parts = []
        # only used for 'exact_text'
        exact_query_list = []

        def quote_and_encode(text):
            if isinstance(text, (int, float)):
                text = str(text)
            # Escape colons. The beets web API uses the colon to separate
            # field name and search term.
            text = text.replace(":", r"\:")
            # quoting for the query string
            return urllib.parse.quote(text)

        for attribute in attributes:
            if isinstance(attribute, str):
                query_parts.append(quote_and_encode(attribute))
                exact_query_list.append((None, attribute))
            else:
                # the beets API accepts upper and lower case, but always
                # returns lower case attributes
                key = attribute[0].lower()
                value = attribute[1]
                query_parts.append(
                    "{}:{}".format(
                        quote_and_encode(key), quote_and_encode(value)
                    )
                )
                # Try to add a simple regex filter, if we look for a string.
                # This will reduce the ressource consumption of the query on
                # the server side (and for our 'exact' matching below).
                if exact_text and isinstance(value, str):
                    regex_query = "^{}$".format(re.escape(value))
                    beets_query = "{}::{}".format(
                        quote_and_encode(key), quote_and_encode(regex_query)
                    )
                    logger.debug(
                        "Beets - regular expression query: {}".format(
                            beets_query
                        )
                    )
                    query_parts.append(beets_query)
                else:
                    # in all other cases: use non-regex matching (if requested)
                    exact_query_list.append((key, value))
        # add sorting fields
        for sort_field in sort_fields or []:
            if (len(sort_field) > 1) and (sort_field[-1] in ("-", "+")):
                query_parts.append(quote_and_encode(sort_field))
            else:
                logger.info(
                    "Beets - invalid sorting field ignored: %s", sort_field
                )
        query_string = "/".join(query_parts)
        query_url = "{0}/query/{1}".format(base_path, query_string)
        logger.debug("Beets query: %s", query_url)
        # tolerate a failed request: _get returns None on errors
        response = self._get(query_url)
        items = response["results"] if response else []
        if exact_text:
            # verify that text attributes do not just test 'is in', but match
            # equality
            for key, value in exact_query_list:
                if key is None:
                    # the value must match one of the item attributes
                    items = [item for item in items if value in item.values()]
                else:
                    # filtering is necessary only for text based attributes
                    if items and isinstance(items[0][key], str):
                        items = [item for item in items if item[key] == value]
        return items

    @cache()
    def get_artists(self):
        """ returns all artists of one or more tracks """
        # tolerate a failed request: _get returns None on errors
        names = (self._get("/artist/") or {}).get("artist_names") or []
        names.sort()
        # remove empty names
        return [name for name in names if name]

    def get_sorted_unique_track_attributes(self, field):
        """Return the sorted unique values of a track attribute."""
        sort_field = {"albumartist": "albumartist_sort"}.get(field, field)
        return self._get_unique_attribute_values("/item", field, sort_field)

    def get_sorted_unique_album_attributes(self, field):
        """Return the sorted unique values of an album attribute."""
        sort_field = {"albumartist": "albumartist_sort"}.get(field, field)
        return self._get_unique_attribute_values("/album", field, sort_field)

    @cache(ctl=32)
    def _get_unique_attribute_values(self, base_url, field, sort_field):
        """ returns all artists, genres, ... of tracks or albums """
        # NOTE: the previous code set a double-underscore attribute (which is
        # name-mangled to _BeetsRemoteClient__legacy_beets_api_detected) but
        # checked hasattr() with the unmangled string - the flag was never
        # found, so the warning and the failing request were repeated on
        # every call.  A single-underscore attribute avoids the mangling.
        if not getattr(self, "_legacy_beets_api_detected", False):
            try:
                result = self._get(
                    "{0}/values/{1}?sort_key={2}".format(
                        base_url, field, sort_field
                    ),
                    raise_not_found=True,
                )
            except KeyError:
                # The above URL was added to the Beets API after v1.3.17
                # Probably we are working against an older version.
                logger.warning(
                    "Failed to use the /item/unique/KEY feature of the Beets "
                    "API (introduced in v1.3.18). Falling back to the "
                    "slower and more ressource intensive manual approach. "
                    "Please upgrade Beets, if possible."
                )
                # Warn only once and use the manual approach for all future
                # requests.
                self._legacy_beets_api_detected = True
                # continue below with the fallback
            else:
                return result["values"]
        # Fallback: use manual filtering (requires too much time and memory for
        # most collections).
        sorted_items = self._get("{0}/query/{1}+".format(base_url, sort_field))[
            "results"
        ]
        # extract the wanted field and remove all duplicates
        unique_values = []
        for item in sorted_items:
            value = item[field]
            if not unique_values or (value != unique_values[-1]):
                unique_values.append(value)
        return unique_values

    def get_track_stream_url(self, track_id):
        """Return the stream URL for a track (no request is issued)."""
        return "{0}/item/{1}/file".format(self.api_endpoint, track_id)

    @cache(ctl=32)
    def get_album_art_url(self, album_id):
        # Sadly we cannot determine, if the Beets library really contains album
        # art. Thus we need to ask for it and check the status code.
        url = "{0}/album/{1}/art".format(self.api_endpoint, album_id)
        try:
            # Use the configured session (honours the proxy settings, unlike
            # the previous raw urllib request); stream to avoid downloading
            # the image body just for a status check.
            response = self.api.get(url, stream=True)
        except RequestException:
            # DNS problem or similar
            return None
        response.close()
        return url if response.status_code == 200 else None

    def _get(self, url, raise_not_found=False):
        """Issue a GET request and return the decoded JSON body.

        Returns None on connection errors or non-200 responses.
        @raises KeyError: on a 404 response if raise_not_found is True
        """
        url = self.api_endpoint + url
        # lazy log formatting, consistent with the rest of the module
        logger.debug("Beets - requesting %s", url)
        try:
            req = self.api.get(url)
        except RequestException as e:
            logger.error("Beets - Request %s, failed with error %s", url, e)
            return None
        if req.status_code != 200:
            logger.error(
                "Beets - Request %s, failed with status code %s",
                url,
                req.status_code,
            )
            if (req.status_code == 404) and raise_not_found:
                # sometimes we need to distinguish empty and 'not found'
                raise KeyError("URL not found: %s" % url)
            else:
                return None
        else:
            return req.json()

    def _parse_multiple_albums(self, album_datasets):
        """Parse album datasets, skipping (and logging) malformed ones."""
        albums = []
        for dataset in album_datasets or []:
            try:
                albums.append(parse_album(dataset, self))
            except (ValueError, KeyError) as exc:
                logger.info("Beets - Failed to parse album data: %s", exc)
        return [album for album in albums if album]

    def _parse_multiple_tracks(self, track_datasets):
        """Parse track datasets, skipping (and logging) malformed ones."""
        tracks = []
        for dataset in track_datasets or []:
            try:
                tracks.append(parse_track(dataset, self))
            except (ValueError, KeyError) as exc:
                logger.info("Beets - Failed to parse track data: %s", exc)
        return [track for track in tracks if track]