diff --git a/resources/lib/mutagen/__init__.py b/resources/lib/mutagen/__init__.py deleted file mode 100644 index 03ad7aee..00000000 --- a/resources/lib/mutagen/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Michael Urman -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - - -"""Mutagen aims to be an all purpose multimedia tagging library. - -:: - - import mutagen.[format] - metadata = mutagen.[format].Open(filename) - -`metadata` acts like a dictionary of tags in the file. Tags are generally a -list of string-like values, but may have additional methods available -depending on tag or format. They may also be entirely different objects -for certain keys, again depending on format. -""" - -from mutagen._util import MutagenError -from mutagen._file import FileType, StreamInfo, File -from mutagen._tags import Metadata, PaddingInfo - -version = (1, 31) -"""Version tuple.""" - -version_string = ".".join(map(str, version)) -"""Version string.""" - -MutagenError - -FileType - -StreamInfo - -File - -Metadata - -PaddingInfo diff --git a/resources/lib/mutagen/__pycache__/__init__.cpython-35.pyc b/resources/lib/mutagen/__pycache__/__init__.cpython-35.pyc deleted file mode 100644 index 0d767fdc..00000000 Binary files a/resources/lib/mutagen/__pycache__/__init__.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/_compat.cpython-35.pyc b/resources/lib/mutagen/__pycache__/_compat.cpython-35.pyc deleted file mode 100644 index 93f423d5..00000000 Binary files a/resources/lib/mutagen/__pycache__/_compat.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/_constants.cpython-35.pyc b/resources/lib/mutagen/__pycache__/_constants.cpython-35.pyc deleted file mode 100644 index 368544dc..00000000 Binary files a/resources/lib/mutagen/__pycache__/_constants.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/_file.cpython-35.pyc b/resources/lib/mutagen/__pycache__/_file.cpython-35.pyc deleted file mode 100644 index 2e8bbe11..00000000 Binary files a/resources/lib/mutagen/__pycache__/_file.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/_mp3util.cpython-35.pyc b/resources/lib/mutagen/__pycache__/_mp3util.cpython-35.pyc deleted file mode 100644 index e242cafa..00000000 Binary files a/resources/lib/mutagen/__pycache__/_mp3util.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/_tags.cpython-35.pyc b/resources/lib/mutagen/__pycache__/_tags.cpython-35.pyc deleted file mode 100644 index b49b1dfc..00000000 Binary files a/resources/lib/mutagen/__pycache__/_tags.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/_toolsutil.cpython-35.pyc b/resources/lib/mutagen/__pycache__/_toolsutil.cpython-35.pyc deleted file mode 100644 index cabceb1e..00000000 Binary files a/resources/lib/mutagen/__pycache__/_toolsutil.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/_util.cpython-35.pyc b/resources/lib/mutagen/__pycache__/_util.cpython-35.pyc deleted file mode 100644 index 199c081b..00000000 Binary files a/resources/lib/mutagen/__pycache__/_util.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/_vorbis.cpython-35.pyc b/resources/lib/mutagen/__pycache__/_vorbis.cpython-35.pyc deleted file mode 100644 
index 5a667067..00000000 Binary files a/resources/lib/mutagen/__pycache__/_vorbis.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/aac.cpython-35.pyc b/resources/lib/mutagen/__pycache__/aac.cpython-35.pyc deleted file mode 100644 index c976f6f4..00000000 Binary files a/resources/lib/mutagen/__pycache__/aac.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/aiff.cpython-35.pyc b/resources/lib/mutagen/__pycache__/aiff.cpython-35.pyc deleted file mode 100644 index f9a42314..00000000 Binary files a/resources/lib/mutagen/__pycache__/aiff.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/apev2.cpython-35.pyc b/resources/lib/mutagen/__pycache__/apev2.cpython-35.pyc deleted file mode 100644 index 943ab32c..00000000 Binary files a/resources/lib/mutagen/__pycache__/apev2.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/easyid3.cpython-35.pyc b/resources/lib/mutagen/__pycache__/easyid3.cpython-35.pyc deleted file mode 100644 index a1031ba9..00000000 Binary files a/resources/lib/mutagen/__pycache__/easyid3.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/easymp4.cpython-35.pyc b/resources/lib/mutagen/__pycache__/easymp4.cpython-35.pyc deleted file mode 100644 index f970678f..00000000 Binary files a/resources/lib/mutagen/__pycache__/easymp4.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/flac.cpython-35.pyc b/resources/lib/mutagen/__pycache__/flac.cpython-35.pyc deleted file mode 100644 index 703d5882..00000000 Binary files a/resources/lib/mutagen/__pycache__/flac.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/m4a.cpython-35.pyc b/resources/lib/mutagen/__pycache__/m4a.cpython-35.pyc deleted file mode 100644 index 43ede4e2..00000000 Binary files a/resources/lib/mutagen/__pycache__/m4a.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/monkeysaudio.cpython-35.pyc b/resources/lib/mutagen/__pycache__/monkeysaudio.cpython-35.pyc deleted file mode 100644 index 8c719f0e..00000000 Binary files a/resources/lib/mutagen/__pycache__/monkeysaudio.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/mp3.cpython-35.pyc b/resources/lib/mutagen/__pycache__/mp3.cpython-35.pyc deleted file mode 100644 index 8a0be643..00000000 Binary files a/resources/lib/mutagen/__pycache__/mp3.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/musepack.cpython-35.pyc b/resources/lib/mutagen/__pycache__/musepack.cpython-35.pyc deleted file mode 100644 index cf4ca9e5..00000000 Binary files a/resources/lib/mutagen/__pycache__/musepack.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/ogg.cpython-35.pyc b/resources/lib/mutagen/__pycache__/ogg.cpython-35.pyc deleted file mode 100644 index 0eb57b0c..00000000 Binary files a/resources/lib/mutagen/__pycache__/ogg.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/oggflac.cpython-35.pyc b/resources/lib/mutagen/__pycache__/oggflac.cpython-35.pyc deleted file mode 100644 index ab7dadf0..00000000 Binary files a/resources/lib/mutagen/__pycache__/oggflac.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/oggopus.cpython-35.pyc b/resources/lib/mutagen/__pycache__/oggopus.cpython-35.pyc deleted file mode 100644 index fa6e46fa..00000000 Binary files 
a/resources/lib/mutagen/__pycache__/oggopus.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/oggspeex.cpython-35.pyc b/resources/lib/mutagen/__pycache__/oggspeex.cpython-35.pyc deleted file mode 100644 index 9cb15665..00000000 Binary files a/resources/lib/mutagen/__pycache__/oggspeex.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/oggtheora.cpython-35.pyc b/resources/lib/mutagen/__pycache__/oggtheora.cpython-35.pyc deleted file mode 100644 index a7a4e557..00000000 Binary files a/resources/lib/mutagen/__pycache__/oggtheora.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/oggvorbis.cpython-35.pyc b/resources/lib/mutagen/__pycache__/oggvorbis.cpython-35.pyc deleted file mode 100644 index 9acf6dc5..00000000 Binary files a/resources/lib/mutagen/__pycache__/oggvorbis.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/optimfrog.cpython-35.pyc b/resources/lib/mutagen/__pycache__/optimfrog.cpython-35.pyc deleted file mode 100644 index 0bcbbb85..00000000 Binary files a/resources/lib/mutagen/__pycache__/optimfrog.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/trueaudio.cpython-35.pyc b/resources/lib/mutagen/__pycache__/trueaudio.cpython-35.pyc deleted file mode 100644 index 629b654b..00000000 Binary files a/resources/lib/mutagen/__pycache__/trueaudio.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/__pycache__/wavpack.cpython-35.pyc b/resources/lib/mutagen/__pycache__/wavpack.cpython-35.pyc deleted file mode 100644 index 2b9be214..00000000 Binary files a/resources/lib/mutagen/__pycache__/wavpack.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/_compat.py b/resources/lib/mutagen/_compat.py deleted file mode 100644 index 77c465f1..00000000 --- a/resources/lib/mutagen/_compat.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2013 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. 
- -import sys - - -PY2 = sys.version_info[0] == 2 -PY3 = not PY2 - -if PY2: - from StringIO import StringIO - BytesIO = StringIO - from cStringIO import StringIO as cBytesIO - from itertools import izip - - long_ = long - integer_types = (int, long) - string_types = (str, unicode) - text_type = unicode - - xrange = xrange - cmp = cmp - chr_ = chr - - def endswith(text, end): - return text.endswith(end) - - iteritems = lambda d: d.iteritems() - itervalues = lambda d: d.itervalues() - iterkeys = lambda d: d.iterkeys() - - iterbytes = lambda b: iter(b) - - exec("def reraise(tp, value, tb):\n raise tp, value, tb") - - def swap_to_string(cls): - if "__str__" in cls.__dict__: - cls.__unicode__ = cls.__str__ - - if "__bytes__" in cls.__dict__: - cls.__str__ = cls.__bytes__ - - return cls - -elif PY3: - from io import StringIO - StringIO = StringIO - from io import BytesIO - cBytesIO = BytesIO - - long_ = int - integer_types = (int,) - string_types = (str,) - text_type = str - - izip = zip - xrange = range - cmp = lambda a, b: (a > b) - (a < b) - chr_ = lambda x: bytes([x]) - - def endswith(text, end): - # usefull for paths which can be both, str and bytes - if isinstance(text, str): - if not isinstance(end, str): - end = end.decode("ascii") - else: - if not isinstance(end, bytes): - end = end.encode("ascii") - return text.endswith(end) - - iteritems = lambda d: iter(d.items()) - itervalues = lambda d: iter(d.values()) - iterkeys = lambda d: iter(d.keys()) - - iterbytes = lambda b: (bytes([v]) for v in b) - - def reraise(tp, value, tb): - raise tp(value).with_traceback(tb) - - def swap_to_string(cls): - return cls diff --git a/resources/lib/mutagen/_constants.py b/resources/lib/mutagen/_constants.py deleted file mode 100644 index 62c1ce02..00000000 --- a/resources/lib/mutagen/_constants.py +++ /dev/null @@ -1,199 +0,0 @@ -# -*- coding: utf-8 -*- - -"""Constants used by Mutagen.""" - -GENRES = [ - u"Blues", - u"Classic Rock", - u"Country", - u"Dance", - u"Disco", - u"Funk", - u"Grunge", - u"Hip-Hop", - u"Jazz", - u"Metal", - u"New Age", - u"Oldies", - u"Other", - u"Pop", - u"R&B", - u"Rap", - u"Reggae", - u"Rock", - u"Techno", - u"Industrial", - u"Alternative", - u"Ska", - u"Death Metal", - u"Pranks", - u"Soundtrack", - u"Euro-Techno", - u"Ambient", - u"Trip-Hop", - u"Vocal", - u"Jazz+Funk", - u"Fusion", - u"Trance", - u"Classical", - u"Instrumental", - u"Acid", - u"House", - u"Game", - u"Sound Clip", - u"Gospel", - u"Noise", - u"Alt. 
Rock", - u"Bass", - u"Soul", - u"Punk", - u"Space", - u"Meditative", - u"Instrumental Pop", - u"Instrumental Rock", - u"Ethnic", - u"Gothic", - u"Darkwave", - u"Techno-Industrial", - u"Electronic", - u"Pop-Folk", - u"Eurodance", - u"Dream", - u"Southern Rock", - u"Comedy", - u"Cult", - u"Gangsta Rap", - u"Top 40", - u"Christian Rap", - u"Pop/Funk", - u"Jungle", - u"Native American", - u"Cabaret", - u"New Wave", - u"Psychedelic", - u"Rave", - u"Showtunes", - u"Trailer", - u"Lo-Fi", - u"Tribal", - u"Acid Punk", - u"Acid Jazz", - u"Polka", - u"Retro", - u"Musical", - u"Rock & Roll", - u"Hard Rock", - u"Folk", - u"Folk-Rock", - u"National Folk", - u"Swing", - u"Fast-Fusion", - u"Bebop", - u"Latin", - u"Revival", - u"Celtic", - u"Bluegrass", - u"Avantgarde", - u"Gothic Rock", - u"Progressive Rock", - u"Psychedelic Rock", - u"Symphonic Rock", - u"Slow Rock", - u"Big Band", - u"Chorus", - u"Easy Listening", - u"Acoustic", - u"Humour", - u"Speech", - u"Chanson", - u"Opera", - u"Chamber Music", - u"Sonata", - u"Symphony", - u"Booty Bass", - u"Primus", - u"Porn Groove", - u"Satire", - u"Slow Jam", - u"Club", - u"Tango", - u"Samba", - u"Folklore", - u"Ballad", - u"Power Ballad", - u"Rhythmic Soul", - u"Freestyle", - u"Duet", - u"Punk Rock", - u"Drum Solo", - u"A Cappella", - u"Euro-House", - u"Dance Hall", - u"Goa", - u"Drum & Bass", - u"Club-House", - u"Hardcore", - u"Terror", - u"Indie", - u"BritPop", - u"Afro-Punk", - u"Polsk Punk", - u"Beat", - u"Christian Gangsta Rap", - u"Heavy Metal", - u"Black Metal", - u"Crossover", - u"Contemporary Christian", - u"Christian Rock", - u"Merengue", - u"Salsa", - u"Thrash Metal", - u"Anime", - u"JPop", - u"Synthpop", - u"Abstract", - u"Art Rock", - u"Baroque", - u"Bhangra", - u"Big Beat", - u"Breakbeat", - u"Chillout", - u"Downtempo", - u"Dub", - u"EBM", - u"Eclectic", - u"Electro", - u"Electroclash", - u"Emo", - u"Experimental", - u"Garage", - u"Global", - u"IDM", - u"Illbient", - u"Industro-Goth", - u"Jam Band", - u"Krautrock", - u"Leftfield", - u"Lounge", - u"Math Rock", - u"New Romantic", - u"Nu-Breakz", - u"Post-Punk", - u"Post-Rock", - u"Psytrance", - u"Shoegaze", - u"Space Rock", - u"Trop Rock", - u"World Music", - u"Neoclassical", - u"Audiobook", - u"Audio Theatre", - u"Neue Deutsche Welle", - u"Podcast", - u"Indie Rock", - u"G-Funk", - u"Dubstep", - u"Garage Rock", - u"Psybient", -] -"""The ID3v1 genre list.""" diff --git a/resources/lib/mutagen/_file.py b/resources/lib/mutagen/_file.py deleted file mode 100644 index 5daa2521..00000000 --- a/resources/lib/mutagen/_file.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (C) 2005 Michael Urman -# -*- coding: utf-8 -*- -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -import warnings - -from mutagen._util import DictMixin -from mutagen._compat import izip - - -class FileType(DictMixin): - """An abstract object wrapping tags and audio stream information. - - Attributes: - - * info -- stream information (length, bitrate, sample rate) - * tags -- metadata tags, if any - - Each file format has different potential tags and stream - information. - - FileTypes implement an interface very similar to Metadata; the - dict interface, save, load, and delete calls on a FileType call - the appropriate methods on its tag data. 
- """ - - __module__ = "mutagen" - - info = None - tags = None - filename = None - _mimes = ["application/octet-stream"] - - def __init__(self, filename=None, *args, **kwargs): - if filename is None: - warnings.warn("FileType constructor requires a filename", - DeprecationWarning) - else: - self.load(filename, *args, **kwargs) - - def load(self, filename, *args, **kwargs): - raise NotImplementedError - - def __getitem__(self, key): - """Look up a metadata tag key. - - If the file has no tags at all, a KeyError is raised. - """ - - if self.tags is None: - raise KeyError(key) - else: - return self.tags[key] - - def __setitem__(self, key, value): - """Set a metadata tag. - - If the file has no tags, an appropriate format is added (but - not written until save is called). - """ - - if self.tags is None: - self.add_tags() - self.tags[key] = value - - def __delitem__(self, key): - """Delete a metadata tag key. - - If the file has no tags at all, a KeyError is raised. - """ - - if self.tags is None: - raise KeyError(key) - else: - del(self.tags[key]) - - def keys(self): - """Return a list of keys in the metadata tag. - - If the file has no tags at all, an empty list is returned. - """ - - if self.tags is None: - return [] - else: - return self.tags.keys() - - def delete(self, filename=None): - """Remove tags from a file. - - In cases where the tagging format is independent of the file type - (for example `mutagen.ID3`) all traces of the tagging format will - be removed. - In cases where the tag is part of the file type, all tags and - padding will be removed. - - The tags attribute will be cleared as well if there is one. - - Does nothing if the file has no tags. - - :raises mutagen.MutagenError: if deleting wasn't possible - """ - - if self.tags is not None: - if filename is None: - filename = self.filename - else: - warnings.warn( - "delete(filename=...) is deprecated, reload the file", - DeprecationWarning) - return self.tags.delete(filename) - - def save(self, filename=None, **kwargs): - """Save metadata tags. - - :raises mutagen.MutagenError: if saving wasn't possible - """ - - if filename is None: - filename = self.filename - else: - warnings.warn( - "save(filename=...) is deprecated, reload the file", - DeprecationWarning) - - if self.tags is not None: - return self.tags.save(filename, **kwargs) - - def pprint(self): - """Print stream information and comment key=value pairs.""" - - stream = "%s (%s)" % (self.info.pprint(), self.mime[0]) - try: - tags = self.tags.pprint() - except AttributeError: - return stream - else: - return stream + ((tags and "\n" + tags) or "") - - def add_tags(self): - """Adds new tags to the file. - - :raises mutagen.MutagenError: if tags already exist or adding is not - possible. - """ - - raise NotImplementedError - - @property - def mime(self): - """A list of mime types""" - - mimes = [] - for Kind in type(self).__mro__: - for mime in getattr(Kind, '_mimes', []): - if mime not in mimes: - mimes.append(mime) - return mimes - - @staticmethod - def score(filename, fileobj, header): - raise NotImplementedError - - -class StreamInfo(object): - """Abstract stream information object. - - Provides attributes for length, bitrate, sample rate etc. - - See the implementations for details. - """ - - __module__ = "mutagen" - - def pprint(self): - """Print stream information""" - - raise NotImplementedError - - -def File(filename, options=None, easy=False): - """Guess the type of the file and try to open it. 
- - The file type is decided by several things, such as the first 128 - bytes (which usually contains a file type identifier), the - filename extension, and the presence of existing tags. - - If no appropriate type could be found, None is returned. - - :param options: Sequence of :class:`FileType` implementations, defaults to - all included ones. - - :param easy: If the easy wrappers should be returnd if available. - For example :class:`EasyMP3 ` instead - of :class:`MP3 `. - """ - - if options is None: - from mutagen.asf import ASF - from mutagen.apev2 import APEv2File - from mutagen.flac import FLAC - if easy: - from mutagen.easyid3 import EasyID3FileType as ID3FileType - else: - from mutagen.id3 import ID3FileType - if easy: - from mutagen.mp3 import EasyMP3 as MP3 - else: - from mutagen.mp3 import MP3 - from mutagen.oggflac import OggFLAC - from mutagen.oggspeex import OggSpeex - from mutagen.oggtheora import OggTheora - from mutagen.oggvorbis import OggVorbis - from mutagen.oggopus import OggOpus - if easy: - from mutagen.trueaudio import EasyTrueAudio as TrueAudio - else: - from mutagen.trueaudio import TrueAudio - from mutagen.wavpack import WavPack - if easy: - from mutagen.easymp4 import EasyMP4 as MP4 - else: - from mutagen.mp4 import MP4 - from mutagen.musepack import Musepack - from mutagen.monkeysaudio import MonkeysAudio - from mutagen.optimfrog import OptimFROG - from mutagen.aiff import AIFF - from mutagen.aac import AAC - options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC, - FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack, - Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC] - - if not options: - return None - - with open(filename, "rb") as fileobj: - header = fileobj.read(128) - # Sort by name after score. Otherwise import order affects - # Kind sort order, which affects treatment of things with - # equals scores. - results = [(Kind.score(filename, fileobj, header), Kind.__name__) - for Kind in options] - - results = list(izip(results, options)) - results.sort() - (score, name), Kind = results[-1] - if score > 0: - return Kind(filename) - else: - return None diff --git a/resources/lib/mutagen/_mp3util.py b/resources/lib/mutagen/_mp3util.py deleted file mode 100644 index 409cadcb..00000000 --- a/resources/lib/mutagen/_mp3util.py +++ /dev/null @@ -1,420 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2015 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -""" -http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header -http://wiki.hydrogenaud.io/index.php?title=MP3 -""" - -from functools import partial - -from ._util import cdata, BitReader -from ._compat import xrange, iterbytes, cBytesIO - - -class LAMEError(Exception): - pass - - -class LAMEHeader(object): - """http://gabriel.mp3-tech.org/mp3infotag.html""" - - vbr_method = 0 - """0: unknown, 1: CBR, 2: ABR, 3/4/5: VBR, others: see the docs""" - - lowpass_filter = 0 - """lowpass filter value in Hz. 0 means unknown""" - - quality = -1 - """Encoding quality: 0..9""" - - vbr_quality = -1 - """VBR quality: 0..9""" - - track_peak = None - """Peak signal amplitude as float. 
None if unknown.""" - - track_gain_origin = 0 - """see the docs""" - - track_gain_adjustment = None - """Track gain adjustment as float (for 89db replay gain) or None""" - - album_gain_origin = 0 - """see the docs""" - - album_gain_adjustment = None - """Album gain adjustment as float (for 89db replay gain) or None""" - - encoding_flags = 0 - """see docs""" - - ath_type = -1 - """see docs""" - - bitrate = -1 - """Bitrate in kbps. For VBR the minimum bitrate, for anything else - (CBR, ABR, ..) the target bitrate. - """ - - encoder_delay_start = 0 - """Encoder delay in samples""" - - encoder_padding_end = 0 - """Padding in samples added at the end""" - - source_sample_frequency_enum = -1 - """see docs""" - - unwise_setting_used = False - """see docs""" - - stereo_mode = 0 - """see docs""" - - noise_shaping = 0 - """see docs""" - - mp3_gain = 0 - """Applied MP3 gain -127..127. Factor is 2 ** (mp3_gain / 4)""" - - surround_info = 0 - """see docs""" - - preset_used = 0 - """lame preset""" - - music_length = 0 - """Length in bytes excluding any ID3 tags""" - - music_crc = -1 - """CRC16 of the data specified by music_length""" - - header_crc = -1 - """CRC16 of this header and everything before (not checked)""" - - def __init__(self, xing, fileobj): - """Raises LAMEError if parsing fails""" - - payload = fileobj.read(27) - if len(payload) != 27: - raise LAMEError("Not enough data") - - # extended lame header - r = BitReader(cBytesIO(payload)) - revision = r.bits(4) - if revision != 0: - raise LAMEError("unsupported header revision %d" % revision) - - self.vbr_method = r.bits(4) - self.lowpass_filter = r.bits(8) * 100 - - # these have a different meaning for lame; expose them again here - self.quality = (100 - xing.vbr_scale) % 10 - self.vbr_quality = (100 - xing.vbr_scale) // 10 - - track_peak_data = r.bytes(4) - if track_peak_data == b"\x00\x00\x00\x00": - self.track_peak = None - else: - # see PutLameVBR() in LAME's VbrTag.c - self.track_peak = ( - cdata.uint32_be(track_peak_data) - 0.5) / 2 ** 23 - track_gain_type = r.bits(3) - self.track_gain_origin = r.bits(3) - sign = r.bits(1) - gain_adj = r.bits(9) / 10.0 - if sign: - gain_adj *= -1 - if track_gain_type == 1: - self.track_gain_adjustment = gain_adj - else: - self.track_gain_adjustment = None - assert r.is_aligned() - - album_gain_type = r.bits(3) - self.album_gain_origin = r.bits(3) - sign = r.bits(1) - album_gain_adj = r.bits(9) / 10.0 - if album_gain_type == 2: - self.album_gain_adjustment = album_gain_adj - else: - self.album_gain_adjustment = None - - self.encoding_flags = r.bits(4) - self.ath_type = r.bits(4) - - self.bitrate = r.bits(8) - - self.encoder_delay_start = r.bits(12) - self.encoder_padding_end = r.bits(12) - - self.source_sample_frequency_enum = r.bits(2) - self.unwise_setting_used = r.bits(1) - self.stereo_mode = r.bits(3) - self.noise_shaping = r.bits(2) - - sign = r.bits(1) - mp3_gain = r.bits(7) - if sign: - mp3_gain *= -1 - self.mp3_gain = mp3_gain - - r.skip(2) - self.surround_info = r.bits(3) - self.preset_used = r.bits(11) - self.music_length = r.bits(32) - self.music_crc = r.bits(16) - - self.header_crc = r.bits(16) - assert r.is_aligned() - - @classmethod - def parse_version(cls, fileobj): - """Returns a version string and True if a LAMEHeader follows. - The passed file object will be positioned right before the - lame header if True. - - Raises LAMEError if there is no lame version info. 
- """ - - # http://wiki.hydrogenaud.io/index.php?title=LAME_version_string - - data = fileobj.read(20) - if len(data) != 20: - raise LAMEError("Not a lame header") - if not data.startswith((b"LAME", b"L3.99")): - raise LAMEError("Not a lame header") - - data = data.lstrip(b"EMAL") - major, data = data[0:1], data[1:].lstrip(b".") - minor = b"" - for c in iterbytes(data): - if not c.isdigit(): - break - minor += c - data = data[len(minor):] - - try: - major = int(major.decode("ascii")) - minor = int(minor.decode("ascii")) - except ValueError: - raise LAMEError - - # the extended header was added sometimes in the 3.90 cycle - # e.g. "LAME3.90 (alpha)" should still stop here. - # (I have seen such a file) - if (major, minor) < (3, 90) or ( - (major, minor) == (3, 90) and data[-11:-10] == b"("): - flag = data.strip(b"\x00").rstrip().decode("ascii") - return u"%d.%d%s" % (major, minor, flag), False - - if len(data) <= 11: - raise LAMEError("Invalid version: too long") - - flag = data[:-11].rstrip(b"\x00") - - flag_string = u"" - patch = u"" - if flag == b"a": - flag_string = u" (alpha)" - elif flag == b"b": - flag_string = u" (beta)" - elif flag == b"r": - patch = u".1+" - elif flag == b" ": - if (major, minor) > (3, 96): - patch = u".0" - else: - patch = u".0+" - elif flag == b"" or flag == b".": - patch = u".0+" - else: - flag_string = u" (?)" - - # extended header, seek back to 9 bytes for the caller - fileobj.seek(-11, 1) - - return u"%d.%d%s%s" % (major, minor, patch, flag_string), True - - -class XingHeaderError(Exception): - pass - - -class XingHeaderFlags(object): - FRAMES = 0x1 - BYTES = 0x2 - TOC = 0x4 - VBR_SCALE = 0x8 - - -class XingHeader(object): - - frames = -1 - """Number of frames, -1 if unknown""" - - bytes = -1 - """Number of bytes, -1 if unknown""" - - toc = [] - """List of 100 file offsets in percent encoded as 0-255. E.g. entry - 50 contains the file offset in percent at 50% play time. - Empty if unknown. - """ - - vbr_scale = -1 - """VBR quality indicator 0-100. -1 if unknown""" - - lame_header = None - """A LAMEHeader instance or None""" - - lame_version = u"" - """The version of the LAME encoder e.g. '3.99.0'. Empty if unknown""" - - is_info = False - """If the header started with 'Info' and not 'Xing'""" - - def __init__(self, fileobj): - """Parses the Xing header or raises XingHeaderError. - - The file position after this returns is undefined. 
- """ - - data = fileobj.read(8) - if len(data) != 8 or data[:4] not in (b"Xing", b"Info"): - raise XingHeaderError("Not a Xing header") - - self.is_info = (data[:4] == b"Info") - - flags = cdata.uint32_be_from(data, 4)[0] - - if flags & XingHeaderFlags.FRAMES: - data = fileobj.read(4) - if len(data) != 4: - raise XingHeaderError("Xing header truncated") - self.frames = cdata.uint32_be(data) - - if flags & XingHeaderFlags.BYTES: - data = fileobj.read(4) - if len(data) != 4: - raise XingHeaderError("Xing header truncated") - self.bytes = cdata.uint32_be(data) - - if flags & XingHeaderFlags.TOC: - data = fileobj.read(100) - if len(data) != 100: - raise XingHeaderError("Xing header truncated") - self.toc = list(bytearray(data)) - - if flags & XingHeaderFlags.VBR_SCALE: - data = fileobj.read(4) - if len(data) != 4: - raise XingHeaderError("Xing header truncated") - self.vbr_scale = cdata.uint32_be(data) - - try: - self.lame_version, has_header = LAMEHeader.parse_version(fileobj) - if has_header: - self.lame_header = LAMEHeader(self, fileobj) - except LAMEError: - pass - - @classmethod - def get_offset(cls, info): - """Calculate the offset to the Xing header from the start of the - MPEG header including sync based on the MPEG header's content. - """ - - assert info.layer == 3 - - if info.version == 1: - if info.mode != 3: - return 36 - else: - return 21 - else: - if info.mode != 3: - return 21 - else: - return 13 - - -class VBRIHeaderError(Exception): - pass - - -class VBRIHeader(object): - - version = 0 - """VBRI header version""" - - quality = 0 - """Quality indicator""" - - bytes = 0 - """Number of bytes""" - - frames = 0 - """Number of frames""" - - toc_scale_factor = 0 - """Scale factor of TOC entries""" - - toc_frames = 0 - """Number of frames per table entry""" - - toc = [] - """TOC""" - - def __init__(self, fileobj): - """Reads the VBRI header or raises VBRIHeaderError. - - The file position is undefined after this returns - """ - - data = fileobj.read(26) - if len(data) != 26 or not data.startswith(b"VBRI"): - raise VBRIHeaderError("Not a VBRI header") - - offset = 4 - self.version, offset = cdata.uint16_be_from(data, offset) - if self.version != 1: - raise VBRIHeaderError( - "Unsupported header version: %r" % self.version) - - offset += 2 # float16.. 
can't do - self.quality, offset = cdata.uint16_be_from(data, offset) - self.bytes, offset = cdata.uint32_be_from(data, offset) - self.frames, offset = cdata.uint32_be_from(data, offset) - - toc_num_entries, offset = cdata.uint16_be_from(data, offset) - self.toc_scale_factor, offset = cdata.uint16_be_from(data, offset) - toc_entry_size, offset = cdata.uint16_be_from(data, offset) - self.toc_frames, offset = cdata.uint16_be_from(data, offset) - toc_size = toc_entry_size * toc_num_entries - toc_data = fileobj.read(toc_size) - if len(toc_data) != toc_size: - raise VBRIHeaderError("VBRI header truncated") - - self.toc = [] - if toc_entry_size == 2: - unpack = partial(cdata.uint16_be_from, toc_data) - elif toc_entry_size == 4: - unpack = partial(cdata.uint32_be_from, toc_data) - else: - raise VBRIHeaderError("Invalid TOC entry size") - - self.toc = [unpack(i)[0] for i in xrange(0, toc_size, toc_entry_size)] - - @classmethod - def get_offset(cls, info): - """Offset in bytes from the start of the MPEG header including sync""" - - assert info.layer == 3 - - return 36 diff --git a/resources/lib/mutagen/_tags.py b/resources/lib/mutagen/_tags.py deleted file mode 100644 index ce250adf..00000000 --- a/resources/lib/mutagen/_tags.py +++ /dev/null @@ -1,101 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2005 Michael Urman -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - - -class PaddingInfo(object): - """Abstract padding information object. - - This will be passed to the callback function that can be used - for saving tags. - - :: - - def my_callback(info: PaddingInfo): - return info.get_default_padding() - - The callback should return the amount of padding to use (>= 0) based on - the content size and the padding of the file after saving. The actual used - amount of padding might vary depending on the file format (due to - alignment etc.) - - The default implementation can be accessed using the - :meth:`get_default_padding` method in the callback. - """ - - padding = 0 - """The amount of padding left after saving in bytes (can be negative if - more data needs to be added as padding is available) - """ - - size = 0 - """The amount of data following the padding""" - - def __init__(self, padding, size): - self.padding = padding - self.size = size - - def get_default_padding(self): - """The default implementation which tries to select a reasonable - amount of padding and which might change in future versions. - - :return: Amount of padding after saving - :rtype: int - """ - - high = 1024 * 10 + self.size // 100 # 10 KiB + 1% of trailing data - low = 1024 + self.size // 1000 # 1 KiB + 0.1% of trailing data - - if self.padding >= 0: - # enough padding left - if self.padding > high: - # padding too large, reduce - return low - # just use existing padding as is - return self.padding - else: - # not enough padding, add some - return low - - def _get_padding(self, user_func): - if user_func is None: - return self.get_default_padding() - else: - return user_func(self) - - def __repr__(self): - return "<%s size=%d padding=%d>" % ( - type(self).__name__, self.size, self.padding) - - -class Metadata(object): - """An abstract dict-like object. - - Metadata is the base class for many of the tag objects in Mutagen. 
- """ - - __module__ = "mutagen" - - def __init__(self, *args, **kwargs): - if args or kwargs: - self.load(*args, **kwargs) - - def load(self, *args, **kwargs): - raise NotImplementedError - - def save(self, filename=None): - """Save changes to a file.""" - - raise NotImplementedError - - def delete(self, filename=None): - """Remove tags from a file. - - In most cases this means any traces of the tag will be removed - from the file. - """ - - raise NotImplementedError diff --git a/resources/lib/mutagen/_toolsutil.py b/resources/lib/mutagen/_toolsutil.py deleted file mode 100644 index e9074b71..00000000 --- a/resources/lib/mutagen/_toolsutil.py +++ /dev/null @@ -1,231 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2015 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -import os -import sys -import signal -import locale -import contextlib -import optparse -import ctypes - -from ._compat import text_type, PY2, PY3, iterbytes - - -def split_escape(string, sep, maxsplit=None, escape_char="\\"): - """Like unicode/str/bytes.split but allows for the separator to be escaped - - If passed unicode/str/bytes will only return list of unicode/str/bytes. - """ - - assert len(sep) == 1 - assert len(escape_char) == 1 - - if isinstance(string, bytes): - if isinstance(escape_char, text_type): - escape_char = escape_char.encode("ascii") - iter_ = iterbytes - else: - iter_ = iter - - if maxsplit is None: - maxsplit = len(string) - - empty = string[:0] - result = [] - current = empty - escaped = False - for char in iter_(string): - if escaped: - if char != escape_char and char != sep: - current += escape_char - current += char - escaped = False - else: - if char == escape_char: - escaped = True - elif char == sep and len(result) < maxsplit: - result.append(current) - current = empty - else: - current += char - result.append(current) - return result - - -class SignalHandler(object): - - def __init__(self): - self._interrupted = False - self._nosig = False - self._init = False - - def init(self): - signal.signal(signal.SIGINT, self._handler) - signal.signal(signal.SIGTERM, self._handler) - if os.name != "nt": - signal.signal(signal.SIGHUP, self._handler) - - def _handler(self, signum, frame): - self._interrupted = True - if not self._nosig: - raise SystemExit("Aborted...") - - @contextlib.contextmanager - def block(self): - """While this context manager is active any signals for aborting - the process will be queued and exit the program once the context - is left. 
- """ - - self._nosig = True - yield - self._nosig = False - if self._interrupted: - raise SystemExit("Aborted...") - - -def get_win32_unicode_argv(): - """Returns a unicode argv under Windows and standard sys.argv otherwise""" - - if os.name != "nt" or not PY2: - return sys.argv - - import ctypes - from ctypes import cdll, windll, wintypes - - GetCommandLineW = cdll.kernel32.GetCommandLineW - GetCommandLineW.argtypes = [] - GetCommandLineW.restype = wintypes.LPCWSTR - - CommandLineToArgvW = windll.shell32.CommandLineToArgvW - CommandLineToArgvW.argtypes = [ - wintypes.LPCWSTR, ctypes.POINTER(ctypes.c_int)] - CommandLineToArgvW.restype = ctypes.POINTER(wintypes.LPWSTR) - - LocalFree = windll.kernel32.LocalFree - LocalFree.argtypes = [wintypes.HLOCAL] - LocalFree.restype = wintypes.HLOCAL - - argc = ctypes.c_int() - argv = CommandLineToArgvW(GetCommandLineW(), ctypes.byref(argc)) - if not argv: - return - - res = argv[max(0, argc.value - len(sys.argv)):argc.value] - - LocalFree(argv) - - return res - - -def fsencoding(): - """The encoding used for paths, argv, environ, stdout and stdin""" - - if os.name == "nt": - return "" - - return locale.getpreferredencoding() or "utf-8" - - -def fsnative(text=u""): - """Returns the passed text converted to the preferred path type - for each platform. - """ - - assert isinstance(text, text_type) - - if os.name == "nt" or PY3: - return text - else: - return text.encode(fsencoding(), "replace") - return text - - -def is_fsnative(arg): - """If the passed value is of the preferred path type for each platform. - Note that on Python3+linux, paths can be bytes or str but this returns - False for bytes there. - """ - - if PY3 or os.name == "nt": - return isinstance(arg, text_type) - else: - return isinstance(arg, bytes) - - -def print_(*objects, **kwargs): - """A print which supports bytes and str+surrogates under python3. - - Needed so we can print anything passed to us through argv and environ. - Under Windows only text_type is allowed. - - Arguments: - objects: one or more bytes/text - linesep (bool): whether a line separator should be appended - sep (bool): whether objects should be printed separated by spaces - """ - - linesep = kwargs.pop("linesep", True) - sep = kwargs.pop("sep", True) - file_ = kwargs.pop("file", None) - if file_ is None: - file_ = sys.stdout - - old_cp = None - if os.name == "nt": - # Try to force the output to cp65001 aka utf-8. 
- # If that fails use the current one (most likely cp850, so - # most of unicode will be replaced with '?') - encoding = "utf-8" - old_cp = ctypes.windll.kernel32.GetConsoleOutputCP() - if ctypes.windll.kernel32.SetConsoleOutputCP(65001) == 0: - encoding = getattr(sys.stdout, "encoding", None) or "utf-8" - old_cp = None - else: - encoding = fsencoding() - - try: - if linesep: - objects = list(objects) + [os.linesep] - - parts = [] - for text in objects: - if isinstance(text, text_type): - if PY3: - try: - text = text.encode(encoding, 'surrogateescape') - except UnicodeEncodeError: - text = text.encode(encoding, 'replace') - else: - text = text.encode(encoding, 'replace') - parts.append(text) - - data = (b" " if sep else b"").join(parts) - try: - fileno = file_.fileno() - except (AttributeError, OSError, ValueError): - # for tests when stdout is replaced - try: - file_.write(data) - except TypeError: - file_.write(data.decode(encoding, "replace")) - else: - file_.flush() - os.write(fileno, data) - finally: - # reset the code page to what we had before - if old_cp is not None: - ctypes.windll.kernel32.SetConsoleOutputCP(old_cp) - - -class OptionParser(optparse.OptionParser): - """OptionParser subclass which supports printing Unicode under Windows""" - - def print_help(self, file=None): - print_(self.format_help(), file=file) diff --git a/resources/lib/mutagen/_util.py b/resources/lib/mutagen/_util.py deleted file mode 100644 index f05ff454..00000000 --- a/resources/lib/mutagen/_util.py +++ /dev/null @@ -1,550 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Utility classes for Mutagen. - -You should not rely on the interfaces here being stable. They are -intended for internal use in Mutagen only. -""" - -import struct -import codecs - -from fnmatch import fnmatchcase - -from ._compat import chr_, PY2, iteritems, iterbytes, integer_types, xrange, \ - izip - - -class MutagenError(Exception): - """Base class for all custom exceptions in mutagen - - .. versionadded:: 1.25 - """ - - __module__ = "mutagen" - - -def total_ordering(cls): - assert "__eq__" in cls.__dict__ - assert "__lt__" in cls.__dict__ - - cls.__le__ = lambda self, other: self == other or self < other - cls.__gt__ = lambda self, other: not (self == other or self < other) - cls.__ge__ = lambda self, other: not self < other - cls.__ne__ = lambda self, other: not self.__eq__(other) - - return cls - - -def hashable(cls): - """Makes sure the class is hashable. - - Needs a working __eq__ and __hash__ and will add a __ne__. 
- """ - - # py2 - assert "__hash__" in cls.__dict__ - # py3 - assert cls.__dict__["__hash__"] is not None - assert "__eq__" in cls.__dict__ - - cls.__ne__ = lambda self, other: not self.__eq__(other) - - return cls - - -def enum(cls): - assert cls.__bases__ == (object,) - - d = dict(cls.__dict__) - new_type = type(cls.__name__, (int,), d) - new_type.__module__ = cls.__module__ - - map_ = {} - for key, value in iteritems(d): - if key.upper() == key and isinstance(value, integer_types): - value_instance = new_type(value) - setattr(new_type, key, value_instance) - map_[value] = key - - def str_(self): - if self in map_: - return "%s.%s" % (type(self).__name__, map_[self]) - return "%d" % int(self) - - def repr_(self): - if self in map_: - return "<%s.%s: %d>" % (type(self).__name__, map_[self], int(self)) - return "%d" % int(self) - - setattr(new_type, "__repr__", repr_) - setattr(new_type, "__str__", str_) - - return new_type - - -@total_ordering -class DictMixin(object): - """Implement the dict API using keys() and __*item__ methods. - - Similar to UserDict.DictMixin, this takes a class that defines - __getitem__, __setitem__, __delitem__, and keys(), and turns it - into a full dict-like object. - - UserDict.DictMixin is not suitable for this purpose because it's - an old-style class. - - This class is not optimized for very large dictionaries; many - functions have linear memory requirements. I recommend you - override some of these functions if speed is required. - """ - - def __iter__(self): - return iter(self.keys()) - - def __has_key(self, key): - try: - self[key] - except KeyError: - return False - else: - return True - - if PY2: - has_key = __has_key - - __contains__ = __has_key - - if PY2: - iterkeys = lambda self: iter(self.keys()) - - def values(self): - return [self[k] for k in self.keys()] - - if PY2: - itervalues = lambda self: iter(self.values()) - - def items(self): - return list(izip(self.keys(), self.values())) - - if PY2: - iteritems = lambda s: iter(s.items()) - - def clear(self): - for key in list(self.keys()): - self.__delitem__(key) - - def pop(self, key, *args): - if len(args) > 1: - raise TypeError("pop takes at most two arguments") - try: - value = self[key] - except KeyError: - if args: - return args[0] - else: - raise - del(self[key]) - return value - - def popitem(self): - for key in self.keys(): - break - else: - raise KeyError("dictionary is empty") - return key, self.pop(key) - - def update(self, other=None, **kwargs): - if other is None: - self.update(kwargs) - other = {} - - try: - for key, value in other.items(): - self.__setitem__(key, value) - except AttributeError: - for key, value in other: - self[key] = value - - def setdefault(self, key, default=None): - try: - return self[key] - except KeyError: - self[key] = default - return default - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def __repr__(self): - return repr(dict(self.items())) - - def __eq__(self, other): - return dict(self.items()) == other - - def __lt__(self, other): - return dict(self.items()) < other - - __hash__ = object.__hash__ - - def __len__(self): - return len(self.keys()) - - -class DictProxy(DictMixin): - def __init__(self, *args, **kwargs): - self.__dict = {} - super(DictProxy, self).__init__(*args, **kwargs) - - def __getitem__(self, key): - return self.__dict[key] - - def __setitem__(self, key, value): - self.__dict[key] = value - - def __delitem__(self, key): - del(self.__dict[key]) - - def keys(self): - return 
self.__dict.keys() - - -def _fill_cdata(cls): - """Add struct pack/unpack functions""" - - funcs = {} - for key, name in [("b", "char"), ("h", "short"), - ("i", "int"), ("q", "longlong")]: - for echar, esuffix in [("<", "le"), (">", "be")]: - esuffix = "_" + esuffix - for unsigned in [True, False]: - s = struct.Struct(echar + (key.upper() if unsigned else key)) - get_wrapper = lambda f: lambda *a, **k: f(*a, **k)[0] - unpack = get_wrapper(s.unpack) - unpack_from = get_wrapper(s.unpack_from) - - def get_unpack_from(s): - def unpack_from(data, offset=0): - return s.unpack_from(data, offset)[0], offset + s.size - return unpack_from - - unpack_from = get_unpack_from(s) - pack = s.pack - - prefix = "u" if unsigned else "" - if s.size == 1: - esuffix = "" - bits = str(s.size * 8) - funcs["%s%s%s" % (prefix, name, esuffix)] = unpack - funcs["%sint%s%s" % (prefix, bits, esuffix)] = unpack - funcs["%s%s%s_from" % (prefix, name, esuffix)] = unpack_from - funcs["%sint%s%s_from" % (prefix, bits, esuffix)] = unpack_from - funcs["to_%s%s%s" % (prefix, name, esuffix)] = pack - funcs["to_%sint%s%s" % (prefix, bits, esuffix)] = pack - - for key, func in iteritems(funcs): - setattr(cls, key, staticmethod(func)) - - -class cdata(object): - """C character buffer to Python numeric type conversions. - - For each size/sign/endianness: - uint32_le(data)/to_uint32_le(num)/uint32_le_from(data, offset=0) - """ - - from struct import error - error = error - - bitswap = b''.join( - chr_(sum(((val >> i) & 1) << (7 - i) for i in xrange(8))) - for val in xrange(256)) - - test_bit = staticmethod(lambda value, n: bool((value >> n) & 1)) - - -_fill_cdata(cdata) - - -def get_size(fileobj): - """Returns the size of the file object. The position when passed in will - be preserved if no error occurs. - - In case of an error raises IOError. - """ - - old_pos = fileobj.tell() - try: - fileobj.seek(0, 2) - return fileobj.tell() - finally: - fileobj.seek(old_pos, 0) - - -def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16): - """Insert size bytes of empty space starting at offset. - - fobj must be an open file object, open rb+ or - equivalent. Mutagen tries to use mmap to resize the file, but - falls back to a significantly slower method if mmap fails. - """ - - assert 0 < size - assert 0 <= offset - - fobj.seek(0, 2) - filesize = fobj.tell() - movesize = filesize - offset - fobj.write(b'\x00' * size) - fobj.flush() - - try: - import mmap - file_map = mmap.mmap(fobj.fileno(), filesize + size) - try: - file_map.move(offset + size, offset, movesize) - finally: - file_map.close() - except (ValueError, EnvironmentError, ImportError, AttributeError): - # handle broken mmap scenarios, BytesIO() - fobj.truncate(filesize) - - fobj.seek(0, 2) - padsize = size - # Don't generate an enormous string if we need to pad - # the file out several megs. - while padsize: - addsize = min(BUFFER_SIZE, padsize) - fobj.write(b"\x00" * addsize) - padsize -= addsize - - fobj.seek(filesize, 0) - while movesize: - # At the start of this loop, fobj is pointing at the end - # of the data we need to move, which is of movesize length. - thismove = min(BUFFER_SIZE, movesize) - # Seek back however much we're going to read this frame. - fobj.seek(-thismove, 1) - nextpos = fobj.tell() - # Read it, so we're back at the end. - data = fobj.read(thismove) - # Seek back to where we need to write it. - fobj.seek(-thismove + size, 1) - # Write it. - fobj.write(data) - # And seek back to the end of the unmoved data. 
- fobj.seek(nextpos) - movesize -= thismove - - fobj.flush() - - -def delete_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16): - """Delete size bytes of empty space starting at offset. - - fobj must be an open file object, open rb+ or - equivalent. Mutagen tries to use mmap to resize the file, but - falls back to a significantly slower method if mmap fails. - """ - - assert 0 < size - assert 0 <= offset - - fobj.seek(0, 2) - filesize = fobj.tell() - movesize = filesize - offset - size - assert 0 <= movesize - - if movesize > 0: - fobj.flush() - try: - import mmap - file_map = mmap.mmap(fobj.fileno(), filesize) - try: - file_map.move(offset, offset + size, movesize) - finally: - file_map.close() - except (ValueError, EnvironmentError, ImportError, AttributeError): - # handle broken mmap scenarios, BytesIO() - fobj.seek(offset + size) - buf = fobj.read(BUFFER_SIZE) - while buf: - fobj.seek(offset) - fobj.write(buf) - offset += len(buf) - fobj.seek(offset + size) - buf = fobj.read(BUFFER_SIZE) - fobj.truncate(filesize - size) - fobj.flush() - - -def resize_bytes(fobj, old_size, new_size, offset): - """Resize an area in a file adding and deleting at the end of it. - Does nothing if no resizing is needed. - """ - - if new_size < old_size: - delete_size = old_size - new_size - delete_at = offset + new_size - delete_bytes(fobj, delete_size, delete_at) - elif new_size > old_size: - insert_size = new_size - old_size - insert_at = offset + old_size - insert_bytes(fobj, insert_size, insert_at) - - -def dict_match(d, key, default=None): - """Like __getitem__ but works as if the keys() are all filename patterns. - Returns the value of any dict key that matches the passed key. - """ - - if key in d and "[" not in key: - return d[key] - else: - for pattern, value in iteritems(d): - if fnmatchcase(key, pattern): - return value - return default - - -def decode_terminated(data, encoding, strict=True): - """Returns the decoded data until the first NULL terminator - and all data after it. - - In case the data can't be decoded raises UnicodeError. - In case the encoding is not found raises LookupError. - In case the data isn't null terminated (even if it is encoded correctly) - raises ValueError except if strict is False, then the decoded string - will be returned anyway. - """ - - codec_info = codecs.lookup(encoding) - - # normalize encoding name so we can compare by name - encoding = codec_info.name - - # fast path - if encoding in ("utf-8", "iso8859-1"): - index = data.find(b"\x00") - if index == -1: - # make sure we raise UnicodeError first, like in the slow path - res = data.decode(encoding), b"" - if strict: - raise ValueError("not null terminated") - else: - return res - return data[:index].decode(encoding), data[index + 1:] - - # slow path - decoder = codec_info.incrementaldecoder() - r = [] - for i, b in enumerate(iterbytes(data)): - c = decoder.decode(b) - if c == u"\x00": - return u"".join(r), data[i + 1:] - r.append(c) - else: - # make sure the decoder is finished - r.append(decoder.decode(b"", True)) - if strict: - raise ValueError("not null terminated") - return u"".join(r), b"" - - -class BitReaderError(Exception): - pass - - -class BitReader(object): - - def __init__(self, fileobj): - self._fileobj = fileobj - self._buffer = 0 - self._bits = 0 - self._pos = fileobj.tell() - - def bits(self, count): - """Reads `count` bits and returns an uint, MSB read first. - - May raise BitReaderError if not enough data could be read or - IOError by the underlying file object. 
- """ - - if count < 0: - raise ValueError - - if count > self._bits: - n_bytes = (count - self._bits + 7) // 8 - data = self._fileobj.read(n_bytes) - if len(data) != n_bytes: - raise BitReaderError("not enough data") - for b in bytearray(data): - self._buffer = (self._buffer << 8) | b - self._bits += n_bytes * 8 - - self._bits -= count - value = self._buffer >> self._bits - self._buffer &= (1 << self._bits) - 1 - assert self._bits < 8 - return value - - def bytes(self, count): - """Returns a bytearray of length `count`. Works unaligned.""" - - if count < 0: - raise ValueError - - # fast path - if self._bits == 0: - data = self._fileobj.read(count) - if len(data) != count: - raise BitReaderError("not enough data") - return data - - return bytes(bytearray(self.bits(8) for _ in xrange(count))) - - def skip(self, count): - """Skip `count` bits. - - Might raise BitReaderError if there wasn't enough data to skip, - but might also fail on the next bits() instead. - """ - - if count < 0: - raise ValueError - - if count <= self._bits: - self.bits(count) - else: - count -= self.align() - n_bytes = count // 8 - self._fileobj.seek(n_bytes, 1) - count -= n_bytes * 8 - self.bits(count) - - def get_position(self): - """Returns the amount of bits read or skipped so far""" - - return (self._fileobj.tell() - self._pos) * 8 - self._bits - - def align(self): - """Align to the next byte, returns the amount of bits skipped""" - - bits = self._bits - self._buffer = 0 - self._bits = 0 - return bits - - def is_aligned(self): - """If we are currently aligned to bytes and nothing is buffered""" - - return self._bits == 0 diff --git a/resources/lib/mutagen/_vorbis.py b/resources/lib/mutagen/_vorbis.py deleted file mode 100644 index da202400..00000000 --- a/resources/lib/mutagen/_vorbis.py +++ /dev/null @@ -1,330 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005-2006 Joe Wreschnig -# 2013 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -"""Read and write Vorbis comment data. - -Vorbis comments are freeform key/value pairs; keys are -case-insensitive ASCII and values are Unicode strings. A key may have -multiple values. - -The specification is at http://www.xiph.org/vorbis/doc/v-comment.html. -""" - -import sys - -import mutagen -from ._compat import reraise, BytesIO, text_type, xrange, PY3, PY2 -from mutagen._util import DictMixin, cdata - - -def is_valid_key(key): - """Return true if a string is a valid Vorbis comment key. - - Valid Vorbis comment keys are printable ASCII between 0x20 (space) - and 0x7D ('}'), excluding '='. - - Takes str/unicode in Python 2, unicode in Python 3 - """ - - if PY3 and isinstance(key, bytes): - raise TypeError("needs to be str not bytes") - - for c in key: - if c < " " or c > "}" or c == "=": - return False - else: - return bool(key) - - -istag = is_valid_key - - -class error(IOError): - pass - - -class VorbisUnsetFrameError(error): - pass - - -class VorbisEncodingError(error): - pass - - -class VComment(mutagen.Metadata, list): - """A Vorbis comment parser, accessor, and renderer. - - All comment ordering is preserved. A VComment is a list of - key/value pairs, and so any Python list method can be used on it. - - Vorbis comments are always wrapped in something like an Ogg Vorbis - bitstream or a FLAC metadata block, so this loads string data or a - file-like object, not a filename. 
- - Attributes: - - * vendor -- the stream 'vendor' (i.e. writer); default 'Mutagen' - """ - - vendor = u"Mutagen " + mutagen.version_string - - def __init__(self, data=None, *args, **kwargs): - self._size = 0 - # Collect the args to pass to load, this lets child classes - # override just load and get equivalent magic for the - # constructor. - if data is not None: - if isinstance(data, bytes): - data = BytesIO(data) - elif not hasattr(data, 'read'): - raise TypeError("VComment requires bytes or a file-like") - start = data.tell() - self.load(data, *args, **kwargs) - self._size = data.tell() - start - - def load(self, fileobj, errors='replace', framing=True): - """Parse a Vorbis comment from a file-like object. - - Keyword arguments: - - * errors: - 'strict', 'replace', or 'ignore'. This affects Unicode decoding - and how other malformed content is interpreted. - * framing -- if true, fail if a framing bit is not present - - Framing bits are required by the Vorbis comment specification, - but are not used in FLAC Vorbis comment blocks. - """ - - try: - vendor_length = cdata.uint_le(fileobj.read(4)) - self.vendor = fileobj.read(vendor_length).decode('utf-8', errors) - count = cdata.uint_le(fileobj.read(4)) - for i in xrange(count): - length = cdata.uint_le(fileobj.read(4)) - try: - string = fileobj.read(length).decode('utf-8', errors) - except (OverflowError, MemoryError): - raise error("cannot read %d bytes, too large" % length) - try: - tag, value = string.split('=', 1) - except ValueError as err: - if errors == "ignore": - continue - elif errors == "replace": - tag, value = u"unknown%d" % i, string - else: - reraise(VorbisEncodingError, err, sys.exc_info()[2]) - try: - tag = tag.encode('ascii', errors) - except UnicodeEncodeError: - raise VorbisEncodingError("invalid tag name %r" % tag) - else: - # string keys in py3k - if PY3: - tag = tag.decode("ascii") - if is_valid_key(tag): - self.append((tag, value)) - - if framing and not bytearray(fileobj.read(1))[0] & 0x01: - raise VorbisUnsetFrameError("framing bit was unset") - except (cdata.error, TypeError): - raise error("file is not a valid Vorbis comment") - - def validate(self): - """Validate keys and values. - - Check to make sure every key used is a valid Vorbis key, and - that every value used is a valid Unicode or UTF-8 string. If - any invalid keys or values are found, a ValueError is raised. - - In Python 3 all keys and values have to be a string. - """ - - if not isinstance(self.vendor, text_type): - if PY3: - raise ValueError("vendor needs to be str") - - try: - self.vendor.decode('utf-8') - except UnicodeDecodeError: - raise ValueError - - for key, value in self: - try: - if not is_valid_key(key): - raise ValueError - except TypeError: - raise ValueError("%r is not a valid key" % key) - - if not isinstance(value, text_type): - if PY3: - raise ValueError("%r needs to be str" % key) - - try: - value.decode("utf-8") - except: - raise ValueError("%r is not a valid value" % value) - - return True - - def clear(self): - """Clear all keys from the comment.""" - - for i in list(self): - self.remove(i) - - def write(self, framing=True): - """Return a string representation of the data. - - Validation is always performed, so calling this function on - invalid data may raise a ValueError. 
- - Keyword arguments: - - * framing -- if true, append a framing bit (see load) - """ - - self.validate() - - def _encode(value): - if not isinstance(value, bytes): - return value.encode('utf-8') - return value - - f = BytesIO() - vendor = _encode(self.vendor) - f.write(cdata.to_uint_le(len(vendor))) - f.write(vendor) - f.write(cdata.to_uint_le(len(self))) - for tag, value in self: - tag = _encode(tag) - value = _encode(value) - comment = tag + b"=" + value - f.write(cdata.to_uint_le(len(comment))) - f.write(comment) - if framing: - f.write(b"\x01") - return f.getvalue() - - def pprint(self): - - def _decode(value): - if not isinstance(value, text_type): - return value.decode('utf-8', 'replace') - return value - - tags = [u"%s=%s" % (_decode(k), _decode(v)) for k, v in self] - return u"\n".join(tags) - - -class VCommentDict(VComment, DictMixin): - """A VComment that looks like a dictionary. - - This object differs from a dictionary in two ways. First, - len(comment) will still return the number of values, not the - number of keys. Secondly, iterating through the object will - iterate over (key, value) pairs, not keys. Since a key may have - multiple values, the same value may appear multiple times while - iterating. - - Since Vorbis comment keys are case-insensitive, all keys are - normalized to lowercase ASCII. - """ - - def __getitem__(self, key): - """A list of values for the key. - - This is a copy, so comment['title'].append('a title') will not - work. - """ - - # PY3 only - if isinstance(key, slice): - return VComment.__getitem__(self, key) - - if not is_valid_key(key): - raise ValueError - - key = key.lower() - - values = [value for (k, value) in self if k.lower() == key] - if not values: - raise KeyError(key) - else: - return values - - def __delitem__(self, key): - """Delete all values associated with the key.""" - - # PY3 only - if isinstance(key, slice): - return VComment.__delitem__(self, key) - - if not is_valid_key(key): - raise ValueError - - key = key.lower() - to_delete = [x for x in self if x[0].lower() == key] - if not to_delete: - raise KeyError(key) - else: - for item in to_delete: - self.remove(item) - - def __contains__(self, key): - """Return true if the key has any values.""" - - if not is_valid_key(key): - raise ValueError - - key = key.lower() - for k, value in self: - if k.lower() == key: - return True - else: - return False - - def __setitem__(self, key, values): - """Set a key's value or values. - - Setting a value overwrites all old ones. The value may be a - list of Unicode or UTF-8 strings, or a single Unicode or UTF-8 - string. 
- """ - - # PY3 only - if isinstance(key, slice): - return VComment.__setitem__(self, key, values) - - if not is_valid_key(key): - raise ValueError - - if not isinstance(values, list): - values = [values] - try: - del(self[key]) - except KeyError: - pass - - if PY2: - key = key.encode('ascii') - - for value in values: - self.append((key, value)) - - def keys(self): - """Return all keys in the comment.""" - - return list(set([k.lower() for k, v in self])) - - def as_dict(self): - """Return a copy of the comment data in a real dict.""" - - return dict([(key, self[key]) for key in self.keys()]) diff --git a/resources/lib/mutagen/aac.py b/resources/lib/mutagen/aac.py deleted file mode 100644 index 83968a05..00000000 --- a/resources/lib/mutagen/aac.py +++ /dev/null @@ -1,410 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2014 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -""" -* ADTS - Audio Data Transport Stream -* ADIF - Audio Data Interchange Format -* See ISO/IEC 13818-7 / 14496-03 -""" - -from mutagen import StreamInfo -from mutagen._file import FileType -from mutagen._util import BitReader, BitReaderError, MutagenError -from mutagen._compat import endswith, xrange - - -_FREQS = [ - 96000, 88200, 64000, 48000, - 44100, 32000, 24000, 22050, - 16000, 12000, 11025, 8000, - 7350, -] - - -class _ADTSStream(object): - """Represents a series of frames belonging to the same stream""" - - parsed_frames = 0 - """Number of successfully parsed frames""" - - offset = 0 - """offset in bytes at which the stream starts (the first sync word)""" - - @classmethod - def find_stream(cls, fileobj, max_bytes): - """Returns a possibly valid _ADTSStream or None. - - Args: - max_bytes (int): maximum bytes to read - """ - - r = BitReader(fileobj) - stream = cls(r) - if stream.sync(max_bytes): - stream.offset = (r.get_position() - 12) // 8 - return stream - - def sync(self, max_bytes): - """Find the next sync. 
- Returns True if found.""" - - # at least 2 bytes for the sync - max_bytes = max(max_bytes, 2) - - r = self._r - r.align() - while max_bytes > 0: - try: - b = r.bytes(1) - if b == b"\xff": - if r.bits(4) == 0xf: - return True - r.align() - max_bytes -= 2 - else: - max_bytes -= 1 - except BitReaderError: - return False - return False - - def __init__(self, r): - """Use _ADTSStream.find_stream to create a stream""" - - self._fixed_header_key = None - self._r = r - self.offset = -1 - self.parsed_frames = 0 - - self._samples = 0 - self._payload = 0 - self._start = r.get_position() / 8 - self._last = self._start - - @property - def bitrate(self): - """Bitrate of the raw aac blocks, excluding framing/crc""" - - assert self.parsed_frames, "no frame parsed yet" - - if self._samples == 0: - return 0 - - return (8 * self._payload * self.frequency) // self._samples - - @property - def samples(self): - """samples so far""" - - assert self.parsed_frames, "no frame parsed yet" - - return self._samples - - @property - def size(self): - """bytes read in the stream so far (including framing)""" - - assert self.parsed_frames, "no frame parsed yet" - - return self._last - self._start - - @property - def channels(self): - """0 means unknown""" - - assert self.parsed_frames, "no frame parsed yet" - - b_index = self._fixed_header_key[6] - if b_index == 7: - return 8 - elif b_index > 7: - return 0 - else: - return b_index - - @property - def frequency(self): - """0 means unknown""" - - assert self.parsed_frames, "no frame parsed yet" - - f_index = self._fixed_header_key[4] - try: - return _FREQS[f_index] - except IndexError: - return 0 - - def parse_frame(self): - """True if parsing was successful. - Fails either because the frame wasn't valid or the stream ended. - """ - - try: - return self._parse_frame() - except BitReaderError: - return False - - def _parse_frame(self): - r = self._r - # start == position of sync word - start = r.get_position() - 12 - - # adts_fixed_header - id_ = r.bits(1) - layer = r.bits(2) - protection_absent = r.bits(1) - - profile = r.bits(2) - sampling_frequency_index = r.bits(4) - private_bit = r.bits(1) - # TODO: if 0 we could parse program_config_element() - channel_configuration = r.bits(3) - original_copy = r.bits(1) - home = r.bits(1) - - # the fixed header has to be the same for every frame in the stream - fixed_header_key = ( - id_, layer, protection_absent, profile, sampling_frequency_index, - private_bit, channel_configuration, original_copy, home, - ) - - if self._fixed_header_key is None: - self._fixed_header_key = fixed_header_key - else: - if self._fixed_header_key != fixed_header_key: - return False - - # adts_variable_header - r.skip(2) # copyright_identification_bit/start - frame_length = r.bits(13) - r.skip(11) # adts_buffer_fullness - nordbif = r.bits(2) - # adts_variable_header end - - crc_overhead = 0 - if not protection_absent: - crc_overhead += (nordbif + 1) * 16 - if nordbif != 0: - crc_overhead *= 2 - - left = (frame_length * 8) - (r.get_position() - start) - if left < 0: - return False - r.skip(left) - assert r.is_aligned() - - self._payload += (left - crc_overhead) / 8 - self._samples += (nordbif + 1) * 1024 - self._last = r.get_position() / 8 - - self.parsed_frames += 1 - return True - - -class ProgramConfigElement(object): - - element_instance_tag = None - object_type = None - sampling_frequency_index = None - channels = None - - def __init__(self, r): - """Reads the program_config_element() - - Raises BitReaderError - """ - - self.element_instance_tag = 
r.bits(4) - self.object_type = r.bits(2) - self.sampling_frequency_index = r.bits(4) - num_front_channel_elements = r.bits(4) - num_side_channel_elements = r.bits(4) - num_back_channel_elements = r.bits(4) - num_lfe_channel_elements = r.bits(2) - num_assoc_data_elements = r.bits(3) - num_valid_cc_elements = r.bits(4) - - mono_mixdown_present = r.bits(1) - if mono_mixdown_present == 1: - r.skip(4) - stereo_mixdown_present = r.bits(1) - if stereo_mixdown_present == 1: - r.skip(4) - matrix_mixdown_idx_present = r.bits(1) - if matrix_mixdown_idx_present == 1: - r.skip(3) - - elms = num_front_channel_elements + num_side_channel_elements + \ - num_back_channel_elements - channels = 0 - for i in xrange(elms): - channels += 1 - element_is_cpe = r.bits(1) - if element_is_cpe: - channels += 1 - r.skip(4) - channels += num_lfe_channel_elements - self.channels = channels - - r.skip(4 * num_lfe_channel_elements) - r.skip(4 * num_assoc_data_elements) - r.skip(5 * num_valid_cc_elements) - r.align() - comment_field_bytes = r.bits(8) - r.skip(8 * comment_field_bytes) - - -class AACError(MutagenError): - pass - - -class AACInfo(StreamInfo): - """AAC stream information. - - Attributes: - - * channels -- number of audio channels - * length -- file length in seconds, as a float - * sample_rate -- audio sampling rate in Hz - * bitrate -- audio bitrate, in bits per second - - The length of the stream is just a guess and might not be correct. - """ - - channels = 0 - length = 0 - sample_rate = 0 - bitrate = 0 - - def __init__(self, fileobj): - # skip id3v2 header - start_offset = 0 - header = fileobj.read(10) - from mutagen.id3 import BitPaddedInt - if header.startswith(b"ID3"): - size = BitPaddedInt(header[6:]) - start_offset = size + 10 - - fileobj.seek(start_offset) - adif = fileobj.read(4) - if adif == b"ADIF": - self._parse_adif(fileobj) - self._type = "ADIF" - else: - self._parse_adts(fileobj, start_offset) - self._type = "ADTS" - - def _parse_adif(self, fileobj): - r = BitReader(fileobj) - try: - copyright_id_present = r.bits(1) - if copyright_id_present: - r.skip(72) # copyright_id - r.skip(1 + 1) # original_copy, home - bitstream_type = r.bits(1) - self.bitrate = r.bits(23) - npce = r.bits(4) - if bitstream_type == 0: - r.skip(20) # adif_buffer_fullness - - pce = ProgramConfigElement(r) - try: - self.sample_rate = _FREQS[pce.sampling_frequency_index] - except IndexError: - pass - self.channels = pce.channels - - # other pces.. - for i in xrange(npce): - ProgramConfigElement(r) - r.align() - except BitReaderError as e: - raise AACError(e) - - # use bitrate + data size to guess length - start = fileobj.tell() - fileobj.seek(0, 2) - length = fileobj.tell() - start - if self.bitrate != 0: - self.length = (8.0 * length) / self.bitrate - - def _parse_adts(self, fileobj, start_offset): - max_initial_read = 512 - max_resync_read = 10 - max_sync_tries = 10 - - frames_max = 100 - frames_needed = 3 - - # Try up to X times to find a sync word and read up to Y frames. 
- # If more than Z frames are valid we assume a valid stream - offset = start_offset - for i in xrange(max_sync_tries): - fileobj.seek(offset) - s = _ADTSStream.find_stream(fileobj, max_initial_read) - if s is None: - raise AACError("sync not found") - # start right after the last found offset - offset += s.offset + 1 - - for i in xrange(frames_max): - if not s.parse_frame(): - break - if not s.sync(max_resync_read): - break - - if s.parsed_frames >= frames_needed: - break - else: - raise AACError( - "no valid stream found (only %d frames)" % s.parsed_frames) - - self.sample_rate = s.frequency - self.channels = s.channels - self.bitrate = s.bitrate - - # size from stream start to end of file - fileobj.seek(0, 2) - stream_size = fileobj.tell() - (offset + s.offset) - # approx - self.length = float(s.samples * stream_size) / (s.size * s.frequency) - - def pprint(self): - return u"AAC (%s), %d Hz, %.2f seconds, %d channel(s), %d bps" % ( - self._type, self.sample_rate, self.length, self.channels, - self.bitrate) - - -class AAC(FileType): - """Load ADTS or ADIF streams containing AAC. - - Tagging is not supported. - Use the ID3/APEv2 classes directly instead. - """ - - _mimes = ["audio/x-aac"] - - def load(self, filename): - self.filename = filename - with open(filename, "rb") as h: - self.info = AACInfo(h) - - def add_tags(self): - raise AACError("doesn't support tags") - - @staticmethod - def score(filename, fileobj, header): - filename = filename.lower() - s = endswith(filename, ".aac") or endswith(filename, ".adts") or \ - endswith(filename, ".adif") - s += b"ADIF" in header - return s - - -Open = AAC -error = AACError - -__all__ = ["AAC", "Open"] diff --git a/resources/lib/mutagen/aiff.py b/resources/lib/mutagen/aiff.py deleted file mode 100644 index dc580063..00000000 --- a/resources/lib/mutagen/aiff.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2014 Evan Purkhiser -# 2014 Ben Ockmore -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. 
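For reference, a minimal sketch (not part of the diff) of how the ADTS/ADIF parsing removed above is normally reached through the module's public AAC class; the filename below is illustrative only:

    from mutagen.aac import AAC, AACError

    try:
        f = AAC("example.aac")        # hypothetical file; AACInfo tries ADIF, else searches for an ADTS sync
        print(f.info.pprint())        # e.g. u"AAC (ADTS), 44100 Hz, ... seconds, ... channel(s), ... bps"
        print(f.info.length, f.info.channels, f.info.bitrate)
    except AACError as err:
        print("not a parseable AAC stream:", err)

Tagging is deliberately unsupported by this class (add_tags() raises AACError); the deleted module's docstring points to the ID3/APEv2 classes for that instead.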
- -"""AIFF audio stream information and tags.""" - -import sys -import struct -from struct import pack - -from ._compat import endswith, text_type, reraise -from mutagen import StreamInfo, FileType - -from mutagen.id3 import ID3 -from mutagen.id3._util import ID3NoHeaderError, error as ID3Error -from mutagen._util import resize_bytes, delete_bytes, MutagenError - -__all__ = ["AIFF", "Open", "delete"] - - -class error(MutagenError, RuntimeError): - pass - - -class InvalidChunk(error, IOError): - pass - - -# based on stdlib's aifc -_HUGE_VAL = 1.79769313486231e+308 - - -def is_valid_chunk_id(id): - assert isinstance(id, text_type) - - return ((len(id) <= 4) and (min(id) >= u' ') and - (max(id) <= u'~')) - - -def read_float(data): # 10 bytes - expon, himant, lomant = struct.unpack('>hLL', data) - sign = 1 - if expon < 0: - sign = -1 - expon = expon + 0x8000 - if expon == himant == lomant == 0: - f = 0.0 - elif expon == 0x7FFF: - f = _HUGE_VAL - else: - expon = expon - 16383 - f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63) - return sign * f - - -class IFFChunk(object): - """Representation of a single IFF chunk""" - - # Chunk headers are 8 bytes long (4 for ID and 4 for the size) - HEADER_SIZE = 8 - - def __init__(self, fileobj, parent_chunk=None): - self.__fileobj = fileobj - self.parent_chunk = parent_chunk - self.offset = fileobj.tell() - - header = fileobj.read(self.HEADER_SIZE) - if len(header) < self.HEADER_SIZE: - raise InvalidChunk() - - self.id, self.data_size = struct.unpack('>4si', header) - - try: - self.id = self.id.decode('ascii') - except UnicodeDecodeError: - raise InvalidChunk() - - if not is_valid_chunk_id(self.id): - raise InvalidChunk() - - self.size = self.HEADER_SIZE + self.data_size - self.data_offset = fileobj.tell() - - def read(self): - """Read the chunks data""" - - self.__fileobj.seek(self.data_offset) - return self.__fileobj.read(self.data_size) - - def write(self, data): - """Write the chunk data""" - - if len(data) > self.data_size: - raise ValueError - - self.__fileobj.seek(self.data_offset) - self.__fileobj.write(data) - - def delete(self): - """Removes the chunk from the file""" - - delete_bytes(self.__fileobj, self.size, self.offset) - if self.parent_chunk is not None: - self.parent_chunk._update_size( - self.parent_chunk.data_size - self.size) - - def _update_size(self, data_size): - """Update the size of the chunk""" - - self.__fileobj.seek(self.offset + 4) - self.__fileobj.write(pack('>I', data_size)) - if self.parent_chunk is not None: - size_diff = self.data_size - data_size - self.parent_chunk._update_size( - self.parent_chunk.data_size - size_diff) - self.data_size = data_size - self.size = data_size + self.HEADER_SIZE - - def resize(self, new_data_size): - """Resize the file and update the chunk sizes""" - - resize_bytes( - self.__fileobj, self.data_size, new_data_size, self.data_offset) - self._update_size(new_data_size) - - -class IFFFile(object): - """Representation of a IFF file""" - - def __init__(self, fileobj): - self.__fileobj = fileobj - self.__chunks = {} - - # AIFF Files always start with the FORM chunk which contains a 4 byte - # ID before the start of other chunks - fileobj.seek(0) - self.__chunks[u'FORM'] = IFFChunk(fileobj) - - # Skip past the 4 byte FORM id - fileobj.seek(IFFChunk.HEADER_SIZE + 4) - - # Where the next chunk can be located. 
We need to keep track of this - # since the size indicated in the FORM header may not match up with the - # offset determined from the size of the last chunk in the file - self.__next_offset = fileobj.tell() - - # Load all of the chunks - while True: - try: - chunk = IFFChunk(fileobj, self[u'FORM']) - except InvalidChunk: - break - self.__chunks[chunk.id.strip()] = chunk - - # Calculate the location of the next chunk, - # considering the pad byte - self.__next_offset = chunk.offset + chunk.size - self.__next_offset += self.__next_offset % 2 - fileobj.seek(self.__next_offset) - - def __contains__(self, id_): - """Check if the IFF file contains a specific chunk""" - - assert isinstance(id_, text_type) - - if not is_valid_chunk_id(id_): - raise KeyError("AIFF key must be four ASCII characters.") - - return id_ in self.__chunks - - def __getitem__(self, id_): - """Get a chunk from the IFF file""" - - assert isinstance(id_, text_type) - - if not is_valid_chunk_id(id_): - raise KeyError("AIFF key must be four ASCII characters.") - - try: - return self.__chunks[id_] - except KeyError: - raise KeyError( - "%r has no %r chunk" % (self.__fileobj.name, id_)) - - def __delitem__(self, id_): - """Remove a chunk from the IFF file""" - - assert isinstance(id_, text_type) - - if not is_valid_chunk_id(id_): - raise KeyError("AIFF key must be four ASCII characters.") - - self.__chunks.pop(id_).delete() - - def insert_chunk(self, id_): - """Insert a new chunk at the end of the IFF file""" - - assert isinstance(id_, text_type) - - if not is_valid_chunk_id(id_): - raise KeyError("AIFF key must be four ASCII characters.") - - self.__fileobj.seek(self.__next_offset) - self.__fileobj.write(pack('>4si', id_.ljust(4).encode('ascii'), 0)) - self.__fileobj.seek(self.__next_offset) - chunk = IFFChunk(self.__fileobj, self[u'FORM']) - self[u'FORM']._update_size(self[u'FORM'].data_size + chunk.size) - - self.__chunks[id_] = chunk - self.__next_offset = chunk.offset + chunk.size - - -class AIFFInfo(StreamInfo): - """AIFF audio stream information. 
- - Information is parsed from the COMM chunk of the AIFF file - - Useful attributes: - - * length -- audio length, in seconds - * bitrate -- audio bitrate, in bits per second - * channels -- The number of audio channels - * sample_rate -- audio sample rate, in Hz - * sample_size -- The audio sample size - """ - - length = 0 - bitrate = 0 - channels = 0 - sample_rate = 0 - - def __init__(self, fileobj): - iff = IFFFile(fileobj) - try: - common_chunk = iff[u'COMM'] - except KeyError as e: - raise error(str(e)) - - data = common_chunk.read() - - info = struct.unpack('>hLh10s', data[:18]) - channels, frame_count, sample_size, sample_rate = info - - self.sample_rate = int(read_float(sample_rate)) - self.sample_size = sample_size - self.channels = channels - self.bitrate = channels * sample_size * self.sample_rate - self.length = frame_count / float(self.sample_rate) - - def pprint(self): - return u"%d channel AIFF @ %d bps, %s Hz, %.2f seconds" % ( - self.channels, self.bitrate, self.sample_rate, self.length) - - -class _IFFID3(ID3): - """A AIFF file with ID3v2 tags""" - - def _pre_load_header(self, fileobj): - try: - fileobj.seek(IFFFile(fileobj)[u'ID3'].data_offset) - except (InvalidChunk, KeyError): - raise ID3NoHeaderError("No ID3 chunk") - - def save(self, filename=None, v2_version=4, v23_sep='/', padding=None): - """Save ID3v2 data to the AIFF file""" - - if filename is None: - filename = self.filename - - # Unlike the parent ID3.save method, we won't save to a blank file - # since we would have to construct a empty AIFF file - with open(filename, 'rb+') as fileobj: - iff_file = IFFFile(fileobj) - - if u'ID3' not in iff_file: - iff_file.insert_chunk(u'ID3') - - chunk = iff_file[u'ID3'] - - try: - data = self._prepare_data( - fileobj, chunk.data_offset, chunk.data_size, v2_version, - v23_sep, padding) - except ID3Error as e: - reraise(error, e, sys.exc_info()[2]) - - new_size = len(data) - new_size += new_size % 2 # pad byte - assert new_size % 2 == 0 - chunk.resize(new_size) - data += (new_size - len(data)) * b'\x00' - assert new_size == len(data) - chunk.write(data) - - def delete(self, filename=None): - """Completely removes the ID3 chunk from the AIFF file""" - - if filename is None: - filename = self.filename - delete(filename) - self.clear() - - -def delete(filename): - """Completely removes the ID3 chunk from the AIFF file""" - - with open(filename, "rb+") as file_: - try: - del IFFFile(file_)[u'ID3'] - except KeyError: - pass - - -class AIFF(FileType): - """An AIFF audio file. 
- - :ivar info: :class:`AIFFInfo` - :ivar tags: :class:`ID3` - """ - - _mimes = ["audio/aiff", "audio/x-aiff"] - - @staticmethod - def score(filename, fileobj, header): - filename = filename.lower() - - return (header.startswith(b"FORM") * 2 + endswith(filename, b".aif") + - endswith(filename, b".aiff") + endswith(filename, b".aifc")) - - def add_tags(self): - """Add an empty ID3 tag to the file.""" - if self.tags is None: - self.tags = _IFFID3() - else: - raise error("an ID3 tag already exists") - - def load(self, filename, **kwargs): - """Load stream and tag information from a file.""" - self.filename = filename - - try: - self.tags = _IFFID3(filename, **kwargs) - except ID3NoHeaderError: - self.tags = None - except ID3Error as e: - raise error(e) - - with open(filename, "rb") as fileobj: - self.info = AIFFInfo(fileobj) - - -Open = AIFF diff --git a/resources/lib/mutagen/apev2.py b/resources/lib/mutagen/apev2.py deleted file mode 100644 index 3b79aba9..00000000 --- a/resources/lib/mutagen/apev2.py +++ /dev/null @@ -1,710 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""APEv2 reading and writing. - -The APEv2 format is most commonly used with Musepack files, but is -also the format of choice for WavPack and other formats. Some MP3s -also have APEv2 tags, but this can cause problems with many MP3 -decoders and taggers. - -APEv2 tags, like Vorbis comments, are freeform key=value pairs. APEv2 -keys can be any ASCII string with characters from 0x20 to 0x7E, -between 2 and 255 characters long. Keys are case-sensitive, but -readers are recommended to be case insensitive, and it is forbidden to -multiple keys which differ only in case. Keys are usually stored -title-cased (e.g. 'Artist' rather than 'artist'). - -APEv2 values are slightly more structured than Vorbis comments; values -are flagged as one of text, binary, or an external reference (usually -a URI). - -Based off the format specification found at -http://wiki.hydrogenaudio.org/index.php?title=APEv2_specification. -""" - -__all__ = ["APEv2", "APEv2File", "Open", "delete"] - -import sys -import struct -from collections import MutableSequence - -from ._compat import (cBytesIO, PY3, text_type, PY2, reraise, swap_to_string, - xrange) -from mutagen import Metadata, FileType, StreamInfo -from mutagen._util import (DictMixin, cdata, delete_bytes, total_ordering, - MutagenError) - - -def is_valid_apev2_key(key): - if not isinstance(key, text_type): - if PY3: - raise TypeError("APEv2 key must be str") - - try: - key = key.decode('ascii') - except UnicodeDecodeError: - return False - - # PY26 - Change to set literal syntax (since set is faster than list here) - return ((2 <= len(key) <= 255) and (min(key) >= u' ') and - (max(key) <= u'~') and - (key not in [u"OggS", u"TAG", u"ID3", u"MP+"])) - -# There are three different kinds of APE tag values. -# "0: Item contains text information coded in UTF-8 -# 1: Item contains binary information -# 2: Item is a locator of external stored information [e.g. 
URL] -# 3: reserved" -TEXT, BINARY, EXTERNAL = xrange(3) - -HAS_HEADER = 1 << 31 -HAS_NO_FOOTER = 1 << 30 -IS_HEADER = 1 << 29 - - -class error(IOError, MutagenError): - pass - - -class APENoHeaderError(error, ValueError): - pass - - -class APEUnsupportedVersionError(error, ValueError): - pass - - -class APEBadItemError(error, ValueError): - pass - - -class _APEv2Data(object): - # Store offsets of the important parts of the file. - start = header = data = footer = end = None - # Footer or header; seek here and read 32 to get version/size/items/flags - metadata = None - # Actual tag data - tag = None - - version = None - size = None - items = None - flags = 0 - - # The tag is at the start rather than the end. A tag at both - # the start and end of the file (i.e. the tag is the whole file) - # is not considered to be at the start. - is_at_start = False - - def __init__(self, fileobj): - self.__find_metadata(fileobj) - - if self.header is None: - self.metadata = self.footer - elif self.footer is None: - self.metadata = self.header - else: - self.metadata = max(self.header, self.footer) - - if self.metadata is None: - return - - self.__fill_missing(fileobj) - self.__fix_brokenness(fileobj) - if self.data is not None: - fileobj.seek(self.data) - self.tag = fileobj.read(self.size) - - def __find_metadata(self, fileobj): - # Try to find a header or footer. - - # Check for a simple footer. - try: - fileobj.seek(-32, 2) - except IOError: - fileobj.seek(0, 2) - return - if fileobj.read(8) == b"APETAGEX": - fileobj.seek(-8, 1) - self.footer = self.metadata = fileobj.tell() - return - - # Check for an APEv2 tag followed by an ID3v1 tag at the end. - try: - fileobj.seek(-128, 2) - if fileobj.read(3) == b"TAG": - - fileobj.seek(-35, 1) # "TAG" + header length - if fileobj.read(8) == b"APETAGEX": - fileobj.seek(-8, 1) - self.footer = fileobj.tell() - return - - # ID3v1 tag at the end, maybe preceded by Lyrics3v2. - # (http://www.id3.org/lyrics3200.html) - # (header length - "APETAGEX") - "LYRICS200" - fileobj.seek(15, 1) - if fileobj.read(9) == b'LYRICS200': - fileobj.seek(-15, 1) # "LYRICS200" + size tag - try: - offset = int(fileobj.read(6)) - except ValueError: - raise IOError - - fileobj.seek(-32 - offset - 6, 1) - if fileobj.read(8) == b"APETAGEX": - fileobj.seek(-8, 1) - self.footer = fileobj.tell() - return - - except IOError: - pass - - # Check for a tag at the start. - fileobj.seek(0, 0) - if fileobj.read(8) == b"APETAGEX": - self.is_at_start = True - self.header = 0 - - def __fill_missing(self, fileobj): - fileobj.seek(self.metadata + 8) - self.version = fileobj.read(4) - self.size = cdata.uint_le(fileobj.read(4)) - self.items = cdata.uint_le(fileobj.read(4)) - self.flags = cdata.uint_le(fileobj.read(4)) - - if self.header is not None: - self.data = self.header + 32 - # If we're reading the header, the size is the header - # offset + the size, which includes the footer. - self.end = self.data + self.size - fileobj.seek(self.end - 32, 0) - if fileobj.read(8) == b"APETAGEX": - self.footer = self.end - 32 - elif self.footer is not None: - self.end = self.footer + 32 - self.data = self.end - self.size - if self.flags & HAS_HEADER: - self.header = self.data - 32 - else: - self.header = self.data - else: - raise APENoHeaderError("No APE tag found") - - # exclude the footer from size - if self.footer is not None: - self.size -= 32 - - def __fix_brokenness(self, fileobj): - # Fix broken tags written with PyMusepack. 
- if self.header is not None: - start = self.header - else: - start = self.data - fileobj.seek(start) - - while start > 0: - # Clean up broken writing from pre-Mutagen PyMusepack. - # It didn't remove the first 24 bytes of header. - try: - fileobj.seek(-24, 1) - except IOError: - break - else: - if fileobj.read(8) == b"APETAGEX": - fileobj.seek(-8, 1) - start = fileobj.tell() - else: - break - self.start = start - - -class _CIDictProxy(DictMixin): - - def __init__(self, *args, **kwargs): - self.__casemap = {} - self.__dict = {} - super(_CIDictProxy, self).__init__(*args, **kwargs) - # Internally all names are stored as lowercase, but the case - # they were set with is remembered and used when saving. This - # is roughly in line with the standard, which says that keys - # are case-sensitive but two keys differing only in case are - # not allowed, and recommends case-insensitive - # implementations. - - def __getitem__(self, key): - return self.__dict[key.lower()] - - def __setitem__(self, key, value): - lower = key.lower() - self.__casemap[lower] = key - self.__dict[lower] = value - - def __delitem__(self, key): - lower = key.lower() - del(self.__casemap[lower]) - del(self.__dict[lower]) - - def keys(self): - return [self.__casemap.get(key, key) for key in self.__dict.keys()] - - -class APEv2(_CIDictProxy, Metadata): - """A file with an APEv2 tag. - - ID3v1 tags are silently ignored and overwritten. - """ - - filename = None - - def pprint(self): - """Return tag key=value pairs in a human-readable format.""" - - items = sorted(self.items()) - return u"\n".join(u"%s=%s" % (k, v.pprint()) for k, v in items) - - def load(self, filename): - """Load tags from a filename.""" - - self.filename = filename - with open(filename, "rb") as fileobj: - data = _APEv2Data(fileobj) - - if data.tag: - self.clear() - self.__parse_tag(data.tag, data.items) - else: - raise APENoHeaderError("No APE tag found") - - def __parse_tag(self, tag, count): - fileobj = cBytesIO(tag) - - for i in xrange(count): - size_data = fileobj.read(4) - # someone writes wrong item counts - if not size_data: - break - size = cdata.uint_le(size_data) - flags = cdata.uint_le(fileobj.read(4)) - - # Bits 1 and 2 bits are flags, 0-3 - # Bit 0 is read/write flag, ignored - kind = (flags & 6) >> 1 - if kind == 3: - raise APEBadItemError("value type must be 0, 1, or 2") - key = value = fileobj.read(1) - while key[-1:] != b'\x00' and value: - value = fileobj.read(1) - key += value - if key[-1:] == b"\x00": - key = key[:-1] - if PY3: - try: - key = key.decode("ascii") - except UnicodeError as err: - reraise(APEBadItemError, err, sys.exc_info()[2]) - value = fileobj.read(size) - - value = _get_value_type(kind)._new(value) - - self[key] = value - - def __getitem__(self, key): - if not is_valid_apev2_key(key): - raise KeyError("%r is not a valid APEv2 key" % key) - if PY2: - key = key.encode('ascii') - - return super(APEv2, self).__getitem__(key) - - def __delitem__(self, key): - if not is_valid_apev2_key(key): - raise KeyError("%r is not a valid APEv2 key" % key) - if PY2: - key = key.encode('ascii') - - super(APEv2, self).__delitem__(key) - - def __setitem__(self, key, value): - """'Magic' value setter. - - This function tries to guess at what kind of value you want to - store. If you pass in a valid UTF-8 or Unicode string, it - treats it as a text value. If you pass in a list, it treats it - as a list of string/Unicode values. If you pass in a string - that is not valid UTF-8, it assumes it is a binary value. 
- - Python 3: all bytes will be assumed to be a byte value, even - if they are valid utf-8. - - If you need to force a specific type of value (e.g. binary - data that also happens to be valid UTF-8, or an external - reference), use the APEValue factory and set the value to the - result of that:: - - from mutagen.apev2 import APEValue, EXTERNAL - tag['Website'] = APEValue('http://example.org', EXTERNAL) - """ - - if not is_valid_apev2_key(key): - raise KeyError("%r is not a valid APEv2 key" % key) - - if PY2: - key = key.encode('ascii') - - if not isinstance(value, _APEValue): - # let's guess at the content if we're not already a value... - if isinstance(value, text_type): - # unicode? we've got to be text. - value = APEValue(value, TEXT) - elif isinstance(value, list): - items = [] - for v in value: - if not isinstance(v, text_type): - if PY3: - raise TypeError("item in list not str") - v = v.decode("utf-8") - items.append(v) - - # list? text. - value = APEValue(u"\0".join(items), TEXT) - else: - if PY3: - value = APEValue(value, BINARY) - else: - try: - value.decode("utf-8") - except UnicodeError: - # invalid UTF8 text, probably binary - value = APEValue(value, BINARY) - else: - # valid UTF8, probably text - value = APEValue(value, TEXT) - - super(APEv2, self).__setitem__(key, value) - - def save(self, filename=None): - """Save changes to a file. - - If no filename is given, the one most recently loaded is used. - - Tags are always written at the end of the file, and include - a header and a footer. - """ - - filename = filename or self.filename - try: - fileobj = open(filename, "r+b") - except IOError: - fileobj = open(filename, "w+b") - data = _APEv2Data(fileobj) - - if data.is_at_start: - delete_bytes(fileobj, data.end - data.start, data.start) - elif data.start is not None: - fileobj.seek(data.start) - # Delete an ID3v1 tag if present, too. - fileobj.truncate() - fileobj.seek(0, 2) - - tags = [] - for key, value in self.items(): - # Packed format for an item: - # 4B: Value length - # 4B: Value type - # Key name - # 1B: Null - # Key value - value_data = value._write() - if not isinstance(key, bytes): - key = key.encode("utf-8") - tag_data = bytearray() - tag_data += struct.pack("<2I", len(value_data), value.kind << 1) - tag_data += key + b"\0" + value_data - tags.append(bytes(tag_data)) - - # "APE tags items should be sorted ascending by size... This is - # not a MUST, but STRONGLY recommended. Actually the items should - # be sorted by importance/byte, but this is not feasible." 
- tags.sort(key=len) - num_tags = len(tags) - tags = b"".join(tags) - - header = bytearray(b"APETAGEX") - # version, tag size, item count, flags - header += struct.pack("<4I", 2000, len(tags) + 32, num_tags, - HAS_HEADER | IS_HEADER) - header += b"\0" * 8 - fileobj.write(header) - - fileobj.write(tags) - - footer = bytearray(b"APETAGEX") - footer += struct.pack("<4I", 2000, len(tags) + 32, num_tags, - HAS_HEADER) - footer += b"\0" * 8 - - fileobj.write(footer) - fileobj.close() - - def delete(self, filename=None): - """Remove tags from a file.""" - - filename = filename or self.filename - with open(filename, "r+b") as fileobj: - data = _APEv2Data(fileobj) - if data.start is not None and data.size is not None: - delete_bytes(fileobj, data.end - data.start, data.start) - - self.clear() - - -Open = APEv2 - - -def delete(filename): - """Remove tags from a file.""" - - try: - APEv2(filename).delete() - except APENoHeaderError: - pass - - -def _get_value_type(kind): - """Returns a _APEValue subclass or raises ValueError""" - - if kind == TEXT: - return APETextValue - elif kind == BINARY: - return APEBinaryValue - elif kind == EXTERNAL: - return APEExtValue - raise ValueError("unknown kind %r" % kind) - - -def APEValue(value, kind): - """APEv2 tag value factory. - - Use this if you need to specify the value's type manually. Binary - and text data are automatically detected by APEv2.__setitem__. - """ - - try: - type_ = _get_value_type(kind) - except ValueError: - raise ValueError("kind must be TEXT, BINARY, or EXTERNAL") - else: - return type_(value) - - -class _APEValue(object): - - kind = None - value = None - - def __init__(self, value, kind=None): - # kind kwarg is for backwards compat - if kind is not None and kind != self.kind: - raise ValueError - self.value = self._validate(value) - - @classmethod - def _new(cls, data): - instance = cls.__new__(cls) - instance._parse(data) - return instance - - def _parse(self, data): - """Sets value or raises APEBadItemError""" - - raise NotImplementedError - - def _write(self): - """Returns bytes""" - - raise NotImplementedError - - def _validate(self, value): - """Returns validated value or raises TypeError/ValueErrr""" - - raise NotImplementedError - - def __repr__(self): - return "%s(%r, %d)" % (type(self).__name__, self.value, self.kind) - - -@swap_to_string -@total_ordering -class _APEUtf8Value(_APEValue): - - def _parse(self, data): - try: - self.value = data.decode("utf-8") - except UnicodeDecodeError as e: - reraise(APEBadItemError, e, sys.exc_info()[2]) - - def _validate(self, value): - if not isinstance(value, text_type): - if PY3: - raise TypeError("value not str") - else: - value = value.decode("utf-8") - return value - - def _write(self): - return self.value.encode("utf-8") - - def __len__(self): - return len(self.value) - - def __bytes__(self): - return self._write() - - def __eq__(self, other): - return self.value == other - - def __lt__(self, other): - return self.value < other - - def __str__(self): - return self.value - - -class APETextValue(_APEUtf8Value, MutableSequence): - """An APEv2 text value. - - Text values are Unicode/UTF-8 strings. They can be accessed like - strings (with a null separating the values), or arrays of strings. 
- """ - - kind = TEXT - - def __iter__(self): - """Iterate over the strings of the value (not the characters)""" - - return iter(self.value.split(u"\0")) - - def __getitem__(self, index): - return self.value.split(u"\0")[index] - - def __len__(self): - return self.value.count(u"\0") + 1 - - def __setitem__(self, index, value): - if not isinstance(value, text_type): - if PY3: - raise TypeError("value not str") - else: - value = value.decode("utf-8") - - values = list(self) - values[index] = value - self.value = u"\0".join(values) - - def insert(self, index, value): - if not isinstance(value, text_type): - if PY3: - raise TypeError("value not str") - else: - value = value.decode("utf-8") - - values = list(self) - values.insert(index, value) - self.value = u"\0".join(values) - - def __delitem__(self, index): - values = list(self) - del values[index] - self.value = u"\0".join(values) - - def pprint(self): - return u" / ".join(self) - - -@swap_to_string -@total_ordering -class APEBinaryValue(_APEValue): - """An APEv2 binary value.""" - - kind = BINARY - - def _parse(self, data): - self.value = data - - def _write(self): - return self.value - - def _validate(self, value): - if not isinstance(value, bytes): - raise TypeError("value not bytes") - return bytes(value) - - def __len__(self): - return len(self.value) - - def __bytes__(self): - return self._write() - - def __eq__(self, other): - return self.value == other - - def __lt__(self, other): - return self.value < other - - def pprint(self): - return u"[%d bytes]" % len(self) - - -class APEExtValue(_APEUtf8Value): - """An APEv2 external value. - - External values are usually URI or IRI strings. - """ - - kind = EXTERNAL - - def pprint(self): - return u"[External] %s" % self.value - - -class APEv2File(FileType): - class _Info(StreamInfo): - length = 0 - bitrate = 0 - - def __init__(self, fileobj): - pass - - @staticmethod - def pprint(): - return u"Unknown format with APEv2 tag." - - def load(self, filename): - self.filename = filename - self.info = self._Info(open(filename, "rb")) - try: - self.tags = APEv2(filename) - except APENoHeaderError: - self.tags = None - - def add_tags(self): - if self.tags is None: - self.tags = APEv2() - else: - raise error("%r already has tags: %r" % (self, self.tags)) - - @staticmethod - def score(filename, fileobj, header): - try: - fileobj.seek(-160, 2) - except IOError: - fileobj.seek(0) - footer = fileobj.read() - return ((b"APETAGEX" in footer) - header.startswith(b"ID3")) diff --git a/resources/lib/mutagen/asf/__init__.py b/resources/lib/mutagen/asf/__init__.py deleted file mode 100644 index e667192d..00000000 --- a/resources/lib/mutagen/asf/__init__.py +++ /dev/null @@ -1,319 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2005-2006 Joe Wreschnig -# Copyright (C) 2006-2007 Lukas Lalinsky -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. 
- -"""Read and write ASF (Window Media Audio) files.""" - -__all__ = ["ASF", "Open"] - -from mutagen import FileType, Metadata, StreamInfo -from mutagen._util import resize_bytes, DictMixin -from mutagen._compat import string_types, long_, PY3, izip - -from ._util import error, ASFError, ASFHeaderError -from ._objects import HeaderObject, MetadataLibraryObject, MetadataObject, \ - ExtendedContentDescriptionObject, HeaderExtensionObject, \ - ContentDescriptionObject -from ._attrs import ASFGUIDAttribute, ASFWordAttribute, ASFQWordAttribute, \ - ASFDWordAttribute, ASFBoolAttribute, ASFByteArrayAttribute, \ - ASFUnicodeAttribute, ASFBaseAttribute, ASFValue - - -# pyflakes -error, ASFError, ASFHeaderError, ASFValue - - -class ASFInfo(StreamInfo): - """ASF stream information.""" - - length = 0.0 - """Length in seconds (`float`)""" - - sample_rate = 0 - """Sample rate in Hz (`int`)""" - - bitrate = 0 - """Bitrate in bps (`int`)""" - - channels = 0 - """Number of channels (`int`)""" - - codec_type = u"" - """Name of the codec type of the first audio stream or - an empty string if unknown. Example: ``Windows Media Audio 9 Standard`` - (:class:`mutagen.text`) - """ - - codec_name = u"" - """Name and maybe version of the codec used. Example: - ``Windows Media Audio 9.1`` (:class:`mutagen.text`) - """ - - codec_description = u"" - """Further information on the codec used. - Example: ``64 kbps, 48 kHz, stereo 2-pass CBR`` (:class:`mutagen.text`) - """ - - def __init__(self): - self.length = 0.0 - self.sample_rate = 0 - self.bitrate = 0 - self.channels = 0 - self.codec_type = u"" - self.codec_name = u"" - self.codec_description = u"" - - def pprint(self): - """Returns a stream information text summary - - :rtype: text - """ - - s = u"ASF (%s) %d bps, %s Hz, %d channels, %.2f seconds" % ( - self.codec_type or self.codec_name or u"???", self.bitrate, - self.sample_rate, self.channels, self.length) - return s - - -class ASFTags(list, DictMixin, Metadata): - """Dictionary containing ASF attributes.""" - - def __getitem__(self, key): - """A list of values for the key. - - This is a copy, so comment['title'].append('a title') will not - work. - - """ - - # PY3 only - if isinstance(key, slice): - return list.__getitem__(self, key) - - values = [value for (k, value) in self if k == key] - if not values: - raise KeyError(key) - else: - return values - - def __delitem__(self, key): - """Delete all values associated with the key.""" - - # PY3 only - if isinstance(key, slice): - return list.__delitem__(self, key) - - to_delete = [x for x in self if x[0] == key] - if not to_delete: - raise KeyError(key) - else: - for k in to_delete: - self.remove(k) - - def __contains__(self, key): - """Return true if the key has any values.""" - for k, value in self: - if k == key: - return True - else: - return False - - def __setitem__(self, key, values): - """Set a key's value or values. - - Setting a value overwrites all old ones. The value may be a - list of Unicode or UTF-8 strings, or a single Unicode or UTF-8 - string. 
- """ - - # PY3 only - if isinstance(key, slice): - return list.__setitem__(self, key, values) - - if not isinstance(values, list): - values = [values] - - to_append = [] - for value in values: - if not isinstance(value, ASFBaseAttribute): - if isinstance(value, string_types): - value = ASFUnicodeAttribute(value) - elif PY3 and isinstance(value, bytes): - value = ASFByteArrayAttribute(value) - elif isinstance(value, bool): - value = ASFBoolAttribute(value) - elif isinstance(value, int): - value = ASFDWordAttribute(value) - elif isinstance(value, long_): - value = ASFQWordAttribute(value) - else: - raise TypeError("Invalid type %r" % type(value)) - to_append.append((key, value)) - - try: - del(self[key]) - except KeyError: - pass - - self.extend(to_append) - - def keys(self): - """Return a sequence of all keys in the comment.""" - - return self and set(next(izip(*self))) - - def as_dict(self): - """Return a copy of the comment data in a real dict.""" - - d = {} - for key, value in self: - d.setdefault(key, []).append(value) - return d - - def pprint(self): - """Returns a string containing all key, value pairs. - - :rtype: text - """ - - return "\n".join("%s=%s" % (k, v) for k, v in self) - - -UNICODE = ASFUnicodeAttribute.TYPE -"""Unicode string type""" - -BYTEARRAY = ASFByteArrayAttribute.TYPE -"""Byte array type""" - -BOOL = ASFBoolAttribute.TYPE -"""Bool type""" - -DWORD = ASFDWordAttribute.TYPE -""""DWord type (uint32)""" - -QWORD = ASFQWordAttribute.TYPE -"""QWord type (uint64)""" - -WORD = ASFWordAttribute.TYPE -"""Word type (uint16)""" - -GUID = ASFGUIDAttribute.TYPE -"""GUID type""" - - -class ASF(FileType): - """An ASF file, probably containing WMA or WMV. - - :param filename: a filename to load - :raises mutagen.asf.error: In case loading fails - """ - - _mimes = ["audio/x-ms-wma", "audio/x-ms-wmv", "video/x-ms-asf", - "audio/x-wma", "video/x-wmv"] - - info = None - """A `ASFInfo` instance""" - - tags = None - """A `ASFTags` instance""" - - def load(self, filename): - self.filename = filename - self.info = ASFInfo() - self.tags = ASFTags() - - with open(filename, "rb") as fileobj: - self._tags = {} - - self._header = HeaderObject.parse_full(self, fileobj) - - for guid in [ContentDescriptionObject.GUID, - ExtendedContentDescriptionObject.GUID, MetadataObject.GUID, - MetadataLibraryObject.GUID]: - self.tags.extend(self._tags.pop(guid, [])) - - assert not self._tags - - def save(self, filename=None, padding=None): - """Save tag changes back to the loaded file. - - :param padding: A callback which returns the amount of padding to use. 
- See :class:`mutagen.PaddingInfo` - - :raises mutagen.asf.error: In case saving fails - """ - - if filename is not None and filename != self.filename: - raise ValueError("saving to another file not supported atm") - - # Move attributes to the right objects - self.to_content_description = {} - self.to_extended_content_description = {} - self.to_metadata = {} - self.to_metadata_library = [] - for name, value in self.tags: - library_only = (value.data_size() > 0xFFFF or value.TYPE == GUID) - can_cont_desc = value.TYPE == UNICODE - - if library_only or value.language is not None: - self.to_metadata_library.append((name, value)) - elif value.stream is not None: - if name not in self.to_metadata: - self.to_metadata[name] = value - else: - self.to_metadata_library.append((name, value)) - elif name in ContentDescriptionObject.NAMES: - if name not in self.to_content_description and can_cont_desc: - self.to_content_description[name] = value - else: - self.to_metadata_library.append((name, value)) - else: - if name not in self.to_extended_content_description: - self.to_extended_content_description[name] = value - else: - self.to_metadata_library.append((name, value)) - - # Add missing objects - header = self._header - if header.get_child(ContentDescriptionObject.GUID) is None: - header.objects.append(ContentDescriptionObject()) - if header.get_child(ExtendedContentDescriptionObject.GUID) is None: - header.objects.append(ExtendedContentDescriptionObject()) - header_ext = header.get_child(HeaderExtensionObject.GUID) - if header_ext is None: - header_ext = HeaderExtensionObject() - header.objects.append(header_ext) - if header_ext.get_child(MetadataObject.GUID) is None: - header_ext.objects.append(MetadataObject()) - if header_ext.get_child(MetadataLibraryObject.GUID) is None: - header_ext.objects.append(MetadataLibraryObject()) - - # Render to file - with open(self.filename, "rb+") as fileobj: - old_size = header.parse_size(fileobj)[0] - data = header.render_full(self, fileobj, old_size, padding) - size = len(data) - resize_bytes(fileobj, old_size, size, 0) - fileobj.seek(0) - fileobj.write(data) - - def add_tags(self): - raise ASFError - - def delete(self, filename=None): - - if filename is not None and filename != self.filename: - raise ValueError("saving to another file not supported atm") - - self.tags.clear() - self.save(padding=lambda x: 0) - - @staticmethod - def score(filename, fileobj, header): - return header.startswith(HeaderObject.GUID) * 2 - -Open = ASF diff --git a/resources/lib/mutagen/asf/__pycache__/__init__.cpython-35.pyc b/resources/lib/mutagen/asf/__pycache__/__init__.cpython-35.pyc deleted file mode 100644 index 277e2c5f..00000000 Binary files a/resources/lib/mutagen/asf/__pycache__/__init__.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/asf/__pycache__/_attrs.cpython-35.pyc b/resources/lib/mutagen/asf/__pycache__/_attrs.cpython-35.pyc deleted file mode 100644 index aa916edd..00000000 Binary files a/resources/lib/mutagen/asf/__pycache__/_attrs.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/asf/__pycache__/_objects.cpython-35.pyc b/resources/lib/mutagen/asf/__pycache__/_objects.cpython-35.pyc deleted file mode 100644 index cb0810d8..00000000 Binary files a/resources/lib/mutagen/asf/__pycache__/_objects.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/asf/__pycache__/_util.cpython-35.pyc b/resources/lib/mutagen/asf/__pycache__/_util.cpython-35.pyc deleted file mode 100644 index 661bff50..00000000 Binary files 
a/resources/lib/mutagen/asf/__pycache__/_util.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/asf/_attrs.py b/resources/lib/mutagen/asf/_attrs.py deleted file mode 100644 index 4621c9fa..00000000 --- a/resources/lib/mutagen/asf/_attrs.py +++ /dev/null @@ -1,438 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2005-2006 Joe Wreschnig -# Copyright (C) 2006-2007 Lukas Lalinsky -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -import sys -import struct - -from mutagen._compat import swap_to_string, text_type, PY2, reraise -from mutagen._util import total_ordering - -from ._util import ASFError - - -class ASFBaseAttribute(object): - """Generic attribute.""" - - TYPE = None - - _TYPES = {} - - value = None - """The Python value of this attribute (type depends on the class)""" - - language = None - """Language""" - - stream = None - """Stream""" - - def __init__(self, value=None, data=None, language=None, - stream=None, **kwargs): - self.language = language - self.stream = stream - if data: - self.value = self.parse(data, **kwargs) - else: - if value is None: - # we used to support not passing any args and instead assign - # them later, keep that working.. - self.value = None - else: - self.value = self._validate(value) - - @classmethod - def _register(cls, other): - cls._TYPES[other.TYPE] = other - return other - - @classmethod - def _get_type(cls, type_): - """Raises KeyError""" - - return cls._TYPES[type_] - - def _validate(self, value): - """Raises TypeError or ValueError in case the user supplied value - isn't valid. - """ - - return value - - def data_size(self): - raise NotImplementedError - - def __repr__(self): - name = "%s(%r" % (type(self).__name__, self.value) - if self.language: - name += ", language=%d" % self.language - if self.stream: - name += ", stream=%d" % self.stream - name += ")" - return name - - def render(self, name): - name = name.encode("utf-16-le") + b"\x00\x00" - data = self._render() - return (struct.pack("" % ( - type(self).__name__, bytes2guid(self.GUID), self.objects) - - def pprint(self): - l = [] - l.append("%s(%s)" % (type(self).__name__, bytes2guid(self.GUID))) - for o in self.objects: - for e in o.pprint().splitlines(): - l.append(" " + e) - return "\n".join(l) - - -class UnknownObject(BaseObject): - """Unknown ASF object.""" - - def __init__(self, guid): - super(UnknownObject, self).__init__() - assert isinstance(guid, bytes) - self.GUID = guid - - -@BaseObject._register -class HeaderObject(BaseObject): - """ASF header.""" - - GUID = guid2bytes("75B22630-668E-11CF-A6D9-00AA0062CE6C") - - @classmethod - def parse_full(cls, asf, fileobj): - """Raises ASFHeaderError""" - - header = cls() - - size, num_objects = cls.parse_size(fileobj) - for i in xrange(num_objects): - guid, size = struct.unpack("<16sQ", fileobj.read(24)) - obj = BaseObject._get_object(guid) - data = fileobj.read(size - 24) - obj.parse(asf, data) - header.objects.append(obj) - - return header - - @classmethod - def parse_size(cls, fileobj): - """Returns (size, num_objects) - - Raises ASFHeaderError - """ - - header = fileobj.read(30) - if len(header) != 30 or header[:16] != HeaderObject.GUID: - raise ASFHeaderError("Not an ASF file.") - - return struct.unpack("= 0 - info = PaddingInfo(available - needed_size, content_size) - - # add padding - padding = info._get_padding(padding_func) - padding_obj.parse(asf, b"\x00" * padding) - 
data += padding_obj.render(asf) - num_objects += 1 - - data = (HeaderObject.GUID + - struct.pack(" 0: - texts.append(data[pos:end].decode("utf-16-le").strip(u"\x00")) - else: - texts.append(None) - pos = end - - for key, value in izip(self.NAMES, texts): - if value is not None: - value = ASFUnicodeAttribute(value=value) - asf._tags.setdefault(self.GUID, []).append((key, value)) - - def render(self, asf): - def render_text(name): - value = asf.to_content_description.get(name) - if value is not None: - return text_type(value).encode("utf-16-le") + b"\x00\x00" - else: - return b"" - - texts = [render_text(x) for x in self.NAMES] - data = struct.pack("= 0 - asf.info.length = max((length / 10000000.0) - (preroll / 1000.0), 0.0) - - -@BaseObject._register -class StreamPropertiesObject(BaseObject): - """Stream properties.""" - - GUID = guid2bytes("B7DC0791-A9B7-11CF-8EE6-00C00C205365") - - def parse(self, asf, data): - super(StreamPropertiesObject, self).parse(asf, data) - channels, sample_rate, bitrate = struct.unpack("H", int(s[19:23], 16)), - p(">Q", int(s[24:], 16))[2:], - ]) - - -def bytes2guid(s): - """Converts a serialized GUID to a text GUID""" - - assert isinstance(s, bytes) - - u = struct.unpack - v = [] - v.extend(u("HQ", s[8:10] + b"\x00\x00" + s[10:])) - return "%08X-%04X-%04X-%04X-%012X" % tuple(v) - - -# Names from http://windows.microsoft.com/en-za/windows7/c00d10d1-[0-9A-F]{1,4} -CODECS = { - 0x0000: u"Unknown Wave Format", - 0x0001: u"Microsoft PCM Format", - 0x0002: u"Microsoft ADPCM Format", - 0x0003: u"IEEE Float", - 0x0004: u"Compaq Computer VSELP", - 0x0005: u"IBM CVSD", - 0x0006: u"Microsoft CCITT A-Law", - 0x0007: u"Microsoft CCITT u-Law", - 0x0008: u"Microsoft DTS", - 0x0009: u"Microsoft DRM", - 0x000A: u"Windows Media Audio 9 Voice", - 0x000B: u"Windows Media Audio 10 Voice", - 0x000C: u"OGG Vorbis", - 0x000D: u"FLAC", - 0x000E: u"MOT AMR", - 0x000F: u"Nice Systems IMBE", - 0x0010: u"OKI ADPCM", - 0x0011: u"Intel IMA ADPCM", - 0x0012: u"Videologic MediaSpace ADPCM", - 0x0013: u"Sierra Semiconductor ADPCM", - 0x0014: u"Antex Electronics G.723 ADPCM", - 0x0015: u"DSP Solutions DIGISTD", - 0x0016: u"DSP Solutions DIGIFIX", - 0x0017: u"Dialogic OKI ADPCM", - 0x0018: u"MediaVision ADPCM", - 0x0019: u"Hewlett-Packard CU codec", - 0x001A: u"Hewlett-Packard Dynamic Voice", - 0x0020: u"Yamaha ADPCM", - 0x0021: u"Speech Compression SONARC", - 0x0022: u"DSP Group True Speech", - 0x0023: u"Echo Speech EchoSC1", - 0x0024: u"Ahead Inc. Audiofile AF36", - 0x0025: u"Audio Processing Technology APTX", - 0x0026: u"Ahead Inc. AudioFile AF10", - 0x0027: u"Aculab Prosody 1612", - 0x0028: u"Merging Technologies S.A. 
LRC", - 0x0030: u"Dolby Labs AC2", - 0x0031: u"Microsoft GSM 6.10", - 0x0032: u"Microsoft MSNAudio", - 0x0033: u"Antex Electronics ADPCME", - 0x0034: u"Control Resources VQLPC", - 0x0035: u"DSP Solutions Digireal", - 0x0036: u"DSP Solutions DigiADPCM", - 0x0037: u"Control Resources CR10", - 0x0038: u"Natural MicroSystems VBXADPCM", - 0x0039: u"Crystal Semiconductor IMA ADPCM", - 0x003A: u"Echo Speech EchoSC3", - 0x003B: u"Rockwell ADPCM", - 0x003C: u"Rockwell DigiTalk", - 0x003D: u"Xebec Multimedia Solutions", - 0x0040: u"Antex Electronics G.721 ADPCM", - 0x0041: u"Antex Electronics G.728 CELP", - 0x0042: u"Intel G.723", - 0x0043: u"Intel G.723.1", - 0x0044: u"Intel G.729 Audio", - 0x0045: u"Sharp G.726 Audio", - 0x0050: u"Microsoft MPEG-1", - 0x0052: u"InSoft RT24", - 0x0053: u"InSoft PAC", - 0x0055: u"MP3 - MPEG Layer III", - 0x0059: u"Lucent G.723", - 0x0060: u"Cirrus Logic", - 0x0061: u"ESS Technology ESPCM", - 0x0062: u"Voxware File-Mode", - 0x0063: u"Canopus Atrac", - 0x0064: u"APICOM G.726 ADPCM", - 0x0065: u"APICOM G.722 ADPCM", - 0x0066: u"Microsoft DSAT", - 0x0067: u"Microsoft DSAT Display", - 0x0069: u"Voxware Byte Aligned", - 0x0070: u"Voxware AC8", - 0x0071: u"Voxware AC10", - 0x0072: u"Voxware AC16", - 0x0073: u"Voxware AC20", - 0x0074: u"Voxware RT24 MetaVoice", - 0x0075: u"Voxware RT29 MetaSound", - 0x0076: u"Voxware RT29HW", - 0x0077: u"Voxware VR12", - 0x0078: u"Voxware VR18", - 0x0079: u"Voxware TQ40", - 0x007A: u"Voxware SC3", - 0x007B: u"Voxware SC3", - 0x0080: u"Softsound", - 0x0081: u"Voxware TQ60", - 0x0082: u"Microsoft MSRT24", - 0x0083: u"AT&T Labs G.729A", - 0x0084: u"Motion Pixels MVI MV12", - 0x0085: u"DataFusion Systems G.726", - 0x0086: u"DataFusion Systems GSM610", - 0x0088: u"Iterated Systems ISIAudio", - 0x0089: u"Onlive", - 0x008A: u"Multitude FT SX20", - 0x008B: u"Infocom ITS ACM G.721", - 0x008C: u"Convedia G.729", - 0x008D: u"Congruency Audio", - 0x0091: u"Siemens Business Communications SBC24", - 0x0092: u"Sonic Foundry Dolby AC3 SPDIF", - 0x0093: u"MediaSonic G.723", - 0x0094: u"Aculab Prosody 8KBPS", - 0x0097: u"ZyXEL ADPCM", - 0x0098: u"Philips LPCBB", - 0x0099: u"Studer Professional Audio AG Packed", - 0x00A0: u"Malden Electronics PHONYTALK", - 0x00A1: u"Racal Recorder GSM", - 0x00A2: u"Racal Recorder G720.a", - 0x00A3: u"Racal Recorder G723.1", - 0x00A4: u"Racal Recorder Tetra ACELP", - 0x00B0: u"NEC AAC", - 0x00FF: u"CoreAAC Audio", - 0x0100: u"Rhetorex ADPCM", - 0x0101: u"BeCubed Software IRAT", - 0x0111: u"Vivo G.723", - 0x0112: u"Vivo Siren", - 0x0120: u"Philips CELP", - 0x0121: u"Philips Grundig", - 0x0123: u"Digital G.723", - 0x0125: u"Sanyo ADPCM", - 0x0130: u"Sipro Lab Telecom ACELP.net", - 0x0131: u"Sipro Lab Telecom ACELP.4800", - 0x0132: u"Sipro Lab Telecom ACELP.8V3", - 0x0133: u"Sipro Lab Telecom ACELP.G.729", - 0x0134: u"Sipro Lab Telecom ACELP.G.729A", - 0x0135: u"Sipro Lab Telecom ACELP.KELVIN", - 0x0136: u"VoiceAge AMR", - 0x0140: u"Dictaphone G.726 ADPCM", - 0x0141: u"Dictaphone CELP68", - 0x0142: u"Dictaphone CELP54", - 0x0150: u"Qualcomm PUREVOICE", - 0x0151: u"Qualcomm HALFRATE", - 0x0155: u"Ring Zero Systems TUBGSM", - 0x0160: u"Windows Media Audio Standard", - 0x0161: u"Windows Media Audio 9 Standard", - 0x0162: u"Windows Media Audio 9 Professional", - 0x0163: u"Windows Media Audio 9 Lossless", - 0x0164: u"Windows Media Audio Pro over SPDIF", - 0x0170: u"Unisys NAP ADPCM", - 0x0171: u"Unisys NAP ULAW", - 0x0172: u"Unisys NAP ALAW", - 0x0173: u"Unisys NAP 16K", - 0x0174: u"Sycom ACM SYC008", - 0x0175: u"Sycom ACM SYC701 
G725", - 0x0176: u"Sycom ACM SYC701 CELP54", - 0x0177: u"Sycom ACM SYC701 CELP68", - 0x0178: u"Knowledge Adventure ADPCM", - 0x0180: u"Fraunhofer IIS MPEG-2 AAC", - 0x0190: u"Digital Theater Systems DTS", - 0x0200: u"Creative Labs ADPCM", - 0x0202: u"Creative Labs FastSpeech8", - 0x0203: u"Creative Labs FastSpeech10", - 0x0210: u"UHER informatic GmbH ADPCM", - 0x0215: u"Ulead DV Audio", - 0x0216: u"Ulead DV Audio", - 0x0220: u"Quarterdeck", - 0x0230: u"I-link Worldwide ILINK VC", - 0x0240: u"Aureal Semiconductor RAW SPORT", - 0x0249: u"Generic Passthru", - 0x0250: u"Interactive Products HSX", - 0x0251: u"Interactive Products RPELP", - 0x0260: u"Consistent Software CS2", - 0x0270: u"Sony SCX", - 0x0271: u"Sony SCY", - 0x0272: u"Sony ATRAC3", - 0x0273: u"Sony SPC", - 0x0280: u"Telum Audio", - 0x0281: u"Telum IA Audio", - 0x0285: u"Norcom Voice Systems ADPCM", - 0x0300: u"Fujitsu TOWNS SND", - 0x0350: u"Micronas SC4 Speech", - 0x0351: u"Micronas CELP833", - 0x0400: u"Brooktree BTV Digital", - 0x0401: u"Intel Music Coder", - 0x0402: u"Intel Audio", - 0x0450: u"QDesign Music", - 0x0500: u"On2 AVC0 Audio", - 0x0501: u"On2 AVC1 Audio", - 0x0680: u"AT&T Labs VME VMPCM", - 0x0681: u"AT&T Labs TPC", - 0x08AE: u"ClearJump Lightwave Lossless", - 0x1000: u"Olivetti GSM", - 0x1001: u"Olivetti ADPCM", - 0x1002: u"Olivetti CELP", - 0x1003: u"Olivetti SBC", - 0x1004: u"Olivetti OPR", - 0x1100: u"Lernout & Hauspie", - 0x1101: u"Lernout & Hauspie CELP", - 0x1102: u"Lernout & Hauspie SBC8", - 0x1103: u"Lernout & Hauspie SBC12", - 0x1104: u"Lernout & Hauspie SBC16", - 0x1400: u"Norris Communication", - 0x1401: u"ISIAudio", - 0x1500: u"AT&T Labs Soundspace Music Compression", - 0x1600: u"Microsoft MPEG ADTS AAC", - 0x1601: u"Microsoft MPEG RAW AAC", - 0x1608: u"Nokia MPEG ADTS AAC", - 0x1609: u"Nokia MPEG RAW AAC", - 0x181C: u"VoxWare MetaVoice RT24", - 0x1971: u"Sonic Foundry Lossless", - 0x1979: u"Innings Telecom ADPCM", - 0x1FC4: u"NTCSoft ALF2CD ACM", - 0x2000: u"Dolby AC3", - 0x2001: u"DTS", - 0x4143: u"Divio AAC", - 0x4201: u"Nokia Adaptive Multi-Rate", - 0x4243: u"Divio G.726", - 0x4261: u"ITU-T H.261", - 0x4263: u"ITU-T H.263", - 0x4264: u"ITU-T H.264", - 0x674F: u"Ogg Vorbis Mode 1", - 0x6750: u"Ogg Vorbis Mode 2", - 0x6751: u"Ogg Vorbis Mode 3", - 0x676F: u"Ogg Vorbis Mode 1+", - 0x6770: u"Ogg Vorbis Mode 2+", - 0x6771: u"Ogg Vorbis Mode 3+", - 0x7000: u"3COM NBX Audio", - 0x706D: u"FAAD AAC Audio", - 0x77A1: u"True Audio Lossless Audio", - 0x7A21: u"GSM-AMR CBR 3GPP Audio", - 0x7A22: u"GSM-AMR VBR 3GPP Audio", - 0xA100: u"Comverse Infosys G723.1", - 0xA101: u"Comverse Infosys AVQSBC", - 0xA102: u"Comverse Infosys SBC", - 0xA103: u"Symbol Technologies G729a", - 0xA104: u"VoiceAge AMR WB", - 0xA105: u"Ingenient Technologies G.726", - 0xA106: u"ISO/MPEG-4 Advanced Audio Coding (AAC)", - 0xA107: u"Encore Software Ltd's G.726", - 0xA108: u"ZOLL Medical Corporation ASAO", - 0xA109: u"Speex Voice", - 0xA10A: u"Vianix MASC Speech Compression", - 0xA10B: u"Windows Media 9 Spectrum Analyzer Output", - 0xA10C: u"Media Foundation Spectrum Analyzer Output", - 0xA10D: u"GSM 6.10 (Full-Rate) Speech", - 0xA10E: u"GSM 6.20 (Half-Rate) Speech", - 0xA10F: u"GSM 6.60 (Enchanced Full-Rate) Speech", - 0xA110: u"GSM 6.90 (Adaptive Multi-Rate) Speech", - 0xA111: u"GSM Adaptive Multi-Rate WideBand Speech", - 0xA112: u"Polycom G.722", - 0xA113: u"Polycom G.728", - 0xA114: u"Polycom G.729a", - 0xA115: u"Polycom Siren", - 0xA116: u"Global IP Sound ILBC", - 0xA117: u"Radio Time Time Shifted Radio", - 0xA118: u"Nice Systems ACA", 
- 0xA119: u"Nice Systems ADPCM", - 0xA11A: u"Vocord Group ITU-T G.721", - 0xA11B: u"Vocord Group ITU-T G.726", - 0xA11C: u"Vocord Group ITU-T G.722.1", - 0xA11D: u"Vocord Group ITU-T G.728", - 0xA11E: u"Vocord Group ITU-T G.729", - 0xA11F: u"Vocord Group ITU-T G.729a", - 0xA120: u"Vocord Group ITU-T G.723.1", - 0xA121: u"Vocord Group LBC", - 0xA122: u"Nice G.728", - 0xA123: u"France Telecom G.729 ACM Audio", - 0xA124: u"CODIAN Audio", - 0xCC12: u"Intel YUV12 Codec", - 0xCFCC: u"Digital Processing Systems Perception Motion JPEG", - 0xD261: u"DEC H.261", - 0xD263: u"DEC H.263", - 0xFFFE: u"Extensible Wave Format", - 0xFFFF: u"Unregistered", -} diff --git a/resources/lib/mutagen/easyid3.py b/resources/lib/mutagen/easyid3.py deleted file mode 100644 index f8dd2de0..00000000 --- a/resources/lib/mutagen/easyid3.py +++ /dev/null @@ -1,534 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -"""Easier access to ID3 tags. - -EasyID3 is a wrapper around mutagen.id3.ID3 to make ID3 tags appear -more like Vorbis or APEv2 tags. -""" - -import mutagen.id3 - -from ._compat import iteritems, text_type, PY2 -from mutagen import Metadata -from mutagen._util import DictMixin, dict_match -from mutagen.id3 import ID3, error, delete, ID3FileType - - -__all__ = ['EasyID3', 'Open', 'delete'] - - -class EasyID3KeyError(KeyError, ValueError, error): - """Raised when trying to get/set an invalid key. - - Subclasses both KeyError and ValueError for API compatibility, - catching KeyError is preferred. - """ - - -class EasyID3(DictMixin, Metadata): - """A file with an ID3 tag. - - Like Vorbis comments, EasyID3 keys are case-insensitive ASCII - strings. Only a subset of ID3 frames are supported by default. Use - EasyID3.RegisterKey and its wrappers to support more. - - You can also set the GetFallback, SetFallback, and DeleteFallback - to generic key getter/setter/deleter functions, which are called - if no specific handler is registered for a key. Additionally, - ListFallback can be used to supply an arbitrary list of extra - keys. These can be set on EasyID3 or on individual instances after - creation. - - To use an EasyID3 class with mutagen.mp3.MP3:: - - from mutagen.mp3 import EasyMP3 as MP3 - MP3(filename) - - Because many of the attributes are constructed on the fly, things - like the following will not work:: - - ezid3["performer"].append("Joe") - - Instead, you must do:: - - values = ezid3["performer"] - values.append("Joe") - ezid3["performer"] = values - - """ - - Set = {} - Get = {} - Delete = {} - List = {} - - # For compatibility. - valid_keys = Get - - GetFallback = None - SetFallback = None - DeleteFallback = None - ListFallback = None - - @classmethod - def RegisterKey(cls, key, - getter=None, setter=None, deleter=None, lister=None): - """Register a new key mapping. - - A key mapping is four functions, a getter, setter, deleter, - and lister. The key may be either a string or a glob pattern. - - The getter, deleted, and lister receive an ID3 instance and - the requested key name. The setter also receives the desired - value, which will be a list of strings. - - The getter, setter, and deleter are used to implement __getitem__, - __setitem__, and __delitem__. - - The lister is used to implement keys(). 
It should return a - list of keys that are actually in the ID3 instance, provided - by its associated getter. - """ - key = key.lower() - if getter is not None: - cls.Get[key] = getter - if setter is not None: - cls.Set[key] = setter - if deleter is not None: - cls.Delete[key] = deleter - if lister is not None: - cls.List[key] = lister - - @classmethod - def RegisterTextKey(cls, key, frameid): - """Register a text key. - - If the key you need to register is a simple one-to-one mapping - of ID3 frame name to EasyID3 key, then you can use this - function:: - - EasyID3.RegisterTextKey("title", "TIT2") - """ - def getter(id3, key): - return list(id3[frameid]) - - def setter(id3, key, value): - try: - frame = id3[frameid] - except KeyError: - id3.add(mutagen.id3.Frames[frameid](encoding=3, text=value)) - else: - frame.encoding = 3 - frame.text = value - - def deleter(id3, key): - del(id3[frameid]) - - cls.RegisterKey(key, getter, setter, deleter) - - @classmethod - def RegisterTXXXKey(cls, key, desc): - """Register a user-defined text frame key. - - Some ID3 tags are stored in TXXX frames, which allow a - freeform 'description' which acts as a subkey, - e.g. TXXX:BARCODE.:: - - EasyID3.RegisterTXXXKey('barcode', 'BARCODE'). - """ - frameid = "TXXX:" + desc - - def getter(id3, key): - return list(id3[frameid]) - - def setter(id3, key, value): - try: - frame = id3[frameid] - except KeyError: - enc = 0 - # Store 8859-1 if we can, per MusicBrainz spec. - for v in value: - if v and max(v) > u'\x7f': - enc = 3 - break - - id3.add(mutagen.id3.TXXX(encoding=enc, text=value, desc=desc)) - else: - frame.text = value - - def deleter(id3, key): - del(id3[frameid]) - - cls.RegisterKey(key, getter, setter, deleter) - - def __init__(self, filename=None): - self.__id3 = ID3() - if filename is not None: - self.load(filename) - - load = property(lambda s: s.__id3.load, - lambda s, v: setattr(s.__id3, 'load', v)) - - def save(self, *args, **kwargs): - # ignore v2_version until we support 2.3 here - kwargs.pop("v2_version", None) - self.__id3.save(*args, **kwargs) - - delete = property(lambda s: s.__id3.delete, - lambda s, v: setattr(s.__id3, 'delete', v)) - - filename = property(lambda s: s.__id3.filename, - lambda s, fn: setattr(s.__id3, 'filename', fn)) - - size = property(lambda s: s.__id3.size, - lambda s, fn: setattr(s.__id3, 'size', s)) - - def __getitem__(self, key): - key = key.lower() - func = dict_match(self.Get, key, self.GetFallback) - if func is not None: - return func(self.__id3, key) - else: - raise EasyID3KeyError("%r is not a valid key" % key) - - def __setitem__(self, key, value): - key = key.lower() - if PY2: - if isinstance(value, basestring): - value = [value] - else: - if isinstance(value, text_type): - value = [value] - func = dict_match(self.Set, key, self.SetFallback) - if func is not None: - return func(self.__id3, key, value) - else: - raise EasyID3KeyError("%r is not a valid key" % key) - - def __delitem__(self, key): - key = key.lower() - func = dict_match(self.Delete, key, self.DeleteFallback) - if func is not None: - return func(self.__id3, key) - else: - raise EasyID3KeyError("%r is not a valid key" % key) - - def keys(self): - keys = [] - for key in self.Get.keys(): - if key in self.List: - keys.extend(self.List[key](self.__id3, key)) - elif key in self: - keys.append(key) - if self.ListFallback is not None: - keys.extend(self.ListFallback(self.__id3, "")) - return keys - - def pprint(self): - """Print tag key=value pairs.""" - strings = [] - for key in sorted(self.keys()): - 
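Both registration helpers above include their own one-liners; put together, a sketch of extending EasyID3 with exactly the calls shown in those docstrings::

    from mutagen.easyid3 import EasyID3

    # Map a plain text frame and a user-defined TXXX frame, as the
    # RegisterTextKey / RegisterTXXXKey docstrings above show.
    EasyID3.RegisterTextKey("title", "TIT2")
    EasyID3.RegisterTXXXKey("barcode", "BARCODE")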
values = self[key] - for value in values: - strings.append("%s=%s" % (key, value)) - return "\n".join(strings) - - -Open = EasyID3 - - -def genre_get(id3, key): - return id3["TCON"].genres - - -def genre_set(id3, key, value): - try: - frame = id3["TCON"] - except KeyError: - id3.add(mutagen.id3.TCON(encoding=3, text=value)) - else: - frame.encoding = 3 - frame.genres = value - - -def genre_delete(id3, key): - del(id3["TCON"]) - - -def date_get(id3, key): - return [stamp.text for stamp in id3["TDRC"].text] - - -def date_set(id3, key, value): - id3.add(mutagen.id3.TDRC(encoding=3, text=value)) - - -def date_delete(id3, key): - del(id3["TDRC"]) - - -def original_date_get(id3, key): - return [stamp.text for stamp in id3["TDOR"].text] - - -def original_date_set(id3, key, value): - id3.add(mutagen.id3.TDOR(encoding=3, text=value)) - - -def original_date_delete(id3, key): - del(id3["TDOR"]) - - -def performer_get(id3, key): - people = [] - wanted_role = key.split(":", 1)[1] - try: - mcl = id3["TMCL"] - except KeyError: - raise KeyError(key) - for role, person in mcl.people: - if role == wanted_role: - people.append(person) - if people: - return people - else: - raise KeyError(key) - - -def performer_set(id3, key, value): - wanted_role = key.split(":", 1)[1] - try: - mcl = id3["TMCL"] - except KeyError: - mcl = mutagen.id3.TMCL(encoding=3, people=[]) - id3.add(mcl) - mcl.encoding = 3 - people = [p for p in mcl.people if p[0] != wanted_role] - for v in value: - people.append((wanted_role, v)) - mcl.people = people - - -def performer_delete(id3, key): - wanted_role = key.split(":", 1)[1] - try: - mcl = id3["TMCL"] - except KeyError: - raise KeyError(key) - people = [p for p in mcl.people if p[0] != wanted_role] - if people == mcl.people: - raise KeyError(key) - elif people: - mcl.people = people - else: - del(id3["TMCL"]) - - -def performer_list(id3, key): - try: - mcl = id3["TMCL"] - except KeyError: - return [] - else: - return list(set("performer:" + p[0] for p in mcl.people)) - - -def musicbrainz_trackid_get(id3, key): - return [id3["UFID:http://musicbrainz.org"].data.decode('ascii')] - - -def musicbrainz_trackid_set(id3, key, value): - if len(value) != 1: - raise ValueError("only one track ID may be set per song") - value = value[0].encode('ascii') - try: - frame = id3["UFID:http://musicbrainz.org"] - except KeyError: - frame = mutagen.id3.UFID(owner="http://musicbrainz.org", data=value) - id3.add(frame) - else: - frame.data = value - - -def musicbrainz_trackid_delete(id3, key): - del(id3["UFID:http://musicbrainz.org"]) - - -def website_get(id3, key): - urls = [frame.url for frame in id3.getall("WOAR")] - if urls: - return urls - else: - raise EasyID3KeyError(key) - - -def website_set(id3, key, value): - id3.delall("WOAR") - for v in value: - id3.add(mutagen.id3.WOAR(url=v)) - - -def website_delete(id3, key): - id3.delall("WOAR") - - -def gain_get(id3, key): - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - raise EasyID3KeyError(key) - else: - return [u"%+f dB" % frame.gain] - - -def gain_set(id3, key, value): - if len(value) != 1: - raise ValueError( - "there must be exactly one gain value, not %r.", value) - gain = float(value[0].split()[0]) - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1) - id3.add(frame) - frame.gain = gain - - -def gain_delete(id3, key): - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - pass - else: - if frame.peak: - frame.gain = 0.0 - else: - del(id3["RVA2:" + 
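The class docstring earlier stresses that values are constructed on the fly, so list mutations must be assigned back. A short usage sketch following that pattern (the filename is illustrative, and a performer tag is assumed to already exist)::

    from mutagen.easyid3 import EasyID3

    ezid3 = EasyID3("example.mp3")
    values = ezid3["performer"]
    values.append("Joe")
    ezid3["performer"] = values   # assign back; appending alone is not enough
    ezid3.save()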
key[11:-5]]) - - -def peak_get(id3, key): - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - raise EasyID3KeyError(key) - else: - return [u"%f" % frame.peak] - - -def peak_set(id3, key, value): - if len(value) != 1: - raise ValueError( - "there must be exactly one peak value, not %r.", value) - peak = float(value[0]) - if peak >= 2 or peak < 0: - raise ValueError("peak must be => 0 and < 2.") - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1) - id3.add(frame) - frame.peak = peak - - -def peak_delete(id3, key): - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - pass - else: - if frame.gain: - frame.peak = 0.0 - else: - del(id3["RVA2:" + key[11:-5]]) - - -def peakgain_list(id3, key): - keys = [] - for frame in id3.getall("RVA2"): - keys.append("replaygain_%s_gain" % frame.desc) - keys.append("replaygain_%s_peak" % frame.desc) - return keys - -for frameid, key in iteritems({ - "TALB": "album", - "TBPM": "bpm", - "TCMP": "compilation", # iTunes extension - "TCOM": "composer", - "TCOP": "copyright", - "TENC": "encodedby", - "TEXT": "lyricist", - "TLEN": "length", - "TMED": "media", - "TMOO": "mood", - "TIT2": "title", - "TIT3": "version", - "TPE1": "artist", - "TPE2": "performer", - "TPE3": "conductor", - "TPE4": "arranger", - "TPOS": "discnumber", - "TPUB": "organization", - "TRCK": "tracknumber", - "TOLY": "author", - "TSO2": "albumartistsort", # iTunes extension - "TSOA": "albumsort", - "TSOC": "composersort", # iTunes extension - "TSOP": "artistsort", - "TSOT": "titlesort", - "TSRC": "isrc", - "TSST": "discsubtitle", - "TLAN": "language", -}): - EasyID3.RegisterTextKey(key, frameid) - -EasyID3.RegisterKey("genre", genre_get, genre_set, genre_delete) -EasyID3.RegisterKey("date", date_get, date_set, date_delete) -EasyID3.RegisterKey("originaldate", original_date_get, original_date_set, - original_date_delete) -EasyID3.RegisterKey( - "performer:*", performer_get, performer_set, performer_delete, - performer_list) -EasyID3.RegisterKey("musicbrainz_trackid", musicbrainz_trackid_get, - musicbrainz_trackid_set, musicbrainz_trackid_delete) -EasyID3.RegisterKey("website", website_get, website_set, website_delete) -EasyID3.RegisterKey( - "replaygain_*_gain", gain_get, gain_set, gain_delete, peakgain_list) -EasyID3.RegisterKey("replaygain_*_peak", peak_get, peak_set, peak_delete) - -# At various times, information for this came from -# http://musicbrainz.org/docs/specs/metadata_tags.html -# http://bugs.musicbrainz.org/ticket/1383 -# http://musicbrainz.org/doc/MusicBrainzTag -for desc, key in iteritems({ - u"MusicBrainz Artist Id": "musicbrainz_artistid", - u"MusicBrainz Album Id": "musicbrainz_albumid", - u"MusicBrainz Album Artist Id": "musicbrainz_albumartistid", - u"MusicBrainz TRM Id": "musicbrainz_trmid", - u"MusicIP PUID": "musicip_puid", - u"MusicMagic Fingerprint": "musicip_fingerprint", - u"MusicBrainz Album Status": "musicbrainz_albumstatus", - u"MusicBrainz Album Type": "musicbrainz_albumtype", - u"MusicBrainz Album Release Country": "releasecountry", - u"MusicBrainz Disc Id": "musicbrainz_discid", - u"ASIN": "asin", - u"ALBUMARTISTSORT": "albumartistsort", - u"BARCODE": "barcode", - u"CATALOGNUMBER": "catalognumber", - u"MusicBrainz Release Track Id": "musicbrainz_releasetrackid", - u"MusicBrainz Release Group Id": "musicbrainz_releasegroupid", - u"MusicBrainz Work Id": "musicbrainz_workid", - u"Acoustid Fingerprint": "acoustid_fingerprint", - u"Acoustid Id": "acoustid_id", -}): - 
EasyID3.RegisterTXXXKey(key, desc) - - -class EasyID3FileType(ID3FileType): - """Like ID3FileType, but uses EasyID3 for tags.""" - ID3 = EasyID3 diff --git a/resources/lib/mutagen/easymp4.py b/resources/lib/mutagen/easymp4.py deleted file mode 100644 index b965f37d..00000000 --- a/resources/lib/mutagen/easymp4.py +++ /dev/null @@ -1,285 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2009 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -from mutagen import Metadata -from mutagen._util import DictMixin, dict_match -from mutagen.mp4 import MP4, MP4Tags, error, delete -from ._compat import PY2, text_type, PY3 - - -__all__ = ["EasyMP4Tags", "EasyMP4", "delete", "error"] - - -class EasyMP4KeyError(error, KeyError, ValueError): - pass - - -class EasyMP4Tags(DictMixin, Metadata): - """A file with MPEG-4 iTunes metadata. - - Like Vorbis comments, EasyMP4Tags keys are case-insensitive ASCII - strings, and values are a list of Unicode strings (and these lists - are always of length 0 or 1). - - If you need access to the full MP4 metadata feature set, you should use - MP4, not EasyMP4. - """ - - Set = {} - Get = {} - Delete = {} - List = {} - - def __init__(self, *args, **kwargs): - self.__mp4 = MP4Tags(*args, **kwargs) - self.load = self.__mp4.load - self.save = self.__mp4.save - self.delete = self.__mp4.delete - self._padding = self.__mp4._padding - - filename = property(lambda s: s.__mp4.filename, - lambda s, fn: setattr(s.__mp4, 'filename', fn)) - - @classmethod - def RegisterKey(cls, key, - getter=None, setter=None, deleter=None, lister=None): - """Register a new key mapping. - - A key mapping is four functions, a getter, setter, deleter, - and lister. The key may be either a string or a glob pattern. - - The getter, deleted, and lister receive an MP4Tags instance - and the requested key name. The setter also receives the - desired value, which will be a list of strings. - - The getter, setter, and deleter are used to implement __getitem__, - __setitem__, and __delitem__. - - The lister is used to implement keys(). It should return a - list of keys that are actually in the MP4 instance, provided - by its associated getter. - """ - key = key.lower() - if getter is not None: - cls.Get[key] = getter - if setter is not None: - cls.Set[key] = setter - if deleter is not None: - cls.Delete[key] = deleter - if lister is not None: - cls.List[key] = lister - - @classmethod - def RegisterTextKey(cls, key, atomid): - """Register a text key. - - If the key you need to register is a simple one-to-one mapping - of MP4 atom name to EasyMP4Tags key, then you can use this - function:: - - EasyMP4Tags.RegisterTextKey("artist", "\xa9ART") - """ - def getter(tags, key): - return tags[atomid] - - def setter(tags, key, value): - tags[atomid] = value - - def deleter(tags, key): - del(tags[atomid]) - - cls.RegisterKey(key, getter, setter, deleter) - - @classmethod - def RegisterIntKey(cls, key, atomid, min_value=0, max_value=(2 ** 16) - 1): - """Register a scalar integer key. 
- """ - - def getter(tags, key): - return list(map(text_type, tags[atomid])) - - def setter(tags, key, value): - clamp = lambda x: int(min(max(min_value, x), max_value)) - tags[atomid] = [clamp(v) for v in map(int, value)] - - def deleter(tags, key): - del(tags[atomid]) - - cls.RegisterKey(key, getter, setter, deleter) - - @classmethod - def RegisterIntPairKey(cls, key, atomid, min_value=0, - max_value=(2 ** 16) - 1): - def getter(tags, key): - ret = [] - for (track, total) in tags[atomid]: - if total: - ret.append(u"%d/%d" % (track, total)) - else: - ret.append(text_type(track)) - return ret - - def setter(tags, key, value): - clamp = lambda x: int(min(max(min_value, x), max_value)) - data = [] - for v in value: - try: - tracks, total = v.split("/") - tracks = clamp(int(tracks)) - total = clamp(int(total)) - except (ValueError, TypeError): - tracks = clamp(int(v)) - total = min_value - data.append((tracks, total)) - tags[atomid] = data - - def deleter(tags, key): - del(tags[atomid]) - - cls.RegisterKey(key, getter, setter, deleter) - - @classmethod - def RegisterFreeformKey(cls, key, name, mean="com.apple.iTunes"): - """Register a text key. - - If the key you need to register is a simple one-to-one mapping - of MP4 freeform atom (----) and name to EasyMP4Tags key, then - you can use this function:: - - EasyMP4Tags.RegisterFreeformKey( - "musicbrainz_artistid", "MusicBrainz Artist Id") - """ - atomid = "----:" + mean + ":" + name - - def getter(tags, key): - return [s.decode("utf-8", "replace") for s in tags[atomid]] - - def setter(tags, key, value): - encoded = [] - for v in value: - if not isinstance(v, text_type): - if PY3: - raise TypeError("%r not str" % v) - v = v.decode("utf-8") - encoded.append(v.encode("utf-8")) - tags[atomid] = encoded - - def deleter(tags, key): - del(tags[atomid]) - - cls.RegisterKey(key, getter, setter, deleter) - - def __getitem__(self, key): - key = key.lower() - func = dict_match(self.Get, key) - if func is not None: - return func(self.__mp4, key) - else: - raise EasyMP4KeyError("%r is not a valid key" % key) - - def __setitem__(self, key, value): - key = key.lower() - - if PY2: - if isinstance(value, basestring): - value = [value] - else: - if isinstance(value, text_type): - value = [value] - - func = dict_match(self.Set, key) - if func is not None: - return func(self.__mp4, key, value) - else: - raise EasyMP4KeyError("%r is not a valid key" % key) - - def __delitem__(self, key): - key = key.lower() - func = dict_match(self.Delete, key) - if func is not None: - return func(self.__mp4, key) - else: - raise EasyMP4KeyError("%r is not a valid key" % key) - - def keys(self): - keys = [] - for key in self.Get.keys(): - if key in self.List: - keys.extend(self.List[key](self.__mp4, key)) - elif key in self: - keys.append(key) - return keys - - def pprint(self): - """Print tag key=value pairs.""" - strings = [] - for key in sorted(self.keys()): - values = self[key] - for value in values: - strings.append("%s=%s" % (key, value)) - return "\n".join(strings) - -for atomid, key in { - '\xa9nam': 'title', - '\xa9alb': 'album', - '\xa9ART': 'artist', - 'aART': 'albumartist', - '\xa9day': 'date', - '\xa9cmt': 'comment', - 'desc': 'description', - '\xa9grp': 'grouping', - '\xa9gen': 'genre', - 'cprt': 'copyright', - 'soal': 'albumsort', - 'soaa': 'albumartistsort', - 'soar': 'artistsort', - 'sonm': 'titlesort', - 'soco': 'composersort', -}.items(): - EasyMP4Tags.RegisterTextKey(key, atomid) - -for name, key in { - 'MusicBrainz Artist Id': 'musicbrainz_artistid', - 
'MusicBrainz Track Id': 'musicbrainz_trackid', - 'MusicBrainz Album Id': 'musicbrainz_albumid', - 'MusicBrainz Album Artist Id': 'musicbrainz_albumartistid', - 'MusicIP PUID': 'musicip_puid', - 'MusicBrainz Album Status': 'musicbrainz_albumstatus', - 'MusicBrainz Album Type': 'musicbrainz_albumtype', - 'MusicBrainz Release Country': 'releasecountry', -}.items(): - EasyMP4Tags.RegisterFreeformKey(key, name) - -for name, key in { - "tmpo": "bpm", -}.items(): - EasyMP4Tags.RegisterIntKey(key, name) - -for name, key in { - "trkn": "tracknumber", - "disk": "discnumber", -}.items(): - EasyMP4Tags.RegisterIntPairKey(key, name) - - -class EasyMP4(MP4): - """Like :class:`MP4 `, - but uses :class:`EasyMP4Tags` for tags. - - :ivar info: :class:`MP4Info ` - :ivar tags: :class:`EasyMP4Tags` - """ - - MP4Tags = EasyMP4Tags - - Get = EasyMP4Tags.Get - Set = EasyMP4Tags.Set - Delete = EasyMP4Tags.Delete - List = EasyMP4Tags.List - RegisterTextKey = EasyMP4Tags.RegisterTextKey - RegisterKey = EasyMP4Tags.RegisterKey diff --git a/resources/lib/mutagen/flac.py b/resources/lib/mutagen/flac.py deleted file mode 100644 index e6cd1cf7..00000000 --- a/resources/lib/mutagen/flac.py +++ /dev/null @@ -1,876 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -"""Read and write FLAC Vorbis comments and stream information. - -Read more about FLAC at http://flac.sourceforge.net. - -FLAC supports arbitrary metadata blocks. The two most interesting ones -are the FLAC stream information block, and the Vorbis comment block; -these are also the only ones Mutagen can currently read. - -This module does not handle Ogg FLAC files. - -Based off documentation available at -http://flac.sourceforge.net/format.html -""" - -__all__ = ["FLAC", "Open", "delete"] - -import struct -from ._vorbis import VCommentDict -import mutagen - -from ._compat import cBytesIO, endswith, chr_, xrange -from mutagen._util import resize_bytes, MutagenError, get_size -from mutagen._tags import PaddingInfo -from mutagen.id3 import BitPaddedInt -from functools import reduce - - -class error(IOError, MutagenError): - pass - - -class FLACNoHeaderError(error): - pass - - -class FLACVorbisError(ValueError, error): - pass - - -def to_int_be(data): - """Convert an arbitrarily-long string to a long using big-endian - byte order.""" - return reduce(lambda a, b: (a << 8) + b, bytearray(data), 0) - - -class StrictFileObject(object): - """Wraps a file-like object and raises an exception if the requested - amount of data to read isn't returned.""" - - def __init__(self, fileobj): - self._fileobj = fileobj - for m in ["close", "tell", "seek", "write", "name"]: - if hasattr(fileobj, m): - setattr(self, m, getattr(fileobj, m)) - - def read(self, size=-1): - data = self._fileobj.read(size) - if size >= 0 and len(data) != size: - raise error("file said %d bytes, read %d bytes" % ( - size, len(data))) - return data - - def tryread(self, *args): - return self._fileobj.read(*args) - - -class MetadataBlock(object): - """A generic block of FLAC metadata. - - This class is extended by specific used as an ancestor for more specific - blocks, and also as a container for data blobs of unknown blocks. 
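flac.py's to_int_be helper above folds an arbitrarily long byte string into a big-endian integer with reduce. A two-byte sanity check, assuming the module-private helper is in scope::

    # (0x01 << 8) + 0x02 == 258
    assert to_int_be(b"\x01\x02") == 0x0102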
- - Attributes: - - * data -- raw binary data for this block - """ - - _distrust_size = False - """For block types setting this, we don't trust the size field and - use the size of the content instead.""" - - _invalid_overflow_size = -1 - """In case the real size was bigger than what is representable by the - 24 bit size field, we save the wrong specified size here. This can - only be set if _distrust_size is True""" - - _MAX_SIZE = 2 ** 24 - 1 - - def __init__(self, data): - """Parse the given data string or file-like as a metadata block. - The metadata header should not be included.""" - if data is not None: - if not isinstance(data, StrictFileObject): - if isinstance(data, bytes): - data = cBytesIO(data) - elif not hasattr(data, 'read'): - raise TypeError( - "StreamInfo requires string data or a file-like") - data = StrictFileObject(data) - self.load(data) - - def load(self, data): - self.data = data.read() - - def write(self): - return self.data - - @classmethod - def _writeblock(cls, block, is_last=False): - """Returns the block content + header. - - Raises error. - """ - - data = bytearray() - code = (block.code | 128) if is_last else block.code - datum = block.write() - size = len(datum) - if size > cls._MAX_SIZE: - if block._distrust_size and block._invalid_overflow_size != -1: - # The original size of this block was (1) wrong and (2) - # the real size doesn't allow us to save the file - # according to the spec (too big for 24 bit uint). Instead - # simply write back the original wrong size.. at least - # we don't make the file more "broken" as it is. - size = block._invalid_overflow_size - else: - raise error("block is too long to write") - assert not size > cls._MAX_SIZE - length = struct.pack(">I", size)[-3:] - data.append(code) - data += length - data += datum - return data - - @classmethod - def _writeblocks(cls, blocks, available, cont_size, padding_func): - """Render metadata block as a byte string.""" - - # write everything except padding - data = bytearray() - for block in blocks: - if isinstance(block, Padding): - continue - data += cls._writeblock(block) - blockssize = len(data) - - # take the padding overhead into account. we always add one - # to make things simple. - padding_block = Padding() - blockssize += len(cls._writeblock(padding_block)) - - # finally add a padding block - info = PaddingInfo(available - blockssize, cont_size) - padding_block.length = min(info._get_padding(padding_func), - cls._MAX_SIZE) - data += cls._writeblock(padding_block, is_last=True) - - return data - - -class StreamInfo(MetadataBlock, mutagen.StreamInfo): - """FLAC stream information. - - This contains information about the audio data in the FLAC file. - Unlike most stream information objects in Mutagen, changes to this - one will rewritten to the file when it is saved. Unless you are - actually changing the audio stream itself, don't change any - attributes of this block. 
- - Attributes: - - * min_blocksize -- minimum audio block size - * max_blocksize -- maximum audio block size - * sample_rate -- audio sample rate in Hz - * channels -- audio channels (1 for mono, 2 for stereo) - * bits_per_sample -- bits per sample - * total_samples -- total samples in file - * length -- audio length in seconds - """ - - code = 0 - - def __eq__(self, other): - try: - return (self.min_blocksize == other.min_blocksize and - self.max_blocksize == other.max_blocksize and - self.sample_rate == other.sample_rate and - self.channels == other.channels and - self.bits_per_sample == other.bits_per_sample and - self.total_samples == other.total_samples) - except: - return False - - __hash__ = MetadataBlock.__hash__ - - def load(self, data): - self.min_blocksize = int(to_int_be(data.read(2))) - self.max_blocksize = int(to_int_be(data.read(2))) - self.min_framesize = int(to_int_be(data.read(3))) - self.max_framesize = int(to_int_be(data.read(3))) - # first 16 bits of sample rate - sample_first = to_int_be(data.read(2)) - # last 4 bits of sample rate, 3 of channels, first 1 of bits/sample - sample_channels_bps = to_int_be(data.read(1)) - # last 4 of bits/sample, 36 of total samples - bps_total = to_int_be(data.read(5)) - - sample_tail = sample_channels_bps >> 4 - self.sample_rate = int((sample_first << 4) + sample_tail) - if not self.sample_rate: - raise error("A sample rate value of 0 is invalid") - self.channels = int(((sample_channels_bps >> 1) & 7) + 1) - bps_tail = bps_total >> 36 - bps_head = (sample_channels_bps & 1) << 4 - self.bits_per_sample = int(bps_head + bps_tail + 1) - self.total_samples = bps_total & 0xFFFFFFFFF - self.length = self.total_samples / float(self.sample_rate) - - self.md5_signature = to_int_be(data.read(16)) - - def write(self): - f = cBytesIO() - f.write(struct.pack(">I", self.min_blocksize)[-2:]) - f.write(struct.pack(">I", self.max_blocksize)[-2:]) - f.write(struct.pack(">I", self.min_framesize)[-3:]) - f.write(struct.pack(">I", self.max_framesize)[-3:]) - - # first 16 bits of sample rate - f.write(struct.pack(">I", self.sample_rate >> 4)[-2:]) - # 4 bits sample, 3 channel, 1 bps - byte = (self.sample_rate & 0xF) << 4 - byte += ((self.channels - 1) & 7) << 1 - byte += ((self.bits_per_sample - 1) >> 4) & 1 - f.write(chr_(byte)) - # 4 bits of bps, 4 of sample count - byte = ((self.bits_per_sample - 1) & 0xF) << 4 - byte += (self.total_samples >> 32) & 0xF - f.write(chr_(byte)) - # last 32 of sample count - f.write(struct.pack(">I", self.total_samples & 0xFFFFFFFF)) - # MD5 signature - sig = self.md5_signature - f.write(struct.pack( - ">4I", (sig >> 96) & 0xFFFFFFFF, (sig >> 64) & 0xFFFFFFFF, - (sig >> 32) & 0xFFFFFFFF, sig & 0xFFFFFFFF)) - return f.getvalue() - - def pprint(self): - return u"FLAC, %.2f seconds, %d Hz" % (self.length, self.sample_rate) - - -class SeekPoint(tuple): - """A single seek point in a FLAC file. - - Placeholder seek points have first_sample of 0xFFFFFFFFFFFFFFFFL, - and byte_offset and num_samples undefined. Seek points must be - sorted in ascending order by first_sample number. Seek points must - be unique by first_sample number, except for placeholder - points. Placeholder points must occur last in the table and there - may be any number of them. 
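Given the StreamInfo attributes listed above, a sketch of inspecting them through the FLAC file type defined later in this module (the filename is illustrative)::

    from mutagen.flac import FLAC

    audio = FLAC("example.flac")
    info = audio.info   # the StreamInfo block described above
    print("%d Hz, %d channels, %d bits, %.2f seconds" % (
        info.sample_rate, info.channels, info.bits_per_sample, info.length))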
- - Attributes: - - * first_sample -- sample number of first sample in the target frame - * byte_offset -- offset from first frame to target frame - * num_samples -- number of samples in target frame - """ - - def __new__(cls, first_sample, byte_offset, num_samples): - return super(cls, SeekPoint).__new__( - cls, (first_sample, byte_offset, num_samples)) - - first_sample = property(lambda self: self[0]) - byte_offset = property(lambda self: self[1]) - num_samples = property(lambda self: self[2]) - - -class SeekTable(MetadataBlock): - """Read and write FLAC seek tables. - - Attributes: - - * seekpoints -- list of SeekPoint objects - """ - - __SEEKPOINT_FORMAT = '>QQH' - __SEEKPOINT_SIZE = struct.calcsize(__SEEKPOINT_FORMAT) - - code = 3 - - def __init__(self, data): - self.seekpoints = [] - super(SeekTable, self).__init__(data) - - def __eq__(self, other): - try: - return (self.seekpoints == other.seekpoints) - except (AttributeError, TypeError): - return False - - __hash__ = MetadataBlock.__hash__ - - def load(self, data): - self.seekpoints = [] - sp = data.tryread(self.__SEEKPOINT_SIZE) - while len(sp) == self.__SEEKPOINT_SIZE: - self.seekpoints.append(SeekPoint( - *struct.unpack(self.__SEEKPOINT_FORMAT, sp))) - sp = data.tryread(self.__SEEKPOINT_SIZE) - - def write(self): - f = cBytesIO() - for seekpoint in self.seekpoints: - packed = struct.pack( - self.__SEEKPOINT_FORMAT, - seekpoint.first_sample, seekpoint.byte_offset, - seekpoint.num_samples) - f.write(packed) - return f.getvalue() - - def __repr__(self): - return "<%s seekpoints=%r>" % (type(self).__name__, self.seekpoints) - - -class VCFLACDict(VCommentDict): - """Read and write FLAC Vorbis comments. - - FLACs don't use the framing bit at the end of the comment block. - So this extends VCommentDict to not use the framing bit. - """ - - code = 4 - _distrust_size = True - - def load(self, data, errors='replace', framing=False): - super(VCFLACDict, self).load(data, errors=errors, framing=framing) - - def write(self, framing=False): - return super(VCFLACDict, self).write(framing=framing) - - -class CueSheetTrackIndex(tuple): - """Index for a track in a cuesheet. - - For CD-DA, an index_number of 0 corresponds to the track - pre-gap. The first index in a track must have a number of 0 or 1, - and subsequently, index_numbers must increase by 1. Index_numbers - must be unique within a track. And index_offset must be evenly - divisible by 588 samples. - - Attributes: - - * index_number -- index point number - * index_offset -- offset in samples from track start - """ - - def __new__(cls, index_number, index_offset): - return super(cls, CueSheetTrackIndex).__new__( - cls, (index_number, index_offset)) - - index_number = property(lambda self: self[0]) - index_offset = property(lambda self: self[1]) - - -class CueSheetTrack(object): - """A track in a cuesheet. - - For CD-DA, track_numbers must be 1-99, or 170 for the - lead-out. Track_numbers must be unique within a cue sheet. There - must be atleast one index in every track except the lead-out track - which must have none. 
- - Attributes: - - * track_number -- track number - * start_offset -- track offset in samples from start of FLAC stream - * isrc -- ISRC code - * type -- 0 for audio, 1 for digital data - * pre_emphasis -- true if the track is recorded with pre-emphasis - * indexes -- list of CueSheetTrackIndex objects - """ - - def __init__(self, track_number, start_offset, isrc='', type_=0, - pre_emphasis=False): - self.track_number = track_number - self.start_offset = start_offset - self.isrc = isrc - self.type = type_ - self.pre_emphasis = pre_emphasis - self.indexes = [] - - def __eq__(self, other): - try: - return (self.track_number == other.track_number and - self.start_offset == other.start_offset and - self.isrc == other.isrc and - self.type == other.type and - self.pre_emphasis == other.pre_emphasis and - self.indexes == other.indexes) - except (AttributeError, TypeError): - return False - - __hash__ = object.__hash__ - - def __repr__(self): - return (("<%s number=%r, offset=%d, isrc=%r, type=%r, " - "pre_emphasis=%r, indexes=%r)>") % - (type(self).__name__, self.track_number, self.start_offset, - self.isrc, self.type, self.pre_emphasis, self.indexes)) - - -class CueSheet(MetadataBlock): - """Read and write FLAC embedded cue sheets. - - Number of tracks should be from 1 to 100. There should always be - exactly one lead-out track and that track must be the last track - in the cue sheet. - - Attributes: - - * media_catalog_number -- media catalog number in ASCII - * lead_in_samples -- number of lead-in samples - * compact_disc -- true if the cuesheet corresponds to a compact disc - * tracks -- list of CueSheetTrack objects - * lead_out -- lead-out as CueSheetTrack or None if lead-out was not found - """ - - __CUESHEET_FORMAT = '>128sQB258xB' - __CUESHEET_SIZE = struct.calcsize(__CUESHEET_FORMAT) - __CUESHEET_TRACK_FORMAT = '>QB12sB13xB' - __CUESHEET_TRACK_SIZE = struct.calcsize(__CUESHEET_TRACK_FORMAT) - __CUESHEET_TRACKINDEX_FORMAT = '>QB3x' - __CUESHEET_TRACKINDEX_SIZE = struct.calcsize(__CUESHEET_TRACKINDEX_FORMAT) - - code = 5 - - media_catalog_number = b'' - lead_in_samples = 88200 - compact_disc = True - - def __init__(self, data): - self.tracks = [] - super(CueSheet, self).__init__(data) - - def __eq__(self, other): - try: - return (self.media_catalog_number == other.media_catalog_number and - self.lead_in_samples == other.lead_in_samples and - self.compact_disc == other.compact_disc and - self.tracks == other.tracks) - except (AttributeError, TypeError): - return False - - __hash__ = MetadataBlock.__hash__ - - def load(self, data): - header = data.read(self.__CUESHEET_SIZE) - media_catalog_number, lead_in_samples, flags, num_tracks = \ - struct.unpack(self.__CUESHEET_FORMAT, header) - self.media_catalog_number = media_catalog_number.rstrip(b'\0') - self.lead_in_samples = lead_in_samples - self.compact_disc = bool(flags & 0x80) - self.tracks = [] - for i in xrange(num_tracks): - track = data.read(self.__CUESHEET_TRACK_SIZE) - start_offset, track_number, isrc_padded, flags, num_indexes = \ - struct.unpack(self.__CUESHEET_TRACK_FORMAT, track) - isrc = isrc_padded.rstrip(b'\0') - type_ = (flags & 0x80) >> 7 - pre_emphasis = bool(flags & 0x40) - val = CueSheetTrack( - track_number, start_offset, isrc, type_, pre_emphasis) - for j in xrange(num_indexes): - index = data.read(self.__CUESHEET_TRACKINDEX_SIZE) - index_offset, index_number = struct.unpack( - self.__CUESHEET_TRACKINDEX_FORMAT, index) - val.indexes.append( - CueSheetTrackIndex(index_number, index_offset)) - self.tracks.append(val) - - 
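A sketch of walking the cue sheet and seek table blocks described above once a file is loaded; both attributes may be None if the corresponding block is absent::

    from mutagen.flac import FLAC

    audio = FLAC("example.flac")   # filename is illustrative
    if audio.cuesheet is not None:
        for track in audio.cuesheet.tracks:
            print(track.track_number, track.start_offset, track.isrc)
    if audio.seektable is not None:
        print("%d seek points" % len(audio.seektable.seekpoints))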
def write(self): - f = cBytesIO() - flags = 0 - if self.compact_disc: - flags |= 0x80 - packed = struct.pack( - self.__CUESHEET_FORMAT, self.media_catalog_number, - self.lead_in_samples, flags, len(self.tracks)) - f.write(packed) - for track in self.tracks: - track_flags = 0 - track_flags |= (track.type & 1) << 7 - if track.pre_emphasis: - track_flags |= 0x40 - track_packed = struct.pack( - self.__CUESHEET_TRACK_FORMAT, track.start_offset, - track.track_number, track.isrc, track_flags, - len(track.indexes)) - f.write(track_packed) - for index in track.indexes: - index_packed = struct.pack( - self.__CUESHEET_TRACKINDEX_FORMAT, - index.index_offset, index.index_number) - f.write(index_packed) - return f.getvalue() - - def __repr__(self): - return (("<%s media_catalog_number=%r, lead_in=%r, compact_disc=%r, " - "tracks=%r>") % - (type(self).__name__, self.media_catalog_number, - self.lead_in_samples, self.compact_disc, self.tracks)) - - -class Picture(MetadataBlock): - """Read and write FLAC embed pictures. - - Attributes: - - * type -- picture type (same as types for ID3 APIC frames) - * mime -- MIME type of the picture - * desc -- picture's description - * width -- width in pixels - * height -- height in pixels - * depth -- color depth in bits-per-pixel - * colors -- number of colors for indexed palettes (like GIF), - 0 for non-indexed - * data -- picture data - - To create a picture from file (in order to add to a FLAC file), - instantiate this object without passing anything to the constructor and - then set the properties manually:: - - p = Picture() - - with open("Folder.jpg", "rb") as f: - pic.data = f.read() - - pic.type = id3.PictureType.COVER_FRONT - pic.mime = u"image/jpeg" - pic.width = 500 - pic.height = 500 - pic.depth = 16 # color depth - """ - - code = 6 - _distrust_size = True - - def __init__(self, data=None): - self.type = 0 - self.mime = u'' - self.desc = u'' - self.width = 0 - self.height = 0 - self.depth = 0 - self.colors = 0 - self.data = b'' - super(Picture, self).__init__(data) - - def __eq__(self, other): - try: - return (self.type == other.type and - self.mime == other.mime and - self.desc == other.desc and - self.width == other.width and - self.height == other.height and - self.depth == other.depth and - self.colors == other.colors and - self.data == other.data) - except (AttributeError, TypeError): - return False - - __hash__ = MetadataBlock.__hash__ - - def load(self, data): - self.type, length = struct.unpack('>2I', data.read(8)) - self.mime = data.read(length).decode('UTF-8', 'replace') - length, = struct.unpack('>I', data.read(4)) - self.desc = data.read(length).decode('UTF-8', 'replace') - (self.width, self.height, self.depth, - self.colors, length) = struct.unpack('>5I', data.read(20)) - self.data = data.read(length) - - def write(self): - f = cBytesIO() - mime = self.mime.encode('UTF-8') - f.write(struct.pack('>2I', self.type, len(mime))) - f.write(mime) - desc = self.desc.encode('UTF-8') - f.write(struct.pack('>I', len(desc))) - f.write(desc) - f.write(struct.pack('>5I', self.width, self.height, self.depth, - self.colors, len(self.data))) - f.write(self.data) - return f.getvalue() - - def __repr__(self): - return "<%s '%s' (%d bytes)>" % (type(self).__name__, self.mime, - len(self.data)) - - -class Padding(MetadataBlock): - """Empty padding space for metadata blocks. - - To avoid rewriting the entire FLAC file when editing comments, - metadata is often padded. Padding should occur at the end, and no - more than one padding block should be in any FLAC file. 
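The Picture docstring example above creates "p = Picture()" but then assigns through "pic"; a consistent version of the same sketch, attached to a file via add_picture (documented further below)::

    from mutagen import id3
    from mutagen.flac import FLAC, Picture

    pic = Picture()
    with open("Folder.jpg", "rb") as f:
        pic.data = f.read()
    pic.type = id3.PictureType.COVER_FRONT
    pic.mime = u"image/jpeg"
    pic.width = 500
    pic.height = 500
    pic.depth = 16   # color depth

    audio = FLAC("example.flac")   # filename is illustrative
    audio.add_picture(pic)
    audio.save()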
- """ - - code = 1 - - def __init__(self, data=b""): - super(Padding, self).__init__(data) - - def load(self, data): - self.length = len(data.read()) - - def write(self): - try: - return b"\x00" * self.length - # On some 64 bit platforms this won't generate a MemoryError - # or OverflowError since you might have enough RAM, but it - # still generates a ValueError. On other 64 bit platforms, - # this will still succeed for extremely large values. - # Those should never happen in the real world, and if they - # do, writeblocks will catch it. - except (OverflowError, ValueError, MemoryError): - raise error("cannot write %d bytes" % self.length) - - def __eq__(self, other): - return isinstance(other, Padding) and self.length == other.length - - __hash__ = MetadataBlock.__hash__ - - def __repr__(self): - return "<%s (%d bytes)>" % (type(self).__name__, self.length) - - -class FLAC(mutagen.FileType): - """A FLAC audio file. - - Attributes: - - * cuesheet -- CueSheet object, if any - * seektable -- SeekTable object, if any - * pictures -- list of embedded pictures - """ - - _mimes = ["audio/x-flac", "application/x-flac"] - - info = None - """A `StreamInfo`""" - - tags = None - """A `VCommentDict`""" - - METADATA_BLOCKS = [StreamInfo, Padding, None, SeekTable, VCFLACDict, - CueSheet, Picture] - """Known metadata block types, indexed by ID.""" - - @staticmethod - def score(filename, fileobj, header_data): - return (header_data.startswith(b"fLaC") + - endswith(filename.lower(), ".flac") * 3) - - def __read_metadata_block(self, fileobj): - byte = ord(fileobj.read(1)) - size = to_int_be(fileobj.read(3)) - code = byte & 0x7F - last_block = bool(byte & 0x80) - - try: - block_type = self.METADATA_BLOCKS[code] or MetadataBlock - except IndexError: - block_type = MetadataBlock - - if block_type._distrust_size: - # Some jackass is writing broken Metadata block length - # for Vorbis comment blocks, and the FLAC reference - # implementaton can parse them (mostly by accident), - # so we have to too. Instead of parsing the size - # given, parse an actual Vorbis comment, leaving - # fileobj in the right position. - # http://code.google.com/p/mutagen/issues/detail?id=52 - # ..same for the Picture block: - # http://code.google.com/p/mutagen/issues/detail?id=106 - start = fileobj.tell() - block = block_type(fileobj) - real_size = fileobj.tell() - start - if real_size > MetadataBlock._MAX_SIZE: - block._invalid_overflow_size = size - else: - data = fileobj.read(size) - block = block_type(data) - block.code = code - - if block.code == VCFLACDict.code: - if self.tags is None: - self.tags = block - else: - raise FLACVorbisError("> 1 Vorbis comment block found") - elif block.code == CueSheet.code: - if self.cuesheet is None: - self.cuesheet = block - else: - raise error("> 1 CueSheet block found") - elif block.code == SeekTable.code: - if self.seektable is None: - self.seektable = block - else: - raise error("> 1 SeekTable block found") - self.metadata_blocks.append(block) - return not last_block - - def add_tags(self): - """Add a Vorbis comment block to the file.""" - if self.tags is None: - self.tags = VCFLACDict() - self.metadata_blocks.append(self.tags) - else: - raise FLACVorbisError("a Vorbis comment already exists") - - add_vorbiscomment = add_tags - - def delete(self, filename=None): - """Remove Vorbis comments from a file. - - If no filename is given, the one most recently loaded is used. 
- """ - if filename is None: - filename = self.filename - - if self.tags is not None: - self.metadata_blocks.remove(self.tags) - self.save(padding=lambda x: 0) - self.metadata_blocks.append(self.tags) - self.tags.clear() - - vc = property(lambda s: s.tags, doc="Alias for tags; don't use this.") - - def load(self, filename): - """Load file information from a filename.""" - - self.metadata_blocks = [] - self.tags = None - self.cuesheet = None - self.seektable = None - self.filename = filename - fileobj = StrictFileObject(open(filename, "rb")) - try: - self.__check_header(fileobj) - while self.__read_metadata_block(fileobj): - pass - finally: - fileobj.close() - - try: - self.metadata_blocks[0].length - except (AttributeError, IndexError): - raise FLACNoHeaderError("Stream info block not found") - - @property - def info(self): - return self.metadata_blocks[0] - - def add_picture(self, picture): - """Add a new picture to the file.""" - self.metadata_blocks.append(picture) - - def clear_pictures(self): - """Delete all pictures from the file.""" - - blocks = [b for b in self.metadata_blocks if b.code != Picture.code] - self.metadata_blocks = blocks - - @property - def pictures(self): - """List of embedded pictures""" - - return [b for b in self.metadata_blocks if b.code == Picture.code] - - def save(self, filename=None, deleteid3=False, padding=None): - """Save metadata blocks to a file. - - If no filename is given, the one most recently loaded is used. - """ - - if filename is None: - filename = self.filename - - with open(filename, 'rb+') as f: - header = self.__check_header(f) - audio_offset = self.__find_audio_offset(f) - # "fLaC" and maybe ID3 - available = audio_offset - header - - # Delete ID3v2 - if deleteid3 and header > 4: - available += header - 4 - header = 4 - - content_size = get_size(f) - audio_offset - assert content_size >= 0 - data = MetadataBlock._writeblocks( - self.metadata_blocks, available, content_size, padding) - data_size = len(data) - - resize_bytes(f, available, data_size, header) - f.seek(header - 4) - f.write(b"fLaC") - f.write(data) - - # Delete ID3v1 - if deleteid3: - try: - f.seek(-128, 2) - except IOError: - pass - else: - if f.read(3) == b"TAG": - f.seek(-128, 2) - f.truncate() - - def __find_audio_offset(self, fileobj): - byte = 0x00 - while not (byte & 0x80): - byte = ord(fileobj.read(1)) - size = to_int_be(fileobj.read(3)) - try: - block_type = self.METADATA_BLOCKS[byte & 0x7F] - except IndexError: - block_type = None - - if block_type and block_type._distrust_size: - # See comments in read_metadata_block; the size can't - # be trusted for Vorbis comment blocks and Picture block - block_type(fileobj) - else: - fileobj.read(size) - return fileobj.tell() - - def __check_header(self, fileobj): - """Returns the offset of the flac block start - (skipping id3 tags if found). The passed fileobj will be advanced to - that offset as well. 
- """ - - size = 4 - header = fileobj.read(4) - if header != b"fLaC": - size = None - if header[:3] == b"ID3": - size = 14 + BitPaddedInt(fileobj.read(6)[2:]) - fileobj.seek(size - 4) - if fileobj.read(4) != b"fLaC": - size = None - if size is None: - raise FLACNoHeaderError( - "%r is not a valid FLAC file" % fileobj.name) - return size - - -Open = FLAC - - -def delete(filename): - """Remove tags from a file.""" - FLAC(filename).delete() diff --git a/resources/lib/mutagen/id3/__init__.py b/resources/lib/mutagen/id3/__init__.py deleted file mode 100644 index 9aef865b..00000000 --- a/resources/lib/mutagen/id3/__init__.py +++ /dev/null @@ -1,1093 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Michael Urman -# 2006 Lukas Lalinsky -# 2013 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -"""ID3v2 reading and writing. - -This is based off of the following references: - -* http://id3.org/id3v2.4.0-structure -* http://id3.org/id3v2.4.0-frames -* http://id3.org/id3v2.3.0 -* http://id3.org/id3v2-00 -* http://id3.org/ID3v1 - -Its largest deviation from the above (versions 2.3 and 2.2) is that it -will not interpret the / characters as a separator, and will almost -always accept null separators to generate multi-valued text frames. - -Because ID3 frame structure differs between frame types, each frame is -implemented as a different class (e.g. TIT2 as mutagen.id3.TIT2). Each -frame's documentation contains a list of its attributes. - -Since this file's documentation is a little unwieldy, you are probably -interested in the :class:`ID3` class to start with. -""" - -__all__ = ['ID3', 'ID3FileType', 'Frames', 'Open', 'delete'] - -import struct -import errno - -from struct import unpack, pack, error as StructError - -import mutagen -from mutagen._util import insert_bytes, delete_bytes, DictProxy, enum -from mutagen._tags import PaddingInfo -from .._compat import chr_, PY3 - -from ._util import * -from ._frames import * -from ._specs import * - - -@enum -class ID3v1SaveOptions(object): - - REMOVE = 0 - """ID3v1 tags will be removed""" - - UPDATE = 1 - """ID3v1 tags will be updated but not added""" - - CREATE = 2 - """ID3v1 tags will be created and/or updated""" - - -def _fullread(fileobj, size): - """Read a certain number of bytes from the source file. - - Raises ValueError on invalid size input or EOFError/IOError. 
- """ - - if size < 0: - raise ValueError('Requested bytes (%s) less than zero' % size) - data = fileobj.read(size) - if len(data) != size: - raise EOFError("Not enough data to read") - return data - - -class ID3Header(object): - - _V24 = (2, 4, 0) - _V23 = (2, 3, 0) - _V22 = (2, 2, 0) - _V11 = (1, 1) - - f_unsynch = property(lambda s: bool(s._flags & 0x80)) - f_extended = property(lambda s: bool(s._flags & 0x40)) - f_experimental = property(lambda s: bool(s._flags & 0x20)) - f_footer = property(lambda s: bool(s._flags & 0x10)) - - def __init__(self, fileobj=None): - """Raises ID3NoHeaderError, ID3UnsupportedVersionError or error""" - - if fileobj is None: - # for testing - self._flags = 0 - return - - fn = getattr(fileobj, "name", "") - try: - data = _fullread(fileobj, 10) - except EOFError: - raise ID3NoHeaderError("%s: too small" % fn) - - id3, vmaj, vrev, flags, size = unpack('>3sBBB4s', data) - self._flags = flags - self.size = BitPaddedInt(size) + 10 - self.version = (2, vmaj, vrev) - - if id3 != b'ID3': - raise ID3NoHeaderError("%r doesn't start with an ID3 tag" % fn) - - if vmaj not in [2, 3, 4]: - raise ID3UnsupportedVersionError("%r ID3v2.%d not supported" - % (fn, vmaj)) - - if not BitPaddedInt.has_valid_padding(size): - raise error("Header size not synchsafe") - - if (self.version >= self._V24) and (flags & 0x0f): - raise error( - "%r has invalid flags %#02x" % (fn, flags)) - elif (self._V23 <= self.version < self._V24) and (flags & 0x1f): - raise error( - "%r has invalid flags %#02x" % (fn, flags)) - - if self.f_extended: - try: - extsize_data = _fullread(fileobj, 4) - except EOFError: - raise error("%s: too small" % fn) - - if PY3: - frame_id = extsize_data.decode("ascii", "replace") - else: - frame_id = extsize_data - - if frame_id in Frames: - # Some tagger sets the extended header flag but - # doesn't write an extended header; in this case, the - # ID3 data follows immediately. Since no extended - # header is going to be long enough to actually match - # a frame, and if it's *not* a frame we're going to be - # completely lost anyway, this seems to be the most - # correct check. - # http://code.google.com/p/quodlibet/issues/detail?id=126 - self._flags ^= 0x40 - extsize = 0 - fileobj.seek(-4, 1) - elif self.version >= self._V24: - # "Where the 'Extended header size' is the size of the whole - # extended header, stored as a 32 bit synchsafe integer." - extsize = BitPaddedInt(extsize_data) - 4 - if not BitPaddedInt.has_valid_padding(extsize_data): - raise error( - "Extended header size not synchsafe") - else: - # "Where the 'Extended header size', currently 6 or 10 bytes, - # excludes itself." - extsize = unpack('>L', extsize_data)[0] - - try: - self._extdata = _fullread(fileobj, extsize) - except EOFError: - raise error("%s: too small" % fn) - - -class ID3(DictProxy, mutagen.Metadata): - """A file with an ID3v2 tag. - - Attributes: - - * version -- ID3 tag version as a tuple - * unknown_frames -- raw frame data of any unknown frames found - * size -- the total size of the ID3 tag, including the header - """ - - __module__ = "mutagen.id3" - - PEDANTIC = True - """Deprecated. 
Doesn't have any effect""" - - filename = None - - def __init__(self, *args, **kwargs): - self.unknown_frames = [] - self.__unknown_version = None - self._header = None - self._version = (2, 4, 0) - super(ID3, self).__init__(*args, **kwargs) - - @property - def version(self): - """ID3 tag version as a tuple (of the loaded file)""" - - if self._header is not None: - return self._header.version - return self._version - - @version.setter - def version(self, value): - self._version = value - - @property - def f_unsynch(self): - if self._header is not None: - return self._header.f_unsynch - return False - - @property - def f_extended(self): - if self._header is not None: - return self._header.f_extended - return False - - @property - def size(self): - if self._header is not None: - return self._header.size - return 0 - - def _pre_load_header(self, fileobj): - # XXX: for aiff to adjust the offset.. - pass - - def load(self, filename, known_frames=None, translate=True, v2_version=4): - """Load tags from a filename. - - Keyword arguments: - - * filename -- filename to load tag data from - * known_frames -- dict mapping frame IDs to Frame objects - * translate -- Update all tags to ID3v2.3/4 internally. If you - intend to save, this must be true or you have to - call update_to_v23() / update_to_v24() manually. - * v2_version -- if update_to_v23 or update_to_v24 get called (3 or 4) - - Example of loading a custom frame:: - - my_frames = dict(mutagen.id3.Frames) - class XMYF(Frame): ... - my_frames["XMYF"] = XMYF - mutagen.id3.ID3(filename, known_frames=my_frames) - """ - - if v2_version not in (3, 4): - raise ValueError("Only 3 and 4 possible for v2_version") - - self.filename = filename - self.unknown_frames = [] - self.__known_frames = known_frames - self._header = None - self._padding = 0 # for testing - - with open(filename, 'rb') as fileobj: - self._pre_load_header(fileobj) - - try: - self._header = ID3Header(fileobj) - except (ID3NoHeaderError, ID3UnsupportedVersionError): - frames, offset = _find_id3v1(fileobj) - if frames is None: - raise - - self.version = ID3Header._V11 - for v in frames.values(): - self.add(v) - else: - frames = self.__known_frames - if frames is None: - if self.version >= ID3Header._V23: - frames = Frames - elif self.version >= ID3Header._V22: - frames = Frames_2_2 - - try: - data = _fullread(fileobj, self.size - 10) - except (ValueError, EOFError, IOError) as e: - raise error(e) - - for frame in self.__read_frames(data, frames=frames): - if isinstance(frame, Frame): - self.add(frame) - else: - self.unknown_frames.append(frame) - self.__unknown_version = self.version[:2] - - if translate: - if v2_version == 3: - self.update_to_v23() - else: - self.update_to_v24() - - def getall(self, key): - """Return all frames with a given name (the list may be empty). - - This is best explained by examples:: - - id3.getall('TIT2') == [id3['TIT2']] - id3.getall('TTTT') == [] - id3.getall('TXXX') == [TXXX(desc='woo', text='bar'), - TXXX(desc='baz', text='quuuux'), ...] - - Since this is based on the frame's HashKey, which is - colon-separated, you can use it to do things like - ``getall('COMM:MusicMatch')`` or ``getall('TXXX:QuodLibet:')``. 
- """ - if key in self: - return [self[key]] - else: - key = key + ":" - return [v for s, v in self.items() if s.startswith(key)] - - def delall(self, key): - """Delete all tags of a given kind; see getall.""" - if key in self: - del(self[key]) - else: - key = key + ":" - for k in list(self.keys()): - if k.startswith(key): - del(self[k]) - - def setall(self, key, values): - """Delete frames of the given type and add frames in 'values'.""" - self.delall(key) - for tag in values: - self[tag.HashKey] = tag - - def pprint(self): - """Return tags in a human-readable format. - - "Human-readable" is used loosely here. The format is intended - to mirror that used for Vorbis or APEv2 output, e.g. - - ``TIT2=My Title`` - - However, ID3 frames can have multiple keys: - - ``POPM=user@example.org=3 128/255`` - """ - frames = sorted(Frame.pprint(s) for s in self.values()) - return "\n".join(frames) - - def loaded_frame(self, tag): - """Deprecated; use the add method.""" - # turn 2.2 into 2.3/2.4 tags - if len(type(tag).__name__) == 3: - tag = type(tag).__base__(tag) - self[tag.HashKey] = tag - - # add = loaded_frame (and vice versa) break applications that - # expect to be able to override loaded_frame (e.g. Quod Libet), - # as does making loaded_frame call add. - def add(self, frame): - """Add a frame to the tag.""" - return self.loaded_frame(frame) - - def __read_frames(self, data, frames): - assert self.version >= ID3Header._V22 - - if self.version < ID3Header._V24 and self.f_unsynch: - try: - data = unsynch.decode(data) - except ValueError: - pass - - if self.version >= ID3Header._V23: - if self.version < ID3Header._V24: - bpi = int - else: - bpi = _determine_bpi(data, frames) - - while data: - header = data[:10] - try: - name, size, flags = unpack('>4sLH', header) - except struct.error: - return # not enough header - if name.strip(b'\x00') == b'': - return - - size = bpi(size) - framedata = data[10:10 + size] - data = data[10 + size:] - self._padding = len(data) - if size == 0: - continue # drop empty frames - - if PY3: - try: - name = name.decode('ascii') - except UnicodeDecodeError: - continue - - try: - # someone writes 2.3 frames with 2.2 names - if name[-1] == "\x00": - tag = Frames_2_2[name[:-1]] - name = tag.__base__.__name__ - - tag = frames[name] - except KeyError: - if is_valid_frame_id(name): - yield header + framedata - else: - try: - yield tag._fromData(self._header, flags, framedata) - except NotImplementedError: - yield header + framedata - except ID3JunkFrameError: - pass - elif self.version >= ID3Header._V22: - while data: - header = data[0:6] - try: - name, size = unpack('>3s3s', header) - except struct.error: - return # not enough header - size, = struct.unpack('>L', b'\x00' + size) - if name.strip(b'\x00') == b'': - return - - framedata = data[6:6 + size] - data = data[6 + size:] - self._padding = len(data) - if size == 0: - continue # drop empty frames - - if PY3: - try: - name = name.decode('ascii') - except UnicodeDecodeError: - continue - - try: - tag = frames[name] - except KeyError: - if is_valid_frame_id(name): - yield header + framedata - else: - try: - yield tag._fromData(self._header, 0, framedata) - except (ID3EncryptionUnsupportedError, - NotImplementedError): - yield header + framedata - except ID3JunkFrameError: - pass - - def _prepare_data(self, fileobj, start, available, v2_version, v23_sep, - pad_func): - if v2_version == 3: - version = ID3Header._V23 - elif v2_version == 4: - version = ID3Header._V24 - else: - raise ValueError("Only 3 or 4 allowed for 
v2_version") - - # Sort frames by 'importance' - order = ["TIT2", "TPE1", "TRCK", "TALB", "TPOS", "TDRC", "TCON"] - order = dict((b, a) for a, b in enumerate(order)) - last = len(order) - frames = sorted(self.items(), - key=lambda a: (order.get(a[0][:4], last), a[0])) - - framedata = [self.__save_frame(frame, version=version, v23_sep=v23_sep) - for (key, frame) in frames] - - # only write unknown frames if they were loaded from the version - # we are saving with or upgraded to it - if self.__unknown_version == version[:2]: - framedata.extend(data for data in self.unknown_frames - if len(data) > 10) - - needed = sum(map(len, framedata)) + 10 - - fileobj.seek(0, 2) - trailing_size = fileobj.tell() - start - - info = PaddingInfo(available - needed, trailing_size) - new_padding = info._get_padding(pad_func) - if new_padding < 0: - raise error("invalid padding") - new_size = needed + new_padding - - new_framesize = BitPaddedInt.to_str(new_size - 10, width=4) - header = pack('>3sBBB4s', b'ID3', v2_version, 0, 0, new_framesize) - - data = bytearray(header) - for frame in framedata: - data += frame - assert new_size >= len(data) - data += (new_size - len(data)) * b'\x00' - assert new_size == len(data) - - return data - - def save(self, filename=None, v1=1, v2_version=4, v23_sep='/', - padding=None): - """Save changes to a file. - - Args: - filename: - Filename to save the tag to. If no filename is given, - the one most recently loaded is used. - v1 (ID3v1SaveOptions): - if 0, ID3v1 tags will be removed. - if 1, ID3v1 tags will be updated but not added. - if 2, ID3v1 tags will be created and/or updated - v2 (int): - version of ID3v2 tags (3 or 4). - v23_sep (str): - the separator used to join multiple text values - if v2_version == 3. Defaults to '/' but if it's None - will be the ID3v2v2.4 null separator. - padding (function): - A function taking a PaddingInfo which should - return the amount of padding to use. If None (default) - will default to something reasonable. - - By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3 - tags, you must call method update_to_v23 before saving the file. - - The lack of a way to update only an ID3v1 tag is intentional. - - Can raise id3.error. - """ - - if filename is None: - filename = self.filename - - try: - f = open(filename, 'rb+') - except IOError as err: - from errno import ENOENT - if err.errno != ENOENT: - raise - f = open(filename, 'ab') # create, then reopen - f = open(filename, 'rb+') - - try: - try: - header = ID3Header(f) - except ID3NoHeaderError: - old_size = 0 - else: - old_size = header.size - - data = self._prepare_data( - f, 0, old_size, v2_version, v23_sep, padding) - new_size = len(data) - - if (old_size < new_size): - insert_bytes(f, new_size - old_size, old_size) - elif (old_size > new_size): - delete_bytes(f, old_size - new_size, new_size) - f.seek(0) - f.write(data) - - self.__save_v1(f, v1) - - finally: - f.close() - - def __save_v1(self, f, v1): - tag, offset = _find_id3v1(f) - has_v1 = tag is not None - - f.seek(offset, 2) - if v1 == ID3v1SaveOptions.UPDATE and has_v1 or \ - v1 == ID3v1SaveOptions.CREATE: - f.write(MakeID3v1(self)) - else: - f.truncate() - - def delete(self, filename=None, delete_v1=True, delete_v2=True): - """Remove tags from a file. - - If no filename is given, the one most recently loaded is used. 
- - Keyword arguments: - - * delete_v1 -- delete any ID3v1 tag - * delete_v2 -- delete any ID3v2 tag - """ - if filename is None: - filename = self.filename - delete(filename, delete_v1, delete_v2) - self.clear() - - def __save_frame(self, frame, name=None, version=ID3Header._V24, - v23_sep=None): - flags = 0 - if isinstance(frame, TextFrame): - if len(str(frame)) == 0: - return b'' - - if version == ID3Header._V23: - framev23 = frame._get_v23_frame(sep=v23_sep) - framedata = framev23._writeData() - else: - framedata = frame._writeData() - - usize = len(framedata) - if usize > 2048: - # Disabled as this causes iTunes and other programs - # to fail to find these frames, which usually includes - # e.g. APIC. - # framedata = BitPaddedInt.to_str(usize) + framedata.encode('zlib') - # flags |= Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN - pass - - if version == ID3Header._V24: - bits = 7 - elif version == ID3Header._V23: - bits = 8 - else: - raise ValueError - - datasize = BitPaddedInt.to_str(len(framedata), width=4, bits=bits) - - if name is not None: - assert isinstance(name, bytes) - frame_name = name - else: - frame_name = type(frame).__name__ - if PY3: - frame_name = frame_name.encode("ascii") - - header = pack('>4s4sH', frame_name, datasize, flags) - return header + framedata - - def __update_common(self): - """Updates done by both v23 and v24 update""" - - if "TCON" in self: - # Get rid of "(xx)Foobr" format. - self["TCON"].genres = self["TCON"].genres - - # ID3v2.2 LNK frames are just way too different to upgrade. - for frame in self.getall("LINK"): - if len(frame.frameid) != 4: - del self[frame.HashKey] - - mimes = {"PNG": "image/png", "JPG": "image/jpeg"} - for pic in self.getall("APIC"): - if pic.mime in mimes: - newpic = APIC( - encoding=pic.encoding, mime=mimes[pic.mime], - type=pic.type, desc=pic.desc, data=pic.data) - self.add(newpic) - - def update_to_v24(self): - """Convert older tags into an ID3v2.4 tag. - - This updates old ID3v2 frames to ID3v2.4 ones (e.g. TYER to - TDRC). If you intend to save tags, you must call this function - at some point; it is called by default when loading the tag. - """ - - self.__update_common() - - if self.__unknown_version == (2, 3): - # convert unknown 2.3 frames (flags/size) to 2.4 - converted = [] - for frame in self.unknown_frames: - try: - name, size, flags = unpack('>4sLH', frame[:10]) - except struct.error: - continue - - try: - frame = BinaryFrame._fromData( - self._header, flags, frame[10:]) - except (error, NotImplementedError): - continue - - converted.append(self.__save_frame(frame, name=name)) - self.unknown_frames[:] = converted - self.__unknown_version = (2, 4) - - # TDAT, TYER, and TIME have been turned into TDRC. - try: - date = text_type(self.get("TYER", "")) - if date.strip(u"\x00"): - self.pop("TYER") - dat = text_type(self.get("TDAT", "")) - if dat.strip("\x00"): - self.pop("TDAT") - date = "%s-%s-%s" % (date, dat[2:], dat[:2]) - time = text_type(self.get("TIME", "")) - if time.strip("\x00"): - self.pop("TIME") - date += "T%s:%s:00" % (time[:2], time[2:]) - if "TDRC" not in self: - self.add(TDRC(encoding=0, text=date)) - except UnicodeDecodeError: - # Old ID3 tags have *lots* of Unicode problems, so if TYER - # is bad, just chuck the frames. - pass - - # TORY can be the first part of a TDOR. - if "TORY" in self: - f = self.pop("TORY") - if "TDOR" not in self: - try: - self.add(TDOR(encoding=0, text=str(f))) - except UnicodeDecodeError: - pass - - # IPLS is now TIPL. 
- if "IPLS" in self: - f = self.pop("IPLS") - if "TIPL" not in self: - self.add(TIPL(encoding=f.encoding, people=f.people)) - - # These can't be trivially translated to any ID3v2.4 tags, or - # should have been removed already. - for key in ["RVAD", "EQUA", "TRDA", "TSIZ", "TDAT", "TIME", "CRM"]: - if key in self: - del(self[key]) - - def update_to_v23(self): - """Convert older (and newer) tags into an ID3v2.3 tag. - - This updates incompatible ID3v2 frames to ID3v2.3 ones. If you - intend to save tags as ID3v2.3, you must call this function - at some point. - - If you want to to go off spec and include some v2.4 frames - in v2.3, remove them before calling this and add them back afterwards. - """ - - self.__update_common() - - # we could downgrade unknown v2.4 frames here, but given that - # the main reason to save v2.3 is compatibility and this - # might increase the chance of some parser breaking.. better not - - # TMCL, TIPL -> TIPL - if "TIPL" in self or "TMCL" in self: - people = [] - if "TIPL" in self: - f = self.pop("TIPL") - people.extend(f.people) - if "TMCL" in self: - f = self.pop("TMCL") - people.extend(f.people) - if "IPLS" not in self: - self.add(IPLS(encoding=f.encoding, people=people)) - - # TDOR -> TORY - if "TDOR" in self: - f = self.pop("TDOR") - if f.text: - d = f.text[0] - if d.year and "TORY" not in self: - self.add(TORY(encoding=f.encoding, text="%04d" % d.year)) - - # TDRC -> TYER, TDAT, TIME - if "TDRC" in self: - f = self.pop("TDRC") - if f.text: - d = f.text[0] - if d.year and "TYER" not in self: - self.add(TYER(encoding=f.encoding, text="%04d" % d.year)) - if d.month and d.day and "TDAT" not in self: - self.add(TDAT(encoding=f.encoding, - text="%02d%02d" % (d.day, d.month))) - if d.hour and d.minute and "TIME" not in self: - self.add(TIME(encoding=f.encoding, - text="%02d%02d" % (d.hour, d.minute))) - - # New frames added in v2.4 - v24_frames = [ - 'ASPI', 'EQU2', 'RVA2', 'SEEK', 'SIGN', 'TDEN', 'TDOR', - 'TDRC', 'TDRL', 'TDTG', 'TIPL', 'TMCL', 'TMOO', 'TPRO', - 'TSOA', 'TSOP', 'TSOT', 'TSST', - ] - - for key in v24_frames: - if key in self: - del(self[key]) - - -def delete(filename, delete_v1=True, delete_v2=True): - """Remove tags from a file. - - Keyword arguments: - - * delete_v1 -- delete any ID3v1 tag - * delete_v2 -- delete any ID3v2 tag - """ - - with open(filename, 'rb+') as f: - - if delete_v1: - tag, offset = _find_id3v1(f) - if tag is not None: - f.seek(offset, 2) - f.truncate() - - # technically an insize=0 tag is invalid, but we delete it anyway - # (primarily because we used to write it) - if delete_v2: - f.seek(0, 0) - idata = f.read(10) - try: - id3, vmaj, vrev, flags, insize = unpack('>3sBBB4s', idata) - except struct.error: - id3, insize = b'', -1 - insize = BitPaddedInt(insize) - if id3 == b'ID3' and insize >= 0: - delete_bytes(f, insize + 10, 0) - - -# support open(filename) as interface -Open = ID3 - - -def _determine_bpi(data, frames, EMPTY=b"\x00" * 10): - """Takes id3v2.4 frame data and determines if ints or bitpaddedints - should be used for parsing. Needed because iTunes used to write - normal ints for frame sizes. 
- """ - - # count number of tags found as BitPaddedInt and how far past - o = 0 - asbpi = 0 - while o < len(data) - 10: - part = data[o:o + 10] - if part == EMPTY: - bpioff = -((len(data) - o) % 10) - break - name, size, flags = unpack('>4sLH', part) - size = BitPaddedInt(size) - o += 10 + size - if PY3: - try: - name = name.decode("ascii") - except UnicodeDecodeError: - continue - if name in frames: - asbpi += 1 - else: - bpioff = o - len(data) - - # count number of tags found as int and how far past - o = 0 - asint = 0 - while o < len(data) - 10: - part = data[o:o + 10] - if part == EMPTY: - intoff = -((len(data) - o) % 10) - break - name, size, flags = unpack('>4sLH', part) - o += 10 + size - if PY3: - try: - name = name.decode("ascii") - except UnicodeDecodeError: - continue - if name in frames: - asint += 1 - else: - intoff = o - len(data) - - # if more tags as int, or equal and bpi is past and int is not - if asint > asbpi or (asint == asbpi and (bpioff >= 1 and intoff <= 1)): - return int - return BitPaddedInt - - -def _find_id3v1(fileobj): - """Returns a tuple of (id3tag, offset_to_end) or (None, 0) - - offset mainly because we used to write too short tags in some cases and - we need the offset to delete them. - """ - - # id3v1 is always at the end (after apev2) - - extra_read = b"APETAGEX".index(b"TAG") - - try: - fileobj.seek(-128 - extra_read, 2) - except IOError as e: - if e.errno == errno.EINVAL: - # If the file is too small, might be ok since we wrote too small - # tags at some point. let's see how the parsing goes.. - fileobj.seek(0, 0) - else: - raise - - data = fileobj.read(128 + extra_read) - try: - idx = data.index(b"TAG") - except ValueError: - return (None, 0) - else: - # FIXME: make use of the apev2 parser here - # if TAG is part of APETAGEX assume this is an APEv2 tag - try: - ape_idx = data.index(b"APETAGEX") - except ValueError: - pass - else: - if idx == ape_idx + extra_read: - return (None, 0) - - tag = ParseID3v1(data[idx:]) - if tag is None: - return (None, 0) - - offset = idx - len(data) - return (tag, offset) - - -# ID3v1.1 support. -def ParseID3v1(data): - """Parse an ID3v1 tag, returning a list of ID3v2.4 frames. - - Returns a {frame_name: frame} dict or None. - """ - - try: - data = data[data.index(b"TAG"):] - except ValueError: - return None - if 128 < len(data) or len(data) < 124: - return None - - # Issue #69 - Previous versions of Mutagen, when encountering - # out-of-spec TDRC and TYER frames of less than four characters, - # wrote only the characters available - e.g. "1" or "" - into the - # year field. To parse those, reduce the size of the year field. - # Amazingly, "0s" works as a struct format string. 
- unpack_fmt = "3s30s30s30s%ds29sBB" % (len(data) - 124) - - try: - tag, title, artist, album, year, comment, track, genre = unpack( - unpack_fmt, data) - except StructError: - return None - - if tag != b"TAG": - return None - - def fix(data): - return data.split(b"\x00")[0].strip().decode('latin1') - - title, artist, album, year, comment = map( - fix, [title, artist, album, year, comment]) - - frames = {} - if title: - frames["TIT2"] = TIT2(encoding=0, text=title) - if artist: - frames["TPE1"] = TPE1(encoding=0, text=[artist]) - if album: - frames["TALB"] = TALB(encoding=0, text=album) - if year: - frames["TDRC"] = TDRC(encoding=0, text=year) - if comment: - frames["COMM"] = COMM( - encoding=0, lang="eng", desc="ID3v1 Comment", text=comment) - # Don't read a track number if it looks like the comment was - # padded with spaces instead of nulls (thanks, WinAmp). - if track and ((track != 32) or (data[-3] == b'\x00'[0])): - frames["TRCK"] = TRCK(encoding=0, text=str(track)) - if genre != 255: - frames["TCON"] = TCON(encoding=0, text=str(genre)) - return frames - - -def MakeID3v1(id3): - """Return an ID3v1.1 tag string from a dict of ID3v2.4 frames.""" - - v1 = {} - - for v2id, name in {"TIT2": "title", "TPE1": "artist", - "TALB": "album"}.items(): - if v2id in id3: - text = id3[v2id].text[0].encode('latin1', 'replace')[:30] - else: - text = b"" - v1[name] = text + (b"\x00" * (30 - len(text))) - - if "COMM" in id3: - cmnt = id3["COMM"].text[0].encode('latin1', 'replace')[:28] - else: - cmnt = b"" - v1["comment"] = cmnt + (b"\x00" * (29 - len(cmnt))) - - if "TRCK" in id3: - try: - v1["track"] = chr_(+id3["TRCK"]) - except ValueError: - v1["track"] = b"\x00" - else: - v1["track"] = b"\x00" - - if "TCON" in id3: - try: - genre = id3["TCON"].genres[0] - except IndexError: - pass - else: - if genre in TCON.GENRES: - v1["genre"] = chr_(TCON.GENRES.index(genre)) - if "genre" not in v1: - v1["genre"] = b"\xff" - - if "TDRC" in id3: - year = text_type(id3["TDRC"]).encode('ascii') - elif "TYER" in id3: - year = text_type(id3["TYER"]).encode('ascii') - else: - year = b"" - v1["year"] = (year + b"\x00\x00\x00\x00")[:4] - - return ( - b"TAG" + - v1["title"] + - v1["artist"] + - v1["album"] + - v1["year"] + - v1["comment"] + - v1["track"] + - v1["genre"] - ) - - -class ID3FileType(mutagen.FileType): - """An unknown type of file with ID3 tags.""" - - ID3 = ID3 - - class _Info(mutagen.StreamInfo): - length = 0 - - def __init__(self, fileobj, offset): - pass - - @staticmethod - def pprint(): - return "Unknown format with ID3 tag" - - @staticmethod - def score(filename, fileobj, header_data): - return header_data.startswith(b"ID3") - - def add_tags(self, ID3=None): - """Add an empty ID3 tag to the file. - - A custom tag reader may be used in instead of the default - mutagen.id3.ID3 object, e.g. an EasyID3 reader. - """ - if ID3 is None: - ID3 = self.ID3 - if self.tags is None: - self.ID3 = ID3 - self.tags = ID3() - else: - raise error("an ID3 tag already exists") - - def load(self, filename, ID3=None, **kwargs): - """Load stream and tag information from a file. - - A custom tag reader may be used in instead of the default - mutagen.id3.ID3 object, e.g. an EasyID3 reader. - """ - - if ID3 is None: - ID3 = self.ID3 - else: - # If this was initialized with EasyID3, remember that for - # when tags are auto-instantiated in add_tags. 
- self.ID3 = ID3 - self.filename = filename - try: - self.tags = ID3(filename, **kwargs) - except ID3NoHeaderError: - self.tags = None - - if self.tags is not None: - try: - offset = self.tags.size - except AttributeError: - offset = None - else: - offset = None - - with open(filename, "rb") as fileobj: - self.info = self._Info(fileobj, offset) diff --git a/resources/lib/mutagen/id3/__pycache__/__init__.cpython-35.pyc b/resources/lib/mutagen/id3/__pycache__/__init__.cpython-35.pyc deleted file mode 100644 index f423db1a..00000000 Binary files a/resources/lib/mutagen/id3/__pycache__/__init__.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/id3/__pycache__/_frames.cpython-35.pyc b/resources/lib/mutagen/id3/__pycache__/_frames.cpython-35.pyc deleted file mode 100644 index 0331df59..00000000 Binary files a/resources/lib/mutagen/id3/__pycache__/_frames.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/id3/__pycache__/_specs.cpython-35.pyc b/resources/lib/mutagen/id3/__pycache__/_specs.cpython-35.pyc deleted file mode 100644 index 9af16aa1..00000000 Binary files a/resources/lib/mutagen/id3/__pycache__/_specs.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/id3/__pycache__/_util.cpython-35.pyc b/resources/lib/mutagen/id3/__pycache__/_util.cpython-35.pyc deleted file mode 100644 index c2c75dad..00000000 Binary files a/resources/lib/mutagen/id3/__pycache__/_util.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/id3/_frames.py b/resources/lib/mutagen/id3/_frames.py deleted file mode 100644 index c185cef3..00000000 --- a/resources/lib/mutagen/id3/_frames.py +++ /dev/null @@ -1,1925 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Michael Urman -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -import zlib -from struct import unpack - -from ._util import ID3JunkFrameError, ID3EncryptionUnsupportedError, unsynch -from ._specs import ( - BinaryDataSpec, StringSpec, Latin1TextSpec, EncodedTextSpec, ByteSpec, - EncodingSpec, ASPIIndexSpec, SizedIntegerSpec, IntegerSpec, - VolumeAdjustmentsSpec, VolumePeakSpec, VolumeAdjustmentSpec, - ChannelSpec, MultiSpec, SynchronizedTextSpec, KeyEventSpec, TimeStampSpec, - EncodedNumericPartTextSpec, EncodedNumericTextSpec, SpecError) -from .._compat import text_type, string_types, swap_to_string, iteritems, izip - - -def is_valid_frame_id(frame_id): - return frame_id.isalnum() and frame_id.isupper() - - -def _bytes2key(b): - assert isinstance(b, bytes) - - return b.decode("latin1") - - -class Frame(object): - """Fundamental unit of ID3 data. - - ID3 tags are split into frames. Each frame has a potentially - different structure, and so this base class is not very featureful. 
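A quick sketch of how frames are built from their spec values (the title text is a placeholder)::

    from mutagen.id3 import TIT2

    title = TIT2(encoding=3, text=["An Example Title"])
    print(title.HashKey)    # 'TIT2' -- plain text frames hash on the frame ID
    print(title.pprint())   # 'TIT2=An Example Title'

    # Passing a frame of the same type copy-constructs it:
    copy = TIT2(title)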
- """ - - FLAG23_ALTERTAG = 0x8000 - FLAG23_ALTERFILE = 0x4000 - FLAG23_READONLY = 0x2000 - FLAG23_COMPRESS = 0x0080 - FLAG23_ENCRYPT = 0x0040 - FLAG23_GROUP = 0x0020 - - FLAG24_ALTERTAG = 0x4000 - FLAG24_ALTERFILE = 0x2000 - FLAG24_READONLY = 0x1000 - FLAG24_GROUPID = 0x0040 - FLAG24_COMPRESS = 0x0008 - FLAG24_ENCRYPT = 0x0004 - FLAG24_UNSYNCH = 0x0002 - FLAG24_DATALEN = 0x0001 - - _framespec = [] - - def __init__(self, *args, **kwargs): - if len(args) == 1 and len(kwargs) == 0 and \ - isinstance(args[0], type(self)): - other = args[0] - # ask the sub class to fill in our data - other._to_other(self) - else: - for checker, val in izip(self._framespec, args): - setattr(self, checker.name, checker.validate(self, val)) - for checker in self._framespec[len(args):]: - try: - validated = checker.validate( - self, kwargs.get(checker.name, None)) - except ValueError as e: - raise ValueError("%s: %s" % (checker.name, e)) - setattr(self, checker.name, validated) - - def _to_other(self, other): - # this impl covers subclasses with the same framespec - if other._framespec is not self._framespec: - raise ValueError - - for checker in other._framespec: - setattr(other, checker.name, getattr(self, checker.name)) - - def _get_v23_frame(self, **kwargs): - """Returns a frame copy which is suitable for writing into a v2.3 tag. - - kwargs get passed to the specs. - """ - - new_kwargs = {} - for checker in self._framespec: - name = checker.name - value = getattr(self, name) - new_kwargs[name] = checker._validate23(self, value, **kwargs) - return type(self)(**new_kwargs) - - @property - def HashKey(self): - """An internal key used to ensure frame uniqueness in a tag""" - - return self.FrameID - - @property - def FrameID(self): - """ID3v2 three or four character frame ID""" - - return type(self).__name__ - - def __repr__(self): - """Python representation of a frame. - - The string returned is a valid Python expression to construct - a copy of this frame. - """ - kw = [] - for attr in self._framespec: - # so repr works during __init__ - if hasattr(self, attr.name): - kw.append('%s=%r' % (attr.name, getattr(self, attr.name))) - return '%s(%s)' % (type(self).__name__, ', '.join(kw)) - - def _readData(self, data): - """Raises ID3JunkFrameError; Returns leftover data""" - - for reader in self._framespec: - if len(data): - try: - value, data = reader.read(self, data) - except SpecError as e: - raise ID3JunkFrameError(e) - else: - raise ID3JunkFrameError("no data left") - setattr(self, reader.name, value) - - return data - - def _writeData(self): - data = [] - for writer in self._framespec: - data.append(writer.write(self, getattr(self, writer.name))) - return b''.join(data) - - def pprint(self): - """Return a human-readable representation of the frame.""" - return "%s=%s" % (type(self).__name__, self._pprint()) - - def _pprint(self): - return "[unrepresentable data]" - - @classmethod - def _fromData(cls, id3, tflags, data): - """Construct this ID3 frame from raw string data. - - Raises: - - ID3JunkFrameError in case parsing failed - NotImplementedError in case parsing isn't implemented - ID3EncryptionUnsupportedError in case the frame is encrypted. - """ - - if id3.version >= id3._V24: - if tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN): - # The data length int is syncsafe in 2.4 (but not 2.3). - # However, we don't actually need the data length int, - # except to work around a QL 0.12 bug, and in that case - # all we need are the raw bytes. 
- datalen_bytes = data[:4] - data = data[4:] - if tflags & Frame.FLAG24_UNSYNCH or id3.f_unsynch: - try: - data = unsynch.decode(data) - except ValueError: - # Some things write synch-unsafe data with either the frame - # or global unsynch flag set. Try to load them as is. - # https://bitbucket.org/lazka/mutagen/issue/210 - # https://bitbucket.org/lazka/mutagen/issue/223 - pass - if tflags & Frame.FLAG24_ENCRYPT: - raise ID3EncryptionUnsupportedError - if tflags & Frame.FLAG24_COMPRESS: - try: - data = zlib.decompress(data) - except zlib.error as err: - # the initial mutagen that went out with QL 0.12 did not - # write the 4 bytes of uncompressed size. Compensate. - data = datalen_bytes + data - try: - data = zlib.decompress(data) - except zlib.error as err: - raise ID3JunkFrameError( - 'zlib: %s: %r' % (err, data)) - - elif id3.version >= id3._V23: - if tflags & Frame.FLAG23_COMPRESS: - usize, = unpack('>L', data[:4]) - data = data[4:] - if tflags & Frame.FLAG23_ENCRYPT: - raise ID3EncryptionUnsupportedError - if tflags & Frame.FLAG23_COMPRESS: - try: - data = zlib.decompress(data) - except zlib.error as err: - raise ID3JunkFrameError('zlib: %s: %r' % (err, data)) - - frame = cls() - frame._readData(data) - return frame - - def __hash__(self): - raise TypeError("Frame objects are unhashable") - - -class FrameOpt(Frame): - """A frame with optional parts. - - Some ID3 frames have optional data; this class extends Frame to - provide support for those parts. - """ - - _optionalspec = [] - - def __init__(self, *args, **kwargs): - super(FrameOpt, self).__init__(*args, **kwargs) - for spec in self._optionalspec: - if spec.name in kwargs: - validated = spec.validate(self, kwargs[spec.name]) - setattr(self, spec.name, validated) - else: - break - - def _to_other(self, other): - super(FrameOpt, self)._to_other(other) - - # this impl covers subclasses with the same optionalspec - if other._optionalspec is not self._optionalspec: - raise ValueError - - for checker in other._optionalspec: - if hasattr(self, checker.name): - setattr(other, checker.name, getattr(self, checker.name)) - - def _readData(self, data): - """Raises ID3JunkFrameError; Returns leftover data""" - - for reader in self._framespec: - if len(data): - try: - value, data = reader.read(self, data) - except SpecError as e: - raise ID3JunkFrameError(e) - else: - raise ID3JunkFrameError("no data left") - setattr(self, reader.name, value) - - if data: - for reader in self._optionalspec: - if len(data): - try: - value, data = reader.read(self, data) - except SpecError as e: - raise ID3JunkFrameError(e) - else: - break - setattr(self, reader.name, value) - - return data - - def _writeData(self): - data = [] - for writer in self._framespec: - data.append(writer.write(self, getattr(self, writer.name))) - for writer in self._optionalspec: - try: - data.append(writer.write(self, getattr(self, writer.name))) - except AttributeError: - break - return b''.join(data) - - def __repr__(self): - kw = [] - for attr in self._framespec: - kw.append('%s=%r' % (attr.name, getattr(self, attr.name))) - for attr in self._optionalspec: - if hasattr(self, attr.name): - kw.append('%s=%r' % (attr.name, getattr(self, attr.name))) - return '%s(%s)' % (type(self).__name__, ', '.join(kw)) - - -@swap_to_string -class TextFrame(Frame): - """Text strings. - - Text frames support casts to unicode or str objects, as well as - list-like indexing, extend, and append. - - Iterating over a TextFrame iterates over its strings, not its - characters. 
- - Text frames have a 'text' attribute which is the list of strings, - and an 'encoding' attribute; 0 for ISO-8859 1, 1 UTF-16, 2 for - UTF-16BE, and 3 for UTF-8. If you don't want to worry about - encodings, just set it to 3. - """ - - _framespec = [ - EncodingSpec('encoding'), - MultiSpec('text', EncodedTextSpec('text'), sep=u'\u0000'), - ] - - def __bytes__(self): - return text_type(self).encode('utf-8') - - def __str__(self): - return u'\u0000'.join(self.text) - - def __eq__(self, other): - if isinstance(other, bytes): - return bytes(self) == other - elif isinstance(other, text_type): - return text_type(self) == other - return self.text == other - - __hash__ = Frame.__hash__ - - def __getitem__(self, item): - return self.text[item] - - def __iter__(self): - return iter(self.text) - - def append(self, value): - """Append a string.""" - - return self.text.append(value) - - def extend(self, value): - """Extend the list by appending all strings from the given list.""" - - return self.text.extend(value) - - def _pprint(self): - return " / ".join(self.text) - - -class NumericTextFrame(TextFrame): - """Numerical text strings. - - The numeric value of these frames can be gotten with unary plus, e.g.:: - - frame = TLEN('12345') - length = +frame - """ - - _framespec = [ - EncodingSpec('encoding'), - MultiSpec('text', EncodedNumericTextSpec('text'), sep=u'\u0000'), - ] - - def __pos__(self): - """Return the numerical value of the string.""" - return int(self.text[0]) - - -class NumericPartTextFrame(TextFrame): - """Multivalue numerical text strings. - - These strings indicate 'part (e.g. track) X of Y', and unary plus - returns the first value:: - - frame = TRCK('4/15') - track = +frame # track == 4 - """ - - _framespec = [ - EncodingSpec('encoding'), - MultiSpec('text', EncodedNumericPartTextSpec('text'), sep=u'\u0000'), - ] - - def __pos__(self): - return int(self.text[0].split("/")[0]) - - -@swap_to_string -class TimeStampTextFrame(TextFrame): - """A list of time stamps. - - The 'text' attribute in this frame is a list of ID3TimeStamp - objects, not a list of strings. - """ - - _framespec = [ - EncodingSpec('encoding'), - MultiSpec('text', TimeStampSpec('stamp'), sep=u','), - ] - - def __bytes__(self): - return text_type(self).encode('utf-8') - - def __str__(self): - return u','.join([stamp.text for stamp in self.text]) - - def _pprint(self): - return u" / ".join([stamp.text for stamp in self.text]) - - -@swap_to_string -class UrlFrame(Frame): - """A frame containing a URL string. - - The ID3 specification is silent about IRIs and normalized URL - forms. Mutagen assumes all URLs in files are encoded as Latin 1, - but string conversion of this frame returns a UTF-8 representation - for compatibility with other string conversions. - - The only sane way to handle URLs in MP3s is to restrict them to - ASCII. 
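A brief sketch of the text-frame behaviour described above (the values are made up)::

    from mutagen.id3 import TRCK, TLEN

    track = TRCK(encoding=3, text=["4/15"])
    print(str(track))   # '4/15'
    print(+track)       # 4 -- unary plus returns the first numeric part

    length = TLEN(encoding=0, text=["215000"])
    print(+length)      # 215000 (milliseconds)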
- """ - - _framespec = [Latin1TextSpec('url')] - - def __bytes__(self): - return self.url.encode('utf-8') - - def __str__(self): - return self.url - - def __eq__(self, other): - return self.url == other - - __hash__ = Frame.__hash__ - - def _pprint(self): - return self.url - - -class UrlFrameU(UrlFrame): - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.url) - - -class TALB(TextFrame): - "Album" - - -class TBPM(NumericTextFrame): - "Beats per minute" - - -class TCOM(TextFrame): - "Composer" - - -class TCON(TextFrame): - """Content type (Genre) - - ID3 has several ways genres can be represented; for convenience, - use the 'genres' property rather than the 'text' attribute. - """ - - from mutagen._constants import GENRES - GENRES = GENRES - - def __get_genres(self): - genres = [] - import re - genre_re = re.compile(r"((?:\((?P[0-9]+|RX|CR)\))*)(?P.+)?") - for value in self.text: - # 255 possible entries in id3v1 - if value.isdigit() and int(value) < 256: - try: - genres.append(self.GENRES[int(value)]) - except IndexError: - genres.append(u"Unknown") - elif value == "CR": - genres.append(u"Cover") - elif value == "RX": - genres.append(u"Remix") - elif value: - newgenres = [] - genreid, dummy, genrename = genre_re.match(value).groups() - - if genreid: - for gid in genreid[1:-1].split(")("): - if gid.isdigit() and int(gid) < len(self.GENRES): - gid = text_type(self.GENRES[int(gid)]) - newgenres.append(gid) - elif gid == "CR": - newgenres.append(u"Cover") - elif gid == "RX": - newgenres.append(u"Remix") - else: - newgenres.append(u"Unknown") - - if genrename: - # "Unescaping" the first parenthesis - if genrename.startswith("(("): - genrename = genrename[1:] - if genrename not in newgenres: - newgenres.append(genrename) - - genres.extend(newgenres) - - return genres - - def __set_genres(self, genres): - if isinstance(genres, string_types): - genres = [genres] - self.text = [self.__decode(g) for g in genres] - - def __decode(self, value): - if isinstance(value, bytes): - enc = EncodedTextSpec._encodings[self.encoding][0] - return value.decode(enc) - else: - return value - - genres = property(__get_genres, __set_genres, None, - "A list of genres parsed from the raw text data.") - - def _pprint(self): - return " / ".join(self.genres) - - -class TCOP(TextFrame): - "Copyright (c)" - - -class TCMP(NumericTextFrame): - "iTunes Compilation Flag" - - -class TDAT(TextFrame): - "Date of recording (DDMM)" - - -class TDEN(TimeStampTextFrame): - "Encoding Time" - - -class TDES(TextFrame): - "iTunes Podcast Description" - - -class TDOR(TimeStampTextFrame): - "Original Release Time" - - -class TDLY(NumericTextFrame): - "Audio Delay (ms)" - - -class TDRC(TimeStampTextFrame): - "Recording Time" - - -class TDRL(TimeStampTextFrame): - "Release Time" - - -class TDTG(TimeStampTextFrame): - "Tagging Time" - - -class TENC(TextFrame): - "Encoder" - - -class TEXT(TextFrame): - "Lyricist" - - -class TFLT(TextFrame): - "File type" - - -class TGID(TextFrame): - "iTunes Podcast Identifier" - - -class TIME(TextFrame): - "Time of recording (HHMM)" - - -class TIT1(TextFrame): - "Content group description" - - -class TIT2(TextFrame): - "Title" - - -class TIT3(TextFrame): - "Subtitle/Description refinement" - - -class TKEY(TextFrame): - "Starting Key" - - -class TLAN(TextFrame): - "Audio Languages" - - -class TLEN(NumericTextFrame): - "Audio Length (ms)" - - -class TMED(TextFrame): - "Source Media Type" - - -class TMOO(TextFrame): - "Mood" - - -class TOAL(TextFrame): - "Original Album" - - -class 
TOFN(TextFrame): - "Original Filename" - - -class TOLY(TextFrame): - "Original Lyricist" - - -class TOPE(TextFrame): - "Original Artist/Performer" - - -class TORY(NumericTextFrame): - "Original Release Year" - - -class TOWN(TextFrame): - "Owner/Licensee" - - -class TPE1(TextFrame): - "Lead Artist/Performer/Soloist/Group" - - -class TPE2(TextFrame): - "Band/Orchestra/Accompaniment" - - -class TPE3(TextFrame): - "Conductor" - - -class TPE4(TextFrame): - "Interpreter/Remixer/Modifier" - - -class TPOS(NumericPartTextFrame): - "Part of set" - - -class TPRO(TextFrame): - "Produced (P)" - - -class TPUB(TextFrame): - "Publisher" - - -class TRCK(NumericPartTextFrame): - "Track Number" - - -class TRDA(TextFrame): - "Recording Dates" - - -class TRSN(TextFrame): - "Internet Radio Station Name" - - -class TRSO(TextFrame): - "Internet Radio Station Owner" - - -class TSIZ(NumericTextFrame): - "Size of audio data (bytes)" - - -class TSO2(TextFrame): - "iTunes Album Artist Sort" - - -class TSOA(TextFrame): - "Album Sort Order key" - - -class TSOC(TextFrame): - "iTunes Composer Sort" - - -class TSOP(TextFrame): - "Perfomer Sort Order key" - - -class TSOT(TextFrame): - "Title Sort Order key" - - -class TSRC(TextFrame): - "International Standard Recording Code (ISRC)" - - -class TSSE(TextFrame): - "Encoder settings" - - -class TSST(TextFrame): - "Set Subtitle" - - -class TYER(NumericTextFrame): - "Year of recording" - - -class TXXX(TextFrame): - """User-defined text data. - - TXXX frames have a 'desc' attribute which is set to any Unicode - value (though the encoding of the text and the description must be - the same). Many taggers use this frame to store freeform keys. - """ - - _framespec = [ - EncodingSpec('encoding'), - EncodedTextSpec('desc'), - MultiSpec('text', EncodedTextSpec('text'), sep=u'\u0000'), - ] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.desc) - - def _pprint(self): - return "%s=%s" % (self.desc, " / ".join(self.text)) - - -class WCOM(UrlFrameU): - "Commercial Information" - - -class WCOP(UrlFrame): - "Copyright Information" - - -class WFED(UrlFrame): - "iTunes Podcast Feed" - - -class WOAF(UrlFrame): - "Official File Information" - - -class WOAR(UrlFrameU): - "Official Artist/Performer Information" - - -class WOAS(UrlFrame): - "Official Source Information" - - -class WORS(UrlFrame): - "Official Internet Radio Information" - - -class WPAY(UrlFrame): - "Payment Information" - - -class WPUB(UrlFrame): - "Official Publisher Information" - - -class WXXX(UrlFrame): - """User-defined URL data. - - Like TXXX, this has a freeform description associated with it. - """ - - _framespec = [ - EncodingSpec('encoding'), - EncodedTextSpec('desc'), - Latin1TextSpec('url'), - ] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.desc) - - -class PairedTextFrame(Frame): - """Paired text strings. - - Some ID3 frames pair text strings, to associate names with a more - specific involvement in the song. The 'people' attribute of these - frames contains a list of pairs:: - - [['trumpet', 'Miles Davis'], ['bass', 'Paul Chambers']] - - Like text frames, these frames also have an encoding attribute. 
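To make the HashKey and 'people' conventions above concrete, a small sketch (the description and names are illustrative only)::

    from mutagen.id3 import TXXX, TIPL

    # TXXX includes its description in the HashKey, so several user-text
    # frames can live in one tag side by side:
    frame = TXXX(encoding=3, desc="MusicBrainz Album Id", text=["example-id"])
    print(frame.HashKey)   # 'TXXX:MusicBrainz Album Id'

    # Paired text frames store (involvement, person) pairs:
    credits = TIPL(encoding=3, people=[["producer", "Some Producer"],
                                       ["engineer", "Some Engineer"]])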
- """ - - _framespec = [ - EncodingSpec('encoding'), - MultiSpec('people', - EncodedTextSpec('involvement'), - EncodedTextSpec('person')) - ] - - def __eq__(self, other): - return self.people == other - - __hash__ = Frame.__hash__ - - -class TIPL(PairedTextFrame): - "Involved People List" - - -class TMCL(PairedTextFrame): - "Musicians Credits List" - - -class IPLS(TIPL): - "Involved People List" - - -class BinaryFrame(Frame): - """Binary data - - The 'data' attribute contains the raw byte string. - """ - - _framespec = [BinaryDataSpec('data')] - - def __eq__(self, other): - return self.data == other - - __hash__ = Frame.__hash__ - - -class MCDI(BinaryFrame): - "Binary dump of CD's TOC" - - -class ETCO(Frame): - """Event timing codes.""" - - _framespec = [ - ByteSpec("format"), - KeyEventSpec("events"), - ] - - def __eq__(self, other): - return self.events == other - - __hash__ = Frame.__hash__ - - -class MLLT(Frame): - """MPEG location lookup table. - - This frame's attributes may be changed in the future based on - feedback from real-world use. - """ - - _framespec = [ - SizedIntegerSpec('frames', 2), - SizedIntegerSpec('bytes', 3), - SizedIntegerSpec('milliseconds', 3), - ByteSpec('bits_for_bytes'), - ByteSpec('bits_for_milliseconds'), - BinaryDataSpec('data'), - ] - - def __eq__(self, other): - return self.data == other - - __hash__ = Frame.__hash__ - - -class SYTC(Frame): - """Synchronised tempo codes. - - This frame's attributes may be changed in the future based on - feedback from real-world use. - """ - - _framespec = [ - ByteSpec("format"), - BinaryDataSpec("data"), - ] - - def __eq__(self, other): - return self.data == other - - __hash__ = Frame.__hash__ - - -@swap_to_string -class USLT(Frame): - """Unsynchronised lyrics/text transcription. - - Lyrics have a three letter ISO language code ('lang'), a - description ('desc'), and a block of plain text ('text'). - """ - - _framespec = [ - EncodingSpec('encoding'), - StringSpec('lang', 3), - EncodedTextSpec('desc'), - EncodedTextSpec('text'), - ] - - @property - def HashKey(self): - return '%s:%s:%s' % (self.FrameID, self.desc, self.lang) - - def __bytes__(self): - return self.text.encode('utf-8') - - def __str__(self): - return self.text - - def __eq__(self, other): - return self.text == other - - __hash__ = Frame.__hash__ - - -@swap_to_string -class SYLT(Frame): - """Synchronised lyrics/text.""" - - _framespec = [ - EncodingSpec('encoding'), - StringSpec('lang', 3), - ByteSpec('format'), - ByteSpec('type'), - EncodedTextSpec('desc'), - SynchronizedTextSpec('text'), - ] - - @property - def HashKey(self): - return '%s:%s:%s' % (self.FrameID, self.desc, self.lang) - - def __eq__(self, other): - return str(self) == other - - __hash__ = Frame.__hash__ - - def __str__(self): - return u"".join(text for (text, time) in self.text) - - def __bytes__(self): - return text_type(self).encode("utf-8") - - -class COMM(TextFrame): - """User comment. - - User comment frames have a descrption, like TXXX, and also a three - letter ISO language code in the 'lang' attribute. - """ - - _framespec = [ - EncodingSpec('encoding'), - StringSpec('lang', 3), - EncodedTextSpec('desc'), - MultiSpec('text', EncodedTextSpec('text'), sep=u'\u0000'), - ] - - @property - def HashKey(self): - return '%s:%s:%s' % (self.FrameID, self.desc, self.lang) - - def _pprint(self): - return "%s=%s=%s" % (self.desc, self.lang, " / ".join(self.text)) - - -class RVA2(Frame): - """Relative volume adjustment (2). 
- - This frame is used to implemented volume scaling, and in - particular, normalization using ReplayGain. - - Attributes: - - * desc -- description or context of this adjustment - * channel -- audio channel to adjust (master is 1) - * gain -- a + or - dB gain relative to some reference level - * peak -- peak of the audio as a floating point number, [0, 1] - - When storing ReplayGain tags, use descriptions of 'album' and - 'track' on channel 1. - """ - - _framespec = [ - Latin1TextSpec('desc'), - ChannelSpec('channel'), - VolumeAdjustmentSpec('gain'), - VolumePeakSpec('peak'), - ] - - _channels = ["Other", "Master volume", "Front right", "Front left", - "Back right", "Back left", "Front centre", "Back centre", - "Subwoofer"] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.desc) - - def __eq__(self, other): - try: - return ((str(self) == other) or - (self.desc == other.desc and - self.channel == other.channel and - self.gain == other.gain and - self.peak == other.peak)) - except AttributeError: - return False - - __hash__ = Frame.__hash__ - - def __str__(self): - return "%s: %+0.4f dB/%0.4f" % ( - self._channels[self.channel], self.gain, self.peak) - - -class EQU2(Frame): - """Equalisation (2). - - Attributes: - method -- interpolation method (0 = band, 1 = linear) - desc -- identifying description - adjustments -- list of (frequency, vol_adjustment) pairs - """ - - _framespec = [ - ByteSpec("method"), - Latin1TextSpec("desc"), - VolumeAdjustmentsSpec("adjustments"), - ] - - def __eq__(self, other): - return self.adjustments == other - - __hash__ = Frame.__hash__ - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.desc) - - -# class RVAD: unsupported -# class EQUA: unsupported - - -class RVRB(Frame): - """Reverb.""" - - _framespec = [ - SizedIntegerSpec('left', 2), - SizedIntegerSpec('right', 2), - ByteSpec('bounce_left'), - ByteSpec('bounce_right'), - ByteSpec('feedback_ltl'), - ByteSpec('feedback_ltr'), - ByteSpec('feedback_rtr'), - ByteSpec('feedback_rtl'), - ByteSpec('premix_ltr'), - ByteSpec('premix_rtl'), - ] - - def __eq__(self, other): - return (self.left, self.right) == other - - __hash__ = Frame.__hash__ - - -class APIC(Frame): - """Attached (or linked) Picture. - - Attributes: - - * encoding -- text encoding for the description - * mime -- a MIME type (e.g. image/jpeg) or '-->' if the data is a URI - * type -- the source of the image (3 is the album front cover) - * desc -- a text description of the image - * data -- raw image data, as a byte string - - Mutagen will automatically compress large images when saving tags. - """ - - _framespec = [ - EncodingSpec('encoding'), - Latin1TextSpec('mime'), - ByteSpec('type'), - EncodedTextSpec('desc'), - BinaryDataSpec('data'), - ] - - def __eq__(self, other): - return self.data == other - - __hash__ = Frame.__hash__ - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.desc) - - def _validate_from_22(self, other, checker): - if checker.name == "mime": - self.mime = other.mime.decode("ascii", "ignore") - else: - super(APIC, self)._validate_from_22(other, checker) - - def _pprint(self): - return "%s (%s, %d bytes)" % ( - self.desc, self.mime, len(self.data)) - - -class PCNT(Frame): - """Play counter. - - The 'count' attribute contains the (recorded) number of times this - file has been played. - - This frame is basically obsoleted by POPM. 
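A minimal sketch of attaching cover art with the APIC frame described above ("song.mp3" and "cover.jpg" are placeholders; picture type 3 is the front cover)::

    from mutagen.id3 import ID3, APIC

    tags = ID3("song.mp3")
    with open("cover.jpg", "rb") as h:
        tags.add(APIC(encoding=3, mime="image/jpeg", type=3,
                      desc="Cover", data=h.read()))
    # APIC's HashKey includes the description, so a second picture needs
    # a different desc or it will replace this one.
    tags.save()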
- """ - - _framespec = [IntegerSpec('count')] - - def __eq__(self, other): - return self.count == other - - __hash__ = Frame.__hash__ - - def __pos__(self): - return self.count - - def _pprint(self): - return text_type(self.count) - - -class POPM(FrameOpt): - """Popularimeter. - - This frame keys a rating (out of 255) and a play count to an email - address. - - Attributes: - - * email -- email this POPM frame is for - * rating -- rating from 0 to 255 - * count -- number of times the files has been played (optional) - """ - - _framespec = [ - Latin1TextSpec('email'), - ByteSpec('rating'), - ] - - _optionalspec = [IntegerSpec('count')] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.email) - - def __eq__(self, other): - return self.rating == other - - __hash__ = FrameOpt.__hash__ - - def __pos__(self): - return self.rating - - def _pprint(self): - return "%s=%r %r/255" % ( - self.email, getattr(self, 'count', None), self.rating) - - -class GEOB(Frame): - """General Encapsulated Object. - - A blob of binary data, that is not a picture (those go in APIC). - - Attributes: - - * encoding -- encoding of the description - * mime -- MIME type of the data or '-->' if the data is a URI - * filename -- suggested filename if extracted - * desc -- text description of the data - * data -- raw data, as a byte string - """ - - _framespec = [ - EncodingSpec('encoding'), - Latin1TextSpec('mime'), - EncodedTextSpec('filename'), - EncodedTextSpec('desc'), - BinaryDataSpec('data'), - ] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.desc) - - def __eq__(self, other): - return self.data == other - - __hash__ = Frame.__hash__ - - -class RBUF(FrameOpt): - """Recommended buffer size. - - Attributes: - - * size -- recommended buffer size in bytes - * info -- if ID3 tags may be elsewhere in the file (optional) - * offset -- the location of the next ID3 tag, if any - - Mutagen will not find the next tag itself. - """ - - _framespec = [SizedIntegerSpec('size', 3)] - - _optionalspec = [ - ByteSpec('info'), - SizedIntegerSpec('offset', 4), - ] - - def __eq__(self, other): - return self.size == other - - __hash__ = FrameOpt.__hash__ - - def __pos__(self): - return self.size - - -@swap_to_string -class AENC(FrameOpt): - """Audio encryption. - - Attributes: - - * owner -- key identifying this encryption type - * preview_start -- unencrypted data block offset - * preview_length -- number of unencrypted blocks - * data -- data required for decryption (optional) - - Mutagen cannot decrypt files. - """ - - _framespec = [ - Latin1TextSpec('owner'), - SizedIntegerSpec('preview_start', 2), - SizedIntegerSpec('preview_length', 2), - ] - - _optionalspec = [BinaryDataSpec('data')] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.owner) - - def __bytes__(self): - return self.owner.encode('utf-8') - - def __str__(self): - return self.owner - - def __eq__(self, other): - return self.owner == other - - __hash__ = FrameOpt.__hash__ - - -class LINK(FrameOpt): - """Linked information. 
- - Attributes: - - * frameid -- the ID of the linked frame - * url -- the location of the linked frame - * data -- further ID information for the frame - """ - - _framespec = [ - StringSpec('frameid', 4), - Latin1TextSpec('url'), - ] - - _optionalspec = [BinaryDataSpec('data')] - - @property - def HashKey(self): - try: - return "%s:%s:%s:%s" % ( - self.FrameID, self.frameid, self.url, _bytes2key(self.data)) - except AttributeError: - return "%s:%s:%s" % (self.FrameID, self.frameid, self.url) - - def __eq__(self, other): - try: - return (self.frameid, self.url, self.data) == other - except AttributeError: - return (self.frameid, self.url) == other - - __hash__ = FrameOpt.__hash__ - - -class POSS(Frame): - """Position synchronisation frame - - Attribute: - - * format -- format of the position attribute (frames or milliseconds) - * position -- current position of the file - """ - - _framespec = [ - ByteSpec('format'), - IntegerSpec('position'), - ] - - def __pos__(self): - return self.position - - def __eq__(self, other): - return self.position == other - - __hash__ = Frame.__hash__ - - -class UFID(Frame): - """Unique file identifier. - - Attributes: - - * owner -- format/type of identifier - * data -- identifier - """ - - _framespec = [ - Latin1TextSpec('owner'), - BinaryDataSpec('data'), - ] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.owner) - - def __eq__(s, o): - if isinstance(o, UFI): - return s.owner == o.owner and s.data == o.data - else: - return s.data == o - - __hash__ = Frame.__hash__ - - def _pprint(self): - return "%s=%r" % (self.owner, self.data) - - -@swap_to_string -class USER(Frame): - """Terms of use. - - Attributes: - - * encoding -- text encoding - * lang -- ISO three letter language code - * text -- licensing terms for the audio - """ - - _framespec = [ - EncodingSpec('encoding'), - StringSpec('lang', 3), - EncodedTextSpec('text'), - ] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.lang) - - def __bytes__(self): - return self.text.encode('utf-8') - - def __str__(self): - return self.text - - def __eq__(self, other): - return self.text == other - - __hash__ = Frame.__hash__ - - def _pprint(self): - return "%r=%s" % (self.lang, self.text) - - -@swap_to_string -class OWNE(Frame): - """Ownership frame.""" - - _framespec = [ - EncodingSpec('encoding'), - Latin1TextSpec('price'), - StringSpec('date', 8), - EncodedTextSpec('seller'), - ] - - def __bytes__(self): - return self.seller.encode('utf-8') - - def __str__(self): - return self.seller - - def __eq__(self, other): - return self.seller == other - - __hash__ = Frame.__hash__ - - -class COMR(FrameOpt): - """Commercial frame.""" - - _framespec = [ - EncodingSpec('encoding'), - Latin1TextSpec('price'), - StringSpec('valid_until', 8), - Latin1TextSpec('contact'), - ByteSpec('format'), - EncodedTextSpec('seller'), - EncodedTextSpec('desc'), - ] - - _optionalspec = [ - Latin1TextSpec('mime'), - BinaryDataSpec('logo'), - ] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, _bytes2key(self._writeData())) - - def __eq__(self, other): - return self._writeData() == other._writeData() - - __hash__ = FrameOpt.__hash__ - - -@swap_to_string -class ENCR(Frame): - """Encryption method registration. - - The standard does not allow multiple ENCR frames with the same owner - or the same method. Mutagen only verifies that the owner is unique. 
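Frames such as POPM and UFID key their HashKey on an e-mail address or owner string, so one tag can hold one of each per owner; a sketch with made-up values::

    from mutagen.id3 import POPM, UFID

    rating = POPM(email="user@example.com", rating=196, count=42)
    print(rating.HashKey)   # 'POPM:user@example.com'
    print(+rating)          # 196 -- unary plus returns the rating

    uid = UFID(owner="example.org", data=b"some-identifier")
    print(uid.HashKey)      # 'UFID:example.org'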
- """ - - _framespec = [ - Latin1TextSpec('owner'), - ByteSpec('method'), - BinaryDataSpec('data'), - ] - - @property - def HashKey(self): - return "%s:%s" % (self.FrameID, self.owner) - - def __bytes__(self): - return self.data - - def __eq__(self, other): - return self.data == other - - __hash__ = Frame.__hash__ - - -@swap_to_string -class GRID(FrameOpt): - """Group identification registration.""" - - _framespec = [ - Latin1TextSpec('owner'), - ByteSpec('group'), - ] - - _optionalspec = [BinaryDataSpec('data')] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.group) - - def __pos__(self): - return self.group - - def __bytes__(self): - return self.owner.encode('utf-8') - - def __str__(self): - return self.owner - - def __eq__(self, other): - return self.owner == other or self.group == other - - __hash__ = FrameOpt.__hash__ - - -@swap_to_string -class PRIV(Frame): - """Private frame.""" - - _framespec = [ - Latin1TextSpec('owner'), - BinaryDataSpec('data'), - ] - - @property - def HashKey(self): - return '%s:%s:%s' % ( - self.FrameID, self.owner, _bytes2key(self.data)) - - def __bytes__(self): - return self.data - - def __eq__(self, other): - return self.data == other - - def _pprint(self): - return "%s=%r" % (self.owner, self.data) - - __hash__ = Frame.__hash__ - - -@swap_to_string -class SIGN(Frame): - """Signature frame.""" - - _framespec = [ - ByteSpec('group'), - BinaryDataSpec('sig'), - ] - - @property - def HashKey(self): - return '%s:%s:%s' % (self.FrameID, self.group, _bytes2key(self.sig)) - - def __bytes__(self): - return self.sig - - def __eq__(self, other): - return self.sig == other - - __hash__ = Frame.__hash__ - - -class SEEK(Frame): - """Seek frame. - - Mutagen does not find tags at seek offsets. - """ - - _framespec = [IntegerSpec('offset')] - - def __pos__(self): - return self.offset - - def __eq__(self, other): - return self.offset == other - - __hash__ = Frame.__hash__ - - -class ASPI(Frame): - """Audio seek point index. - - Attributes: S, L, N, b, and Fi. For the meaning of these, see - the ID3v2.4 specification. Fi is a list of integers. 
- """ - _framespec = [ - SizedIntegerSpec("S", 4), - SizedIntegerSpec("L", 4), - SizedIntegerSpec("N", 2), - ByteSpec("b"), - ASPIIndexSpec("Fi"), - ] - - def __eq__(self, other): - return self.Fi == other - - __hash__ = Frame.__hash__ - - -# ID3v2.2 frames -class UFI(UFID): - "Unique File Identifier" - - -class TT1(TIT1): - "Content group description" - - -class TT2(TIT2): - "Title" - - -class TT3(TIT3): - "Subtitle/Description refinement" - - -class TP1(TPE1): - "Lead Artist/Performer/Soloist/Group" - - -class TP2(TPE2): - "Band/Orchestra/Accompaniment" - - -class TP3(TPE3): - "Conductor" - - -class TP4(TPE4): - "Interpreter/Remixer/Modifier" - - -class TCM(TCOM): - "Composer" - - -class TXT(TEXT): - "Lyricist" - - -class TLA(TLAN): - "Audio Language(s)" - - -class TCO(TCON): - "Content Type (Genre)" - - -class TAL(TALB): - "Album" - - -class TPA(TPOS): - "Part of set" - - -class TRK(TRCK): - "Track Number" - - -class TRC(TSRC): - "International Standard Recording Code (ISRC)" - - -class TYE(TYER): - "Year of recording" - - -class TDA(TDAT): - "Date of recording (DDMM)" - - -class TIM(TIME): - "Time of recording (HHMM)" - - -class TRD(TRDA): - "Recording Dates" - - -class TMT(TMED): - "Source Media Type" - - -class TFT(TFLT): - "File Type" - - -class TBP(TBPM): - "Beats per minute" - - -class TCP(TCMP): - "iTunes Compilation Flag" - - -class TCR(TCOP): - "Copyright (C)" - - -class TPB(TPUB): - "Publisher" - - -class TEN(TENC): - "Encoder" - - -class TSS(TSSE): - "Encoder settings" - - -class TOF(TOFN): - "Original Filename" - - -class TLE(TLEN): - "Audio Length (ms)" - - -class TSI(TSIZ): - "Audio Data size (bytes)" - - -class TDY(TDLY): - "Audio Delay (ms)" - - -class TKE(TKEY): - "Starting Key" - - -class TOT(TOAL): - "Original Album" - - -class TOA(TOPE): - "Original Artist/Perfomer" - - -class TOL(TOLY): - "Original Lyricist" - - -class TOR(TORY): - "Original Release Year" - - -class TXX(TXXX): - "User-defined Text" - - -class WAF(WOAF): - "Official File Information" - - -class WAR(WOAR): - "Official Artist/Performer Information" - - -class WAS(WOAS): - "Official Source Information" - - -class WCM(WCOM): - "Commercial Information" - - -class WCP(WCOP): - "Copyright Information" - - -class WPB(WPUB): - "Official Publisher Information" - - -class WXX(WXXX): - "User-defined URL" - - -class IPL(IPLS): - "Involved people list" - - -class MCI(MCDI): - "Binary dump of CD's TOC" - - -class ETC(ETCO): - "Event timing codes" - - -class MLL(MLLT): - "MPEG location lookup table" - - -class STC(SYTC): - "Synced tempo codes" - - -class ULT(USLT): - "Unsychronised lyrics/text transcription" - - -class SLT(SYLT): - "Synchronised lyrics/text" - - -class COM(COMM): - "Comment" - - -# class RVA(RVAD) -# class EQU(EQUA) - - -class REV(RVRB): - "Reverb" - - -class PIC(APIC): - """Attached Picture. - - The 'mime' attribute of an ID3v2.2 attached picture must be either - 'PNG' or 'JPG'. 
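The ID3v2.2 classes above each subclass their four-character ID3v2.3/2.4 counterpart, which is what lets loaded_frame()/add() upgrade old frames to modern names; a quick check, assuming these names are importable from mutagen.id3::

    from mutagen.id3 import Frames_2_2, TT2, TIT2

    print(issubclass(TT2, TIT2))    # True
    print(TT2.__base__.__name__)    # 'TIT2' -- the name used after the upgrade
    print("TT2" in Frames_2_2)      # True -- keyed by the 3-character frame ID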
- """ - - _framespec = [ - EncodingSpec('encoding'), - StringSpec('mime', 3), - ByteSpec('type'), - EncodedTextSpec('desc'), - BinaryDataSpec('data') - ] - - def _to_other(self, other): - if not isinstance(other, APIC): - raise TypeError - - other.encoding = self.encoding - other.mime = self.mime - other.type = self.type - other.desc = self.desc - other.data = self.data - - -class GEO(GEOB): - "General Encapsulated Object" - - -class CNT(PCNT): - "Play counter" - - -class POP(POPM): - "Popularimeter" - - -class BUF(RBUF): - "Recommended buffer size" - - -class CRM(Frame): - """Encrypted meta frame""" - _framespec = [Latin1TextSpec('owner'), Latin1TextSpec('desc'), - BinaryDataSpec('data')] - - def __eq__(self, other): - return self.data == other - __hash__ = Frame.__hash__ - - -class CRA(AENC): - "Audio encryption" - - -class LNK(LINK): - """Linked information""" - - _framespec = [ - StringSpec('frameid', 3), - Latin1TextSpec('url') - ] - - _optionalspec = [BinaryDataSpec('data')] - - def _to_other(self, other): - if not isinstance(other, LINK): - raise TypeError - - other.frameid = self.frameid - other.url = self.url - if hasattr(self, "data"): - other.data = self.data - - -Frames = {} -"""All supported ID3v2.3/4 frames, keyed by frame name.""" - - -Frames_2_2 = {} -"""All supported ID3v2.2 frames, keyed by frame name.""" - - -k, v = None, None -for k, v in iteritems(globals()): - if isinstance(v, type) and issubclass(v, Frame): - v.__module__ = "mutagen.id3" - - if len(k) == 3: - Frames_2_2[k] = v - elif len(k) == 4: - Frames[k] = v - -try: - del k - del v -except NameError: - pass diff --git a/resources/lib/mutagen/id3/_specs.py b/resources/lib/mutagen/id3/_specs.py deleted file mode 100644 index 4358a65d..00000000 --- a/resources/lib/mutagen/id3/_specs.py +++ /dev/null @@ -1,635 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Michael Urman -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -import struct -from struct import unpack, pack - -from .._compat import text_type, chr_, PY3, swap_to_string, string_types, \ - xrange -from .._util import total_ordering, decode_terminated, enum, izip -from ._util import BitPaddedInt - - -@enum -class PictureType(object): - """Enumeration of image types defined by the ID3 standard for the APIC - frame, but also reused in WMA/FLAC/VorbisComment. - """ - - OTHER = 0 - """Other""" - - FILE_ICON = 1 - """32x32 pixels 'file icon' (PNG only)""" - - OTHER_FILE_ICON = 2 - """Other file icon""" - - COVER_FRONT = 3 - """Cover (front)""" - - COVER_BACK = 4 - """Cover (back)""" - - LEAFLET_PAGE = 5 - """Leaflet page""" - - MEDIA = 6 - """Media (e.g. 
label side of CD)""" - - LEAD_ARTIST = 7 - """Lead artist/lead performer/soloist""" - - ARTIST = 8 - """Artist/performer""" - - CONDUCTOR = 9 - """Conductor""" - - BAND = 10 - """Band/Orchestra""" - - COMPOSER = 11 - """Composer""" - - LYRICIST = 12 - """Lyricist/text writer""" - - RECORDING_LOCATION = 13 - """Recording Location""" - - DURING_RECORDING = 14 - """During recording""" - - DURING_PERFORMANCE = 15 - """During performance""" - - SCREEN_CAPTURE = 16 - """Movie/video screen capture""" - - FISH = 17 - """A bright coloured fish""" - - ILLUSTRATION = 18 - """Illustration""" - - BAND_LOGOTYPE = 19 - """Band/artist logotype""" - - PUBLISHER_LOGOTYPE = 20 - """Publisher/Studio logotype""" - - -class SpecError(Exception): - pass - - -class Spec(object): - - def __init__(self, name): - self.name = name - - def __hash__(self): - raise TypeError("Spec objects are unhashable") - - def _validate23(self, frame, value, **kwargs): - """Return a possibly modified value which, if written, - results in valid id3v2.3 data. - """ - - return value - - def read(self, frame, data): - """Returns the (value, left_data) or raises SpecError""" - - raise NotImplementedError - - def write(self, frame, value): - raise NotImplementedError - - def validate(self, frame, value): - """Returns the validated data or raises ValueError/TypeError""" - - raise NotImplementedError - - -class ByteSpec(Spec): - def read(self, frame, data): - return bytearray(data)[0], data[1:] - - def write(self, frame, value): - return chr_(value) - - def validate(self, frame, value): - if value is not None: - chr_(value) - return value - - -class IntegerSpec(Spec): - def read(self, frame, data): - return int(BitPaddedInt(data, bits=8)), b'' - - def write(self, frame, value): - return BitPaddedInt.to_str(value, bits=8, width=-1) - - def validate(self, frame, value): - return value - - -class SizedIntegerSpec(Spec): - def __init__(self, name, size): - self.name, self.__sz = name, size - - def read(self, frame, data): - return int(BitPaddedInt(data[:self.__sz], bits=8)), data[self.__sz:] - - def write(self, frame, value): - return BitPaddedInt.to_str(value, bits=8, width=self.__sz) - - def validate(self, frame, value): - return value - - -@enum -class Encoding(object): - """Text Encoding""" - - LATIN1 = 0 - """ISO-8859-1""" - - UTF16 = 1 - """UTF-16 with BOM""" - - UTF16BE = 2 - """UTF-16BE without BOM""" - - UTF8 = 3 - """UTF-8""" - - -class EncodingSpec(ByteSpec): - - def read(self, frame, data): - enc, data = super(EncodingSpec, self).read(frame, data) - if enc not in (Encoding.LATIN1, Encoding.UTF16, Encoding.UTF16BE, - Encoding.UTF8): - raise SpecError('Invalid Encoding: %r' % enc) - return enc, data - - def validate(self, frame, value): - if value is None: - return None - if value not in (Encoding.LATIN1, Encoding.UTF16, Encoding.UTF16BE, - Encoding.UTF8): - raise ValueError('Invalid Encoding: %r' % value) - return value - - def _validate23(self, frame, value, **kwargs): - # only 0, 1 are valid in v2.3, default to utf-16 - if value not in (Encoding.LATIN1, Encoding.UTF16): - value = Encoding.UTF16 - return value - - -class StringSpec(Spec): - """A fixed size ASCII only payload.""" - - def __init__(self, name, length): - super(StringSpec, self).__init__(name) - self.len = length - - def read(s, frame, data): - chunk = data[:s.len] - try: - ascii = chunk.decode("ascii") - except UnicodeDecodeError: - raise SpecError("not ascii") - else: - if PY3: - chunk = ascii - - return chunk, data[s.len:] - - def write(s, frame, value): - if value is 
None: - return b'\x00' * s.len - else: - if PY3: - value = value.encode("ascii") - return (bytes(value) + b'\x00' * s.len)[:s.len] - - def validate(s, frame, value): - if value is None: - return None - - if PY3: - if not isinstance(value, str): - raise TypeError("%s has to be str" % s.name) - value.encode("ascii") - else: - if not isinstance(value, bytes): - value = value.encode("ascii") - - if len(value) == s.len: - return value - - raise ValueError('Invalid StringSpec[%d] data: %r' % (s.len, value)) - - -class BinaryDataSpec(Spec): - def read(self, frame, data): - return data, b'' - - def write(self, frame, value): - if value is None: - return b"" - if isinstance(value, bytes): - return value - value = text_type(value).encode("ascii") - return value - - def validate(self, frame, value): - if value is None: - return None - - if isinstance(value, bytes): - return value - elif PY3: - raise TypeError("%s has to be bytes" % self.name) - - value = text_type(value).encode("ascii") - return value - - -class EncodedTextSpec(Spec): - - _encodings = { - Encoding.LATIN1: ('latin1', b'\x00'), - Encoding.UTF16: ('utf16', b'\x00\x00'), - Encoding.UTF16BE: ('utf_16_be', b'\x00\x00'), - Encoding.UTF8: ('utf8', b'\x00'), - } - - def read(self, frame, data): - enc, term = self._encodings[frame.encoding] - try: - # allow missing termination - return decode_terminated(data, enc, strict=False) - except ValueError: - # utf-16 termination with missing BOM, or single NULL - if not data[:len(term)].strip(b"\x00"): - return u"", data[len(term):] - - # utf-16 data with single NULL, see issue 169 - try: - return decode_terminated(data + b"\x00", enc) - except ValueError: - raise SpecError("Decoding error") - - def write(self, frame, value): - enc, term = self._encodings[frame.encoding] - return value.encode(enc) + term - - def validate(self, frame, value): - return text_type(value) - - -class MultiSpec(Spec): - def __init__(self, name, *specs, **kw): - super(MultiSpec, self).__init__(name) - self.specs = specs - self.sep = kw.get('sep') - - def read(self, frame, data): - values = [] - while data: - record = [] - for spec in self.specs: - value, data = spec.read(frame, data) - record.append(value) - if len(self.specs) != 1: - values.append(record) - else: - values.append(record[0]) - return values, data - - def write(self, frame, value): - data = [] - if len(self.specs) == 1: - for v in value: - data.append(self.specs[0].write(frame, v)) - else: - for record in value: - for v, s in izip(record, self.specs): - data.append(s.write(frame, v)) - return b''.join(data) - - def validate(self, frame, value): - if value is None: - return [] - if self.sep and isinstance(value, string_types): - value = value.split(self.sep) - if isinstance(value, list): - if len(self.specs) == 1: - return [self.specs[0].validate(frame, v) for v in value] - else: - return [ - [s.validate(frame, v) for (v, s) in izip(val, self.specs)] - for val in value] - raise ValueError('Invalid MultiSpec data: %r' % value) - - def _validate23(self, frame, value, **kwargs): - if len(self.specs) != 1: - return [[s._validate23(frame, v, **kwargs) - for (v, s) in izip(val, self.specs)] - for val in value] - - spec = self.specs[0] - - # Merge single text spec multispecs only. 
- # (TimeStampSpec beeing the exception, but it's not a valid v2.3 frame) - if not isinstance(spec, EncodedTextSpec) or \ - isinstance(spec, TimeStampSpec): - return value - - value = [spec._validate23(frame, v, **kwargs) for v in value] - if kwargs.get("sep") is not None: - return [spec.validate(frame, kwargs["sep"].join(value))] - return value - - -class EncodedNumericTextSpec(EncodedTextSpec): - pass - - -class EncodedNumericPartTextSpec(EncodedTextSpec): - pass - - -class Latin1TextSpec(EncodedTextSpec): - def read(self, frame, data): - if b'\x00' in data: - data, ret = data.split(b'\x00', 1) - else: - ret = b'' - return data.decode('latin1'), ret - - def write(self, data, value): - return value.encode('latin1') + b'\x00' - - def validate(self, frame, value): - return text_type(value) - - -@swap_to_string -@total_ordering -class ID3TimeStamp(object): - """A time stamp in ID3v2 format. - - This is a restricted form of the ISO 8601 standard; time stamps - take the form of: - YYYY-MM-DD HH:MM:SS - Or some partial form (YYYY-MM-DD HH, YYYY, etc.). - - The 'text' attribute contains the raw text data of the time stamp. - """ - - import re - - def __init__(self, text): - if isinstance(text, ID3TimeStamp): - text = text.text - elif not isinstance(text, text_type): - if PY3: - raise TypeError("not a str") - text = text.decode("utf-8") - - self.text = text - - __formats = ['%04d'] + ['%02d'] * 5 - __seps = ['-', '-', ' ', ':', ':', 'x'] - - def get_text(self): - parts = [self.year, self.month, self.day, - self.hour, self.minute, self.second] - pieces = [] - for i, part in enumerate(parts): - if part is None: - break - pieces.append(self.__formats[i] % part + self.__seps[i]) - return u''.join(pieces)[:-1] - - def set_text(self, text, splitre=re.compile('[-T:/.]|\s+')): - year, month, day, hour, minute, second = \ - splitre.split(text + ':::::')[:6] - for a in 'year month day hour minute second'.split(): - try: - v = int(locals()[a]) - except ValueError: - v = None - setattr(self, a, v) - - text = property(get_text, set_text, doc="ID3v2.4 date and time.") - - def __str__(self): - return self.text - - def __bytes__(self): - return self.text.encode("utf-8") - - def __repr__(self): - return repr(self.text) - - def __eq__(self, other): - return self.text == other.text - - def __lt__(self, other): - return self.text < other.text - - __hash__ = object.__hash__ - - def encode(self, *args): - return self.text.encode(*args) - - -class TimeStampSpec(EncodedTextSpec): - def read(self, frame, data): - value, data = super(TimeStampSpec, self).read(frame, data) - return self.validate(frame, value), data - - def write(self, frame, data): - return super(TimeStampSpec, self).write(frame, - data.text.replace(' ', 'T')) - - def validate(self, frame, value): - try: - return ID3TimeStamp(value) - except TypeError: - raise ValueError("Invalid ID3TimeStamp: %r" % value) - - -class ChannelSpec(ByteSpec): - (OTHER, MASTER, FRONTRIGHT, FRONTLEFT, BACKRIGHT, BACKLEFT, FRONTCENTRE, - BACKCENTRE, SUBWOOFER) = xrange(9) - - -class VolumeAdjustmentSpec(Spec): - def read(self, frame, data): - value, = unpack('>h', data[0:2]) - return value / 512.0, data[2:] - - def write(self, frame, value): - number = int(round(value * 512)) - # pack only fails in 2.7, do it manually in 2.6 - if not -32768 <= number <= 32767: - raise SpecError("not in range") - return pack('>h', number) - - def validate(self, frame, value): - if value is not None: - try: - self.write(frame, value) - except SpecError: - raise ValueError("out of range") - return 
value - - -class VolumePeakSpec(Spec): - def read(self, frame, data): - # http://bugs.xmms.org/attachment.cgi?id=113&action=view - peak = 0 - data_array = bytearray(data) - bits = data_array[0] - vol_bytes = min(4, (bits + 7) >> 3) - # not enough frame data - if vol_bytes + 1 > len(data): - raise SpecError("not enough frame data") - shift = ((8 - (bits & 7)) & 7) + (4 - vol_bytes) * 8 - for i in xrange(1, vol_bytes + 1): - peak *= 256 - peak += data_array[i] - peak *= 2 ** shift - return (float(peak) / (2 ** 31 - 1)), data[1 + vol_bytes:] - - def write(self, frame, value): - number = int(round(value * 32768)) - # pack only fails in 2.7, do it manually in 2.6 - if not 0 <= number <= 65535: - raise SpecError("not in range") - # always write as 16 bits for sanity. - return b"\x10" + pack('>H', number) - - def validate(self, frame, value): - if value is not None: - try: - self.write(frame, value) - except SpecError: - raise ValueError("out of range") - return value - - -class SynchronizedTextSpec(EncodedTextSpec): - def read(self, frame, data): - texts = [] - encoding, term = self._encodings[frame.encoding] - while data: - try: - value, data = decode_terminated(data, encoding) - except ValueError: - raise SpecError("decoding error") - - if len(data) < 4: - raise SpecError("not enough data") - time, = struct.unpack(">I", data[:4]) - - texts.append((value, time)) - data = data[4:] - return texts, b"" - - def write(self, frame, value): - data = [] - encoding, term = self._encodings[frame.encoding] - for text, time in value: - text = text.encode(encoding) + term - data.append(text + struct.pack(">I", time)) - return b"".join(data) - - def validate(self, frame, value): - return value - - -class KeyEventSpec(Spec): - def read(self, frame, data): - events = [] - while len(data) >= 5: - events.append(struct.unpack(">bI", data[:5])) - data = data[5:] - return events, data - - def write(self, frame, value): - return b"".join(struct.pack(">bI", *event) for event in value) - - def validate(self, frame, value): - return value - - -class VolumeAdjustmentsSpec(Spec): - # Not to be confused with VolumeAdjustmentSpec. 
- def read(self, frame, data): - adjustments = {} - while len(data) >= 4: - freq, adj = struct.unpack(">Hh", data[:4]) - data = data[4:] - freq /= 2.0 - adj /= 512.0 - adjustments[freq] = adj - adjustments = sorted(adjustments.items()) - return adjustments, data - - def write(self, frame, value): - value.sort() - return b"".join(struct.pack(">Hh", int(freq * 2), int(adj * 512)) - for (freq, adj) in value) - - def validate(self, frame, value): - return value - - -class ASPIIndexSpec(Spec): - def read(self, frame, data): - if frame.b == 16: - format = "H" - size = 2 - elif frame.b == 8: - format = "B" - size = 1 - else: - raise SpecError("invalid bit count in ASPI (%d)" % frame.b) - - indexes = data[:frame.N * size] - data = data[frame.N * size:] - try: - return list(struct.unpack(">" + format * frame.N, indexes)), data - except struct.error as e: - raise SpecError(e) - - def write(self, frame, values): - if frame.b == 16: - format = "H" - elif frame.b == 8: - format = "B" - else: - raise SpecError("frame.b must be 8 or 16") - try: - return struct.pack(">" + format * frame.N, *values) - except struct.error as e: - raise SpecError(e) - - def validate(self, frame, values): - return values diff --git a/resources/lib/mutagen/id3/_util.py b/resources/lib/mutagen/id3/_util.py deleted file mode 100644 index 29f7241d..00000000 --- a/resources/lib/mutagen/id3/_util.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Michael Urman -# 2013 Christoph Reiter -# 2014 Ben Ockmore -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -from .._compat import long_, integer_types, PY3 -from .._util import MutagenError - - -class error(MutagenError): - pass - - -class ID3NoHeaderError(error, ValueError): - pass - - -class ID3UnsupportedVersionError(error, NotImplementedError): - pass - - -class ID3EncryptionUnsupportedError(error, NotImplementedError): - pass - - -class ID3JunkFrameError(error, ValueError): - pass - - -class unsynch(object): - @staticmethod - def decode(value): - fragments = bytearray(value).split(b'\xff') - if len(fragments) > 1 and not fragments[-1]: - raise ValueError('string ended unsafe') - - for f in fragments[1:]: - if (not f) or (f[0] >= 0xE0): - raise ValueError('invalid sync-safe string') - - if f[0] == 0x00: - del f[0] - - return bytes(bytearray(b'\xff').join(fragments)) - - @staticmethod - def encode(value): - fragments = bytearray(value).split(b'\xff') - for f in fragments[1:]: - if (not f) or (f[0] >= 0xE0) or (f[0] == 0x00): - f.insert(0, 0x00) - return bytes(bytearray(b'\xff').join(fragments)) - - -class _BitPaddedMixin(object): - - def as_str(self, width=4, minwidth=4): - return self.to_str(self, self.bits, self.bigendian, width, minwidth) - - @staticmethod - def to_str(value, bits=7, bigendian=True, width=4, minwidth=4): - mask = (1 << bits) - 1 - - if width != -1: - index = 0 - bytes_ = bytearray(width) - try: - while value: - bytes_[index] = value & mask - value >>= bits - index += 1 - except IndexError: - raise ValueError('Value too wide (>%d bytes)' % width) - else: - # PCNT and POPM use growing integers - # of at least 4 bytes (=minwidth) as counters. 
- bytes_ = bytearray() - append = bytes_.append - while value: - append(value & mask) - value >>= bits - bytes_ = bytes_.ljust(minwidth, b"\x00") - - if bigendian: - bytes_.reverse() - return bytes(bytes_) - - @staticmethod - def has_valid_padding(value, bits=7): - """Whether the padding bits are all zero""" - - assert bits <= 8 - - mask = (((1 << (8 - bits)) - 1) << bits) - - if isinstance(value, integer_types): - while value: - if value & mask: - return False - value >>= 8 - elif isinstance(value, bytes): - for byte in bytearray(value): - if byte & mask: - return False - else: - raise TypeError - - return True - - -class BitPaddedInt(int, _BitPaddedMixin): - - def __new__(cls, value, bits=7, bigendian=True): - - mask = (1 << (bits)) - 1 - numeric_value = 0 - shift = 0 - - if isinstance(value, integer_types): - while value: - numeric_value += (value & mask) << shift - value >>= 8 - shift += bits - elif isinstance(value, bytes): - if bigendian: - value = reversed(value) - for byte in bytearray(value): - numeric_value += (byte & mask) << shift - shift += bits - else: - raise TypeError - - if isinstance(numeric_value, int): - self = int.__new__(BitPaddedInt, numeric_value) - else: - self = long_.__new__(BitPaddedLong, numeric_value) - - self.bits = bits - self.bigendian = bigendian - return self - -if PY3: - BitPaddedLong = BitPaddedInt -else: - class BitPaddedLong(long_, _BitPaddedMixin): - pass - - -class ID3BadUnsynchData(error, ValueError): - """Deprecated""" - - -class ID3BadCompressedData(error, ValueError): - """Deprecated""" - - -class ID3TagError(error, ValueError): - """Deprecated""" - - -class ID3Warning(error, UserWarning): - """Deprecated""" diff --git a/resources/lib/mutagen/m4a.py b/resources/lib/mutagen/m4a.py deleted file mode 100644 index 5730ace3..00000000 --- a/resources/lib/mutagen/m4a.py +++ /dev/null @@ -1,101 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -""" -since 1.9: mutagen.m4a is deprecated; use mutagen.mp4 instead. -since 1.31: mutagen.m4a will no longer work; any operation that could fail - will fail now. 
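Following the deprecation notice above, code still importing mutagen.m4a would normally be moved to the mp4 module instead; a sketch of the replacement, with a placeholder path and illustrative tag value::

    from mutagen.mp4 import MP4

    audio = MP4("track.m4a")           # replaces mutagen.m4a.M4A
    audio["\xa9nam"] = ["New title"]   # iTunes-style atom keys, see mutagen.mp4
    audio.save()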
-""" - -import warnings - -from mutagen import FileType, Metadata, StreamInfo -from ._util import DictProxy, MutagenError - -warnings.warn( - "mutagen.m4a is deprecated; use mutagen.mp4 instead.", - DeprecationWarning) - - -class error(IOError, MutagenError): - pass - - -class M4AMetadataError(error): - pass - - -class M4AStreamInfoError(error): - pass - - -class M4AMetadataValueError(ValueError, M4AMetadataError): - pass - - -__all__ = ['M4A', 'Open', 'delete', 'M4ACover'] - - -class M4ACover(bytes): - - FORMAT_JPEG = 0x0D - FORMAT_PNG = 0x0E - - def __new__(cls, data, imageformat=None): - self = bytes.__new__(cls, data) - if imageformat is None: - imageformat = M4ACover.FORMAT_JPEG - self.imageformat = imageformat - return self - - -class M4ATags(DictProxy, Metadata): - - def load(self, atoms, fileobj): - raise error("deprecated") - - def save(self, filename): - raise error("deprecated") - - def delete(self, filename): - raise error("deprecated") - - def pprint(self): - return u"" - - -class M4AInfo(StreamInfo): - - bitrate = 0 - - def __init__(self, atoms, fileobj): - raise error("deprecated") - - def pprint(self): - return u"" - - -class M4A(FileType): - - _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"] - - def load(self, filename): - raise error("deprecated") - - def add_tags(self): - self.tags = M4ATags() - - @staticmethod - def score(filename, fileobj, header): - return 0 - - -Open = M4A - - -def delete(filename): - raise error("deprecated") diff --git a/resources/lib/mutagen/monkeysaudio.py b/resources/lib/mutagen/monkeysaudio.py deleted file mode 100644 index 0e29273f..00000000 --- a/resources/lib/mutagen/monkeysaudio.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Lukas Lalinsky -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Monkey's Audio streams with APEv2 tags. - -Monkey's Audio is a very efficient lossless audio compressor developed -by Matt Ashland. - -For more information, see http://www.monkeysaudio.com/. -""" - -__all__ = ["MonkeysAudio", "Open", "delete"] - -import struct - -from ._compat import endswith -from mutagen import StreamInfo -from mutagen.apev2 import APEv2File, error, delete -from mutagen._util import cdata - - -class MonkeysAudioHeaderError(error): - pass - - -class MonkeysAudioInfo(StreamInfo): - """Monkey's Audio stream information. 
- - Attributes: - - * channels -- number of audio channels - * length -- file length in seconds, as a float - * sample_rate -- audio sampling rate in Hz - * bits_per_sample -- bits per sample - * version -- Monkey's Audio stream version, as a float (eg: 3.99) - """ - - def __init__(self, fileobj): - header = fileobj.read(76) - if len(header) != 76 or not header.startswith(b"MAC "): - raise MonkeysAudioHeaderError("not a Monkey's Audio file") - self.version = cdata.ushort_le(header[4:6]) - if self.version >= 3980: - (blocks_per_frame, final_frame_blocks, total_frames, - self.bits_per_sample, self.channels, - self.sample_rate) = struct.unpack("= 3950: - blocks_per_frame = 73728 * 4 - elif self.version >= 3900 or (self.version >= 3800 and - compression_level == 4): - blocks_per_frame = 73728 - else: - blocks_per_frame = 9216 - self.version /= 1000.0 - self.length = 0.0 - if (self.sample_rate != 0) and (total_frames > 0): - total_blocks = ((total_frames - 1) * blocks_per_frame + - final_frame_blocks) - self.length = float(total_blocks) / self.sample_rate - - def pprint(self): - return u"Monkey's Audio %.2f, %.2f seconds, %d Hz" % ( - self.version, self.length, self.sample_rate) - - -class MonkeysAudio(APEv2File): - _Info = MonkeysAudioInfo - _mimes = ["audio/ape", "audio/x-ape"] - - @staticmethod - def score(filename, fileobj, header): - return header.startswith(b"MAC ") + endswith(filename.lower(), ".ape") - - -Open = MonkeysAudio diff --git a/resources/lib/mutagen/mp3.py b/resources/lib/mutagen/mp3.py deleted file mode 100644 index afb600cf..00000000 --- a/resources/lib/mutagen/mp3.py +++ /dev/null @@ -1,362 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -"""MPEG audio stream information and tags.""" - -import os -import struct - -from ._compat import endswith, xrange -from ._mp3util import XingHeader, XingHeaderError, VBRIHeader, VBRIHeaderError -from mutagen import StreamInfo -from mutagen._util import MutagenError, enum -from mutagen.id3 import ID3FileType, BitPaddedInt, delete - -__all__ = ["MP3", "Open", "delete", "MP3"] - - -class error(RuntimeError, MutagenError): - pass - - -class HeaderNotFoundError(error, IOError): - pass - - -class InvalidMPEGHeader(error, IOError): - pass - - -@enum -class BitrateMode(object): - - UNKNOWN = 0 - """Probably a CBR file, but not sure""" - - CBR = 1 - """Constant Bitrate""" - - VBR = 2 - """Variable Bitrate""" - - ABR = 3 - """Average Bitrate (a variant of VBR)""" - - -def _guess_xing_bitrate_mode(xing): - - if xing.lame_header: - lame = xing.lame_header - if lame.vbr_method in (1, 8): - return BitrateMode.CBR - elif lame.vbr_method in (2, 9): - return BitrateMode.ABR - elif lame.vbr_method in (3, 4, 5, 6): - return BitrateMode.VBR - # everything else undefined, continue guessing - - # info tags get only written by lame for cbr files - if xing.is_info: - return BitrateMode.CBR - - # older lame and non-lame with some variant of vbr - if xing.vbr_scale != -1 or xing.lame_version: - return BitrateMode.VBR - - return BitrateMode.UNKNOWN - - -# Mode values. -STEREO, JOINTSTEREO, DUALCHANNEL, MONO = xrange(4) - - -class MPEGInfo(StreamInfo): - """MPEG audio stream information - - Parse information about an MPEG audio file. This also reads the - Xing VBR header format. 
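These stream attributes are normally reached through the MP3 class defined at the end of this module rather than by instantiating MPEGInfo directly; a minimal sketch, assuming a local MP3 file (the path is a placeholder)::

    from mutagen.mp3 import MP3

    audio = MP3("song.mp3")
    info = audio.info
    print(info.length, info.bitrate, info.sample_rate)
    print(info.bitrate_mode, info.encoder_info, info.sketchy)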
- - This code was implemented based on the format documentation at - http://mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm. - - Useful attributes: - - * length -- audio length, in seconds - * channels -- number of audio channels - * bitrate -- audio bitrate, in bits per second - * sketchy -- if true, the file may not be valid MPEG audio - * encoder_info -- a string containing encoder name and possibly version. - In case a lame tag is present this will start with - ``"LAME "``, if unknown it is empty, otherwise the - text format is undefined. - * bitrate_mode -- a :class:`BitrateMode` - - * track_gain -- replaygain track gain (89db) or None - * track_peak -- replaygain track peak or None - * album_gain -- replaygain album gain (89db) or None - - Useless attributes: - - * version -- MPEG version (1, 2, 2.5) - * layer -- 1, 2, or 3 - * mode -- One of STEREO, JOINTSTEREO, DUALCHANNEL, or MONO (0-3) - * protected -- whether or not the file is "protected" - * padding -- whether or not audio frames are padded - * sample_rate -- audio sample rate, in Hz - """ - - # Map (version, layer) tuples to bitrates. - __BITRATE = { - (1, 1): [0, 32, 64, 96, 128, 160, 192, 224, - 256, 288, 320, 352, 384, 416, 448], - (1, 2): [0, 32, 48, 56, 64, 80, 96, 112, 128, - 160, 192, 224, 256, 320, 384], - (1, 3): [0, 32, 40, 48, 56, 64, 80, 96, 112, - 128, 160, 192, 224, 256, 320], - (2, 1): [0, 32, 48, 56, 64, 80, 96, 112, 128, - 144, 160, 176, 192, 224, 256], - (2, 2): [0, 8, 16, 24, 32, 40, 48, 56, 64, - 80, 96, 112, 128, 144, 160], - } - - __BITRATE[(2, 3)] = __BITRATE[(2, 2)] - for i in xrange(1, 4): - __BITRATE[(2.5, i)] = __BITRATE[(2, i)] - - # Map version to sample rates. - __RATES = { - 1: [44100, 48000, 32000], - 2: [22050, 24000, 16000], - 2.5: [11025, 12000, 8000] - } - - sketchy = False - encoder_info = u"" - bitrate_mode = BitrateMode.UNKNOWN - track_gain = track_peak = album_gain = album_peak = None - - def __init__(self, fileobj, offset=None): - """Parse MPEG stream information from a file-like object. - - If an offset argument is given, it is used to start looking - for stream information and Xing headers; otherwise, ID3v2 tags - will be skipped automatically. A correct offset can make - loading files significantly faster. - """ - - try: - size = os.path.getsize(fileobj.name) - except (IOError, OSError, AttributeError): - fileobj.seek(0, 2) - size = fileobj.tell() - - # If we don't get an offset, try to skip an ID3v2 tag. - if offset is None: - fileobj.seek(0, 0) - idata = fileobj.read(10) - try: - id3, insize = struct.unpack('>3sxxx4s', idata) - except struct.error: - id3, insize = b'', 0 - insize = BitPaddedInt(insize) - if id3 == b'ID3' and insize > 0: - offset = insize + 10 - else: - offset = 0 - - # Try to find two valid headers (meaning, very likely MPEG data) - # at the given offset, 30% through the file, 60% through the file, - # and 90% through the file. - for i in [offset, 0.3 * size, 0.6 * size, 0.9 * size]: - try: - self.__try(fileobj, int(i), size - offset) - except error: - pass - else: - break - # If we can't find any two consecutive frames, try to find just - # one frame back at the original offset given. - else: - self.__try(fileobj, offset, size - offset, False) - self.sketchy = True - - def __try(self, fileobj, offset, real_size, check_second=True): - # This is going to be one really long function; bear with it, - # because there's not really a sane point to cut it up. 
- fileobj.seek(offset, 0) - - # We "know" we have an MPEG file if we find two frames that look like - # valid MPEG data. If we can't find them in 32k of reads, something - # is horribly wrong (the longest frame can only be about 4k). This - # is assuming the offset didn't lie. - data = fileobj.read(32768) - - frame_1 = data.find(b"\xff") - while 0 <= frame_1 <= (len(data) - 4): - frame_data = struct.unpack(">I", data[frame_1:frame_1 + 4])[0] - if ((frame_data >> 16) & 0xE0) != 0xE0: - frame_1 = data.find(b"\xff", frame_1 + 2) - else: - version = (frame_data >> 19) & 0x3 - layer = (frame_data >> 17) & 0x3 - protection = (frame_data >> 16) & 0x1 - bitrate = (frame_data >> 12) & 0xF - sample_rate = (frame_data >> 10) & 0x3 - padding = (frame_data >> 9) & 0x1 - # private = (frame_data >> 8) & 0x1 - self.mode = (frame_data >> 6) & 0x3 - # mode_extension = (frame_data >> 4) & 0x3 - # copyright = (frame_data >> 3) & 0x1 - # original = (frame_data >> 2) & 0x1 - # emphasis = (frame_data >> 0) & 0x3 - if (version == 1 or layer == 0 or sample_rate == 0x3 or - bitrate == 0 or bitrate == 0xF): - frame_1 = data.find(b"\xff", frame_1 + 2) - else: - break - else: - raise HeaderNotFoundError("can't sync to an MPEG frame") - - self.channels = 1 if self.mode == MONO else 2 - - # There is a serious problem here, which is that many flags - # in an MPEG header are backwards. - self.version = [2.5, None, 2, 1][version] - self.layer = 4 - layer - self.protected = not protection - self.padding = bool(padding) - - self.bitrate = self.__BITRATE[(self.version, self.layer)][bitrate] - self.bitrate *= 1000 - self.sample_rate = self.__RATES[self.version][sample_rate] - - if self.layer == 1: - frame_length = ( - (12 * self.bitrate // self.sample_rate) + padding) * 4 - frame_size = 384 - elif self.version >= 2 and self.layer == 3: - frame_length = (72 * self.bitrate // self.sample_rate) + padding - frame_size = 576 - else: - frame_length = (144 * self.bitrate // self.sample_rate) + padding - frame_size = 1152 - - if check_second: - possible = int(frame_1 + frame_length) - if possible > len(data) + 4: - raise HeaderNotFoundError("can't sync to second MPEG frame") - try: - frame_data = struct.unpack( - ">H", data[possible:possible + 2])[0] - except struct.error: - raise HeaderNotFoundError("can't sync to second MPEG frame") - if (frame_data & 0xFFE0) != 0xFFE0: - raise HeaderNotFoundError("can't sync to second MPEG frame") - - self.length = 8 * real_size / float(self.bitrate) - - # Try to find/parse the Xing header, which trumps the above length - # and bitrate calculation. 
- - if self.layer != 3: - return - - # Xing - xing_offset = XingHeader.get_offset(self) - fileobj.seek(offset + frame_1 + xing_offset, 0) - try: - xing = XingHeader(fileobj) - except XingHeaderError: - pass - else: - lame = xing.lame_header - self.sketchy = False - self.bitrate_mode = _guess_xing_bitrate_mode(xing) - if xing.frames != -1: - samples = frame_size * xing.frames - if lame is not None: - samples -= lame.encoder_delay_start - samples -= lame.encoder_padding_end - self.length = float(samples) / self.sample_rate - if xing.bytes != -1 and self.length: - self.bitrate = int((xing.bytes * 8) / self.length) - if xing.lame_version: - self.encoder_info = u"LAME %s" % xing.lame_version - if lame is not None: - self.track_gain = lame.track_gain_adjustment - self.track_peak = lame.track_peak - self.album_gain = lame.album_gain_adjustment - return - - # VBRI - vbri_offset = VBRIHeader.get_offset(self) - fileobj.seek(offset + frame_1 + vbri_offset, 0) - try: - vbri = VBRIHeader(fileobj) - except VBRIHeaderError: - pass - else: - self.bitrate_mode = BitrateMode.VBR - self.encoder_info = u"FhG" - self.sketchy = False - self.length = float(frame_size * vbri.frames) / self.sample_rate - if self.length: - self.bitrate = int((vbri.bytes * 8) / self.length) - - def pprint(self): - info = str(self.bitrate_mode).split(".", 1)[-1] - if self.bitrate_mode == BitrateMode.UNKNOWN: - info = u"CBR?" - if self.encoder_info: - info += ", %s" % self.encoder_info - s = u"MPEG %s layer %d, %d bps (%s), %s Hz, %d chn, %.2f seconds" % ( - self.version, self.layer, self.bitrate, info, - self.sample_rate, self.channels, self.length) - if self.sketchy: - s += u" (sketchy)" - return s - - -class MP3(ID3FileType): - """An MPEG audio (usually MPEG-1 Layer 3) file. - - :ivar info: :class:`MPEGInfo` - :ivar tags: :class:`ID3 ` - """ - - _Info = MPEGInfo - - _mimes = ["audio/mpeg", "audio/mpg", "audio/x-mpeg"] - - @property - def mime(self): - l = self.info.layer - return ["audio/mp%d" % l, "audio/x-mp%d" % l] + super(MP3, self).mime - - @staticmethod - def score(filename, fileobj, header_data): - filename = filename.lower() - - return (header_data.startswith(b"ID3") * 2 + - endswith(filename, b".mp3") + - endswith(filename, b".mp2") + endswith(filename, b".mpg") + - endswith(filename, b".mpeg")) - - -Open = MP3 - - -class EasyMP3(MP3): - """Like MP3, but uses EasyID3 for tags. - - :ivar info: :class:`MPEGInfo` - :ivar tags: :class:`EasyID3 ` - """ - - from mutagen.easyid3 import EasyID3 as ID3 - ID3 = ID3 diff --git a/resources/lib/mutagen/mp4/__init__.py b/resources/lib/mutagen/mp4/__init__.py deleted file mode 100644 index bc242ee8..00000000 --- a/resources/lib/mutagen/mp4/__init__.py +++ /dev/null @@ -1,1010 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write MPEG-4 audio files with iTunes metadata. - -This module will read MPEG-4 audio information and metadata, -as found in Apple's MP4 (aka M4A, M4B, M4P) files. - -There is no official specification for this format. 
The source code -for TagLib, FAAD, and various MPEG specifications at - -* http://developer.apple.com/documentation/QuickTime/QTFF/ -* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt -* http://standards.iso.org/ittf/PubliclyAvailableStandards/\ -c041828_ISO_IEC_14496-12_2005(E).zip -* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime - -were all consulted. -""" - -import struct -import sys - -from mutagen import FileType, Metadata, StreamInfo, PaddingInfo -from mutagen._constants import GENRES -from mutagen._util import (cdata, insert_bytes, DictProxy, MutagenError, - hashable, enum, get_size, resize_bytes) -from mutagen._compat import (reraise, PY2, string_types, text_type, chr_, - iteritems, PY3, cBytesIO, izip, xrange) -from ._atom import Atoms, Atom, AtomError -from ._util import parse_full_atom -from ._as_entry import AudioSampleEntry, ASEntryError - - -class error(IOError, MutagenError): - pass - - -class MP4MetadataError(error): - pass - - -class MP4StreamInfoError(error): - pass - - -class MP4MetadataValueError(ValueError, MP4MetadataError): - pass - - -__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType'] - - -@enum -class AtomDataType(object): - """Enum for `dataformat` attribute of MP4FreeForm. - - .. versionadded:: 1.25 - """ - - IMPLICIT = 0 - """for use with tags for which no type needs to be indicated because - only one type is allowed""" - - UTF8 = 1 - """without any count or null terminator""" - - UTF16 = 2 - """also known as UTF-16BE""" - - SJIS = 3 - """deprecated unless it is needed for special Japanese characters""" - - HTML = 6 - """the HTML file header specifies which HTML version""" - - XML = 7 - """the XML header must identify the DTD or schemas""" - - UUID = 8 - """also known as GUID; stored as 16 bytes in binary (valid as an ID)""" - - ISRC = 9 - """stored as UTF-8 text (valid as an ID)""" - - MI3P = 10 - """stored as UTF-8 text (valid as an ID)""" - - GIF = 12 - """(deprecated) a GIF image""" - - JPEG = 13 - """a JPEG image""" - - PNG = 14 - """PNG image""" - - URL = 15 - """absolute, in UTF-8 characters""" - - DURATION = 16 - """in milliseconds, 32-bit integer""" - - DATETIME = 17 - """in UTC, counting seconds since midnight, January 1, 1904; - 32 or 64-bits""" - - GENRES = 18 - """a list of enumerated values""" - - INTEGER = 21 - """a signed big-endian integer with length one of { 1,2,3,4,8 } bytes""" - - RIAA_PA = 24 - """RIAA parental advisory; { -1=no, 1=yes, 0=unspecified }, - 8-bit ingteger""" - - UPC = 25 - """Universal Product Code, in text UTF-8 format (valid as an ID)""" - - BMP = 27 - """Windows bitmap image""" - - -@hashable -class MP4Cover(bytes): - """A cover artwork. - - Attributes: - - * imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG) - """ - - FORMAT_JPEG = AtomDataType.JPEG - FORMAT_PNG = AtomDataType.PNG - - def __new__(cls, data, *args, **kwargs): - return bytes.__new__(cls, data) - - def __init__(self, data, imageformat=FORMAT_JPEG): - self.imageformat = imageformat - - __hash__ = bytes.__hash__ - - def __eq__(self, other): - if not isinstance(other, MP4Cover): - return bytes(self) == other - - return (bytes(self) == bytes(other) and - self.imageformat == other.imageformat) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "%s(%r, %r)" % ( - type(self).__name__, bytes(self), - AtomDataType(self.imageformat)) - - -@hashable -class MP4FreeForm(bytes): - """A freeform value. 
- - Attributes: - - * dataformat -- format of the data (see AtomDataType) - """ - - FORMAT_DATA = AtomDataType.IMPLICIT # deprecated - FORMAT_TEXT = AtomDataType.UTF8 # deprecated - - def __new__(cls, data, *args, **kwargs): - return bytes.__new__(cls, data) - - def __init__(self, data, dataformat=AtomDataType.UTF8, version=0): - self.dataformat = dataformat - self.version = version - - __hash__ = bytes.__hash__ - - def __eq__(self, other): - if not isinstance(other, MP4FreeForm): - return bytes(self) == other - - return (bytes(self) == bytes(other) and - self.dataformat == other.dataformat and - self.version == other.version) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "%s(%r, %r)" % ( - type(self).__name__, bytes(self), - AtomDataType(self.dataformat)) - - - -def _name2key(name): - if PY2: - return name - return name.decode("latin-1") - - -def _key2name(key): - if PY2: - return key - return key.encode("latin-1") - - -def _find_padding(atom_path): - # Check for padding "free" atom - # XXX: we only use them if they are adjacent to ilst, and only one. - # and there also is a top level free atom which we could use maybe..? - - meta, ilst = atom_path[-2:] - assert meta.name == b"meta" and ilst.name == b"ilst" - index = meta.children.index(ilst) - try: - prev = meta.children[index - 1] - if prev.name == b"free": - return prev - except IndexError: - pass - - try: - next_ = meta.children[index + 1] - if next_.name == b"free": - return next_ - except IndexError: - pass - - -class MP4Tags(DictProxy, Metadata): - r"""Dictionary containing Apple iTunes metadata list key/values. - - Keys are four byte identifiers, except for freeform ('----') - keys. Values are usually unicode strings, but some atoms have a - special structure: - - Text values (multiple values per key are supported): - - * '\\xa9nam' -- track title - * '\\xa9alb' -- album - * '\\xa9ART' -- artist - * 'aART' -- album artist - * '\\xa9wrt' -- composer - * '\\xa9day' -- year - * '\\xa9cmt' -- comment - * 'desc' -- description (usually used in podcasts) - * 'purd' -- purchase date - * '\\xa9grp' -- grouping - * '\\xa9gen' -- genre - * '\\xa9lyr' -- lyrics - * 'purl' -- podcast URL - * 'egid' -- podcast episode GUID - * 'catg' -- podcast category - * 'keyw' -- podcast keywords - * '\\xa9too' -- encoded by - * 'cprt' -- copyright - * 'soal' -- album sort order - * 'soaa' -- album artist sort order - * 'soar' -- artist sort order - * 'sonm' -- title sort order - * 'soco' -- composer sort order - * 'sosn' -- show sort order - * 'tvsh' -- show name - - Boolean values: - - * 'cpil' -- part of a compilation - * 'pgap' -- part of a gapless album - * 'pcst' -- podcast (iTunes reads this only on import) - - Tuples of ints (multiple values per key are supported): - - * 'trkn' -- track number, total tracks - * 'disk' -- disc number, total discs - - Others: - - * 'tmpo' -- tempo/BPM, 16 bit int - * 'covr' -- cover artwork, list of MP4Cover objects (which are - tagged strs) - * 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead. - - The freeform '----' frames use a key in the format '----:mean:name' - where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique - identifier for this frame. The value is a str, but is probably - text that can be decoded as UTF-8. Multiple values per key are - supported. - - MP4 tag data cannot exist outside of the structure of an MP4 file, - so this class should not be manually instantiated. 
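As the note above says, MP4Tags is not instantiated by hand; the keys listed here are usually written through the MP4 class. A short sketch with illustrative keys and values and a placeholder path::

    from mutagen.mp4 import MP4, MP4FreeForm, AtomDataType

    audio = MP4("track.m4a")
    audio["\xa9nam"] = ["Track title"]
    audio["trkn"] = [(3, 12)]                     # track 3 of 12
    audio["----:com.apple.iTunes:MY_TAG"] = [
        MP4FreeForm(b"custom value", dataformat=AtomDataType.UTF8)]
    audio.save()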
- - Unknown non-text tags and tags that failed to parse will be written - back as is. - """ - - def __init__(self, *args, **kwargs): - self._failed_atoms = {} - super(MP4Tags, self).__init__(*args, **kwargs) - - def load(self, atoms, fileobj): - try: - path = atoms.path(b"moov", b"udta", b"meta", b"ilst") - except KeyError as key: - raise MP4MetadataError(key) - - free = _find_padding(path) - self._padding = free.datalength if free is not None else 0 - - ilst = path[-1] - for atom in ilst.children: - ok, data = atom.read(fileobj) - if not ok: - raise MP4MetadataError("Not enough data") - - try: - if atom.name in self.__atoms: - info = self.__atoms[atom.name] - info[0](self, atom, data) - else: - # unknown atom, try as text - self.__parse_text(atom, data, implicit=False) - except MP4MetadataError: - # parsing failed, save them so we can write them back - key = _name2key(atom.name) - self._failed_atoms.setdefault(key, []).append(data) - - def __setitem__(self, key, value): - if not isinstance(key, str): - raise TypeError("key has to be str") - super(MP4Tags, self).__setitem__(key, value) - - @classmethod - def _can_load(cls, atoms): - return b"moov.udta.meta.ilst" in atoms - - @staticmethod - def _key_sort(item): - (key, v) = item - # iTunes always writes the tags in order of "relevance", try - # to copy it as closely as possible. - order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb", - "\xa9gen", "gnre", "trkn", "disk", - "\xa9day", "cpil", "pgap", "pcst", "tmpo", - "\xa9too", "----", "covr", "\xa9lyr"] - order = dict(izip(order, xrange(len(order)))) - last = len(order) - # If there's no key-based way to distinguish, order by length. - # If there's still no way, go by string comparison on the - # values, so we at least have something determinstic. - return (order.get(key[:4], last), len(repr(v)), repr(v)) - - def save(self, filename, padding=None): - """Save the metadata to the given filename.""" - - values = [] - items = sorted(self.items(), key=self._key_sort) - for key, value in items: - atom_name = _key2name(key)[:4] - if atom_name in self.__atoms: - render_func = self.__atoms[atom_name][1] - else: - render_func = type(self).__render_text - - try: - values.append(render_func(self, key, value)) - except (TypeError, ValueError) as s: - reraise(MP4MetadataValueError, s, sys.exc_info()[2]) - - for key, failed in iteritems(self._failed_atoms): - # don't write atoms back if we have added a new one with - # the same name, this excludes freeform which can have - # multiple atoms with the same key (most parsers seem to be able - # to handle that) - if key in self: - assert _key2name(key) != b"----" - continue - for data in failed: - values.append(Atom.render(_key2name(key), data)) - - data = Atom.render(b"ilst", b"".join(values)) - - # Find the old atoms. 
- with open(filename, "rb+") as fileobj: - try: - atoms = Atoms(fileobj) - except AtomError as err: - reraise(error, err, sys.exc_info()[2]) - - self.__save(fileobj, atoms, data, padding) - - def __save(self, fileobj, atoms, data, padding): - try: - path = atoms.path(b"moov", b"udta", b"meta", b"ilst") - except KeyError: - self.__save_new(fileobj, atoms, data, padding) - else: - self.__save_existing(fileobj, atoms, path, data, padding) - - def __pad_ilst(self, data, length=None): - if length is None: - length = ((len(data) + 1023) & ~1023) - len(data) - return Atom.render(b"free", b"\x00" * length) - - def __save_new(self, fileobj, atoms, ilst_data, padding_func): - hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9) - meta_data = b"\x00\x00\x00\x00" + hdlr + ilst_data - - try: - path = atoms.path(b"moov", b"udta") - except KeyError: - path = atoms.path(b"moov") - - offset = path[-1]._dataoffset - - # ignoring some atom overhead... but we don't have padding left anyway - # and padding_size is guaranteed to be less than zero - content_size = get_size(fileobj) - offset - padding_size = -len(meta_data) - assert padding_size < 0 - info = PaddingInfo(padding_size, content_size) - new_padding = info._get_padding(padding_func) - new_padding = min(0xFFFFFFFF, new_padding) - - free = Atom.render(b"free", b"\x00" * new_padding) - meta = Atom.render(b"meta", meta_data + free) - if path[-1].name != b"udta": - # moov.udta not found -- create one - data = Atom.render(b"udta", meta) - else: - data = meta - - insert_bytes(fileobj, len(data), offset) - fileobj.seek(offset) - fileobj.write(data) - self.__update_parents(fileobj, path, len(data)) - self.__update_offsets(fileobj, atoms, len(data), offset) - - def __save_existing(self, fileobj, atoms, path, ilst_data, padding_func): - # Replace the old ilst atom. 
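The padding_func plumbing here is what the public save(padding=...) hook feeds into; a sketch, assuming this vintage of mutagen forwards the keyword through FileType.save to MP4Tags.save, where the callback receives a PaddingInfo and returns the desired padding size in bytes::

    from mutagen.mp4 import MP4

    audio = MP4("track.m4a")                 # placeholder path
    audio.save(padding=lambda info: 1024)    # keep roughly 1 KiB of padding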
- ilst = path[-1] - offset = ilst.offset - length = ilst.length - - # Use adjacent free atom if there is one - free = _find_padding(path) - if free is not None: - offset = min(offset, free.offset) - length += free.length - - # Always add a padding atom to make things easier - padding_overhead = len(Atom.render(b"free", b"")) - content_size = get_size(fileobj) - (offset + length) - padding_size = length - (len(ilst_data) + padding_overhead) - info = PaddingInfo(padding_size, content_size) - new_padding = info._get_padding(padding_func) - # Limit padding size so we can be sure the free atom overhead is as we - # calculated above (see Atom.render) - new_padding = min(0xFFFFFFFF, new_padding) - - ilst_data += Atom.render(b"free", b"\x00" * new_padding) - - resize_bytes(fileobj, length, len(ilst_data), offset) - delta = len(ilst_data) - length - - fileobj.seek(offset) - fileobj.write(ilst_data) - self.__update_parents(fileobj, path[:-1], delta) - self.__update_offsets(fileobj, atoms, delta, offset) - - def __update_parents(self, fileobj, path, delta): - """Update all parent atoms with the new size.""" - - if delta == 0: - return - - for atom in path: - fileobj.seek(atom.offset) - size = cdata.uint_be(fileobj.read(4)) - if size == 1: # 64bit - # skip name (4B) and read size (8B) - size = cdata.ulonglong_be(fileobj.read(12)[4:]) - fileobj.seek(atom.offset + 8) - fileobj.write(cdata.to_ulonglong_be(size + delta)) - else: # 32bit - fileobj.seek(atom.offset) - fileobj.write(cdata.to_uint_be(size + delta)) - - def __update_offset_table(self, fileobj, fmt, atom, delta, offset): - """Update offset table in the specified atom.""" - if atom.offset > offset: - atom.offset += delta - fileobj.seek(atom.offset + 12) - data = fileobj.read(atom.length - 12) - fmt = fmt % cdata.uint_be(data[:4]) - offsets = struct.unpack(fmt, data[4:]) - offsets = [o + (0, delta)[offset < o] for o in offsets] - fileobj.seek(atom.offset + 16) - fileobj.write(struct.pack(fmt, *offsets)) - - def __update_tfhd(self, fileobj, atom, delta, offset): - if atom.offset > offset: - atom.offset += delta - fileobj.seek(atom.offset + 9) - data = fileobj.read(atom.length - 9) - flags = cdata.uint_be(b"\x00" + data[:3]) - if flags & 1: - o = cdata.ulonglong_be(data[7:15]) - if o > offset: - o += delta - fileobj.seek(atom.offset + 16) - fileobj.write(cdata.to_ulonglong_be(o)) - - def __update_offsets(self, fileobj, atoms, delta, offset): - """Update offset tables in all 'stco' and 'co64' atoms.""" - if delta == 0: - return - moov = atoms[b"moov"] - for atom in moov.findall(b'stco', True): - self.__update_offset_table(fileobj, ">%dI", atom, delta, offset) - for atom in moov.findall(b'co64', True): - self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset) - try: - for atom in atoms[b"moof"].findall(b'tfhd', True): - self.__update_tfhd(fileobj, atom, delta, offset) - except KeyError: - pass - - def __parse_data(self, atom, data): - pos = 0 - while pos < atom.length - 8: - head = data[pos:pos + 12] - if len(head) != 12: - raise MP4MetadataError("truncated atom % r" % atom.name) - length, name = struct.unpack(">I4s", head[:8]) - version = ord(head[8:9]) - flags = struct.unpack(">I", b"\x00" + head[9:12])[0] - if name != b"data": - raise MP4MetadataError( - "unexpected atom %r inside %r" % (name, atom.name)) - - chunk = data[pos + 16:pos + length] - if len(chunk) != length - 16: - raise MP4MetadataError("truncated atom % r" % atom.name) - yield version, flags, chunk - pos += length - - def __add(self, key, value, single=False): - assert 
isinstance(key, str) - - if single: - self[key] = value - else: - self.setdefault(key, []).extend(value) - - def __render_data(self, key, version, flags, value): - return Atom.render(_key2name(key), b"".join([ - Atom.render( - b"data", struct.pack(">2I", version << 24 | flags, 0) + data) - for data in value])) - - def __parse_freeform(self, atom, data): - length = cdata.uint_be(data[:4]) - mean = data[12:length] - pos = length - length = cdata.uint_be(data[pos:pos + 4]) - name = data[pos + 12:pos + length] - pos += length - value = [] - while pos < atom.length - 8: - length, atom_name = struct.unpack(">I4s", data[pos:pos + 8]) - if atom_name != b"data": - raise MP4MetadataError( - "unexpected atom %r inside %r" % (atom_name, atom.name)) - - version = ord(data[pos + 8:pos + 8 + 1]) - flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0] - value.append(MP4FreeForm(data[pos + 16:pos + length], - dataformat=flags, version=version)) - pos += length - - key = _name2key(atom.name + b":" + mean + b":" + name) - self.__add(key, value) - - def __render_freeform(self, key, value): - if isinstance(value, bytes): - value = [value] - - dummy, mean, name = _key2name(key).split(b":", 2) - mean = struct.pack(">I4sI", len(mean) + 12, b"mean", 0) + mean - name = struct.pack(">I4sI", len(name) + 12, b"name", 0) + name - - data = b"" - for v in value: - flags = AtomDataType.UTF8 - version = 0 - if isinstance(v, MP4FreeForm): - flags = v.dataformat - version = v.version - - data += struct.pack( - ">I4s2I", len(v) + 16, b"data", version << 24 | flags, 0) - data += v - - return Atom.render(b"----", mean + name + data) - - def __parse_pair(self, atom, data): - key = _name2key(atom.name) - values = [struct.unpack(">2H", d[2:6]) for - version, flags, d in self.__parse_data(atom, data)] - self.__add(key, values) - - def __render_pair(self, key, value): - data = [] - for (track, total) in value: - if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: - data.append(struct.pack(">4H", 0, track, total, 0)) - else: - raise MP4MetadataValueError( - "invalid numeric pair %r" % ((track, total),)) - return self.__render_data(key, 0, AtomDataType.IMPLICIT, data) - - def __render_pair_no_trailing(self, key, value): - data = [] - for (track, total) in value: - if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: - data.append(struct.pack(">3H", 0, track, total)) - else: - raise MP4MetadataValueError( - "invalid numeric pair %r" % ((track, total),)) - return self.__render_data(key, 0, AtomDataType.IMPLICIT, data) - - def __parse_genre(self, atom, data): - values = [] - for version, flags, data in self.__parse_data(atom, data): - # version = 0, flags = 0 - if len(data) != 2: - raise MP4MetadataValueError("invalid genre") - genre = cdata.short_be(data) - # Translate to a freeform genre. 
- try: - genre = GENRES[genre - 1] - except IndexError: - # this will make us write it back at least - raise MP4MetadataValueError("unknown genre") - values.append(genre) - key = _name2key(b"\xa9gen") - self.__add(key, values) - - def __parse_tempo(self, atom, data): - values = [] - for version, flags, data in self.__parse_data(atom, data): - # version = 0, flags = 0 or 21 - if len(data) != 2: - raise MP4MetadataValueError("invalid tempo") - values.append(cdata.ushort_be(data)) - key = _name2key(atom.name) - self.__add(key, values) - - def __render_tempo(self, key, value): - try: - if len(value) == 0: - return self.__render_data(key, 0, AtomDataType.INTEGER, b"") - - if (min(value) < 0) or (max(value) >= 2 ** 16): - raise MP4MetadataValueError( - "invalid 16 bit integers: %r" % value) - except TypeError: - raise MP4MetadataValueError( - "tmpo must be a list of 16 bit integers") - - values = [cdata.to_ushort_be(v) for v in value] - return self.__render_data(key, 0, AtomDataType.INTEGER, values) - - def __parse_bool(self, atom, data): - for version, flags, data in self.__parse_data(atom, data): - if len(data) != 1: - raise MP4MetadataValueError("invalid bool") - - value = bool(ord(data)) - key = _name2key(atom.name) - self.__add(key, value, single=True) - - def __render_bool(self, key, value): - return self.__render_data( - key, 0, AtomDataType.INTEGER, [chr_(bool(value))]) - - def __parse_cover(self, atom, data): - values = [] - pos = 0 - while pos < atom.length - 8: - length, name, imageformat = struct.unpack(">I4sI", - data[pos:pos + 12]) - if name != b"data": - if name == b"name": - pos += length - continue - raise MP4MetadataError( - "unexpected atom %r inside 'covr'" % name) - if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG): - # Sometimes AtomDataType.IMPLICIT or simply wrong. - # In all cases it was jpeg, so default to it - imageformat = MP4Cover.FORMAT_JPEG - cover = MP4Cover(data[pos + 16:pos + length], imageformat) - values.append(cover) - pos += length - - key = _name2key(atom.name) - self.__add(key, values) - - def __render_cover(self, key, value): - atom_data = [] - for cover in value: - try: - imageformat = cover.imageformat - except AttributeError: - imageformat = MP4Cover.FORMAT_JPEG - atom_data.append(Atom.render( - b"data", struct.pack(">2I", imageformat, 0) + cover)) - return Atom.render(_key2name(key), b"".join(atom_data)) - - def __parse_text(self, atom, data, implicit=True): - # implicit = False, for parsing unknown atoms only take utf8 ones. - # For known ones we can assume the implicit are utf8 too. 
- values = [] - for version, flags, atom_data in self.__parse_data(atom, data): - if implicit: - if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8): - raise MP4MetadataError( - "Unknown atom type %r for %r" % (flags, atom.name)) - else: - if flags != AtomDataType.UTF8: - raise MP4MetadataError( - "%r is not text, ignore" % atom.name) - - try: - text = atom_data.decode("utf-8") - except UnicodeDecodeError as e: - raise MP4MetadataError("%s: %s" % (_name2key(atom.name), e)) - - values.append(text) - - key = _name2key(atom.name) - self.__add(key, values) - - def __render_text(self, key, value, flags=AtomDataType.UTF8): - if isinstance(value, string_types): - value = [value] - - encoded = [] - for v in value: - if not isinstance(v, text_type): - if PY3: - raise TypeError("%r not str" % v) - v = v.decode("utf-8") - encoded.append(v.encode("utf-8")) - - return self.__render_data(key, 0, flags, encoded) - - def delete(self, filename): - """Remove the metadata from the given filename.""" - - self._failed_atoms.clear() - self.clear() - self.save(filename, padding=lambda x: 0) - - __atoms = { - b"----": (__parse_freeform, __render_freeform), - b"trkn": (__parse_pair, __render_pair), - b"disk": (__parse_pair, __render_pair_no_trailing), - b"gnre": (__parse_genre, None), - b"tmpo": (__parse_tempo, __render_tempo), - b"cpil": (__parse_bool, __render_bool), - b"pgap": (__parse_bool, __render_bool), - b"pcst": (__parse_bool, __render_bool), - b"covr": (__parse_cover, __render_cover), - b"purl": (__parse_text, __render_text), - b"egid": (__parse_text, __render_text), - } - - # these allow implicit flags and parse as text - for name in [b"\xa9nam", b"\xa9alb", b"\xa9ART", b"aART", b"\xa9wrt", - b"\xa9day", b"\xa9cmt", b"desc", b"purd", b"\xa9grp", - b"\xa9gen", b"\xa9lyr", b"catg", b"keyw", b"\xa9too", - b"cprt", b"soal", b"soaa", b"soar", b"sonm", b"soco", - b"sosn", b"tvsh"]: - __atoms[name] = (__parse_text, __render_text) - - def pprint(self): - - def to_line(key, value): - assert isinstance(key, text_type) - if isinstance(value, text_type): - return u"%s=%s" % (key, value) - return u"%s=%r" % (key, value) - - values = [] - for key, value in sorted(iteritems(self)): - if not isinstance(key, text_type): - key = key.decode("latin-1") - if key == "covr": - values.append(u"%s=%s" % (key, u", ".join( - [u"[%d bytes of data]" % len(data) for data in value]))) - elif isinstance(value, list): - for v in value: - values.append(to_line(key, v)) - else: - values.append(to_line(key, value)) - return u"\n".join(values) - - -class MP4Info(StreamInfo): - """MPEG-4 stream information. - - Attributes: - - * bitrate -- bitrate in bits per second, as an int - * length -- file length in seconds, as a float - * channels -- number of audio channels - * sample_rate -- audio sampling rate in Hz - * bits_per_sample -- bits per sample - * codec (string): - * if starting with ``"mp4a"`` uses an mp4a audio codec - (see the codec parameter in rfc6381 for details e.g. ``"mp4a.40.2"``) - * for everything else see a list of possible values at - http://www.mp4ra.org/codecs.html - - e.g. ``"mp4a"``, ``"alac"``, ``"mp4a.40.2"``, ``"ac-3"`` etc. - * codec_description (string): - Name of the codec used (ALAC, AAC LC, AC-3...). Values might change in - the future, use for display purposes only. 
- """ - - bitrate = 0 - channels = 0 - sample_rate = 0 - bits_per_sample = 0 - codec = u"" - codec_name = u"" - - def __init__(self, atoms, fileobj): - try: - moov = atoms[b"moov"] - except KeyError: - raise MP4StreamInfoError("not a MP4 file") - - for trak in moov.findall(b"trak"): - hdlr = trak[b"mdia", b"hdlr"] - ok, data = hdlr.read(fileobj) - if not ok: - raise MP4StreamInfoError("Not enough data") - if data[8:12] == b"soun": - break - else: - raise MP4StreamInfoError("track has no audio data") - - mdhd = trak[b"mdia", b"mdhd"] - ok, data = mdhd.read(fileobj) - if not ok: - raise MP4StreamInfoError("Not enough data") - - try: - version, flags, data = parse_full_atom(data) - except ValueError as e: - raise MP4StreamInfoError(e) - - if version == 0: - offset = 8 - fmt = ">2I" - elif version == 1: - offset = 16 - fmt = ">IQ" - else: - raise MP4StreamInfoError("Unknown mdhd version %d" % version) - - end = offset + struct.calcsize(fmt) - unit, length = struct.unpack(fmt, data[offset:end]) - try: - self.length = float(length) / unit - except ZeroDivisionError: - self.length = 0 - - try: - atom = trak[b"mdia", b"minf", b"stbl", b"stsd"] - except KeyError: - pass - else: - self._parse_stsd(atom, fileobj) - - def _parse_stsd(self, atom, fileobj): - """Sets channels, bits_per_sample, sample_rate and optionally bitrate. - - Can raise MP4StreamInfoError. - """ - - assert atom.name == b"stsd" - - ok, data = atom.read(fileobj) - if not ok: - raise MP4StreamInfoError("Invalid stsd") - - try: - version, flags, data = parse_full_atom(data) - except ValueError as e: - raise MP4StreamInfoError(e) - - if version != 0: - raise MP4StreamInfoError("Unsupported stsd version") - - try: - num_entries, offset = cdata.uint32_be_from(data, 0) - except cdata.error as e: - raise MP4StreamInfoError(e) - - if num_entries == 0: - return - - # look at the first entry if there is one - entry_fileobj = cBytesIO(data[offset:]) - try: - entry_atom = Atom(entry_fileobj) - except AtomError as e: - raise MP4StreamInfoError(e) - - try: - entry = AudioSampleEntry(entry_atom, entry_fileobj) - except ASEntryError as e: - raise MP4StreamInfoError(e) - else: - self.channels = entry.channels - self.bits_per_sample = entry.sample_size - self.sample_rate = entry.sample_rate - self.bitrate = entry.bitrate - self.codec = entry.codec - self.codec_description = entry.codec_description - - def pprint(self): - return "MPEG-4 audio (%s), %.2f seconds, %d bps" % ( - self.codec_description, self.length, self.bitrate) - - -class MP4(FileType): - """An MPEG-4 audio file, probably containing AAC. - - If more than one track is present in the file, the first is used. - Only audio ('soun') tracks will be read. 
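A minimal sketch of reading the stream attributes documented above, again with a hypothetical path and an installed mutagen assumed:

    from mutagen.mp4 import MP4

    info = MP4("example.m4a").info             # an MP4Info instance
    print(info.length)                         # seconds, as a float
    print(info.bitrate)                        # bits per second, as an int
    print(info.codec, info.codec_description)  # e.g. "mp4a.40.2", "AAC LC"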
- - :ivar info: :class:`MP4Info` - :ivar tags: :class:`MP4Tags` - """ - - MP4Tags = MP4Tags - - _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"] - - def load(self, filename): - self.filename = filename - with open(filename, "rb") as fileobj: - try: - atoms = Atoms(fileobj) - except AtomError as err: - reraise(error, err, sys.exc_info()[2]) - - try: - self.info = MP4Info(atoms, fileobj) - except error: - raise - except Exception as err: - reraise(MP4StreamInfoError, err, sys.exc_info()[2]) - - if not MP4Tags._can_load(atoms): - self.tags = None - self._padding = 0 - else: - try: - self.tags = self.MP4Tags(atoms, fileobj) - except error: - raise - except Exception as err: - reraise(MP4MetadataError, err, sys.exc_info()[2]) - else: - self._padding = self.tags._padding - - def add_tags(self): - if self.tags is None: - self.tags = self.MP4Tags() - else: - raise error("an MP4 tag already exists") - - @staticmethod - def score(filename, fileobj, header_data): - return (b"ftyp" in header_data) + (b"mp4" in header_data) - - -Open = MP4 - - -def delete(filename): - """Remove tags from a file.""" - - MP4(filename).delete() diff --git a/resources/lib/mutagen/mp4/__pycache__/__init__.cpython-35.pyc b/resources/lib/mutagen/mp4/__pycache__/__init__.cpython-35.pyc deleted file mode 100644 index de968da9..00000000 Binary files a/resources/lib/mutagen/mp4/__pycache__/__init__.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/mp4/__pycache__/_as_entry.cpython-35.pyc b/resources/lib/mutagen/mp4/__pycache__/_as_entry.cpython-35.pyc deleted file mode 100644 index 31483c46..00000000 Binary files a/resources/lib/mutagen/mp4/__pycache__/_as_entry.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/mp4/__pycache__/_atom.cpython-35.pyc b/resources/lib/mutagen/mp4/__pycache__/_atom.cpython-35.pyc deleted file mode 100644 index f4abf385..00000000 Binary files a/resources/lib/mutagen/mp4/__pycache__/_atom.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/mp4/__pycache__/_util.cpython-35.pyc b/resources/lib/mutagen/mp4/__pycache__/_util.cpython-35.pyc deleted file mode 100644 index e7df30cc..00000000 Binary files a/resources/lib/mutagen/mp4/__pycache__/_util.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/mutagen/mp4/_as_entry.py b/resources/lib/mutagen/mp4/_as_entry.py deleted file mode 100644 index 306d5720..00000000 --- a/resources/lib/mutagen/mp4/_as_entry.py +++ /dev/null @@ -1,542 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2014 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -from mutagen._compat import cBytesIO, xrange -from mutagen.aac import ProgramConfigElement -from mutagen._util import BitReader, BitReaderError, cdata -from mutagen._compat import text_type -from ._util import parse_full_atom -from ._atom import Atom, AtomError - - -class ASEntryError(Exception): - pass - - -class AudioSampleEntry(object): - """Parses an AudioSampleEntry atom. - - Private API. - - Attrs: - channels (int): number of channels - sample_size (int): sample size in bits - sample_rate (int): sample rate in Hz - bitrate (int): bits per second (0 means unknown) - codec (string): - audio codec, either 'mp4a[.*][.*]' (rfc6381) or 'alac' - codec_description (string): descriptive codec name e.g. "AAC LC+SBR" - - Can raise ASEntryError. 
- """ - - channels = 0 - sample_size = 0 - sample_rate = 0 - bitrate = 0 - codec = None - codec_description = None - - def __init__(self, atom, fileobj): - ok, data = atom.read(fileobj) - if not ok: - raise ASEntryError("too short %r atom" % atom.name) - - fileobj = cBytesIO(data) - r = BitReader(fileobj) - - try: - # SampleEntry - r.skip(6 * 8) # reserved - r.skip(2 * 8) # data_ref_index - - # AudioSampleEntry - r.skip(8 * 8) # reserved - self.channels = r.bits(16) - self.sample_size = r.bits(16) - r.skip(2 * 8) # pre_defined - r.skip(2 * 8) # reserved - self.sample_rate = r.bits(32) >> 16 - except BitReaderError as e: - raise ASEntryError(e) - - assert r.is_aligned() - - try: - extra = Atom(fileobj) - except AtomError as e: - raise ASEntryError(e) - - self.codec = atom.name.decode("latin-1") - self.codec_description = None - - if atom.name == b"mp4a" and extra.name == b"esds": - self._parse_esds(extra, fileobj) - elif atom.name == b"alac" and extra.name == b"alac": - self._parse_alac(extra, fileobj) - elif atom.name == b"ac-3" and extra.name == b"dac3": - self._parse_dac3(extra, fileobj) - - if self.codec_description is None: - self.codec_description = self.codec.upper() - - def _parse_dac3(self, atom, fileobj): - # ETSI TS 102 366 - - assert atom.name == b"dac3" - - ok, data = atom.read(fileobj) - if not ok: - raise ASEntryError("truncated %s atom" % atom.name) - fileobj = cBytesIO(data) - r = BitReader(fileobj) - - # sample_rate in AudioSampleEntry covers values in - # fscod2 and not just fscod, so ignore fscod here. - try: - r.skip(2 + 5 + 3) # fscod, bsid, bsmod - acmod = r.bits(3) - lfeon = r.bits(1) - bit_rate_code = r.bits(5) - r.skip(5) # reserved - except BitReaderError as e: - raise ASEntryError(e) - - self.channels = [2, 1, 2, 3, 3, 4, 4, 5][acmod] + lfeon - - try: - self.bitrate = [ - 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, - 224, 256, 320, 384, 448, 512, 576, 640][bit_rate_code] * 1000 - except IndexError: - pass - - def _parse_alac(self, atom, fileobj): - # https://alac.macosforge.org/trac/browser/trunk/ - # ALACMagicCookieDescription.txt - - assert atom.name == b"alac" - - ok, data = atom.read(fileobj) - if not ok: - raise ASEntryError("truncated %s atom" % atom.name) - - try: - version, flags, data = parse_full_atom(data) - except ValueError as e: - raise ASEntryError(e) - - if version != 0: - raise ASEntryError("Unsupported version %d" % version) - - fileobj = cBytesIO(data) - r = BitReader(fileobj) - - try: - # for some files the AudioSampleEntry values default to 44100/2chan - # and the real info is in the alac cookie, so prefer it - r.skip(32) # frameLength - compatibleVersion = r.bits(8) - if compatibleVersion != 0: - return - self.sample_size = r.bits(8) - r.skip(8 + 8 + 8) - self.channels = r.bits(8) - r.skip(16 + 32) - self.bitrate = r.bits(32) - self.sample_rate = r.bits(32) - except BitReaderError as e: - raise ASEntryError(e) - - def _parse_esds(self, esds, fileobj): - assert esds.name == b"esds" - - ok, data = esds.read(fileobj) - if not ok: - raise ASEntryError("truncated %s atom" % esds.name) - - try: - version, flags, data = parse_full_atom(data) - except ValueError as e: - raise ASEntryError(e) - - if version != 0: - raise ASEntryError("Unsupported version %d" % version) - - fileobj = cBytesIO(data) - r = BitReader(fileobj) - - try: - tag = r.bits(8) - if tag != ES_Descriptor.TAG: - raise ASEntryError("unexpected descriptor: %d" % tag) - assert r.is_aligned() - except BitReaderError as e: - raise ASEntryError(e) - - try: - decSpecificInfo = 
ES_Descriptor.parse(fileobj) - except DescriptorError as e: - raise ASEntryError(e) - dec_conf_desc = decSpecificInfo.decConfigDescr - - self.bitrate = dec_conf_desc.avgBitrate - self.codec += dec_conf_desc.codec_param - self.codec_description = dec_conf_desc.codec_desc - - decSpecificInfo = dec_conf_desc.decSpecificInfo - if decSpecificInfo is not None: - if decSpecificInfo.channels != 0: - self.channels = decSpecificInfo.channels - - if decSpecificInfo.sample_rate != 0: - self.sample_rate = decSpecificInfo.sample_rate - - -class DescriptorError(Exception): - pass - - -class BaseDescriptor(object): - - TAG = None - - @classmethod - def _parse_desc_length_file(cls, fileobj): - """May raise ValueError""" - - value = 0 - for i in xrange(4): - try: - b = cdata.uint8(fileobj.read(1)) - except cdata.error as e: - raise ValueError(e) - value = (value << 7) | (b & 0x7f) - if not b >> 7: - break - else: - raise ValueError("invalid descriptor length") - - return value - - @classmethod - def parse(cls, fileobj): - """Returns a parsed instance of the called type. - The file position is right after the descriptor after this returns. - - Raises DescriptorError - """ - - try: - length = cls._parse_desc_length_file(fileobj) - except ValueError as e: - raise DescriptorError(e) - pos = fileobj.tell() - instance = cls(fileobj, length) - left = length - (fileobj.tell() - pos) - if left < 0: - raise DescriptorError("descriptor parsing read too much data") - fileobj.seek(left, 1) - return instance - - -class ES_Descriptor(BaseDescriptor): - - TAG = 0x3 - - def __init__(self, fileobj, length): - """Raises DescriptorError""" - - r = BitReader(fileobj) - try: - self.ES_ID = r.bits(16) - self.streamDependenceFlag = r.bits(1) - self.URL_Flag = r.bits(1) - self.OCRstreamFlag = r.bits(1) - self.streamPriority = r.bits(5) - if self.streamDependenceFlag: - self.dependsOn_ES_ID = r.bits(16) - if self.URL_Flag: - URLlength = r.bits(8) - self.URLstring = r.bytes(URLlength) - if self.OCRstreamFlag: - self.OCR_ES_Id = r.bits(16) - - tag = r.bits(8) - except BitReaderError as e: - raise DescriptorError(e) - - if tag != DecoderConfigDescriptor.TAG: - raise DescriptorError("unexpected DecoderConfigDescrTag %d" % tag) - - assert r.is_aligned() - self.decConfigDescr = DecoderConfigDescriptor.parse(fileobj) - - -class DecoderConfigDescriptor(BaseDescriptor): - - TAG = 0x4 - - decSpecificInfo = None - """A DecoderSpecificInfo, optional""" - - def __init__(self, fileobj, length): - """Raises DescriptorError""" - - r = BitReader(fileobj) - - try: - self.objectTypeIndication = r.bits(8) - self.streamType = r.bits(6) - self.upStream = r.bits(1) - self.reserved = r.bits(1) - self.bufferSizeDB = r.bits(24) - self.maxBitrate = r.bits(32) - self.avgBitrate = r.bits(32) - - if (self.objectTypeIndication, self.streamType) != (0x40, 0x5): - return - - # all from here is optional - if length * 8 == r.get_position(): - return - - tag = r.bits(8) - except BitReaderError as e: - raise DescriptorError(e) - - if tag == DecoderSpecificInfo.TAG: - assert r.is_aligned() - self.decSpecificInfo = DecoderSpecificInfo.parse(fileobj) - - @property - def codec_param(self): - """string""" - - param = u".%X" % self.objectTypeIndication - info = self.decSpecificInfo - if info is not None: - param += u".%d" % info.audioObjectType - return param - - @property - def codec_desc(self): - """string or None""" - - info = self.decSpecificInfo - desc = None - if info is not None: - desc = info.description - return desc - - -class DecoderSpecificInfo(BaseDescriptor): - 
- TAG = 0x5 - - _TYPE_NAMES = [ - None, "AAC MAIN", "AAC LC", "AAC SSR", "AAC LTP", "SBR", - "AAC scalable", "TwinVQ", "CELP", "HVXC", None, None, "TTSI", - "Main synthetic", "Wavetable synthesis", "General MIDI", - "Algorithmic Synthesis and Audio FX", "ER AAC LC", None, "ER AAC LTP", - "ER AAC scalable", "ER Twin VQ", "ER BSAC", "ER AAC LD", "ER CELP", - "ER HVXC", "ER HILN", "ER Parametric", "SSC", "PS", "MPEG Surround", - None, "Layer-1", "Layer-2", "Layer-3", "DST", "ALS", "SLS", - "SLS non-core", "ER AAC ELD", "SMR Simple", "SMR Main", "USAC", - "SAOC", "LD MPEG Surround", "USAC" - ] - - _FREQS = [ - 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, - 12000, 11025, 8000, 7350, - ] - - @property - def description(self): - """string or None if unknown""" - - name = None - try: - name = self._TYPE_NAMES[self.audioObjectType] - except IndexError: - pass - if name is None: - return - if self.sbrPresentFlag == 1: - name += "+SBR" - if self.psPresentFlag == 1: - name += "+PS" - return text_type(name) - - @property - def sample_rate(self): - """0 means unknown""" - - if self.sbrPresentFlag == 1: - return self.extensionSamplingFrequency - elif self.sbrPresentFlag == 0: - return self.samplingFrequency - else: - # these are all types that support SBR - aot_can_sbr = (1, 2, 3, 4, 6, 17, 19, 20, 22) - if self.audioObjectType not in aot_can_sbr: - return self.samplingFrequency - # there shouldn't be SBR for > 48KHz - if self.samplingFrequency > 24000: - return self.samplingFrequency - # either samplingFrequency or samplingFrequency * 2 - return 0 - - @property - def channels(self): - """channel count or 0 for unknown""" - - # from ProgramConfigElement() - if hasattr(self, "pce_channels"): - return self.pce_channels - - conf = getattr( - self, "extensionChannelConfiguration", self.channelConfiguration) - - if conf == 1: - if self.psPresentFlag == -1: - return 0 - elif self.psPresentFlag == 1: - return 2 - else: - return 1 - elif conf == 7: - return 8 - elif conf > 7: - return 0 - else: - return conf - - def _get_audio_object_type(self, r): - """Raises BitReaderError""" - - audioObjectType = r.bits(5) - if audioObjectType == 31: - audioObjectTypeExt = r.bits(6) - audioObjectType = 32 + audioObjectTypeExt - return audioObjectType - - def _get_sampling_freq(self, r): - """Raises BitReaderError""" - - samplingFrequencyIndex = r.bits(4) - if samplingFrequencyIndex == 0xf: - samplingFrequency = r.bits(24) - else: - try: - samplingFrequency = self._FREQS[samplingFrequencyIndex] - except IndexError: - samplingFrequency = 0 - return samplingFrequency - - def __init__(self, fileobj, length): - """Raises DescriptorError""" - - r = BitReader(fileobj) - try: - self._parse(r, length) - except BitReaderError as e: - raise DescriptorError(e) - - def _parse(self, r, length): - """Raises BitReaderError""" - - def bits_left(): - return length * 8 - r.get_position() - - self.audioObjectType = self._get_audio_object_type(r) - self.samplingFrequency = self._get_sampling_freq(r) - self.channelConfiguration = r.bits(4) - - self.sbrPresentFlag = -1 - self.psPresentFlag = -1 - if self.audioObjectType in (5, 29): - self.extensionAudioObjectType = 5 - self.sbrPresentFlag = 1 - if self.audioObjectType == 29: - self.psPresentFlag = 1 - self.extensionSamplingFrequency = self._get_sampling_freq(r) - self.audioObjectType = self._get_audio_object_type(r) - if self.audioObjectType == 22: - self.extensionChannelConfiguration = r.bits(4) - else: - self.extensionAudioObjectType = 0 - - if self.audioObjectType in (1, 2, 3, 
4, 6, 7, 17, 19, 20, 21, 22, 23): - try: - GASpecificConfig(r, self) - except NotImplementedError: - # unsupported, (warn?) - return - else: - # unsupported - return - - if self.audioObjectType in ( - 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 39): - epConfig = r.bits(2) - if epConfig in (2, 3): - # unsupported - return - - if self.extensionAudioObjectType != 5 and bits_left() >= 16: - syncExtensionType = r.bits(11) - if syncExtensionType == 0x2b7: - self.extensionAudioObjectType = self._get_audio_object_type(r) - - if self.extensionAudioObjectType == 5: - self.sbrPresentFlag = r.bits(1) - if self.sbrPresentFlag == 1: - self.extensionSamplingFrequency = \ - self._get_sampling_freq(r) - if bits_left() >= 12: - syncExtensionType = r.bits(11) - if syncExtensionType == 0x548: - self.psPresentFlag = r.bits(1) - - if self.extensionAudioObjectType == 22: - self.sbrPresentFlag = r.bits(1) - if self.sbrPresentFlag == 1: - self.extensionSamplingFrequency = \ - self._get_sampling_freq(r) - self.extensionChannelConfiguration = r.bits(4) - - -def GASpecificConfig(r, info): - """Reads GASpecificConfig which is needed to get the data after that - (there is no length defined to skip it) and to read program_config_element - which can contain channel counts. - - May raise BitReaderError on error or - NotImplementedError if some reserved data was set. - """ - - assert isinstance(info, DecoderSpecificInfo) - - r.skip(1) # frameLengthFlag - dependsOnCoreCoder = r.bits(1) - if dependsOnCoreCoder: - r.skip(14) - extensionFlag = r.bits(1) - if not info.channelConfiguration: - pce = ProgramConfigElement(r) - info.pce_channels = pce.channels - if info.audioObjectType == 6 or info.audioObjectType == 20: - r.skip(3) - if extensionFlag: - if info.audioObjectType == 22: - r.skip(5 + 11) - if info.audioObjectType in (17, 19, 20, 23): - r.skip(1 + 1 + 1) - extensionFlag3 = r.bits(1) - if extensionFlag3 != 0: - raise NotImplementedError("extensionFlag3 set") diff --git a/resources/lib/mutagen/mp4/_atom.py b/resources/lib/mutagen/mp4/_atom.py deleted file mode 100644 index f73eb556..00000000 --- a/resources/lib/mutagen/mp4/_atom.py +++ /dev/null @@ -1,194 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -import struct - -from mutagen._compat import PY2 - -# This is not an exhaustive list of container atoms, but just the -# ones this module needs to peek inside. -_CONTAINERS = [b"moov", b"udta", b"trak", b"mdia", b"meta", b"ilst", - b"stbl", b"minf", b"moof", b"traf"] -_SKIP_SIZE = {b"meta": 4} - - -class AtomError(Exception): - pass - - -class Atom(object): - """An individual atom. - - Attributes: - children -- list child atoms (or None for non-container atoms) - length -- length of this atom, including length and name - datalength = -- length of this atom without length, name - name -- four byte name of the atom, as a str - offset -- location in the constructor-given fileobj of this atom - - This structure should only be used internally by Mutagen. 
- """ - - children = None - - def __init__(self, fileobj, level=0): - """May raise AtomError""" - - self.offset = fileobj.tell() - try: - self.length, self.name = struct.unpack(">I4s", fileobj.read(8)) - except struct.error: - raise AtomError("truncated data") - self._dataoffset = self.offset + 8 - if self.length == 1: - try: - self.length, = struct.unpack(">Q", fileobj.read(8)) - except struct.error: - raise AtomError("truncated data") - self._dataoffset += 8 - if self.length < 16: - raise AtomError( - "64 bit atom length can only be 16 and higher") - elif self.length == 0: - if level != 0: - raise AtomError( - "only a top-level atom can have zero length") - # Only the last atom is supposed to have a zero-length, meaning it - # extends to the end of file. - fileobj.seek(0, 2) - self.length = fileobj.tell() - self.offset - fileobj.seek(self.offset + 8, 0) - elif self.length < 8: - raise AtomError( - "atom length can only be 0, 1 or 8 and higher") - - if self.name in _CONTAINERS: - self.children = [] - fileobj.seek(_SKIP_SIZE.get(self.name, 0), 1) - while fileobj.tell() < self.offset + self.length: - self.children.append(Atom(fileobj, level + 1)) - else: - fileobj.seek(self.offset + self.length, 0) - - @property - def datalength(self): - return self.length - (self._dataoffset - self.offset) - - def read(self, fileobj): - """Return if all data could be read and the atom payload""" - - fileobj.seek(self._dataoffset, 0) - data = fileobj.read(self.datalength) - return len(data) == self.datalength, data - - @staticmethod - def render(name, data): - """Render raw atom data.""" - # this raises OverflowError if Py_ssize_t can't handle the atom data - size = len(data) + 8 - if size <= 0xFFFFFFFF: - return struct.pack(">I4s", size, name) + data - else: - return struct.pack(">I4sQ", 1, name, size + 8) + data - - def findall(self, name, recursive=False): - """Recursively find all child atoms by specified name.""" - if self.children is not None: - for child in self.children: - if child.name == name: - yield child - if recursive: - for atom in child.findall(name, True): - yield atom - - def __getitem__(self, remaining): - """Look up a child atom, potentially recursively. - - e.g. atom['udta', 'meta'] => - """ - if not remaining: - return self - elif self.children is None: - raise KeyError("%r is not a container" % self.name) - for child in self.children: - if child.name == remaining[0]: - return child[remaining[1:]] - else: - raise KeyError("%r not found" % remaining[0]) - - def __repr__(self): - cls = self.__class__.__name__ - if self.children is None: - return "<%s name=%r length=%r offset=%r>" % ( - cls, self.name, self.length, self.offset) - else: - children = "\n".join([" " + line for child in self.children - for line in repr(child).splitlines()]) - return "<%s name=%r length=%r offset=%r\n%s>" % ( - cls, self.name, self.length, self.offset, children) - - -class Atoms(object): - """Root atoms in a given file. - - Attributes: - atoms -- a list of top-level atoms as Atom objects - - This structure should only be used internally by Mutagen. - """ - - def __init__(self, fileobj): - self.atoms = [] - fileobj.seek(0, 2) - end = fileobj.tell() - fileobj.seek(0) - while fileobj.tell() + 8 <= end: - self.atoms.append(Atom(fileobj)) - - def path(self, *names): - """Look up and return the complete path of an atom. - - For example, atoms.path('moov', 'udta', 'meta') will return a - list of three atoms, corresponding to the moov, udta, and meta - atoms. 
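The Atom class above decodes the basic MP4 box layout: a 32-bit big-endian length, a four-byte name, and an optional 64-bit length when the first field is 1. The following standalone walker is only a sketch of that layout and does not use the private classes above:

    import struct

    def walk_top_level_atoms(fileobj):
        """Yield (name, offset, length) for each top-level MP4 atom."""
        while True:
            offset = fileobj.tell()
            header = fileobj.read(8)
            if len(header) < 8:
                return
            length, name = struct.unpack(">I4s", header)
            if length == 1:
                # 64-bit length stored right after the name
                length, = struct.unpack(">Q", fileobj.read(8))
            yield name, offset, length
            if length == 0:
                return  # zero length means "extends to the end of the file"
            fileobj.seek(offset + length, 0)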
- """ - - path = [self] - for name in names: - path.append(path[-1][name, ]) - return path[1:] - - def __contains__(self, names): - try: - self[names] - except KeyError: - return False - return True - - def __getitem__(self, names): - """Look up a child atom. - - 'names' may be a list of atoms (['moov', 'udta']) or a string - specifying the complete path ('moov.udta'). - """ - - if PY2: - if isinstance(names, basestring): - names = names.split(b".") - else: - if isinstance(names, bytes): - names = names.split(b".") - - for child in self.atoms: - if child.name == names[0]: - return child[names[1:]] - else: - raise KeyError("%r not found" % names[0]) - - def __repr__(self): - return "\n".join([repr(child) for child in self.atoms]) diff --git a/resources/lib/mutagen/mp4/_util.py b/resources/lib/mutagen/mp4/_util.py deleted file mode 100644 index 9583334a..00000000 --- a/resources/lib/mutagen/mp4/_util.py +++ /dev/null @@ -1,21 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2014 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -from mutagen._util import cdata - - -def parse_full_atom(data): - """Some atoms are versioned. Split them up in (version, flags, payload). - Can raise ValueError. - """ - - if len(data) < 4: - raise ValueError("not enough data") - - version = ord(data[0:1]) - flags = cdata.uint_be(b"\x00" + data[1:4]) - return version, flags, data[4:] diff --git a/resources/lib/mutagen/musepack.py b/resources/lib/mutagen/musepack.py deleted file mode 100644 index 7880958b..00000000 --- a/resources/lib/mutagen/musepack.py +++ /dev/null @@ -1,270 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Lukas Lalinsky -# Copyright (C) 2012 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Musepack audio streams with APEv2 tags. - -Musepack is an audio format originally based on the MPEG-1 Layer-2 -algorithms. Stream versions 4 through 7 are supported. - -For more information, see http://www.musepack.net/. -""" - -__all__ = ["Musepack", "Open", "delete"] - -import struct - -from ._compat import endswith, xrange -from mutagen import StreamInfo -from mutagen.apev2 import APEv2File, error, delete -from mutagen.id3 import BitPaddedInt -from mutagen._util import cdata - - -class MusepackHeaderError(error): - pass - - -RATES = [44100, 48000, 37800, 32000] - - -def _parse_sv8_int(fileobj, limit=9): - """Reads (max limit) bytes from fileobj until the MSB is zero. - All 7 LSB will be merged to a big endian uint. - - Raises ValueError in case not MSB is zero, or EOFError in - case the file ended before limit is reached. - - Returns (parsed number, number of bytes read) - """ - - num = 0 - for i in xrange(limit): - c = fileobj.read(1) - if len(c) != 1: - raise EOFError - c = bytearray(c) - num = (num << 7) | (c[0] & 0x7F) - if not c[0] & 0x80: - return num, i + 1 - if limit > 0: - raise ValueError - return 0, 0 - - -def _calc_sv8_gain(gain): - # 64.82 taken from mpcdec - return 64.82 - gain / 256.0 - - -def _calc_sv8_peak(peak): - return (10 ** (peak / (256.0 * 20.0)) / 65535.0) - - -class MusepackInfo(StreamInfo): - """Musepack stream information. 
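_parse_sv8_int above reads the SV8 variable-length integer: seven payload bits per byte, with the high bit set on every byte except the last. A worked example, assuming the private helper can be imported from an installed mutagen:

    from io import BytesIO
    from mutagen.musepack import _parse_sv8_int  # private helper, assumed importable

    # 0x87 = 1000 0111 -> high bit set, payload 0x07, keep reading
    # 0x68 = 0110 1000 -> high bit clear, payload 0x68, stop
    value, consumed = _parse_sv8_int(BytesIO(b"\x87\x68"))
    assert (value, consumed) == ((0x07 << 7) | 0x68, 2)  # (1000, 2)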
- - Attributes: - - * channels -- number of audio channels - * length -- file length in seconds, as a float - * sample_rate -- audio sampling rate in Hz - * bitrate -- audio bitrate, in bits per second - * version -- Musepack stream version - - Optional Attributes: - - * title_gain, title_peak -- Replay Gain and peak data for this song - * album_gain, album_peak -- Replay Gain and peak data for this album - - These attributes are only available in stream version 7/8. The - gains are a float, +/- some dB. The peaks are a percentage [0..1] of - the maximum amplitude. This means to get a number comparable to - VorbisGain, you must multiply the peak by 2. - """ - - def __init__(self, fileobj): - header = fileobj.read(4) - if len(header) != 4: - raise MusepackHeaderError("not a Musepack file") - - # Skip ID3v2 tags - if header[:3] == b"ID3": - header = fileobj.read(6) - if len(header) != 6: - raise MusepackHeaderError("not a Musepack file") - size = 10 + BitPaddedInt(header[2:6]) - fileobj.seek(size) - header = fileobj.read(4) - if len(header) != 4: - raise MusepackHeaderError("not a Musepack file") - - if header.startswith(b"MPCK"): - self.__parse_sv8(fileobj) - else: - self.__parse_sv467(fileobj) - - if not self.bitrate and self.length != 0: - fileobj.seek(0, 2) - self.bitrate = int(round(fileobj.tell() * 8 / self.length)) - - def __parse_sv8(self, fileobj): - # SV8 http://trac.musepack.net/trac/wiki/SV8Specification - - key_size = 2 - mandatory_packets = [b"SH", b"RG"] - - def check_frame_key(key): - if ((len(frame_type) != key_size) or - (not b'AA' <= frame_type <= b'ZZ')): - raise MusepackHeaderError("Invalid frame key.") - - frame_type = fileobj.read(key_size) - check_frame_key(frame_type) - - while frame_type not in (b"AP", b"SE") and mandatory_packets: - try: - frame_size, slen = _parse_sv8_int(fileobj) - except (EOFError, ValueError): - raise MusepackHeaderError("Invalid packet size.") - data_size = frame_size - key_size - slen - # packets can be at maximum data_size big and are padded with zeros - - if frame_type == b"SH": - mandatory_packets.remove(frame_type) - self.__parse_stream_header(fileobj, data_size) - elif frame_type == b"RG": - mandatory_packets.remove(frame_type) - self.__parse_replaygain_packet(fileobj, data_size) - else: - fileobj.seek(data_size, 1) - - frame_type = fileobj.read(key_size) - check_frame_key(frame_type) - - if mandatory_packets: - raise MusepackHeaderError("Missing mandatory packets: %s." 
% - ", ".join(map(repr, mandatory_packets))) - - self.length = float(self.samples) / self.sample_rate - self.bitrate = 0 - - def __parse_stream_header(self, fileobj, data_size): - # skip CRC - fileobj.seek(4, 1) - remaining_size = data_size - 4 - - try: - self.version = bytearray(fileobj.read(1))[0] - except TypeError: - raise MusepackHeaderError("SH packet ended unexpectedly.") - - remaining_size -= 1 - - try: - samples, l1 = _parse_sv8_int(fileobj) - samples_skip, l2 = _parse_sv8_int(fileobj) - except (EOFError, ValueError): - raise MusepackHeaderError( - "SH packet: Invalid sample counts.") - - self.samples = samples - samples_skip - remaining_size -= l1 + l2 - - data = fileobj.read(remaining_size) - if len(data) != remaining_size: - raise MusepackHeaderError("SH packet ended unexpectedly.") - self.sample_rate = RATES[bytearray(data)[0] >> 5] - self.channels = (bytearray(data)[1] >> 4) + 1 - - def __parse_replaygain_packet(self, fileobj, data_size): - data = fileobj.read(data_size) - if data_size < 9: - raise MusepackHeaderError("Invalid RG packet size.") - if len(data) != data_size: - raise MusepackHeaderError("RG packet ended unexpectedly.") - title_gain = cdata.short_be(data[1:3]) - title_peak = cdata.short_be(data[3:5]) - album_gain = cdata.short_be(data[5:7]) - album_peak = cdata.short_be(data[7:9]) - if title_gain: - self.title_gain = _calc_sv8_gain(title_gain) - if title_peak: - self.title_peak = _calc_sv8_peak(title_peak) - if album_gain: - self.album_gain = _calc_sv8_gain(album_gain) - if album_peak: - self.album_peak = _calc_sv8_peak(album_peak) - - def __parse_sv467(self, fileobj): - fileobj.seek(-4, 1) - header = fileobj.read(32) - if len(header) != 32: - raise MusepackHeaderError("not a Musepack file") - - # SV7 - if header.startswith(b"MP+"): - self.version = bytearray(header)[3] & 0xF - if self.version < 7: - raise MusepackHeaderError("not a Musepack file") - frames = cdata.uint_le(header[4:8]) - flags = cdata.uint_le(header[8:12]) - - self.title_peak, self.title_gain = struct.unpack( - "<Hh", header[12:16]) - self.album_peak, self.album_gain = struct.unpack( - "<Hh", header[16:20]) - self.title_gain /= 100.0 - self.album_gain /= 100.0 - self.title_peak /= 65535.0 - self.album_peak /= 65535.0 - - self.sample_rate = RATES[(flags >> 16) & 0x0003] - self.bitrate = 0 - # SV4-SV6 - else: - header_dword = cdata.uint_le(header[0:4]) - self.version = (header_dword >> 11) & 0x03FF - if self.version < 4 or self.version > 6: - raise MusepackHeaderError("not a Musepack file") - self.bitrate = (header_dword >> 23) & 0x01FF - self.sample_rate = 44100 - if self.version >= 5: - frames = cdata.uint_le(header[4:8]) - else: - frames = cdata.ushort_le(header[6:8]) - if self.version < 6: - frames -= 1 - self.channels = 2 - self.length = float(frames * 1152 - 576) / self.sample_rate - - def pprint(self): - rg_data = [] - if hasattr(self, "title_gain"): - rg_data.append(u"%+0.2f (title)" % self.title_gain) - if hasattr(self, "album_gain"): - rg_data.append(u"%+0.2f (album)" % self.album_gain) - rg_data = (rg_data and ", Gain: " + ", ".join(rg_data)) or "" - - return u"Musepack SV%d, %.2f seconds, %d Hz, %d bps%s" % ( - self.version, self.length, self.sample_rate, self.bitrate, rg_data) - - -class Musepack(APEv2File): - _Info = MusepackInfo - _mimes = ["audio/x-musepack", "audio/x-mpc"] - - @staticmethod - def score(filename, fileobj, header): - filename = filename.lower() - - return (header.startswith(b"MP+") + header.startswith(b"MPCK") + - endswith(filename, b".mpc")) - - -Open = Musepack diff --git a/resources/lib/mutagen/ogg.py b/resources/lib/mutagen/ogg.py deleted file mode 100644 index 9961a966..00000000 --- a/resources/lib/mutagen/ogg.py +++ /dev/null @@ -1,548 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe
Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write Ogg bitstreams and pages. - -This module reads and writes a subset of the Ogg bitstream format -version 0. It does *not* read or write Ogg Vorbis files! For that, -you should use mutagen.oggvorbis. - -This implementation is based on the RFC 3533 standard found at -http://www.xiph.org/ogg/doc/rfc3533.txt. -""" - -import struct -import sys -import zlib - -from mutagen import FileType -from mutagen._util import cdata, resize_bytes, MutagenError -from ._compat import cBytesIO, reraise, chr_, izip, xrange - - -class error(IOError, MutagenError): - """Ogg stream parsing errors.""" - - pass - - -class OggPage(object): - """A single Ogg page (not necessarily a single encoded packet). - - A page is a header of 26 bytes, followed by the length of the - data, followed by the data. - - The constructor is givin a file-like object pointing to the start - of an Ogg page. After the constructor is finished it is pointing - to the start of the next page. - - Attributes: - - * version -- stream structure version (currently always 0) - * position -- absolute stream position (default -1) - * serial -- logical stream serial number (default 0) - * sequence -- page sequence number within logical stream (default 0) - * offset -- offset this page was read from (default None) - * complete -- if the last packet on this page is complete (default True) - * packets -- list of raw packet data (default []) - - Note that if 'complete' is false, the next page's 'continued' - property must be true (so set both when constructing pages). - - If a file-like object is supplied to the constructor, the above - attributes will be filled in based on it. 
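As a sketch of the round trip the class supports, a page can be built in memory, serialized with write(), and parsed back by the constructor; the packet contents here are made up and an installed mutagen is assumed:

    from io import BytesIO
    from mutagen.ogg import OggPage

    page = OggPage()
    page.serial = 1234
    page.first = True                     # sets the corresponding type-flag bit
    page.packets = [b"hello", b"world"]

    raw = page.write()                    # header + lacing values + packet data
    parsed = OggPage(BytesIO(raw))        # the constructor fills the attributes back in
    assert parsed.packets == [b"hello", b"world"]
    assert parsed == page                 # pages compare equal if they serialize equally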
- """ - - version = 0 - __type_flags = 0 - position = 0 - serial = 0 - sequence = 0 - offset = None - complete = True - - def __init__(self, fileobj=None): - self.packets = [] - - if fileobj is None: - return - - self.offset = fileobj.tell() - - header = fileobj.read(27) - if len(header) == 0: - raise EOFError - - try: - (oggs, self.version, self.__type_flags, - self.position, self.serial, self.sequence, - crc, segments) = struct.unpack("<4sBBqIIiB", header) - except struct.error: - raise error("unable to read full header; got %r" % header) - - if oggs != b"OggS": - raise error("read %r, expected %r, at 0x%x" % ( - oggs, b"OggS", fileobj.tell() - 27)) - - if self.version != 0: - raise error("version %r unsupported" % self.version) - - total = 0 - lacings = [] - lacing_bytes = fileobj.read(segments) - if len(lacing_bytes) != segments: - raise error("unable to read %r lacing bytes" % segments) - for c in bytearray(lacing_bytes): - total += c - if c < 255: - lacings.append(total) - total = 0 - if total: - lacings.append(total) - self.complete = False - - self.packets = [fileobj.read(l) for l in lacings] - if [len(p) for p in self.packets] != lacings: - raise error("unable to read full data") - - def __eq__(self, other): - """Two Ogg pages are the same if they write the same data.""" - try: - return (self.write() == other.write()) - except AttributeError: - return False - - __hash__ = object.__hash__ - - def __repr__(self): - attrs = ['version', 'position', 'serial', 'sequence', 'offset', - 'complete', 'continued', 'first', 'last'] - values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs] - return "<%s %s, %d bytes in %d packets>" % ( - type(self).__name__, " ".join(values), sum(map(len, self.packets)), - len(self.packets)) - - def write(self): - """Return a string encoding of the page header and data. - - A ValueError is raised if the data is too big to fit in a - single page. - """ - - data = [ - struct.pack("<4sBBqIIi", b"OggS", self.version, self.__type_flags, - self.position, self.serial, self.sequence, 0) - ] - - lacing_data = [] - for datum in self.packets: - quot, rem = divmod(len(datum), 255) - lacing_data.append(b"\xff" * quot + chr_(rem)) - lacing_data = b"".join(lacing_data) - if not self.complete and lacing_data.endswith(b"\x00"): - lacing_data = lacing_data[:-1] - data.append(chr_(len(lacing_data))) - data.append(lacing_data) - data.extend(self.packets) - data = b"".join(data) - - # Python's CRC is swapped relative to Ogg's needs. - # crc32 returns uint prior to py2.6 on some platforms, so force uint - crc = (~zlib.crc32(data.translate(cdata.bitswap), -1)) & 0xffffffff - # Although we're using to_uint_be, this actually makes the CRC - # a proper le integer, since Python's CRC is byteswapped. - crc = cdata.to_uint_be(crc).translate(cdata.bitswap) - data = data[:22] + crc + data[26:] - return data - - @property - def size(self): - """Total frame size.""" - - size = 27 # Initial header size - for datum in self.packets: - quot, rem = divmod(len(datum), 255) - size += quot + 1 - if not self.complete and rem == 0: - # Packet contains a multiple of 255 bytes and is not - # terminated, so we don't have a \x00 at the end. 
- size -= 1 - size += sum(map(len, self.packets)) - return size - - def __set_flag(self, bit, val): - mask = 1 << bit - if val: - self.__type_flags |= mask - else: - self.__type_flags &= ~mask - - continued = property( - lambda self: cdata.test_bit(self.__type_flags, 0), - lambda self, v: self.__set_flag(0, v), - doc="The first packet is continued from the previous page.") - - first = property( - lambda self: cdata.test_bit(self.__type_flags, 1), - lambda self, v: self.__set_flag(1, v), - doc="This is the first page of a logical bitstream.") - - last = property( - lambda self: cdata.test_bit(self.__type_flags, 2), - lambda self, v: self.__set_flag(2, v), - doc="This is the last page of a logical bitstream.") - - @staticmethod - def renumber(fileobj, serial, start): - """Renumber pages belonging to a specified logical stream. - - fileobj must be opened with mode r+b or w+b. - - Starting at page number 'start', renumber all pages belonging - to logical stream 'serial'. Other pages will be ignored. - - fileobj must point to the start of a valid Ogg page; any - occuring after it and part of the specified logical stream - will be numbered. No adjustment will be made to the data in - the pages nor the granule position; only the page number, and - so also the CRC. - - If an error occurs (e.g. non-Ogg data is found), fileobj will - be left pointing to the place in the stream the error occured, - but the invalid data will be left intact (since this function - does not change the total file size). - """ - - number = start - while True: - try: - page = OggPage(fileobj) - except EOFError: - break - else: - if page.serial != serial: - # Wrong stream, skip this page. - continue - # Changing the number can't change the page size, - # so seeking back based on the current size is safe. - fileobj.seek(-page.size, 1) - page.sequence = number - fileobj.write(page.write()) - fileobj.seek(page.offset + page.size, 0) - number += 1 - - @staticmethod - def to_packets(pages, strict=False): - """Construct a list of packet data from a list of Ogg pages. - - If strict is true, the first page must start a new packet, - and the last page must end the last packet. - """ - - serial = pages[0].serial - sequence = pages[0].sequence - packets = [] - - if strict: - if pages[0].continued: - raise ValueError("first packet is continued") - if not pages[-1].complete: - raise ValueError("last packet does not complete") - elif pages and pages[0].continued: - packets.append([b""]) - - for page in pages: - if serial != page.serial: - raise ValueError("invalid serial number in %r" % page) - elif sequence != page.sequence: - raise ValueError("bad sequence number in %r" % page) - else: - sequence += 1 - - if page.continued: - packets[-1].append(page.packets[0]) - else: - packets.append([page.packets[0]]) - packets.extend([p] for p in page.packets[1:]) - - return [b"".join(p) for p in packets] - - @classmethod - def _from_packets_try_preserve(cls, packets, old_pages): - """Like from_packets but in case the size and number of the packets - is the same as in the given pages the layout of the pages will - be copied (the page size and number will match). 
- - If the packets don't match this behaves like:: - - OggPage.from_packets(packets, sequence=old_pages[0].sequence) - """ - - old_packets = cls.to_packets(old_pages) - - if [len(p) for p in packets] != [len(p) for p in old_packets]: - # doesn't match, fall back - return cls.from_packets(packets, old_pages[0].sequence) - - new_data = b"".join(packets) - new_pages = [] - for old in old_pages: - new = OggPage() - new.sequence = old.sequence - new.complete = old.complete - new.continued = old.continued - new.position = old.position - for p in old.packets: - data, new_data = new_data[:len(p)], new_data[len(p):] - new.packets.append(data) - new_pages.append(new) - assert not new_data - - return new_pages - - @staticmethod - def from_packets(packets, sequence=0, default_size=4096, - wiggle_room=2048): - """Construct a list of Ogg pages from a list of packet data. - - The algorithm will generate pages of approximately - default_size in size (rounded down to the nearest multiple of - 255). However, it will also allow pages to increase to - approximately default_size + wiggle_room if allowing the - wiggle room would finish a packet (only one packet will be - finished in this way per page; if the next packet would fit - into the wiggle room, it still starts on a new page). - - This method reduces packet fragmentation when packet sizes are - slightly larger than the default page size, while still - ensuring most pages are of the average size. - - Pages are numbered started at 'sequence'; other information is - uninitialized. - """ - - chunk_size = (default_size // 255) * 255 - - pages = [] - - page = OggPage() - page.sequence = sequence - - for packet in packets: - page.packets.append(b"") - while packet: - data, packet = packet[:chunk_size], packet[chunk_size:] - if page.size < default_size and len(page.packets) < 255: - page.packets[-1] += data - else: - # If we've put any packet data into this page yet, - # we need to mark it incomplete. However, we can - # also have just started this packet on an already - # full page, in which case, just start the new - # page with this packet. - if page.packets[-1]: - page.complete = False - if len(page.packets) == 1: - page.position = -1 - else: - page.packets.pop(-1) - pages.append(page) - page = OggPage() - page.continued = not pages[-1].complete - page.sequence = pages[-1].sequence + 1 - page.packets.append(data) - - if len(packet) < wiggle_room: - page.packets[-1] += packet - packet = b"" - - if page.packets: - pages.append(page) - - return pages - - @classmethod - def replace(cls, fileobj, old_pages, new_pages): - """Replace old_pages with new_pages within fileobj. - - old_pages must have come from reading fileobj originally. - new_pages are assumed to have the 'same' data as old_pages, - and so the serial and sequence numbers will be copied, as will - the flags for the first and last pages. - - fileobj will be resized and pages renumbered as necessary. As - such, it must be opened r+b or w+b. - """ - - if not len(old_pages) or not len(new_pages): - raise ValueError("empty pages list not allowed") - - # Number the new pages starting from the first old page. 
- first = old_pages[0].sequence - for page, seq in izip(new_pages, - xrange(first, first + len(new_pages))): - page.sequence = seq - page.serial = old_pages[0].serial - - new_pages[0].first = old_pages[0].first - new_pages[0].last = old_pages[0].last - new_pages[0].continued = old_pages[0].continued - - new_pages[-1].first = old_pages[-1].first - new_pages[-1].last = old_pages[-1].last - new_pages[-1].complete = old_pages[-1].complete - if not new_pages[-1].complete and len(new_pages[-1].packets) == 1: - new_pages[-1].position = -1 - - new_data = [cls.write(p) for p in new_pages] - - # Add dummy data or merge the remaining data together so multiple - # new pages replace an old one - pages_diff = len(old_pages) - len(new_data) - if pages_diff > 0: - new_data.extend([b""] * pages_diff) - elif pages_diff < 0: - new_data[pages_diff - 1:] = [b"".join(new_data[pages_diff - 1:])] - - # Replace pages one by one. If the sizes match no resize happens. - offset_adjust = 0 - new_data_end = None - assert len(old_pages) == len(new_data) - for old_page, data in izip(old_pages, new_data): - offset = old_page.offset + offset_adjust - data_size = len(data) - resize_bytes(fileobj, old_page.size, data_size, offset) - fileobj.seek(offset, 0) - fileobj.write(data) - new_data_end = offset + data_size - offset_adjust += (data_size - old_page.size) - - # Finally, if there's any discrepency in length, we need to - # renumber the pages for the logical stream. - if len(old_pages) != len(new_pages): - fileobj.seek(new_data_end, 0) - serial = new_pages[-1].serial - sequence = new_pages[-1].sequence + 1 - cls.renumber(fileobj, serial, sequence) - - @staticmethod - def find_last(fileobj, serial): - """Find the last page of the stream 'serial'. - - If the file is not multiplexed this function is fast. If it is, - it must read the whole the stream. - - This finds the last page in the actual file object, or the last - page in the stream (with eos set), whichever comes first. - """ - - # For non-muxed streams, look at the last page. - try: - fileobj.seek(-256 * 256, 2) - except IOError: - # The file is less than 64k in length. - fileobj.seek(0) - data = fileobj.read() - try: - index = data.rindex(b"OggS") - except ValueError: - raise error("unable to find final Ogg header") - bytesobj = cBytesIO(data[index:]) - best_page = None - try: - page = OggPage(bytesobj) - except error: - pass - else: - if page.serial == serial: - if page.last: - return page - else: - best_page = page - else: - best_page = None - - # The stream is muxed, so use the slow way. - fileobj.seek(0) - try: - page = OggPage(fileobj) - while not page.last: - page = OggPage(fileobj) - while page.serial != serial: - page = OggPage(fileobj) - best_page = page - return page - except error: - return best_page - except EOFError: - return best_page - - -class OggFileType(FileType): - """An generic Ogg file.""" - - _Info = None - _Tags = None - _Error = None - _mimes = ["application/ogg", "application/x-ogg"] - - def load(self, filename): - """Load file information from a filename.""" - - self.filename = filename - with open(filename, "rb") as fileobj: - try: - self.info = self._Info(fileobj) - self.tags = self._Tags(fileobj, self.info) - self.info._post_tags(fileobj) - except error as e: - reraise(self._Error, e, sys.exc_info()[2]) - except EOFError: - raise self._Error("no appropriate stream found") - - def delete(self, filename=None): - """Remove tags from a file. - - If no filename is given, the one most recently loaded is used. 
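from_packets and to_packets above are designed to round-trip: a packet larger than the target page size is split across pages and flagged as continued, then reassembled on the way back. A sketch with made-up packet data, assuming an installed mutagen:

    from mutagen.ogg import OggPage

    packets = [b"\x01" * 10000, b"\x02" * 10]          # first packet exceeds one page
    pages = OggPage.from_packets(packets, sequence=7)
    assert len(pages) > 1                              # the large packet forced a page break
    assert pages[0].sequence == 7
    assert OggPage.to_packets(pages) == packets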
- """ - - if filename is None: - filename = self.filename - - self.tags.clear() - # TODO: we should delegate the deletion to the subclass and not through - # _inject. - with open(filename, "rb+") as fileobj: - try: - self.tags._inject(fileobj, lambda x: 0) - except error as e: - reraise(self._Error, e, sys.exc_info()[2]) - except EOFError: - raise self._Error("no appropriate stream found") - - def add_tags(self): - raise self._Error - - def save(self, filename=None, padding=None): - """Save a tag to a file. - - If no filename is given, the one most recently loaded is used. - """ - - if filename is None: - filename = self.filename - fileobj = open(filename, "rb+") - try: - try: - self.tags._inject(fileobj, padding) - except error as e: - reraise(self._Error, e, sys.exc_info()[2]) - except EOFError: - raise self._Error("no appropriate stream found") - finally: - fileobj.close() diff --git a/resources/lib/mutagen/oggflac.py b/resources/lib/mutagen/oggflac.py deleted file mode 100644 index b86226ca..00000000 --- a/resources/lib/mutagen/oggflac.py +++ /dev/null @@ -1,161 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write Ogg FLAC comments. - -This module handles FLAC files wrapped in an Ogg bitstream. The first -FLAC stream found is used. For 'naked' FLACs, see mutagen.flac. - -This module is based off the specification at -http://flac.sourceforge.net/ogg_mapping.html. -""" - -__all__ = ["OggFLAC", "Open", "delete"] - -import struct - -from ._compat import cBytesIO - -from mutagen import StreamInfo -from mutagen.flac import StreamInfo as FLACStreamInfo, error as FLACError -from mutagen._vorbis import VCommentDict -from mutagen.ogg import OggPage, OggFileType, error as OggError - - -class error(OggError): - pass - - -class OggFLACHeaderError(error): - pass - - -class OggFLACStreamInfo(StreamInfo): - """Ogg FLAC stream info.""" - - length = 0 - """File length in seconds, as a float""" - - channels = 0 - """Number of channels""" - - sample_rate = 0 - """Sample rate in Hz""" - - def __init__(self, fileobj): - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x7FFLAC"): - page = OggPage(fileobj) - major, minor, self.packets, flac = struct.unpack( - ">BBH4s", page.packets[0][5:13]) - if flac != b"fLaC": - raise OggFLACHeaderError("invalid FLAC marker (%r)" % flac) - elif (major, minor) != (1, 0): - raise OggFLACHeaderError( - "unknown mapping version: %d.%d" % (major, minor)) - self.serial = page.serial - - # Skip over the block header. - stringobj = cBytesIO(page.packets[0][17:]) - - try: - flac_info = FLACStreamInfo(stringobj) - except FLACError as e: - raise OggFLACHeaderError(e) - - for attr in ["min_blocksize", "max_blocksize", "sample_rate", - "channels", "bits_per_sample", "total_samples", "length"]: - setattr(self, attr, getattr(flac_info, attr)) - - def _post_tags(self, fileobj): - if self.length: - return - page = OggPage.find_last(fileobj, self.serial) - self.length = page.position / float(self.sample_rate) - - def pprint(self): - return u"Ogg FLAC, %.2f seconds, %d Hz" % ( - self.length, self.sample_rate) - - -class OggFLACVComment(VCommentDict): - - def __init__(self, fileobj, info): - # data should be pointing at the start of an Ogg page, after - # the first FLAC page. 
- pages = [] - complete = False - while not complete: - page = OggPage(fileobj) - if page.serial == info.serial: - pages.append(page) - complete = page.complete or (len(page.packets) > 1) - comment = cBytesIO(OggPage.to_packets(pages)[0][4:]) - super(OggFLACVComment, self).__init__(comment, framing=False) - - def _inject(self, fileobj, padding_func): - """Write tag data into the FLAC Vorbis comment packet/page.""" - - # Ogg FLAC has no convenient data marker like Vorbis, but the - # second packet - and second page - must be the comment data. - fileobj.seek(0) - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x7FFLAC"): - page = OggPage(fileobj) - - first_page = page - while not (page.sequence == 1 and page.serial == first_page.serial): - page = OggPage(fileobj) - - old_pages = [page] - while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): - page = OggPage(fileobj) - if page.serial == first_page.serial: - old_pages.append(page) - - packets = OggPage.to_packets(old_pages, strict=False) - - # Set the new comment block. - data = self.write(framing=False) - data = packets[0][:1] + struct.pack(">I", len(data))[-3:] + data - packets[0] = data - - new_pages = OggPage.from_packets(packets, old_pages[0].sequence) - OggPage.replace(fileobj, old_pages, new_pages) - - -class OggFLAC(OggFileType): - """An Ogg FLAC file.""" - - _Info = OggFLACStreamInfo - _Tags = OggFLACVComment - _Error = OggFLACHeaderError - _mimes = ["audio/x-oggflac"] - - info = None - """A `OggFLACStreamInfo`""" - - tags = None - """A `VCommentDict`""" - - def save(self, filename=None): - return super(OggFLAC, self).save(filename) - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"OggS") * ( - (b"FLAC" in header) + (b"fLaC" in header))) - - -Open = OggFLAC - - -def delete(filename): - """Remove tags from a file.""" - - OggFLAC(filename).delete() diff --git a/resources/lib/mutagen/oggopus.py b/resources/lib/mutagen/oggopus.py deleted file mode 100644 index 7154e479..00000000 --- a/resources/lib/mutagen/oggopus.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2012, 2013 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write Ogg Opus comments. - -This module handles Opus files wrapped in an Ogg bitstream. The -first Opus stream found is used. 
- -Based on http://tools.ietf.org/html/draft-terriberry-oggopus-01 -""" - -__all__ = ["OggOpus", "Open", "delete"] - -import struct - -from mutagen import StreamInfo -from mutagen._compat import BytesIO -from mutagen._util import get_size -from mutagen._tags import PaddingInfo -from mutagen._vorbis import VCommentDict -from mutagen.ogg import OggPage, OggFileType, error as OggError - - -class error(OggError): - pass - - -class OggOpusHeaderError(error): - pass - - -class OggOpusInfo(StreamInfo): - """Ogg Opus stream information.""" - - length = 0 - """File length in seconds, as a float""" - - channels = 0 - """Number of channels""" - - def __init__(self, fileobj): - page = OggPage(fileobj) - while not page.packets[0].startswith(b"OpusHead"): - page = OggPage(fileobj) - - self.serial = page.serial - - if not page.first: - raise OggOpusHeaderError( - "page has ID header, but doesn't start a stream") - - (version, self.channels, pre_skip, orig_sample_rate, output_gain, - channel_map) = struct.unpack("<BBHIhB", page.packets[0][8:19]) - - self.__pre_skip = pre_skip - - # only the higher 4 bits change on incompatible changes - major = version >> 4 - if major != 0: - raise OggOpusHeaderError("version %r unsupported" % major) - - def _post_tags(self, fileobj): - page = OggPage.find_last(fileobj, self.serial) - self.length = (page.position - self.__pre_skip) / float(48000) - - def pprint(self): - return u"Ogg Opus, %.2f seconds" % (self.length) - - -class OggOpusVComment(VCommentDict): - """Opus comments embedded in an Ogg bitstream.""" - - def __get_comment_pages(self, fileobj, info): - # find the first tags page with the right serial - page = OggPage(fileobj) - while ((info.serial != page.serial) or - not page.packets[0].startswith(b"OpusTags")): - page = OggPage(fileobj) - - # get all comment pages - pages = [page] - while not (pages[-1].complete or len(pages[-1].packets) > 1): - page = OggPage(fileobj) - if page.serial == pages[0].serial: - pages.append(page) - - return pages - - def __init__(self, fileobj, info): - pages = self.__get_comment_pages(fileobj, info) - data = OggPage.to_packets(pages)[0][8:] # Strip OpusTags - fileobj = BytesIO(data) - super(OggOpusVComment, self).__init__(fileobj, framing=False) - self._padding = len(data) - self._size - - # in case the LSB of the first byte after v-comment is 1, preserve the - # following data - padding_flag = fileobj.read(1) - if padding_flag and ord(padding_flag) & 0x1: - self._pad_data = padding_flag + fileobj.read() - self._padding = 0 # we have to preserve, so no padding - else: - self._pad_data = b"" - - def _inject(self, fileobj, padding_func): - fileobj.seek(0) - info = OggOpusInfo(fileobj) - old_pages = self.__get_comment_pages(fileobj, info) - - packets = OggPage.to_packets(old_pages) - vcomment_data = b"OpusTags" + self.write(framing=False) - - if self._pad_data: - # if we have padding data to preserver we can't add more padding - # as long as we don't know the structure of what follows - packets[0] = vcomment_data + self._pad_data - else: - content_size = get_size(fileobj) - len(packets[0]) # approx - padding_left = len(packets[0]) - len(vcomment_data) - info = PaddingInfo(padding_left, content_size) - new_padding = info._get_padding(padding_func) - packets[0] = vcomment_data + b"\x00" * new_padding - - new_pages = OggPage._from_packets_try_preserve(packets, old_pages) - OggPage.replace(fileobj, old_pages, new_pages) - - -class OggOpus(OggFileType): - """An Ogg Opus file.""" - - _Info = OggOpusInfo - _Tags = OggOpusVComment - _Error = OggOpusHeaderError - _mimes = ["audio/ogg", "audio/ogg; codecs=opus"] - - info = None - """A `OggOpusInfo`""" - - tags = None - """A 
`VCommentDict`""" - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"OggS") * (b"OpusHead" in header)) - - -Open = OggOpus - - -def delete(filename): - """Remove tags from a file.""" - - OggOpus(filename).delete() diff --git a/resources/lib/mutagen/oggspeex.py b/resources/lib/mutagen/oggspeex.py deleted file mode 100644 index 9b16930b..00000000 --- a/resources/lib/mutagen/oggspeex.py +++ /dev/null @@ -1,154 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write Ogg Speex comments. - -This module handles Speex files wrapped in an Ogg bitstream. The -first Speex stream found is used. - -Read more about Ogg Speex at http://www.speex.org/. This module is -based on the specification at http://www.speex.org/manual2/node7.html -and clarifications after personal communication with Jean-Marc, -http://lists.xiph.org/pipermail/speex-dev/2006-July/004676.html. -""" - -__all__ = ["OggSpeex", "Open", "delete"] - -from mutagen import StreamInfo -from mutagen._vorbis import VCommentDict -from mutagen.ogg import OggPage, OggFileType, error as OggError -from mutagen._util import cdata, get_size -from mutagen._tags import PaddingInfo - - -class error(OggError): - pass - - -class OggSpeexHeaderError(error): - pass - - -class OggSpeexInfo(StreamInfo): - """Ogg Speex stream information.""" - - length = 0 - """file length in seconds, as a float""" - - channels = 0 - """number of channels""" - - bitrate = 0 - """nominal bitrate in bits per second. - - The reference encoder does not set the bitrate; in this case, - the bitrate will be 0. - """ - - def __init__(self, fileobj): - page = OggPage(fileobj) - while not page.packets[0].startswith(b"Speex "): - page = OggPage(fileobj) - if not page.first: - raise OggSpeexHeaderError( - "page has ID header, but doesn't start a stream") - self.sample_rate = cdata.uint_le(page.packets[0][36:40]) - self.channels = cdata.uint_le(page.packets[0][48:52]) - self.bitrate = max(0, cdata.int_le(page.packets[0][52:56])) - self.serial = page.serial - - def _post_tags(self, fileobj): - page = OggPage.find_last(fileobj, self.serial) - self.length = page.position / float(self.sample_rate) - - def pprint(self): - return u"Ogg Speex, %.2f seconds" % self.length - - -class OggSpeexVComment(VCommentDict): - """Speex comments embedded in an Ogg bitstream.""" - - def __init__(self, fileobj, info): - pages = [] - complete = False - while not complete: - page = OggPage(fileobj) - if page.serial == info.serial: - pages.append(page) - complete = page.complete or (len(page.packets) > 1) - data = OggPage.to_packets(pages)[0] - super(OggSpeexVComment, self).__init__(data, framing=False) - self._padding = len(data) - self._size - - def _inject(self, fileobj, padding_func): - """Write tag data into the Speex comment packet/page.""" - - fileobj.seek(0) - - # Find the first header page, with the stream info. - # Use it to get the serial number. - page = OggPage(fileobj) - while not page.packets[0].startswith(b"Speex "): - page = OggPage(fileobj) - - # Look for the next page with that serial number, it'll start - # the comment packet. - serial = page.serial - page = OggPage(fileobj) - while page.serial != serial: - page = OggPage(fileobj) - - # Then find all the pages with the comment packet. 
- old_pages = [page] - while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): - page = OggPage(fileobj) - if page.serial == old_pages[0].serial: - old_pages.append(page) - - packets = OggPage.to_packets(old_pages, strict=False) - - content_size = get_size(fileobj) - len(packets[0]) # approx - vcomment_data = self.write(framing=False) - padding_left = len(packets[0]) - len(vcomment_data) - - info = PaddingInfo(padding_left, content_size) - new_padding = info._get_padding(padding_func) - - # Set the new comment packet. - packets[0] = vcomment_data + b"\x00" * new_padding - - new_pages = OggPage._from_packets_try_preserve(packets, old_pages) - OggPage.replace(fileobj, old_pages, new_pages) - - -class OggSpeex(OggFileType): - """An Ogg Speex file.""" - - _Info = OggSpeexInfo - _Tags = OggSpeexVComment - _Error = OggSpeexHeaderError - _mimes = ["audio/x-speex"] - - info = None - """A `OggSpeexInfo`""" - - tags = None - """A `VCommentDict`""" - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"OggS") * (b"Speex " in header)) - - -Open = OggSpeex - - -def delete(filename): - """Remove tags from a file.""" - - OggSpeex(filename).delete() diff --git a/resources/lib/mutagen/oggtheora.py b/resources/lib/mutagen/oggtheora.py deleted file mode 100644 index 122e7d4b..00000000 --- a/resources/lib/mutagen/oggtheora.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write Ogg Theora comments. - -This module handles Theora files wrapped in an Ogg bitstream. The -first Theora stream found is used. - -Based on the specification at http://theora.org/doc/Theora_I_spec.pdf. 
-""" - -__all__ = ["OggTheora", "Open", "delete"] - -import struct - -from mutagen import StreamInfo -from mutagen._vorbis import VCommentDict -from mutagen._util import cdata, get_size -from mutagen._tags import PaddingInfo -from mutagen.ogg import OggPage, OggFileType, error as OggError - - -class error(OggError): - pass - - -class OggTheoraHeaderError(error): - pass - - -class OggTheoraInfo(StreamInfo): - """Ogg Theora stream information.""" - - length = 0 - """File length in seconds, as a float""" - - fps = 0 - """Video frames per second, as a float""" - - bitrate = 0 - """Bitrate in bps (int)""" - - def __init__(self, fileobj): - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x80theora"): - page = OggPage(fileobj) - if not page.first: - raise OggTheoraHeaderError( - "page has ID header, but doesn't start a stream") - data = page.packets[0] - vmaj, vmin = struct.unpack("2B", data[7:9]) - if (vmaj, vmin) != (3, 2): - raise OggTheoraHeaderError( - "found Theora version %d.%d != 3.2" % (vmaj, vmin)) - fps_num, fps_den = struct.unpack(">2I", data[22:30]) - self.fps = fps_num / float(fps_den) - self.bitrate = cdata.uint_be(b"\x00" + data[37:40]) - self.granule_shift = (cdata.ushort_be(data[40:42]) >> 5) & 0x1F - self.serial = page.serial - - def _post_tags(self, fileobj): - page = OggPage.find_last(fileobj, self.serial) - position = page.position - mask = (1 << self.granule_shift) - 1 - frames = (position >> self.granule_shift) + (position & mask) - self.length = frames / float(self.fps) - - def pprint(self): - return u"Ogg Theora, %.2f seconds, %d bps" % (self.length, - self.bitrate) - - -class OggTheoraCommentDict(VCommentDict): - """Theora comments embedded in an Ogg bitstream.""" - - def __init__(self, fileobj, info): - pages = [] - complete = False - while not complete: - page = OggPage(fileobj) - if page.serial == info.serial: - pages.append(page) - complete = page.complete or (len(page.packets) > 1) - data = OggPage.to_packets(pages)[0][7:] - super(OggTheoraCommentDict, self).__init__(data, framing=False) - self._padding = len(data) - self._size - - def _inject(self, fileobj, padding_func): - """Write tag data into the Theora comment packet/page.""" - - fileobj.seek(0) - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x81theora"): - page = OggPage(fileobj) - - old_pages = [page] - while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): - page = OggPage(fileobj) - if page.serial == old_pages[0].serial: - old_pages.append(page) - - packets = OggPage.to_packets(old_pages, strict=False) - - content_size = get_size(fileobj) - len(packets[0]) # approx - vcomment_data = b"\x81theora" + self.write(framing=False) - padding_left = len(packets[0]) - len(vcomment_data) - - info = PaddingInfo(padding_left, content_size) - new_padding = info._get_padding(padding_func) - - packets[0] = vcomment_data + b"\x00" * new_padding - - new_pages = OggPage._from_packets_try_preserve(packets, old_pages) - OggPage.replace(fileobj, old_pages, new_pages) - - -class OggTheora(OggFileType): - """An Ogg Theora file.""" - - _Info = OggTheoraInfo - _Tags = OggTheoraCommentDict - _Error = OggTheoraHeaderError - _mimes = ["video/x-theora"] - - info = None - """A `OggTheoraInfo`""" - - tags = None - """A `VCommentDict`""" - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"OggS") * - ((b"\x80theora" in header) + (b"\x81theora" in header)) * 2) - - -Open = OggTheora - - -def delete(filename): - """Remove tags from a file.""" - - 
OggTheora(filename).delete() diff --git a/resources/lib/mutagen/oggvorbis.py b/resources/lib/mutagen/oggvorbis.py deleted file mode 100644 index b058a0c1..00000000 --- a/resources/lib/mutagen/oggvorbis.py +++ /dev/null @@ -1,159 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write Ogg Vorbis comments. - -This module handles Vorbis files wrapped in an Ogg bitstream. The -first Vorbis stream found is used. - -Read more about Ogg Vorbis at http://vorbis.com/. This module is based -on the specification at http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html. -""" - -__all__ = ["OggVorbis", "Open", "delete"] - -import struct - -from mutagen import StreamInfo -from mutagen._vorbis import VCommentDict -from mutagen._util import get_size -from mutagen._tags import PaddingInfo -from mutagen.ogg import OggPage, OggFileType, error as OggError - - -class error(OggError): - pass - - -class OggVorbisHeaderError(error): - pass - - -class OggVorbisInfo(StreamInfo): - """Ogg Vorbis stream information.""" - - length = 0 - """File length in seconds, as a float""" - - channels = 0 - """Number of channels""" - - bitrate = 0 - """Nominal ('average') bitrate in bits per second, as an int""" - - sample_rate = 0 - """Sample rate in Hz""" - - def __init__(self, fileobj): - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x01vorbis"): - page = OggPage(fileobj) - if not page.first: - raise OggVorbisHeaderError( - "page has ID header, but doesn't start a stream") - (self.channels, self.sample_rate, max_bitrate, nominal_bitrate, - min_bitrate) = struct.unpack("<B4i", page.packets[0][11:28]) - self.serial = page.serial - - max_bitrate = max(0, max_bitrate) - min_bitrate = max(0, min_bitrate) - nominal_bitrate = max(0, nominal_bitrate) - - if nominal_bitrate == 0: - self.bitrate = (max_bitrate + min_bitrate) // 2 - elif max_bitrate and max_bitrate < nominal_bitrate: - # If the max bitrate is less than the nominal, we know - # the nominal is wrong. - self.bitrate = max_bitrate - elif min_bitrate > nominal_bitrate: - self.bitrate = min_bitrate - else: - self.bitrate = nominal_bitrate - - def _post_tags(self, fileobj): - page = OggPage.find_last(fileobj, self.serial) - self.length = page.position / float(self.sample_rate) - - def pprint(self): - return u"Ogg Vorbis, %.2f seconds, %d bps" % ( - self.length, self.bitrate) - - -class OggVCommentDict(VCommentDict): - """Vorbis comments embedded in an Ogg bitstream.""" - - def __init__(self, fileobj, info): - pages = [] - complete = False - while not complete: - page = OggPage(fileobj) - if page.serial == info.serial: - pages.append(page) - complete = page.complete or (len(page.packets) > 1) - data = OggPage.to_packets(pages)[0][7:] # Strip off "\x03vorbis". - super(OggVCommentDict, self).__init__(data) - self._padding = len(data) - self._size - - def _inject(self, fileobj, padding_func): - """Write tag data into the Vorbis comment packet/page.""" - - # Find the old pages in the file; we'll need to remove them, - # plus grab any stray setup packet data out of them. - fileobj.seek(0) - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x03vorbis"): - page = OggPage(fileobj) - - old_pages = [page] - while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): - page = OggPage(fileobj) - if page.serial == old_pages[0].serial: - old_pages.append(page) - - packets = OggPage.to_packets(old_pages, strict=False) - - content_size = get_size(fileobj) - len(packets[0]) # approx - vcomment_data = b"\x03vorbis" + self.write() - padding_left = len(packets[0]) - len(vcomment_data) - - info = PaddingInfo(padding_left, content_size) - new_padding = info._get_padding(padding_func) - - # Set the new comment packet. 
- packets[0] = vcomment_data + b"\x00" * new_padding - - new_pages = OggPage._from_packets_try_preserve(packets, old_pages) - OggPage.replace(fileobj, old_pages, new_pages) - - -class OggVorbis(OggFileType): - """An Ogg Vorbis file.""" - - _Info = OggVorbisInfo - _Tags = OggVCommentDict - _Error = OggVorbisHeaderError - _mimes = ["audio/vorbis", "audio/x-vorbis"] - - info = None - """A `OggVorbisInfo`""" - - tags = None - """A `VCommentDict`""" - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"OggS") * (b"\x01vorbis" in header)) - - -Open = OggVorbis - - -def delete(filename): - """Remove tags from a file.""" - - OggVorbis(filename).delete() diff --git a/resources/lib/mutagen/optimfrog.py b/resources/lib/mutagen/optimfrog.py deleted file mode 100644 index 0d85a818..00000000 --- a/resources/lib/mutagen/optimfrog.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Lukas Lalinsky -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""OptimFROG audio streams with APEv2 tags. - -OptimFROG is a lossless audio compression program. Its main goal is to -reduce at maximum the size of audio files, while permitting bit -identical restoration for all input. It is similar with the ZIP -compression, but it is highly specialized to compress audio data. - -Only versions 4.5 and higher are supported. - -For more information, see http://www.losslessaudio.org/ -""" - -__all__ = ["OptimFROG", "Open", "delete"] - -import struct - -from ._compat import endswith -from mutagen import StreamInfo -from mutagen.apev2 import APEv2File, error, delete - - -class OptimFROGHeaderError(error): - pass - - -class OptimFROGInfo(StreamInfo): - """OptimFROG stream information. - - Attributes: - - * channels - number of audio channels - * length - file length in seconds, as a float - * sample_rate - audio sampling rate in Hz - """ - - def __init__(self, fileobj): - header = fileobj.read(76) - if (len(header) != 76 or not header.startswith(b"OFR ") or - struct.unpack("` - """ - - _Info = TrueAudioInfo - _mimes = ["audio/x-tta"] - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"ID3") + header.startswith(b"TTA") + - endswith(filename.lower(), b".tta") * 2) - - -Open = TrueAudio - - -class EasyTrueAudio(TrueAudio): - """Like MP3, but uses EasyID3 for tags. - - :ivar info: :class:`TrueAudioInfo` - :ivar tags: :class:`EasyID3 ` - """ - - from mutagen.easyid3 import EasyID3 as ID3 - ID3 = ID3 diff --git a/resources/lib/mutagen/wavpack.py b/resources/lib/mutagen/wavpack.py deleted file mode 100644 index 80710f6d..00000000 --- a/resources/lib/mutagen/wavpack.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2006 Joe Wreschnig -# 2014 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""WavPack reading and writing. - -WavPack is a lossless format that uses APEv2 tags. Read - -* http://www.wavpack.com/ -* http://www.wavpack.com/file_format.txt - -for more information. 
-""" - -__all__ = ["WavPack", "Open", "delete"] - -from mutagen import StreamInfo -from mutagen.apev2 import APEv2File, error, delete -from mutagen._util import cdata - - -class WavPackHeaderError(error): - pass - -RATES = [6000, 8000, 9600, 11025, 12000, 16000, 22050, 24000, 32000, 44100, - 48000, 64000, 88200, 96000, 192000] - - -class _WavPackHeader(object): - - def __init__(self, block_size, version, track_no, index_no, total_samples, - block_index, block_samples, flags, crc): - - self.block_size = block_size - self.version = version - self.track_no = track_no - self.index_no = index_no - self.total_samples = total_samples - self.block_index = block_index - self.block_samples = block_samples - self.flags = flags - self.crc = crc - - @classmethod - def from_fileobj(cls, fileobj): - """A new _WavPackHeader or raises WavPackHeaderError""" - - header = fileobj.read(32) - if len(header) != 32 or not header.startswith(b"wvpk"): - raise WavPackHeaderError("not a WavPack header: %r" % header) - - block_size = cdata.uint_le(header[4:8]) - version = cdata.ushort_le(header[8:10]) - track_no = ord(header[10:11]) - index_no = ord(header[11:12]) - samples = cdata.uint_le(header[12:16]) - if samples == 2 ** 32 - 1: - samples = -1 - block_index = cdata.uint_le(header[16:20]) - block_samples = cdata.uint_le(header[20:24]) - flags = cdata.uint_le(header[24:28]) - crc = cdata.uint_le(header[28:32]) - - return _WavPackHeader(block_size, version, track_no, index_no, - samples, block_index, block_samples, flags, crc) - - -class WavPackInfo(StreamInfo): - """WavPack stream information. - - Attributes: - - * channels - number of audio channels (1 or 2) - * length - file length in seconds, as a float - * sample_rate - audio sampling rate in Hz - * version - WavPack stream version - """ - - def __init__(self, fileobj): - try: - header = _WavPackHeader.from_fileobj(fileobj) - except WavPackHeaderError: - raise WavPackHeaderError("not a WavPack file") - - self.version = header.version - self.channels = bool(header.flags & 4) or 2 - self.sample_rate = RATES[(header.flags >> 23) & 0xF] - - if header.total_samples == -1 or header.block_index != 0: - # TODO: we could make this faster by using the tag size - # and search backwards for the last block, then do - # last.block_index + last.block_samples - initial.block_index - samples = header.block_samples - while 1: - fileobj.seek(header.block_size - 32 + 8, 1) - try: - header = _WavPackHeader.from_fileobj(fileobj) - except WavPackHeaderError: - break - samples += header.block_samples - else: - samples = header.total_samples - - self.length = float(samples) / self.sample_rate - - def pprint(self): - return u"WavPack, %.2f seconds, %d Hz" % (self.length, - self.sample_rate) - - -class WavPack(APEv2File): - _Info = WavPackInfo - _mimes = ["audio/x-wavpack"] - - @staticmethod - def score(filename, fileobj, header): - return header.startswith(b"wvpk") * 2 - - -Open = WavPack