# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
from logging import getLogger

from . import common
from ..plex_api import API
from .. import backgroundthread, plex_functions as PF, utils, variables as v
from .. import app

LOG = getLogger('PLEX.sync.get_metadata')
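
# Protects the section's shared cache of collection XMLs while several
# GetMetadataThread instances sync movies concurrently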
LOCK = backgroundthread.threading.Lock()


class GetMetadataThread(common.LibrarySyncMixin,
                        backgroundthread.KillableThread):
    """
    Threaded download of Plex XML metadata for a certain library item.

    Fills the processing queue with the downloaded etree XML objects.
    """
    def __init__(self, get_metadata_queue, processing_queue):
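        # get_metadata_queue yields (count, plex_id, section) tuples, or a
        # None sentinel on shutdown; results are put onto processing_queue
        # as (count, item-dict) tuples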
        self.get_metadata_queue = get_metadata_queue
        self.processing_queue = processing_queue
        super(GetMetadataThread, self).__init__()

    def _collections(self, item):
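        # Attach the XMLs of all Plex collections (sets) that this movie
        # belongs to, re-using the section-wide cache where possible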
        api = API(item['xml'][0])
        collection_match = item['section'].collection_match
        collection_xmls = item['section'].collection_xmls
        if collection_match is None:
            collection_match = PF.collections(api.library_section_id())
            if collection_match is None:
                LOG.error('Could not download collections')
                return
            # Extract what we need to know
            collection_match = \
                [(utils.cast(int, x.get('index')),
                  utils.cast(int, x.get('ratingKey'))) for x in collection_match]
        item['children'] = {}
        for plex_set_id, set_name in api.collections():
            if self.should_cancel():
                return
            if plex_set_id not in collection_xmls:
                # Get Plex metadata for collections - a pain
                for index, collection_plex_id in collection_match:
                    if index == plex_set_id:
                        collection_xml = PF.GetPlexMetadata(collection_plex_id)
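                        # Check whether the PMS answered with a valid XML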
                        try:
                            collection_xml[0].attrib
                        except (TypeError, IndexError, AttributeError):
                            LOG.error('Could not get collection %s %s',
                                      collection_plex_id, set_name)
                            continue
                        collection_xmls[plex_set_id] = collection_xml
                        break
                else:
                    LOG.error('Did not find Plex collection %s %s',
                              plex_set_id, set_name)
                    continue
            item['children'][plex_set_id] = collection_xmls[plex_set_id]

    def _process_abort(self, count, section):
        # Make sure other threads will also receive the sentinel
        self.get_metadata_queue.put(None)
        if count is not None:
            self._process_skipped_item(count, section)

    def _process_skipped_item(self, count, section):
        section.sync_successful = False
        # Add a "dummy" item so we're not skipping a beat
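        # (the placeholder keeps the counts handed to the processing queue
        # contiguous)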
        self.processing_queue.put((count, {'section': section, 'xml': None}))

    def run(self):
        LOG.debug('Starting %s thread', self.__class__.__name__)
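        # Register with the app so this thread can be suspended and cancelled
        # centrally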
        app.APP.register_thread(self)
        try:
            self._run()
        finally:
            app.APP.deregister_thread(self)
            LOG.debug('##===---- %s Stopped ----===##', self.__class__.__name__)

    def _run(self):
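        # Keep consuming the queue until we receive the None sentinel, are
        # cancelled, or the PMS replies with HTTP 401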
        while True:
            item = self.get_metadata_queue.get()
            try:
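                # None is the sentinel to shut this thread down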
                if item is None or self.should_cancel():
                    self._process_abort(item[0] if item else None,
                                        item[2] if item else None)
                    break
                count, plex_id, section = item
                item = {
                    'xml': PF.GetPlexMetadata(plex_id),  # This will block
                    'children': None,
                    'section': section
                }
                if item['xml'] is None:
                    # Did not receive a valid XML - skip that item for now
                    LOG.error("Could not get metadata for %s. Skipping item "
                              "for now", plex_id)
                    self._process_skipped_item(count, section)
                    continue
                elif item['xml'] == 401:
                    LOG.error('HTTP 401 returned by PMS. Too much strain? '
                              'Cancelling sync for now')
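                    # Remember the crash so the rest of the addon can react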
                    utils.window('plex_scancrashed', value='401')
                    self._process_abort(count, section)
                    break
                if section.plex_type == v.PLEX_TYPE_MOVIE:
                    # Check for collections/sets
                    collections = False
                    for child in item['xml'][0]:
                        if child.tag == 'Collection':
                            collections = True
                            break
                    if collections:
                        with LOCK:
                            self._collections(item)
                if section.get_children:
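                    # This section also requires each item's children to be
                    # synced (a separate, blocking download)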
                    if self.should_cancel():
                        self._process_abort(count, section)
                        break
                    children_xml = PF.GetAllPlexChildren(plex_id)  # Will block
                    try:
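                        # Check that the children's XML is valid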
                        children_xml[0].attrib
                    except (TypeError, IndexError, AttributeError):
                        LOG.error('Could not get children for Plex id %s',
                                  plex_id)
                        self._process_skipped_item(count, section)
                        continue
                    else:
                        item['children'] = children_xml
                self.processing_queue.put((count, item))
            finally:
                self.get_metadata_queue.task_done()