commit de9c935a40
9 changed files with 73 additions and 65 deletions
addon.xml | 17

@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.plexkodiconnect" name="PlexKodiConnect" version="2.13.0" provider-name="croneter">
+<addon id="plugin.video.plexkodiconnect" name="PlexKodiConnect" version="2.14.0" provider-name="croneter">
     <requires>
         <import addon="xbmc.python" version="2.1.0"/>
         <import addon="script.module.requests" version="2.9.1" />
@@ -88,7 +88,20 @@
     <summary lang="ko_KR">Plex를 Kodi에 기본 통합</summary>
     <description lang="ko_KR">Kodi를 Plex Media Server에 연결합니다. 이 플러그인은 Plex로 모든 비디오를 관리하고 Kodi로는 관리하지 않는다고 가정합니다. Kodi 비디오 및 음악 데이터베이스에 이미 저장된 데이터가 손실 될 수 있습니다 (이 플러그인이 직접 변경하므로). 자신의 책임하에 사용하십시오!</description>
     <disclaimer lang="ko_KR">자신의 책임하에 사용</disclaimer>
-    <news>version 2.13.0:
+    <news>version 2.14.0:
+- Fix PlexKodiConnect changing or removing subtitles for every video on the PMS
+- version 2.13.1-2.13.2 for everyone
+
+version 2.13.2 (beta only):
+- Fix a race condition that could lead to the sync getting stuck
+- Fix RecursionError: maximum recursion depth exceeded
+- Fix websocket AttributeError: 'NoneType' object has no attribute 'is_ssl'
+
+version 2.13.1 (beta only):
+- Fix a race condition that could lead to the sync process getting stuck
+- Fix likelihood of `database is locked` error occurring
+
+version 2.13.0:
 - Support for the Plex HAMA agent to let Kodi identify anime (using Kodi's uniqueID 'anidb')
 - Support forced HAMA IDs when using tvdb uniqueID
 - version 2.12.26 for everyone

@@ -1,3 +1,16 @@
+version 2.14.0:
+- Fix PlexKodiConnect changing or removing subtitles for every video on the PMS
+- version 2.13.1-2.13.2 for everyone
+
+version 2.13.2 (beta only):
+- Fix a race condition that could lead to the sync getting stuck
+- Fix RecursionError: maximum recursion depth exceeded
+- Fix websocket AttributeError: 'NoneType' object has no attribute 'is_ssl'
+
+version 2.13.1 (beta only):
+- Fix a race condition that could lead to the sync process getting stuck
+- Fix likelihood of `database is locked` error occurring
+
 version 2.13.0:
 - Support for the Plex HAMA agent to let Kodi identify anime (using Kodi's uniqueID 'anidb')
 - Support forced HAMA IDs when using tvdb uniqueID

@@ -135,43 +135,6 @@ class ProcessingQueue(Queue.Queue, object):
     def _qsize(self):
         return self._current_queue._qsize() if self._current_queue else 0
 
-    def _total_qsize(self):
-        """
-        This method is BROKEN as it can lead to a deadlock when a single item
-        from the current section takes longer to download than any new items
-        coming in
-        """
-        return sum(q._qsize() for q in self._queues) if self._queues else 0
-
-    def put(self, item, block=True, timeout=None):
-        """
-        PKC customization of Queue.put. item needs to be the tuple
-            (count [int], {'section': [Section], 'xml': [etree xml]})
-        """
-        self.not_full.acquire()
-        try:
-            if self.maxsize > 0:
-                if not block:
-                    if self._qsize() == self.maxsize:
-                        raise Queue.Full
-                elif timeout is None:
-                    while self._qsize() == self.maxsize:
-                        self.not_full.wait()
-                elif timeout < 0:
-                    raise ValueError("'timeout' must be a non-negative number")
-                else:
-                    endtime = _time() + timeout
-                    while self._qsize() == self.maxsize:
-                        remaining = endtime - _time()
-                        if remaining <= 0.0:
-                            raise Queue.Full
-                        self.not_full.wait(remaining)
-            self._put(item)
-            self.unfinished_tasks += 1
-            self.not_empty.notify()
-        finally:
-            self.not_full.release()
-
     def _put(self, item):
         for i, section in enumerate(self._sections):
             if item[1]['section'] == section:
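The deleted override deserves a note: _total_qsize() bounded put() on the size of all per-section sub-queues combined, while get() only drains the current section's sub-queue, so one slow item in the current section could leave producer and consumer waiting on each other, exactly as the removed docstring warns. Dropping the override falls back to the inherited Queue.put(), which calls the overridden _qsize() and therefore bounds on the queue actually being drained. A minimal sketch (not PKC code) of why that coupling is deadlock-free:

# Bounding put() on the queue that get() actually drains keeps producer and
# consumer coupled; a bound on a larger aggregate can be full while the
# drained queue sits empty, which is the deadlock the docstring describes.
import Queue  # Python 2, as in the codebase; use `queue` on Python 3
import threading

q = Queue.Queue(maxsize=2)

def consumer():
    while True:
        item = q.get()
        if item is None:  # sentinel, like ProcessingQueue's add_sentinel
            break

t = threading.Thread(target=consumer)
t.start()
for i in range(5):
    q.put(i)   # blocks only while the drained queue is full, so it drains
q.put(None)
t.join()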
@@ -188,16 +151,13 @@ class ProcessingQueue(Queue.Queue, object):
         Once the get()-method returns None, you've received the sentinel and
         you've thus exhausted the queue
         """
-        self.not_full.acquire()
-        try:
+        with self.not_full:
             section.number_of_items = 1
             self._add_section(section)
             # Add the actual sentinel to the queue we just added
             self._queues[-1]._put((None, None))
             self.unfinished_tasks += 1
             self.not_empty.notify()
-        finally:
-            self.not_full.release()
 
     def add_section(self, section):
         """
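The acquire()/try/finally pattern is swapped for a with-block here and in the hunks below. threading's lock and condition objects implement the context-manager protocol, so the two spellings are equivalent; a sketch of that equivalence:

import threading

cond = threading.Condition()

# before: manual bookkeeping
cond.acquire()
try:
    pass  # critical section
finally:
    cond.release()

# after: identical semantics, the lock is released even if the body raises
with cond:
    pass  # critical section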
@@ -207,11 +167,26 @@
         Be sure to set section.number_of_items correctly as it will signal
         when processing is completely done for a specific section!
         """
-        self.mutex.acquire()
-        try:
+        with self.mutex:
             self._add_section(section)
-        finally:
-            self.mutex.release()
+
+    def change_section_number_of_items(self, section, number_of_items):
+        """
+        Hit this method if you've reset section.number_of_items to make
+        sure we're not blocking
+        """
+        with self.mutex:
+            self._change_section_number_of_items(section, number_of_items)
+
+    def _change_section_number_of_items(self, section, number_of_items):
+        section.number_of_items = number_of_items
+        if (self._current_section == section
+                and self._counter == number_of_items):
+            # We were actually waiting for more items to come in - but there
+            # aren't any!
+            self._init_next_section()
+            if self._qsize() > 0:
+                self.not_empty.notify()
 
     def _add_section(self, section):
         self._sections.append(section)
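The new change_section_number_of_items() lets the producer lower a section's expected item count after the fact, under the queue's mutex, and re-run the "section finished?" check so a consumer is not left waiting for items that will never arrive. A hypothetical usage sketch (pq and actual_count are placeholder names, not from this commit):

# pq is a ProcessingQueue; section is a Section whose anticipated item
# count turned out too high; actual_count is what the PMS really delivered.
# The call re-checks completion under pq.mutex and notifies a waiting
# consumer if the current section is now exhausted.
pq.change_section_number_of_items(section, actual_count)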
@@ -6,6 +6,7 @@ from functools import wraps
 from . import variables as v, app
 
 DB_WRITE_ATTEMPTS = 100
+DB_WRITE_ATTEMPTS_TIMEOUT = 1  # in seconds
 DB_CONNECTION_TIMEOUT = 10
 
 
@@ -43,7 +44,7 @@ def catch_operationalerrors(method):
                 self.kodiconn.commit()
                 if self.artconn:
                     self.artconn.commit()
-                if app.APP.monitor.waitForAbort(0.1):
+                if app.APP.monitor.waitForAbort(DB_WRITE_ATTEMPTS_TIMEOUT):
                     # PKC needs to quit
                     return
                 # Start new transactions
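For context, a minimal sketch of what a decorator like catch_operationalerrors plausibly does with these constants: retry a database write when sqlite reports `database is locked`, pausing between attempts. The hunk shows the real code commits ongoing transactions first and pauses via Kodi's monitor.waitForAbort() so PKC can quit mid-retry; this sketch is an illustration under those assumptions, not the PKC original:

import sqlite3
import time
from functools import wraps

DB_WRITE_ATTEMPTS = 100
DB_WRITE_ATTEMPTS_TIMEOUT = 1  # in seconds

def catch_operationalerrors(method):
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        attempts = DB_WRITE_ATTEMPTS
        while True:
            try:
                return method(self, *args, **kwargs)
            except sqlite3.OperationalError as err:
                if 'database is locked' not in str(err) or attempts <= 0:
                    raise
                attempts -= 1
                # the real code uses app.APP.monitor.waitForAbort() here
                time.sleep(DB_WRITE_ATTEMPTS_TIMEOUT)
    return wrapper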
@@ -46,6 +46,10 @@ class FillMetadataQueue(common.LibrarySyncMixin,
             if (not self.repair and
                     plexdb.checksum(plex_id, section.plex_type) == checksum):
                 continue
+            if not do_process_section:
+                do_process_section = True
+                self.processing_queue.add_section(section)
+                LOG.debug('Put section in processing queue: %s', section)
             try:
                 self.get_metadata_queue.put((count, plex_id, section),
                                             timeout=QUEUE_TIMEOUT)
@@ -54,16 +58,14 @@ class FillMetadataQueue(common.LibrarySyncMixin,
                           'aborting sync now', plex_id)
                 section.sync_successful = False
                 break
-            count += 1
-            if not do_process_section:
-                do_process_section = True
-                self.processing_queue.add_section(section)
-                LOG.debug('Put section in queue with %s items: %s',
-                          section.number_of_items, section)
+            else:
+                count += 1
         # We might have received LESS items from the PMS than anticipated.
         # Ensures that our queues finish
-        LOG.debug('%s items to process for section %s', count, section)
-        section.number_of_items = count
+        self.processing_queue.change_section_number_of_items(section,
+                                                             count)
+        LOG.debug('%s items to process for section %s',
+                  section.number_of_items, section)
 
     def _run(self):
         while not self.should_cancel():
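The restructuring hinges on try/except/else semantics: the else-branch of a try statement runs only when no exception was raised, so count now tracks items that actually made it into the queue. A quick illustration:

import Queue  # `queue` on Python 3

q = Queue.Queue(maxsize=1)
count = 0
for item in ['a', 'b']:
    try:
        q.put(item, timeout=0.1)
    except Queue.Full:
        break
    else:
        count += 1  # only reached when put() did not raise
print(count)  # 1: the second put() timed out and was never counted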
@@ -93,6 +93,7 @@ class Section(object):
                 "'name': '{self.name}', "
                 "'section_id': {self.section_id}, "
                 "'section_type': '{self.section_type}', "
+                "'plex_type': '{self.plex_type}', "
                 "'sync_to_kodi': {self.sync_to_kodi}, "
                 "'last_sync': {self.last_sync}"
                 "}}").format(self=self).encode('utf-8')
@@ -108,6 +109,8 @@
         Sections compare equal if their section_id, name and plex_type (first
         prio) OR section_type (if no plex_type is set) compare equal
         """
+        if not isinstance(section, Section):
+            return False
         return (self.section_id == section.section_id and
                 self.name == section.name and
                 (self.plex_type == section.plex_type if self.plex_type else
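The new isinstance guard makes comparisons against foreign types (None, strings, and so on) return False instead of raising AttributeError when the right-hand side lacks a section_id. A stripped-down sketch of the effect, not the full PKC class:

import ssl  # unused here; Section below is a minimal stand-in

class Section(object):
    def __init__(self, section_id, name):
        self.section_id = section_id
        self.name = name

    def __eq__(self, section):
        if not isinstance(section, Section):
            return False
        return (self.section_id == section.section_id and
                self.name == section.name)

print(Section(1, 'Movies') == None)                   # False, no exception
print(Section(1, 'Movies') == Section(1, 'Movies'))   # True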
@@ -342,8 +342,7 @@ def audio_subtitle_prefs(api, item):
     if item.playmethod != v.PLAYBACK_METHOD_TRANSCODE:
         LOG.debug('Telling PMS we are not burning in any subtitles')
         args = {
-            'subtitleStreamID': 0,
-            'allParts': 1
+            'subtitleStreamID': 0
         }
         DU().downloadUrl('{server}/library/parts/%s' % part_id,
                          action_type='PUT',
@@ -458,8 +457,7 @@ def setup_transcoding_audio_subtitle_prefs(mediastreams, part_id):
     select_subs_index = subtitle_streams_list[resp - 1]
     # Now prep the PMS for our choice
     args = {
-        'subtitleStreamID': select_subs_index,
-        'allParts': 1
+        'subtitleStreamID': select_subs_index
     }
     DU().downloadUrl('{server}/library/parts/%s' % part_id,
                      action_type='PUT',
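Both hunks drop 'allParts': 1 from the PUT that stores the subtitle choice on the PMS, presumably the "changing or removing subtitles for every video" fix announced for 2.14.0 above. A hedged sketch of the resulting request using the requests library; the server address, part id and token are placeholders, and PKC itself goes through DU().downloadUrl:

import requests

server = 'http://127.0.0.1:32400'  # placeholder PMS address
part_id = 12345                    # placeholder media part id
requests.put('%s/library/parts/%s' % (server, part_id),
             params={'subtitleStreamID': 0},           # 0: no subtitles
             headers={'X-Plex-Token': 'plex-token'})   # placeholder token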
@@ -34,10 +34,10 @@ def unix_date_to_kodi(unix_kodi_time):
     """
     try:
         return strftime('%Y-%m-%d %H:%M:%S', localtime(float(unix_kodi_time)))
-    except Exception:
-        LOG.exception('Received an illegal timestamp from Plex: %s. '
-                      'Using 1970-01-01 12:00:00',
-                      unix_kodi_time)
+    except:
+        LOG.error('Received an illegal timestamp from Plex: %s. '
+                  'Using 1970-01-01 12:00:00',
+                  unix_kodi_time)
         return '1970-01-01 12:00:00'
 
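unix_date_to_kodi() turns a Unix epoch timestamp into the 'YYYY-MM-DD HH:MM:SS' string Kodi's database stores; this hunk only changes how failures are logged when the input is garbage. The happy path, for reference:

from time import localtime, strftime

# 1546300800 is 2019-01-01 00:00:00 UTC; output shifts with local timezone
print(strftime('%Y-%m-%d %H:%M:%S', localtime(float(1546300800))))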
@@ -197,7 +197,10 @@ class WebSocket(object):
         return None
 
     def is_ssl(self):
-        return isinstance(self.sock, ssl.SSLSocket)
+        try:
+            return isinstance(self.sock, ssl.SSLSocket)
+        except:
+            return False
 
     headers = property(getheaders)
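A note on this guard: isinstance(None, ssl.SSLSocket) already returns False, so the bare except mainly hardens against more exotic teardown states; the changelog error ('NoneType' object has no attribute 'is_ssl') fires when the WebSocket object itself is already None in a caller, so this appears to be companion hardening rather than the whole fix. A sketch of the fail-soft behaviour:

import ssl

class Conn(object):          # minimal stand-in for the WebSocket class
    sock = None              # connection already torn down

    def is_ssl(self):
        try:
            return isinstance(self.sock, ssl.SSLSocket)
        except Exception:    # the commit uses a bare except
            return False

print(Conn().is_ssl())  # False, never an exception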