Playlist sync support, part 1

This commit is contained in:
Croneter 2018-04-28 09:12:29 +02:00
parent 7fb0f32bcf
commit 0a83d6c084
41 changed files with 7065 additions and 16 deletions

View file

@ -99,6 +99,35 @@ class API(object):
"""
return self.item.get('ratingKey')
def path(self, force_first_media=True):
"""
Returns a "fully qualified path": add-on paths or direct paths
depending on the current settings. Will NOT validate the playurl.
Returns unicode or None if something went wrong.
"""
filename = self.file_path(force_first_media=force_first_media)
if not state.DIRECT_PATHS or self.plex_type() == v.PLEX_TYPE_CLIP:
if filename and '/' in filename:
filename = filename.rsplit('/', 1)
elif filename:
filename = filename.rsplit('\\', 1)
try:
filename = filename[1]
except (TypeError, IndexError):
filename = None
# Set plugin path and media flags using real filename
path = ('plugin://%s/?plex_id=%s&plex_type=%s&mode=play&filename=%s'
% (v.ADDON_TYPE[self.plex_type()],
self.plex_id(),
self.plex_type(),
filename))
else:
# Direct paths are set the Kodi way
path = self.validate_playurl(filename,
self.plex_type(),
omit_check=True)
return path
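For illustration, a hedged sketch of the two shapes of path this method can return; the plex_id, filename and add-on id below are made-up example values, not read from a real PMS item:
# Hypothetical example values -- not taken from a real library item
plex_id = '246922'
plex_type = 'movie'
filename = 'Some%20Movie%20%282018%29.mkv'

# Add-on path style (direct paths off, or the item is a clip); the add-on id
# shown here is an assumption for the movie type
addon_path = ('plugin://plugin.video.plexkodiconnect.movies/'
              '?plex_id=%s&plex_type=%s&mode=play&filename=%s'
              % (plex_id, plex_type, filename))

# Direct path style (direct paths on): the validated file path itself, e.g.
# 'smb://nas/movies/Some Movie (2018)/Some Movie (2018).mkv'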
def path_and_plex_id(self):
"""
Returns the Plex key such as '/library/metadata/246922' or None
@ -401,6 +430,12 @@ class API(object):
provider = None
return provider
def title(self):
"""
Returns the title of the element as unicode or 'Missing Title Name'
"""
return self.item.get('title', 'Missing Title Name')
def titles(self):
"""
Returns an item's name/title or "Missing Title Name".
@ -454,6 +489,16 @@ class API(object):
resume = 0.0
return int(resume * v.PLEX_TO_KODI_TIMEFACTOR)
def runtime(self):
"""
Returns the total duration of the element as int. 0 if not found
"""
try:
runtime = float(self.item.attrib['duration'])
except (KeyError, ValueError):
runtime = 0.0
return int(runtime * v.PLEX_TO_KODI_TIMEFACTOR)
def resume_runtime(self):
"""
Resume point of time and runtime/totaltime, rounded to seconds.

View file

@ -1182,11 +1182,12 @@ class KodiDBMethods(object):
self.cursor.execute(query, (kodi_id, kodi_type))
def kodiid_from_filename(path, kodi_type):
def kodiid_from_filename(path, kodi_type=None, db_type=None):
"""
Returns kodi_id if we have an item in the Kodi video or audio database with
said path. Feed with the Kodi item type, e.g. 'movie', 'song'
Returns None if not possible
said path. Feed with either the kodi_type, e.g. 'movie' or 'song', or the
DB you want to poll ('video' or 'music')
Returns None, <kodi_type> if not possible
"""
kodi_id = None
path = try_decode(path)
@ -1196,16 +1197,18 @@ def kodiid_from_filename(path, kodi_type):
except IndexError:
filename = path.rsplit('\\', 1)[1]
path = path.rsplit('\\', 1)[0] + '\\'
if kodi_type == v.KODI_TYPE_SONG:
if kodi_type == v.KODI_TYPE_SONG or db_type == 'music':
with GetKodiDB('music') as kodi_db:
try:
kodi_id, _ = kodi_db.music_id_from_filename(filename, path)
kodi_id, kodi_type = kodi_db.music_id_from_filename(filename,
path)
except TypeError:
LOG.debug('No Kodi audio db element found for path %s', path)
else:
with GetKodiDB('video') as kodi_db:
try:
kodi_id, _ = kodi_db.video_id_from_filename(filename, path)
kodi_id, kodi_type = kodi_db.video_id_from_filename(filename,
path)
except TypeError:
LOG.debug('No kodi video db element found for path %s', path)
return kodi_id
return kodi_id, kodi_type
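A hedged sketch of the two calling styles the changed signature allows; the paths below are invented:
# Known Kodi type: search only the matching DB table
kodi_id, kodi_type = kodiid_from_filename('smb://nas/movies/film.mkv',
                                          kodi_type='movie')

# Unknown type: only tell the function which DB to poll
kodi_id, kodi_type = kodiid_from_filename('smb://nas/music/track.mp3',
                                          db_type='music')
if kodi_id is None:
    pass  # nothing found under that path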

View file

@ -269,7 +269,7 @@ class KodiMonitor(xbmc.Monitor):
plex_type = None
# If using direct paths and starting playback from a widget
if not kodi_id and kodi_type and path:
kodi_id = kodiid_from_filename(path, kodi_type)
kodi_id, _ = kodiid_from_filename(path, kodi_type)
if kodi_id:
with plexdb.Get_Plex_DB() as plex_db:
plex_dbitem = plex_db.getItem_byKodiId(kodi_id, kodi_type)
@ -438,6 +438,13 @@ class SpecialMonitor(Thread):
else:
# Different context menu is displayed
state.RESUME_PLAYBACK = False
if xbmc.getCondVisibility('Window.IsVisible(MyVideoNav.xml)'):
path = xbmc.getInfoLabel('container.folderpath')
if (isinstance(path, str) and
path.startswith('special://profile/playlists')):
pass
# TODO: start polling PMS for playlist changes
# Optionally: poll PMS continuously with custom interval
xbmc.sleep(200)
LOG.info("#====---- Special Monitor Stopped ----====#")

View file

@ -26,6 +26,7 @@ from library_sync.process_metadata import ThreadedProcessMetadata
import library_sync.sync_info as sync_info
from library_sync.fanart import ThreadedProcessFanart
import music
import playlists
import state
###############################################################################
@ -234,6 +235,15 @@ class LibrarySync(Thread):
plex_db.plexcursor.execute('''
CREATE TABLE IF NOT EXISTS version(idVersion TEXT)
''')
plex_db.plexcursor.execute('''
CREATE TABLE IF NOT EXISTS playlists(
plex_id TEXT UNIQUE,
plex_name TEXT,
plex_updatedat TEXT,
kodi_path TEXT,
kodi_type TEXT,
kodi_hash TEXT)
''')
# Create an index for actors to speed up sync
utils.create_actor_db_index()
@ -1519,6 +1529,7 @@ class LibrarySync(Thread):
state.DB_SCAN = False
# Start the fanart download thread
self.fanartthread.start()
kodi_playlist_monitor = None
while not self.stopped():
# In the event the server goes offline
@ -1551,6 +1562,7 @@ class LibrarySync(Thread):
state.DB_SCAN = False
if settings('FanartTV') == 'true':
self.sync_fanart()
kodi_playlist_monitor = playlists.kodi_playlist_monitor()
elif not kodi_db_version_checked:
# Install sync was already done, don't force-show dialogs
@ -1591,6 +1603,7 @@ class LibrarySync(Thread):
LOG.info('Startup sync has not yet been successful')
window('plex_dbScan', clear=True)
state.DB_SCAN = False
kodi_playlist_monitor = playlists.kodi_playlist_monitor()
# Currently no db scan, so we can start a new scan
elif state.DB_SCAN is False:
@ -1650,7 +1663,9 @@ class LibrarySync(Thread):
xbmc.sleep(10)
continue
xbmc.sleep(100)
# Shut down playlist monitoring
if kodi_playlist_monitor:
kodi_playlist_monitor.stop()
# doUtils could still have a session open due to interrupted sync
try:
DU().stopSession()

View file

@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
# pathtools: File system path tools.
# Copyright (C) 2010 Yesudeep Mangalapilly <yesudeep@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

View file

@ -0,0 +1,206 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# path.py: Path functions.
#
# Copyright (C) 2010 Yesudeep Mangalapilly <yesudeep@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
:module: pathtools.path
:synopsis: Directory walking, listing, and path sanitizing functions.
:author: Yesudeep Mangalapilly <yesudeep@gmail.com>
Functions
---------
.. autofunction:: get_dir_walker
.. autofunction:: walk
.. autofunction:: listdir
.. autofunction:: list_directories
.. autofunction:: list_files
.. autofunction:: absolute_path
.. autofunction:: real_absolute_path
.. autofunction:: parent_dir_path
"""
import os.path
from functools import partial
__all__ = [
'get_dir_walker',
'walk',
'listdir',
'list_directories',
'list_files',
'absolute_path',
'real_absolute_path',
'parent_dir_path',
]
def get_dir_walker(recursive, topdown=True, followlinks=False):
"""
Returns a recursive or a non-recursive directory walker.
:param recursive:
``True`` produces a recursive walker; ``False`` produces a non-recursive
walker.
:returns:
A walker function.
"""
if recursive:
walk = partial(os.walk, topdown=topdown, followlinks=followlinks)
else:
def walk(path, topdown=topdown, followlinks=followlinks):
try:
yield next(os.walk(path, topdown=topdown, followlinks=followlinks))
except NameError:
yield os.walk(path, topdown=topdown, followlinks=followlinks).next() #IGNORE:E1101
return walk
def walk(dir_pathname, recursive=True, topdown=True, followlinks=False):
"""
Walks a directory tree optionally recursively. Works exactly like
:func:`os.walk` only adding the `recursive` argument.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
walk_func = get_dir_walker(recursive, topdown, followlinks)
for root, dirnames, filenames in walk_func(dir_pathname):
yield (root, dirnames, filenames)
def listdir(dir_pathname,
recursive=True,
topdown=True,
followlinks=False):
"""
Enlists all items using their absolute paths in a directory, optionally
recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, dirnames, filenames\
in walk(dir_pathname, recursive, topdown, followlinks):
for dirname in dirnames:
yield absolute_path(os.path.join(root, dirname))
for filename in filenames:
yield absolute_path(os.path.join(root, filename))
def list_directories(dir_pathname,
recursive=True,
topdown=True,
followlinks=False):
"""
Enlists all the directories using their absolute paths within the specified
directory, optionally recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, dirnames, filenames\
in walk(dir_pathname, recursive, topdown, followlinks):
for dirname in dirnames:
yield absolute_path(os.path.join(root, dirname))
def list_files(dir_pathname,
recursive=True,
topdown=True,
followlinks=False):
"""
Enlists all the files using their absolute paths within the specified
directory, optionally recursively.
:param dir_pathname:
The directory to traverse.
:param recursive:
``True`` for walking recursively through the directory tree;
``False`` otherwise.
:param topdown:
Please see the documentation for :func:`os.walk`
:param followlinks:
Please see the documentation for :func:`os.walk`
"""
for root, dirnames, filenames\
in walk(dir_pathname, recursive, topdown, followlinks):
for filename in filenames:
yield absolute_path(os.path.join(root, filename))
def absolute_path(path):
"""
Returns the absolute path for the given path and normalizes the path.
:param path:
Path for which the absolute normalized path will be found.
:returns:
Absolute normalized path.
"""
return os.path.abspath(os.path.normpath(path))
def real_absolute_path(path):
"""
Returns the real absolute normalized path for the given path.
:param path:
Path for which the real absolute normalized path will be found.
:returns:
Real absolute normalized path.
"""
return os.path.realpath(absolute_path(path))
def parent_dir_path(path):
"""
Returns the parent directory path.
:param path:
Path for which the parent directory will be obtained.
:returns:
Parent directory path.
"""
return absolute_path(os.path.dirname(path))

View file

@ -0,0 +1,265 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# patterns.py: Common wildcard searching/filtering functionality for files.
#
# Copyright (C) 2010 Yesudeep Mangalapilly <yesudeep@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
:module: pathtools.patterns
:synopsis: Wildcard pattern matching and filtering functions for paths.
:author: Yesudeep Mangalapilly <yesudeep@gmail.com>
Functions
---------
.. autofunction:: match_path
.. autofunction:: match_path_against
.. autofunction:: filter_paths
"""
from fnmatch import fnmatch, fnmatchcase
__all__ = ['match_path',
'match_path_against',
'match_any_paths',
'filter_paths']
def _string_lower(s):
"""
Convenience function to lowercase a string (the :mod:`string` module is
deprecated/removed in Python 3.0).
:param s:
The string which will be lowercased.
:returns:
Lowercased copy of string s.
"""
return s.lower()
def match_path_against(pathname, patterns, case_sensitive=True):
"""
Determines whether the pathname matches any of the given wildcard patterns,
optionally ignoring the case of the pathname and patterns.
:param pathname:
A path name that will be matched against a wildcard pattern.
:param patterns:
A list of wildcard patterns to match_path the filename against.
:param case_sensitive:
``True`` if the matching should be case-sensitive; ``False`` otherwise.
:returns:
``True`` if the pattern matches; ``False`` otherwise.
Doctests::
>>> match_path_against("/home/username/foobar/blah.py", ["*.py", "*.txt"], False)
True
>>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], True)
False
>>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], False)
True
>>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], True)
False
>>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], False)
True
"""
if case_sensitive:
match_func = fnmatchcase
pattern_transform_func = (lambda w: w)
else:
match_func = fnmatch
pathname = pathname.lower()
pattern_transform_func = _string_lower
for pattern in set(patterns):
pattern = pattern_transform_func(pattern)
if match_func(pathname, pattern):
return True
return False
def _match_path(pathname,
included_patterns,
excluded_patterns,
case_sensitive=True):
"""Internal function same as :func:`match_path` but does not check arguments.
Doctests::
>>> _match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True)
True
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True)
False
>>> _match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False)
False
>>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False)
Traceback (most recent call last):
...
ValueError: conflicting patterns `set(['*.py'])` included and excluded
"""
if not case_sensitive:
included_patterns = set(map(_string_lower, included_patterns))
excluded_patterns = set(map(_string_lower, excluded_patterns))
else:
included_patterns = set(included_patterns)
excluded_patterns = set(excluded_patterns)
common_patterns = included_patterns & excluded_patterns
if common_patterns:
raise ValueError('conflicting patterns `%s` included and excluded'\
% common_patterns)
return (match_path_against(pathname, included_patterns, case_sensitive)\
and not match_path_against(pathname, excluded_patterns,
case_sensitive))
def match_path(pathname,
included_patterns=None,
excluded_patterns=None,
case_sensitive=True):
"""
Matches a pathname against a set of acceptable and ignored patterns.
:param pathname:
A pathname which will be matched against a pattern.
:param included_patterns:
Allow filenames matching wildcard patterns specified in this list.
If no pattern is specified, the function treats the pathname as
a match_path.
:param excluded_patterns:
Ignores filenames matching wildcard patterns specified in this list.
If no pattern is specified, the function treats the pathname as
a match_path.
:param case_sensitive:
``True`` if matching should be case-sensitive; ``False`` otherwise.
:returns:
``True`` if the pathname matches; ``False`` otherwise.
:raises:
ValueError if included patterns and excluded patterns contain the
same pattern.
Doctests::
>>> match_path("/Users/gorakhargosh/foobar.py")
True
>>> match_path("/Users/gorakhargosh/foobar.py", case_sensitive=False)
True
>>> match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True)
True
>>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True)
False
>>> match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False)
False
>>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False)
Traceback (most recent call last):
...
ValueError: conflicting patterns `set(['*.py'])` included and excluded
"""
included = ["*"] if included_patterns is None else included_patterns
excluded = [] if excluded_patterns is None else excluded_patterns
return _match_path(pathname, included, excluded, case_sensitive)
def filter_paths(pathnames,
included_patterns=None,
excluded_patterns=None,
case_sensitive=True):
"""
Filters from a set of paths based on acceptable patterns and
ignorable patterns.
:param pathnames:
A list of path names that will be filtered based on matching and
ignored patterns.
:param included_patterns:
Allow filenames matching wildcard patterns specified in this list.
If no pattern list is specified, ["*"] is used as the default pattern,
which matches all files.
:param excluded_patterns:
Ignores filenames matching wildcard patterns specified in this list.
If no pattern list is specified, no files are ignored.
:param case_sensitive:
``True`` if matching should be case-sensitive; ``False`` otherwise.
:returns:
A list of pathnames that matched the allowable patterns and passed
through the ignored patterns.
Doctests::
>>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"])
>>> set(filter_paths(pathnames)) == pathnames
True
>>> set(filter_paths(pathnames, case_sensitive=False)) == pathnames
True
>>> set(filter_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)) == set(["/users/gorakhargosh/foobar.py", "/etc/pdnsd.conf"])
True
"""
included = ["*"] if included_patterns is None else included_patterns
excluded = [] if excluded_patterns is None else excluded_patterns
for pathname in pathnames:
# We don't call the public match_path because it checks arguments
# and sets default values if none are found. We're already doing that
# above.
if _match_path(pathname, included, excluded, case_sensitive):
yield pathname
def match_any_paths(pathnames,
included_patterns=None,
excluded_patterns=None,
case_sensitive=True):
"""
Matches from a set of paths based on acceptable patterns and
ignorable patterns.
:param pathnames:
A list of path names that will be filtered based on matching and
ignored patterns.
:param included_patterns:
Allow filenames matching wildcard patterns specified in this list.
If no pattern list is specified, ["*"] is used as the default pattern,
which matches all files.
:param excluded_patterns:
Ignores filenames matching wildcard patterns specified in this list.
If no pattern list is specified, no files are ignored.
:param case_sensitive:
``True`` if matching should be case-sensitive; ``False`` otherwise.
:returns:
``True`` if any of the paths matches; ``False`` otherwise.
Doctests::
>>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"])
>>> match_any_paths(pathnames)
True
>>> match_any_paths(pathnames, case_sensitive=False)
True
>>> match_any_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)
True
>>> match_any_paths(pathnames, ["*.txt"], case_sensitive=False)
False
>>> match_any_paths(pathnames, ["*.txt"], case_sensitive=True)
False
"""
included = ["*"] if included_patterns is None else included_patterns
excluded = [] if excluded_patterns is None else excluded_patterns
for pathname in pathnames:
# We don't call the public match_path because it checks arguments
# and sets default values if none are found. We're already doing that
# above.
if _match_path(pathname, included, excluded, case_sensitive):
return True
return False

View file

@ -0,0 +1,31 @@
# -*- coding: utf-8 -*-
# version.py: Version information.
# Copyright (C) 2010 Yesudeep Mangalapilly <yesudeep@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# When updating this version number, please update the
# ``docs/source/global.rst.inc`` file as well.
VERSION_MAJOR = 0
VERSION_MINOR = 1
VERSION_BUILD = 1
VERSION_INFO = (VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD)
VERSION_STRING = "%d.%d.%d" % VERSION_INFO
__version__ = VERSION_INFO

View file

@ -115,6 +115,39 @@ class Playlist_Object(PlaylistObjectBaseclase):
"""
kind = 'playList'
def __init__(self):
self.plex_name = None
self.plex_updatedat = None
self._kodi_path = None
self.kodi_filename = None
self.kodi_extension = None
self.kodi_hash = None
PlaylistObjectBaseclase.__init__(self)
@property
def kodi_path(self):
return self._kodi_path
@kodi_path.setter
def kodi_path(self, path):
if '/' in path:
file = path.rsplit('/', 1)[1]
else:
file = path.rsplit('\\', 1)[1]
try:
self.kodi_filename, self.kodi_extension = file.rsplit('.', 1)
except ValueError:
raise PlaylistError('Invalid path: %s' % path)
if path.startswith(v.PLAYLIST_PATH_VIDEO):
self.type = 'video'
elif path.startswith(v.PLAYLIST_PATH_MUSIC):
self.type = 'music'
else:
raise PlaylistError('Playlist type not supported: %s' % path)
if not self.plex_name:
self.plex_name = self.kodi_filename
self._kodi_path = path
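A rough illustration of what the setter derives, assuming a profile folder of /storage/.kodi/userdata/ so that the path falls under v.PLAYLIST_PATH_VIDEO:
playlist = Playlist_Object()
playlist.kodi_path = ('/storage/.kodi/userdata/playlists/video/'
                      'My Plex Mix.m3u8')
# Roughly, after the setter has run:
#   playlist.kodi_filename  == 'My Plex Mix'
#   playlist.kodi_extension == 'm3u8'
#   playlist.type           == 'video'
#   playlist.plex_name      == 'My Plex Mix'  (only if it was unset before)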
class Playqueue_Object(PlaylistObjectBaseclase):
"""
@ -305,15 +338,14 @@ def verify_kodi_item(plex_id, kodi_item):
raise PlaylistError('kodi_item cannot be used for Plex playback')
LOG.debug('Starting search for Kodi id since we did not get one: %s',
kodi_item)
kodi_id = kodiid_from_filename(kodi_item['file'], v.KODI_TYPE_MOVIE)
kodi_id, _ = kodiid_from_filename(kodi_item['file'], v.KODI_TYPE_MOVIE)
kodi_item['type'] = v.KODI_TYPE_MOVIE
if kodi_id is None:
kodi_id = kodiid_from_filename(kodi_item['file'],
kodi_id, _ = kodiid_from_filename(kodi_item['file'],
v.KODI_TYPE_EPISODE)
kodi_item['type'] = v.KODI_TYPE_EPISODE
if kodi_id is None:
kodi_id = kodiid_from_filename(kodi_item['file'],
v.KODI_TYPE_SONG)
kodi_id, _ = kodiid_from_filename(kodi_item['file'], v.KODI_TYPE_SONG)
kodi_item['type'] = v.KODI_TYPE_SONG
kodi_item['id'] = kodi_id
kodi_item['type'] = None if kodi_id is None else kodi_item['type']
@ -405,9 +437,9 @@ def get_playlist_details_from_xml(playlist, xml):
playlist.selectedItemOffset = xml.attrib.get(
'%sSelectedItemOffset' % playlist.kind)
LOG.debug('Updated playlist from xml: %s', playlist)
except (TypeError, KeyError, AttributeError) as msg:
except (TypeError, KeyError, AttributeError) as err:
raise PlaylistError('Could not get playlist details from xml: %s',
msg)
err)
def update_playlist_from_PMS(playlist, playlist_id=None, xml=None):
@ -629,6 +661,19 @@ def move_playlist_item(playlist, before_pos, after_pos):
LOG.debug('Done moving for %s', playlist)
def get_all_playlists():
"""
Returns an XML with all Plex playlists or None
"""
xml = DU().downloadUrl("{server}/playlists",
headerOptions={'Accept': 'application/xml'})
try:
xml.attrib
except (AttributeError, TypeError):
xml = None
return xml
def get_PMS_playlist(playlist, playlist_id=None):
"""
Fetches the PMS playlist/playqueue as an XML. Pass in playlist_id if we
@ -768,3 +813,16 @@ def get_plextype_from_xml(xml):
LOG.error('Could not get plex metadata for plex id %s', plex_id)
return
return new_xml[0].attrib.get('type')
def delete_playlist_from_pms(playlist):
"""
Deletes the playlist from the PMS
"""
xml = DU().downloadUrl("{server}/%ss/%s" %
(playlist.kind, playlist.id),
action_type="DELETE")
try:
xml.attrib
except (TypeError, AttributeError):
raise PlaylistError('Could not delete playlist %s' % playlist)

331
resources/lib/playlists.py Normal file
View file

@ -0,0 +1,331 @@
# -*- coding: utf-8 -*-
from logging import getLogger
import os
import sys
import re
import watchdog.events
import watchdog.observers
import playlist_func as PL
from PlexAPI import API
import kodidb_functions as kodidb
import plexdb_functions as plexdb
import utils
import variables as v
###############################################################################
LOG = getLogger("PLEX." + __name__)
# Our PKC playlists. Keys: ID [int] of plex DB table playlists. Values:
# playlist_func.Playlist_Object()
PLAYLISTS = {}
# Which playlist formats are supported by PKC?
SUPPORTED_FILETYPES = (
'm3u',
'm3u8'
# 'pls',
# 'cue',
)
DEFAULT_ENCODING = sys.getdefaultencoding()
REGEX_PLEX_ID = re.compile(r'''plex_id=(\d+)''')
def create_plex_playlist(playlist):
"""
Adds the playlist [Playlist_Object] to the PMS. If playlist.plex_id is
not None the existing Plex playlist will be overwritten; otherwise a new
playlist will be generated and stored accordingly in the playlist object.
Will also add (or modify an existing) Plex playlist table entry.
Returns None or raises PL.PlaylistError
"""
LOG.info('Creating Plex playlist from Kodi file: %s', playlist.kodi_path)
plex_ids = _playlist_file_to_plex_ids(playlist)
for pos, plex_id in enumerate(plex_ids):
if pos == 0:
PL.init_Plex_playlist(playlist, plex_id=plex_id)
else:
PL.add_item_to_PMS_playlist(playlist, pos, plex_id=plex_id)
update_plex_table(playlist, update_kodi_hash=True)
LOG.info('Done creating Plex %s playlist %s',
playlist.type, playlist.plex_name)
def delete_plex_playlist(playlist):
"""
Removes the playlist [Playlist_Object] from the PMS. Will also delete the
entry in the Plex playlist table.
Returns None or raises PL.PlaylistError
"""
LOG.info('Deleting playlist %s from the PMS', playlist.plex_name)
try:
PL.delete_playlist_from_pms(playlist)
except PL.PlaylistError as err:
LOG.error('Could not delete Plex playlist: %s', err)
else:
update_plex_table(playlist, delete=True)
def create_kodi_playlist(plex_id):
"""
Creates a new Kodi playlist file. Will also add (or modify an existing) Plex
playlist table entry.
Assumes that the Plex playlist is indeed new. A NEW Kodi playlist will be
created in any case (not replaced)
Returns the playlist or raises PL.PlaylistError
"""
LOG.info('Creating new Kodi playlist from Plex playlist %s', plex_id)
playlist = PL.Playlist_Object()
playlist.id = plex_id
xml = PL.get_PMS_playlist(playlist)
if not xml:
LOG.error('Could not create Kodi playlist for %s', plex_id)
return
PL.get_playlist_details_from_xml(playlist, xml)
if xml.get('playlistType') == 'audio':
playlist.type = 'music'
elif xml.get('playlistType') == 'video':
playlist.type = 'video'
else:
raise RuntimeError('Plex playlist type unknown: %s'
% xml.get('playlistType'))
playlist.plex_name = xml.get('title')
name = utils.slugify(playlist.plex_name)
playlist.kodi_path = os.path.join(v.PLAYLIST_PATH,
playlist.type,
'%s.m3u8' % name)
# Derive filename close to Plex playlist name
_write_playlist_to_file(playlist, xml)
update_plex_table(playlist, update_kodi_hash=True)
LOG.info('Created Kodi playlist %s based on Plex playlist %s',
playlist.kodi_filename, playlist.plex_name)
def delete_kodi_playlist(playlist):
"""
Removes the corresponding Kodi file for playlist [Playlist_Object] from
disk. Be sure that playlist.kodi_path is set. Will also delete the entry in
the Plex playlist table.
Returns None or raises PL.PlaylistError
"""
try:
os.remove(playlist.kodi_path)
except OSError as err:
LOG.error('Could not delete Kodi playlist file %s. Error:\n %s: %s',
playlist.kodi_path, err.errno, err.strerror)
else:
update_plex_table(playlist, delete=True)
def update_plex_table(playlist, delete=False, new_path=None,
update_kodi_hash=False):
"""
Assumes that all sync operations are over. Takes playlist [Playlist_Object]
and creates/updates the corresponding Plex playlists table entry
Pass delete=True to delete the playlist entry
"""
if delete:
with plexdb.Get_Plex_DB() as plex_db:
plex_db.delete_playlist_entry(playlist)
return
if update_kodi_hash:
playlist.kodi_hash = utils.generate_file_md5(playlist.kodi_path)
with plexdb.Get_Plex_DB() as plex_db:
plex_db.insert_playlist_entry(playlist)
def _playlist_file_to_plex_ids(playlist):
"""
Takes the playlist file located at path [unicode] and parses it.
Returns a list of plex_ids (str) or raises PL.PlaylistError if a single
item cannot be parsed from Kodi to Plex.
"""
if playlist.kodi_extension in ('m3u', 'm3u8'):
plex_ids = m3u_to_plex_ids(playlist)
else:
raise PL.PlaylistError('Unsupported playlist extension: %s'
% playlist.kodi_extension)
return plex_ids
def _m3u_iterator(text):
"""
Yields e.g. plugin://plugin.video.plexkodiconnect.movies/?plex_id=xxx
"""
lines = iter(text.split('\n'))
for line in lines:
if line.startswith('#EXTINF:'):
yield next(lines).strip()
def m3u_to_plex_ids(playlist):
"""
Adapter to process *.m3u playlist files. Encoding is not uniform except for
m3u8 files!
"""
plex_ids = []
with open(playlist.kodi_path, 'rb') as f:
text = f.read()
if playlist.kodi_extension == 'm3u8':
encoding = 'utf-8'
elif v.PLATFORM == 'Windows':
encoding = 'mbcs'
else:
encoding = DEFAULT_ENCODING
try:
text = text.decode(encoding)
except UnicodeDecodeError:
LOG.warning('Fallback to ISO-8859-1 decoding for %s',
playlist.kodi_path)
text = text.decode('ISO-8859-1')
for entry in _m3u_iterator(text):
plex_id = REGEX_PLEX_ID.search(entry)
if plex_id:
plex_id = plex_id.group(1)
plex_ids.append(plex_id)
else:
# Add-on paths not working, try direct
kodi_id, kodi_type = kodidb.kodiid_from_filename(
playlist.kodi_path, db_type=playlist.type)
if not kodi_id:
continue
with plexdb.Get_Plex_DB() as plex_db:
plex_id = plex_db.getItem_byKodiId(kodi_id, kodi_type)
if plex_id:
plex_ids.append(plex_id)
return plex_ids
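For reference, a hedged sample of the m3u8 content this parser expects; the ids and titles are made up:
SAMPLE_M3U8 = (
    u'#EXTCPlayListM3U::M3U\n'
    u'#EXTINF:7200,Some Movie\n'
    u'plugin://plugin.video.plexkodiconnect.movies/'
    u'?plex_id=12345&plex_type=movie&mode=play&filename=Some%20Movie.mkv\n'
    u'#EXTINF:230,Some Song\n'
    u'/direct/path/to/track.mp3\n')
# _m3u_iterator() yields the line following every '#EXTINF:' header;
# REGEX_PLEX_ID then extracts '12345' from the add-on path, while the direct
# path falls back to the Kodi and Plex DB lookups above.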
def _write_playlist_to_file(playlist, xml):
"""
Feed with playlist [Playlist_Object]. Will write the playlist to an m3u8 file
Returns None or raises PL.PlaylistError
"""
text = u'#EXTCPlayListM3U::M3U\n'
for element in xml:
api = API(element)
text += (u'#EXTINF:%s,%s\n%s\n'
% (api.runtime(), api.title(), api.path()))
text += '\n'
text = text.encode('utf-8')
with open(playlist.kodi_path, 'wb') as f:
f.write(text)
def change_plex_playlist_name(playlist, new_name):
"""
TODO - Renames the existing playlist with new_name [unicode]
"""
pass
def plex_id_from_playlist_path(path):
"""
Given the Kodi playlist path [unicode], this will return the Plex id [str]
or None
"""
with plexdb.Get_Plex_DB() as plex_db:
plex_id = plex_db.plex_id_from_playlist_path(path)
if not plex_id:
LOG.error('Could not find existing entry for playlist path %s', path)
return plex_id
def playlist_object_from_db(path=None):
"""
Returns the playlist as a Playlist_Object for path [unicode] from the Plex
playlists table or None if not found.
"""
playlist = PL.Playlist_Object()
return playlist
def full_sync():
"""
Full sync of playlists between Kodi and Plex. Returns True if successful,
False otherwise
"""
xml = PL.get_all_playlists()
if not xml:
return False
for entry in xml:
# TODO: process each Plex playlist entry (completed in a later part)
pass
class PlaylistEventhandler(watchdog.events.FileSystemEventHandler):
"""
PKC event handler to monitor Kodi playlists saved to disk
"""
@staticmethod
def _event_relevant(event):
"""
Returns True if the event is relevant for PKC, False otherwise (e.g.
when a smart playlist *.xsp is considered)
"""
LOG.debug('event.is_directory: %s, event.src_path: %s',
event.is_directory, event.src_path)
if event.is_directory:
# todo: take care of folder renames
return False
try:
_, extension = event.src_path.rsplit('.', 1)
except ValueError:
return False
if extension.lower() not in SUPPORTED_FILETYPES:
return False
if event.src_path.startswith(v.PLAYLIST_PATH_MIXED):
return False
return True
def on_created(self, event):
if not self._event_relevant(event):
return
LOG.debug('on_created: %s', event.src_path)
playlist = PL.Playlist_Object()
playlist.kodi_path = event.src_path
create_plex_playlist(playlist)
def on_deleted(self, event):
if not self._event_relevant(event):
return
LOG.debug('on_deleted: %s', event.src_path)
playlist = PL.Playlist_Object()
playlist.kodi_path = event.src_path
delete_plex_playlist(playlist)
def on_modified(self, event):
if not self._event_relevant(event):
return
LOG.debug('on_modified: %s', event.src_path)
playlist = PL.Playlist_Object()
playlist.kodi_path = event.src_path
delete_plex_playlist(playlist)
create_plex_playlist(playlist)
def on_moved(self, event):
if not self._event_relevant(event):
return
LOG.debug('on_moved: %s to %s', event.src_path, event.dest_path)
playlist = PL.Playlist_Object()
playlist.id = plex_id_from_playlist_path(event.src_path)
if not playlist.id:
return
playlist.kodi_path = event.dest_path
change_plex_playlist_name(playlist, playlist.kodi_filename)
update_plex_table(playlist)
def kodi_playlist_monitor():
"""
Monitors the Kodi playlist folder special://profile/playlists for the user.
Will thus catch all changes on the Kodi side of things.
Returns a watchdog Observer instance. Be sure to use
observer.stop() (and maybe observer.join()) to shut down properly
"""
event_handler = PlaylistEventhandler()
observer = watchdog.observers.Observer()
observer.schedule(event_handler, v.PLAYLIST_PATH, recursive=True)
observer.start()
return observer
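A minimal usage sketch (mirroring how librarysync drives the monitor); do_other_work() is a placeholder for the caller's own loop:
observer = kodi_playlist_monitor()
try:
    do_other_work()  # placeholder for the main sync loop
finally:
    observer.stop()
    observer.join()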

View file

@ -392,3 +392,47 @@ class Plex_DB_Functions():
result.append({'plex_id': row[0],
'plex_type': row[1]})
return result
def plex_id_from_playlist_path(self, path):
"""
Given the Kodi playlist path [unicode], this will return the Plex id
[str] or None
"""
query = 'SELECT plex_id FROM playlists WHERE kodi_path = ? LIMIT 1'
self.plexcursor.execute(query, (path, ))
try:
plex_id = self.plexcursor.fetchone()[0]
except TypeError:
plex_id = None
return plex_id
def insert_playlist_entry(self, playlist):
"""
Inserts or modifies an existing entry in the Plex playlists table.
"""
query = '''
INSERT OR REPLACE INTO playlists(
plex_id, plex_name, plex_updatedat, kodi_path, kodi_type,
kodi_hash)
VALUES (?, ?, ?, ?, ?, ?)
'''
self.plexcursor.execute(query,
(playlist.plex_id, playlist.plex_name,
playlist.plex_updatedat, playlist.kodi_path,
playlist.kodi_type, playlist.kodi_hash))
def delete_playlist_entry(self, playlist):
"""
Removes the entry for playlist [Playlist_Object] from the Plex
playlists table.
Be sure to either set playlist.id or playlist.kodi_path
"""
if playlist.id:
query = 'DELETE FROM playlists WHERE plex_id = ?'
var = playlist.id
elif playlist.kodi_path:
query = 'DELETE FROM playlists WHERE kodi_path = ?'
var = playlist.kodi_path
else:
raise RuntimeError('Cannot delete playlist: %s' % playlist)
self.plexcursor.execute(query, (var, ))
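A hedged sketch of how these table helpers fit together, assuming a filled-in Playlist_Object and the module's Get_Plex_DB context manager (used as plexdb.Get_Plex_DB() in playlists.py above):
with Get_Plex_DB() as plex_db:
    plex_db.insert_playlist_entry(playlist)   # add or update the row
    plex_id = plex_db.plex_id_from_playlist_path(playlist.kodi_path)
    plex_db.delete_playlist_entry(playlist)   # remove it again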

View file

@ -17,6 +17,7 @@ from os.path import join
from os import remove, walk, makedirs
from shutil import rmtree
from urllib import quote_plus
import hashlib
import xbmc
import xbmcaddon
@ -982,6 +983,21 @@ def delete_nodes():
break
def generate_file_md5(path):
"""
Generates the md5 hash value for the file located at path [unicode]
Returns a unique string containing only hexadecimal digits
"""
m = hashlib.md5()
with open(path, 'rb') as f:
while True:
piece = f.read(32768)
if not piece:
break
m.update(piece)
return m.hexdigest()
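A short sketch of the intended use: compare the current hash with the kodi_hash stored in the playlists table to detect on-disk changes (playlist is assumed to carry both attributes):
current_hash = generate_file_md5(playlist.kodi_path)
if current_hash != playlist.kodi_hash:
    # The file changed since the last sync -- push it to the PMS again
    playlist.kodi_hash = current_hash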
###############################################################################
# WRAPPERS

View file

@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
from os.path import join
import xbmc
from xbmcaddon import Addon
@ -39,6 +41,10 @@ KODILANGUAGE = xbmc.getLanguage(xbmc.ISO_639_1)
KODIVERSION = int(xbmc.getInfoLabel("System.BuildVersion")[:2])
KODILONGVERSION = xbmc.getInfoLabel('System.BuildVersion')
KODI_PROFILE = try_decode(xbmc.translatePath("special://profile"))
PLAYLIST_PATH = join(KODI_PROFILE, 'playlists')
PLAYLIST_PATH_MIXED = join(PLAYLIST_PATH, 'mixed')
PLAYLIST_PATH_VIDEO = join(PLAYLIST_PATH, 'video')
PLAYLIST_PATH_MUSIC = join(PLAYLIST_PATH, 'music')
if xbmc.getCondVisibility('system.platform.osx'):
PLATFORM = "MacOSX"

View file

@ -0,0 +1,17 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View file

@ -0,0 +1,615 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.events
:synopsis: File system events and event handlers.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
Event Classes
-------------
.. autoclass:: FileSystemEvent
:members:
:show-inheritance:
:inherited-members:
.. autoclass:: FileSystemMovedEvent
:members:
:show-inheritance:
.. autoclass:: FileMovedEvent
:members:
:show-inheritance:
.. autoclass:: DirMovedEvent
:members:
:show-inheritance:
.. autoclass:: FileModifiedEvent
:members:
:show-inheritance:
.. autoclass:: DirModifiedEvent
:members:
:show-inheritance:
.. autoclass:: FileCreatedEvent
:members:
:show-inheritance:
.. autoclass:: DirCreatedEvent
:members:
:show-inheritance:
.. autoclass:: FileDeletedEvent
:members:
:show-inheritance:
.. autoclass:: DirDeletedEvent
:members:
:show-inheritance:
Event Handler Classes
---------------------
.. autoclass:: FileSystemEventHandler
:members:
:show-inheritance:
.. autoclass:: PatternMatchingEventHandler
:members:
:show-inheritance:
.. autoclass:: RegexMatchingEventHandler
:members:
:show-inheritance:
.. autoclass:: LoggingEventHandler
:members:
:show-inheritance:
"""
import os.path
import logging
import re
from pathtools.patterns import match_any_paths
from watchdog.utils import has_attribute
from watchdog.utils import unicode_paths
EVENT_TYPE_MOVED = 'moved'
EVENT_TYPE_DELETED = 'deleted'
EVENT_TYPE_CREATED = 'created'
EVENT_TYPE_MODIFIED = 'modified'
class FileSystemEvent(object):
"""
Immutable type that represents a file system event that is triggered
when a change occurs on the monitored file system.
All FileSystemEvent objects are required to be immutable and hence
can be used as keys in dictionaries or be added to sets.
"""
event_type = None
"""The type of the event as a string."""
is_directory = False
"""True if event was emitted for a directory; False otherwise."""
def __init__(self, src_path):
self._src_path = src_path
@property
def src_path(self):
"""Source path of the file system object that triggered this event."""
return self._src_path
def __str__(self):
return self.__repr__()
def __repr__(self):
return ("<%(class_name)s: event_type=%(event_type)s, "
"src_path=%(src_path)r, "
"is_directory=%(is_directory)s>"
) % (dict(
class_name=self.__class__.__name__,
event_type=self.event_type,
src_path=self.src_path,
is_directory=self.is_directory))
# Used for comparison of events.
@property
def key(self):
return (self.event_type, self.src_path, self.is_directory)
def __eq__(self, event):
return self.key == event.key
def __ne__(self, event):
return self.key != event.key
def __hash__(self):
return hash(self.key)
class FileSystemMovedEvent(FileSystemEvent):
"""
File system event representing any kind of file system movement.
"""
event_type = EVENT_TYPE_MOVED
def __init__(self, src_path, dest_path):
super(FileSystemMovedEvent, self).__init__(src_path)
self._dest_path = dest_path
@property
def dest_path(self):
"""The destination path of the move event."""
return self._dest_path
# Used for hashing this as an immutable object.
@property
def key(self):
return (self.event_type, self.src_path, self.dest_path, self.is_directory)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r, "
"dest_path=%(dest_path)r, "
"is_directory=%(is_directory)s>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path,
is_directory=self.is_directory))
# File events.
class FileDeletedEvent(FileSystemEvent):
"""File system event representing file deletion on the file system."""
event_type = EVENT_TYPE_DELETED
def __init__(self, src_path):
super(FileDeletedEvent, self).__init__(src_path)
def __repr__(self):
return "<%(class_name)s: src_path=%(src_path)r>" %\
dict(class_name=self.__class__.__name__,
src_path=self.src_path)
class FileModifiedEvent(FileSystemEvent):
"""File system event representing file modification on the file system."""
event_type = EVENT_TYPE_MODIFIED
def __init__(self, src_path):
super(FileModifiedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class FileCreatedEvent(FileSystemEvent):
"""File system event representing file creation on the file system."""
event_type = EVENT_TYPE_CREATED
def __init__(self, src_path):
super(FileCreatedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class FileMovedEvent(FileSystemMovedEvent):
"""File system event representing file movement on the file system."""
def __init__(self, src_path, dest_path):
super(FileMovedEvent, self).__init__(src_path, dest_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r, "
"dest_path=%(dest_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path))
# Directory events.
class DirDeletedEvent(FileSystemEvent):
"""File system event representing directory deletion on the file system."""
event_type = EVENT_TYPE_DELETED
is_directory = True
def __init__(self, src_path):
super(DirDeletedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class DirModifiedEvent(FileSystemEvent):
"""
File system event representing directory modification on the file system.
"""
event_type = EVENT_TYPE_MODIFIED
is_directory = True
def __init__(self, src_path):
super(DirModifiedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class DirCreatedEvent(FileSystemEvent):
"""File system event representing directory creation on the file system."""
event_type = EVENT_TYPE_CREATED
is_directory = True
def __init__(self, src_path):
super(DirCreatedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
class DirMovedEvent(FileSystemMovedEvent):
"""File system event representing directory movement on the file system."""
is_directory = True
def __init__(self, src_path, dest_path):
super(DirMovedEvent, self).__init__(src_path, dest_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r, "
"dest_path=%(dest_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path,
dest_path=self.dest_path))
class FileSystemEventHandler(object):
"""
Base file system event handler that you can override methods from.
"""
def dispatch(self, event):
"""Dispatches events to the appropriate methods.
:param event:
The event object representing the file system event.
:type event:
:class:`FileSystemEvent`
"""
self.on_any_event(event)
_method_map = {
EVENT_TYPE_MODIFIED: self.on_modified,
EVENT_TYPE_MOVED: self.on_moved,
EVENT_TYPE_CREATED: self.on_created,
EVENT_TYPE_DELETED: self.on_deleted,
}
event_type = event.event_type
_method_map[event_type](event)
def on_any_event(self, event):
"""Catch-all event handler.
:param event:
The event object representing the file system event.
:type event:
:class:`FileSystemEvent`
"""
def on_moved(self, event):
"""Called when a file or a directory is moved or renamed.
:param event:
Event representing file/directory movement.
:type event:
:class:`DirMovedEvent` or :class:`FileMovedEvent`
"""
def on_created(self, event):
"""Called when a file or directory is created.
:param event:
Event representing file/directory creation.
:type event:
:class:`DirCreatedEvent` or :class:`FileCreatedEvent`
"""
def on_deleted(self, event):
"""Called when a file or directory is deleted.
:param event:
Event representing file/directory deletion.
:type event:
:class:`DirDeletedEvent` or :class:`FileDeletedEvent`
"""
def on_modified(self, event):
"""Called when a file or directory is modified.
:param event:
Event representing file/directory modification.
:type event:
:class:`DirModifiedEvent` or :class:`FileModifiedEvent`
"""
class PatternMatchingEventHandler(FileSystemEventHandler):
"""
Matches given patterns with file paths associated with occurring events.
"""
def __init__(self, patterns=None, ignore_patterns=None,
ignore_directories=False, case_sensitive=False):
super(PatternMatchingEventHandler, self).__init__()
self._patterns = patterns
self._ignore_patterns = ignore_patterns
self._ignore_directories = ignore_directories
self._case_sensitive = case_sensitive
@property
def patterns(self):
"""
(Read-only)
Patterns to allow matching event paths.
"""
return self._patterns
@property
def ignore_patterns(self):
"""
(Read-only)
Patterns to ignore matching event paths.
"""
return self._ignore_patterns
@property
def ignore_directories(self):
"""
(Read-only)
``True`` if directories should be ignored; ``False`` otherwise.
"""
return self._ignore_directories
@property
def case_sensitive(self):
"""
(Read-only)
``True`` if path names should be matched sensitive to case; ``False``
otherwise.
"""
return self._case_sensitive
def dispatch(self, event):
"""Dispatches events to the appropriate methods.
:param event:
The event object representing the file system event.
:type event:
:class:`FileSystemEvent`
"""
if self.ignore_directories and event.is_directory:
return
paths = []
if has_attribute(event, 'dest_path'):
paths.append(unicode_paths.decode(event.dest_path))
if event.src_path:
paths.append(unicode_paths.decode(event.src_path))
if match_any_paths(paths,
included_patterns=self.patterns,
excluded_patterns=self.ignore_patterns,
case_sensitive=self.case_sensitive):
self.on_any_event(event)
_method_map = {
EVENT_TYPE_MODIFIED: self.on_modified,
EVENT_TYPE_MOVED: self.on_moved,
EVENT_TYPE_CREATED: self.on_created,
EVENT_TYPE_DELETED: self.on_deleted,
}
event_type = event.event_type
_method_map[event_type](event)
class RegexMatchingEventHandler(FileSystemEventHandler):
"""
Matches given regexes with file paths associated with occurring events.
"""
def __init__(self, regexes=[r".*"], ignore_regexes=[],
ignore_directories=False, case_sensitive=False):
super(RegexMatchingEventHandler, self).__init__()
if case_sensitive:
self._regexes = [re.compile(r) for r in regexes]
self._ignore_regexes = [re.compile(r) for r in ignore_regexes]
else:
self._regexes = [re.compile(r, re.I) for r in regexes]
self._ignore_regexes = [re.compile(r, re.I) for r in ignore_regexes]
self._ignore_directories = ignore_directories
self._case_sensitive = case_sensitive
@property
def regexes(self):
"""
(Read-only)
Regexes to allow matching event paths.
"""
return self._regexes
@property
def ignore_regexes(self):
"""
(Read-only)
Regexes to ignore matching event paths.
"""
return self._ignore_regexes
@property
def ignore_directories(self):
"""
(Read-only)
``True`` if directories should be ignored; ``False`` otherwise.
"""
return self._ignore_directories
@property
def case_sensitive(self):
"""
(Read-only)
``True`` if path names should be matched sensitive to case; ``False``
otherwise.
"""
return self._case_sensitive
def dispatch(self, event):
"""Dispatches events to the appropriate methods.
:param event:
The event object representing the file system event.
:type event:
:class:`FileSystemEvent`
"""
if self.ignore_directories and event.is_directory:
return
paths = []
if has_attribute(event, 'dest_path'):
paths.append(unicode_paths.decode(event.dest_path))
if event.src_path:
paths.append(unicode_paths.decode(event.src_path))
if any(r.match(p) for r in self.ignore_regexes for p in paths):
return
if any(r.match(p) for r in self.regexes for p in paths):
self.on_any_event(event)
_method_map = {
EVENT_TYPE_MODIFIED: self.on_modified,
EVENT_TYPE_MOVED: self.on_moved,
EVENT_TYPE_CREATED: self.on_created,
EVENT_TYPE_DELETED: self.on_deleted,
}
event_type = event.event_type
_method_map[event_type](event)
class LoggingEventHandler(FileSystemEventHandler):
"""Logs all the events captured."""
def on_moved(self, event):
super(LoggingEventHandler, self).on_moved(event)
what = 'directory' if event.is_directory else 'file'
logging.info("Moved %s: from %s to %s", what, event.src_path,
event.dest_path)
def on_created(self, event):
super(LoggingEventHandler, self).on_created(event)
what = 'directory' if event.is_directory else 'file'
logging.info("Created %s: %s", what, event.src_path)
def on_deleted(self, event):
super(LoggingEventHandler, self).on_deleted(event)
what = 'directory' if event.is_directory else 'file'
logging.info("Deleted %s: %s", what, event.src_path)
def on_modified(self, event):
super(LoggingEventHandler, self).on_modified(event)
what = 'directory' if event.is_directory else 'file'
logging.info("Modified %s: %s", what, event.src_path)
class LoggingFileSystemEventHandler(LoggingEventHandler):
"""
For backwards-compatibility. Please use :class:`LoggingEventHandler`
instead.
"""
def generate_sub_moved_events(src_dir_path, dest_dir_path):
"""Generates an event list of :class:`DirMovedEvent` and
:class:`FileMovedEvent` objects for all the files and directories within
the given moved directory that were moved along with the directory.
:param src_dir_path:
The source path of the moved directory.
:param dest_dir_path:
The destination path of the moved directory.
:returns:
An iterable of file system events of type :class:`DirMovedEvent` and
:class:`FileMovedEvent`.
"""
for root, directories, filenames in os.walk(dest_dir_path):
for directory in directories:
full_path = os.path.join(root, directory)
renamed_path = full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None
yield DirMovedEvent(renamed_path, full_path)
for filename in filenames:
full_path = os.path.join(root, filename)
renamed_path = full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None
yield FileMovedEvent(renamed_path, full_path)
def generate_sub_created_events(src_dir_path):
"""Generates an event list of :class:`DirCreatedEvent` and
:class:`FileCreatedEvent` objects for all the files and directories within
the given moved directory that were moved along with the directory.
:param src_dir_path:
The source path of the created directory.
:returns:
An iterable of file system events of type :class:`DirCreatedEvent` and
:class:`FileCreatedEvent`.
"""
for root, directories, filenames in os.walk(src_dir_path):
for directory in directories:
yield DirCreatedEvent(os.path.join(root, directory))
for filename in filenames:
yield FileCreatedEvent(os.path.join(root, filename))

View file

@ -0,0 +1,92 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers
:synopsis: Observer that picks a native implementation if available.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
Classes
=======
.. autoclass:: Observer
:members:
:show-inheritance:
:inherited-members:
Observer thread that schedules watching directories and dispatches
calls to event handlers.
You can also import platform-specific classes directly and use them instead
of :class:`Observer`. Here is a list of implemented observer classes:
============== ================================ ==============================
Class Platforms Note
============== ================================ ==============================
|Inotify| Linux 2.6.13+ ``inotify(7)`` based observer
|FSEvents| Mac OS X FSEvents based observer
|Kqueue| Mac OS X and BSD with kqueue(2) ``kqueue(2)`` based observer
|WinApi| MS Windows Windows API-based observer
|Polling| Any fallback implementation
============== ================================ ==============================
.. |Inotify| replace:: :class:`.inotify.InotifyObserver`
.. |FSEvents| replace:: :class:`.fsevents.FSEventsObserver`
.. |Kqueue| replace:: :class:`.kqueue.KqueueObserver`
.. |WinApi| replace:: :class:`.read_directory_changes.WindowsApiObserver`
.. |WinApiAsync| replace:: :class:`.read_directory_changes_async.WindowsApiAsyncObserver`
.. |Polling| replace:: :class:`.polling.PollingObserver`
"""
import warnings
from watchdog.utils import platform
from watchdog.utils import UnsupportedLibc
if platform.is_linux():
try:
from .inotify import InotifyObserver as Observer
except UnsupportedLibc:
from .polling import PollingObserver as Observer
elif platform.is_darwin():
# FIXME: catching too broad. Error prone
try:
from .fsevents import FSEventsObserver as Observer
except:
try:
from .kqueue import KqueueObserver as Observer
warnings.warn("Failed to import fsevents. Fall back to kqueue")
except:
from .polling import PollingObserver as Observer
warnings.warn("Failed to import fsevents and kqueue. Fall back to polling.")
elif platform.is_bsd():
from .kqueue import KqueueObserver as Observer
elif platform.is_windows():
# TODO: find a reliable way of checking Windows version and import
# polling explicitly for Windows XP
try:
from .read_directory_changes import WindowsApiObserver as Observer
except:
from .polling import PollingObserver as Observer
warnings.warn("Failed to import read_directory_changes. Fall back to polling.")
else:
from .polling import PollingObserver as Observer
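Whichever implementation ends up bound to ``Observer`` above, it is driven through the same API. A minimal usage sketch, not part of the vendored code; the watched path and sleep time are arbitrary:

import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class PrintingHandler(FileSystemEventHandler):
    """Example handler that simply prints every event it receives."""
    def on_any_event(self, event):
        print(event)


observer = Observer()
observer.schedule(PrintingHandler(), '/tmp', recursive=True)
observer.start()
try:
    time.sleep(5)          # let some events come in
finally:
    observer.stop()
    observer.join()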

View file

@@ -0,0 +1,369 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import threading
from watchdog.utils import BaseThread
from watchdog.utils.compat import queue
from watchdog.utils.bricks import SkipRepeatsQueue
DEFAULT_EMITTER_TIMEOUT = 1 # in seconds.
DEFAULT_OBSERVER_TIMEOUT = 1 # in seconds.
# Collection classes
class EventQueue(SkipRepeatsQueue):
"""Thread-safe event queue based on a special queue that skips adding
the same event (:class:`FileSystemEvent`) multiple times consecutively.
Thus avoiding dispatching multiple event handling
calls when multiple identical events are produced quicker than an observer
can consume them.
"""
class ObservedWatch(object):
"""An scheduled watch.
:param path:
Path string.
:param recursive:
``True`` if watch is recursive; ``False`` otherwise.
"""
def __init__(self, path, recursive):
self._path = path
self._is_recursive = recursive
@property
def path(self):
"""The path that this watch monitors."""
return self._path
@property
def is_recursive(self):
"""Determines whether subdirectories are watched for the path."""
return self._is_recursive
@property
def key(self):
return self.path, self.is_recursive
def __eq__(self, watch):
return self.key == watch.key
def __ne__(self, watch):
return self.key != watch.key
def __hash__(self):
return hash(self.key)
def __repr__(self):
return "<ObservedWatch: path=%s, is_recursive=%s>" % (
self.path, self.is_recursive)
# Observer classes
class EventEmitter(BaseThread):
"""
Producer thread base class subclassed by event emitters
that generate events and populate a queue with them.
:param event_queue:
The event queue to populate with generated events.
:type event_queue:
:class:`watchdog.events.EventQueue`
:param watch:
The watch to observe and produce events for.
:type watch:
:class:`ObservedWatch`
:param timeout:
Timeout (in seconds) between successive attempts at reading events.
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
BaseThread.__init__(self)
self._event_queue = event_queue
self._watch = watch
self._timeout = timeout
@property
def timeout(self):
"""
Blocking timeout for reading events.
"""
return self._timeout
@property
def watch(self):
"""
The watch associated with this emitter.
"""
return self._watch
def queue_event(self, event):
"""
Queues a single event.
:param event:
Event to be queued.
:type event:
An instance of :class:`watchdog.events.FileSystemEvent`
or a subclass.
"""
self._event_queue.put((event, self.watch))
def queue_events(self, timeout):
"""Override this method to populate the event queue with events
per interval period.
:param timeout:
Timeout (in seconds) between successive attempts at
reading events.
:type timeout:
``float``
"""
def run(self):
try:
while self.should_keep_running():
self.queue_events(self.timeout)
finally:
pass
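``queue_events()`` is the single hook that platform-specific emitters override. A toy sketch of such a subclass; ``DirListingEmitter`` is a hypothetical name and not part of watchdog:

import os
import time

from watchdog.events import FileCreatedEvent
from watchdog.observers.api import EventEmitter


class DirListingEmitter(EventEmitter):
    """Toy emitter: pretends every directory entry was just created."""

    def queue_events(self, timeout):
        # A real emitter would block on an OS facility for up to `timeout`
        # seconds instead of sleeping.
        time.sleep(timeout)
        for name in os.listdir(self.watch.path):
            self.queue_event(
                FileCreatedEvent(os.path.join(self.watch.path, name)))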
class EventDispatcher(BaseThread):
"""
Consumer thread base class subclassed by event observer threads
that dispatch events from an event queue to appropriate event handlers.
:param timeout:
Event queue blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseThread.__init__(self)
self._event_queue = EventQueue()
self._timeout = timeout
@property
def timeout(self):
"""Event queue block timeout."""
return self._timeout
@property
def event_queue(self):
"""The event queue which is populated with file system events
by emitters and from which events are dispatched by a dispatcher
thread."""
return self._event_queue
def dispatch_events(self, event_queue, timeout):
"""Override this method to consume events from an event queue, blocking
on the queue for the specified timeout before raising :class:`queue.Empty`.
:param event_queue:
Event queue to populate with one set of events.
:type event_queue:
:class:`EventQueue`
:param timeout:
Interval period (in seconds) to wait before timing out on the
event queue.
:type timeout:
``float``
:raises:
:class:`queue.Empty`
"""
def run(self):
while self.should_keep_running():
try:
self.dispatch_events(self.event_queue, self.timeout)
except queue.Empty:
continue
class BaseObserver(EventDispatcher):
"""Base observer."""
def __init__(self, emitter_class, timeout=DEFAULT_OBSERVER_TIMEOUT):
EventDispatcher.__init__(self, timeout)
self._emitter_class = emitter_class
self._lock = threading.RLock()
self._watches = set()
self._handlers = dict()
self._emitters = set()
self._emitter_for_watch = dict()
def _add_emitter(self, emitter):
self._emitter_for_watch[emitter.watch] = emitter
self._emitters.add(emitter)
def _remove_emitter(self, emitter):
del self._emitter_for_watch[emitter.watch]
self._emitters.remove(emitter)
emitter.stop()
try:
emitter.join()
except RuntimeError:
pass
def _clear_emitters(self):
for emitter in self._emitters:
emitter.stop()
for emitter in self._emitters:
try:
emitter.join()
except RuntimeError:
pass
self._emitters.clear()
self._emitter_for_watch.clear()
def _add_handler_for_watch(self, event_handler, watch):
if watch not in self._handlers:
self._handlers[watch] = set()
self._handlers[watch].add(event_handler)
def _remove_handlers_for_watch(self, watch):
del self._handlers[watch]
@property
def emitters(self):
"""Returns event emitter created by this observer."""
return self._emitters
def start(self):
for emitter in self._emitters:
emitter.start()
super(BaseObserver, self).start()
def schedule(self, event_handler, path, recursive=False):
"""
Schedules watching a path and calls appropriate methods specified
in the given event handler in response to file system events.
:param event_handler:
An event handler instance that has appropriate event handling
methods which will be called by the observer in response to
file system events.
:type event_handler:
:class:`watchdog.events.FileSystemEventHandler` or a subclass
:param path:
Directory path that will be monitored.
:type path:
``str``
:param recursive:
``True`` if events will be emitted for sub-directories
traversed recursively; ``False`` otherwise.
:type recursive:
``bool``
:return:
An :class:`ObservedWatch` object instance representing
a watch.
"""
with self._lock:
watch = ObservedWatch(path, recursive)
self._add_handler_for_watch(event_handler, watch)
# If we don't have an emitter for this watch already, create it.
if self._emitter_for_watch.get(watch) is None:
emitter = self._emitter_class(event_queue=self.event_queue,
watch=watch,
timeout=self.timeout)
self._add_emitter(emitter)
if self.is_alive():
emitter.start()
self._watches.add(watch)
return watch
def add_handler_for_watch(self, event_handler, watch):
"""Adds a handler for the given watch.
:param event_handler:
An event handler instance that has appropriate event handling
methods which will be called by the observer in response to
file system events.
:type event_handler:
:class:`watchdog.events.FileSystemEventHandler` or a subclass
:param watch:
The watch to add a handler for.
:type watch:
An instance of :class:`ObservedWatch` or a subclass of
:class:`ObservedWatch`
"""
with self._lock:
self._add_handler_for_watch(event_handler, watch)
def remove_handler_for_watch(self, event_handler, watch):
"""Removes a handler for the given watch.
:param event_handler:
An event handler instance that has appropriate event handling
methods which will be called by the observer in response to
file system events.
:type event_handler:
:class:`watchdog.events.FileSystemEventHandler` or a subclass
:param watch:
The watch to remove a handler for.
:type watch:
An instance of :class:`ObservedWatch` or a subclass of
:class:`ObservedWatch`
"""
with self._lock:
self._handlers[watch].remove(event_handler)
def unschedule(self, watch):
"""Unschedules a watch.
:param watch:
The watch to unschedule.
:type watch:
An instance of :class:`ObservedWatch` or a subclass of
:class:`ObservedWatch`
"""
with self._lock:
emitter = self._emitter_for_watch[watch]
del self._handlers[watch]
self._remove_emitter(emitter)
self._watches.remove(watch)
def unschedule_all(self):
"""Unschedules all watches and detaches all associated event
handlers."""
with self._lock:
self._handlers.clear()
self._clear_emitters()
self._watches.clear()
def on_thread_stop(self):
self.unschedule_all()
def dispatch_events(self, event_queue, timeout):
event, watch = event_queue.get(block=True, timeout=timeout)
with self._lock:
# To allow unschedule/stop and safe removal of event handlers
# within event handlers itself, check if the handler is still
# registered after every dispatch.
for handler in list(self._handlers.get(watch, [])):
if handler in self._handlers.get(watch, []):
handler.dispatch(event)
event_queue.task_done()
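Tying the API together: ``schedule()`` returns an :class:`ObservedWatch` that can later be used to attach more handlers or to unschedule the watch. A hedged usage sketch with the polling fallback (path and timings are arbitrary):

import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers.polling import PollingObserver


class ModifiedLogger(FileSystemEventHandler):
    def on_modified(self, event):
        print('modified: %s' % event.src_path)


observer = PollingObserver(timeout=1)
watch = observer.schedule(ModifiedLogger(), '.', recursive=False)
observer.add_handler_for_watch(ModifiedLogger(), watch)  # second handler, same watch
observer.start()
time.sleep(3)
observer.unschedule(watch)     # or observer.unschedule_all()
observer.stop()
observer.join()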

View file

@@ -0,0 +1,172 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.fsevents
:synopsis: FSEvents based emitter implementation.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:platforms: Mac OS X
"""
from __future__ import with_statement
import sys
import threading
import unicodedata
import _watchdog_fsevents as _fsevents
from watchdog.events import (
FileDeletedEvent,
FileModifiedEvent,
FileCreatedEvent,
FileMovedEvent,
DirDeletedEvent,
DirModifiedEvent,
DirCreatedEvent,
DirMovedEvent
)
from watchdog.utils.dirsnapshot import DirectorySnapshot
from watchdog.observers.api import (
BaseObserver,
EventEmitter,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT
)
class FSEventsEmitter(EventEmitter):
"""
Mac OS X FSEvents Emitter class.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._lock = threading.Lock()
self.snapshot = DirectorySnapshot(watch.path, watch.is_recursive)
def on_thread_stop(self):
_fsevents.remove_watch(self.watch)
_fsevents.stop(self)
def queue_events(self, timeout):
with self._lock:
if not self.watch.is_recursive\
and self.watch.path not in self.pathnames:
return
new_snapshot = DirectorySnapshot(self.watch.path,
self.watch.is_recursive)
events = new_snapshot - self.snapshot
self.snapshot = new_snapshot
# Files.
for src_path in events.files_deleted:
self.queue_event(FileDeletedEvent(src_path))
for src_path in events.files_modified:
self.queue_event(FileModifiedEvent(src_path))
for src_path in events.files_created:
self.queue_event(FileCreatedEvent(src_path))
for src_path, dest_path in events.files_moved:
self.queue_event(FileMovedEvent(src_path, dest_path))
# Directories.
for src_path in events.dirs_deleted:
self.queue_event(DirDeletedEvent(src_path))
for src_path in events.dirs_modified:
self.queue_event(DirModifiedEvent(src_path))
for src_path in events.dirs_created:
self.queue_event(DirCreatedEvent(src_path))
for src_path, dest_path in events.dirs_moved:
self.queue_event(DirMovedEvent(src_path, dest_path))
def run(self):
try:
def callback(pathnames, flags, emitter=self):
emitter.queue_events(emitter.timeout)
# for pathname, flag in zip(pathnames, flags):
# if emitter.watch.is_recursive: # and pathname != emitter.watch.path:
# new_sub_snapshot = DirectorySnapshot(pathname, True)
# old_sub_snapshot = self.snapshot.copy(pathname)
# diff = new_sub_snapshot - old_sub_snapshot
# self.snapshot += new_subsnapshot
# else:
# new_snapshot = DirectorySnapshot(emitter.watch.path, False)
# diff = new_snapshot - emitter.snapshot
# emitter.snapshot = new_snapshot
# INFO: FSEvents reports directory notifications recursively
# by default, so we do not need to add subdirectory paths.
#pathnames = set([self.watch.path])
# if self.watch.is_recursive:
# for root, directory_names, _ in os.walk(self.watch.path):
# for directory_name in directory_names:
# full_path = absolute_path(
# os.path.join(root, directory_name))
# pathnames.add(full_path)
self.pathnames = [self.watch.path]
_fsevents.add_watch(self,
self.watch,
callback,
self.pathnames)
_fsevents.read_events(self)
except Exception as e:
pass
class FSEventsObserver(BaseObserver):
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=FSEventsEmitter,
timeout=timeout)
def schedule(self, event_handler, path, recursive=False):
# Python 2/3 compat
try:
str_class = unicode
except NameError:
str_class = str
# Fix for issue #26: Trace/BPT error when given a unicode path
# string. https://github.com/gorakhargosh/watchdog/issues#issue/26
if isinstance(path, str_class):
#path = unicode(path, 'utf-8')
path = unicodedata.normalize('NFC', path)
# We only encode the path in Python 2 for backwards compatibility.
# On Python 3 we want the path to stay as unicode if possible for
# the sake of path matching not having to be rewritten to use the
# bytes API instead of strings. The _watchdog_fsevent.so code for
# Python 3 can handle both str and bytes paths, which is why we
# do not HAVE to encode it with Python 3. The Python 2 code in
# _watchdog_fsevents.so was not changed for the sake of backwards
# compatibility.
if sys.version_info < (3,):
path = path.encode('utf-8')
return BaseObserver.schedule(self, event_handler, path, recursive)
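Note that this emitter does not decode individual FSEvents flags; it simply takes a fresh :class:`DirectorySnapshot` and diffs it against the previous one. The same snapshot arithmetic can be tried in isolation (the temporary directory below is only an example):

import os
import tempfile

from watchdog.utils.dirsnapshot import DirectorySnapshot

path = tempfile.mkdtemp()
before = DirectorySnapshot(path, True)        # path, recursive

open(os.path.join(path, 'new_file'), 'w').close()

after = DirectorySnapshot(path, True)
diff = after - before                         # same operation queue_events() uses
print(diff.files_created)                     # -> ['<path>/new_file']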

View file

@@ -0,0 +1,240 @@
# -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.fsevents2
:synopsis: FSEvents based emitter implementation.
:platforms: Mac OS X
"""
import os
import logging
import unicodedata
from threading import Thread
from watchdog.utils.compat import queue
from watchdog.events import (
FileDeletedEvent,
FileModifiedEvent,
FileCreatedEvent,
FileMovedEvent,
DirDeletedEvent,
DirModifiedEvent,
DirCreatedEvent,
DirMovedEvent
)
from watchdog.observers.api import (
BaseObserver,
EventEmitter,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT,
)
# pyobjc
import AppKit
from FSEvents import (
FSEventStreamCreate,
CFRunLoopGetCurrent,
FSEventStreamScheduleWithRunLoop,
FSEventStreamStart,
CFRunLoopRun,
CFRunLoopStop,
FSEventStreamStop,
FSEventStreamInvalidate,
FSEventStreamRelease,
)
from FSEvents import (
kCFAllocatorDefault,
kCFRunLoopDefaultMode,
kFSEventStreamEventIdSinceNow,
kFSEventStreamCreateFlagNoDefer,
kFSEventStreamCreateFlagFileEvents,
kFSEventStreamEventFlagItemCreated,
kFSEventStreamEventFlagItemRemoved,
kFSEventStreamEventFlagItemInodeMetaMod,
kFSEventStreamEventFlagItemRenamed,
kFSEventStreamEventFlagItemModified,
kFSEventStreamEventFlagItemFinderInfoMod,
kFSEventStreamEventFlagItemChangeOwner,
kFSEventStreamEventFlagItemXattrMod,
kFSEventStreamEventFlagItemIsFile,
kFSEventStreamEventFlagItemIsDir,
kFSEventStreamEventFlagItemIsSymlink,
)
logger = logging.getLogger(__name__)
class FSEventsQueue(Thread):
""" Low level FSEvents client. """
def __init__(self, path):
Thread.__init__(self)
self._queue = queue.Queue()
self._run_loop = None
if isinstance(path, bytes):
path = path.decode('utf-8')
self._path = unicodedata.normalize('NFC', path)
context = None
latency = 1.0
self._stream_ref = FSEventStreamCreate(
kCFAllocatorDefault, self._callback, context, [self._path],
kFSEventStreamEventIdSinceNow, latency,
kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents)
if self._stream_ref is None:
raise IOError("FSEvents. Could not create stream.")
def run(self):
pool = AppKit.NSAutoreleasePool.alloc().init()
self._run_loop = CFRunLoopGetCurrent()
FSEventStreamScheduleWithRunLoop(
self._stream_ref, self._run_loop, kCFRunLoopDefaultMode)
if not FSEventStreamStart(self._stream_ref):
FSEventStreamInvalidate(self._stream_ref)
FSEventStreamRelease(self._stream_ref)
raise IOError("FSEvents. Could not start stream.")
CFRunLoopRun()
FSEventStreamStop(self._stream_ref)
FSEventStreamInvalidate(self._stream_ref)
FSEventStreamRelease(self._stream_ref)
del pool
# Make sure waiting thread is notified
self._queue.put(None)
def stop(self):
if self._run_loop is not None:
CFRunLoopStop(self._run_loop)
def _callback(self, streamRef, clientCallBackInfo, numEvents, eventPaths, eventFlags, eventIDs):
events = [NativeEvent(path, flags, _id) for path, flags, _id in
zip(eventPaths, eventFlags, eventIDs)]
logger.debug("FSEvents callback. Got %d events:" % numEvents)
for e in events:
logger.debug(e)
self._queue.put(events)
def read_events(self):
"""
Returns a list of one or more events, or None if there are no more
events to be read.
"""
if not self.is_alive():
return None
return self._queue.get()
class NativeEvent(object):
def __init__(self, path, flags, event_id):
self.path = path
self.flags = flags
self.event_id = event_id
self.is_created = bool(flags & kFSEventStreamEventFlagItemCreated)
self.is_removed = bool(flags & kFSEventStreamEventFlagItemRemoved)
self.is_renamed = bool(flags & kFSEventStreamEventFlagItemRenamed)
self.is_modified = bool(flags & kFSEventStreamEventFlagItemModified)
self.is_change_owner = bool(flags & kFSEventStreamEventFlagItemChangeOwner)
self.is_inode_meta_mod = bool(flags & kFSEventStreamEventFlagItemInodeMetaMod)
self.is_finder_info_mod = bool(flags & kFSEventStreamEventFlagItemFinderInfoMod)
self.is_xattr_mod = bool(flags & kFSEventStreamEventFlagItemXattrMod)
self.is_symlink = bool(flags & kFSEventStreamEventFlagItemIsSymlink)
self.is_directory = bool(flags & kFSEventStreamEventFlagItemIsDir)
@property
def _event_type(self):
if self.is_created: return "Created"
if self.is_removed: return "Removed"
if self.is_renamed: return "Renamed"
if self.is_modified: return "Modified"
if self.is_inode_meta_mod: return "InodeMetaMod"
if self.is_xattr_mod: return "XattrMod"
return "Unknown"
def __repr__(self):
s ="<NativeEvent: path=%s, type=%s, is_dir=%s, flags=%s, id=%s>"
return s % (repr(self.path), self._event_type, self.is_directory, hex(self.flags), self.event_id)
class FSEventsEmitter(EventEmitter):
"""
FSEvents based event emitter. Handles conversion of native events.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._fsevents = FSEventsQueue(watch.path)
self._fsevents.start()
def on_thread_stop(self):
self._fsevents.stop()
def queue_events(self, timeout):
events = self._fsevents.read_events()
if events is None:
return
i = 0
while i < len(events):
event = events[i]
# For some reason the create and remove flags are sometimes also
# set for rename and modify type events, so let those take
# precedence.
if event.is_renamed:
# Internal moves appear to always be consecutive in the same
# buffer and have IDs that differ by exactly one (while others
# don't), making it possible to pair up the two events coming
# from a single move operation. (None of this is documented!)
# Otherwise, guess whether file was moved in or out.
#TODO: handle id wrapping
if (i+1 < len(events) and events[i+1].is_renamed and
events[i+1].event_id == event.event_id + 1):
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(event.path, events[i+1].path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
self.queue_event(DirModifiedEvent(os.path.dirname(events[i+1].path)))
i += 1
elif os.path.exists(event.path):
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
else:
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
#TODO: generate events for tree
elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(event.path))
elif event.is_created:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
elif event.is_removed:
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(event.path))
self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
i += 1
class FSEventsObserver2(BaseObserver):
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=FSEventsEmitter, timeout=timeout)
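The rename-pairing heuristic in ``queue_events()`` above relies only on two renamed events arriving back to back with event IDs that differ by exactly one. A stand-alone illustration using made-up tuples instead of real ``NativeEvent`` objects:

# (kind, path, event_id) stand-ins for NativeEvent
events = [
    ('renamed', '/watched/old_name', 105),
    ('renamed', '/watched/new_name', 106),   # id == previous id + 1 -> same move
    ('created', '/watched/other', 240),
]

i = 0
while i < len(events):
    kind, path, event_id = events[i]
    if (kind == 'renamed' and i + 1 < len(events)
            and events[i + 1][0] == 'renamed'
            and events[i + 1][2] == event_id + 1):
        print('moved: %s -> %s' % (path, events[i + 1][1]))
        i += 1                               # consume the paired event
    else:
        print('%s: %s' % (kind, path))
    i += 1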

View file

@@ -0,0 +1,218 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.inotify
:synopsis: ``inotify(7)`` based emitter implementation.
:author: Sebastien Martini <seb@dbzteam.org>
:author: Luke McCarthy <luke@iogopro.co.uk>
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: Tim Cuthbertson <tim+github@gfxmonk.net>
:platforms: Linux 2.6.13+.
.. ADMONITION:: About system requirements
Recommended minimum kernel version: 2.6.25.
Quote from the inotify(7) man page:
"Inotify was merged into the 2.6.13 Linux kernel. The required library
interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW,
IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)"
Therefore, you must ensure that the system is running at least these
versions of the appropriate libraries and the kernel.
.. ADMONITION:: About recursiveness, event order, and event coalescing
Quote from the inotify(7) man page:
If successive output inotify events produced on the inotify file
descriptor are identical (same wd, mask, cookie, and name) then they
are coalesced into a single event if the older event has not yet been
read (but see BUGS).
The events returned by reading from an inotify file descriptor form
an ordered queue. Thus, for example, it is guaranteed that when
renaming from one directory to another, events will be produced in
the correct order on the inotify file descriptor.
...
Inotify monitoring of directories is not recursive: to monitor
subdirectories under a directory, additional watches must be created.
This emitter implementation therefore automatically adds watches for
sub-directories if running in recursive mode.
Some extremely useful articles and documentation:
.. _inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en
.. _intro to inotify: http://www.linuxjournal.com/article/8478
"""
from __future__ import with_statement
import os
import threading
from .inotify_buffer import InotifyBuffer
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT
)
from watchdog.events import (
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
DirCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
FileCreatedEvent,
generate_sub_moved_events,
generate_sub_created_events,
)
from watchdog.utils import unicode_paths
class InotifyEmitter(EventEmitter):
"""
inotify(7)-based event emitter.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._lock = threading.Lock()
self._inotify = None
def on_thread_start(self):
path = unicode_paths.encode(self.watch.path)
self._inotify = InotifyBuffer(path, self.watch.is_recursive)
def on_thread_stop(self):
if self._inotify:
self._inotify.close()
def queue_events(self, timeout, full_events=False):
#If "full_events" is true, then the method will report unmatched move events as seperate events
#This behavior is by default only called by a InotifyFullEmitter
with self._lock:
event = self._inotify.read_event()
if event is None:
return
if isinstance(event, tuple):
move_from, move_to = event
src_path = self._decode_path(move_from.src_path)
dest_path = self._decode_path(move_to.src_path)
cls = DirMovedEvent if move_from.is_directory else FileMovedEvent
self.queue_event(cls(src_path, dest_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
self.queue_event(DirModifiedEvent(os.path.dirname(dest_path)))
if move_from.is_directory and self.watch.is_recursive:
for sub_event in generate_sub_moved_events(src_path, dest_path):
self.queue_event(sub_event)
return
src_path = self._decode_path(event.src_path)
if event.is_moved_to:
if (full_events):
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(None, src_path))
else:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
if event.is_directory and self.watch.is_recursive:
for sub_event in generate_sub_created_events(src_path):
self.queue_event(sub_event)
elif event.is_attrib:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(src_path))
elif event.is_modify:
cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
self.queue_event(cls(src_path))
elif event.is_delete or (event.is_moved_from and not full_events):
cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_moved_from and full_events:
cls = DirMovedEvent if event.is_directory else FileMovedEvent
self.queue_event(cls(src_path, None))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
elif event.is_create:
cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
self.queue_event(cls(src_path))
self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
def _decode_path(self, path):
""" Decode path only if unicode string was passed to this emitter. """
if isinstance(self.watch.path, bytes):
return path
return unicode_paths.decode(path)
class InotifyFullEmitter(InotifyEmitter):
"""
inotify(7)-based event emitter. By default this class produces move events
even if they are not matched. Such move events will have a ``None`` value
for the unmatched part.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
InotifyEmitter.__init__(self, event_queue, watch, timeout)
def queue_events(self, timeout, events=True):
InotifyEmitter.queue_events(self, timeout, full_events=events)
class InotifyObserver(BaseObserver):
"""
Observer thread that schedules watching directories and dispatches
calls to event handlers.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT, generate_full_events=False):
if (generate_full_events):
BaseObserver.__init__(self, emitter_class=InotifyFullEmitter, timeout=timeout)
else:
BaseObserver.__init__(self, emitter_class=InotifyEmitter,
timeout=timeout)
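A short usage sketch for the classes above (Linux only; the watched path is arbitrary). With ``generate_full_events=True`` the :class:`InotifyFullEmitter` is used, so unmatched moves arrive as move events with one side set to ``None`` instead of plain create/delete events:

import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers.inotify import InotifyObserver


class MoveLogger(FileSystemEventHandler):
    def on_moved(self, event):
        print('%s -> %s' % (event.src_path, event.dest_path))


observer = InotifyObserver(generate_full_events=True)
observer.schedule(MoveLogger(), '/tmp', recursive=True)
observer.start()
time.sleep(5)
observer.stop()
observer.join()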

View file

@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from watchdog.utils import BaseThread
from watchdog.utils.delayed_queue import DelayedQueue
from watchdog.observers.inotify_c import Inotify
logger = logging.getLogger(__name__)
class InotifyBuffer(BaseThread):
"""A wrapper for `Inotify` that holds events for `delay` seconds. During
this time, IN_MOVED_FROM and IN_MOVED_TO events are paired.
"""
delay = 0.5
def __init__(self, path, recursive=False):
BaseThread.__init__(self)
self._queue = DelayedQueue(self.delay)
self._inotify = Inotify(path, recursive)
self.start()
def read_event(self):
"""Returns a single event or a tuple of from/to events in case of a
paired move event. If this buffer has been closed, immediately return
None.
"""
return self._queue.get()
def on_thread_stop(self):
self._inotify.close()
self._queue.close()
def close(self):
self.stop()
self.join()
def run(self):
"""Read event from `inotify` and add them to `queue`. When reading a
IN_MOVE_TO event, remove the previous added matching IN_MOVE_FROM event
and add them back to the queue as a tuple.
"""
deleted_self = False
while self.should_keep_running() and not deleted_self:
inotify_events = self._inotify.read_events()
for inotify_event in inotify_events:
logger.debug("in-event %s", inotify_event)
if inotify_event.is_moved_to:
def matching_from_event(event):
return (not isinstance(event, tuple) and event.is_moved_from
and event.cookie == inotify_event.cookie)
from_event = self._queue.remove(matching_from_event)
if from_event is not None:
self._queue.put((from_event, inotify_event))
else:
logger.debug("could not find matching move_from event")
self._queue.put(inotify_event)
else:
self._queue.put(inotify_event)
if inotify_event.is_delete_self and \
inotify_event.src_path == self._inotify.path:
# Deleted the watched directory, stop watching for events
deleted_self = True
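A sketch of consuming the buffer directly (Linux only; the byte path is an example). Paired moves come out of ``read_event()`` as a ``(moved_from, moved_to)`` tuple, everything else as a single event:

from watchdog.observers.inotify_buffer import InotifyBuffer

buf = InotifyBuffer(b'/tmp', recursive=True)   # this layer works on byte paths
try:
    for _ in range(10):                        # read a handful of events
        event = buf.read_event()
        if event is None:                      # buffer was closed
            break
        if isinstance(event, tuple):           # IN_MOVED_FROM/IN_MOVED_TO pair
            moved_from, moved_to = event
            print('move: %s -> %s' % (moved_from.src_path, moved_to.src_path))
        else:
            print('event: %s' % event)
finally:
    buf.close()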

View file

@@ -0,0 +1,563 @@
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import errno
import struct
import threading
import ctypes
import ctypes.util
from functools import reduce
from ctypes import c_int, c_char_p, c_uint32
from watchdog.utils import has_attribute
from watchdog.utils import UnsupportedLibc
def _load_libc():
libc_path = None
try:
libc_path = ctypes.util.find_library('c')
except (OSError, IOError, RuntimeError):
# Note: find_library will on some platforms raise these undocumented
# errors, e.g. on Android an IOError "No usable temporary directory
# found" will be raised.
pass
if libc_path is not None:
return ctypes.CDLL(libc_path)
# Fallbacks
try:
return ctypes.CDLL('libc.so')
except (OSError, IOError):
return ctypes.CDLL('libc.so.6')
libc = _load_libc()
if not has_attribute(libc, 'inotify_init') or \
not has_attribute(libc, 'inotify_add_watch') or \
not has_attribute(libc, 'inotify_rm_watch'):
raise UnsupportedLibc("Unsupported libc version found: %s" % libc._name)
inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)(
("inotify_add_watch", libc))
inotify_rm_watch = ctypes.CFUNCTYPE(c_int, c_int, c_uint32, use_errno=True)(
("inotify_rm_watch", libc))
inotify_init = ctypes.CFUNCTYPE(c_int, use_errno=True)(
("inotify_init", libc))
class InotifyConstants(object):
# User-space events
IN_ACCESS = 0x00000001 # File was accessed.
IN_MODIFY = 0x00000002 # File was modified.
IN_ATTRIB = 0x00000004 # Meta-data changed.
IN_CLOSE_WRITE = 0x00000008 # Writable file was closed.
IN_CLOSE_NOWRITE = 0x00000010 # Unwritable file closed.
IN_OPEN = 0x00000020 # File was opened.
IN_MOVED_FROM = 0x00000040 # File was moved from X.
IN_MOVED_TO = 0x00000080 # File was moved to Y.
IN_CREATE = 0x00000100 # Subfile was created.
IN_DELETE = 0x00000200 # Subfile was deleted.
IN_DELETE_SELF = 0x00000400 # Self was deleted.
IN_MOVE_SELF = 0x00000800 # Self was moved.
# Helper user-space events.
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE # Close.
IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO # Moves.
# Events sent by the kernel to a watch.
IN_UNMOUNT = 0x00002000 # Backing file system was unmounted.
IN_Q_OVERFLOW = 0x00004000 # Event queued overflowed.
IN_IGNORED = 0x00008000 # File was ignored.
# Special flags.
IN_ONLYDIR = 0x01000000 # Only watch the path if it's a directory.
IN_DONT_FOLLOW = 0x02000000 # Do not follow a symbolic link.
IN_EXCL_UNLINK = 0x04000000 # Exclude events on unlinked objects
IN_MASK_ADD = 0x20000000 # Add to the mask of an existing watch.
IN_ISDIR = 0x40000000 # Event occurred against directory.
IN_ONESHOT = 0x80000000 # Only send event once.
# All user-space events.
IN_ALL_EVENTS = reduce(
lambda x, y: x | y, [
IN_ACCESS,
IN_MODIFY,
IN_ATTRIB,
IN_CLOSE_WRITE,
IN_CLOSE_NOWRITE,
IN_OPEN,
IN_MOVED_FROM,
IN_MOVED_TO,
IN_DELETE,
IN_CREATE,
IN_DELETE_SELF,
IN_MOVE_SELF,
])
# Flags for ``inotify_init1``
IN_CLOEXEC = 0x02000000
IN_NONBLOCK = 0x00004000
# Watchdog's API cares only about these events.
WATCHDOG_ALL_EVENTS = reduce(
lambda x, y: x | y, [
InotifyConstants.IN_MODIFY,
InotifyConstants.IN_ATTRIB,
InotifyConstants.IN_MOVED_FROM,
InotifyConstants.IN_MOVED_TO,
InotifyConstants.IN_CREATE,
InotifyConstants.IN_DELETE,
InotifyConstants.IN_DELETE_SELF,
InotifyConstants.IN_DONT_FOLLOW,
])
class inotify_event_struct(ctypes.Structure):
"""
Structure representation of the inotify_event structure
(used in buffer size calculations)::
struct inotify_event {
__s32 wd; /* watch descriptor */
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
char name[0]; /* stub for possible name */
};
"""
_fields_ = [('wd', c_int),
('mask', c_uint32),
('cookie', c_uint32),
('len', c_uint32),
('name', c_char_p)]
EVENT_SIZE = ctypes.sizeof(inotify_event_struct)
DEFAULT_NUM_EVENTS = 2048
DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16)
class Inotify(object):
"""
Linux inotify(7) API wrapper class.
:param path:
The directory path for which we want an inotify object.
:type path:
:class:`bytes`
:param recursive:
``True`` if subdirectories should be monitored; ``False`` otherwise.
"""
def __init__(self, path, recursive=False, event_mask=WATCHDOG_ALL_EVENTS):
# The file descriptor associated with the inotify instance.
inotify_fd = inotify_init()
if inotify_fd == -1:
Inotify._raise_error()
self._inotify_fd = inotify_fd
self._lock = threading.Lock()
# Stores the watch descriptor for a given path.
self._wd_for_path = dict()
self._path_for_wd = dict()
self._path = path
self._event_mask = event_mask
self._is_recursive = recursive
self._add_dir_watch(path, recursive, event_mask)
self._moved_from_events = dict()
@property
def event_mask(self):
"""The event mask for this inotify instance."""
return self._event_mask
@property
def path(self):
"""The path associated with the inotify instance."""
return self._path
@property
def is_recursive(self):
"""Whether we are watching directories recursively."""
return self._is_recursive
@property
def fd(self):
"""The file descriptor associated with the inotify instance."""
return self._inotify_fd
def clear_move_records(self):
"""Clear cached records of MOVED_FROM events"""
self._moved_from_events = dict()
def source_for_move(self, destination_event):
"""
The source path corresponding to the given MOVED_TO event.
If the source path is outside the monitored directories, None
is returned instead.
"""
if destination_event.cookie in self._moved_from_events:
return self._moved_from_events[destination_event.cookie].src_path
else:
return None
def remember_move_from_event(self, event):
"""
Save this event as the source event for future MOVED_TO events to
reference.
"""
self._moved_from_events[event.cookie] = event
def add_watch(self, path):
"""
Adds a watch for the given path.
:param path:
Path to begin monitoring.
"""
with self._lock:
self._add_watch(path, self._event_mask)
def remove_watch(self, path):
"""
Removes a watch for the given path.
:param path:
Path string for which the watch will be removed.
"""
with self._lock:
wd = self._wd_for_path.pop(path)
del self._path_for_wd[wd]
if inotify_rm_watch(self._inotify_fd, wd) == -1:
Inotify._raise_error()
def close(self):
"""
Closes the inotify instance and removes all associated watches.
"""
with self._lock:
if self._path in self._wd_for_path:
wd = self._wd_for_path[self._path]
inotify_rm_watch(self._inotify_fd, wd)
os.close(self._inotify_fd)
def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE):
"""
Reads events from inotify and returns a list of them.
"""
# HACK: We need to traverse the directory path
# recursively and simulate events for newly
# created subdirectories/files. This will handle
# mkdir -p foobar/blah/bar; touch foobar/afile
def _recursive_simulate(src_path):
events = []
for root, dirnames, filenames in os.walk(src_path):
for dirname in dirnames:
try:
full_path = os.path.join(root, dirname)
wd_dir = self._add_watch(full_path, self._event_mask)
e = InotifyEvent(
wd_dir, InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR, 0, dirname, full_path)
events.append(e)
except OSError:
pass
for filename in filenames:
full_path = os.path.join(root, filename)
wd_parent_dir = self._wd_for_path[os.path.dirname(full_path)]
e = InotifyEvent(
wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path)
events.append(e)
return events
event_buffer = None
while True:
try:
event_buffer = os.read(self._inotify_fd, event_buffer_size)
except OSError as e:
if e.errno == errno.EINTR:
continue
break
with self._lock:
event_list = []
for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer):
if wd == -1:
continue
wd_path = self._path_for_wd[wd]
src_path = os.path.join(wd_path, name) if name else wd_path #avoid trailing slash
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
if inotify_event.is_moved_from:
self.remember_move_from_event(inotify_event)
elif inotify_event.is_moved_to:
move_src_path = self.source_for_move(inotify_event)
if move_src_path in self._wd_for_path:
moved_wd = self._wd_for_path[move_src_path]
del self._wd_for_path[move_src_path]
self._wd_for_path[inotify_event.src_path] = moved_wd
self._path_for_wd[moved_wd] = inotify_event.src_path
src_path = os.path.join(wd_path, name)
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
if inotify_event.is_ignored:
# Clean up book-keeping for deleted watches.
path = self._path_for_wd.pop(wd)
if self._wd_for_path[path] == wd:
del self._wd_for_path[path]
continue
event_list.append(inotify_event)
if (self.is_recursive and inotify_event.is_directory and
inotify_event.is_create):
# TODO: When a directory from another part of the
# filesystem is moved into a watched directory, this
# will not generate events for the directory tree.
# We need to coalesce IN_MOVED_TO events and those
# IN_MOVED_TO events which don't pair up with
# IN_MOVED_FROM events should be marked IN_CREATE
# instead relative to this directory.
try:
self._add_watch(src_path, self._event_mask)
except OSError:
continue
event_list.extend(_recursive_simulate(src_path))
return event_list
# Non-synchronized methods.
def _add_dir_watch(self, path, recursive, mask):
"""
Adds a watch (optionally recursively) for the given directory path
to monitor events specified by the mask.
:param path:
Path to monitor
:param recursive:
``True`` to monitor recursively.
:param mask:
Event bit mask.
"""
if not os.path.isdir(path):
raise OSError('Path is not a directory')
self._add_watch(path, mask)
if recursive:
for root, dirnames, _ in os.walk(path):
for dirname in dirnames:
full_path = os.path.join(root, dirname)
if os.path.islink(full_path):
continue
self._add_watch(full_path, mask)
def _add_watch(self, path, mask):
"""
Adds a watch for the given path to monitor events specified by the
mask.
:param path:
Path to monitor
:param mask:
Event bit mask.
"""
wd = inotify_add_watch(self._inotify_fd, path, mask)
if wd == -1:
Inotify._raise_error()
self._wd_for_path[path] = wd
self._path_for_wd[wd] = path
return wd
@staticmethod
def _raise_error():
"""
Raises errors for inotify failures.
"""
err = ctypes.get_errno()
if err == errno.ENOSPC:
raise OSError("inotify watch limit reached")
elif err == errno.EMFILE:
raise OSError("inotify instance limit reached")
else:
raise OSError(os.strerror(err))
@staticmethod
def _parse_event_buffer(event_buffer):
"""
Parses an event buffer of ``inotify_event`` structs returned by
inotify::
struct inotify_event {
__s32 wd; /* watch descriptor */
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
char name[0]; /* stub for possible name */
};
The ``cookie`` member of this struct is used to pair two related
events, for example, it pairs an IN_MOVED_FROM event with an
IN_MOVED_TO event.
"""
i = 0
while i + 16 <= len(event_buffer):
wd, mask, cookie, length = struct.unpack_from('iIII', event_buffer, i)
name = event_buffer[i + 16:i + 16 + length].rstrip(b'\0')
i += 16 + length
yield wd, mask, cookie, name
class InotifyEvent(object):
"""
Inotify event struct wrapper.
:param wd:
Watch descriptor
:param mask:
Event mask
:param cookie:
Event cookie
:param name:
Event name.
:param src_path:
Event source path
"""
def __init__(self, wd, mask, cookie, name, src_path):
self._wd = wd
self._mask = mask
self._cookie = cookie
self._name = name
self._src_path = src_path
@property
def src_path(self):
return self._src_path
@property
def wd(self):
return self._wd
@property
def mask(self):
return self._mask
@property
def cookie(self):
return self._cookie
@property
def name(self):
return self._name
@property
def is_modify(self):
return self._mask & InotifyConstants.IN_MODIFY > 0
@property
def is_close_write(self):
return self._mask & InotifyConstants.IN_CLOSE_WRITE > 0
@property
def is_close_nowrite(self):
return self._mask & InotifyConstants.IN_CLOSE_NOWRITE > 0
@property
def is_access(self):
return self._mask & InotifyConstants.IN_ACCESS > 0
@property
def is_delete(self):
return self._mask & InotifyConstants.IN_DELETE > 0
@property
def is_delete_self(self):
return self._mask & InotifyConstants.IN_DELETE_SELF > 0
@property
def is_create(self):
return self._mask & InotifyConstants.IN_CREATE > 0
@property
def is_moved_from(self):
return self._mask & InotifyConstants.IN_MOVED_FROM > 0
@property
def is_moved_to(self):
return self._mask & InotifyConstants.IN_MOVED_TO > 0
@property
def is_move(self):
return self._mask & InotifyConstants.IN_MOVE > 0
@property
def is_move_self(self):
return self._mask & InotifyConstants.IN_MOVE_SELF > 0
@property
def is_attrib(self):
return self._mask & InotifyConstants.IN_ATTRIB > 0
@property
def is_ignored(self):
return self._mask & InotifyConstants.IN_IGNORED > 0
@property
def is_directory(self):
# It looks like the kernel does not provide this information for
# IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir.
# See also: https://github.com/seb-m/pyinotify/blob/2c7e8f8/python2/pyinotify.py#L897
return (self.is_delete_self or self.is_move_self or
self._mask & InotifyConstants.IN_ISDIR > 0)
@property
def key(self):
return self._src_path, self._wd, self._mask, self._cookie, self._name
def __eq__(self, inotify_event):
return self.key == inotify_event.key
def __ne__(self, inotify_event):
return self.key != inotify_event.key
def __hash__(self):
return hash(self.key)
@staticmethod
def _get_mask_string(mask):
masks = []
for c in dir(InotifyConstants):
if c.startswith('IN_') and c not in ['IN_ALL_EVENTS', 'IN_CLOSE', 'IN_MOVE']:
c_val = getattr(InotifyConstants, c)
if mask & c_val:
masks.append(c)
mask_string = '|'.join(masks)
return mask_string
def __repr__(self):
mask_string = self._get_mask_string(self.mask)
s = "<InotifyEvent: src_path=%s, wd=%d, mask=%s, cookie=%d, name=%s>"
return s % (self.src_path, self.wd, mask_string, self.cookie, self.name)
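The buffer layout documented in ``_parse_event_buffer()`` can be exercised without touching the kernel by packing a synthetic record with ``struct``. The sketch below uses that private helper purely for illustration and only imports on Linux, since loading the module requires a libc with inotify support; the name and path are made up:

import struct

from watchdog.observers.inotify_c import (
    Inotify, InotifyConstants, InotifyEvent)

# One fake "struct inotify_event": wd=1, IN_CREATE|IN_ISDIR, cookie=0 and a
# 16-byte name field holding 'newdir' padded with NULs.
name = b'newdir'.ljust(16, b'\0')
mask = InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR
buf = struct.pack('iIII', 1, mask, 0, len(name)) + name

for wd, mask, cookie, raw_name in Inotify._parse_event_buffer(buf):
    event = InotifyEvent(wd, mask, cookie, raw_name, b'/watched/' + raw_name)
    print('is_create=%s is_directory=%s' % (event.is_create, event.is_directory))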

View file

@@ -0,0 +1,726 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.kqueue
:synopsis: ``kqueue(2)`` based emitter implementation.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:platforms: Mac OS X and BSD with kqueue(2).
.. WARNING:: kqueue is a very heavyweight way to monitor file systems.
Each kqueue-detected directory modification triggers
a full directory scan. Traversing the entire directory tree
and opening file descriptors for all files will create
performance problems. We need to find a way to re-scan
only those directories which report changes and do a diff
between two sub-DirectorySnapshots perhaps.
.. ADMONITION:: About ``select.kqueue`` and Python versions
* Python 2.5 does not ship with ``select.kqueue``
* Python 2.6 ships with a broken ``select.kqueue`` that cannot take
multiple events in the event list passed to ``kqueue.control``.
* Python 2.7 ships with a working ``select.kqueue``
implementation.
I have backported the Python 2.7 implementation to Python 2.5 and 2.6
in the ``select_backport`` package available on PyPI.
.. ADMONITION:: About OS X performance guidelines
Quote from the `Mac OS X File System Performance Guidelines`_:
"When you only want to track changes on a file or directory, be sure to
open it using the ``O_EVTONLY`` flag. This flag prevents the file or
directory from being marked as open or in use. This is important
if you are tracking files on a removable volume and the user tries to
unmount the volume. With this flag in place, the system knows it can
dismiss the volume. If you had opened the files or directories without
this flag, the volume would be marked as busy and would not be
unmounted."
``O_EVTONLY`` is defined as ``0x8000`` in the OS X header files.
More information here: http://www.mlsite.net/blog/?p=2312
Classes
-------
.. autoclass:: KqueueEmitter
:members:
:show-inheritance:
Collections and Utility Classes
-------------------------------
.. autoclass:: KeventDescriptor
:members:
:show-inheritance:
.. autoclass:: KeventDescriptorSet
:members:
:show-inheritance:
.. _Mac OS X File System Performance Guidelines: http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/FileSystem/Articles/TrackingChanges.html#//apple_ref/doc/uid/20001993-CJBJFIDD
"""
from __future__ import with_statement
from watchdog.utils import platform
import threading
import errno
import sys
import stat
import os
# See the notes for this module in the documentation above ^.
#import select
# if not has_attribute(select, 'kqueue') or sys.version_info < (2, 7, 0):
if sys.version_info < (2, 7, 0):
import select_backport as select
else:
import select
from pathtools.path import absolute_path
from watchdog.observers.api import (
BaseObserver,
EventEmitter,
DEFAULT_OBSERVER_TIMEOUT,
DEFAULT_EMITTER_TIMEOUT
)
from watchdog.utils.dirsnapshot import DirectorySnapshot
from watchdog.events import (
DirMovedEvent,
DirDeletedEvent,
DirCreatedEvent,
DirModifiedEvent,
FileMovedEvent,
FileDeletedEvent,
FileCreatedEvent,
FileModifiedEvent,
EVENT_TYPE_MOVED,
EVENT_TYPE_DELETED,
EVENT_TYPE_CREATED
)
# Maximum number of events to process.
MAX_EVENTS = 4096
# O_EVTONLY value from the header files for OS X only.
O_EVTONLY = 0x8000
# Pre-calculated values for the kevent filter, flags, and fflags attributes.
if platform.is_darwin():
WATCHDOG_OS_OPEN_FLAGS = O_EVTONLY
else:
WATCHDOG_OS_OPEN_FLAGS = os.O_RDONLY | os.O_NONBLOCK
WATCHDOG_KQ_FILTER = select.KQ_FILTER_VNODE
WATCHDOG_KQ_EV_FLAGS = select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR
WATCHDOG_KQ_FFLAGS = (
select.KQ_NOTE_DELETE |
select.KQ_NOTE_WRITE |
select.KQ_NOTE_EXTEND |
select.KQ_NOTE_ATTRIB |
select.KQ_NOTE_LINK |
select.KQ_NOTE_RENAME |
select.KQ_NOTE_REVOKE
)
# Flag tests.
def is_deleted(kev):
"""Determines whether the given kevent represents deletion."""
return kev.fflags & select.KQ_NOTE_DELETE
def is_modified(kev):
"""Determines whether the given kevent represents modification."""
fflags = kev.fflags
return (fflags & select.KQ_NOTE_EXTEND) or (fflags & select.KQ_NOTE_WRITE)
def is_attrib_modified(kev):
"""Determines whether the given kevent represents attribute modification."""
return kev.fflags & select.KQ_NOTE_ATTRIB
def is_renamed(kev):
"""Determines whether the given kevent represents movement."""
return kev.fflags & select.KQ_NOTE_RENAME
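The constants and flag tests above map directly onto the standard-library ``select.kqueue`` API. A bare-bones sketch (BSD/Mac OS X only, and it assumes the module's own dependencies such as ``pathtools`` are importable); the file path and one-second timeout are examples, and a real program would keep polling in a loop:

import os
import select

from watchdog.observers.kqueue import (
    MAX_EVENTS, WATCHDOG_OS_OPEN_FLAGS, WATCHDOG_KQ_FILTER,
    WATCHDOG_KQ_EV_FLAGS, WATCHDOG_KQ_FFLAGS,
    is_deleted, is_modified, is_renamed)

fd = os.open('/tmp/watched_file', WATCHDOG_OS_OPEN_FLAGS)
kq = select.kqueue()
kev = select.kevent(fd, filter=WATCHDOG_KQ_FILTER,
                    flags=WATCHDOG_KQ_EV_FLAGS,
                    fflags=WATCHDOG_KQ_FFLAGS)

for event in kq.control([kev], MAX_EVENTS, 1.0):   # wait up to 1 second
    if is_renamed(event):
        print('renamed')
    elif is_deleted(event):
        print('deleted')
    elif is_modified(event):
        print('modified')

os.close(fd)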
class KeventDescriptorSet(object):
"""
Thread-safe kevent descriptor collection.
"""
def __init__(self):
# Set of KeventDescriptor
self._descriptors = set()
# Descriptor for a given path.
self._descriptor_for_path = dict()
# Descriptor for a given fd.
self._descriptor_for_fd = dict()
# List of kevent objects.
self._kevents = list()
self._lock = threading.Lock()
@property
def kevents(self):
"""
List of kevents monitored.
"""
with self._lock:
return self._kevents
@property
def paths(self):
"""
List of paths for which kevents have been created.
"""
with self._lock:
return list(self._descriptor_for_path.keys())
def get_for_fd(self, fd):
"""
Given a file descriptor, returns the kevent descriptor object
for it.
:param fd:
OS file descriptor.
:type fd:
``int``
:returns:
A :class:`KeventDescriptor` object.
"""
with self._lock:
return self._descriptor_for_fd[fd]
def get(self, path):
"""
Obtains a :class:`KeventDescriptor` object for the specified path.
:param path:
Path for which the descriptor will be obtained.
"""
with self._lock:
path = absolute_path(path)
return self._get(path)
def __contains__(self, path):
"""
Determines whether a :class:`KeventDescriptor` has been registered
for the specified path.
:param path:
Path for which the descriptor will be obtained.
"""
with self._lock:
path = absolute_path(path)
return self._has_path(path)
def add(self, path, is_directory):
"""
Adds a :class:`KeventDescriptor` to the collection for the given
path.
:param path:
The path for which a :class:`KeventDescriptor` object will be
added.
:param is_directory:
``True`` if the path refers to a directory; ``False`` otherwise.
:type is_directory:
``bool``
"""
with self._lock:
path = absolute_path(path)
if not self._has_path(path):
self._add_descriptor(KeventDescriptor(path, is_directory))
def remove(self, path):
"""
Removes the :class:`KeventDescriptor` object for the given path
if it already exists.
:param path:
Path for which the :class:`KeventDescriptor` object will be
removed.
"""
with self._lock:
path = absolute_path(path)
if self._has_path(path):
self._remove_descriptor(self._get(path))
def clear(self):
"""
Clears the collection and closes all open descriptors.
"""
with self._lock:
for descriptor in self._descriptors:
descriptor.close()
self._descriptors.clear()
self._descriptor_for_fd.clear()
self._descriptor_for_path.clear()
self._kevents = []
# Thread-unsafe methods. Locking is provided at a higher level.
def _get(self, path):
"""Returns a kevent descriptor for a given path."""
return self._descriptor_for_path[path]
def _has_path(self, path):
"""Determines whether a :class:`KeventDescriptor` for the specified
path exists already in the collection."""
return path in self._descriptor_for_path
def _add_descriptor(self, descriptor):
"""
Adds a descriptor to the collection.
:param descriptor:
An instance of :class:`KeventDescriptor` to be added.
"""
self._descriptors.add(descriptor)
self._kevents.append(descriptor.kevent)
self._descriptor_for_path[descriptor.path] = descriptor
self._descriptor_for_fd[descriptor.fd] = descriptor
def _remove_descriptor(self, descriptor):
"""
Removes a descriptor from the collection.
:param descriptor:
An instance of :class:`KeventDescriptor` to be removed.
"""
self._descriptors.remove(descriptor)
del self._descriptor_for_fd[descriptor.fd]
del self._descriptor_for_path[descriptor.path]
self._kevents.remove(descriptor.kevent)
descriptor.close()
class KeventDescriptor(object):
"""
A kevent descriptor convenience data structure to keep together:
* kevent
* directory status
* path
* file descriptor
:param path:
Path string for which a kevent descriptor will be created.
:param is_directory:
``True`` if the path refers to a directory; ``False`` otherwise.
:type is_directory:
``bool``
"""
def __init__(self, path, is_directory):
self._path = absolute_path(path)
self._is_directory = is_directory
self._fd = os.open(path, WATCHDOG_OS_OPEN_FLAGS)
self._kev = select.kevent(self._fd,
filter=WATCHDOG_KQ_FILTER,
flags=WATCHDOG_KQ_EV_FLAGS,
fflags=WATCHDOG_KQ_FFLAGS)
@property
def fd(self):
"""OS file descriptor for the kevent descriptor."""
return self._fd
@property
def path(self):
"""The path associated with the kevent descriptor."""
return self._path
@property
def kevent(self):
"""The kevent object associated with the kevent descriptor."""
return self._kev
@property
def is_directory(self):
"""Determines whether the kevent descriptor refers to a directory.
:returns:
``True`` or ``False``
"""
return self._is_directory
def close(self):
"""
Closes the file descriptor associated with a kevent descriptor.
"""
try:
os.close(self.fd)
except OSError:
pass
@property
def key(self):
return (self.path, self.is_directory)
def __eq__(self, descriptor):
return self.key == descriptor.key
def __ne__(self, descriptor):
return self.key != descriptor.key
def __hash__(self):
return hash(self.key)
def __repr__(self):
return "<KeventDescriptor: path=%s, is_directory=%s>"\
% (self.path, self.is_directory)
class KqueueEmitter(EventEmitter):
"""
kqueue(2)-based event emitter.
.. ADMONITION:: About ``kqueue(2)`` behavior and this implementation
``kqueue(2)`` monitors file system events only for
        open descriptors, which means this emitter does a lot of
        book-keeping behind the scenes to keep track of open
        descriptors for every entry in the monitored directory tree.
        This also means the maximum number of open file descriptors
        on your system must be increased **manually**.
Usually, issuing a call to ``ulimit`` should suffice::
ulimit -n 1024
Ensure that you pick a number that is larger than the
number of files you expect to be monitored.
``kqueue(2)`` does not provide enough information about the
following things:
* The destination path of a file or directory that is renamed.
* Creation of a file or directory within a directory; in this
case, ``kqueue(2)`` only indicates a modified event on the
parent directory.
Therefore, this emitter takes a snapshot of the directory
tree when ``kqueue(2)`` detects a change on the file system
to be able to determine the above information.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._kq = select.kqueue()
self._lock = threading.RLock()
# A collection of KeventDescriptor.
self._descriptors = KeventDescriptorSet()
def walker_callback(path, stat_info, self=self):
self._register_kevent(path, stat.S_ISDIR(stat_info.st_mode))
self._snapshot = DirectorySnapshot(watch.path,
watch.is_recursive,
walker_callback)
def _register_kevent(self, path, is_directory):
"""
Registers a kevent descriptor for the given path.
:param path:
Path for which a kevent descriptor will be created.
:param is_directory:
``True`` if the path refers to a directory; ``False`` otherwise.
:type is_directory:
``bool``
"""
try:
self._descriptors.add(path, is_directory)
except OSError as e:
if e.errno == errno.ENOENT:
# Probably dealing with a temporary file that was created
# and then quickly deleted before we could open
# a descriptor for it. Therefore, simply queue a sequence
# of created and deleted events for the path.
#path = absolute_path(path)
# if is_directory:
# self.queue_event(DirCreatedEvent(path))
# self.queue_event(DirDeletedEvent(path))
# else:
# self.queue_event(FileCreatedEvent(path))
# self.queue_event(FileDeletedEvent(path))
# TODO: We could simply ignore these files.
# Locked files cause the python process to die with
# a bus error when we handle temporary files.
# eg. .git/index.lock when running tig operations.
# I don't fully understand this at the moment.
pass
else:
# All other errors are propagated.
raise
def _unregister_kevent(self, path):
"""
Convenience function to close the kevent descriptor for a
specified kqueue-monitored path.
:param path:
Path for which the kevent descriptor will be closed.
"""
self._descriptors.remove(path)
def queue_event(self, event):
"""
Handles queueing a single event object.
:param event:
An instance of :class:`watchdog.events.FileSystemEvent`
or a subclass.
"""
# Handles all the book keeping for queued events.
# We do not need to fire moved/deleted events for all subitems in
# a directory tree here, because this function is called by kqueue
# for all those events anyway.
EventEmitter.queue_event(self, event)
if event.event_type == EVENT_TYPE_CREATED:
self._register_kevent(event.src_path, event.is_directory)
elif event.event_type == EVENT_TYPE_MOVED:
self._unregister_kevent(event.src_path)
self._register_kevent(event.dest_path, event.is_directory)
elif event.event_type == EVENT_TYPE_DELETED:
self._unregister_kevent(event.src_path)
def _queue_dirs_modified(self,
dirs_modified,
ref_snapshot,
new_snapshot):
"""
Queues events for directory modifications by scanning the directory
for changes.
A scan is a comparison between two snapshots of the same directory
        taken at two different times. The scan also determines whether files
        or directories were created, which is what updated the modified
        timestamp of the parent directory.
"""
if dirs_modified:
for dir_modified in dirs_modified:
self.queue_event(DirModifiedEvent(dir_modified))
diff_events = new_snapshot - ref_snapshot
for file_created in diff_events.files_created:
self.queue_event(FileCreatedEvent(file_created))
for directory_created in diff_events.dirs_created:
self.queue_event(DirCreatedEvent(directory_created))
def _queue_events_except_renames_and_dir_modifications(self, event_list):
"""
Queues events from the kevent list returned from the call to
:meth:`select.kqueue.control`.
.. NOTE:: Queues only the deletions, file modifications,
attribute modifications. The other events, namely,
file creation, directory modification, file rename,
directory rename, directory creation, etc. are
determined by comparing directory snapshots.
"""
files_renamed = set()
dirs_renamed = set()
dirs_modified = set()
for kev in event_list:
descriptor = self._descriptors.get_for_fd(kev.ident)
src_path = descriptor.path
if is_deleted(kev):
if descriptor.is_directory:
self.queue_event(DirDeletedEvent(src_path))
else:
self.queue_event(FileDeletedEvent(src_path))
elif is_attrib_modified(kev):
if descriptor.is_directory:
self.queue_event(DirModifiedEvent(src_path))
else:
self.queue_event(FileModifiedEvent(src_path))
elif is_modified(kev):
if descriptor.is_directory:
# When a directory is modified, it may be due to
# sub-file/directory renames or new file/directory
# creation. We determine all this by comparing
# snapshots later.
dirs_modified.add(src_path)
else:
self.queue_event(FileModifiedEvent(src_path))
elif is_renamed(kev):
                # Kqueue does not specify the destination names for renames,
                # so we have to process these after taking a snapshot
                # of the directory.
if descriptor.is_directory:
dirs_renamed.add(src_path)
else:
files_renamed.add(src_path)
return files_renamed, dirs_renamed, dirs_modified
def _queue_renamed(self,
src_path,
is_directory,
ref_snapshot,
new_snapshot):
"""
Compares information from two directory snapshots (one taken before
the rename operation and another taken right after) to determine the
destination path of the file system object renamed, and adds
appropriate events to the event queue.
"""
try:
ref_stat_info = ref_snapshot.stat_info(src_path)
except KeyError:
# Probably caught a temporary file/directory that was renamed
# and deleted. Fires a sequence of created and deleted events
# for the path.
if is_directory:
self.queue_event(DirCreatedEvent(src_path))
self.queue_event(DirDeletedEvent(src_path))
else:
self.queue_event(FileCreatedEvent(src_path))
self.queue_event(FileDeletedEvent(src_path))
# We don't process any further and bail out assuming
# the event represents deletion/creation instead of movement.
return
try:
dest_path = absolute_path(
new_snapshot.path_for_inode(ref_stat_info.st_ino))
if is_directory:
event = DirMovedEvent(src_path, dest_path)
# TODO: Do we need to fire moved events for the items
                # inside the directory tree? Does kqueue do this
# all by itself? Check this and then enable this code
# only if it doesn't already.
# A: It doesn't. So I've enabled this block.
if self.watch.is_recursive:
for sub_event in event.sub_moved_events():
self.queue_event(sub_event)
self.queue_event(event)
else:
self.queue_event(FileMovedEvent(src_path, dest_path))
except KeyError:
# If the new snapshot does not have an inode for the
# old path, we haven't found the new name. Therefore,
            # we mark it as deleted and unregister the path.
if is_directory:
self.queue_event(DirDeletedEvent(src_path))
else:
self.queue_event(FileDeletedEvent(src_path))
def _read_events(self, timeout=None):
"""
Reads events from a call to the blocking
:meth:`select.kqueue.control()` method.
:param timeout:
Blocking timeout for reading events.
:type timeout:
``float`` (seconds)
"""
return self._kq.control(self._descriptors.kevents,
MAX_EVENTS,
timeout)
def queue_events(self, timeout):
"""
Queues events by reading them from a call to the blocking
:meth:`select.kqueue.control()` method.
:param timeout:
Blocking timeout for reading events.
:type timeout:
``float`` (seconds)
"""
with self._lock:
try:
event_list = self._read_events(timeout)
files_renamed, dirs_renamed, dirs_modified = (
self._queue_events_except_renames_and_dir_modifications(event_list))
# Take a fresh snapshot of the directory and update the
# saved snapshot.
new_snapshot = DirectorySnapshot(self.watch.path,
self.watch.is_recursive)
ref_snapshot = self._snapshot
self._snapshot = new_snapshot
if files_renamed or dirs_renamed or dirs_modified:
for src_path in files_renamed:
self._queue_renamed(src_path,
False,
ref_snapshot,
new_snapshot)
for src_path in dirs_renamed:
self._queue_renamed(src_path,
True,
ref_snapshot,
new_snapshot)
self._queue_dirs_modified(dirs_modified,
ref_snapshot,
new_snapshot)
except OSError as e:
if e.errno == errno.EBADF:
# logging.debug(e)
pass
else:
raise
def on_thread_stop(self):
# Clean up.
with self._lock:
self._descriptors.clear()
self._kq.close()
class KqueueObserver(BaseObserver):
"""
Observer thread that schedules watching directories and dispatches
calls to event handlers.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=KqueueEmitter, timeout=timeout)
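# Usage sketch: a minimal way to drive KqueueObserver through the standard
# watchdog handler API, assuming a kqueue-capable platform (BSD/macOS) and a
# hypothetical directory '/tmp/watched'. Every watched entry keeps one file
# descriptor open, so large trees may need a higher 'ulimit -n'.
if __name__ == '__main__':
    import time
    from watchdog.events import FileSystemEventHandler

    class PrintingHandler(FileSystemEventHandler):
        def on_any_event(self, event):
            # event_type is one of 'created', 'modified', 'moved', 'deleted'
            print('%s: %s' % (event.event_type, event.src_path))

    observer = KqueueObserver()
    observer.schedule(PrintingHandler(), '/tmp/watched', recursive=True)
    observer.start()
    try:
        time.sleep(10)
    finally:
        observer.stop()
        observer.join()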

View file

@@ -0,0 +1,147 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.polling
:synopsis: Polling emitter implementation.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
Classes
-------
.. autoclass:: PollingObserver
:members:
:show-inheritance:
.. autoclass:: PollingObserverVFS
:members:
:show-inheritance:
:special-members:
"""
from __future__ import with_statement
import os
import threading
from functools import partial
from watchdog.utils import stat as default_stat
from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_OBSERVER_TIMEOUT,
DEFAULT_EMITTER_TIMEOUT
)
from watchdog.events import (
DirMovedEvent,
DirDeletedEvent,
DirCreatedEvent,
DirModifiedEvent,
FileMovedEvent,
FileDeletedEvent,
FileCreatedEvent,
FileModifiedEvent
)
class PollingEmitter(EventEmitter):
"""
Platform-independent emitter that polls a directory to detect file
system changes.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT,
stat=default_stat, listdir=os.listdir):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._snapshot = None
self._lock = threading.Lock()
self._take_snapshot = lambda: DirectorySnapshot(
self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir)
def on_thread_start(self):
self._snapshot = self._take_snapshot()
def queue_events(self, timeout):
# We don't want to hit the disk continuously.
# timeout behaves like an interval for polling emitters.
if self.stopped_event.wait(timeout):
return
with self._lock:
if not self.should_keep_running():
return
# Get event diff between fresh snapshot and previous snapshot.
# Update snapshot.
try:
new_snapshot = self._take_snapshot()
except OSError as e:
self.queue_event(DirDeletedEvent(self.watch.path))
self.stop()
return
except Exception as e:
raise e
events = DirectorySnapshotDiff(self._snapshot, new_snapshot)
self._snapshot = new_snapshot
# Files.
for src_path in events.files_deleted:
self.queue_event(FileDeletedEvent(src_path))
for src_path in events.files_modified:
self.queue_event(FileModifiedEvent(src_path))
for src_path in events.files_created:
self.queue_event(FileCreatedEvent(src_path))
for src_path, dest_path in events.files_moved:
self.queue_event(FileMovedEvent(src_path, dest_path))
# Directories.
for src_path in events.dirs_deleted:
self.queue_event(DirDeletedEvent(src_path))
for src_path in events.dirs_modified:
self.queue_event(DirModifiedEvent(src_path))
for src_path in events.dirs_created:
self.queue_event(DirCreatedEvent(src_path))
for src_path, dest_path in events.dirs_moved:
self.queue_event(DirMovedEvent(src_path, dest_path))
class PollingObserver(BaseObserver):
"""
Platform-independent observer that polls a directory to detect file
system changes.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=PollingEmitter, timeout=timeout)
class PollingObserverVFS(BaseObserver):
"""
File system independent observer that polls a directory to detect changes.
"""
def __init__(self, stat, listdir, polling_interval=1):
"""
:param stat: stat function. See ``os.stat`` for details.
:param listdir: listdir function. See ``os.listdir`` for details.
:type polling_interval: float
:param polling_interval: interval in seconds between polling the file system.
"""
emitter_cls = partial(PollingEmitter, stat=stat, listdir=listdir)
BaseObserver.__init__(self, emitter_class=emitter_cls, timeout=polling_interval)
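# Usage sketch: polling the local file system through PollingObserverVFS,
# passing os.stat / os.listdir explicitly just to show the VFS hooks. The
# path '/tmp/watched' is a hypothetical example; a smaller polling_interval
# lowers latency at the cost of more disk access.
if __name__ == '__main__':
    import time
    import logging
    from watchdog.events import LoggingEventHandler

    logging.basicConfig(level=logging.INFO)
    observer = PollingObserverVFS(stat=os.stat, listdir=os.listdir,
                                  polling_interval=1)
    observer.schedule(LoggingEventHandler(), '/tmp/watched', recursive=True)
    observer.start()
    try:
        time.sleep(10)
    finally:
        observer.stop()
        observer.join()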

View file

@@ -0,0 +1,135 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
# Copyright 2014 Thomas Amland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import ctypes
import threading
import os.path
import time
from watchdog.events import (
DirCreatedEvent,
DirDeletedEvent,
DirMovedEvent,
DirModifiedEvent,
FileCreatedEvent,
FileDeletedEvent,
FileMovedEvent,
FileModifiedEvent,
generate_sub_moved_events,
generate_sub_created_events,
)
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_OBSERVER_TIMEOUT,
DEFAULT_EMITTER_TIMEOUT
)
from watchdog.observers.winapi import (
read_events,
get_directory_handle,
close_directory_handle,
)
# HACK:
WATCHDOG_TRAVERSE_MOVED_DIR_DELAY = 1 # seconds
class WindowsApiEmitter(EventEmitter):
"""
Windows API-based emitter that uses ReadDirectoryChangesW
to detect file system changes for a watch.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._lock = threading.Lock()
self._handle = None
def on_thread_start(self):
self._handle = get_directory_handle(self.watch.path)
def on_thread_stop(self):
if self._handle:
close_directory_handle(self._handle)
def queue_events(self, timeout):
winapi_events = read_events(self._handle, self.watch.is_recursive)
with self._lock:
last_renamed_src_path = ""
for winapi_event in winapi_events:
src_path = os.path.join(self.watch.path, winapi_event.src_path)
if winapi_event.is_renamed_old:
last_renamed_src_path = src_path
elif winapi_event.is_renamed_new:
dest_path = src_path
src_path = last_renamed_src_path
if os.path.isdir(dest_path):
event = DirMovedEvent(src_path, dest_path)
if self.watch.is_recursive:
# HACK: We introduce a forced delay before
# traversing the moved directory. This will read
# only file movement that finishes within this
# delay time.
time.sleep(WATCHDOG_TRAVERSE_MOVED_DIR_DELAY)
# The following block of code may not
# obtain moved events for the entire tree if
# the I/O is not completed within the above
# delay time. So, it's not guaranteed to work.
# TODO: Come up with a better solution, possibly
# a way to wait for I/O to complete before
# queuing events.
for sub_moved_event in generate_sub_moved_events(src_path, dest_path):
self.queue_event(sub_moved_event)
self.queue_event(event)
else:
self.queue_event(FileMovedEvent(src_path, dest_path))
elif winapi_event.is_modified:
cls = DirModifiedEvent if os.path.isdir(src_path) else FileModifiedEvent
self.queue_event(cls(src_path))
elif winapi_event.is_added:
isdir = os.path.isdir(src_path)
cls = DirCreatedEvent if isdir else FileCreatedEvent
self.queue_event(cls(src_path))
if isdir:
# If a directory is moved from outside the watched folder to inside it
# we only get a created directory event out of it, not any events for its children
# so use the same hack as for file moves to get the child events
time.sleep(WATCHDOG_TRAVERSE_MOVED_DIR_DELAY)
sub_events = generate_sub_created_events(src_path)
for sub_created_event in sub_events:
self.queue_event(sub_created_event)
elif winapi_event.is_removed:
self.queue_event(FileDeletedEvent(src_path))
class WindowsApiObserver(BaseObserver):
"""
Observer thread that schedules watching directories and dispatches
calls to event handlers.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=WindowsApiEmitter,
timeout=timeout)
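# Usage sketch: filtering for specific file types with a
# PatternMatchingEventHandler on top of WindowsApiObserver. Windows only;
# 'C:\\Music' is a hypothetical folder used purely for illustration.
if __name__ == '__main__':
    import time
    from watchdog.events import PatternMatchingEventHandler

    class Mp3Handler(PatternMatchingEventHandler):
        def on_created(self, event):
            print('new file: %s' % event.src_path)

        def on_moved(self, event):
            print('moved %s -> %s' % (event.src_path, event.dest_path))

    observer = WindowsApiObserver()
    observer.schedule(Mp3Handler(patterns=['*.mp3']), 'C:\\Music',
                      recursive=True)
    observer.start()
    try:
        time.sleep(10)
    finally:
        observer.stop()
        observer.join()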

View file

@@ -0,0 +1,349 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# winapi.py: Windows API-Python interface (removes dependency on pywin32)
#
# Copyright (C) 2007 Thomas Heller <theller@ctypes.org>
# Copyright (C) 2010 Will McGugan <will@willmcgugan.com>
# Copyright (C) 2010 Ryan Kelly <ryan@rfk.id.au>
# Copyright (C) 2010 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright (C) 2014 Thomas Amland
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and / or other materials provided with the distribution.
# * Neither the name of the organization nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Portions of this code were taken from pyfilesystem, which uses the above
# new BSD license.
from __future__ import with_statement
import ctypes.wintypes
import struct
from functools import reduce
try:
LPVOID = ctypes.wintypes.LPVOID
except AttributeError:
# LPVOID wasn't defined in Py2.5, guess it was introduced in Py2.6
LPVOID = ctypes.c_void_p
# Invalid handle value.
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
# File notification constants.
FILE_NOTIFY_CHANGE_FILE_NAME = 0x01
FILE_NOTIFY_CHANGE_DIR_NAME = 0x02
FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x04
FILE_NOTIFY_CHANGE_SIZE = 0x08
FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010
FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020
FILE_NOTIFY_CHANGE_CREATION = 0x040
FILE_NOTIFY_CHANGE_SECURITY = 0x0100
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OVERLAPPED = 0x40000000
FILE_LIST_DIRECTORY = 1
FILE_SHARE_READ = 0x01
FILE_SHARE_WRITE = 0x02
FILE_SHARE_DELETE = 0x04
OPEN_EXISTING = 3
# File action constants.
FILE_ACTION_CREATED = 1
FILE_ACTION_DELETED = 2
FILE_ACTION_MODIFIED = 3
FILE_ACTION_RENAMED_OLD_NAME = 4
FILE_ACTION_RENAMED_NEW_NAME = 5
FILE_ACTION_OVERFLOW = 0xFFFF
# Aliases
FILE_ACTION_ADDED = FILE_ACTION_CREATED
FILE_ACTION_REMOVED = FILE_ACTION_DELETED
THREAD_TERMINATE = 0x0001
# IO waiting constants.
WAIT_ABANDONED = 0x00000080
WAIT_IO_COMPLETION = 0x000000C0
WAIT_OBJECT_0 = 0x00000000
WAIT_TIMEOUT = 0x00000102
# Error codes
ERROR_OPERATION_ABORTED = 995
class OVERLAPPED(ctypes.Structure):
_fields_ = [('Internal', LPVOID),
('InternalHigh', LPVOID),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('Pointer', LPVOID),
('hEvent', ctypes.wintypes.HANDLE),
]
def _errcheck_bool(value, func, args):
if not value:
raise ctypes.WinError()
return args
def _errcheck_handle(value, func, args):
if not value:
raise ctypes.WinError()
if value == INVALID_HANDLE_VALUE:
raise ctypes.WinError()
return args
def _errcheck_dword(value, func, args):
if value == 0xFFFFFFFF:
raise ctypes.WinError()
return args
ReadDirectoryChangesW = ctypes.windll.kernel32.ReadDirectoryChangesW
ReadDirectoryChangesW.restype = ctypes.wintypes.BOOL
ReadDirectoryChangesW.errcheck = _errcheck_bool
ReadDirectoryChangesW.argtypes = (
ctypes.wintypes.HANDLE, # hDirectory
LPVOID, # lpBuffer
ctypes.wintypes.DWORD, # nBufferLength
ctypes.wintypes.BOOL, # bWatchSubtree
ctypes.wintypes.DWORD, # dwNotifyFilter
ctypes.POINTER(ctypes.wintypes.DWORD), # lpBytesReturned
ctypes.POINTER(OVERLAPPED), # lpOverlapped
LPVOID # FileIOCompletionRoutine # lpCompletionRoutine
)
CreateFileW = ctypes.windll.kernel32.CreateFileW
CreateFileW.restype = ctypes.wintypes.HANDLE
CreateFileW.errcheck = _errcheck_handle
CreateFileW.argtypes = (
ctypes.wintypes.LPCWSTR, # lpFileName
ctypes.wintypes.DWORD, # dwDesiredAccess
ctypes.wintypes.DWORD, # dwShareMode
LPVOID, # lpSecurityAttributes
ctypes.wintypes.DWORD, # dwCreationDisposition
ctypes.wintypes.DWORD, # dwFlagsAndAttributes
ctypes.wintypes.HANDLE # hTemplateFile
)
CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.restype = ctypes.wintypes.BOOL
CloseHandle.argtypes = (
ctypes.wintypes.HANDLE, # hObject
)
CancelIoEx = ctypes.windll.kernel32.CancelIoEx
CancelIoEx.restype = ctypes.wintypes.BOOL
CancelIoEx.errcheck = _errcheck_bool
CancelIoEx.argtypes = (
ctypes.wintypes.HANDLE, # hObject
ctypes.POINTER(OVERLAPPED) # lpOverlapped
)
CreateEvent = ctypes.windll.kernel32.CreateEventW
CreateEvent.restype = ctypes.wintypes.HANDLE
CreateEvent.errcheck = _errcheck_handle
CreateEvent.argtypes = (
LPVOID, # lpEventAttributes
ctypes.wintypes.BOOL, # bManualReset
ctypes.wintypes.BOOL, # bInitialState
ctypes.wintypes.LPCWSTR, # lpName
)
SetEvent = ctypes.windll.kernel32.SetEvent
SetEvent.restype = ctypes.wintypes.BOOL
SetEvent.errcheck = _errcheck_bool
SetEvent.argtypes = (
ctypes.wintypes.HANDLE, # hEvent
)
WaitForSingleObjectEx = ctypes.windll.kernel32.WaitForSingleObjectEx
WaitForSingleObjectEx.restype = ctypes.wintypes.DWORD
WaitForSingleObjectEx.errcheck = _errcheck_dword
WaitForSingleObjectEx.argtypes = (
ctypes.wintypes.HANDLE, # hObject
ctypes.wintypes.DWORD, # dwMilliseconds
ctypes.wintypes.BOOL, # bAlertable
)
CreateIoCompletionPort = ctypes.windll.kernel32.CreateIoCompletionPort
CreateIoCompletionPort.restype = ctypes.wintypes.HANDLE
CreateIoCompletionPort.errcheck = _errcheck_handle
CreateIoCompletionPort.argtypes = (
ctypes.wintypes.HANDLE, # FileHandle
ctypes.wintypes.HANDLE, # ExistingCompletionPort
LPVOID, # CompletionKey
ctypes.wintypes.DWORD, # NumberOfConcurrentThreads
)
GetQueuedCompletionStatus = ctypes.windll.kernel32.GetQueuedCompletionStatus
GetQueuedCompletionStatus.restype = ctypes.wintypes.BOOL
GetQueuedCompletionStatus.errcheck = _errcheck_bool
GetQueuedCompletionStatus.argtypes = (
ctypes.wintypes.HANDLE, # CompletionPort
LPVOID, # lpNumberOfBytesTransferred
LPVOID, # lpCompletionKey
ctypes.POINTER(OVERLAPPED), # lpOverlapped
ctypes.wintypes.DWORD, # dwMilliseconds
)
PostQueuedCompletionStatus = ctypes.windll.kernel32.PostQueuedCompletionStatus
PostQueuedCompletionStatus.restype = ctypes.wintypes.BOOL
PostQueuedCompletionStatus.errcheck = _errcheck_bool
PostQueuedCompletionStatus.argtypes = (
ctypes.wintypes.HANDLE, # CompletionPort
ctypes.wintypes.DWORD, # lpNumberOfBytesTransferred
ctypes.wintypes.DWORD, # lpCompletionKey
ctypes.POINTER(OVERLAPPED), # lpOverlapped
)
class FILE_NOTIFY_INFORMATION(ctypes.Structure):
_fields_ = [("NextEntryOffset", ctypes.wintypes.DWORD),
("Action", ctypes.wintypes.DWORD),
("FileNameLength", ctypes.wintypes.DWORD),
#("FileName", (ctypes.wintypes.WCHAR * 1))]
("FileName", (ctypes.c_char * 1))]
LPFNI = ctypes.POINTER(FILE_NOTIFY_INFORMATION)
# We don't need to recalculate these flags every time a call is made to
# the win32 API functions.
WATCHDOG_FILE_FLAGS = FILE_FLAG_BACKUP_SEMANTICS
WATCHDOG_FILE_SHARE_FLAGS = reduce(
lambda x, y: x | y, [
FILE_SHARE_READ,
FILE_SHARE_WRITE,
FILE_SHARE_DELETE,
])
WATCHDOG_FILE_NOTIFY_FLAGS = reduce(
lambda x, y: x | y, [
FILE_NOTIFY_CHANGE_FILE_NAME,
FILE_NOTIFY_CHANGE_DIR_NAME,
FILE_NOTIFY_CHANGE_ATTRIBUTES,
FILE_NOTIFY_CHANGE_SIZE,
FILE_NOTIFY_CHANGE_LAST_WRITE,
FILE_NOTIFY_CHANGE_SECURITY,
FILE_NOTIFY_CHANGE_LAST_ACCESS,
FILE_NOTIFY_CHANGE_CREATION,
])
BUFFER_SIZE = 2048
def _parse_event_buffer(readBuffer, nBytes):
results = []
while nBytes > 0:
fni = ctypes.cast(readBuffer, LPFNI)[0]
ptr = ctypes.addressof(fni) + FILE_NOTIFY_INFORMATION.FileName.offset
#filename = ctypes.wstring_at(ptr, fni.FileNameLength)
filename = ctypes.string_at(ptr, fni.FileNameLength)
results.append((fni.Action, filename.decode('utf-16')))
numToSkip = fni.NextEntryOffset
if numToSkip <= 0:
break
readBuffer = readBuffer[numToSkip:]
nBytes -= numToSkip # numToSkip is long. nBytes should be long too.
return results
def get_directory_handle(path):
"""Returns a Windows handle to the specified directory path."""
return CreateFileW(path, FILE_LIST_DIRECTORY, WATCHDOG_FILE_SHARE_FLAGS,
None, OPEN_EXISTING, WATCHDOG_FILE_FLAGS, None)
def close_directory_handle(handle):
try:
CancelIoEx(handle, None) # force ReadDirectoryChangesW to return
CloseHandle(handle) # close directory handle
except WindowsError:
try:
CloseHandle(handle) # close directory handle
except:
return
def read_directory_changes(handle, recursive):
"""Read changes to the directory using the specified directory handle.
http://timgolden.me.uk/pywin32-docs/win32file__ReadDirectoryChangesW_meth.html
"""
event_buffer = ctypes.create_string_buffer(BUFFER_SIZE)
nbytes = ctypes.wintypes.DWORD()
try:
ReadDirectoryChangesW(handle, ctypes.byref(event_buffer),
len(event_buffer), recursive,
WATCHDOG_FILE_NOTIFY_FLAGS,
ctypes.byref(nbytes), None, None)
except WindowsError as e:
if e.winerror == ERROR_OPERATION_ABORTED:
return [], 0
raise e
# Python 2/3 compat
try:
int_class = long
except NameError:
int_class = int
return event_buffer.raw, int_class(nbytes.value)
class WinAPINativeEvent(object):
def __init__(self, action, src_path):
self.action = action
self.src_path = src_path
@property
def is_added(self):
return self.action == FILE_ACTION_CREATED
@property
def is_removed(self):
return self.action == FILE_ACTION_REMOVED
@property
def is_modified(self):
return self.action == FILE_ACTION_MODIFIED
@property
def is_renamed_old(self):
return self.action == FILE_ACTION_RENAMED_OLD_NAME
@property
def is_renamed_new(self):
return self.action == FILE_ACTION_RENAMED_NEW_NAME
def __repr__(self):
return ("<WinAPINativeEvent: action=%d, src_path=%r>" % (self.action, self.src_path))
def read_events(handle, recursive):
buf, nbytes = read_directory_changes(handle, recursive)
events = _parse_event_buffer(buf, nbytes)
return [WinAPINativeEvent(action, path) for action, path in events]
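# Usage sketch: the low-level functions above without the emitter machinery.
# Windows only; u'C:\\temp' is a hypothetical directory. read_events() blocks
# inside ReadDirectoryChangesW until something changes or the handle is
# cancelled via close_directory_handle().
if __name__ == '__main__':
    handle = get_directory_handle(u'C:\\temp')
    try:
        for event in read_events(handle, recursive=True):
            if event.is_renamed_old or event.is_renamed_new:
                print('rename fragment: %r' % event)
            else:
                print('change: %r' % event)
    finally:
        close_directory_handle(handle)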

View file

@@ -0,0 +1,174 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import subprocess
import time
from watchdog.utils import echo, has_attribute
from watchdog.events import PatternMatchingEventHandler
class Trick(PatternMatchingEventHandler):
"""Your tricks should subclass this class."""
@classmethod
def generate_yaml(cls):
context = dict(module_name=cls.__module__,
klass_name=cls.__name__)
template_yaml = """- %(module_name)s.%(klass_name)s:
args:
- argument1
- argument2
kwargs:
patterns:
- "*.py"
- "*.js"
ignore_patterns:
- "version.py"
ignore_directories: false
"""
return template_yaml % context
class LoggerTrick(Trick):
"""A simple trick that does only logs events."""
def on_any_event(self, event):
pass
@echo.echo
def on_modified(self, event):
pass
@echo.echo
def on_deleted(self, event):
pass
@echo.echo
def on_created(self, event):
pass
@echo.echo
def on_moved(self, event):
pass
class ShellCommandTrick(Trick):
"""Executes shell commands in response to matched events."""
def __init__(self, shell_command=None, patterns=None, ignore_patterns=None,
ignore_directories=False, wait_for_process=False,
drop_during_process=False):
super(ShellCommandTrick, self).__init__(patterns, ignore_patterns,
ignore_directories)
self.shell_command = shell_command
self.wait_for_process = wait_for_process
self.drop_during_process = drop_during_process
self.process = None
def on_any_event(self, event):
from string import Template
if self.drop_during_process and self.process and self.process.poll() is None:
return
if event.is_directory:
object_type = 'directory'
else:
object_type = 'file'
context = {
'watch_src_path': event.src_path,
'watch_dest_path': '',
'watch_event_type': event.event_type,
'watch_object': object_type,
}
if self.shell_command is None:
if has_attribute(event, 'dest_path'):
context.update({'dest_path': event.dest_path})
command = 'echo "${watch_event_type} ${watch_object} from ${watch_src_path} to ${watch_dest_path}"'
else:
command = 'echo "${watch_event_type} ${watch_object} ${watch_src_path}"'
else:
if has_attribute(event, 'dest_path'):
context.update({'watch_dest_path': event.dest_path})
command = self.shell_command
command = Template(command).safe_substitute(**context)
self.process = subprocess.Popen(command, shell=True)
if self.wait_for_process:
self.process.wait()
class AutoRestartTrick(Trick):
"""Starts a long-running subprocess and restarts it on matched events.
The command parameter is a list of command arguments, such as
['bin/myserver', '-c', 'etc/myconfig.ini'].
Call start() after creating the Trick. Call stop() when stopping
the process.
"""
def __init__(self, command, patterns=None, ignore_patterns=None,
ignore_directories=False, stop_signal=signal.SIGINT,
kill_after=10):
super(AutoRestartTrick, self).__init__(
patterns, ignore_patterns, ignore_directories)
self.command = command
self.stop_signal = stop_signal
self.kill_after = kill_after
self.process = None
def start(self):
self.process = subprocess.Popen(self.command, preexec_fn=os.setsid)
def stop(self):
if self.process is None:
return
try:
os.killpg(os.getpgid(self.process.pid), self.stop_signal)
except OSError:
# Process is already gone
pass
else:
kill_time = time.time() + self.kill_after
while time.time() < kill_time:
if self.process.poll() is not None:
break
time.sleep(0.25)
else:
try:
os.killpg(os.getpgid(self.process.pid), 9)
except OSError:
# Process is already gone
pass
self.process = None
@echo.echo
def on_any_event(self, event):
self.stop()
self.start()
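# Usage sketch: wiring ShellCommandTrick to an observer so a shell command
# runs on every matching event. The './src' folder and the echoed command are
# hypothetical; the ${watch_...} placeholders are filled in by
# ShellCommandTrick.on_any_event() above.
if __name__ == '__main__':
    from watchdog.observers import Observer

    trick = ShellCommandTrick(
        shell_command='echo "${watch_event_type} ${watch_object} ${watch_src_path}"',
        patterns=['*.py'],
        wait_for_process=True)
    observer = Observer()
    observer.schedule(trick, './src', recursive=True)
    observer.start()
    try:
        time.sleep(10)
    finally:
        observer.stop()
        observer.join()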

View file

@@ -0,0 +1,158 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.utils
:synopsis: Utility classes and functions.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
Classes
-------
.. autoclass:: BaseThread
:members:
:show-inheritance:
:inherited-members:
"""
import os
import sys
import threading
from watchdog.utils import platform
from watchdog.utils.compat import Event
from collections import namedtuple
if sys.version_info[0] == 2 and platform.is_windows():
# st_ino is not implemented in os.stat on this platform
import win32stat
stat = win32stat.stat
else:
stat = os.stat
def has_attribute(ob, attribute):
"""
:func:`hasattr` swallows exceptions. :func:`has_attribute` tests a Python object for the
presence of an attribute.
:param ob:
object to inspect
:param attribute:
``str`` for the name of the attribute.
"""
return getattr(ob, attribute, None) is not None
class UnsupportedLibc(Exception):
pass
class BaseThread(threading.Thread):
""" Convenience class for creating stoppable threads. """
def __init__(self):
threading.Thread.__init__(self)
if has_attribute(self, 'daemon'):
self.daemon = True
else:
self.setDaemon(True)
self._stopped_event = Event()
if not has_attribute(self._stopped_event, 'is_set'):
self._stopped_event.is_set = self._stopped_event.isSet
@property
def stopped_event(self):
return self._stopped_event
def should_keep_running(self):
"""Determines whether the thread should continue running."""
return not self._stopped_event.is_set()
def on_thread_stop(self):
"""Override this method instead of :meth:`stop()`.
:meth:`stop()` calls this method.
This method is called immediately after the thread is signaled to stop.
"""
pass
def stop(self):
"""Signals the thread to stop."""
self._stopped_event.set()
self.on_thread_stop()
def on_thread_start(self):
"""Override this method instead of :meth:`start()`. :meth:`start()`
calls this method.
This method is called right before this thread is started and this
objects run() method is invoked.
"""
pass
def start(self):
self.on_thread_start()
threading.Thread.start(self)
def load_module(module_name):
"""Imports a module given its name and returns a handle to it."""
try:
__import__(module_name)
except ImportError:
raise ImportError('No module named %s' % module_name)
return sys.modules[module_name]
def load_class(dotted_path):
"""Loads and returns a class definition provided a dotted path
specification the last part of the dotted path is the class name
and there is at least one module name preceding the class name.
Notes:
You will need to ensure that the module you are trying to load
exists in the Python path.
Examples:
- module.name.ClassName # Provided module.name is in the Python path.
- module.ClassName # Provided module is in the Python path.
What won't work:
- ClassName
- modle.name.ClassName # Typo in module name.
- module.name.ClasNam # Typo in classname.
"""
dotted_path_split = dotted_path.split('.')
if len(dotted_path_split) > 1:
klass_name = dotted_path_split[-1]
module_name = '.'.join(dotted_path_split[:-1])
module = load_module(module_name)
if has_attribute(module, klass_name):
klass = getattr(module, klass_name)
return klass
# Finally create and return an instance of the class
# return klass(*args, **kwargs)
else:
raise AttributeError('Module %s does not have class attribute %s' % (
module_name, klass_name))
else:
raise ValueError(
'Dotted module path %s must contain a module name and a classname' % dotted_path)
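# Usage sketch: load_class() with a dotted path from the standard library,
# and a minimal BaseThread subclass that exits cleanly once stop() is called.
if __name__ == '__main__':
    import time

    OrderedDict = load_class('collections.OrderedDict')
    print(OrderedDict([('a', 1), ('b', 2)]))

    class Ticker(BaseThread):
        def run(self):
            while self.should_keep_running():
                time.sleep(0.1)

    ticker = Ticker()
    ticker.start()
    time.sleep(0.5)
    ticker.stop()
    ticker.join()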

View file

@@ -0,0 +1,249 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility collections or "bricks".
:module: watchdog.utils.bricks
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: lalinsky@gmail.com (Lukáš Lalinský)
:author: python@rcn.com (Raymond Hettinger)
Classes
=======
.. autoclass:: OrderedSetQueue
:members:
:show-inheritance:
:inherited-members:
.. autoclass:: OrderedSet
"""
import sys
import collections
from .compat import queue
class SkipRepeatsQueue(queue.Queue):
"""Thread-safe implementation of an special queue where a
put of the last-item put'd will be dropped.
The implementation leverages locking already implemented in the base class
redefining only the primitives.
Queued items must be immutable and hashable so that they can be used
as dictionary keys. You must implement **only read-only properties** and
the :meth:`Item.__hash__()`, :meth:`Item.__eq__()`, and
:meth:`Item.__ne__()` methods for items to be hashable.
An example implementation follows::
class Item(object):
def __init__(self, a, b):
self._a = a
self._b = b
@property
def a(self):
return self._a
@property
def b(self):
return self._b
def _key(self):
return (self._a, self._b)
def __eq__(self, item):
return self._key() == item._key()
def __ne__(self, item):
return self._key() != item._key()
def __hash__(self):
return hash(self._key())
based on the OrderedSetQueue below
"""
def _init(self, maxsize):
queue.Queue._init(self, maxsize)
self._last_item = None
def _put(self, item):
if item != self._last_item:
queue.Queue._put(self, item)
self._last_item = item
else:
# `put` increments `unfinished_tasks` even if we did not put
# anything into the queue here
self.unfinished_tasks -= 1
def _get(self):
item = queue.Queue._get(self)
if item is self._last_item:
self._last_item = None
return item
class OrderedSetQueue(queue.Queue):
"""Thread-safe implementation of an ordered set queue.
Disallows adding a duplicate item while maintaining the
order of items in the queue. The implementation leverages
locking already implemented in the base class
redefining only the primitives. Since the internal queue
is not replaced, the order is maintained. The set is used
merely to check for the existence of an item.
Queued items must be immutable and hashable so that they can be used
as dictionary keys. You must implement **only read-only properties** and
the :meth:`Item.__hash__()`, :meth:`Item.__eq__()`, and
:meth:`Item.__ne__()` methods for items to be hashable.
An example implementation follows::
class Item(object):
def __init__(self, a, b):
self._a = a
self._b = b
@property
def a(self):
return self._a
@property
def b(self):
return self._b
def _key(self):
return (self._a, self._b)
def __eq__(self, item):
return self._key() == item._key()
def __ne__(self, item):
return self._key() != item._key()
def __hash__(self):
return hash(self._key())
:author: lalinsky@gmail.com (Lukáš Lalinský)
:url: http://stackoverflow.com/questions/1581895/how-check-if-a-task-is-already-in-python-queue
"""
def _init(self, maxsize):
queue.Queue._init(self, maxsize)
self._set_of_items = set()
def _put(self, item):
if item not in self._set_of_items:
queue.Queue._put(self, item)
self._set_of_items.add(item)
else:
# `put` increments `unfinished_tasks` even if we did not put
# anything into the queue here
self.unfinished_tasks -= 1
def _get(self):
item = queue.Queue._get(self)
self._set_of_items.remove(item)
return item
if sys.version_info >= (2, 6, 0):
KEY, PREV, NEXT = list(range(3))
class OrderedSet(collections.MutableSet):
"""
        Implementation based on a doubly-linked list and an internal dictionary.
This design gives :class:`OrderedSet` the same big-Oh running times as
regular sets including O(1) adds, removes, and lookups as well as
O(n) iteration.
.. ADMONITION:: Implementation notes
Runs on Python 2.6 or later (and runs on Python 3.0 or later
without any modifications).
:author: python@rcn.com (Raymond Hettinger)
:url: http://code.activestate.com/recipes/576694/
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[PREV]
curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, _next = self.map.pop(key)
prev[NEXT] = _next
_next[PREV] = prev
def __iter__(self):
end = self.end
curr = end[NEXT]
while curr is not end:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
end = self.end
curr = end[PREV]
while curr is not end:
yield curr[KEY]
curr = curr[PREV]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def __del__(self):
self.clear() # remove circular references
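# Usage sketch: the two queue flavours side by side. OrderedSetQueue drops a
# duplicate while an equal item is still queued; SkipRepeatsQueue only drops
# an item equal to the one queued immediately before it.
if __name__ == '__main__':
    ordered = OrderedSetQueue()
    for item in ('a', 'b', 'a', 'c', 'b'):
        ordered.put(item)
    print(ordered.qsize())  # 3 -> 'a', 'b', 'c'

    repeats = SkipRepeatsQueue()
    for item in ('a', 'a', 'b', 'a'):
        repeats.put(item)
    print(repeats.qsize())  # 3 -> 'a', 'b', 'a'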

View file

@@ -0,0 +1,29 @@
# -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
__all__ = ['queue', 'Event']
try:
import queue
except ImportError:
import Queue as queue
if sys.version_info < (2, 7):
from watchdog.utils.event_backport import Event
else:
from threading import Event

View file

@@ -0,0 +1,198 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Most of this code was obtained from the Python documentation online.
"""Decorator utility functions.
decorators:
- synchronized
- propertyx
- accepts
- returns
- singleton
- attrs
- deprecated
"""
import functools
import warnings
import threading
import sys
def synchronized(lock=None):
"""Decorator that synchronizes a method or a function with a mutex lock.
Example usage:
@synchronized()
def operation(self, a, b):
...
"""
if lock is None:
lock = threading.Lock()
def wrapper(function):
def new_function(*args, **kwargs):
lock.acquire()
try:
return function(*args, **kwargs)
finally:
lock.release()
return new_function
return wrapper
def propertyx(function):
"""Decorator to easily create properties in classes.
Example:
class Angle(object):
def __init__(self, rad):
self._rad = rad
@property
def rad():
def fget(self):
return self._rad
def fset(self, angle):
if isinstance(angle, Angle):
angle = angle.rad
self._rad = float(angle)
Arguments:
- `function`: The function to be decorated.
"""
keys = ('fget', 'fset', 'fdel')
func_locals = {'doc': function.__doc__}
def probe_func(frame, event, arg):
if event == 'return':
locals = frame.f_locals
func_locals.update(dict((k, locals.get(k)) for k in keys))
sys.settrace(None)
return probe_func
sys.settrace(probe_func)
function()
return property(**func_locals)
def accepts(*types):
"""Decorator to ensure that the decorated function accepts the given types as arguments.
Example:
@accepts(int, (int,float))
@returns((int,float))
def func(arg1, arg2):
return arg1 * arg2
"""
def check_accepts(f):
assert len(types) == f.__code__.co_argcount
def new_f(*args, **kwds):
for (a, t) in zip(args, types):
assert isinstance(a, t),\
"arg %r does not match %s" % (a, t)
return f(*args, **kwds)
new_f.__name__ = f.__name__
return new_f
return check_accepts
def returns(rtype):
"""Decorator to ensure that the decorated function returns the given
type as argument.
Example:
@accepts(int, (int,float))
@returns((int,float))
def func(arg1, arg2):
return arg1 * arg2
"""
def check_returns(f):
def new_f(*args, **kwds):
result = f(*args, **kwds)
assert isinstance(result, rtype),\
"return value %r does not match %s" % (result, rtype)
return result
new_f.__name__ = f.__name__
return new_f
return check_returns
def singleton(cls):
"""Decorator to ensures a class follows the singleton pattern.
Example:
@singleton
class MyClass:
...
"""
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
def attrs(**kwds):
"""Decorator to add attributes to a function.
Example:
@attrs(versionadded="2.2",
author="Guido van Rossum")
def mymethod(f):
...
"""
def decorate(f):
for k in kwds:
setattr(f, k, kwds[k])
return f
return decorate
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
## Usage examples ##
@deprecated
def my_func():
pass
@other_decorators_must_be_upper
@deprecated
def my_func():
pass
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.warn_explicit(
"Call to deprecated function %(funcname)s." % {
'funcname': func.__name__,
},
category=DeprecationWarning,
filename=func.__code__.co_filename,
lineno=func.__code__.co_firstlineno + 1
)
return func(*args, **kwargs)
return new_func
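# Usage sketch: combining two of the decorators above. The shared lock from
# synchronized() serialises calls to increment(), and calling old_increment()
# emits a DeprecationWarning.
if __name__ == '__main__':
    counter = {'value': 0}

    @synchronized()
    def increment():
        counter['value'] += 1

    @deprecated
    def old_increment():
        increment()

    warnings.simplefilter('always', DeprecationWarning)
    old_increment()
    increment()
    print(counter['value'])  # 2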

View file

@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import threading
from collections import deque
class DelayedQueue(object):
def __init__(self, delay):
self.delay = delay
self._lock = threading.Lock()
self._not_empty = threading.Condition(self._lock)
self._queue = deque()
self._closed = False
def put(self, element):
"""Add element to queue."""
self._lock.acquire()
self._queue.append((element, time.time()))
self._not_empty.notify()
self._lock.release()
def close(self):
"""Close queue, indicating no more items will be added."""
self._closed = True
# Interrupt the blocking _not_empty.wait() call in get
self._not_empty.acquire()
self._not_empty.notify()
self._not_empty.release()
def get(self):
"""Remove and return an element from the queue, or this queue has been
closed raise the Closed exception.
"""
while True:
# wait for element to be added to queue
self._not_empty.acquire()
while len(self._queue) == 0 and not self._closed:
self._not_empty.wait()
if self._closed:
self._not_empty.release()
return None
head, insert_time = self._queue[0]
self._not_empty.release()
# wait for delay
time_left = insert_time + self.delay - time.time()
while time_left > 0:
time.sleep(time_left)
time_left = insert_time + self.delay - time.time()
# return element if it's still in the queue
self._lock.acquire()
try:
if len(self._queue) > 0 and self._queue[0][0] is head:
self._queue.popleft()
return head
finally:
self._lock.release()
def remove(self, predicate):
"""Remove and return the first items for which predicate is True,
ignoring delay."""
try:
self._lock.acquire()
for i, (elem, t) in enumerate(self._queue):
if predicate(elem):
del self._queue[i]
return elem
finally:
self._lock.release()
return None
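# Usage sketch: elements only become visible to get() once `delay` seconds
# have passed since they were put, while remove() can pull a matching element
# out immediately. The half-second delay here is an arbitrary example value.
if __name__ == '__main__':
    queue = DelayedQueue(0.5)
    queue.put('event-a')
    queue.put('event-b')
    print(queue.remove(lambda element: element == 'event-b'))  # returned at once
    print(queue.get())  # 'event-a', after roughly 0.5 seconds
    queue.close()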

View file

@@ -0,0 +1,294 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.utils.dirsnapshot
:synopsis: Directory snapshots and comparison.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
.. ADMONITION:: Where are the moved events? They "disappeared"
This implementation does not take partition boundaries
into consideration. It will only work when the directory
tree is entirely on the same file system. More specifically,
any part of the code that depends on inode numbers can
break if partition boundaries are crossed. In these cases,
the snapshot diff will represent file/directory movement as
created and deleted events.
Classes
-------
.. autoclass:: DirectorySnapshot
:members:
:show-inheritance:
.. autoclass:: DirectorySnapshotDiff
:members:
:show-inheritance:
"""
import errno
import os
from stat import S_ISDIR
from watchdog.utils import platform
from watchdog.utils import stat as default_stat
class DirectorySnapshotDiff(object):
"""
Compares two directory snapshots and creates an object that represents
the difference between the two snapshots.
:param ref:
The reference directory snapshot.
:type ref:
:class:`DirectorySnapshot`
:param snapshot:
The directory snapshot which will be compared
with the reference snapshot.
:type snapshot:
:class:`DirectorySnapshot`
"""
def __init__(self, ref, snapshot):
created = snapshot.paths - ref.paths
deleted = ref.paths - snapshot.paths
# check that all unchanged paths have the same inode
for path in ref.paths & snapshot.paths:
if ref.inode(path) != snapshot.inode(path):
created.add(path)
deleted.add(path)
# find moved paths
moved = set()
for path in set(deleted):
inode = ref.inode(path)
new_path = snapshot.path(inode)
if new_path:
# file is not deleted but moved
deleted.remove(path)
moved.add((path, new_path))
for path in set(created):
inode = snapshot.inode(path)
old_path = ref.path(inode)
if old_path:
created.remove(path)
moved.add((old_path, path))
# find modified paths
# first check paths that have not moved
modified = set()
for path in ref.paths & snapshot.paths:
if ref.inode(path) == snapshot.inode(path):
if ref.mtime(path) != snapshot.mtime(path):
modified.add(path)
for (old_path, new_path) in moved:
if ref.mtime(old_path) != snapshot.mtime(new_path):
modified.add(old_path)
self._dirs_created = [path for path in created if snapshot.isdir(path)]
self._dirs_deleted = [path for path in deleted if ref.isdir(path)]
self._dirs_modified = [path for path in modified if ref.isdir(path)]
self._dirs_moved = [(frm, to) for (frm, to) in moved if ref.isdir(frm)]
self._files_created = list(created - set(self._dirs_created))
self._files_deleted = list(deleted - set(self._dirs_deleted))
self._files_modified = list(modified - set(self._dirs_modified))
self._files_moved = list(moved - set(self._dirs_moved))
@property
def files_created(self):
"""List of files that were created."""
return self._files_created
@property
def files_deleted(self):
"""List of files that were deleted."""
return self._files_deleted
@property
def files_modified(self):
"""List of files that were modified."""
return self._files_modified
@property
def files_moved(self):
"""
List of files that were moved.
Each event is a two-tuple the first item of which is the path
that has been renamed to the second item in the tuple.
"""
return self._files_moved
@property
def dirs_modified(self):
"""
List of directories that were modified.
"""
return self._dirs_modified
@property
def dirs_moved(self):
"""
List of directories that were moved.
Each event is a two-tuple the first item of which is the path
that has been renamed to the second item in the tuple.
"""
return self._dirs_moved
@property
def dirs_deleted(self):
"""
List of directories that were deleted.
"""
return self._dirs_deleted
@property
def dirs_created(self):
"""
List of directories that were created.
"""
return self._dirs_created
class DirectorySnapshot(object):
"""
A snapshot of stat information of files in a directory.
:param path:
The directory path for which a snapshot should be taken.
:type path:
``str``
:param recursive:
``True`` if the entire directory tree should be included in the
snapshot; ``False`` otherwise.
:type recursive:
``bool``
    :param walker_callback:
        A function with the signature ``walker_callback(path, stat_info)``
        which will be called for every entry in the directory tree.
        .. deprecated:: 0.7.2
    :param stat:
        Use custom stat function that returns a stat structure for path.
        Currently only st_dev, st_ino, st_mode and st_mtime are needed.
:param listdir:
Use custom listdir function. See ``os.listdir`` for details.
"""
def __init__(self, path, recursive=True,
walker_callback=(lambda p, s: None),
stat=default_stat,
listdir=os.listdir):
self._stat_info = {}
self._inode_to_path = {}
st = stat(path)
self._stat_info[path] = st
self._inode_to_path[(st.st_ino, st.st_dev)] = path
def walk(root):
try:
paths = [os.path.join(root, name) for name in listdir(root)]
except OSError as e:
                # Directory may have been deleted between finding it in the
                # directory list of its parent and trying to list its contents.
                # If this happens we treat it as empty.
if e.errno == errno.ENOENT:
return
else:
raise
entries = []
for p in paths:
try:
entries.append((p, stat(p)))
except OSError:
continue
for _ in entries:
yield _
if recursive:
for path, st in entries:
if S_ISDIR(st.st_mode):
for _ in walk(path):
yield _
for p, st in walk(path):
i = (st.st_ino, st.st_dev)
self._inode_to_path[i] = p
self._stat_info[p] = st
walker_callback(p, st)
@property
def paths(self):
"""
Set of file/directory paths in the snapshot.
"""
return set(self._stat_info.keys())
def path(self, id):
"""
Returns path for id. None if id is unknown to this snapshot.
"""
return self._inode_to_path.get(id)
def inode(self, path):
""" Returns an id for path. """
st = self._stat_info[path]
return (st.st_ino, st.st_dev)
def isdir(self, path):
return S_ISDIR(self._stat_info[path].st_mode)
def mtime(self, path):
return self._stat_info[path].st_mtime
def stat_info(self, path):
"""
Returns a stat information object for the specified path from
the snapshot.
Attached information is subject to change. Do not use unless
you specify `stat` in constructor. Use :func:`inode`, :func:`mtime`,
:func:`isdir` instead.
:param path:
The path for which stat information should be obtained
from a snapshot.
"""
return self._stat_info[path]
def __sub__(self, previous_dirsnap):
"""Allow subtracting a DirectorySnapshot object instance from
another.
:returns:
A :class:`DirectorySnapshotDiff` object.
"""
return DirectorySnapshotDiff(previous_dirsnap, self)
def __str__(self):
return self.__repr__()
def __repr__(self):
return str(self._stat_info)
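# Usage sketch: diffing two snapshots of a temporary scratch directory. The
# subtraction operator yields a DirectorySnapshotDiff, which is how the
# emitters in this commit turn "something changed" into concrete events.
if __name__ == '__main__':
    import tempfile

    scratch = tempfile.mkdtemp()
    before = DirectorySnapshot(scratch, recursive=True)

    with open(os.path.join(scratch, 'example.txt'), 'w') as handle:
        handle.write('hello')

    after = DirectorySnapshot(scratch, recursive=True)
    diff = after - before
    print(diff.files_created)   # [<scratch>/example.txt]
    print(diff.dirs_modified)   # usually the scratch directory itself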

View file

@@ -0,0 +1,157 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# echo.py: Tracing function calls using Python decorators.
#
# Written by Thomas Guest <tag@wordaligned.org>
# Please see http://wordaligned.org/articles/echo
#
# Place into the public domain.
""" Echo calls made to functions and methods in a module.
"Echoing" a function call means printing out the name of the function
and the values of its arguments before making the call (which is more
commonly referred to as "tracing", but Python already has a trace module).
Example: to echo calls made to functions in "my_module" do:
import echo
import my_module
echo.echo_module(my_module)
Example: to echo calls made to functions in "my_module.my_class" do:
echo.echo_class(my_module.my_class)
Alternatively, echo.echo can be used to decorate functions. Calls to the
decorated function will be echoed.
Example:
@echo.echo
def my_function(args):
pass
"""
import inspect
import sys
def name(item):
" Return an item's name. "
return item.__name__
def is_classmethod(instancemethod, klass):
" Determine if an instancemethod is a classmethod. "
return inspect.ismethod(instancemethod) and instancemethod.__self__ is klass
def is_static_method(method, klass):
"""Returns True if method is an instance method of klass."""
for c in klass.mro():
if name(method) in c.__dict__:
return isinstance(c.__dict__[name(method)], staticmethod)
else:
return False
def is_class_private_name(name):
" Determine if a name is a class private name. "
# Exclude system defined names such as __init__, __add__ etc
return name.startswith("__") and not name.endswith("__")
def method_name(method):
""" Return a method's name.
This function returns the name the method is accessed by from
outside the class (i.e. it prefixes "private" methods appropriately).
"""
mname = name(method)
if is_class_private_name(mname):
mname = "_%s%s" % (name(method.__self__.__class__), mname)
return mname
def format_arg_value(arg_val):
""" Return a string representing a (name, value) pair.
>>> format_arg_value(('x', (1, 2, 3)))
'x=(1, 2, 3)'
"""
arg, val = arg_val
return "%s=%r" % (arg, val)
def echo(fn, write=sys.stdout.write):
""" Echo calls to a function.
Returns a decorated version of the input function which "echoes" calls
made to it by writing out the function's name and the arguments it was
called with.
"""
import functools
# Unpack function's arg count, arg names, arg defaults
code = fn.__code__
argcount = code.co_argcount
argnames = code.co_varnames[:argcount]
fn_defaults = fn.__defaults__ or list()
argdefs = dict(list(zip(argnames[-len(fn_defaults):], fn_defaults)))
@functools.wraps(fn)
def wrapped(*v, **k):
# Collect function arguments by chaining together positional,
# defaulted, extra positional and keyword arguments.
positional = list(map(format_arg_value, list(zip(argnames, v))))
defaulted = [format_arg_value((a, argdefs[a]))
for a in argnames[len(v):] if a not in k]
nameless = list(map(repr, v[argcount:]))
keyword = list(map(format_arg_value, list(k.items())))
args = positional + defaulted + nameless + keyword
write("%s(%s)\n" % (name(fn), ", ".join(args)))
return fn(*v, **k)
return wrapped
def echo_instancemethod(klass, method, write=sys.stdout.write):
""" Change an instancemethod so that calls to it are echoed.
Replacing a classmethod is a little more tricky.
See: http://www.python.org/doc/current/ref/types.html
"""
mname = method_name(method)
never_echo = "__str__", "__repr__", # Avoid recursion printing method calls
if mname in never_echo:
pass
elif is_classmethod(method, klass):
setattr(klass, mname, classmethod(echo(method.__func__, write)))
else:
setattr(klass, mname, echo(method, write))
def echo_class(klass, write=sys.stdout.write):
""" Echo calls to class methods and static functions
"""
for _, method in inspect.getmembers(klass, inspect.ismethod):
        # In Python 3, only class methods are returned here; in Python 2,
        # instance methods are returned too.
echo_instancemethod(klass, method, write)
for _, fn in inspect.getmembers(klass, inspect.isfunction):
if is_static_method(fn, klass):
setattr(klass, name(fn), staticmethod(echo(fn, write)))
else:
            # Not a class method or a static method, so it must be an
            # instance method. This branch should only be reached on
            # Python 3, where instance methods are plain functions.
echo_instancemethod(klass, fn, write)
def echo_module(mod, write=sys.stdout.write):
""" Echo calls to functions and methods in a module.
"""
for fname, fn in inspect.getmembers(mod, inspect.isfunction):
setattr(mod, fname, echo(fn, write))
for _, klass in inspect.getmembers(mod, inspect.isclass):
echo_class(klass, write)
if __name__ == "__main__":
import doctest
optionflags = doctest.ELLIPSIS
doctest.testfile('echoexample.txt', optionflags=optionflags)
doctest.testmod(optionflags=optionflags)

View file

@@ -0,0 +1,41 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Backport of Event from py2.7 (method wait in py2.6 returns None)
from threading import Condition, Lock
class Event(object):
def __init__(self,):
self.__cond = Condition(Lock())
self.__flag = False
def isSet(self):
return self.__flag
is_set = isSet
def set(self):
self.__cond.acquire()
try:
self.__flag = True
self.__cond.notify_all()
finally:
self.__cond.release()
def clear(self):
self.__cond.acquire()
try:
self.__flag = False
finally:
self.__cond.release()
def wait(self, timeout=None):
self.__cond.acquire()
try:
if not self.__flag:
self.__cond.wait(timeout)
return self.__flag
finally:
self.__cond.release()
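if __name__ == '__main__':
    # Small demonstration, not part of the original backport: wait() returns
    # the state of the flag, which is the behaviour that py2.6 lacked.
    import threading
    ev = Event()
    threading.Timer(0.1, ev.set).start()
    print(ev.wait(1.0))  # prints True once the timer has fired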

View file

@@ -0,0 +1,40 @@
# The MIT License (MIT)
# Copyright (c) 2013 Peter M. Elias
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
def import_module(target, relative_to=None):
target_parts = target.split('.')
target_depth = target_parts.count('')
target_path = target_parts[target_depth:]
target = target[target_depth:]
fromlist = [target]
if target_depth and relative_to:
relative_parts = relative_to.split('.')
relative_to = '.'.join(relative_parts[:-(target_depth - 1) or None])
if len(target_path) > 1:
            relative_to = '.'.join(list(filter(None, [relative_to])) + target_path[:-1])
fromlist = target_path[-1:]
target = fromlist[0]
elif not relative_to:
fromlist = []
mod = __import__(relative_to or target, globals(), locals(), fromlist)
return getattr(mod, target, mod)
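if __name__ == '__main__':
    # Usage sketch, not part of the original module. An absolute target
    # returns the module itself; a dotted-relative target is resolved against
    # ``relative_to`` and the named attribute/submodule is returned.
    collections_mod = import_module('collections')
    path_mod = import_module('.path', relative_to='os')
    print(collections_mod.__name__)   # -> 'collections'
    print(path_mod.join('a', 'b'))    # -> 'a/b' (or 'a\\b' on Windows)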

View file

@@ -0,0 +1,57 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
PLATFORM_WINDOWS = 'windows'
PLATFORM_LINUX = 'linux'
PLATFORM_BSD = 'bsd'
PLATFORM_DARWIN = 'darwin'
PLATFORM_UNKNOWN = 'unknown'
def get_platform_name():
if sys.platform.startswith("win"):
return PLATFORM_WINDOWS
elif sys.platform.startswith('darwin'):
return PLATFORM_DARWIN
elif sys.platform.startswith('linux'):
return PLATFORM_LINUX
elif sys.platform.startswith(('dragonfly', 'freebsd', 'netbsd', 'openbsd', )):
return PLATFORM_BSD
else:
return PLATFORM_UNKNOWN
__platform__ = get_platform_name()
def is_linux():
return __platform__ == PLATFORM_LINUX
def is_bsd():
return __platform__ == PLATFORM_BSD
def is_darwin():
return __platform__ == PLATFORM_DARWIN
def is_windows():
return __platform__ == PLATFORM_WINDOWS
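if __name__ == '__main__':
    # Tiny illustration, not part of the original module: these helpers are
    # what watchdog uses to decide which observer backend to load.
    print('platform: %s' % get_platform_name())
    print('linux=%s bsd=%s darwin=%s windows=%s'
          % (is_linux(), is_bsd(), is_darwin(), is_windows()))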

View file

@@ -0,0 +1,64 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Will Bond <will@wbond.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from watchdog.utils import platform
try:
# Python 2
str_cls = unicode
bytes_cls = str
except NameError:
# Python 3
str_cls = str
bytes_cls = bytes
# This is used by Linux when the locale seems to be improperly set. UTF-8 tends
# to be the encoding used by all distros, so this is a good fallback.
fs_fallback_encoding = 'utf-8'
fs_encoding = sys.getfilesystemencoding() or fs_fallback_encoding
def encode(path):
if isinstance(path, str_cls):
try:
path = path.encode(fs_encoding, 'strict')
except UnicodeEncodeError:
if not platform.is_linux():
raise
path = path.encode(fs_fallback_encoding, 'strict')
return path
def decode(path):
if isinstance(path, bytes_cls):
try:
path = path.decode(fs_encoding, 'strict')
except UnicodeDecodeError:
if not platform.is_linux():
raise
path = path.decode(fs_fallback_encoding, 'strict')
return path
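if __name__ == '__main__':
    # Round-trip sketch, not part of the original module: text paths are
    # encoded to the file-system encoding and decoded back unchanged.
    original = u'/tmp/sample.txt'
    raw = encode(original)
    assert isinstance(raw, bytes_cls)
    assert decode(raw) == original
    print('fs encoding: %s' % fs_encoding)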

View file

@@ -0,0 +1,123 @@
# -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.utils.win32stat
:synopsis: Implementation of stat with st_ino and st_dev support.
Functions
---------
.. autofunction:: stat
"""
import ctypes
import ctypes.wintypes
import stat as stdstat
from collections import namedtuple
INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
OPEN_EXISTING = 3
FILE_READ_ATTRIBUTES = 0x80
FILE_ATTRIBUTE_NORMAL = 0x80
FILE_ATTRIBUTE_READONLY = 0x1
FILE_ATTRIBUTE_DIRECTORY = 0x10
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000
class FILETIME(ctypes.Structure):
_fields_ = [("dwLowDateTime", ctypes.wintypes.DWORD),
("dwHighDateTime", ctypes.wintypes.DWORD)]
class BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
_fields_ = [('dwFileAttributes', ctypes.wintypes.DWORD),
('ftCreationTime', FILETIME),
('ftLastAccessTime', FILETIME),
('ftLastWriteTime', FILETIME),
('dwVolumeSerialNumber', ctypes.wintypes.DWORD),
('nFileSizeHigh', ctypes.wintypes.DWORD),
('nFileSizeLow', ctypes.wintypes.DWORD),
('nNumberOfLinks', ctypes.wintypes.DWORD),
('nFileIndexHigh', ctypes.wintypes.DWORD),
('nFileIndexLow', ctypes.wintypes.DWORD)]
CreateFile = ctypes.windll.kernel32.CreateFileW
CreateFile.restype = ctypes.wintypes.HANDLE
CreateFile.argtypes = (
ctypes.c_wchar_p,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.c_void_p,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
ctypes.wintypes.HANDLE,
)
GetFileInformationByHandle = ctypes.windll.kernel32.GetFileInformationByHandle
GetFileInformationByHandle.restype = ctypes.wintypes.BOOL
GetFileInformationByHandle.argtypes = (
ctypes.wintypes.HANDLE,
    ctypes.POINTER(BY_HANDLE_FILE_INFORMATION),
)
CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.restype = ctypes.wintypes.BOOL
CloseHandle.argtypes = (ctypes.wintypes.HANDLE,)
StatResult = namedtuple('StatResult', 'st_dev st_ino st_mode st_mtime')
def _to_mode(attr):
m = 0
if (attr & FILE_ATTRIBUTE_DIRECTORY):
m |= stdstat.S_IFDIR | 0o111
else:
m |= stdstat.S_IFREG
if (attr & FILE_ATTRIBUTE_READONLY):
m |= 0o444
else:
m |= 0o666
return m
def _to_unix_time(ft):
t = (ft.dwHighDateTime) << 32 | ft.dwLowDateTime
return (t / 10000000) - 11644473600
def stat(path):
hfile = CreateFile(path,
FILE_READ_ATTRIBUTES,
0,
None,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL | FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
None)
if hfile == INVALID_HANDLE_VALUE:
raise ctypes.WinError()
info = BY_HANDLE_FILE_INFORMATION()
r = GetFileInformationByHandle(hfile, info)
CloseHandle(hfile)
if not r:
raise ctypes.WinError()
return StatResult(st_dev=info.dwVolumeSerialNumber,
st_ino=(info.nFileIndexHigh << 32) + info.nFileIndexLow,
st_mode=_to_mode(info.dwFileAttributes),
st_mtime=_to_unix_time(info.ftLastWriteTime)
)
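if __name__ == '__main__':
    # Manual check, not part of the original module. This module only
    # imports on Windows (kernel32 is bound at import time above), so the
    # example path is a Windows one.
    result = stat(u'C:\\Windows')
    print(result.st_dev, result.st_ino, oct(result.st_mode), result.st_mtime)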

View file

@@ -0,0 +1,28 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# When updating this version number, please update the
# ``docs/source/global.rst.inc`` file as well.
VERSION_MAJOR = 0
VERSION_MINOR = 8
VERSION_BUILD = 3
VERSION_INFO = (VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD)
VERSION_STRING = "%d.%d.%d" % VERSION_INFO
__version__ = VERSION_INFO

View file

@@ -0,0 +1,577 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.watchmedo
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:synopsis: ``watchmedo`` shell script utility.
"""
import os.path
import sys
import yaml
import time
import logging
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from argh import arg, aliases, ArghParser, expects_obj
from watchdog.version import VERSION_STRING
from watchdog.utils import load_class
logging.basicConfig(level=logging.INFO)
CONFIG_KEY_TRICKS = 'tricks'
CONFIG_KEY_PYTHON_PATH = 'python-path'
def path_split(pathname_spec, separator=os.path.sep):
"""
    Splits a pathname specification on a separator.
    :param pathname_spec:
        The pathname specification, i.e. several paths joined by the
        separator.
    :param separator:
        The separator to split on; defaults to ``os.path.sep``, or may be
        user-specified.
"""
return list(pathname_spec.split(separator))
def add_to_sys_path(pathnames, index=0):
"""
Adds specified paths at specified index into the sys.path list.
    :param pathnames:
A list of paths to add to the sys.path
:param index:
(Default 0) The index in the sys.path list where the paths will be
added.
"""
for pathname in pathnames[::-1]:
sys.path.insert(index, pathname)
def load_config(tricks_file_pathname):
"""
Loads the YAML configuration from the specified file.
    :param tricks_file_pathname:
The path to the tricks configuration file.
:returns:
A dictionary of configuration information.
"""
    with open(tricks_file_pathname, 'rb') as f:
        content = f.read()
config = yaml.load(content)
return config
def parse_patterns(patterns_spec, ignore_patterns_spec, separator=';'):
"""
Parses pattern argument specs and returns a two-tuple of
(patterns, ignore_patterns).
"""
patterns = patterns_spec.split(separator)
ignore_patterns = ignore_patterns_spec.split(separator)
if ignore_patterns == ['']:
ignore_patterns = []
return (patterns, ignore_patterns)
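# Example (illustrative, not in the original source): the watchmedo command
# line joins multiple patterns with ';', so
#   parse_patterns('*.py;*.txt', '')    -> (['*.py', '*.txt'], [])
#   parse_patterns('*', '*~;*.swp')     -> (['*'], ['*~', '*.swp'])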
def observe_with(observer, event_handler, pathnames, recursive):
"""
Single observer thread with a scheduled path and event handler.
:param observer:
The observer thread.
:param event_handler:
Event handler which will be called in response to file system events.
:param pathnames:
A list of pathnames to monitor.
:param recursive:
``True`` if recursive; ``False`` otherwise.
"""
for pathname in set(pathnames):
observer.schedule(event_handler, pathname, recursive)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
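# Example (illustrative, not in the original source): observe_with can also be
# driven programmatically, e.g.
#   from watchdog.observers import Observer
#   from watchdog.tricks import LoggerTrick
#   observe_with(Observer(), LoggerTrick(), ['.'], recursive=True)
# which blocks, logging events under the current directory until Ctrl-C.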
def schedule_tricks(observer, tricks, pathname, recursive):
"""
Schedules tricks with the specified observer and for the given watch
path.
:param observer:
The observer thread into which to schedule the trick and watch.
:param tricks:
A list of tricks.
:param pathname:
A path name which should be watched.
:param recursive:
``True`` if recursive; ``False`` otherwise.
"""
for trick in tricks:
for name, value in list(trick.items()):
TrickClass = load_class(name)
handler = TrickClass(**value)
trick_pathname = getattr(handler, 'source_directory', None) or pathname
observer.schedule(handler, trick_pathname, recursive)
@aliases('tricks')
@arg('files',
nargs='*',
help='perform tricks from given file')
@arg('--python-path',
default='.',
help='paths separated by %s to add to the python path' % os.path.sep)
@arg('--interval',
'--timeout',
dest='timeout',
default=1.0,
help='use this as the polling interval/blocking timeout')
@arg('--recursive',
default=True,
help='recursively monitor paths')
@expects_obj
def tricks_from(args):
"""
Subcommand to execute tricks from a tricks configuration file.
:param args:
Command line argument options.
"""
from watchdog.observers import Observer
add_to_sys_path(path_split(args.python_path))
observers = []
for tricks_file in args.files:
observer = Observer(timeout=args.timeout)
if not os.path.exists(tricks_file):
raise IOError("cannot find tricks file: %s" % tricks_file)
config = load_config(tricks_file)
try:
tricks = config[CONFIG_KEY_TRICKS]
except KeyError:
raise KeyError("No `%s' key specified in %s." % (
CONFIG_KEY_TRICKS, tricks_file))
if CONFIG_KEY_PYTHON_PATH in config:
add_to_sys_path(config[CONFIG_KEY_PYTHON_PATH])
dir_path = os.path.dirname(tricks_file)
if not dir_path:
dir_path = os.path.relpath(os.getcwd())
schedule_tricks(observer, tricks, dir_path, args.recursive)
observer.start()
observers.append(observer)
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
for o in observers:
o.unschedule_all()
o.stop()
for o in observers:
o.join()
@aliases('generate-tricks-yaml')
@arg('trick_paths',
nargs='*',
help='Dotted paths for all the tricks you want to generate')
@arg('--python-path',
default='.',
help='paths separated by %s to add to the python path' % os.path.sep)
@arg('--append-to-file',
default=None,
help='appends the generated tricks YAML to a file; \
if not specified, prints to standard output')
@arg('-a',
'--append-only',
dest='append_only',
default=False,
help='if --append-to-file is not specified, produces output for \
appending instead of a complete tricks yaml file.')
@expects_obj
def tricks_generate_yaml(args):
"""
Subcommand to generate Yaml configuration for tricks named on the command
line.
:param args:
Command line argument options.
"""
python_paths = path_split(args.python_path)
add_to_sys_path(python_paths)
output = StringIO()
for trick_path in args.trick_paths:
TrickClass = load_class(trick_path)
output.write(TrickClass.generate_yaml())
content = output.getvalue()
output.close()
header = yaml.dump({CONFIG_KEY_PYTHON_PATH: python_paths})
header += "%s:\n" % CONFIG_KEY_TRICKS
if args.append_to_file is None:
# Output to standard output.
if not args.append_only:
content = header + content
sys.stdout.write(content)
else:
if not os.path.exists(args.append_to_file):
content = header + content
        with open(args.append_to_file, 'a') as output:
            output.write(content)
@arg('directories',
nargs='*',
default='.',
help='directories to watch.')
@arg('-p',
'--pattern',
'--patterns',
dest='patterns',
default='*',
help='matches event paths with these patterns (separated by ;).')
@arg('-i',
'--ignore-pattern',
'--ignore-patterns',
dest='ignore_patterns',
default='',
help='ignores event paths with these patterns (separated by ;).')
@arg('-D',
'--ignore-directories',
dest='ignore_directories',
default=False,
help='ignores events for directories')
@arg('-R',
'--recursive',
dest='recursive',
default=False,
help='monitors the directories recursively')
@arg('--interval',
'--timeout',
dest='timeout',
default=1.0,
help='use this as the polling interval/blocking timeout')
@arg('--trace',
default=False,
help='dumps complete dispatching trace')
@arg('--debug-force-polling',
default=False,
help='[debug] forces polling')
@arg('--debug-force-kqueue',
default=False,
help='[debug] forces BSD kqueue(2)')
@arg('--debug-force-winapi',
default=False,
help='[debug] forces Windows API')
@arg('--debug-force-winapi-async',
default=False,
help='[debug] forces Windows API + I/O completion')
@arg('--debug-force-fsevents',
default=False,
help='[debug] forces Mac OS X FSEvents')
@arg('--debug-force-inotify',
default=False,
help='[debug] forces Linux inotify(7)')
@expects_obj
def log(args):
"""
Subcommand to log file system events to the console.
:param args:
Command line argument options.
"""
from watchdog.utils import echo
from watchdog.tricks import LoggerTrick
if args.trace:
echo.echo_class(LoggerTrick)
patterns, ignore_patterns =\
parse_patterns(args.patterns, args.ignore_patterns)
handler = LoggerTrick(patterns=patterns,
ignore_patterns=ignore_patterns,
ignore_directories=args.ignore_directories)
if args.debug_force_polling:
from watchdog.observers.polling import PollingObserver as Observer
elif args.debug_force_kqueue:
from watchdog.observers.kqueue import KqueueObserver as Observer
elif args.debug_force_winapi_async:
from watchdog.observers.read_directory_changes_async import\
WindowsApiAsyncObserver as Observer
elif args.debug_force_winapi:
from watchdog.observers.read_directory_changes import\
WindowsApiObserver as Observer
elif args.debug_force_inotify:
from watchdog.observers.inotify import InotifyObserver as Observer
elif args.debug_force_fsevents:
from watchdog.observers.fsevents import FSEventsObserver as Observer
else:
# Automatically picks the most appropriate observer for the platform
# on which it is running.
from watchdog.observers import Observer
observer = Observer(timeout=args.timeout)
observe_with(observer, handler, args.directories, args.recursive)
@arg('directories',
nargs='*',
default='.',
help='directories to watch')
@arg('-c',
'--command',
dest='command',
default=None,
help='''shell command executed in response to matching events.
These interpolation variables are available to your command string::
${watch_src_path} - event source path;
${watch_dest_path} - event destination path (for moved events);
${watch_event_type} - event type;
${watch_object} - ``file`` or ``directory``
Note::
Please ensure you do not use double quotes (") to quote
your command string. That will force your shell to
interpolate before the command is processed by this
subcommand.
Example option usage::
--command='echo "${watch_src_path}"'
''')
@arg('-p',
'--pattern',
'--patterns',
dest='patterns',
default='*',
help='matches event paths with these patterns (separated by ;).')
@arg('-i',
'--ignore-pattern',
'--ignore-patterns',
dest='ignore_patterns',
default='',
help='ignores event paths with these patterns (separated by ;).')
@arg('-D',
'--ignore-directories',
dest='ignore_directories',
default=False,
help='ignores events for directories')
@arg('-R',
'--recursive',
dest='recursive',
default=False,
help='monitors the directories recursively')
@arg('--interval',
'--timeout',
dest='timeout',
default=1.0,
help='use this as the polling interval/blocking timeout')
@arg('-w', '--wait',
dest='wait_for_process',
action='store_true',
default=False,
help="wait for process to finish to avoid multiple simultaneous instances")
@arg('-W', '--drop',
dest='drop_during_process',
action='store_true',
default=False,
help="Ignore events that occur while command is still being executed " \
"to avoid multiple simultaneous instances")
@arg('--debug-force-polling',
default=False,
help='[debug] forces polling')
@expects_obj
def shell_command(args):
"""
Subcommand to execute shell commands in response to file system events.
:param args:
Command line argument options.
"""
from watchdog.tricks import ShellCommandTrick
if not args.command:
args.command = None
if args.debug_force_polling:
from watchdog.observers.polling import PollingObserver as Observer
else:
from watchdog.observers import Observer
patterns, ignore_patterns = parse_patterns(args.patterns,
args.ignore_patterns)
handler = ShellCommandTrick(shell_command=args.command,
patterns=patterns,
ignore_patterns=ignore_patterns,
ignore_directories=args.ignore_directories,
wait_for_process=args.wait_for_process,
drop_during_process=args.drop_during_process)
observer = Observer(timeout=args.timeout)
observe_with(observer, handler, args.directories, args.recursive)
@arg('command',
help='''Long-running command to run in a subprocess.
''')
@arg('command_args',
metavar='arg',
nargs='*',
help='''Command arguments.
Note: Use -- before the command arguments, otherwise watchmedo will
try to interpret them.
''')
@arg('-d',
'--directory',
dest='directories',
metavar='directory',
action='append',
help='Directory to watch. Use another -d or --directory option '
'for each directory.')
@arg('-p',
'--pattern',
'--patterns',
dest='patterns',
default='*',
help='matches event paths with these patterns (separated by ;).')
@arg('-i',
'--ignore-pattern',
'--ignore-patterns',
dest='ignore_patterns',
default='',
help='ignores event paths with these patterns (separated by ;).')
@arg('-D',
'--ignore-directories',
dest='ignore_directories',
default=False,
help='ignores events for directories')
@arg('-R',
'--recursive',
dest='recursive',
default=False,
help='monitors the directories recursively')
@arg('--interval',
'--timeout',
dest='timeout',
default=1.0,
help='use this as the polling interval/blocking timeout')
@arg('--signal',
dest='signal',
default='SIGINT',
help='stop the subprocess with this signal (default SIGINT)')
@arg('--kill-after',
dest='kill_after',
default=10.0,
help='when stopping, kill the subprocess after the specified timeout '
'(default 10)')
@expects_obj
def auto_restart(args):
"""
Subcommand to start a long-running subprocess and restart it
on matched events.
:param args:
Command line argument options.
"""
from watchdog.observers import Observer
from watchdog.tricks import AutoRestartTrick
import signal
import re
if not args.directories:
args.directories = ['.']
# Allow either signal name or number.
if re.match('^SIG[A-Z]+$', args.signal):
stop_signal = getattr(signal, args.signal)
else:
stop_signal = int(args.signal)
# Handle SIGTERM in the same manner as SIGINT so that
# this program has a chance to stop the child process.
def handle_sigterm(_signum, _frame):
raise KeyboardInterrupt()
signal.signal(signal.SIGTERM, handle_sigterm)
patterns, ignore_patterns = parse_patterns(args.patterns,
args.ignore_patterns)
command = [args.command]
command.extend(args.command_args)
handler = AutoRestartTrick(command=command,
patterns=patterns,
ignore_patterns=ignore_patterns,
ignore_directories=args.ignore_directories,
stop_signal=stop_signal,
kill_after=args.kill_after)
handler.start()
observer = Observer(timeout=args.timeout)
observe_with(observer, handler, args.directories, args.recursive)
handler.stop()
epilog = """Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>.
Copyright 2012 Google, Inc.
Licensed under the terms of the Apache license, version 2.0. Please see
LICENSE in the source code for more information."""
parser = ArghParser(epilog=epilog)
parser.add_commands([tricks_from,
tricks_generate_yaml,
log,
shell_command,
auto_restart])
parser.add_argument('--version',
action='version',
version='%(prog)s ' + VERSION_STRING)
def main():
"""Entry-point function."""
parser.dispatch()
if __name__ == '__main__':
main()