Update Python watchdog module to 1.0.2
parent 58eaa14043, commit 750cf953da
29 changed files with 939 additions and 1396 deletions
@@ -1,8 +1,7 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,8 +1,7 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,6 +19,7 @@
 :module: watchdog.events
 :synopsis: File system events and event handlers.
 :author: yesudeep@google.com (Yesudeep Mangalapilly)
+:author: contact@tiger-222.fr (Mickaël Schoentgen)

 Event Classes
 -------------
@@ -85,13 +85,10 @@ Event Handler Classes

 """

-from builtins import object
 import os.path
 import logging
 import re
-from ..pathtools.patterns import match_any_paths
-from .utils import has_attribute
-from .utils import unicode_paths
+from watchdog.utils.patterns import match_any_paths


 EVENT_TYPE_MOVED = 'moved'
@@ -100,7 +97,7 @@ EVENT_TYPE_CREATED = 'created'
 EVENT_TYPE_MODIFIED = 'modified'


-class FileSystemEvent(object):
+class FileSystemEvent:
     """
     Immutable type that represents a file system event that is triggered
     when a change occurs on the monitored file system.
@@ -115,6 +112,14 @@ class FileSystemEvent(object):
     is_directory = False
     """True if event was emitted for a directory; False otherwise."""

+    is_synthetic = False
+    """
+    True if event was synthesized; False otherwise.
+
+    These are events that weren't actually broadcast by the OS, but
+    are presumed to have happened based on other, actual events.
+    """
+
     def __init__(self, src_path):
         self._src_path = src_path

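The new `is_synthetic` flag distinguishes events that watchdog inferred itself (for example, per-file events generated for a moved or newly created directory tree) from events reported directly by the operating system. A minimal sketch of how a consumer might use it; the handler class below is illustrative, not part of this commit:

from watchdog.events import FileSystemEventHandler

class SkipSyntheticHandler(FileSystemEventHandler):
    """Hypothetical handler that ignores events watchdog synthesized itself."""

    def on_any_event(self, event):
        if getattr(event, "is_synthetic", False):
            return  # not broadcast by the OS, only inferred from other events
        print(event.event_type, event.src_path)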
@@ -159,7 +164,7 @@ class FileSystemMovedEvent(FileSystemEvent):
     event_type = EVENT_TYPE_MOVED

     def __init__(self, src_path, dest_path):
-        super(FileSystemMovedEvent, self).__init__(src_path)
+        super().__init__(src_path)
         self._dest_path = dest_path

     @property
@@ -190,56 +195,22 @@ class FileDeletedEvent(FileSystemEvent):

     event_type = EVENT_TYPE_DELETED

-    def __init__(self, src_path):
-        super(FileDeletedEvent, self).__init__(src_path)
-
-    def __repr__(self):
-        return "<%(class_name)s: src_path=%(src_path)r>" %\
-               dict(class_name=self.__class__.__name__,
-                    src_path=self.src_path)
-

 class FileModifiedEvent(FileSystemEvent):
     """File system event representing file modification on the file system."""

     event_type = EVENT_TYPE_MODIFIED

-    def __init__(self, src_path):
-        super(FileModifiedEvent, self).__init__(src_path)
-
-    def __repr__(self):
-        return ("<%(class_name)s: src_path=%(src_path)r>"
-                ) % (dict(class_name=self.__class__.__name__,
-                          src_path=self.src_path))
-

 class FileCreatedEvent(FileSystemEvent):
     """File system event representing file creation on the file system."""

     event_type = EVENT_TYPE_CREATED

-    def __init__(self, src_path):
-        super(FileCreatedEvent, self).__init__(src_path)
-
-    def __repr__(self):
-        return ("<%(class_name)s: src_path=%(src_path)r>"
-                ) % (dict(class_name=self.__class__.__name__,
-                          src_path=self.src_path))
-

 class FileMovedEvent(FileSystemMovedEvent):
     """File system event representing file movement on the file system."""

-    def __init__(self, src_path, dest_path):
-        super(FileMovedEvent, self).__init__(src_path, dest_path)
-
-    def __repr__(self):
-        return ("<%(class_name)s: src_path=%(src_path)r, "
-                "dest_path=%(dest_path)r>"
-                ) % (dict(class_name=self.__class__.__name__,
-                          src_path=self.src_path,
-                          dest_path=self.dest_path))
-

 # Directory events.

@@ -250,14 +221,6 @@ class DirDeletedEvent(FileSystemEvent):
     event_type = EVENT_TYPE_DELETED
     is_directory = True

-    def __init__(self, src_path):
-        super(DirDeletedEvent, self).__init__(src_path)
-
-    def __repr__(self):
-        return ("<%(class_name)s: src_path=%(src_path)r>"
-                ) % (dict(class_name=self.__class__.__name__,
-                          src_path=self.src_path))
-

 class DirModifiedEvent(FileSystemEvent):
     """
@@ -267,14 +230,6 @@ class DirModifiedEvent(FileSystemEvent):
     event_type = EVENT_TYPE_MODIFIED
     is_directory = True

-    def __init__(self, src_path):
-        super(DirModifiedEvent, self).__init__(src_path)
-
-    def __repr__(self):
-        return ("<%(class_name)s: src_path=%(src_path)r>"
-                ) % (dict(class_name=self.__class__.__name__,
-                          src_path=self.src_path))
-

 class DirCreatedEvent(FileSystemEvent):
     """File system event representing directory creation on the file system."""
@@ -282,32 +237,14 @@ class DirCreatedEvent(FileSystemEvent):
     event_type = EVENT_TYPE_CREATED
     is_directory = True

-    def __init__(self, src_path):
-        super(DirCreatedEvent, self).__init__(src_path)
-
-    def __repr__(self):
-        return ("<%(class_name)s: src_path=%(src_path)r>"
-                ) % (dict(class_name=self.__class__.__name__,
-                          src_path=self.src_path))
-

 class DirMovedEvent(FileSystemMovedEvent):
     """File system event representing directory movement on the file system."""

     is_directory = True

-    def __init__(self, src_path, dest_path):
-        super(DirMovedEvent, self).__init__(src_path, dest_path)
-
-    def __repr__(self):
-        return ("<%(class_name)s: src_path=%(src_path)r, "
-                "dest_path=%(dest_path)r>"
-                ) % (dict(class_name=self.__class__.__name__,
-                          src_path=self.src_path,
-                          dest_path=self.dest_path))
-


-class FileSystemEventHandler(object):
+class FileSystemEventHandler:
     """
     Base file system event handler that you can override methods from.
     """
@@ -321,14 +258,12 @@ class FileSystemEventHandler(object):
             :class:`FileSystemEvent`
         """
         self.on_any_event(event)
-        _method_map = {
-            EVENT_TYPE_MODIFIED: self.on_modified,
-            EVENT_TYPE_MOVED: self.on_moved,
+        {
             EVENT_TYPE_CREATED: self.on_created,
             EVENT_TYPE_DELETED: self.on_deleted,
-        }
-        event_type = event.event_type
-        _method_map[event_type](event)
+            EVENT_TYPE_MODIFIED: self.on_modified,
+            EVENT_TYPE_MOVED: self.on_moved,
+        }[event.event_type](event)

     def on_any_event(self, event):
         """Catch-all event handler.
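After this change, `dispatch()` calls `on_any_event()` and then looks the type-specific callback up in an inline dict keyed by `event.event_type`, instead of building a temporary `_method_map`. A sketch of the equivalent subclass-side usage (the handler below is hypothetical):

from watchdog.events import FileSystemEventHandler

class PrintingHandler(FileSystemEventHandler):
    def on_created(self, event):
        print("created:", event.src_path)

    def on_moved(self, event):
        print("moved:", event.src_path, "->", event.dest_path)

# handler.dispatch(event) calls on_any_event() first, then the matching
# on_created / on_deleted / on_modified / on_moved method.
handler = PrintingHandler()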
@@ -383,7 +318,7 @@ class PatternMatchingEventHandler(FileSystemEventHandler):

     def __init__(self, patterns=None, ignore_patterns=None,
                  ignore_directories=False, case_sensitive=False):
-        super(PatternMatchingEventHandler, self).__init__()
+        super().__init__()

         self._patterns = patterns
         self._ignore_patterns = ignore_patterns
@@ -435,24 +370,16 @@ class PatternMatchingEventHandler(FileSystemEventHandler):
             return

         paths = []
-        if has_attribute(event, 'dest_path'):
-            paths.append(unicode_paths.decode(event.dest_path))
+        if hasattr(event, 'dest_path'):
+            paths.append(os.fsdecode(event.dest_path))
         if event.src_path:
-            paths.append(unicode_paths.decode(event.src_path))
+            paths.append(os.fsdecode(event.src_path))

         if match_any_paths(paths,
                            included_patterns=self.patterns,
                            excluded_patterns=self.ignore_patterns,
                            case_sensitive=self.case_sensitive):
-            self.on_any_event(event)
-            _method_map = {
-                EVENT_TYPE_MODIFIED: self.on_modified,
-                EVENT_TYPE_MOVED: self.on_moved,
-                EVENT_TYPE_CREATED: self.on_created,
-                EVENT_TYPE_DELETED: self.on_deleted,
-            }
-            event_type = event.event_type
-            _method_map[event_type](event)
+            super().dispatch(event)


 class RegexMatchingEventHandler(FileSystemEventHandler):
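With the pattern handler now delegating to `super().dispatch(event)` once the pattern check passes, subclasses only need to override the `on_*` callbacks. An illustrative sketch (patterns and paths are made up):

from watchdog.events import PatternMatchingEventHandler

class LogFileHandler(PatternMatchingEventHandler):
    def __init__(self):
        super().__init__(patterns=["*.log"], ignore_patterns=["*.tmp"],
                         ignore_directories=True, case_sensitive=False)

    def on_modified(self, event):
        print("log changed:", event.src_path)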
@@ -460,10 +387,14 @@ class RegexMatchingEventHandler(FileSystemEventHandler):
     Matches given regexes with file paths associated with occurring events.
     """

-    def __init__(self, regexes=[r".*"], ignore_regexes=[],
+    def __init__(self, regexes=None, ignore_regexes=None,
                  ignore_directories=False, case_sensitive=False):
-        super(RegexMatchingEventHandler, self).__init__()
+        super().__init__()

+        if regexes is None:
+            regexes = [r".*"]
+        if ignore_regexes is None:
+            ignore_regexes = []
         if case_sensitive:
             self._regexes = [re.compile(r) for r in regexes]
             self._ignore_regexes = [re.compile(r) for r in ignore_regexes]
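Replacing the mutable default arguments (`regexes=[r".*"]`, `ignore_regexes=[]`) with `None` sentinels avoids sharing one list object across every instance. Hypothetical usage sketch:

from watchdog.events import RegexMatchingEventHandler

# Passing nothing still matches everything, as before.
catch_all = RegexMatchingEventHandler()

# Only react to .py files, ignoring anything under a "build" directory.
py_handler = RegexMatchingEventHandler(regexes=[r".*\.py$"],
                                       ignore_regexes=[r".*/build/.*"])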
@@ -518,60 +449,50 @@ class RegexMatchingEventHandler(FileSystemEventHandler):
             return

         paths = []
-        if has_attribute(event, 'dest_path'):
-            paths.append(unicode_paths.decode(event.dest_path))
+        if hasattr(event, 'dest_path'):
+            paths.append(os.fsdecode(event.dest_path))
         if event.src_path:
-            paths.append(unicode_paths.decode(event.src_path))
+            paths.append(os.fsdecode(event.src_path))

         if any(r.match(p) for r in self.ignore_regexes for p in paths):
             return

         if any(r.match(p) for r in self.regexes for p in paths):
-            self.on_any_event(event)
-            _method_map = {
-                EVENT_TYPE_MODIFIED: self.on_modified,
-                EVENT_TYPE_MOVED: self.on_moved,
-                EVENT_TYPE_CREATED: self.on_created,
-                EVENT_TYPE_DELETED: self.on_deleted,
-            }
-            event_type = event.event_type
-            _method_map[event_type](event)
+            super().dispatch(event)


 class LoggingEventHandler(FileSystemEventHandler):
     """Logs all the events captured."""

+    def __init__(self, logger=None):
+        super().__init__()
+
+        self.logger = logger or logging.root
+
     def on_moved(self, event):
-        super(LoggingEventHandler, self).on_moved(event)
+        super().on_moved(event)

         what = 'directory' if event.is_directory else 'file'
-        logging.info("Moved %s: from %s to %s", what, event.src_path,
-                     event.dest_path)
+        self.logger.info("Moved %s: from %s to %s", what, event.src_path,
+                         event.dest_path)

     def on_created(self, event):
-        super(LoggingEventHandler, self).on_created(event)
+        super().on_created(event)

         what = 'directory' if event.is_directory else 'file'
-        logging.info("Created %s: %s", what, event.src_path)
+        self.logger.info("Created %s: %s", what, event.src_path)

     def on_deleted(self, event):
-        super(LoggingEventHandler, self).on_deleted(event)
+        super().on_deleted(event)

         what = 'directory' if event.is_directory else 'file'
-        logging.info("Deleted %s: %s", what, event.src_path)
+        self.logger.info("Deleted %s: %s", what, event.src_path)

     def on_modified(self, event):
-        super(LoggingEventHandler, self).on_modified(event)
+        super().on_modified(event)

         what = 'directory' if event.is_directory else 'file'
-        logging.info("Modified %s: %s", what, event.src_path)
-
-
-class LoggingFileSystemEventHandler(LoggingEventHandler):
-    """
-    For backwards-compatibility. Please use :class:`LoggingEventHandler`
-    instead.
-    """
+        self.logger.info("Modified %s: %s", what, event.src_path)


 def generate_sub_moved_events(src_dir_path, dest_dir_path):
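`LoggingEventHandler` now writes through an injectable logger instead of the module-level root logger. A short usage sketch; the logger name and watched path are placeholders:

import logging
import time

from watchdog.events import LoggingEventHandler
from watchdog.observers import Observer

logging.basicConfig(level=logging.INFO)
handler = LoggingEventHandler(logger=logging.getLogger("fswatch"))

observer = Observer()
observer.schedule(handler, ".", recursive=True)
observer.start()
try:
    time.sleep(5)  # watch the current directory for a few seconds
finally:
    observer.stop()
    observer.join()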
@@ -591,11 +512,15 @@ def generate_sub_moved_events(src_dir_path, dest_dir_path):
         for directory in directories:
             full_path = os.path.join(root, directory)
             renamed_path = full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None
-            yield DirMovedEvent(renamed_path, full_path)
+            event = DirMovedEvent(renamed_path, full_path)
+            event.is_synthetic = True
+            yield event
         for filename in filenames:
             full_path = os.path.join(root, filename)
             renamed_path = full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None
-            yield FileMovedEvent(renamed_path, full_path)
+            event = FileMovedEvent(renamed_path, full_path)
+            event.is_synthetic = True
+            yield event


 def generate_sub_created_events(src_dir_path):
@@ -611,6 +536,10 @@ def generate_sub_created_events(src_dir_path):
     """
     for root, directories, filenames in os.walk(src_dir_path):
         for directory in directories:
-            yield DirCreatedEvent(os.path.join(root, directory))
+            event = DirCreatedEvent(os.path.join(root, directory))
+            event.is_synthetic = True
+            yield event
         for filename in filenames:
-            yield FileCreatedEvent(os.path.join(root, filename))
+            event = FileCreatedEvent(os.path.join(root, filename))
+            event.is_synthetic = True
+            yield event
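Both helpers now tag every generated event as synthetic before yielding it, so consumers can tell inferred events apart from native ones. A minimal sketch (the directory path is a placeholder):

from watchdog.events import generate_sub_created_events

for event in generate_sub_created_events("/tmp/new-dir"):
    # every event from this generator is inferred, not OS-reported
    assert event.is_synthetic
    print(event)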
@@ -1,8 +1,7 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +19,7 @@
 :module: watchdog.observers
 :synopsis: Observer that picks a native implementation if available.
 :author: yesudeep@google.com (Yesudeep Mangalapilly)
+:author: contact@tiger-222.fr (Mickaël Schoentgen)

 Classes
 =======
@@ -55,8 +54,8 @@ Class Platforms Note
 """

 import warnings
-from ..utils import platform
-from ..utils import UnsupportedLibc
+from watchdog.utils import platform
+from watchdog.utils import UnsupportedLibc

 if platform.is_linux():
     try:
@@ -65,14 +64,13 @@ if platform.is_linux():
         from .polling import PollingObserver as Observer

 elif platform.is_darwin():
-    # FIXME: catching too broad. Error prone
     try:
         from .fsevents import FSEventsObserver as Observer
-    except:
+    except Exception:
         try:
             from .kqueue import KqueueObserver as Observer
             warnings.warn("Failed to import fsevents. Fall back to kqueue")
-        except:
+        except Exception:
             from .polling import PollingObserver as Observer
             warnings.warn("Failed to import fsevents and kqueue. Fall back to polling.")

@@ -84,9 +82,11 @@ elif platform.is_windows():
     # polling explicitly for Windows XP
     try:
         from .read_directory_changes import WindowsApiObserver as Observer
-    except:
+    except Exception:
         from .polling import PollingObserver as Observer
         warnings.warn("Failed to import read_directory_changes. Fall back to polling.")

 else:
     from .polling import PollingObserver as Observer
+
+__all__ = ["Observer"]
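The module keeps exporting a single `Observer` name that resolves to the best available backend (inotify, FSEvents, kqueue, the WinAPI observer, or polling), now made explicit via `__all__`. Typical usage sketch:

from watchdog.observers import Observer                  # native backend when possible
from watchdog.observers.polling import PollingObserver   # explicit fallback

observer = Observer()         # e.g. the inotify observer on Linux
fallback = PollingObserver()  # works everywhere, at the cost of polling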
@@ -1,8 +1,7 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,12 +15,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from builtins import object
-from __future__ import with_statement
+import queue
 import threading
-from ..utils import BaseThread
-from ..utils.compat import queue
-from ..utils.bricks import SkipRepeatsQueue
+from pathlib import Path
+
+from watchdog.utils import BaseThread
+from watchdog.utils.bricks import SkipRepeatsQueue

 DEFAULT_EMITTER_TIMEOUT = 1  # in seconds.
 DEFAULT_OBSERVER_TIMEOUT = 1  # in seconds.
@@ -37,7 +36,7 @@ class EventQueue(SkipRepeatsQueue):
     """


-class ObservedWatch(object):
+class ObservedWatch:
     """An scheduled watch.

     :param path:
@@ -47,7 +46,10 @@ class ObservedWatch(object):
     """

     def __init__(self, path, recursive):
-        self._path = path
+        if isinstance(path, Path):
+            self._path = str(path)
+        else:
+            self._path = path
         self._is_recursive = recursive

     @property
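Since `ObservedWatch` now converts `pathlib.Path` objects to strings, `schedule()` can be called with a `Path` directly. A sketch, assuming the handler and path are placeholders:

from pathlib import Path

from watchdog.events import LoggingEventHandler
from watchdog.observers import Observer

observer = Observer()
watch = observer.schedule(LoggingEventHandler(), Path.home() / "Downloads",
                          recursive=False)
print(watch.path)  # stored internally as a plain string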
@@ -74,8 +76,8 @@ class ObservedWatch(object):
         return hash(self.key)

     def __repr__(self):
-        return "<ObservedWatch: path=%s, is_recursive=%s>" % (
-            self.path, self.is_recursive)
+        return "<%s: path=%s, is_recursive=%s>" % (
+            type(self).__name__, self.path, self.is_recursive)


 # Observer classes
@@ -142,11 +144,8 @@ class EventEmitter(BaseThread):
         """

     def run(self):
-        try:
-            while self.should_keep_running():
-                self.queue_events(self.timeout)
-        finally:
-            pass
+        while self.should_keep_running():
+            self.queue_events(self.timeout)


 class EventDispatcher(BaseThread):
@@ -252,9 +251,13 @@ class BaseObserver(EventDispatcher):
         return self._emitters

     def start(self):
-        for emitter in self._emitters:
-            emitter.start()
-        super(BaseObserver, self).start()
+        for emitter in self._emitters.copy():
+            try:
+                emitter.start()
+            except Exception:
+                self._remove_emitter(emitter)
+                raise
+        super().start()

     def schedule(self, event_handler, path, recursive=False):
         """
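`BaseObserver.start()` now removes an emitter that fails to start and re-raises, so a half-started observer is not left holding a dead emitter. A hedged sketch of the caller-side effect (path and exception type are illustrative; backends typically surface an OSError here):

from watchdog.events import LoggingEventHandler
from watchdog.observers import Observer

observer = Observer()
observer.schedule(LoggingEventHandler(), "/var/log", recursive=True)
try:
    observer.start()   # re-raises if any emitter fails to start
except OSError:
    # the failing emitter has already been unscheduled by BaseObserver
    print("could not start watching /var/log")
else:
    observer.stop()
    observer.join()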
@@ -1,8 +1,7 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,17 +19,16 @@
 :module: watchdog.observers.fsevents
 :synopsis: FSEvents based emitter implementation.
 :author: yesudeep@google.com (Yesudeep Mangalapilly)
+:author: contact@tiger-222.fr (Mickaël Schoentgen)
 :platforms: Mac OS X
 """

-from __future__ import with_statement
-
-import sys
+import os
 import threading
 import unicodedata
 import _watchdog_fsevents as _fsevents

-from ..events import (
+from watchdog.events import (
     FileDeletedEvent,
     FileModifiedEvent,
     FileCreatedEvent,
@@ -41,8 +39,7 @@ from ..events import (
     DirMovedEvent
 )

-from ..utils.dirsnapshot import DirectorySnapshot
-from ..observers.api import (
+from watchdog.observers.api import (
     BaseObserver,
     EventEmitter,
     DEFAULT_EMITTER_TIMEOUT,
@@ -70,45 +67,85 @@ class FSEventsEmitter(EventEmitter):
     def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
         EventEmitter.__init__(self, event_queue, watch, timeout)
         self._lock = threading.Lock()
-        self.snapshot = DirectorySnapshot(watch.path, watch.is_recursive)

     def on_thread_stop(self):
-        _fsevents.remove_watch(self.watch)
-        _fsevents.stop(self)
+        if self.watch:
+            _fsevents.remove_watch(self.watch)
+            _fsevents.stop(self)
+            self._watch = None

     def queue_events(self, timeout):
         with self._lock:
-            if not self.watch.is_recursive\
-                    and self.watch.path not in self.pathnames:
-                return
-            new_snapshot = DirectorySnapshot(self.watch.path,
-                                             self.watch.is_recursive)
-            events = new_snapshot - self.snapshot
-            self.snapshot = new_snapshot
-
-            # Files.
-            for src_path in events.files_deleted:
-                self.queue_event(FileDeletedEvent(src_path))
-            for src_path in events.files_modified:
-                self.queue_event(FileModifiedEvent(src_path))
-            for src_path in events.files_created:
-                self.queue_event(FileCreatedEvent(src_path))
-            for src_path, dest_path in events.files_moved:
-                self.queue_event(FileMovedEvent(src_path, dest_path))
-
-            # Directories.
-            for src_path in events.dirs_deleted:
-                self.queue_event(DirDeletedEvent(src_path))
-            for src_path in events.dirs_modified:
-                self.queue_event(DirModifiedEvent(src_path))
-            for src_path in events.dirs_created:
-                self.queue_event(DirCreatedEvent(src_path))
-            for src_path, dest_path in events.dirs_moved:
-                self.queue_event(DirMovedEvent(src_path, dest_path))
+            events = self.native_events
+            i = 0
+            while i < len(events):
+                event = events[i]
+                src_path = self._encode_path(event.path)
+
+                # For some reason the create and remove flags are sometimes also
+                # set for rename and modify type events, so let those take
+                # precedence.
+                if event.is_renamed:
+                    # Internal moves appears to always be consecutive in the same
+                    # buffer and have IDs differ by exactly one (while others
+                    # don't) making it possible to pair up the two events coming
+                    # from a singe move operation. (None of this is documented!)
+                    # Otherwise, guess whether file was moved in or out.
+                    # TODO: handle id wrapping
+                    if (i + 1 < len(events) and events[i + 1].is_renamed
+                            and events[i + 1].event_id == event.event_id + 1):
+                        cls = DirMovedEvent if event.is_directory else FileMovedEvent
+                        dst_path = self._encode_path(events[i + 1].path)
+                        self.queue_event(cls(src_path, dst_path))
+                        self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
+                        self.queue_event(DirModifiedEvent(os.path.dirname(dst_path)))
+                        i += 1
+                    elif os.path.exists(event.path):
+                        cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
+                        self.queue_event(cls(src_path))
+                        self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
+                    else:
+                        cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
+                        self.queue_event(cls(src_path))
+                        self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
+                    # TODO: generate events for tree
+
+                elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod:
+                    cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
+                    self.queue_event(cls(src_path))
+
+                elif event.is_created:
+                    cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
+                    self.queue_event(cls(src_path))
+                    self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
+
+                elif event.is_removed:
+                    cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
+                    self.queue_event(cls(src_path))
+                    self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
+
+                    if src_path == self.watch.path:
+                        # this should not really occur, instead we expect
+                        # is_root_changed to be set
+                        self.stop()
+
+                elif event.is_root_changed:
+                    # This will be set if root or any if its parents is renamed or
+                    # deleted.
+                    # TODO: find out new path and generate DirMovedEvent?
+                    self.queue_event(DirDeletedEvent(self.watch.path))
+                    self.stop()
+
+                i += 1

     def run(self):
         try:
-            def callback(pathnames, flags, emitter=self):
+            def callback(pathnames, flags, ids, emitter=self):
+                with emitter._lock:
+                    emitter.native_events = [
+                        _fsevents.NativeEvent(event_path, event_flags, event_id)
+                        for event_path, event_flags, event_id in zip(pathnames, flags, ids)
+                    ]
                 emitter.queue_events(emitter.timeout)

             # for pathname, flag in zip(pathnames, flags):
@@ -124,7 +161,7 @@ class FSEventsEmitter(EventEmitter):

             # INFO: FSEvents reports directory notifications recursively
             # by default, so we do not need to add subdirectory paths.
-            #pathnames = set([self.watch.path])
+            # pathnames = set([self.watch.path])
             # if self.watch.is_recursive:
             #     for root, directory_names, _ in os.walk(self.watch.path):
             #         for directory_name in directory_names:
@@ -137,9 +174,15 @@ class FSEventsEmitter(EventEmitter):
                 callback,
                 self.pathnames)
             _fsevents.read_events(self)
-        except Exception as e:
+        except Exception:
             pass

+    def _encode_path(self, path):
+        """Encode path only if bytes were passed to this emitter. """
+        if isinstance(self.watch.path, bytes):
+            return os.fsencode(path)
+        return path
+

 class FSEventsObserver(BaseObserver):

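`_encode_path()` mirrors the path type the watch was scheduled with: bytes in, bytes out. The underlying primitives are just `os.fsencode` and `os.fsdecode`; a small standalone sketch of that convention (paths are placeholders):

import os

watched = b"/tmp/demo"              # watch scheduled with a bytes path
native_path = "/tmp/demo/file.txt"  # path as reported by the native layer

if isinstance(watched, bytes):
    emitted = os.fsencode(native_path)  # keep the caller's bytes convention
else:
    emitted = native_path

print(emitted)  # b'/tmp/demo/file.txt'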
@@ -148,25 +191,8 @@ class FSEventsObserver(BaseObserver):
                              timeout=timeout)

     def schedule(self, event_handler, path, recursive=False):
-        # Python 2/3 compat
-        try:
-            str_class = unicode
-        except NameError:
-            str_class = str
-
         # Fix for issue #26: Trace/BPT error when given a unicode path
         # string. https://github.com/gorakhargosh/watchdog/issues#issue/26
-        if isinstance(path, str_class):
-            #path = unicode(path, 'utf-8')
+        if isinstance(path, str):
             path = unicodedata.normalize('NFC', path)
-            # We only encode the path in Python 2 for backwards compatibility.
-            # On Python 3 we want the path to stay as unicode if possible for
-            # the sake of path matching not having to be rewritten to use the
-            # bytes API instead of strings. The _watchdog_fsevent.so code for
-            # Python 3 can handle both str and bytes paths, which is why we
-            # do not HAVE to encode it with Python 3. The Python 2 code in
-            # _watchdog_fsevents.so was not changed for the sake of backwards
-            # compatibility.
-            if sys.version_info < (3,):
-                path = path.encode('utf-8')
         return BaseObserver.schedule(self, event_handler, path, recursive)
@@ -1,4 +1,4 @@
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
 #
@@ -19,18 +19,14 @@
 :synopsis: FSEvents based emitter implementation.
 :platforms: Mac OS X
 """
-from __future__ import absolute_import

-from builtins import hex
-from builtins import zip
-from builtins import object
 import os
 import logging
+import queue
 import unicodedata
 from threading import Thread
-from ..utils.compat import queue

-from ..events import (
+from watchdog.events import (
     FileDeletedEvent,
     FileModifiedEvent,
     FileCreatedEvent,
@@ -40,7 +36,7 @@ from ..events import (
     DirCreatedEvent,
     DirMovedEvent
 )
-from ..observers.api import (
+from watchdog.observers.api import (
     BaseObserver,
     EventEmitter,
     DEFAULT_EMITTER_TIMEOUT,
@@ -49,7 +45,7 @@ from ..observers.api import (

 # pyobjc
 import AppKit
-from .FSEvents import (
+from FSEvents import (
     FSEventStreamCreate,
     CFRunLoopGetCurrent,
     FSEventStreamScheduleWithRunLoop,
@@ -61,7 +57,7 @@ from .FSEvents import (
     FSEventStreamRelease,
 )

-from .FSEvents import (
+from FSEvents import (
     kCFAllocatorDefault,
     kCFRunLoopDefaultMode,
     kFSEventStreamEventIdSinceNow,
@@ -75,7 +71,6 @@ from .FSEvents import (
     kFSEventStreamEventFlagItemFinderInfoMod,
     kFSEventStreamEventFlagItemChangeOwner,
     kFSEventStreamEventFlagItemXattrMod,
-    kFSEventStreamEventFlagItemIsFile,
     kFSEventStreamEventFlagItemIsDir,
     kFSEventStreamEventFlagItemIsSymlink,
 )
@@ -92,7 +87,7 @@ class FSEventsQueue(Thread):
         self._run_loop = None

         if isinstance(path, bytes):
-            path = path.decode('utf-8')
+            path = os.fsdecode(path)
         self._path = unicodedata.normalize('NFC', path)

         context = None
@@ -102,7 +97,7 @@ class FSEventsQueue(Thread):
             kFSEventStreamEventIdSinceNow, latency,
             kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents)
         if self._stream_ref is None:
-            raise IOError("FSEvents. Could not create stream.")
+            raise OSError("FSEvents. Could not create stream.")

     def run(self):
         pool = AppKit.NSAutoreleasePool.alloc().init()
@@ -112,7 +107,7 @@ class FSEventsQueue(Thread):
         if not FSEventStreamStart(self._stream_ref):
             FSEventStreamInvalidate(self._stream_ref)
             FSEventStreamRelease(self._stream_ref)
-            raise IOError("FSEvents. Could not start stream.")
+            raise OSError("FSEvents. Could not start stream.")

         CFRunLoopRun()
         FSEventStreamStop(self._stream_ref)
@@ -144,7 +139,7 @@ class FSEventsQueue(Thread):
         return self._queue.get()


-class NativeEvent(object):
+class NativeEvent:
     def __init__(self, path, flags, event_id):
         self.path = path
         self.flags = flags
@@ -162,17 +157,24 @@ class NativeEvent(object):

     @property
     def _event_type(self):
-        if self.is_created: return "Created"
-        if self.is_removed: return "Removed"
-        if self.is_renamed: return "Renamed"
-        if self.is_modified: return "Modified"
-        if self.is_inode_meta_mod: return "InodeMetaMod"
-        if self.is_xattr_mod: return "XattrMod"
+        if self.is_created:
+            return "Created"
+        if self.is_removed:
+            return "Removed"
+        if self.is_renamed:
+            return "Renamed"
+        if self.is_modified:
+            return "Modified"
+        if self.is_inode_meta_mod:
+            return "InodeMetaMod"
+        if self.is_xattr_mod:
+            return "XattrMod"
         return "Unknown"

     def __repr__(self):
-        s ="<NativeEvent: path=%s, type=%s, is_dir=%s, flags=%s, id=%s>"
-        return s % (repr(self.path), self._event_type, self.is_directory, hex(self.flags), self.event_id)
+        s = "<%s: path=%s, type=%s, is_dir=%s, flags=%s, id=%s>"
+        return s % (type(self).__name__, repr(self.path), self._event_type,
+                    self.is_directory, hex(self.flags), self.event_id)


 class FSEventsEmitter(EventEmitter):
@@ -205,13 +207,13 @@ class FSEventsEmitter(EventEmitter):
                 # don't) making it possible to pair up the two events coming
                 # from a singe move operation. (None of this is documented!)
                 # Otherwise, guess whether file was moved in or out.
-                #TODO: handle id wrapping
-                if (i+1 < len(events) and events[i+1].is_renamed and
-                        events[i+1].event_id == event.event_id + 1):
+                # TODO: handle id wrapping
+                if (i + 1 < len(events) and events[i + 1].is_renamed
+                        and events[i + 1].event_id == event.event_id + 1):
                     cls = DirMovedEvent if event.is_directory else FileMovedEvent
-                    self.queue_event(cls(event.path, events[i+1].path))
+                    self.queue_event(cls(event.path, events[i + 1].path))
                     self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
-                    self.queue_event(DirModifiedEvent(os.path.dirname(events[i+1].path)))
+                    self.queue_event(DirModifiedEvent(os.path.dirname(events[i + 1].path)))
                     i += 1
                 elif os.path.exists(event.path):
                     cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
@@ -221,7 +223,7 @@ class FSEventsEmitter(EventEmitter):
                     cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
                     self.queue_event(cls(event.path))
                     self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
-                #TODO: generate events for tree
+                # TODO: generate events for tree

             elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod :
                 cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
@@ -1,8 +1,7 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -67,20 +66,18 @@ Some extremely useful articles and documentation:

 """

-from __future__ import with_statement
-
 import os
 import threading
 from .inotify_buffer import InotifyBuffer

-from ..observers.api import (
+from watchdog.observers.api import (
     EventEmitter,
     BaseObserver,
     DEFAULT_EMITTER_TIMEOUT,
     DEFAULT_OBSERVER_TIMEOUT
 )

-from ..events import (
+from watchdog.events import (
     DirDeletedEvent,
     DirModifiedEvent,
     DirMovedEvent,
@@ -92,7 +89,6 @@ from ..events import (
     generate_sub_moved_events,
     generate_sub_created_events,
 )
-from ..utils import unicode_paths


 class InotifyEmitter(EventEmitter):
@@ -117,7 +113,7 @@ class InotifyEmitter(EventEmitter):
         self._inotify = None

     def on_thread_start(self):
-        path = unicode_paths.encode(self.watch.path)
+        path = os.fsencode(self.watch.path)
         self._inotify = InotifyBuffer(path, self.watch.is_recursive)

     def on_thread_stop(self):
@@ -125,8 +121,8 @@ class InotifyEmitter(EventEmitter):
             self._inotify.close()

     def queue_events(self, timeout, full_events=False):
-        #If "full_events" is true, then the method will report unmatched move events as seperate events
-        #This behavior is by default only called by a InotifyFullEmitter
+        # If "full_events" is true, then the method will report unmatched move events as separate events
+        # This behavior is by default only called by a InotifyFullEmitter
         with self._lock:
             event = self._inotify.read_event()
             if event is None:
@@ -146,7 +142,7 @@ class InotifyEmitter(EventEmitter):

             src_path = self._decode_path(event.src_path)
             if event.is_moved_to:
-                if (full_events):
+                if full_events:
                     cls = DirMovedEvent if event.is_directory else FileMovedEvent
                     self.queue_event(cls(None, src_path))
                 else:
@@ -167,19 +163,22 @@ class InotifyEmitter(EventEmitter):
                 self.queue_event(cls(src_path))
                 self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
             elif event.is_moved_from and full_events:
-                cls = DireMovedEvent if event.is_directory else FileMovedEvent
+                cls = DirMovedEvent if event.is_directory else FileMovedEvent
                 self.queue_event(cls(src_path, None))
                 self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
             elif event.is_create:
                 cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                 self.queue_event(cls(src_path))
                 self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
+            elif event.is_delete_self and src_path == self.watch.path:
+                self.queue_event(DirDeletedEvent(src_path))
+                self.stop()

     def _decode_path(self, path):
-        """ Decode path only if unicode string was passed to this emitter. """
+        """Decode path only if unicode string was passed to this emitter. """
         if isinstance(self.watch.path, bytes):
             return path
-        return unicode_paths.decode(path)
+        return os.fsdecode(path)


 class InotifyFullEmitter(InotifyEmitter):
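With the new `is_delete_self` branch, deleting the watched root itself now produces a `DirDeletedEvent` for that path and stops the emitter. A hedged sketch of how an application might react; the handler and root path below are illustrative:

from watchdog.events import DirDeletedEvent, FileSystemEventHandler

class RootAwareHandler(FileSystemEventHandler):
    def __init__(self, root):
        super().__init__()
        self.root = root

    def on_deleted(self, event):
        if isinstance(event, DirDeletedEvent) and event.src_path == self.root:
            print("watched directory itself was deleted:", self.root)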
@@ -204,6 +203,7 @@ class InotifyFullEmitter(InotifyEmitter):
     def queue_events(self, timeout, events=True):
         InotifyEmitter.queue_events(self, timeout, full_events=events)

+
 class InotifyObserver(BaseObserver):
     """
     Observer thread that schedules watching directories and dispatches
@@ -215,4 +215,4 @@ class InotifyObserver(BaseObserver):
             BaseObserver.__init__(self, emitter_class=InotifyFullEmitter, timeout=timeout)
         else:
             BaseObserver.__init__(self, emitter_class=InotifyEmitter,
                                   timeout=timeout)
@@ -1,4 +1,4 @@
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
 #
@@ -15,9 +15,9 @@
 # limitations under the License.

 import logging
-from ..utils import BaseThread
-from ..utils.delayed_queue import DelayedQueue
-from ..observers.inotify_c import Inotify
+from watchdog.utils import BaseThread
+from watchdog.utils.delayed_queue import DelayedQueue
+from watchdog.observers.inotify_c import Inotify

 logger = logging.getLogger(__name__)

@@ -50,6 +50,34 @@ class InotifyBuffer(BaseThread):
         self.stop()
         self.join()

+    def _group_events(self, event_list):
+        """Group any matching move events"""
+        grouped = []
+        for inotify_event in event_list:
+            logger.debug("in-event %s", inotify_event)
+
+            def matching_from_event(event):
+                return (not isinstance(event, tuple) and event.is_moved_from
+                        and event.cookie == inotify_event.cookie)
+
+            if inotify_event.is_moved_to:
+                # Check if move_from is already in the buffer
+                for index, event in enumerate(grouped):
+                    if matching_from_event(event):
+                        grouped[index] = (event, inotify_event)
+                        break
+                else:
+                    # Check if move_from is in delayqueue already
+                    from_event = self._queue.remove(matching_from_event)
+                    if from_event is not None:
+                        grouped.append((from_event, inotify_event))
+                    else:
+                        logger.debug("could not find matching move_from event")
+                        grouped.append(inotify_event)
+            else:
+                grouped.append(inotify_event)
+        return grouped
+
     def run(self):
         """Read event from `inotify` and add them to `queue`. When reading a
         IN_MOVE_TO event, remove the previous added matching IN_MOVE_FROM event
@@ -58,24 +86,13 @@ class InotifyBuffer(BaseThread):
         deleted_self = False
         while self.should_keep_running() and not deleted_self:
             inotify_events = self._inotify.read_events()
-            for inotify_event in inotify_events:
-                logger.debug("in-event %s", inotify_event)
-                if inotify_event.is_moved_to:
-
-                    def matching_from_event(event):
-                        return (not isinstance(event, tuple) and event.is_moved_from
-                                and event.cookie == inotify_event.cookie)
-
-                    from_event = self._queue.remove(matching_from_event)
-                    if from_event is not None:
-                        self._queue.put((from_event, inotify_event))
-                    else:
-                        logger.debug("could not find matching move_from event")
-                        self._queue.put(inotify_event)
-                else:
-                    self._queue.put(inotify_event)
-
-                if inotify_event.is_delete_self and \
+            grouped_events = self._group_events(inotify_events)
+            for inotify_event in grouped_events:
+                # Only add delay for unmatched move_from events
+                delay = not isinstance(inotify_event, tuple) and inotify_event.is_moved_from
+                self._queue.put(inotify_event, delay)
+
+                if not isinstance(inotify_event, tuple) and inotify_event.is_delete_self and \
                         inotify_event.src_path == self._inotify.path:
                     # Deleted the watched directory, stop watching for events
                     deleted_self = True
@@ -1,7 +1,7 @@
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,8 +15,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from builtins import object
-from __future__ import with_statement
 import os
 import errno
 import struct
@@ -25,17 +23,16 @@ import ctypes
 import ctypes.util
 from functools import reduce
 from ctypes import c_int, c_char_p, c_uint32
-from ..utils import has_attribute
-from ..utils import UnsupportedLibc
+from watchdog.utils import UnsupportedLibc


 def _load_libc():
     libc_path = None
     try:
         libc_path = ctypes.util.find_library('c')
-    except (OSError, IOError, RuntimeError):
+    except (OSError, RuntimeError):
         # Note: find_library will on some platforms raise these undocumented
-        # errors, e.g.on android IOError "No usable temporary directory found"
+        # errors, e.g.on android OSError "No usable temporary directory found"
         # will be raised.
         pass

@@ -45,14 +42,26 @@ def _load_libc():
     # Fallbacks
     try:
         return ctypes.CDLL('libc.so')
-    except (OSError, IOError):
+    except OSError:
+        pass
+
+    try:
         return ctypes.CDLL('libc.so.6')
+    except OSError:
+        pass
+
+    # uClibc
+    try:
+        return ctypes.CDLL('libc.so.0')
+    except OSError as err:
+        raise err


 libc = _load_libc()

-if not has_attribute(libc, 'inotify_init') or \
-        not has_attribute(libc, 'inotify_add_watch') or \
-        not has_attribute(libc, 'inotify_rm_watch'):
+if not hasattr(libc, 'inotify_init') or \
+        not hasattr(libc, 'inotify_add_watch') or \
+        not hasattr(libc, 'inotify_rm_watch'):
     raise UnsupportedLibc("Unsupported libc version found: %s" % libc._name)

 inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)(
||||||
("inotify_init", libc))
|
("inotify_init", libc))
|
||||||
|
|
||||||
|
|
||||||
class InotifyConstants(object):
|
class InotifyConstants:
|
||||||
# User-space events
|
# User-space events
|
||||||
IN_ACCESS = 0x00000001 # File was accessed.
|
IN_ACCESS = 0x00000001 # File was accessed.
|
||||||
IN_MODIFY = 0x00000002 # File was modified.
|
IN_MODIFY = 0x00000002 # File was modified.
|
||||||
|
@ -158,7 +167,7 @@ DEFAULT_NUM_EVENTS = 2048
|
||||||
DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16)
|
DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16)
|
||||||
|
|
||||||
|
|
||||||
class Inotify(object):
|
class Inotify:
|
||||||
"""
|
"""
|
||||||
Linux inotify(7) API wrapper class.
|
Linux inotify(7) API wrapper class.
|
||||||
|
|
||||||
|
@ -185,7 +194,10 @@ class Inotify(object):
|
||||||
self._path = path
|
self._path = path
|
||||||
self._event_mask = event_mask
|
self._event_mask = event_mask
|
||||||
self._is_recursive = recursive
|
self._is_recursive = recursive
|
||||||
self._add_dir_watch(path, recursive, event_mask)
|
if os.path.isdir(path):
|
||||||
|
self._add_dir_watch(path, recursive, event_mask)
|
||||||
|
else:
|
||||||
|
self._add_watch(path, event_mask)
|
||||||
self._moved_from_events = dict()
|
self._moved_from_events = dict()
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
@ -308,7 +320,7 @@ class Inotify(object):
|
||||||
if wd == -1:
|
if wd == -1:
|
||||||
continue
|
continue
|
||||||
wd_path = self._path_for_wd[wd]
|
wd_path = self._path_for_wd[wd]
|
||||||
src_path = os.path.join(wd_path, name) if name else wd_path #avoid trailing slash
|
src_path = os.path.join(wd_path, name) if name else wd_path # avoid trailing slash
|
||||||
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
|
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
|
||||||
|
|
||||||
if inotify_event.is_moved_from:
|
if inotify_event.is_moved_from:
|
||||||
|
@ -320,6 +332,13 @@ class Inotify(object):
|
||||||
del self._wd_for_path[move_src_path]
|
del self._wd_for_path[move_src_path]
|
||||||
self._wd_for_path[inotify_event.src_path] = moved_wd
|
self._wd_for_path[inotify_event.src_path] = moved_wd
|
||||||
self._path_for_wd[moved_wd] = inotify_event.src_path
|
self._path_for_wd[moved_wd] = inotify_event.src_path
|
||||||
|
if self.is_recursive:
|
||||||
|
for _path, _wd in self._wd_for_path.copy().items():
|
||||||
|
if _path.startswith(move_src_path + os.path.sep.encode()):
|
||||||
|
moved_wd = self._wd_for_path.pop(_path)
|
||||||
|
_move_to_path = _path.replace(move_src_path, inotify_event.src_path)
|
||||||
|
self._wd_for_path[_move_to_path] = moved_wd
|
||||||
|
self._path_for_wd[moved_wd] = _move_to_path
|
||||||
src_path = os.path.join(wd_path, name)
|
src_path = os.path.join(wd_path, name)
|
||||||
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
|
inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)
|
||||||
|
|
||||||
|
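When a watched directory is renamed inside the tree, the added block above re-keys every bookkeeping entry whose path starts with the old prefix. The same prefix-remapping idea in isolation, using a plain dict with byte-string paths (the sample paths are made up for illustration; in the real code the moved directory itself was already re-keyed just before this loop):

    import os

    wd_for_path = {b"/watched/old": 1, b"/watched/old/sub": 2, b"/watched/other": 3}
    move_src, move_dest = b"/watched/old", b"/watched/new"

    for path, wd in wd_for_path.copy().items():
        if path.startswith(move_src + os.path.sep.encode()):
            # re-key the nested watch under the new parent path
            wd_for_path[path.replace(move_src, move_dest)] = wd_for_path.pop(path)

    # wd_for_path is now {b'/watched/old': 1, b'/watched/new/sub': 2, b'/watched/other': 3}
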
@@ -332,8 +351,8 @@ class Inotify(object):

            event_list.append(inotify_event)

-           if (self.is_recursive and inotify_event.is_directory and
-                   inotify_event.is_create):
+           if (self.is_recursive and inotify_event.is_directory
+                   and inotify_event.is_create):

                # TODO: When a directory from another part of the
                # filesystem is moved into a watched directory, this
@@ -365,7 +384,7 @@ class Inotify(object):
            Event bit mask.
        """
        if not os.path.isdir(path):
-           raise OSError('Path is not a directory')
+           raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
        self._add_watch(path, mask)
        if recursive:
            for root, dirnames, _ in os.walk(path):
@@ -399,11 +418,15 @@ class Inotify(object):
        """
        err = ctypes.get_errno()
        if err == errno.ENOSPC:
-           raise OSError("inotify watch limit reached")
+           raise OSError(errno.ENOSPC, "inotify watch limit reached")
        elif err == errno.EMFILE:
-           raise OSError("inotify instance limit reached")
+           raise OSError(errno.EMFILE, "inotify instance limit reached")
+       elif err == errno.EACCES:
+           # Prevent raising an exception when a file with no permissions
+           # changes
+           pass
        else:
-           raise OSError(os.strerror(err))
+           raise OSError(err, os.strerror(err))

    @staticmethod
    def _parse_event_buffer(event_buffer):

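Passing the errno as the first argument means the exception now carries a machine-checkable error code rather than just a message. A hedged sketch of what a caller might do with that; the watched path is an illustrative assumption:

    import errno
    from watchdog.observers.inotify_c import Inotify

    try:
        watcher = Inotify(b"/tmp/watched", recursive=True)
    except OSError as exc:
        if exc.errno == errno.ENOSPC:
            # the per-user inotify watch limit was hit; see fs.inotify.max_user_watches
            print("inotify watch limit reached:", exc)
        else:
            raise
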
@@ -431,7 +454,7 @@ class Inotify(object):
            yield wd, mask, cookie, name


-class InotifyEvent(object):
+class InotifyEvent:
    """
    Inotify event struct wrapper.

@@ -442,9 +465,9 @@ class InotifyEvent(object):
    :param cookie:
        Event cookie
    :param name:
-       Event name.
+       Base name of the event source path.
    :param src_path:
-       Event source path
+       Full event source path.
    """

    def __init__(self, wd, mask, cookie, name, src_path):
@@ -531,8 +554,8 @@ class InotifyEvent(object):
        # It looks like the kernel does not provide this information for
        # IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir.
        # See also: https://github.com/seb-m/pyinotify/blob/2c7e8f8/python2/pyinotify.py#L897
-       return (self.is_delete_self or self.is_move_self or
-               self._mask & InotifyConstants.IN_ISDIR > 0)
+       return (self.is_delete_self or self.is_move_self
+               or self._mask & InotifyConstants.IN_ISDIR > 0)

    @property
    def key(self):
@@ -560,5 +583,6 @@ class InotifyEvent(object):

    def __repr__(self):
        mask_string = self._get_mask_string(self.mask)
-       s = "<InotifyEvent: src_path=%s, wd=%d, mask=%s, cookie=%d, name=%s>"
-       return s % (self.src_path, self.wd, mask_string, self.cookie, self.name)
+       s = '<%s: src_path=%r, wd=%d, mask=%s, cookie=%d, name=%s>'
+       return s % (type(self).__name__, self.src_path, self.wd, mask_string,
+                   self.cookie, os.fsdecode(self.name))

@@ -1,8 +1,7 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,6 +19,7 @@
 :module: watchdog.observers.kqueue
 :synopsis: ``kqueue(2)`` based emitter implementation.
 :author: yesudeep@google.com (Yesudeep Mangalapilly)
+:author: contact@tiger-222.fr (Mickaël Schoentgen)
 :platforms: Mac OS X and BSD with kqueue(2).

 .. WARNING:: kqueue is a very heavyweight way to monitor file systems.
@@ -30,17 +30,6 @@
              only those directories which report changes and do a diff
              between two sub-DirectorySnapshots perhaps.

-.. ADMONITION:: About ``select.kqueue`` and Python versions
-
-    * Python 2.5 does not ship with ``select.kqueue``
-    * Python 2.6 ships with a broken ``select.kqueue`` that cannot take
-      multiple events in the event list passed to ``kqueue.control``.
-    * Python 2.7 ships with a working ``select.kqueue``
-      implementation.
-
-    I have backported the Python 2.7 implementation to Python 2.5 and 2.6
-    in the ``select_backport`` package available on PyPI.
-
 .. ADMONITION:: About OS X performance guidelines

     Quote from the `Mac OS X File System Performance Guidelines`_:
@@ -73,40 +62,32 @@ Collections and Utility Classes
    :members:
    :show-inheritance:

-.. _Mac OS X File System Performance Guidelines: http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/FileSystem/Articles/TrackingChanges.html#//apple_ref/doc/uid/20001993-CJBJFIDD
+.. _Mac OS X File System Performance Guidelines:
+   http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/FileSystem/Articles/TrackingChanges.html#//apple_ref/doc/uid/20001993-CJBJFIDD

 """

-from builtins import object
-from __future__ import with_statement
-from ..utils import platform
+from watchdog.utils import platform

 import threading
 import errno
-import sys
-import stat
+from stat import S_ISDIR
 import os
+import os.path
+import select

-# See the notes for this module in the documentation above ^.
-#import select
-# if not has_attribute(select, 'kqueue') or sys.version_info < (2, 7, 0):
-if sys.version_info < (2, 7, 0):
-    import select_backport as select
-else:
-    import select
+from pathlib import Path

-from ...pathtools.path import absolute_path
-
-from ..observers.api import (
+from watchdog.observers.api import (
     BaseObserver,
     EventEmitter,
     DEFAULT_OBSERVER_TIMEOUT,
     DEFAULT_EMITTER_TIMEOUT
 )

-from ..utils.dirsnapshot import DirectorySnapshot
+from watchdog.utils.dirsnapshot import DirectorySnapshot

-from ..events import (
+from watchdog.events import (
     DirMovedEvent,
     DirDeletedEvent,
     DirCreatedEvent,
@@ -117,7 +98,8 @@ from ..events import (
     FileModifiedEvent,
     EVENT_TYPE_MOVED,
     EVENT_TYPE_DELETED,
-    EVENT_TYPE_CREATED
+    EVENT_TYPE_CREATED,
+    generate_sub_moved_events,
 )

 # Maximum number of events to process.
@@ -134,15 +116,19 @@ else:
 WATCHDOG_KQ_FILTER = select.KQ_FILTER_VNODE
 WATCHDOG_KQ_EV_FLAGS = select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR
 WATCHDOG_KQ_FFLAGS = (
-    select.KQ_NOTE_DELETE |
-    select.KQ_NOTE_WRITE |
-    select.KQ_NOTE_EXTEND |
-    select.KQ_NOTE_ATTRIB |
-    select.KQ_NOTE_LINK |
-    select.KQ_NOTE_RENAME |
-    select.KQ_NOTE_REVOKE
+    select.KQ_NOTE_DELETE
+    | select.KQ_NOTE_WRITE
+    | select.KQ_NOTE_EXTEND
+    | select.KQ_NOTE_ATTRIB
+    | select.KQ_NOTE_LINK
+    | select.KQ_NOTE_RENAME
+    | select.KQ_NOTE_REVOKE
 )


+def absolute_path(path):
+    return Path(path).resolve()
+
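These fflags are what each watched file descriptor is registered with. A minimal sketch of how a single path might be registered with `select.kqueue` using the flag sets defined above; the path and the one-shot wait are illustrative, and this only runs on BSD/macOS:

    import os
    import select

    kq = select.kqueue()
    fd = os.open("/tmp/watched/file.txt", os.O_RDONLY)  # hypothetical path
    kev = select.kevent(fd,
                        filter=WATCHDOG_KQ_FILTER,
                        flags=WATCHDOG_KQ_EV_FLAGS,
                        fflags=WATCHDOG_KQ_FFLAGS)
    # Register the event and block until at most one notification arrives.
    for ev in kq.control([kev], 1, None):
        print("fflags:", ev.fflags)
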
 # Flag tests.


@@ -167,7 +153,7 @@ def is_renamed(kev):
     return kev.fflags & select.KQ_NOTE_RENAME


-class KeventDescriptorSet(object):
+class KeventDescriptorSet:

     """
     Thread-safe kevent descriptor collection.
@@ -322,7 +308,7 @@ class KeventDescriptorSet(object):
            descriptor.close()


-class KeventDescriptor(object):
+class KeventDescriptor:

    """
    A kevent descriptor convenience data structure to keep together:
@@ -396,8 +382,8 @@ class KeventDescriptor(object):
        return hash(self.key)

    def __repr__(self):
-       return "<KeventDescriptor: path=%s, is_directory=%s>"\
-           % (self.path, self.is_directory)
+       return "<%s: path=%s, is_directory=%s>"\
+           % (type(self).__name__, self.path, self.is_directory)


 class KqueueEmitter(EventEmitter):
@@ -443,9 +429,11 @@ class KqueueEmitter(EventEmitter):
        Read events blocking timeout (in seconds).
    :type timeout:
        ``float``
+   :param stat: stat function. See ``os.stat`` for details.
    """

-   def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
+   def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT,
+                stat=os.stat):
        EventEmitter.__init__(self, event_queue, watch, timeout)

        self._kq = select.kqueue()
@@ -454,12 +442,14 @@ class KqueueEmitter(EventEmitter):
        # A collection of KeventDescriptor.
        self._descriptors = KeventDescriptorSet()

-       def walker_callback(path, stat_info, self=self):
-           self._register_kevent(path, stat.S_ISDIR(stat_info.st_mode))
+       def custom_stat(path, self=self):
+           stat_info = stat(path)
+           self._register_kevent(path, S_ISDIR(stat_info.st_mode))
+           return stat_info

        self._snapshot = DirectorySnapshot(watch.path,
-                                          watch.is_recursive,
-                                          walker_callback)
+                                          recursive=watch.is_recursive,
+                                          stat=custom_stat)

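The `stat=` hook lets the emitter register a kevent for every path as the snapshot is taken, in a single pass over the tree. The same hook can be used on its own; a small sketch in which the directory and the callback body are illustrative:

    import os
    from watchdog.utils.dirsnapshot import DirectorySnapshot

    seen = []

    def counting_stat(path):
        # called once per path while the snapshot walks the tree
        seen.append(path)
        return os.stat(path)

    snapshot = DirectorySnapshot("/tmp/watched", recursive=True, stat=counting_stat)
    print(len(seen), "paths visited")
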
    def _register_kevent(self, path, is_directory):
        """
@@ -480,7 +470,7 @@ class KqueueEmitter(EventEmitter):
                # and then quickly deleted before we could open
                # a descriptor for it. Therefore, simply queue a sequence
                # of created and deleted events for the path.
-               #path = absolute_path(path)
+               # path = absolute_path(path)
                # if is_directory:
                #     self.queue_event(DirCreatedEvent(path))
                #     self.queue_event(DirDeletedEvent(path))
@@ -494,6 +484,12 @@ class KqueueEmitter(EventEmitter):
                # eg. .git/index.lock when running tig operations.
                # I don't fully understand this at the moment.
                pass
+           elif e.errno == errno.EOPNOTSUPP:
+               # Probably dealing with the socket or special file
+               # mounted through a file system that does not support
+               # access to it (e.g. NFS). On BSD systems look at
+               # EOPNOTSUPP in man 2 open.
+               pass
            else:
                # All other errors are propagated.
                raise
@@ -529,127 +525,114 @@ class KqueueEmitter(EventEmitter):
        elif event.event_type == EVENT_TYPE_DELETED:
            self._unregister_kevent(event.src_path)

-   def _queue_dirs_modified(self,
-                            dirs_modified,
-                            ref_snapshot,
-                            new_snapshot):
-       """
-       Queues events for directory modifications by scanning the directory
-       for changes.
-
-       A scan is a comparison between two snapshots of the same directory
-       taken at two different times. This also determines whether files
-       or directories were created, which updated the modified timestamp
-       for the directory.
-       """
-       if dirs_modified:
-           for dir_modified in dirs_modified:
-               self.queue_event(DirModifiedEvent(dir_modified))
-           diff_events = new_snapshot - ref_snapshot
-           for file_created in diff_events.files_created:
-               self.queue_event(FileCreatedEvent(file_created))
-           for directory_created in diff_events.dirs_created:
-               self.queue_event(DirCreatedEvent(directory_created))
-
-   def _queue_events_except_renames_and_dir_modifications(self, event_list):
-       """
-       Queues events from the kevent list returned from the call to
+   def _gen_kqueue_events(self,
+                          kev,
+                          ref_snapshot,
+                          new_snapshot):
+       """
+       Generate events from the kevent list returned from the call to
        :meth:`select.kqueue.control`.

-       .. NOTE:: Queues only the deletions, file modifications,
+       .. NOTE:: kqueue only tells us about deletions, file modifications,
                  attribute modifications. The other events, namely,
                  file creation, directory modification, file rename,
                  directory rename, directory creation, etc. are
                  determined by comparing directory snapshots.
       """
-       files_renamed = set()
-       dirs_renamed = set()
-       dirs_modified = set()
-
-       for kev in event_list:
-           descriptor = self._descriptors.get_for_fd(kev.ident)
-           src_path = descriptor.path
-
-           if is_deleted(kev):
-               if descriptor.is_directory:
-                   self.queue_event(DirDeletedEvent(src_path))
-               else:
-                   self.queue_event(FileDeletedEvent(src_path))
-           elif is_attrib_modified(kev):
-               if descriptor.is_directory:
-                   self.queue_event(DirModifiedEvent(src_path))
-               else:
-                   self.queue_event(FileModifiedEvent(src_path))
-           elif is_modified(kev):
-               if descriptor.is_directory:
-                   # When a directory is modified, it may be due to
-                   # sub-file/directory renames or new file/directory
-                   # creation. We determine all this by comparing
-                   # snapshots later.
-                   dirs_modified.add(src_path)
-               else:
-                   self.queue_event(FileModifiedEvent(src_path))
-           elif is_renamed(kev):
-               # Kqueue does not specify the destination names for renames
-               # to, so we have to process these after taking a snapshot
-               # of the directory.
-               if descriptor.is_directory:
-                   dirs_renamed.add(src_path)
-               else:
-                   files_renamed.add(src_path)
-       return files_renamed, dirs_renamed, dirs_modified
+       descriptor = self._descriptors.get_for_fd(kev.ident)
+       src_path = descriptor.path
+
+       if is_renamed(kev):
+           # Kqueue does not specify the destination names for renames
+           # to, so we have to process these using the a snapshot
+           # of the directory.
+           for event in self._gen_renamed_events(src_path,
+                                                 descriptor.is_directory,
+                                                 ref_snapshot,
+                                                 new_snapshot):
+               yield event
+       elif is_attrib_modified(kev):
+           if descriptor.is_directory:
+               yield DirModifiedEvent(src_path)
+           else:
+               yield FileModifiedEvent(src_path)
+       elif is_modified(kev):
+           if descriptor.is_directory:
+               if self.watch.is_recursive or self.watch.path == src_path:
+                   # When a directory is modified, it may be due to
+                   # sub-file/directory renames or new file/directory
+                   # creation. We determine all this by comparing
+                   # snapshots later.
+                   yield DirModifiedEvent(src_path)
+           else:
+               yield FileModifiedEvent(src_path)
+       elif is_deleted(kev):
+           if descriptor.is_directory:
+               yield DirDeletedEvent(src_path)
+           else:
+               yield FileDeletedEvent(src_path)

-   def _queue_renamed(self,
-                      src_path,
-                      is_directory,
-                      ref_snapshot,
-                      new_snapshot):
+   def _parent_dir_modified(self, src_path):
+       """
+       Helper to generate a DirModifiedEvent on the parent of src_path.
+       """
+       return DirModifiedEvent(os.path.dirname(src_path))
+
+   def _gen_renamed_events(self,
+                           src_path,
+                           is_directory,
+                           ref_snapshot,
+                           new_snapshot):
       """
       Compares information from two directory snapshots (one taken before
       the rename operation and another taken right after) to determine the
-       destination path of the file system object renamed, and adds
-       appropriate events to the event queue.
+       destination path of the file system object renamed, and yields
+       the appropriate events to be queued.
       """
       try:
-           ref_stat_info = ref_snapshot.stat_info(src_path)
+           f_inode = ref_snapshot.inode(src_path)
       except KeyError:
           # Probably caught a temporary file/directory that was renamed
           # and deleted. Fires a sequence of created and deleted events
           # for the path.
           if is_directory:
-               self.queue_event(DirCreatedEvent(src_path))
-               self.queue_event(DirDeletedEvent(src_path))
+               yield DirCreatedEvent(src_path)
+               yield DirDeletedEvent(src_path)
           else:
-               self.queue_event(FileCreatedEvent(src_path))
-               self.queue_event(FileDeletedEvent(src_path))
+               yield FileCreatedEvent(src_path)
+               yield FileDeletedEvent(src_path)
           # We don't process any further and bail out assuming
           # the event represents deletion/creation instead of movement.
           return

-       try:
-           dest_path = absolute_path(
-               new_snapshot.path_for_inode(ref_stat_info.st_ino))
-           if is_directory:
-               event = DirMovedEvent(src_path, dest_path)
+       dest_path = new_snapshot.path(f_inode)
+       if dest_path is not None:
+           dest_path = absolute_path(dest_path)
+           if is_directory:
+               event = DirMovedEvent(src_path, dest_path)
+               yield event
+           else:
+               yield FileMovedEvent(src_path, dest_path)
+           yield self._parent_dir_modified(src_path)
+           yield self._parent_dir_modified(dest_path)
+           if is_directory:
               # TODO: Do we need to fire moved events for the items
               # inside the directory tree? Does kqueue does this
               # all by itself? Check this and then enable this code
               # only if it doesn't already.
               # A: It doesn't. So I've enabled this block.
               if self.watch.is_recursive:
-                   for sub_event in event.sub_moved_events():
-                       self.queue_event(sub_event)
-               self.queue_event(event)
-           else:
-               self.queue_event(FileMovedEvent(src_path, dest_path))
-       except KeyError:
+                   for sub_event in generate_sub_moved_events(src_path, dest_path):
+                       yield sub_event
+       else:
           # If the new snapshot does not have an inode for the
           # old path, we haven't found the new name. Therefore,
           # we mark it as deleted and remove unregister the path.
           if is_directory:
-               self.queue_event(DirDeletedEvent(src_path))
+               yield DirDeletedEvent(src_path)
           else:
-               self.queue_event(FileDeletedEvent(src_path))
+               yield FileDeletedEvent(src_path)
+           yield self._parent_dir_modified(src_path)

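The rename resolution above looks up the inode the path had in the old snapshot and then asks the new snapshot which path now carries that inode. A condensed sketch of that lookup on its own; the paths are illustrative, while `inode()` and `path()` are the real DirectorySnapshot accessors:

    from watchdog.utils.dirsnapshot import DirectorySnapshot

    before = DirectorySnapshot("/tmp/watched", recursive=True)
    # ... a file inside /tmp/watched is renamed here ...
    after = DirectorySnapshot("/tmp/watched", recursive=True)

    old_path = "/tmp/watched/report.txt"   # hypothetical source path
    inode = before.inode(old_path)         # (st_ino, st_dev) of the old path
    new_path = after.path(inode)           # None if the inode vanished
    print("renamed to:", new_path)
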
    def _read_events(self, timeout=None):
        """
@@ -678,8 +661,8 @@ class KqueueEmitter(EventEmitter):
        with self._lock:
            try:
                event_list = self._read_events(timeout)
-               files_renamed, dirs_renamed, dirs_modified = (
-                   self._queue_events_except_renames_and_dir_modifications(event_list))
+               # TODO: investigate why order appears to be reversed
+               event_list.reverse()

                # Take a fresh snapshot of the directory and update the
                # saved snapshot.
@@ -687,26 +670,24 @@ class KqueueEmitter(EventEmitter):
                                                 self.watch.is_recursive)
                ref_snapshot = self._snapshot
                self._snapshot = new_snapshot
+               diff_events = new_snapshot - ref_snapshot
+
+               # Process events
+               for directory_created in diff_events.dirs_created:
+                   self.queue_event(DirCreatedEvent(directory_created))
+               for file_created in diff_events.files_created:
+                   self.queue_event(FileCreatedEvent(file_created))
+               for file_modified in diff_events.files_modified:
+                   self.queue_event(FileModifiedEvent(file_modified))

-               if files_renamed or dirs_renamed or dirs_modified:
-                   for src_path in files_renamed:
-                       self._queue_renamed(src_path,
-                                           False,
-                                           ref_snapshot,
-                                           new_snapshot)
-                   for src_path in dirs_renamed:
-                       self._queue_renamed(src_path,
-                                           True,
-                                           ref_snapshot,
-                                           new_snapshot)
-                   self._queue_dirs_modified(dirs_modified,
-                                             ref_snapshot,
-                                             new_snapshot)
+               for kev in event_list:
+                   for event in self._gen_kqueue_events(kev,
+                                                        ref_snapshot,
+                                                        new_snapshot):
+                       self.queue_event(event)

            except OSError as e:
-               if e.errno == errno.EBADF:
-                   # logging.debug(e)
-                   pass
-               else:
+               if e.errno != errno.EBADF:
                    raise

    def on_thread_stop(self):

@@ -1,8 +1,7 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,6 +20,7 @@
 :module: watchdog.observers.polling
 :synopsis: Polling emitter implementation.
 :author: yesudeep@google.com (Yesudeep Mangalapilly)
+:author: contact@tiger-222.fr (Mickaël Schoentgen)

 Classes
 -------
@@ -34,21 +34,19 @@ Classes
     :special-members:
 """

-from __future__ import with_statement
 import os
 import threading
 from functools import partial
-from ..utils import stat as default_stat
-from ..utils.dirsnapshot import DirectorySnapshot, \
-    DirectorySnapshotDiff
-from ..observers.api import (
+from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff
+from watchdog.observers.api import (
     EventEmitter,
     BaseObserver,
     DEFAULT_OBSERVER_TIMEOUT,
     DEFAULT_EMITTER_TIMEOUT
 )

-from ..events import (
+from watchdog.events import (
     DirMovedEvent,
     DirDeletedEvent,
     DirCreatedEvent,
@@ -67,7 +65,7 @@ class PollingEmitter(EventEmitter):
    """

    def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT,
-                stat=default_stat, listdir=os.listdir):
+                stat=os.stat, listdir=os.scandir):
        EventEmitter.__init__(self, event_queue, watch, timeout)
        self._snapshot = None
        self._lock = threading.Lock()
@@ -91,12 +89,10 @@ class PollingEmitter(EventEmitter):
            # Update snapshot.
            try:
                new_snapshot = self._take_snapshot()
-           except OSError as e:
+           except OSError:
                self.queue_event(DirDeletedEvent(self.watch.path))
                self.stop()
                return
-           except Exception as e:
-               raise e

            events = DirectorySnapshotDiff(self._snapshot, new_snapshot)
            self._snapshot = new_snapshot

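The polling emitter derives every event purely from the difference between two snapshots. The same diff can be computed standalone; a small sketch in which the directory is illustrative:

    from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff

    before = DirectorySnapshot("/tmp/watched", recursive=True)
    # ... file system activity happens here ...
    after = DirectorySnapshot("/tmp/watched", recursive=True)

    diff = DirectorySnapshotDiff(before, after)
    print("created: ", diff.files_created)
    print("deleted: ", diff.files_deleted)
    print("modified:", diff.files_modified)
    print("moved:   ", diff.files_moved)   # list of (old_path, new_path) pairs
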
@@ -140,7 +136,7 @@ class PollingObserverVFS(BaseObserver):
    def __init__(self, stat, listdir, polling_interval=1):
        """
        :param stat: stat function. See ``os.stat`` for details.
-       :param listdir: listdir function. See ``os.listdir`` for details.
+       :param listdir: listdir function. See ``os.scandir`` for details.
        :type polling_interval: float
        :param polling_interval: interval in seconds between polling the file system.
        """

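Note the contract change: the `listdir` callable is now expected to behave like `os.scandir`, yielding directory entries, rather than like `os.listdir`. A hedged sketch of instantiating the observer with the standard functions; the handler and path are illustrative:

    import os
    from watchdog.events import LoggingEventHandler
    from watchdog.observers.polling import PollingObserverVFS

    observer = PollingObserverVFS(stat=os.stat, listdir=os.scandir, polling_interval=2)
    observer.schedule(LoggingEventHandler(), "/tmp/watched", recursive=True)
    observer.start()
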
@@ -1,8 +1,7 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 # Copyright 2014 Thomas Amland
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,14 +16,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from __future__ import with_statement
-
-import ctypes
 import threading
 import os.path
 import time

-from ..events import (
+from watchdog.events import (
     DirCreatedEvent,
     DirDeletedEvent,
     DirMovedEvent,
@@ -37,14 +33,14 @@ from ..events import (
     generate_sub_created_events,
 )

-from ..observers.api import (
+from watchdog.observers.api import (
     EventEmitter,
     BaseObserver,
     DEFAULT_OBSERVER_TIMEOUT,
     DEFAULT_EMITTER_TIMEOUT
 )

-from ..observers.winapi import (
+from watchdog.observers.winapi import (
     read_events,
     get_directory_handle,
     close_directory_handle,
@@ -73,8 +69,11 @@ class WindowsApiEmitter(EventEmitter):
        if self._handle:
            close_directory_handle(self._handle)

+   def _read_events(self):
+       return read_events(self._handle, self.watch.path, self.watch.is_recursive)
+
    def queue_events(self, timeout):
-       winapi_events = read_events(self._handle, self.watch.is_recursive)
+       winapi_events = self._read_events()
        with self._lock:
            last_renamed_src_path = ""
            for winapi_event in winapi_events:
@@ -112,7 +111,7 @@ class WindowsApiEmitter(EventEmitter):
                    isdir = os.path.isdir(src_path)
                    cls = DirCreatedEvent if isdir else FileCreatedEvent
                    self.queue_event(cls(src_path))
-                   if isdir:
+                   if isdir and self.watch.is_recursive:
                        # If a directory is moved from outside the watched folder to inside it
                        # we only get a created directory event out of it, not any events for its children
                        # so use the same hack as for file moves to get the child events
@@ -122,6 +121,9 @@ class WindowsApiEmitter(EventEmitter):
                            self.queue_event(sub_created_event)
                elif winapi_event.is_removed:
                    self.queue_event(FileDeletedEvent(src_path))
+               elif winapi_event.is_removed_self:
+                   self.queue_event(DirDeletedEvent(self.watch.path))
+                   self.stop()


 class WindowsApiObserver(BaseObserver):

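The "hack" the comment refers to is synthesizing created events for every child of a directory that appears in one go, since the OS reports only the top-level creation. `generate_sub_created_events` from `watchdog.events` performs that walk; a small sketch in which the path is illustrative:

    from watchdog.events import generate_sub_created_events

    # Yields a DirCreatedEvent or FileCreatedEvent for everything found
    # beneath the newly appeared directory.
    for sub_event in generate_sub_created_events("/tmp/watched/new_dir"):
        print(sub_event)
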
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 # winapi.py: Windows API-Python interface (removes dependency on pywin32)
 #
 # Copyright (C) 2007 Thomas Heller <theller@ctypes.org>
@@ -36,23 +35,15 @@
 # Portions of this code were taken from pyfilesystem, which uses the above
 # new BSD license.

-from builtins import object
-from __future__ import with_statement
-
 import ctypes.wintypes
-import struct
 from functools import reduce

-try:
-    LPVOID = ctypes.wintypes.LPVOID
-except AttributeError:
-    # LPVOID wasn't defined in Py2.5, guess it was introduced in Py2.6
-    LPVOID = ctypes.c_void_p
+LPVOID = ctypes.wintypes.LPVOID

 # Invalid handle value.
 INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value

-# File notification contants.
+# File notification constants.
 FILE_NOTIFY_CHANGE_FILE_NAME = 0x01
 FILE_NOTIFY_CHANGE_DIR_NAME = 0x02
 FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x04
@@ -70,17 +61,21 @@ FILE_SHARE_WRITE = 0x02
 FILE_SHARE_DELETE = 0x04
 OPEN_EXISTING = 3

+VOLUME_NAME_NT = 0x02
+
 # File action constants.
 FILE_ACTION_CREATED = 1
 FILE_ACTION_DELETED = 2
 FILE_ACTION_MODIFIED = 3
 FILE_ACTION_RENAMED_OLD_NAME = 4
 FILE_ACTION_RENAMED_NEW_NAME = 5
+FILE_ACTION_DELETED_SELF = 0xFFFE
 FILE_ACTION_OVERFLOW = 0xFFFF

 # Aliases
 FILE_ACTION_ADDED = FILE_ACTION_CREATED
 FILE_ACTION_REMOVED = FILE_ACTION_DELETED
+FILE_ACTION_REMOVED_SELF = FILE_ACTION_DELETED_SELF

 THREAD_TERMINATE = 0x0001

@@ -124,7 +119,9 @@ def _errcheck_dword(value, func, args):
     return args


-ReadDirectoryChangesW = ctypes.windll.kernel32.ReadDirectoryChangesW
+kernel32 = ctypes.WinDLL("kernel32")
+
+ReadDirectoryChangesW = kernel32.ReadDirectoryChangesW
 ReadDirectoryChangesW.restype = ctypes.wintypes.BOOL
 ReadDirectoryChangesW.errcheck = _errcheck_bool
 ReadDirectoryChangesW.argtypes = (
@@ -138,7 +135,7 @@ ReadDirectoryChangesW.argtypes = (
     LPVOID  # FileIOCompletionRoutine # lpCompletionRoutine
 )

-CreateFileW = ctypes.windll.kernel32.CreateFileW
+CreateFileW = kernel32.CreateFileW
 CreateFileW.restype = ctypes.wintypes.HANDLE
 CreateFileW.errcheck = _errcheck_handle
 CreateFileW.argtypes = (
@@ -151,13 +148,13 @@ CreateFileW.argtypes = (
     ctypes.wintypes.HANDLE  # hTemplateFile
 )

-CloseHandle = ctypes.windll.kernel32.CloseHandle
+CloseHandle = kernel32.CloseHandle
 CloseHandle.restype = ctypes.wintypes.BOOL
 CloseHandle.argtypes = (
     ctypes.wintypes.HANDLE,  # hObject
 )

-CancelIoEx = ctypes.windll.kernel32.CancelIoEx
+CancelIoEx = kernel32.CancelIoEx
 CancelIoEx.restype = ctypes.wintypes.BOOL
 CancelIoEx.errcheck = _errcheck_bool
 CancelIoEx.argtypes = (
@@ -165,7 +162,7 @@ CancelIoEx.argtypes = (
     ctypes.POINTER(OVERLAPPED)  # lpOverlapped
 )

-CreateEvent = ctypes.windll.kernel32.CreateEventW
+CreateEvent = kernel32.CreateEventW
 CreateEvent.restype = ctypes.wintypes.HANDLE
 CreateEvent.errcheck = _errcheck_handle
 CreateEvent.argtypes = (
@@ -175,14 +172,14 @@ CreateEvent.argtypes = (
     ctypes.wintypes.LPCWSTR,  # lpName
 )

-SetEvent = ctypes.windll.kernel32.SetEvent
+SetEvent = kernel32.SetEvent
 SetEvent.restype = ctypes.wintypes.BOOL
 SetEvent.errcheck = _errcheck_bool
 SetEvent.argtypes = (
     ctypes.wintypes.HANDLE,  # hEvent
 )

-WaitForSingleObjectEx = ctypes.windll.kernel32.WaitForSingleObjectEx
+WaitForSingleObjectEx = kernel32.WaitForSingleObjectEx
 WaitForSingleObjectEx.restype = ctypes.wintypes.DWORD
 WaitForSingleObjectEx.errcheck = _errcheck_dword
 WaitForSingleObjectEx.argtypes = (
@@ -191,7 +188,7 @@ WaitForSingleObjectEx.argtypes = (
     ctypes.wintypes.BOOL,  # bAlertable
 )

-CreateIoCompletionPort = ctypes.windll.kernel32.CreateIoCompletionPort
+CreateIoCompletionPort = kernel32.CreateIoCompletionPort
 CreateIoCompletionPort.restype = ctypes.wintypes.HANDLE
 CreateIoCompletionPort.errcheck = _errcheck_handle
 CreateIoCompletionPort.argtypes = (
@@ -201,7 +198,7 @@ CreateIoCompletionPort.argtypes = (
     ctypes.wintypes.DWORD,  # NumberOfConcurrentThreads
 )

-GetQueuedCompletionStatus = ctypes.windll.kernel32.GetQueuedCompletionStatus
+GetQueuedCompletionStatus = kernel32.GetQueuedCompletionStatus
 GetQueuedCompletionStatus.restype = ctypes.wintypes.BOOL
 GetQueuedCompletionStatus.errcheck = _errcheck_bool
 GetQueuedCompletionStatus.argtypes = (
@@ -212,7 +209,7 @@ GetQueuedCompletionStatus.argtypes = (
     ctypes.wintypes.DWORD,  # dwMilliseconds
 )

-PostQueuedCompletionStatus = ctypes.windll.kernel32.PostQueuedCompletionStatus
+PostQueuedCompletionStatus = kernel32.PostQueuedCompletionStatus
 PostQueuedCompletionStatus.restype = ctypes.wintypes.BOOL
 PostQueuedCompletionStatus.errcheck = _errcheck_bool
 PostQueuedCompletionStatus.argtypes = (
@@ -223,13 +220,25 @@ PostQueuedCompletionStatus.argtypes = (
 )


+GetFinalPathNameByHandleW = kernel32.GetFinalPathNameByHandleW
+GetFinalPathNameByHandleW.restype = ctypes.wintypes.DWORD
+GetFinalPathNameByHandleW.errcheck = _errcheck_dword
+GetFinalPathNameByHandleW.argtypes = (
+    ctypes.wintypes.HANDLE,  # hFile
+    ctypes.wintypes.LPWSTR,  # lpszFilePath
+    ctypes.wintypes.DWORD,  # cchFilePath
+    ctypes.wintypes.DWORD,  # DWORD
+)
+
+
 class FILE_NOTIFY_INFORMATION(ctypes.Structure):
     _fields_ = [("NextEntryOffset", ctypes.wintypes.DWORD),
                 ("Action", ctypes.wintypes.DWORD),
                 ("FileNameLength", ctypes.wintypes.DWORD),
-                #("FileName", (ctypes.wintypes.WCHAR * 1))]
+                # ("FileName", (ctypes.wintypes.WCHAR * 1))]
                 ("FileName", (ctypes.c_char * 1))]


 LPFNI = ctypes.POINTER(FILE_NOTIFY_INFORMATION)


@@ -254,7 +263,17 @@ WATCHDOG_FILE_NOTIFY_FLAGS = reduce(
     FILE_NOTIFY_CHANGE_CREATION,
 ])

-BUFFER_SIZE = 2048
+# ReadDirectoryChangesW buffer length.
+# To handle cases with lot of changes, this seems the highest safest value we can use.
+# Note: it will fail with ERROR_INVALID_PARAMETER when it is greater than 64 KB and
+# the application is monitoring a directory over the network.
+# This is due to a packet size limitation with the underlying file sharing protocols.
+# https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw#remarks
+BUFFER_SIZE = 64000
+
+# Buffer length for path-related stuff.
+# Introduced to keep the old behavior when we bumped BUFFER_SIZE from 2048 to 64000 in v1.0.0.
+PATH_BUFFER_SIZE = 2048

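`_parse_event_buffer` below turns the raw notification buffer into `(action, filename)` pairs, where `action` is one of the FILE_ACTION_* codes defined above. A hypothetical helper (not part of this module) that maps those codes to readable names, just to show how the parsed output is typically consumed:

    ACTION_NAMES = {
        FILE_ACTION_CREATED: "created",
        FILE_ACTION_DELETED: "deleted",
        FILE_ACTION_MODIFIED: "modified",
        FILE_ACTION_RENAMED_OLD_NAME: "renamed from",
        FILE_ACTION_RENAMED_NEW_NAME: "renamed to",
        FILE_ACTION_DELETED_SELF: "watched directory deleted",
    }

    def describe_changes(event_buffer, nbytes):
        # event_buffer/nbytes as returned by read_directory_changes()
        for action, filename in _parse_event_buffer(event_buffer, nbytes):
            yield ACTION_NAMES.get(action, "unknown action %d" % action), filename
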
 def _parse_event_buffer(readBuffer, nBytes):
@@ -262,7 +281,7 @@ def _parse_event_buffer(readBuffer, nBytes):
     while nBytes > 0:
         fni = ctypes.cast(readBuffer, LPFNI)[0]
         ptr = ctypes.addressof(fni) + FILE_NOTIFY_INFORMATION.FileName.offset
-        #filename = ctypes.wstring_at(ptr, fni.FileNameLength)
+        # filename = ctypes.wstring_at(ptr, fni.FileNameLength)
         filename = ctypes.string_at(ptr, fni.FileNameLength)
         results.append((fni.Action, filename.decode('utf-16')))
         numToSkip = fni.NextEntryOffset
@@ -273,6 +292,25 @@ def _parse_event_buffer(readBuffer, nBytes):
     return results


+def _is_observed_path_deleted(handle, path):
+    # Comparison of observed path and actual path, returned by
+    # GetFinalPathNameByHandleW. If directory moved to the trash bin, or
+    # deleted, actual path will not be equal to observed path.
+    buff = ctypes.create_unicode_buffer(PATH_BUFFER_SIZE)
+    GetFinalPathNameByHandleW(handle, buff, PATH_BUFFER_SIZE, VOLUME_NAME_NT)
+    return buff.value != path
+
+
+def _generate_observed_path_deleted_event():
+    # Create synthetic event for notify that observed directory is deleted
+    path = ctypes.create_unicode_buffer('.')
+    event = FILE_NOTIFY_INFORMATION(0, FILE_ACTION_DELETED_SELF, len(path), path.value.encode("utf-8"))
+    event_size = ctypes.sizeof(event)
+    buff = ctypes.create_string_buffer(PATH_BUFFER_SIZE)
+    ctypes.memmove(buff, ctypes.addressof(event), event_size)
+    return buff, event_size
+
+
 def get_directory_handle(path):
     """Returns a Windows handle to the specified directory path."""
     return CreateFileW(path, FILE_LIST_DIRECTORY, WATCHDOG_FILE_SHARE_FLAGS,
@@ -283,14 +321,14 @@ def close_directory_handle(handle):
     try:
         CancelIoEx(handle, None)  # force ReadDirectoryChangesW to return
         CloseHandle(handle)  # close directory handle
-    except WindowsError:
+    except OSError:
         try:
             CloseHandle(handle)  # close directory handle
-        except:
+        except Exception:
             return


-def read_directory_changes(handle, recursive):
+def read_directory_changes(handle, path, recursive):
     """Read changes to the directory using the specified directory handle.

     http://timgolden.me.uk/pywin32-docs/win32file__ReadDirectoryChangesW_meth.html
@@ -302,20 +340,20 @@ def read_directory_changes(handle, recursive):
                               len(event_buffer), recursive,
                               WATCHDOG_FILE_NOTIFY_FLAGS,
                               ctypes.byref(nbytes), None, None)
-    except WindowsError as e:
+    except OSError as e:
         if e.winerror == ERROR_OPERATION_ABORTED:
             return [], 0

+        # Handle the case when the root path is deleted
+        if _is_observed_path_deleted(handle, path):
+            return _generate_observed_path_deleted_event()
+
         raise e

-    # Python 2/3 compat
-    try:
-        int_class = int
-    except NameError:
-        int_class = int
-    return event_buffer.raw, int_class(nbytes.value)
+    return event_buffer.raw, int(nbytes.value)


-class WinAPINativeEvent(object):
+class WinAPINativeEvent:
     def __init__(self, action, src_path):
         self.action = action
         self.src_path = src_path
@@ -340,11 +378,16 @@ class WinAPINativeEvent(object):
     def is_renamed_new(self):
         return self.action == FILE_ACTION_RENAMED_NEW_NAME

+    @property
+    def is_removed_self(self):
+        return self.action == FILE_ACTION_REMOVED_SELF
+
     def __repr__(self):
-        return ("<WinAPINativeEvent: action=%d, src_path=%r>" % (self.action, self.src_path))
+        return ("<%s: action=%d, src_path=%r>" % (
+            type(self).__name__, self.action, self.src_path))


-def read_events(handle, recursive):
-    buf, nbytes = read_directory_changes(handle, recursive)
+def read_events(handle, path, recursive):
+    buf, nbytes = read_directory_changes(handle, path, recursive)
     events = _parse_event_buffer(buf, nbytes)
-    return [WinAPINativeEvent(action, path) for action, path in events]
+    return [WinAPINativeEvent(action, src_path) for action, src_path in events]

@ -1,8 +1,7 @@
|
||||||
#!/usr/bin/env python
|
# coding: utf-8
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
#
|
||||||
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
|
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
|
||||||
# Copyright 2012 Google, Inc.
|
# Copyright 2012 Google, Inc & contributors.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -16,14 +15,39 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
:module: watchdog.tricks
|
||||||
|
:synopsis: Utility event handlers.
|
||||||
|
:author: yesudeep@google.com (Yesudeep Mangalapilly)
|
||||||
|
:author: contact@tiger-222.fr (Mickaël Schoentgen)
|
||||||
|
|
||||||
|
Classes
|
||||||
|
-------
|
||||||
|
.. autoclass:: Trick
|
||||||
|
:members:
|
||||||
|
:show-inheritance:
|
||||||
|
|
||||||
|
.. autoclass:: LoggerTrick
|
||||||
|
:members:
|
||||||
|
:show-inheritance:
|
||||||
|
|
||||||
|
.. autoclass:: ShellCommandTrick
|
||||||
|
:members:
|
||||||
|
:show-inheritance:
|
||||||
|
|
||||||
|
.. autoclass:: AutoRestartTrick
|
||||||
|
:members:
|
||||||
|
:show-inheritance:
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import signal
|
import signal
|
||||||
import subprocess
|
import subprocess
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from ..utils import echo, has_attribute
|
from watchdog.utils import echo
|
||||||
from ..events import PatternMatchingEventHandler
|
from watchdog.events import PatternMatchingEventHandler
|
||||||
|
|
||||||
|
|
||||||
class Trick(PatternMatchingEventHandler):
|
class Trick(PatternMatchingEventHandler):
|
||||||
|
@ -80,8 +104,9 @@ class ShellCommandTrick(Trick):
|
||||||
def __init__(self, shell_command=None, patterns=None, ignore_patterns=None,
|
def __init__(self, shell_command=None, patterns=None, ignore_patterns=None,
|
||||||
ignore_directories=False, wait_for_process=False,
|
ignore_directories=False, wait_for_process=False,
|
||||||
drop_during_process=False):
|
drop_during_process=False):
|
||||||
super(ShellCommandTrick, self).__init__(patterns, ignore_patterns,
|
super().__init__(
|
||||||
ignore_directories)
|
patterns=patterns, ignore_patterns=ignore_patterns,
|
||||||
|
ignore_directories=ignore_directories)
|
||||||
self.shell_command = shell_command
|
self.shell_command = shell_command
|
||||||
self.wait_for_process = wait_for_process
|
self.wait_for_process = wait_for_process
|
||||||
self.drop_during_process = drop_during_process
|
self.drop_during_process = drop_during_process
|
||||||
|
@ -106,13 +131,13 @@ class ShellCommandTrick(Trick):
|
||||||
}
|
}
|
||||||
|
|
||||||
if self.shell_command is None:
|
if self.shell_command is None:
|
||||||
if has_attribute(event, 'dest_path'):
|
if hasattr(event, 'dest_path'):
|
||||||
context.update({'dest_path': event.dest_path})
|
context.update({'dest_path': event.dest_path})
|
||||||
command = 'echo "${watch_event_type} ${watch_object} from ${watch_src_path} to ${watch_dest_path}"'
|
command = 'echo "${watch_event_type} ${watch_object} from ${watch_src_path} to ${watch_dest_path}"'
|
||||||
else:
|
else:
|
||||||
command = 'echo "${watch_event_type} ${watch_object} ${watch_src_path}"'
|
command = 'echo "${watch_event_type} ${watch_object} ${watch_src_path}"'
|
||||||
else:
|
else:
|
||||||
if has_attribute(event, 'dest_path'):
|
if hasattr(event, 'dest_path'):
|
||||||
context.update({'watch_dest_path': event.dest_path})
|
context.update({'watch_dest_path': event.dest_path})
|
||||||
command = self.shell_command
|
command = self.shell_command
|
||||||
|
|
||||||
|
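For context, a sketch of wiring ShellCommandTrick to an observer; the Observer import, the watched path, and the exact pattern are assumptions for illustration, while the ${watch_*} placeholders are the ones substituted in the handler shown in this hunk.

from watchdog.observers import Observer
from watchdog.tricks import ShellCommandTrick

trick = ShellCommandTrick(
    shell_command='echo "${watch_event_type} ${watch_object}: ${watch_src_path}"',
    patterns=['*.py'],
    drop_during_process=True)  # skip events that arrive while a command is still running

observer = Observer()              # observer wiring is an assumption, not part of this diff
observer.schedule(trick, '.', recursive=True)
observer.start()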
@ -127,17 +152,18 @@ class AutoRestartTrick(Trick):
|
||||||
"""Starts a long-running subprocess and restarts it on matched events.
|
"""Starts a long-running subprocess and restarts it on matched events.
|
||||||
|
|
||||||
The command parameter is a list of command arguments, such as
|
The command parameter is a list of command arguments, such as
|
||||||
['bin/myserver', '-c', 'etc/myconfig.ini'].
|
`['bin/myserver', '-c', 'etc/myconfig.ini']`.
|
||||||
|
|
||||||
Call start() after creating the Trick. Call stop() when stopping
|
Call `start()` after creating the Trick. Call `stop()` when stopping
|
||||||
the process.
|
the process.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, command, patterns=None, ignore_patterns=None,
|
def __init__(self, command, patterns=None, ignore_patterns=None,
|
||||||
ignore_directories=False, stop_signal=signal.SIGINT,
|
ignore_directories=False, stop_signal=signal.SIGINT,
|
||||||
kill_after=10):
|
kill_after=10):
|
||||||
super(AutoRestartTrick, self).__init__(
|
super().__init__(
|
||||||
patterns, ignore_patterns, ignore_directories)
|
patterns=patterns, ignore_patterns=ignore_patterns,
|
||||||
|
ignore_directories=ignore_directories)
|
||||||
self.command = command
|
self.command = command
|
||||||
self.stop_signal = stop_signal
|
self.stop_signal = stop_signal
|
||||||
self.kill_after = kill_after
|
self.kill_after = kill_after
|
||||||
|
|
|
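A minimal lifecycle sketch matching the docstring above (call start() after creating the Trick, stop() on shutdown); the observer wiring, command, and paths are illustrative assumptions.

from watchdog.observers import Observer
from watchdog.tricks import AutoRestartTrick

trick = AutoRestartTrick(['bin/myserver', '-c', 'etc/myconfig.ini'],
                         patterns=['*.ini'])
trick.start()                 # launch the long-running subprocess
observer = Observer()
observer.schedule(trick, 'etc', recursive=False)
observer.start()
# ... later, on shutdown:
trick.stop()                  # send stop_signal, escalate to kill after kill_after seconds
observer.stop()
observer.join()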
@ -1,8 +1,7 @@
|
||||||
#!/usr/bin/env python
|
# coding: utf-8
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
#
|
||||||
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
|
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
|
||||||
# Copyright 2012 Google, Inc.
|
# Copyright 2012 Google, Inc & contributors.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -21,6 +20,7 @@
|
||||||
:module: watchdog.utils
|
:module: watchdog.utils
|
||||||
:synopsis: Utility classes and functions.
|
:synopsis: Utility classes and functions.
|
||||||
:author: yesudeep@google.com (Yesudeep Mangalapilly)
|
:author: yesudeep@google.com (Yesudeep Mangalapilly)
|
||||||
|
:author: contact@tiger-222.fr (Mickaël Schoentgen)
|
||||||
|
|
||||||
Classes
|
Classes
|
||||||
-------
|
-------
|
||||||
|
@ -30,51 +30,33 @@ Classes
|
||||||
:inherited-members:
|
:inherited-members:
|
||||||
|
|
||||||
"""
|
"""
|
||||||
from __future__ import absolute_import
|
|
||||||
import os
|
|
||||||
import sys
|
import sys
|
||||||
import threading
|
import threading
|
||||||
from . import platform
|
|
||||||
from .compat import Event
|
|
||||||
|
|
||||||
|
|
||||||
if sys.version_info[0] == 2 and platform.is_windows():
|
|
||||||
# st_ino is not implemented in os.stat on this platform
|
|
||||||
from . import win32stat
|
|
||||||
stat = win32stat.stat
|
|
||||||
else:
|
|
||||||
stat = os.stat
|
|
||||||
|
|
||||||
|
|
||||||
def has_attribute(ob, attribute):
|
|
||||||
"""
|
|
||||||
:func:`hasattr` swallows exceptions. :func:`has_attribute` tests a Python object for the
|
|
||||||
presence of an attribute.
|
|
||||||
|
|
||||||
:param ob:
|
|
||||||
object to inspect
|
|
||||||
:param attribute:
|
|
||||||
``str`` for the name of the attribute.
|
|
||||||
"""
|
|
||||||
return getattr(ob, attribute, None) is not None
|
|
||||||
|
|
||||||
|
|
||||||
class UnsupportedLibc(Exception):
|
class UnsupportedLibc(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class WatchdogShutdown(Exception):
|
||||||
|
"""
|
||||||
|
Semantic exception used to signal an external shutdown event.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class BaseThread(threading.Thread):
|
class BaseThread(threading.Thread):
|
||||||
""" Convenience class for creating stoppable threads. """
|
""" Convenience class for creating stoppable threads. """
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
threading.Thread.__init__(self)
|
threading.Thread.__init__(self)
|
||||||
if has_attribute(self, 'daemon'):
|
if hasattr(self, 'daemon'):
|
||||||
self.daemon = True
|
self.daemon = True
|
||||||
else:
|
else:
|
||||||
self.setDaemon(True)
|
self.setDaemon(True)
|
||||||
self._stopped_event = Event()
|
self._stopped_event = threading.Event()
|
||||||
|
|
||||||
if not has_attribute(self._stopped_event, 'is_set'):
|
if not hasattr(self._stopped_event, 'is_set'):
|
||||||
self._stopped_event.is_set = self._stopped_event.isSet
|
self._stopped_event.is_set = self._stopped_event.isSet
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
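A hedged sketch of a stoppable worker built on BaseThread; should_keep_running() and stop() are assumed from the parts of the class not shown in this hunk.

from watchdog.utils import BaseThread

class HeartbeatThread(BaseThread):
    def run(self):
        # Loop until stop() sets the stopped event, waking roughly once per second.
        while self.should_keep_running():   # assumed helper on BaseThread
            print("tick")
            self._stopped_event.wait(1.0)

worker = HeartbeatThread()
worker.start()
worker.stop()    # sets the event; the loop exits on its next check
worker.join()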
@ -145,7 +127,7 @@ def load_class(dotted_path):
|
||||||
module_name = '.'.join(dotted_path_split[:-1])
|
module_name = '.'.join(dotted_path_split[:-1])
|
||||||
|
|
||||||
module = load_module(module_name)
|
module = load_module(module_name)
|
||||||
if has_attribute(module, klass_name):
|
if hasattr(module, klass_name):
|
||||||
klass = getattr(module, klass_name)
|
klass = getattr(module, klass_name)
|
||||||
return klass
|
return klass
|
||||||
# Finally create and return an instance of the class
|
# Finally create and return an instance of the class
|
||||||
|
|
|
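A short illustration of load_class resolving a dotted path to a class object; the chosen target class is only an example.

from watchdog.utils import load_class

handler_cls = load_class('watchdog.tricks.LoggerTrick')
handler = handler_cls()    # instantiate whatever class the dotted path named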
@ -1,8 +1,7 @@
|
||||||
#!/usr/bin/env python
|
# coding: utf-8
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
#
|
||||||
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
|
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
|
||||||
# Copyright 2012 Google, Inc.
|
# Copyright 2012 Google, Inc & contributors.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -24,6 +23,7 @@ Utility collections or "bricks".
|
||||||
:author: yesudeep@google.com (Yesudeep Mangalapilly)
|
:author: yesudeep@google.com (Yesudeep Mangalapilly)
|
||||||
:author: lalinsky@gmail.com (Lukáš Lalinský)
|
:author: lalinsky@gmail.com (Lukáš Lalinský)
|
||||||
:author: python@rcn.com (Raymond Hettinger)
|
:author: python@rcn.com (Raymond Hettinger)
|
||||||
|
:author: contact@tiger-222.fr (Mickaël Schoentgen)
|
||||||
|
|
||||||
Classes
|
Classes
|
||||||
=======
|
=======
|
||||||
|
@ -36,11 +36,8 @@ Classes
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from builtins import next
|
import queue
|
||||||
from builtins import range
|
|
||||||
import sys
|
|
||||||
import collections
|
|
||||||
from .compat import queue
|
|
||||||
|
|
||||||
class SkipRepeatsQueue(queue.Queue):
|
class SkipRepeatsQueue(queue.Queue):
|
||||||
|
|
||||||
|
@ -57,7 +54,7 @@ class SkipRepeatsQueue(queue.Queue):
|
||||||
|
|
||||||
An example implementation follows::
|
An example implementation follows::
|
||||||
|
|
||||||
class Item(object):
|
class Item:
|
||||||
def __init__(self, a, b):
|
def __init__(self, a, b):
|
||||||
self._a = a
|
self._a = a
|
||||||
self._b = b
|
self._b = b
|
||||||
|
@ -86,12 +83,12 @@ class SkipRepeatsQueue(queue.Queue):
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def _init(self, maxsize):
|
def _init(self, maxsize):
|
||||||
queue.Queue._init(self, maxsize)
|
super()._init(maxsize)
|
||||||
self._last_item = None
|
self._last_item = None
|
||||||
|
|
||||||
def _put(self, item):
|
def _put(self, item):
|
||||||
if item != self._last_item:
|
if item != self._last_item:
|
||||||
queue.Queue._put(self, item)
|
super()._put(item)
|
||||||
self._last_item = item
|
self._last_item = item
|
||||||
else:
|
else:
|
||||||
# `put` increments `unfinished_tasks` even if we did not put
|
# `put` increments `unfinished_tasks` even if we did not put
|
||||||
|
@ -99,153 +96,7 @@ class SkipRepeatsQueue(queue.Queue):
|
||||||
self.unfinished_tasks -= 1
|
self.unfinished_tasks -= 1
|
||||||
|
|
||||||
def _get(self):
|
def _get(self):
|
||||||
item = queue.Queue._get(self)
|
item = super()._get()
|
||||||
if item is self._last_item:
|
if item is self._last_item:
|
||||||
self._last_item = None
|
self._last_item = None
|
||||||
return item
|
return item
|
||||||
|
|
||||||
|
|
||||||
class OrderedSetQueue(queue.Queue):
|
|
||||||
|
|
||||||
"""Thread-safe implementation of an ordered set queue.
|
|
||||||
|
|
||||||
Disallows adding a duplicate item while maintaining the
|
|
||||||
order of items in the queue. The implementation leverages
|
|
||||||
locking already implemented in the base class
|
|
||||||
redefining only the primitives. Since the internal queue
|
|
||||||
is not replaced, the order is maintained. The set is used
|
|
||||||
merely to check for the existence of an item.
|
|
||||||
|
|
||||||
Queued items must be immutable and hashable so that they can be used
|
|
||||||
as dictionary keys. You must implement **only read-only properties** and
|
|
||||||
the :meth:`Item.__hash__()`, :meth:`Item.__eq__()`, and
|
|
||||||
:meth:`Item.__ne__()` methods for items to be hashable.
|
|
||||||
|
|
||||||
An example implementation follows::
|
|
||||||
|
|
||||||
class Item(object):
|
|
||||||
def __init__(self, a, b):
|
|
||||||
self._a = a
|
|
||||||
self._b = b
|
|
||||||
|
|
||||||
@property
|
|
||||||
def a(self):
|
|
||||||
return self._a
|
|
||||||
|
|
||||||
@property
|
|
||||||
def b(self):
|
|
||||||
return self._b
|
|
||||||
|
|
||||||
def _key(self):
|
|
||||||
return (self._a, self._b)
|
|
||||||
|
|
||||||
def __eq__(self, item):
|
|
||||||
return self._key() == item._key()
|
|
||||||
|
|
||||||
def __ne__(self, item):
|
|
||||||
return self._key() != item._key()
|
|
||||||
|
|
||||||
def __hash__(self):
|
|
||||||
return hash(self._key())
|
|
||||||
|
|
||||||
:author: lalinsky@gmail.com (Lukáš Lalinský)
|
|
||||||
:url: http://stackoverflow.com/questions/1581895/how-check-if-a-task-is-already-in-python-queue
|
|
||||||
"""
|
|
||||||
|
|
||||||
def _init(self, maxsize):
|
|
||||||
queue.Queue._init(self, maxsize)
|
|
||||||
self._set_of_items = set()
|
|
||||||
|
|
||||||
def _put(self, item):
|
|
||||||
if item not in self._set_of_items:
|
|
||||||
queue.Queue._put(self, item)
|
|
||||||
self._set_of_items.add(item)
|
|
||||||
else:
|
|
||||||
# `put` increments `unfinished_tasks` even if we did not put
|
|
||||||
# anything into the queue here
|
|
||||||
self.unfinished_tasks -= 1
|
|
||||||
|
|
||||||
def _get(self):
|
|
||||||
item = queue.Queue._get(self)
|
|
||||||
self._set_of_items.remove(item)
|
|
||||||
return item
|
|
||||||
|
|
||||||
|
|
||||||
if sys.version_info >= (2, 6, 0):
|
|
||||||
KEY, PREV, NEXT = list(range(3))
|
|
||||||
|
|
||||||
class OrderedSet(collections.MutableSet):
|
|
||||||
|
|
||||||
"""
|
|
||||||
Implementation based on a doubly-linked link and an internal dictionary.
|
|
||||||
This design gives :class:`OrderedSet` the same big-Oh running times as
|
|
||||||
regular sets including O(1) adds, removes, and lookups as well as
|
|
||||||
O(n) iteration.
|
|
||||||
|
|
||||||
.. ADMONITION:: Implementation notes
|
|
||||||
|
|
||||||
Runs on Python 2.6 or later (and runs on Python 3.0 or later
|
|
||||||
without any modifications).
|
|
||||||
|
|
||||||
:author: python@rcn.com (Raymond Hettinger)
|
|
||||||
:url: http://code.activestate.com/recipes/576694/
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, iterable=None):
|
|
||||||
self.end = end = []
|
|
||||||
end += [None, end, end] # sentinel node for doubly linked list
|
|
||||||
self.map = {} # key --> [key, prev, next]
|
|
||||||
if iterable is not None:
|
|
||||||
self |= iterable
|
|
||||||
|
|
||||||
def __len__(self):
|
|
||||||
return len(self.map)
|
|
||||||
|
|
||||||
def __contains__(self, key):
|
|
||||||
return key in self.map
|
|
||||||
|
|
||||||
def add(self, key):
|
|
||||||
if key not in self.map:
|
|
||||||
end = self.end
|
|
||||||
curr = end[PREV]
|
|
||||||
curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
|
|
||||||
|
|
||||||
def discard(self, key):
|
|
||||||
if key in self.map:
|
|
||||||
key, prev, _next = self.map.pop(key)
|
|
||||||
prev[NEXT] = _next
|
|
||||||
_next[PREV] = prev
|
|
||||||
|
|
||||||
def __iter__(self):
|
|
||||||
end = self.end
|
|
||||||
curr = end[NEXT]
|
|
||||||
while curr is not end:
|
|
||||||
yield curr[KEY]
|
|
||||||
curr = curr[NEXT]
|
|
||||||
|
|
||||||
def __reversed__(self):
|
|
||||||
end = self.end
|
|
||||||
curr = end[PREV]
|
|
||||||
while curr is not end:
|
|
||||||
yield curr[KEY]
|
|
||||||
curr = curr[PREV]
|
|
||||||
|
|
||||||
def pop(self, last=True):
|
|
||||||
if not self:
|
|
||||||
raise KeyError('set is empty')
|
|
||||||
key = next(reversed(self)) if last else next(iter(self))
|
|
||||||
self.discard(key)
|
|
||||||
return key
|
|
||||||
|
|
||||||
def __repr__(self):
|
|
||||||
if not self:
|
|
||||||
return '%s()' % (self.__class__.__name__,)
|
|
||||||
return '%s(%r)' % (self.__class__.__name__, list(self))
|
|
||||||
|
|
||||||
def __eq__(self, other):
|
|
||||||
if isinstance(other, OrderedSet):
|
|
||||||
return len(self) == len(other) and list(self) == list(other)
|
|
||||||
return set(self) == set(other)
|
|
||||||
|
|
||||||
def __del__(self):
|
|
||||||
self.clear() # remove circular references
|
|
||||||
|
|
|
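With OrderedSetQueue and OrderedSet removed, SkipRepeatsQueue is the collection that remains here; a small demonstration of the de-duplication behaviour its docstring describes (the module path watchdog.utils.bricks is assumed from this file's location).

from watchdog.utils.bricks import SkipRepeatsQueue

q = SkipRepeatsQueue()
for value in ("a", "a", "b", "a"):
    q.put(value)                 # the immediate repeat of "a" is dropped

drained = []
while not q.empty():
    drained.append(q.get())
print(drained)                   # ['a', 'b', 'a']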
@ -1,31 +0,0 @@
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
from future import standard_library
|
|
||||||
standard_library.install_aliases()
|
|
||||||
import sys
|
|
||||||
|
|
||||||
__all__ = ['queue', 'Event']
|
|
||||||
|
|
||||||
try:
|
|
||||||
import queue
|
|
||||||
except ImportError:
|
|
||||||
import queue as queue
|
|
||||||
|
|
||||||
|
|
||||||
if sys.version_info < (2, 7):
|
|
||||||
from .event_backport import Event
|
|
||||||
else:
|
|
||||||
from threading import Event
|
|
|
@ -1,199 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Most of this code was obtained from the Python documentation online.
|
|
||||||
|
|
||||||
"""Decorator utility functions.
|
|
||||||
|
|
||||||
decorators:
|
|
||||||
- synchronized
|
|
||||||
- propertyx
|
|
||||||
- accepts
|
|
||||||
- returns
|
|
||||||
- singleton
|
|
||||||
- attrs
|
|
||||||
- deprecated
|
|
||||||
"""
|
|
||||||
|
|
||||||
from builtins import zip
|
|
||||||
import functools
|
|
||||||
import warnings
|
|
||||||
import threading
|
|
||||||
import sys
|
|
||||||
|
|
||||||
|
|
||||||
def synchronized(lock=None):
|
|
||||||
"""Decorator that synchronizes a method or a function with a mutex lock.
|
|
||||||
|
|
||||||
Example usage:
|
|
||||||
|
|
||||||
@synchronized()
|
|
||||||
def operation(self, a, b):
|
|
||||||
...
|
|
||||||
"""
|
|
||||||
if lock is None:
|
|
||||||
lock = threading.Lock()
|
|
||||||
|
|
||||||
def wrapper(function):
|
|
||||||
def new_function(*args, **kwargs):
|
|
||||||
lock.acquire()
|
|
||||||
try:
|
|
||||||
return function(*args, **kwargs)
|
|
||||||
finally:
|
|
||||||
lock.release()
|
|
||||||
|
|
||||||
return new_function
|
|
||||||
|
|
||||||
return wrapper
|
|
||||||
|
|
||||||
|
|
||||||
def propertyx(function):
|
|
||||||
"""Decorator to easily create properties in classes.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
class Angle(object):
|
|
||||||
def __init__(self, rad):
|
|
||||||
self._rad = rad
|
|
||||||
|
|
||||||
@property
|
|
||||||
def rad():
|
|
||||||
def fget(self):
|
|
||||||
return self._rad
|
|
||||||
def fset(self, angle):
|
|
||||||
if isinstance(angle, Angle):
|
|
||||||
angle = angle.rad
|
|
||||||
self._rad = float(angle)
|
|
||||||
|
|
||||||
Arguments:
|
|
||||||
- `function`: The function to be decorated.
|
|
||||||
"""
|
|
||||||
keys = ('fget', 'fset', 'fdel')
|
|
||||||
func_locals = {'doc': function.__doc__}
|
|
||||||
|
|
||||||
def probe_func(frame, event, arg):
|
|
||||||
if event == 'return':
|
|
||||||
locals = frame.f_locals
|
|
||||||
func_locals.update(dict((k, locals.get(k)) for k in keys))
|
|
||||||
sys.settrace(None)
|
|
||||||
return probe_func
|
|
||||||
|
|
||||||
sys.settrace(probe_func)
|
|
||||||
function()
|
|
||||||
return property(**func_locals)
|
|
||||||
|
|
||||||
|
|
||||||
def accepts(*types):
|
|
||||||
"""Decorator to ensure that the decorated function accepts the given types as arguments.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
@accepts(int, (int,float))
|
|
||||||
@returns((int,float))
|
|
||||||
def func(arg1, arg2):
|
|
||||||
return arg1 * arg2
|
|
||||||
"""
|
|
||||||
|
|
||||||
def check_accepts(f):
|
|
||||||
assert len(types) == f.__code__.co_argcount
|
|
||||||
|
|
||||||
def new_f(*args, **kwds):
|
|
||||||
for (a, t) in zip(args, types):
|
|
||||||
assert isinstance(a, t),\
|
|
||||||
"arg %r does not match %s" % (a, t)
|
|
||||||
return f(*args, **kwds)
|
|
||||||
|
|
||||||
new_f.__name__ = f.__name__
|
|
||||||
return new_f
|
|
||||||
|
|
||||||
return check_accepts
|
|
||||||
|
|
||||||
|
|
||||||
def returns(rtype):
|
|
||||||
"""Decorator to ensure that the decorated function returns the given
|
|
||||||
type as argument.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
@accepts(int, (int,float))
|
|
||||||
@returns((int,float))
|
|
||||||
def func(arg1, arg2):
|
|
||||||
return arg1 * arg2
|
|
||||||
"""
|
|
||||||
|
|
||||||
def check_returns(f):
|
|
||||||
def new_f(*args, **kwds):
|
|
||||||
result = f(*args, **kwds)
|
|
||||||
assert isinstance(result, rtype),\
|
|
||||||
"return value %r does not match %s" % (result, rtype)
|
|
||||||
return result
|
|
||||||
|
|
||||||
new_f.__name__ = f.__name__
|
|
||||||
return new_f
|
|
||||||
|
|
||||||
return check_returns
|
|
||||||
|
|
||||||
|
|
||||||
def singleton(cls):
|
|
||||||
"""Decorator to ensures a class follows the singleton pattern.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
@singleton
|
|
||||||
class MyClass:
|
|
||||||
...
|
|
||||||
"""
|
|
||||||
instances = {}
|
|
||||||
|
|
||||||
def getinstance():
|
|
||||||
if cls not in instances:
|
|
||||||
instances[cls] = cls()
|
|
||||||
return instances[cls]
|
|
||||||
|
|
||||||
return getinstance
|
|
||||||
|
|
||||||
|
|
||||||
def attrs(**kwds):
|
|
||||||
"""Decorator to add attributes to a function.
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
@attrs(versionadded="2.2",
|
|
||||||
author="Guido van Rossum")
|
|
||||||
def mymethod(f):
|
|
||||||
...
|
|
||||||
"""
|
|
||||||
|
|
||||||
def decorate(f):
|
|
||||||
for k in kwds:
|
|
||||||
setattr(f, k, kwds[k])
|
|
||||||
return f
|
|
||||||
|
|
||||||
return decorate
|
|
||||||
|
|
||||||
|
|
||||||
def deprecated(func):
|
|
||||||
"""This is a decorator which can be used to mark functions
|
|
||||||
as deprecated. It will result in a warning being emitted
|
|
||||||
when the function is used.
|
|
||||||
|
|
||||||
## Usage examples ##
|
|
||||||
@deprecated
|
|
||||||
def my_func():
|
|
||||||
pass
|
|
||||||
|
|
||||||
@other_decorators_must_be_upper
|
|
||||||
@deprecated
|
|
||||||
def my_func():
|
|
||||||
pass
|
|
||||||
"""
|
|
||||||
|
|
||||||
@functools.wraps(func)
|
|
||||||
def new_func(*args, **kwargs):
|
|
||||||
warnings.warn_explicit(
|
|
||||||
"Call to deprecated function %(funcname)s." % {
|
|
||||||
'funcname': func.__name__,
|
|
||||||
},
|
|
||||||
category=DeprecationWarning,
|
|
||||||
filename=func.__code__.co_filename,
|
|
||||||
lineno=func.__code__.co_firstlineno + 1
|
|
||||||
)
|
|
||||||
return func(*args, **kwargs)
|
|
||||||
|
|
||||||
return new_func
|
|
|
@ -1,4 +1,4 @@
|
||||||
# -*- coding: utf-8 -*-
|
# coding: utf-8
|
||||||
#
|
#
|
||||||
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
|
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
|
||||||
#
|
#
|
||||||
|
@ -14,25 +14,24 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
from builtins import object
|
|
||||||
import time
|
import time
|
||||||
import threading
|
import threading
|
||||||
from collections import deque
|
from collections import deque
|
||||||
|
|
||||||
|
|
||||||
class DelayedQueue(object):
|
class DelayedQueue:
|
||||||
|
|
||||||
def __init__(self, delay):
|
def __init__(self, delay):
|
||||||
self.delay = delay
|
self.delay_sec = delay
|
||||||
self._lock = threading.Lock()
|
self._lock = threading.Lock()
|
||||||
self._not_empty = threading.Condition(self._lock)
|
self._not_empty = threading.Condition(self._lock)
|
||||||
self._queue = deque()
|
self._queue = deque()
|
||||||
self._closed = False
|
self._closed = False
|
||||||
|
|
||||||
def put(self, element):
|
def put(self, element, delay=False):
|
||||||
"""Add element to queue."""
|
"""Add element to queue."""
|
||||||
self._lock.acquire()
|
self._lock.acquire()
|
||||||
self._queue.append((element, time.time()))
|
self._queue.append((element, time.time(), delay))
|
||||||
self._not_empty.notify()
|
self._not_empty.notify()
|
||||||
self._lock.release()
|
self._lock.release()
|
||||||
|
|
||||||
|
@ -57,33 +56,28 @@ class DelayedQueue(object):
|
||||||
if self._closed:
|
if self._closed:
|
||||||
self._not_empty.release()
|
self._not_empty.release()
|
||||||
return None
|
return None
|
||||||
head, insert_time = self._queue[0]
|
head, insert_time, delay = self._queue[0]
|
||||||
self._not_empty.release()
|
self._not_empty.release()
|
||||||
|
|
||||||
# wait for delay
|
# wait for delay if required
|
||||||
time_left = insert_time + self.delay - time.time()
|
if delay:
|
||||||
while time_left > 0:
|
time_left = insert_time + self.delay_sec - time.time()
|
||||||
time.sleep(time_left)
|
while time_left > 0:
|
||||||
time_left = insert_time + self.delay - time.time()
|
time.sleep(time_left)
|
||||||
|
time_left = insert_time + self.delay_sec - time.time()
|
||||||
|
|
||||||
# return element if it's still in the queue
|
# return element if it's still in the queue
|
||||||
self._lock.acquire()
|
with self._lock:
|
||||||
try:
|
|
||||||
if len(self._queue) > 0 and self._queue[0][0] is head:
|
if len(self._queue) > 0 and self._queue[0][0] is head:
|
||||||
self._queue.popleft()
|
self._queue.popleft()
|
||||||
return head
|
return head
|
||||||
finally:
|
|
||||||
self._lock.release()
|
|
||||||
|
|
||||||
def remove(self, predicate):
|
def remove(self, predicate):
|
||||||
"""Remove and return the first items for which predicate is True,
|
"""Remove and return the first items for which predicate is True,
|
||||||
ignoring delay."""
|
ignoring delay."""
|
||||||
try:
|
with self._lock:
|
||||||
self._lock.acquire()
|
for i, (elem, t, delay) in enumerate(self._queue):
|
||||||
for i, (elem, t) in enumerate(self._queue):
|
|
||||||
if predicate(elem):
|
if predicate(elem):
|
||||||
del self._queue[i]
|
del self._queue[i]
|
||||||
return elem
|
return elem
|
||||||
finally:
|
|
||||||
self._lock.release()
|
|
||||||
return None
|
return None
|
||||||
|
|
|
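A sketch of the new per-item delay flag: only entries queued with delay=True are held back for the configured number of seconds before get() hands them out. The import path is an assumption.

from watchdog.utils.delayed_queue import DelayedQueue   # path assumed

q = DelayedQueue(0.5)
q.put("immediate")               # handed out as soon as get() asks for it
q.put("held-back", delay=True)   # handed out only after ~0.5 s

print(q.get())                   # "immediate"
print(q.get())                   # "held-back", once the remaining delay has elapsed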
@ -1,8 +1,7 @@
|
||||||
#!/usr/bin/env python
|
# coding: utf-8
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
#
|
||||||
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
|
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
|
||||||
# Copyright 2012 Google, Inc.
|
# Copyright 2012 Google, Inc & contributors.
|
||||||
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
|
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
@ -21,6 +20,7 @@
|
||||||
:module: watchdog.utils.dirsnapshot
|
:module: watchdog.utils.dirsnapshot
|
||||||
:synopsis: Directory snapshots and comparison.
|
:synopsis: Directory snapshots and comparison.
|
||||||
:author: yesudeep@google.com (Yesudeep Mangalapilly)
|
:author: yesudeep@google.com (Yesudeep Mangalapilly)
|
||||||
|
:author: contact@tiger-222.fr (Mickaël Schoentgen)
|
||||||
|
|
||||||
.. ADMONITION:: Where are the moved events? They "disappeared"
|
.. ADMONITION:: Where are the moved events? They "disappeared"
|
||||||
|
|
||||||
|
@ -42,17 +42,18 @@ Classes
|
||||||
:members:
|
:members:
|
||||||
:show-inheritance:
|
:show-inheritance:
|
||||||
|
|
||||||
|
.. autoclass:: EmptyDirectorySnapshot
|
||||||
|
:members:
|
||||||
|
:show-inheritance:
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from builtins import str
|
|
||||||
from builtins import object
|
|
||||||
import errno
|
import errno
|
||||||
import os
|
import os
|
||||||
from stat import S_ISDIR
|
from stat import S_ISDIR
|
||||||
from . import stat as default_stat
|
|
||||||
|
|
||||||
|
|
||||||
class DirectorySnapshotDiff(object):
|
class DirectorySnapshotDiff:
|
||||||
"""
|
"""
|
||||||
Compares two directory snapshots and creates an object that represents
|
Compares two directory snapshots and creates an object that represents
|
||||||
the difference between the two snapshots.
|
the difference between the two snapshots.
|
||||||
|
@ -66,15 +67,32 @@ class DirectorySnapshotDiff(object):
|
||||||
with the reference snapshot.
|
with the reference snapshot.
|
||||||
:type snapshot:
|
:type snapshot:
|
||||||
:class:`DirectorySnapshot`
|
:class:`DirectorySnapshot`
|
||||||
|
:param ignore_device:
|
||||||
|
A boolean indicating whether to ignore the device id or not.
|
||||||
|
By default, a file may be uniquely identified by a combination of its first
|
||||||
|
inode and its device id. The problem is that the device id may (or may not)
|
||||||
|
change between system boots. This problem would cause the DirectorySnapshotDiff
|
||||||
|
to think a file has been deleted and created again but it would be the
|
||||||
|
exact same file.
|
||||||
|
Set to True only if you are sure you will always use the same device.
|
||||||
|
:type ignore_device:
|
||||||
|
:class:`bool`
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, ref, snapshot):
|
def __init__(self, ref, snapshot, ignore_device=False):
|
||||||
created = snapshot.paths - ref.paths
|
created = snapshot.paths - ref.paths
|
||||||
deleted = ref.paths - snapshot.paths
|
deleted = ref.paths - snapshot.paths
|
||||||
|
|
||||||
|
if ignore_device:
|
||||||
|
def get_inode(directory, full_path):
|
||||||
|
return directory.inode(full_path)[0]
|
||||||
|
else:
|
||||||
|
def get_inode(directory, full_path):
|
||||||
|
return directory.inode(full_path)
|
||||||
|
|
||||||
# check that all unchanged paths have the same inode
|
# check that all unchanged paths have the same inode
|
||||||
for path in ref.paths & snapshot.paths:
|
for path in ref.paths & snapshot.paths:
|
||||||
if ref.inode(path) != snapshot.inode(path):
|
if get_inode(ref, path) != get_inode(snapshot, path):
|
||||||
created.add(path)
|
created.add(path)
|
||||||
deleted.add(path)
|
deleted.add(path)
|
||||||
|
|
||||||
|
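To make the ignore_device switch concrete, here is the comparison key it effectively uses, written out standalone. The names are illustrative, and DirectorySnapshot.inode is assumed to return an (st_ino, st_dev) pair, as the [0] indexing in this hunk implies.

def snapshot_key(snapshot, path, ignore_device):
    inode, device = snapshot.inode(path)
    # Dropping the device id keeps a file "the same" even if the volume's
    # device number changed between boots, avoiding spurious delete+create pairs.
    return inode if ignore_device else (inode, device)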
@ -99,12 +117,12 @@ class DirectorySnapshotDiff(object):
|
||||||
# first check paths that have not moved
|
# first check paths that have not moved
|
||||||
modified = set()
|
modified = set()
|
||||||
for path in ref.paths & snapshot.paths:
|
for path in ref.paths & snapshot.paths:
|
||||||
if ref.inode(path) == snapshot.inode(path):
|
if get_inode(ref, path) == get_inode(snapshot, path):
|
||||||
if ref.mtime(path) != snapshot.mtime(path):
|
if ref.mtime(path) != snapshot.mtime(path) or ref.size(path) != snapshot.size(path):
|
||||||
modified.add(path)
|
modified.add(path)
|
||||||
|
|
||||||
for (old_path, new_path) in moved:
|
for (old_path, new_path) in moved:
|
||||||
if ref.mtime(old_path) != snapshot.mtime(new_path):
|
if ref.mtime(old_path) != snapshot.mtime(new_path) or ref.size(old_path) != snapshot.size(new_path):
|
||||||
modified.add(old_path)
|
modified.add(old_path)
|
||||||
|
|
||||||
self._dirs_created = [path for path in created if snapshot.isdir(path)]
|
self._dirs_created = [path for path in created if snapshot.isdir(path)]
|
||||||
|
@ -117,6 +135,26 @@ class DirectorySnapshotDiff(object):
|
||||||
self._files_modified = list(modified - set(self._dirs_modified))
|
self._files_modified = list(modified - set(self._dirs_modified))
|
||||||
self._files_moved = list(moved - set(self._dirs_moved))
|
self._files_moved = list(moved - set(self._dirs_moved))
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return self.__repr__()
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
fmt = (
|
||||||
|
'<{0} files(created={1}, deleted={2}, modified={3}, moved={4}),'
|
||||||
|
' folders(created={5}, deleted={6}, modified={7}, moved={8})>'
|
||||||
|
)
|
||||||
|
return fmt.format(
|
||||||
|
type(self).__name__,
|
||||||
|
len(self._files_created),
|
||||||
|
len(self._files_deleted),
|
||||||
|
len(self._files_modified),
|
||||||
|
len(self._files_moved),
|
||||||
|
len(self._dirs_created),
|
||||||
|
len(self._dirs_deleted),
|
||||||
|
len(self._dirs_modified),
|
||||||
|
len(self._dirs_moved)
|
||||||
|
)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def files_created(self):
|
def files_created(self):
|
||||||
"""List of files that were created."""
|
"""List of files that were created."""
|
||||||
|
@ -173,7 +211,8 @@ class DirectorySnapshotDiff(object):
|
||||||
"""
|
"""
|
||||||
return self._dirs_created
|
return self._dirs_created
|
||||||
|
|
||||||
class DirectorySnapshot(object):
|
|
||||||
|
class DirectorySnapshot:
|
||||||
"""
|
"""
|
||||||
A snapshot of stat information of files in a directory.
|
A snapshot of stat information of files in a directory.
|
||||||
|
|
||||||
|
@ -186,59 +225,65 @@ class DirectorySnapshot(object):
|
||||||
snapshot; ``False`` otherwise.
|
snapshot; ``False`` otherwise.
|
||||||
:type recursive:
|
:type recursive:
|
||||||
``bool``
|
``bool``
|
||||||
:param walker_callback:
|
|
||||||
.. deprecated:: 0.7.2
|
|
||||||
:param stat:
|
:param stat:
|
||||||
Use custom stat function that returns a stat structure for path.
|
Use custom stat function that returns a stat structure for path.
|
||||||
Currently only st_dev, st_ino, st_mode and st_mtime are needed.
|
Currently only st_dev, st_ino, st_mode and st_mtime are needed.
|
||||||
|
|
||||||
A function with the signature ``walker_callback(path, stat_info)``
|
A function taking a ``path`` as argument which will be called
|
||||||
which will be called for every entry in the directory tree.
|
for every entry in the directory tree.
|
||||||
:param listdir:
|
:param listdir:
|
||||||
Use custom listdir function. See ``os.listdir`` for details.
|
Use custom listdir function. For details see ``os.scandir``.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, path, recursive=True,
|
def __init__(self, path, recursive=True,
|
||||||
walker_callback=(lambda p, s: None),
|
stat=os.stat, listdir=os.scandir):
|
||||||
stat=default_stat,
|
self.recursive = recursive
|
||||||
listdir=os.listdir):
|
self.stat = stat
|
||||||
|
self.listdir = listdir
|
||||||
|
|
||||||
self._stat_info = {}
|
self._stat_info = {}
|
||||||
self._inode_to_path = {}
|
self._inode_to_path = {}
|
||||||
|
|
||||||
st = stat(path)
|
st = self.stat(path)
|
||||||
self._stat_info[path] = st
|
self._stat_info[path] = st
|
||||||
self._inode_to_path[(st.st_ino, st.st_dev)] = path
|
self._inode_to_path[(st.st_ino, st.st_dev)] = path
|
||||||
|
|
||||||
def walk(root):
|
for p, st in self.walk(path):
|
||||||
try:
|
|
||||||
paths = [os.path.join(root, name) for name in listdir(root)]
|
|
||||||
except OSError as e:
|
|
||||||
# Directory may have been deleted between finding it in the directory
|
|
||||||
# list of its parent and trying to delete its contents. If this
|
|
||||||
# happens we treat it as empty.
|
|
||||||
if e.errno == errno.ENOENT:
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
raise
|
|
||||||
entries = []
|
|
||||||
for p in paths:
|
|
||||||
try:
|
|
||||||
entries.append((p, stat(p)))
|
|
||||||
except OSError:
|
|
||||||
continue
|
|
||||||
for _ in entries:
|
|
||||||
yield _
|
|
||||||
if recursive:
|
|
||||||
for path, st in entries:
|
|
||||||
if S_ISDIR(st.st_mode):
|
|
||||||
for _ in walk(path):
|
|
||||||
yield _
|
|
||||||
|
|
||||||
for p, st in walk(path):
|
|
||||||
i = (st.st_ino, st.st_dev)
|
i = (st.st_ino, st.st_dev)
|
||||||
self._inode_to_path[i] = p
|
self._inode_to_path[i] = p
|
||||||
self._stat_info[p] = st
|
self._stat_info[p] = st
|
||||||
walker_callback(p, st)
|
|
||||||
|
def walk(self, root):
|
||||||
|
try:
|
||||||
|
paths = [os.path.join(root, entry if isinstance(entry, str) else entry.name)
|
||||||
|
for entry in self.listdir(root)]
|
||||||
|
except OSError as e:
|
||||||
|
# Directory may have been deleted between finding it in the directory
|
||||||
|
# list of its parent and trying to delete its contents. If this
|
||||||
|
# happens we treat it as empty. Likewise if the directory was replaced
|
||||||
|
# with a file of the same name (less likely, but possible).
|
||||||
|
if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
|
entries = []
|
||||||
|
for p in paths:
|
||||||
|
try:
|
||||||
|
entry = (p, self.stat(p))
|
||||||
|
entries.append(entry)
|
||||||
|
yield entry
|
||||||
|
except OSError:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if self.recursive:
|
||||||
|
for path, st in entries:
|
||||||
|
try:
|
||||||
|
if S_ISDIR(st.st_mode):
|
||||||
|
for entry in self.walk(path):
|
||||||
|
yield entry
|
||||||
|
except PermissionError:
|
||||||
|
pass
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def paths(self):
|
def paths(self):
|
||||||
|
@ -264,6 +309,9 @@ class DirectorySnapshot(object):
|
||||||
def mtime(self, path):
|
def mtime(self, path):
|
||||||
return self._stat_info[path].st_mtime
|
return self._stat_info[path].st_mtime
|
||||||
|
|
||||||
|
def size(self, path):
|
||||||
|
return self._stat_info[path].st_size
|
||||||
|
|
||||||
def stat_info(self, path):
|
def stat_info(self, path):
|
||||||
"""
|
"""
|
||||||
Returns a stat information object for the specified path from
|
Returns a stat information object for the specified path from
|
||||||
|
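Putting the snapshot pieces together in a hedged sketch (the watched path is a placeholder): take two snapshots of the same tree and diff them. With this update a file counts as modified when either its mtime or its size changed.

from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff

before = DirectorySnapshot('/tmp/watched')
# ... file system activity happens here ...
after = DirectorySnapshot('/tmp/watched')

diff = DirectorySnapshotDiff(before, after)
print(diff)                      # the new __repr__ prints per-category counts
print(diff.files_modified)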
@ -293,3 +341,30 @@ class DirectorySnapshot(object):
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return str(self._stat_info)
|
return str(self._stat_info)
|
||||||
|
|
||||||
|
|
||||||
|
class EmptyDirectorySnapshot:
|
||||||
|
"""Class to implement an empty snapshot. This is used together with
|
||||||
|
DirectorySnapshot and DirectorySnapshotDiff in order to get all the files/folders
|
||||||
|
in the directory as created.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def path(_):
|
||||||
|
"""Mock up method to return the path of the received inode. As the snapshot
|
||||||
|
is intended to be empty, it always returns None.
|
||||||
|
|
||||||
|
:returns:
|
||||||
|
None.
|
||||||
|
"""
|
||||||
|
return None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def paths(self):
|
||||||
|
"""Mock up method to return a set of file/directory paths in the snapshot. As
|
||||||
|
the snapshot is intended to be empty, it always returns an empty set.
|
||||||
|
|
||||||
|
:returns:
|
||||||
|
An empty set.
|
||||||
|
"""
|
||||||
|
return set()
|
||||||
|
|
|
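As the EmptyDirectorySnapshot docstring says, diffing it against a real snapshot reports every existing file and folder as created; a short sketch with a placeholder path:

from watchdog.utils.dirsnapshot import (
    DirectorySnapshot, DirectorySnapshotDiff, EmptyDirectorySnapshot)

empty = EmptyDirectorySnapshot()
current = DirectorySnapshot('/tmp/watched')
initial = DirectorySnapshotDiff(empty, current)
print(initial.files_created)     # every file under the watched tree
print(initial.dirs_created)      # every sub-directory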
@ -1,5 +1,4 @@
|
||||||
#!/usr/bin/env python
|
# coding: utf-8
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
# echo.py: Tracing function calls using Python decorators.
|
# echo.py: Tracing function calls using Python decorators.
|
||||||
#
|
#
|
||||||
# Written by Thomas Guest <tag@wordaligned.org>
|
# Written by Thomas Guest <tag@wordaligned.org>
|
||||||
|
@ -32,8 +31,6 @@ Example:
|
||||||
def my_function(args):
|
def my_function(args):
|
||||||
pass
|
pass
|
||||||
"""
|
"""
|
||||||
from builtins import map
|
|
||||||
from builtins import zip
|
|
||||||
import inspect
|
import inspect
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
@ -47,6 +44,7 @@ def is_classmethod(instancemethod, klass):
|
||||||
" Determine if an instancemethod is a classmethod. "
|
" Determine if an instancemethod is a classmethod. "
|
||||||
return inspect.ismethod(instancemethod) and instancemethod.__self__ is klass
|
return inspect.ismethod(instancemethod) and instancemethod.__self__ is klass
|
||||||
|
|
||||||
|
|
||||||
def is_static_method(method, klass):
|
def is_static_method(method, klass):
|
||||||
"""Returns True if method is an instance method of klass."""
|
"""Returns True if method is an instance method of klass."""
|
||||||
for c in klass.mro():
|
for c in klass.mro():
|
||||||
|
@ -55,6 +53,7 @@ def is_static_method(method, klass):
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def is_class_private_name(name):
|
def is_class_private_name(name):
|
||||||
" Determine if a name is a class private name. "
|
" Determine if a name is a class private name. "
|
||||||
# Exclude system defined names such as __init__, __add__ etc
|
# Exclude system defined names such as __init__, __add__ etc
|
||||||
|
@ -129,20 +128,22 @@ def echo_instancemethod(klass, method, write=sys.stdout.write):
|
||||||
else:
|
else:
|
||||||
setattr(klass, mname, echo(method, write))
|
setattr(klass, mname, echo(method, write))
|
||||||
|
|
||||||
|
|
||||||
def echo_class(klass, write=sys.stdout.write):
|
def echo_class(klass, write=sys.stdout.write):
|
||||||
""" Echo calls to class methods and static functions
|
""" Echo calls to class methods and static functions
|
||||||
"""
|
"""
|
||||||
for _, method in inspect.getmembers(klass, inspect.ismethod):
|
for _, method in inspect.getmembers(klass, inspect.ismethod):
|
||||||
#In python 3 only class methods are returned here, but in python2 instance methods are too.
|
# In python 3 only class methods are returned here, but in python2 instance methods are too.
|
||||||
echo_instancemethod(klass, method, write)
|
echo_instancemethod(klass, method, write)
|
||||||
for _, fn in inspect.getmembers(klass, inspect.isfunction):
|
for _, fn in inspect.getmembers(klass, inspect.isfunction):
|
||||||
if is_static_method(fn, klass):
|
if is_static_method(fn, klass):
|
||||||
setattr(klass, name(fn), staticmethod(echo(fn, write)))
|
setattr(klass, name(fn), staticmethod(echo(fn, write)))
|
||||||
else:
|
else:
|
||||||
#It's not a class or a static method, so it must be an instance method.
|
# It's not a class or a static method, so it must be an instance method.
|
||||||
#This should only be called in python 3, because in python 3 instance methods are considered functions.
|
# This should only be called in python 3, because in python 3 instance methods are considered functions.
|
||||||
echo_instancemethod(klass, fn, write)
|
echo_instancemethod(klass, fn, write)
|
||||||
|
|
||||||
|
|
||||||
def echo_module(mod, write=sys.stdout.write):
|
def echo_module(mod, write=sys.stdout.write):
|
||||||
""" Echo calls to functions and methods in a module.
|
""" Echo calls to functions and methods in a module.
|
||||||
"""
|
"""
|
||||||
|
@ -151,6 +152,7 @@ def echo_module(mod, write=sys.stdout.write):
|
||||||
for _, klass in inspect.getmembers(mod, inspect.isclass):
|
for _, klass in inspect.getmembers(mod, inspect.isclass):
|
||||||
echo_class(klass, write)
|
echo_class(klass, write)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import doctest
|
import doctest
|
||||||
|
|
||||||
|
|
|
@ -1,42 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
# Backport of Event from py2.7 (method wait in py2.6 returns None)
|
|
||||||
|
|
||||||
from builtins import object
|
|
||||||
from threading import Condition, Lock
|
|
||||||
|
|
||||||
|
|
||||||
class Event(object):
|
|
||||||
|
|
||||||
def __init__(self,):
|
|
||||||
self.__cond = Condition(Lock())
|
|
||||||
self.__flag = False
|
|
||||||
|
|
||||||
def isSet(self):
|
|
||||||
return self.__flag
|
|
||||||
|
|
||||||
is_set = isSet
|
|
||||||
|
|
||||||
def set(self):
|
|
||||||
self.__cond.acquire()
|
|
||||||
try:
|
|
||||||
self.__flag = True
|
|
||||||
self.__cond.notify_all()
|
|
||||||
finally:
|
|
||||||
self.__cond.release()
|
|
||||||
|
|
||||||
def clear(self):
|
|
||||||
self.__cond.acquire()
|
|
||||||
try:
|
|
||||||
self.__flag = False
|
|
||||||
finally:
|
|
||||||
self.__cond.release()
|
|
||||||
|
|
||||||
def wait(self, timeout=None):
|
|
||||||
self.__cond.acquire()
|
|
||||||
try:
|
|
||||||
if not self.__flag:
|
|
||||||
self.__cond.wait(timeout)
|
|
||||||
return self.__flag
|
|
||||||
finally:
|
|
||||||
self.__cond.release()
|
|
|
@ -1,40 +0,0 @@
|
||||||
# The MIT License (MIT)
|
|
||||||
|
|
||||||
# Copyright (c) 2013 Peter M. Elias
|
|
||||||
|
|
||||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
# of this software and associated documentation files (the "Software"), to deal
|
|
||||||
# in the Software without restriction, including without limitation the rights
|
|
||||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
# copies of the Software, and to permit persons to whom the Software is
|
|
||||||
# furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
# The above copyright notice and this permission notice shall be included in all
|
|
||||||
# copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
# SOFTWARE
|
|
||||||
|
|
||||||
|
|
||||||
def import_module(target, relative_to=None):
|
|
||||||
target_parts = target.split('.')
|
|
||||||
target_depth = target_parts.count('')
|
|
||||||
target_path = target_parts[target_depth:]
|
|
||||||
target = target[target_depth:]
|
|
||||||
fromlist = [target]
|
|
||||||
if target_depth and relative_to:
|
|
||||||
relative_parts = relative_to.split('.')
|
|
||||||
relative_to = '.'.join(relative_parts[:-(target_depth - 1) or None])
|
|
||||||
if len(target_path) > 1:
|
|
||||||
relative_to = '.'.join([_f for _f in [relative_to] if _f] + target_path[:-1])
|
|
||||||
fromlist = target_path[-1:]
|
|
||||||
target = fromlist[0]
|
|
||||||
elif not relative_to:
|
|
||||||
fromlist = []
|
|
||||||
mod = __import__(relative_to or target, globals(), locals(), fromlist)
|
|
||||||
return getattr(mod, target, mod)
|
|
87
resources/lib/watchdog/utils/patterns.py
Normal file

|
@ -0,0 +1,87 @@
|
||||||
|
# coding: utf-8
|
||||||
|
# patterns.py: Common wildcard searching/filtering functionality for files.
|
||||||
|
#
|
||||||
|
# Copyright (C) 2010 Yesudeep Mangalapilly <yesudeep@gmail.com>
|
||||||
|
#
|
||||||
|
# Written by Boris Staletic <boris.staletic@gmail.com>
|
||||||
|
|
||||||
|
# Non-pure path objects are only allowed on their respective OS's.
|
||||||
|
# Thus, these utilities require "pure" path objects that don't access the filesystem.
|
||||||
|
# Since pathlib doesn't have a `case_sensitive` parameter, we have to approximate it
|
||||||
|
# by converting input paths to `PureWindowsPath` and `PurePosixPath` where:
|
||||||
|
# - `PureWindowsPath` is always case-insensitive.
|
||||||
|
# - `PurePosixPath` is always case-sensitive.
|
||||||
|
# Reference: https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.match
|
||||||
|
from pathlib import PureWindowsPath, PurePosixPath
|
||||||
|
|
||||||
|
|
||||||
|
def _match_path(path, included_patterns, excluded_patterns, case_sensitive):
|
||||||
|
"""Internal function same as :func:`match_path` but does not check arguments."""
|
||||||
|
if case_sensitive:
|
||||||
|
path = PurePosixPath(path)
|
||||||
|
else:
|
||||||
|
included_patterns = {pattern.lower() for pattern in included_patterns}
|
||||||
|
excluded_patterns = {pattern.lower() for pattern in excluded_patterns}
|
||||||
|
path = PureWindowsPath(path)
|
||||||
|
|
||||||
|
common_patterns = included_patterns & excluded_patterns
|
||||||
|
if common_patterns:
|
||||||
|
raise ValueError('conflicting patterns `{}` included and excluded'.format(common_patterns))
|
||||||
|
return (any(path.match(p) for p in included_patterns)
|
||||||
|
and not any(path.match(p) for p in excluded_patterns))
|
||||||
|
|
||||||
|
|
||||||
|
def filter_paths(paths, included_patterns=None, excluded_patterns=None, case_sensitive=True):
|
||||||
|
"""
|
||||||
|
Filters from a set of paths based on acceptable patterns and
|
||||||
|
ignorable patterns.
|
||||||
|
:param pathnames:
|
||||||
|
A list of path names that will be filtered based on matching and
|
||||||
|
ignored patterns.
|
||||||
|
:param included_patterns:
|
||||||
|
Allow filenames matching wildcard patterns specified in this list.
|
||||||
|
If no pattern list is specified, ["*"] is used as the default pattern,
|
||||||
|
which matches all files.
|
||||||
|
:param excluded_patterns:
|
||||||
|
Ignores filenames matching wildcard patterns specified in this list.
|
||||||
|
If no pattern list is specified, no files are ignored.
|
||||||
|
:param case_sensitive:
|
||||||
|
``True`` if matching should be case-sensitive; ``False`` otherwise.
|
||||||
|
:returns:
|
||||||
|
A list of pathnames that matched the allowable patterns and passed
|
||||||
|
through the ignored patterns.
|
||||||
|
"""
|
||||||
|
included = ["*"] if included_patterns is None else included_patterns
|
||||||
|
excluded = [] if excluded_patterns is None else excluded_patterns
|
||||||
|
|
||||||
|
for path in paths:
|
||||||
|
if _match_path(path, set(included), set(excluded), case_sensitive):
|
||||||
|
yield path
|
||||||
|
|
||||||
|
|
||||||
|
def match_any_paths(paths, included_patterns=None, excluded_patterns=None, case_sensitive=True):
|
||||||
|
"""
|
||||||
|
Matches from a set of paths based on acceptable patterns and
|
||||||
|
ignorable patterns.
|
||||||
|
:param pathnames:
|
||||||
|
A list of path names that will be filtered based on matching and
|
||||||
|
ignored patterns.
|
||||||
|
:param included_patterns:
|
||||||
|
Allow filenames matching wildcard patterns specified in this list.
|
||||||
|
If no pattern list is specified, ["*"] is used as the default pattern,
|
||||||
|
which matches all files.
|
||||||
|
:param excluded_patterns:
|
||||||
|
Ignores filenames matching wildcard patterns specified in this list.
|
||||||
|
If no pattern list is specified, no files are ignored.
|
||||||
|
:param case_sensitive:
|
||||||
|
``True`` if matching should be case-sensitive; ``False`` otherwise.
|
||||||
|
:returns:
|
||||||
|
``True`` if any of the paths matches; ``False`` otherwise.
|
||||||
|
"""
|
||||||
|
included = ["*"] if included_patterns is None else included_patterns
|
||||||
|
excluded = [] if excluded_patterns is None else excluded_patterns
|
||||||
|
|
||||||
|
for path in paths:
|
||||||
|
if _match_path(path, set(included), set(excluded), case_sensitive):
|
||||||
|
return True
|
||||||
|
return False
|
|
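A quick illustration of the pathlib-based matching introduced in this new module; case-insensitive matching is emulated by routing through PureWindowsPath, as the header comments explain.

from watchdog.utils.patterns import filter_paths, match_any_paths

paths = ['src/app.py', 'src/app.pyc', 'README.md']
print(list(filter_paths(paths,
                        included_patterns=['*.py'],
                        excluded_patterns=['*.pyc'])))        # ['src/app.py']
print(match_any_paths(paths, included_patterns=['*.MD'],
                      case_sensitive=False))                  # True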
@ -1,8 +1,7 @@
|
||||||
#!/usr/bin/env python
|
# coding: utf-8
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
#
|
||||||
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
|
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
|
||||||
# Copyright 2012 Google, Inc.
|
# Copyright 2012 Google, Inc & contributors.
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
@ -33,11 +32,12 @@ def get_platform_name():
|
||||||
return PLATFORM_DARWIN
|
return PLATFORM_DARWIN
|
||||||
elif sys.platform.startswith('linux'):
|
elif sys.platform.startswith('linux'):
|
||||||
return PLATFORM_LINUX
|
return PLATFORM_LINUX
|
||||||
elif sys.platform.startswith(('dragonfly', 'freebsd', 'netbsd', 'openbsd', )):
|
elif sys.platform.startswith(('dragonfly', 'freebsd', 'netbsd', 'openbsd', 'bsd')):
|
||||||
return PLATFORM_BSD
|
return PLATFORM_BSD
|
||||||
else:
|
else:
|
||||||
return PLATFORM_UNKNOWN
|
return PLATFORM_UNKNOWN
|
||||||
|
|
||||||
|
|
||||||
__platform__ = get_platform_name()
|
__platform__ = get_platform_name()
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,64 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# Copyright (c) 2013 Will Bond <will@wbond.net>
|
|
||||||
#
|
|
||||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
# of this software and associated documentation files (the "Software"), to deal
|
|
||||||
# in the Software without restriction, including without limitation the rights
|
|
||||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
# copies of the Software, and to permit persons to whom the Software is
|
|
||||||
# furnished to do so, subject to the following conditions:
|
|
||||||
#
|
|
||||||
# The above copyright notice and this permission notice shall be included in
|
|
||||||
# all copies or substantial portions of the Software.
|
|
||||||
#
|
|
||||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
# SOFTWARE.
|
|
||||||
|
|
||||||
|
|
||||||
import sys
|
|
||||||
|
|
||||||
from . import platform
|
|
||||||
|
|
||||||
try:
|
|
||||||
# Python 2
|
|
||||||
str_cls = str
|
|
||||||
bytes_cls = str
|
|
||||||
except NameError:
|
|
||||||
# Python 3
|
|
||||||
str_cls = str
|
|
||||||
bytes_cls = bytes
|
|
||||||
|
|
||||||
|
|
||||||
# This is used by Linux when the locale seems to be improperly set. UTF-8 tends
|
|
||||||
# to be the encoding used by all distros, so this is a good fallback.
|
|
||||||
fs_fallback_encoding = 'utf-8'
|
|
||||||
fs_encoding = sys.getfilesystemencoding() or fs_fallback_encoding
|
|
||||||
|
|
||||||
|
|
||||||
def encode(path):
|
|
||||||
if isinstance(path, str_cls):
|
|
||||||
try:
|
|
||||||
path = path.encode(fs_encoding, 'strict')
|
|
||||||
except UnicodeEncodeError:
|
|
||||||
if not platform.is_linux():
|
|
||||||
raise
|
|
||||||
path = path.encode(fs_fallback_encoding, 'strict')
|
|
||||||
return path
|
|
||||||
|
|
||||||
|
|
||||||
def decode(path):
|
|
||||||
if isinstance(path, bytes_cls):
|
|
||||||
try:
|
|
||||||
path = path.decode(fs_encoding, 'strict')
|
|
||||||
except UnicodeDecodeError:
|
|
||||||
if not platform.is_linux():
|
|
||||||
raise
|
|
||||||
path = path.decode(fs_fallback_encoding, 'strict')
|
|
||||||
return path
|
|
|
@@ -1,125 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
:module: watchdog.utils.win32stat
:synopsis: Implementation of stat with st_ino and st_dev support.

Functions
---------

.. autofunction:: stat

"""

from __future__ import division

from past.utils import old_div
import ctypes
import ctypes.wintypes
import stat as stdstat
from collections import namedtuple


INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value
OPEN_EXISTING = 3
FILE_READ_ATTRIBUTES = 0x80
FILE_ATTRIBUTE_NORMAL = 0x80
FILE_ATTRIBUTE_READONLY = 0x1
FILE_ATTRIBUTE_DIRECTORY = 0x10
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000


class FILETIME(ctypes.Structure):
    _fields_ = [("dwLowDateTime", ctypes.wintypes.DWORD),
                ("dwHighDateTime", ctypes.wintypes.DWORD)]


class BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
    _fields_ = [('dwFileAttributes', ctypes.wintypes.DWORD),
                ('ftCreationTime', FILETIME),
                ('ftLastAccessTime', FILETIME),
                ('ftLastWriteTime', FILETIME),
                ('dwVolumeSerialNumber', ctypes.wintypes.DWORD),
                ('nFileSizeHigh', ctypes.wintypes.DWORD),
                ('nFileSizeLow', ctypes.wintypes.DWORD),
                ('nNumberOfLinks', ctypes.wintypes.DWORD),
                ('nFileIndexHigh', ctypes.wintypes.DWORD),
                ('nFileIndexLow', ctypes.wintypes.DWORD)]


CreateFile = ctypes.windll.kernel32.CreateFileW
CreateFile.restype = ctypes.wintypes.HANDLE
CreateFile.argtypes = (
    ctypes.c_wchar_p,
    ctypes.wintypes.DWORD,
    ctypes.wintypes.DWORD,
    ctypes.c_void_p,
    ctypes.wintypes.DWORD,
    ctypes.wintypes.DWORD,
    ctypes.wintypes.HANDLE,
)

GetFileInformationByHandle = ctypes.windll.kernel32.GetFileInformationByHandle
GetFileInformationByHandle.restype = ctypes.wintypes.BOOL
GetFileInformationByHandle.argtypes = (
    ctypes.wintypes.HANDLE,
    ctypes.POINTER(BY_HANDLE_FILE_INFORMATION),
)

CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.restype = ctypes.wintypes.BOOL
CloseHandle.argtypes = (ctypes.wintypes.HANDLE,)


StatResult = namedtuple('StatResult', 'st_dev st_ino st_mode st_mtime')


def _to_mode(attr):
    m = 0
    if (attr & FILE_ATTRIBUTE_DIRECTORY):
        m |= stdstat.S_IFDIR | 0o111
    else:
        m |= stdstat.S_IFREG
    if (attr & FILE_ATTRIBUTE_READONLY):
        m |= 0o444
    else:
        m |= 0o666
    return m


def _to_unix_time(ft):
    t = (ft.dwHighDateTime) << 32 | ft.dwLowDateTime
    return (old_div(t, 10000000)) - 11644473600


def stat(path):
    hfile = CreateFile(path,
                       FILE_READ_ATTRIBUTES,
                       0,
                       None,
                       OPEN_EXISTING,
                       FILE_ATTRIBUTE_NORMAL | FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
                       None)
    if hfile == INVALID_HANDLE_VALUE:
        raise ctypes.WinError()
    info = BY_HANDLE_FILE_INFORMATION()
    r = GetFileInformationByHandle(hfile, info)
    CloseHandle(hfile)
    if not r:
        raise ctypes.WinError()
    return StatResult(st_dev=info.dwVolumeSerialNumber,
                      st_ino=(info.nFileIndexHigh << 32) + info.nFileIndexLow,
                      st_mode=_to_mode(info.dwFileAttributes),
                      st_mtime=_to_unix_time(info.ftLastWriteTime))
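Note: this ctypes-based stat() replacement is dropped in the 1.0.x line, presumably because os.stat() on Python 3.5+ already reports st_ino, st_dev and st_nlink on Windows. A hedged sketch of the equivalent standard-library call; the path is only an example:

import os

st = os.stat(r'C:\Windows\notepad.exe')   # any existing path will do
print(st.st_dev, st.st_ino, oct(st.st_mode), st.st_mtime)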
@@ -1,8 +1,7 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +18,9 @@
 
 # When updating this version number, please update the
 # ``docs/source/global.rst.inc`` file as well.
-VERSION_MAJOR = 0
-VERSION_MINOR = 8
-VERSION_BUILD = 3
+VERSION_MAJOR = 1
+VERSION_MINOR = 0
+VERSION_BUILD = 2
 VERSION_INFO = (VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD)
 VERSION_STRING = "%d.%d.%d" % VERSION_INFO
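A quick sanity check of the bumped constants, evaluated outside the diff: the tuple now formats to the version named in the commit message.

VERSION_INFO = (1, 0, 2)
VERSION_STRING = "%d.%d.%d" % VERSION_INFO
assert VERSION_STRING == "1.0.2"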
@@ -1,8 +1,7 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
+# coding: utf-8
 #
 # Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
-# Copyright 2012 Google, Inc.
+# Copyright 2012 Google, Inc & contributors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,28 +18,22 @@
 """
 :module: watchdog.watchmedo
 :author: yesudeep@google.com (Yesudeep Mangalapilly)
+:author: contact@tiger-222.fr (Mickaël Schoentgen)
 :synopsis: ``watchmedo`` shell script utility.
 """
 
-from future import standard_library
-standard_library.install_aliases()
+import errno
+import os
 import os.path
 import sys
 import yaml
 import time
 import logging
+from io import StringIO
 
-try:
-    from io import StringIO
-except ImportError:
-    try:
-        from io import StringIO
-    except ImportError:
-        from io import StringIO
-
 from argh import arg, aliases, ArghParser, expects_obj
-from .version import VERSION_STRING
-from .utils import load_class
+from watchdog.version import VERSION_STRING
+from watchdog.utils import WatchdogShutdown, load_class
 
 
 logging.basicConfig(level=logging.INFO)
@@ -49,7 +42,7 @@ CONFIG_KEY_TRICKS = 'tricks'
 CONFIG_KEY_PYTHON_PATH = 'python-path'
 
 
-def path_split(pathname_spec, separator=os.path.sep):
+def path_split(pathname_spec, separator=os.pathsep):
     """
     Splits a pathname specification separated by an OS-dependent separator.
 
@@ -84,11 +77,8 @@ def load_config(tricks_file_pathname):
     :returns:
         A dictionary of configuration information.
     """
-    f = open(tricks_file_pathname, 'rb')
-    content = f.read()
-    f.close()
-    config = yaml.load(content)
-    return config
+    with open(tricks_file_pathname, 'rb') as f:
+        return yaml.safe_load(f.read())
 
 
 def parse_patterns(patterns_spec, ignore_patterns_spec, separator=';'):
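The load_config() change above swaps a bare yaml.load() for yaml.safe_load(), so a tricks file can only yield plain containers (dicts, lists, scalars) rather than arbitrary Python objects, and the file handle is now closed by the with block. A small illustration with a made-up tricks document; the trick name and patterns are examples only:

import yaml

TRICKS_YAML = '''\
tricks:
- watchdog.tricks.LoggerTrick:
    patterns: ["*.py", "*.txt"]
'''

config = yaml.safe_load(TRICKS_YAML)   # plain dicts/lists/strings only
print(config['tricks'][0])
# {'watchdog.tricks.LoggerTrick': {'patterns': ['*.py', '*.txt']}}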
@@ -122,7 +112,7 @@ def observe_with(observer, event_handler, pathnames, recursive):
     try:
         while True:
             time.sleep(1)
-    except KeyboardInterrupt:
+    except WatchdogShutdown:
         observer.stop()
     observer.join()
 
@@ -155,12 +145,12 @@ def schedule_tricks(observer, tricks, pathname, recursive):
      help='perform tricks from given file')
 @arg('--python-path',
      default='.',
-     help='paths separated by %s to add to the python path' % os.path.sep)
+     help='paths separated by %s to add to the python path' % os.pathsep)
 @arg('--interval',
      '--timeout',
      dest='timeout',
      default=1.0,
-     help='use this as the polling interval/blocking timeout')
+     help='use this as the polling interval/blocking timeout (in seconds)')
 @arg('--recursive',
      default=True,
      help='recursively monitor paths')
@@ -180,14 +170,14 @@ def tricks_from(args):
     observer = Observer(timeout=args.timeout)
 
     if not os.path.exists(tricks_file):
-        raise IOError("cannot find tricks file: %s" % tricks_file)
+        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), tricks_file)
 
     config = load_config(tricks_file)
 
     try:
         tricks = config[CONFIG_KEY_TRICKS]
     except KeyError:
-        raise KeyError("No `%s' key specified in %s." % (
+        raise KeyError("No %r key specified in %s." % (
             CONFIG_KEY_TRICKS, tricks_file))
 
     if CONFIG_KEY_PYTHON_PATH in config:
@@ -203,7 +193,7 @@ def tricks_from(args):
     try:
         while True:
             time.sleep(1)
-    except KeyboardInterrupt:
+    except WatchdogShutdown:
         for o in observers:
             o.unschedule_all()
             o.stop()
@@ -217,7 +207,7 @@ def tricks_from(args):
      help='Dotted paths for all the tricks you want to generate')
 @arg('--python-path',
      default='.',
-     help='paths separated by %s to add to the python path' % os.path.sep)
+     help='paths separated by %s to add to the python path' % os.pathsep)
 @arg('--append-to-file',
      default=None,
      help='appends the generated tricks YAML to a file; \
@@ -258,9 +248,8 @@ def tricks_generate_yaml(args):
     else:
         if not os.path.exists(args.append_to_file):
             content = header + content
-        output = open(args.append_to_file, 'ab')
-        output.write(content)
-        output.close()
+        with open(args.append_to_file, 'ab') as output:
+            output.write(content)
 
 
 @arg('directories',
@@ -349,8 +338,8 @@ def log(args):
     elif args.debug_force_fsevents:
         from watchdog.observers.fsevents import FSEventsObserver as Observer
     else:
         # Automatically picks the most appropriate observer for the platform
         # on which it is running.
         from watchdog.observers import Observer
     observer = Observer(timeout=args.timeout)
     observe_with(observer, handler, args.directories, args.recursive)
@@ -418,7 +407,7 @@ Example option usage::
      dest='drop_during_process',
      action='store_true',
      default=False,
-     help="Ignore events that occur while command is still being executed " \
+     help="Ignore events that occur while command is still being executed "
           "to avoid multiple simultaneous instances")
 @arg('--debug-force-polling',
      default=False,
@@ -502,6 +491,9 @@ try to interpret them.
      dest='signal',
      default='SIGINT',
      help='stop the subprocess with this signal (default SIGINT)')
+@arg('--debug-force-polling',
+     default=False,
+     help='[debug] forces polling')
 @arg('--kill-after',
      dest='kill_after',
      default=10.0,
@@ -516,26 +508,36 @@ def auto_restart(args):
     :param args:
         Command line argument options.
     """
-    from watchdog.observers import Observer
+
+    if args.debug_force_polling:
+        from watchdog.observers.polling import PollingObserver as Observer
+    else:
+        from watchdog.observers import Observer
+
     from watchdog.tricks import AutoRestartTrick
     import signal
-    import re
 
     if not args.directories:
         args.directories = ['.']
 
     # Allow either signal name or number.
-    if re.match('^SIG[A-Z]+$', args.signal):
+    if args.signal.startswith("SIG"):
         stop_signal = getattr(signal, args.signal)
     else:
         stop_signal = int(args.signal)
 
-    # Handle SIGTERM in the same manner as SIGINT so that
-    # this program has a chance to stop the child process.
-    def handle_sigterm(_signum, _frame):
-        raise KeyboardInterrupt()
-
-    signal.signal(signal.SIGTERM, handle_sigterm)
+    # Handle termination signals by raising a semantic exception which will
+    # allow us to gracefully unwind and stop the observer
+    termination_signals = {signal.SIGTERM, signal.SIGINT}
+
+    def handler_termination_signal(_signum, _frame):
+        # Neuter all signals so that we don't attempt a double shutdown
+        for signum in termination_signals:
+            signal.signal(signum, signal.SIG_IGN)
+        raise WatchdogShutdown
+
+    for signum in termination_signals:
+        signal.signal(signum, handler_termination_signal)
 
     patterns, ignore_patterns = parse_patterns(args.patterns,
                                                args.ignore_patterns)
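The new termination handling converts SIGINT and SIGTERM into a WatchdogShutdown exception (instead of relying on KeyboardInterrupt) and masks further signals so teardown runs only once. A standalone sketch of the same pattern, using a local stand-in exception so it runs without watchdog installed:

import signal
import time

class Shutdown(Exception):
    """Stand-in for watchdog.utils.WatchdogShutdown in this sketch."""

termination_signals = {signal.SIGTERM, signal.SIGINT}

def _on_termination(_signum, _frame):
    # Ignore further termination signals so we do not shut down twice.
    for signum in termination_signals:
        signal.signal(signum, signal.SIG_IGN)
    raise Shutdown

for signum in termination_signals:
    signal.signal(signum, _on_termination)

try:
    while True:            # stand-in for the observe_with() polling loop
        time.sleep(1)
except Shutdown:
    pass
finally:
    print('stopping the handler and observer exactly once')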
@@ -549,12 +551,16 @@ def auto_restart(args):
                               kill_after=args.kill_after)
     handler.start()
     observer = Observer(timeout=args.timeout)
-    observe_with(observer, handler, args.directories, args.recursive)
-    handler.stop()
+    try:
+        observe_with(observer, handler, args.directories, args.recursive)
+    except WatchdogShutdown:
+        pass
+    finally:
+        handler.stop()
 
 
 epilog = """Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>.
-Copyright 2012 Google, Inc.
+Copyright 2012 Google, Inc & contributors.
 
 Licensed under the terms of the Apache license, version 2.0. Please see
 LICENSE in the source code for more information."""