diff -Nru pytagsfs-0.9.1/NEWS pytagsfs-0.9.2/NEWS
--- pytagsfs-0.9.1/NEWS 2009-10-19 02:00:54.000000000 +0200
+++ pytagsfs-0.9.2/NEWS 2009-12-27 18:02:09.000000000 +0100
@@ -5,6 +5,138 @@
 
 .. contents::
 
+pytagsfs 0.9.2 2009-12-27
+=========================
+
+  * Fixed pytagsfs manpage references to format string variables "number",
+    "NUMBER". The correct variable names are "tracknumber", "TRACKNUMBER".
+    Thanks to Sebastian Pipping for the report.
+    (Forest Bond)
+
+
+pytagsfs 0.9.2rc2 2009-12-06
+============================
+
+  * Added missing encode calls in SourceTree methods isreadable, issymlink,
+    lstat, and utime.
+    (Forest Bond)
+
+  * Fixed bad assumption in tests.blackbox that a subprocess failed if it
+    produced output on stderr.
+    (Forest Bond)
+
+  * Added dependencies for running test suite to README.
+    (Forest Bond)
+
+
+pytagsfs 0.9.2rc1 2009-12-05
+============================
+
+  * Fixed AttributeError on getattr due to missing Stat attributes and
+    interaction with python-fuse bugs. The presence of this issue is dependent
+    on python-fuse version, but the implemented fix should be compatible with
+    many. The exception would likely occur on the filesystem operation following
+    the getattr call due to python-fuse bugs.
+
+    This bug could lead to data corruption if files are opened for writing, but
+    problems would likely be seen before that point.
+
+    (Forest Bond)
+
+  * Fixed AttributeError on open due to missing FileInfo attribute (a bug in
+    python-fuse). The exception would likely occur on the filesystem operation
+    following the open call due to python-fuse bugs.
+
+    This bug could lead to data corruption if files are opened for writing, but
+    problems would likely be seen before that point.
+
+    (Forest Bond)
+
+  * Fixed unmatched increment/decrement count for frozen paths. This would
+    manifest itself as follows:
+
+    - KeyErrors may be seen in the log.
+    - Writes may fail in situations where multiple processes are attempting to
+      write to the same file concurrently.
+    - A virtual path may persist after a file is modified such that the path
+      should change.
+
+    This could possibly lead to data corruption if a virtual file is opened
+    for writing by more than one process simultaneously. In that case,
+    inconsistent data is likely even in the absence of this bug.
+
+    (Forest Bond)
+
+  * util/profile: Added benchmark read_all_concurrent.
+    (Forest Bond)
+
+  * ``TestLinesMetaStore`` was modified to support a data section. All bytes
+    after the last newline character in the file are treated as arbitrary data.
+    (Forest Bond)
+
+  * util/profile: Files are now created with a data section of arbitrary size
+    that can be specified with command-line options --blocksize, --nblocks.
+    These default to 1024 and 200, respectively.
+    (Forest Bond)
+
+  * Implemented the beginnings of a token-style locking mechanism (this doesn't
+    appear to be widely used -- maybe I invented it). At present, this
+    effectively implements a global lock such that most pytagsfs code need not be
+    thread safe, but will evolve to allow limited multithreading through
+    judicious token releases around blocking I/O.
+ + It is possible that these deadlocks could have resulted in data corruption + since the filesystem could have deadlocked with files opened for writing, + but only in situations where the filesystem was being accessed concurrently, + in which case a deadlock is likely before the user has a chance to open a + file for writing. + + (Forest Bond) + + * Removed a variety of locks that were rendered useless by the introduction of + token-style locking. + (Forest Bond) + + * Extended token-style locking with the addition of tokens that protect + specific data structures. This improves performance by exchanging the global + token for a secondary one around code sections that would cause a thread to + block on I/O. + (Forest Bond) + + * Debug logging of filesystem operations no longer includes method arguments + or return values that are too large to justify logging them. This improves + performance with debug logging enabled. + (Forest Bond) + + * Introduced FUSE option max_readahead=0. This has no impact on OS X because + MacFUSE-specific option noreadahead was already being used. + + Since the underlying source files can change at any time, no in-kernel + caching can be permitted. Otherwise, reads may return old data. This makes + read performance worse by a factor of about two on my machine (but + correctness is more important than performance by a factor of greater than + two ;). + + The previous behavior could lead to data inconsistencies if a virtual file + and its corresponding real file were being written to simultaneously. + However, data inconsistencies would occur under those circumstances even in + the absence of this issue. + + (Forest Bond) + + * Symlinks in the source tree are now completely ignored. Previously, they + were represented as virtual symlinks but readlink calls would fail. In the + future, we'd like to follow symlinks and handle them correctly. + (Forest Bond) + + * Fixed AttributeError when a file is opened for writing twice after being + either opened for reading and subsequently truncated or truncated and + subsequently opened for reading. The second attempt to open the file for + writing would fail. This bug could not cause data corruption. 
+ (Forest Bond) + + pytagsfs 0.9.1 2009-10-18 ========================= diff -Nru pytagsfs-0.9.1/README pytagsfs-0.9.2/README --- pytagsfs-0.9.1/README 2009-10-19 02:00:54.000000000 +0200 +++ pytagsfs-0.9.2/README 2009-12-27 18:02:09.000000000 +0100 @@ -22,6 +22,7 @@ pytagsfs has the following dependencies: + * Python (2.4, 2.5, or 2.6): http://www.python.org/ * sclapp (>= 0.5.2): http://www.alittletooquiet.net/software/sclapp * python-fuse (>= 0.2): http://fuse.sourceforge.net/wiki/index.php/FusePython * mutagen: http://www.sacredchao.net/quodlibet/wiki/Development/Mutagen @@ -31,7 +32,15 @@ * inotifyx (Linux only): http://www.alittletooquiet.net/software/inotifyx/ * py-kqueue (Darwin, FreeBSD, NetBSD, OpenBSD): http://pypi.python.org/packages/source/p/py-kqueue/ - * gamin (many Unix-like systems): http://www.gnome.org/~veillard/gamin/ + * gamin (many Unix-like systems, inotifyx and py-kqueue are preferred): + http://www.gnome.org/~veillard/gamin/ + +To run the test suite, the following additional dependencies must be fulfilled: + + * madplay: http://www.underbit.com/products/mad/ + * vorbis-tools (for ogg123): http://www.vorbis.com/ + * flac: http://flac.sourceforge.net/ + * ctypes (Python 2.4 only): http://python.net/crew/theller/ctypes/ Installing diff -Nru pytagsfs-0.9.1/debian/changelog pytagsfs-0.9.2/debian/changelog --- pytagsfs-0.9.1/debian/changelog 2010-02-12 20:32:01.000000000 +0100 +++ pytagsfs-0.9.2/debian/changelog 2010-02-12 20:32:01.000000000 +0100 @@ -1,9 +1,15 @@ -pytagsfs (0.9.1-1ubuntu1) lucid; urgency=low +pytagsfs (0.9.2-1) unstable; urgency=low - * Merge from debian testing (LP: #493164), Ubuntu remaining changes: - - Append --prefix to setup.py install to fix FTBFS with Python 2.6. + * New Upstream Release + * debian/control + + Replace python-all-dev with python-all + + Move python-all to Build-Depends since we need it during clean + + Bump python-all version dependency to > 2.5 + + Add misc:Depends to Dependency + * Run some testcases during build because upstream recommends it + (Add multiple build deps for the testcases) - -- Alessio Treglia Mon, 07 Dec 2009 17:50:23 +0100 + -- Ritesh Raj Sarraf Mon, 18 Jan 2010 17:34:18 +0530 pytagsfs (0.9.1-1) unstable; urgency=low @@ -20,13 +26,6 @@ -- Ritesh Raj Sarraf Fri, 30 Oct 2009 15:42:10 +0530 -pytagsfs (0.9.0-2ubuntu1) karmic; urgency=low - - * Append --install-layout=deb to setup.py install arguments list to prevent - FTBFS with Python 2.6. 
- - -- Alessio Treglia Wed, 27 May 2009 08:52:02 +0200 - pytagsfs (0.9.0-2) unstable; urgency=low * Take maintenance from Y Giridhar Appaji Nag diff -Nru pytagsfs-0.9.1/debian/control pytagsfs-0.9.2/debian/control --- pytagsfs-0.9.1/debian/control 2010-02-12 20:32:01.000000000 +0100 +++ pytagsfs-0.9.2/debian/control 2010-02-12 20:32:01.000000000 +0100 @@ -1,20 +1,19 @@ Source: pytagsfs Section: utils Priority: optional -Maintainer: Ubuntu Developers -XSBC-Original-Maintainer: Ritesh Raj Sarraf +Maintainer: Ritesh Raj Sarraf Uploaders: Python Applications Packaging Team -Build-Depends: debhelper (>= 5.0.38), quilt -Build-Depends-Indep: python-support, python-sclapp (>= 0.5.2), xsltproc, docbook-xsl, python-libxml2, python-fuse (>= 0.2), python-all-dev (>= 2.3.5-11) +Build-Depends: debhelper (>= 5.0.38), quilt, python-all (>= 2.5) +Build-Depends-Indep: python-support, python-sclapp (>= 0.5.2), xsltproc, docbook-xsl, python-libxml2, python-fuse (>= 0.2), python-mutagen, madplay, vorbis-tools, flac, python-pexpect, python-inotifyx, python-gamin Standards-Version: 3.8.3 -XS-Python-Version: all +XS-Python-Version: >=2.5 Homepage: http://www.pytagsfs.org/ Vcs-Svn: svn://svn.debian.org/svn/python-apps/packages/pytagsfs/trunk Vcs-Browser: http://svn.debian.org/viewsvn/python-apps/packages/pytagsfs/trunk/ Package: pytagsfs Architecture: all -Depends: ${python:Depends}, python-fuse (>= 0.2), python-inotifyx, python-mutagen, python-sclapp (>= 0.5.2), fuse-utils +Depends: ${python:Depends}, ${misc:Depends}, python-fuse (>= 0.2), python-inotifyx, python-mutagen, python-sclapp (>= 0.5.2), fuse-utils XB-Python-Version: ${python:Versions} Description: maps media files to an arbitrary directory structure pytagsfs is a FUSE filesystem that arranges media files in a virtual directory diff -Nru pytagsfs-0.9.1/debian/rules pytagsfs-0.9.2/debian/rules --- pytagsfs-0.9.1/debian/rules 2010-02-12 20:32:01.000000000 +0100 +++ pytagsfs-0.9.2/debian/rules 2010-02-12 20:32:01.000000000 +0100 @@ -20,12 +20,19 @@ build-python%: dh_testdir + + # Currently failing tests that have been disabled + # fs, blackbox, sourcetreerep + python$* setup.py test --tests tests.common,tests.manager,tests.mutagen_meta_store,tests.optgroup,tests.pathstore,tests.pytagsfs_meta_store,tests.pytypes,tests.regex,tests.sourcetreemon,tests.sourcetree,tests.subspat,tests.test_lines_meta_store,tests.util,tests.values + python$* setup.py build touch $@ clean: unpatch dh_testdir dh_testroot + + python setup.py clean --all rm -f build-stamp rm -rf build find . -name '*.py[co]' | xargs rm -f @@ -39,8 +46,7 @@ dh_testdir dh_testroot dh_installdirs - python$* setup.py install --root=$(CURDIR)/debian/pytagsfs \ - --prefix=/usr + python$* setup.py install --root=$(CURDIR)/debian/pytagsfs # Build architecture-independent files here. binary-indep: build install diff -Nru pytagsfs-0.9.1/modules/pytagsfs/file.py pytagsfs-0.9.2/modules/pytagsfs/file.py --- pytagsfs-0.9.1/modules/pytagsfs/file.py 2009-10-19 02:00:54.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/file.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,19 +1,30 @@ -import os +# Copyright (c) 2008-2009 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. 
-from threading import RLock
+import os
 
 from pytagsfs.exceptions import (
     PathNotFound,
     InvalidArgument,
 )
-from pytagsfs.util import (
-    nonatomic,
-    attr_named,
-)
+from pytagsfs.util import ref_self
+from pytagsfs.multithreading import token_exchange
+
+
+# Note: A token referring to a specific file instance is used to protect the
+# file's open file descriptor. It should *not* be acquired when accessing
+# other attributes. That would break assumptions made by code that relies on
+# the global lock for data consistency. truncate_to is of special concern
+# because of how it is used in pytagsfs.fs.
 
 
 class File(object):
-    _lock = None
     filesystem = None
     fake_path = None
     real_path = None
@@ -27,7 +38,6 @@
         flags,
         truncate_to = None,
     ):
-        self._lock = RLock()
         self.filesystem = filesystem
         self.fake_path = fake_path
         self.real_path = filesystem.source_tree_rep.get_real_path(fake_path)
@@ -55,6 +65,14 @@
     def write(self, buf, offset):
         raise NotImplementedError
 
+    def set_truncate_to(self, truncate_to):
+        # Note: We deliberately keep the global token here.
+        self.truncate_to = truncate_to
+
+    def del_truncate_to(self):
+        # Note: We deliberately keep the global token here.
+        del self.truncate_to
+
 
 class ReadOnlyFile(File):
     fd = None
@@ -64,16 +82,20 @@
         super(ReadOnlyFile, self).__init__(*args, **kwargs)
         self.open_file()
 
-    @nonatomic(attr_named('_lock'))
+    @token_exchange.token_pushed(ref_self)
     def open_file(self):
         real_path = self.filesystem.encode_real_path(self.real_path)
         self.file = os.fdopen(os.open(real_path, self.flags), 'r')
 
     ############################################################################
 
-    @nonatomic(attr_named('_lock'))
     def fgetattr(self):
-        stat_result = os.fstat(self.file.fileno())
+        token_exchange.push_token(self)
+        try:
+            fd = self.file.fileno()
+            stat_result = os.fstat(fd)
+        finally:
+            token_exchange.pop_token()
         st_size = stat_result.st_size
         if (self.truncate_to is not None) and (st_size > self.truncate_to):
             st_size = self.truncate_to
@@ -93,16 +115,19 @@
     def ftruncate(self, len):
         raise InvalidArgument
 
-    @nonatomic(attr_named('_lock'))
     def read(self, length, offset):
         if self.truncate_to is not None:
             length = self.truncate_to - offset
             if length < 0:
                 length = 0
-        self.file.seek(offset)
-        return self.file.read(length)
+        token_exchange.push_token(self)
+        try:
+            self.file.seek(offset)
+            return self.file.read(length)
+        finally:
+            token_exchange.pop_token()
 
-    @nonatomic(attr_named('_lock'))
+    @token_exchange.token_pushed(ref_self)
     def release(self, flags):
         self.file.close()
 
@@ -117,45 +142,51 @@
         super(ReadWriteFile, self).__init__(*args, **kwargs)
         self.open_file()
 
-    @nonatomic(attr_named('_lock'))
     def open_file(self):
+        # Note: get value of truncate_to before pushing a new token.
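+        # Once the per-file token is pushed, the global token is released, so
+        # another thread could change self.truncate_to via set_truncate_to or
+        # del_truncate_to (which run while holding the global token); reading
+        # it first keeps the read consistent with the rest of the
+        # global-token code.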
+ truncate_to = self.truncate_to real_path = self.filesystem.encode_real_path(self.real_path) - self.fd = os.open(real_path, self.flags) - if self.truncate_to is not None: - os.ftruncate(self.fd, self.truncate_to) + token_exchange.push_token(self) + try: + self.fd = os.open(real_path, self.flags) + + if truncate_to is not None: + os.ftruncate(self.fd, truncate_to) + finally: + token_exchange.pop_token() ################################################################################ - @nonatomic(attr_named('_lock')) + @token_exchange.token_pushed(ref_self) def read(self, length, offset): os.lseek(self.fd, offset, 0) return os.read(self.fd, length) - @nonatomic(attr_named('_lock')) + @token_exchange.token_pushed(ref_self) def release(self, flags): return os.close(self.fd) - @nonatomic(attr_named('_lock')) + @token_exchange.token_pushed(ref_self) def write(self, buf, offset): os.lseek(self.fd, offset, 0) return os.write(self.fd, buf) - @nonatomic(attr_named('_lock')) + @token_exchange.token_pushed(ref_self) def fgetattr(self): return os.fstat(self.fd) - @nonatomic(attr_named('_lock')) + @token_exchange.token_pushed(ref_self) def flush(self): return os.close(os.dup(self.fd)) - @nonatomic(attr_named('_lock')) + @token_exchange.token_pushed(ref_self) def fsync(self, datasync): if datasync and hasattr(os, 'fdatasync'): return os.fdatasync(self.fd) else: return os.fsync(self.fd) - @nonatomic(attr_named('_lock')) + @token_exchange.token_pushed(ref_self) def ftruncate(self, len): return os.ftruncate(self.fd, len) diff -Nru pytagsfs-0.9.1/modules/pytagsfs/fs/__init__.py pytagsfs-0.9.2/modules/pytagsfs/fs/__init__.py --- pytagsfs-0.9.1/modules/pytagsfs/fs/__init__.py 2009-10-19 02:00:54.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/fs/__init__.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. 
# # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -20,18 +20,15 @@ from sclapp.legacy_support import wraps from optparse import OptParseError, SUPPRESS_HELP -from threading import Lock, RLock from pytagsfs.fuselib import ( + TokenFuse, FileSystem, - Fuse, ) from pytagsfs.optgroup import GroupingOptionParser from pytagsfs.subspat import SubstitutionPattern from pytagsfs.sourcetree import SourceTree - from pytagsfs.metastore import DelegateMultiMetaStore - from pytagsfs.sourcetreemon import ( SOURCE_TREE_MONITORS, get_source_tree_monitor, @@ -52,8 +49,6 @@ ) from pytagsfs.util import ( get_obj_by_dotted_name, - nonatomic, - attr_named, return_errno, split_path, unicode_path_sep, @@ -63,7 +58,7 @@ from pytagsfs.specialfile import SpecialFileFileSystemMixin from pytagsfs.specialfile.logfile import VirtualLogFile from pytagsfs.profiling import enable_profiling - +from pytagsfs.multithreading import token_exchange from pytagsfs import __version__ as version @@ -266,34 +261,24 @@ class FrozenPath(object): real_path = None - lock = None count = None def __init__(self, real_path): self.real_path = real_path - self.lock = Lock() self.count = 0 def increment(self): - self.lock.acquire() - try: - self.count = self.count + 1 - finally: - self.lock.release() + self.count = self.count + 1 return self.count def decrement(self): - self.lock.acquire() - try: - self.count = self.count - 1 - finally: - self.lock.release() + if self.count == 0: + raise AssertionError('Will not decrement count below zero.') + self.count = self.count - 1 return self.count class FileSystemMappingToRealFiles(FileSystem): - _lock = None - argv = None user_encoding = None cmdline_arguments = None @@ -338,8 +323,6 @@ } def __init__(self): - self._lock = RLock() - self.truncated_paths = {} self.frozen_path_mappings = {} @@ -360,7 +343,7 @@ except OptParseError, e: self.cmdline_parser.error(unicode(e)) self.pre_init() - fuse = Fuse(self) + fuse = TokenFuse(self) fuse.parse(self.fuse_cmdline_arguments) return fuse.main() @@ -395,6 +378,10 @@ '-o', 'noubc', '-o', 'novncache', ]) + else: + self.fuse_cmdline_arguments.extend([ + '-o', 'max_readahead=0', + ]) if self.subtype: self.fuse_cmdline_arguments.extend( @@ -586,9 +573,9 @@ # access # bmap - chmod = operation_on_one_real_path(os.chmod) + chmod = operation_on_one_real_path(token_exchange.token_released(os.chmod)) - chown = operation_on_one_real_path(os.chown) + chown = operation_on_one_real_path(token_exchange.token_released(os.chown)) # create @@ -619,6 +606,7 @@ try: real_path = self.frozen_path_mappings[fake_path].real_path except KeyError: + # Path is not frozen. stat_result = self.source_tree_rep.getattr(fake_path) st_size = stat_result.st_size @@ -643,7 +631,12 @@ stat_result.st_ctime, )) else: - stat_result = os.lstat(real_path) + # Path is frozen. 
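+            # os.lstat may block on disk I/O and does not touch any state that
+            # the global token protects, so drop the token around the call
+            # (the release_token/reacquire_token pattern described in
+            # pytagsfs.multithreading).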
+ token_exchange.release_token() + try: + stat_result = os.lstat(real_path) + finally: + token_exchange.reacquire_token() return self.post_process_stat_result(stat_result) @@ -680,7 +673,6 @@ # mknod @return_errno - @nonatomic(attr_named('_lock')) def open(self, fake_path, flags): # When opening a file for writing, the virtual path is "frozen" so # that, if a write will cause the path to change, the same source @@ -696,40 +688,46 @@ file_instance = self.get_read_write_file_instance( fake_path, flags, truncate_to) - for _file_instance in ( - self.read_only_files_by_fake_path.get(fake_path, ())): - del _file_instance.truncate_to + if truncate_to is not None: + for _file_instance in ( + self.read_only_files_by_fake_path.get(fake_path, ()) + ): + _file_instance.del_truncate_to() + else: truncate_to = self.truncated_paths.get(fake_path, None) file_instance = self.get_read_only_file_instance( - fake_path, flags, truncate_to) + fake_path, + flags, + truncate_to, + ) fh = self.get_next_fh() self.open_files[fh] = file_instance if is_writable: - try: - self.frozen_path_mappings[fake_path].increment() - log_debug( - u'open: incremented count for frozen path: %s, %s', - fake_path, - file_instance.real_path, - ) - except KeyError: + if fake_path not in self.frozen_path_mappings: log_debug( u'open: freezing path: %s, %s', fake_path, file_instance.real_path, ) self.frozen_path_mappings[fake_path] = FrozenPath( - file_instance.real_path) + file_instance.real_path + ) + + log_debug( + u'open: incrementing count for frozen path: %s, %s', + fake_path, + file_instance.real_path, + ) + self.frozen_path_mappings[fake_path].increment() + else: - try: - self.read_only_files_by_fake_path[fake_path].append( - file_instance) - except KeyError: - self.read_only_files_by_fake_path[fake_path] = [ - file_instance] + self.read_only_files_by_fake_path.setdefault( + fake_path, + [], + ).append(file_instance) return fh @@ -751,7 +749,6 @@ # readlink @return_errno - @nonatomic(attr_named('_lock')) def release(self, fake_path, flags, fh): fake_path = self.decode_fake_path(fake_path) is_writable = (os.O_RDWR | os.O_WRONLY) & flags @@ -772,9 +769,9 @@ file_instance.real_path, ) del self.frozen_path_mappings[fake_path] + else: - self.read_only_files_by_fake_path[fake_path].remove( - file_instance) + self.read_only_files_by_fake_path[fake_path].remove(file_instance) if not self.read_only_files_by_fake_path[fake_path]: del self.read_only_files_by_fake_path[fake_path] @@ -813,7 +810,6 @@ # symlink @return_errno - @nonatomic(attr_named('_lock')) def truncate(self, fake_path, len): # truncate semantics are designed to accomodate programs that truncate # files before opening them for writing. Generally speaking, this is @@ -847,7 +843,12 @@ except KeyError: pass else: - ftruncate_path(self.encode_real_path(real_path), len) + real_path_encoded = self.encode_real_path(real_path) + token_exchange.release_token() + try: + ftruncate_path(real_path_encoded, len) + finally: + token_exchange.reacquire_token() return # Otherwise, save the new length (if shorter than the existing value). 
@@ -856,8 +857,9 @@ self.truncated_paths[fake_path] = len for file_instance in ( - self.read_only_files_by_fake_path.get(fake_path, ())): - file_instance.truncate_to = len + self.read_only_files_by_fake_path.get(fake_path, ()) + ): + file_instance.set_truncate_to(len) # unlink diff -Nru pytagsfs-0.9.1/modules/pytagsfs/fs/mail.py pytagsfs-0.9.2/modules/pytagsfs/fs/mail.py --- pytagsfs-0.9.1/modules/pytagsfs/fs/mail.py 2009-10-19 02:00:54.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/fs/mail.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,3 +1,12 @@ +# Copyright (c) 2008-2009 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + import os from pytagsfs.util import return_errno @@ -60,5 +69,5 @@ @return_errno def rmdir(self, fake_path): if old_fake_path.count('/') == 1: - raise InvalidArgument + raise InvalidArgument() return super(PyMailTagsFileSystem, self).rmdir(fake_path) diff -Nru pytagsfs-0.9.1/modules/pytagsfs/fuselib.py pytagsfs-0.9.2/modules/pytagsfs/fuselib.py --- pytagsfs-0.9.1/modules/pytagsfs/fuselib.py 2009-10-19 02:00:54.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/fuselib.py 2009-12-27 18:02:09.000000000 +0100 @@ -21,12 +21,12 @@ from pytagsfs.debug import log_debug, log_critical from pytagsfs.util import ( - lazy_repr, LazyByteString, wraps, ) from pytagsfs.exceptions import FuseError from pytagsfs.profiling import profiled +from pytagsfs.multithreading import token_exchange, GLOBAL fuse.fuse_python_api = (0, 2) @@ -85,37 +85,57 @@ def fsmethod(func): @wraps(func) - def wrapper(self, *args, **kwargs): - log_debug(u'%s(%s)', func.__name__, lazy_repr_args(args)) - + def wrapper(*args, **kwargs): try: - ret = func(self, *args, **kwargs) + ret = func(*args, **kwargs) if ret is None: ret = 0 - except FuseError, e: if e.errno is not None: ret = -e.errno else: ret = -errno.EFAULT - except: log_critical(traceback.format_exc()) ret = -errno.EFAULT + return ret + return wrapper - log_debug(u'%s(...) -> %s', func.__name__, lazy_repr(ret)) + +def logged(func): + @wraps(func) + def wrapper(self, *args, **kwargs): + log_debug(u'%s(%s)', func.__name__, lazy_repr_args(args)) + ret = func(self, *args, **kwargs) + log_debug(u'%s(...) -> %r', func.__name__, ret) return ret + return wrapper + +def logged_noargs(func): + @wraps(func) + def wrapper(self, *args, **kwargs): + log_debug(u'%s(???)', func.__name__) + ret = func(self, *args, **kwargs) + log_debug(u'%s(...) -> %r', func.__name__, ret) + return ret return wrapper -class Fuse(_Fuse): - filesystem_methods = [] - filesystem_method_names = [] +def logged_noret(func): + @wraps(func) + def wrapper(self, *args, **kwargs): + log_debug(u'%s(%s)', func.__name__, lazy_repr_args(args)) + ret = func(self, *args, **kwargs) + log_debug(u'%s(...) 
-> ???', func.__name__) + return ret + return wrapper + +class Fuse(_Fuse): def __init__(self, filesystem, *args, **kwargs): self.filesystem = filesystem - kwargs['dash_s_do'] = kwargs.get('dash_s_do', 'setsingle') + kwargs.setdefault('dash_s_do', 'setsingle') super(Fuse, self).__init__(*args, **kwargs) def main(self): @@ -126,26 +146,31 @@ self.filesystem.destroy() @profiled + @logged @fsmethod def access(self, path, mode): return self.filesystem.access(path, mode) @profiled + @logged @fsmethod def bmap(self, path, blocksize, idx): return self.filesystem.bmap(path, blocksize, idx) @profiled + @logged @fsmethod def chmod(self, path, mode): return self.filesystem.chmod(path, mode) @profiled + @logged @fsmethod def chown(self, path, uid, gid): return self.filesystem.chown(path, uid, gid) @profiled + @logged @fsmethod def create(self, path, flags, mode): return self.filesystem.create(path, flags, mode) @@ -159,11 +184,13 @@ # main. #@profiled + #@logged #@fsmethod #def fsdestroy(self): # self.filesystem.destroy() @profiled + @logged @fsmethod def fgetattr(self, path, fi): return self._fgetattr(path, fi) @@ -174,6 +201,21 @@ else: stat_result = self.filesystem.fgetattr(path, fi.fh) + # This is a little bit complicated due to different handling in various + # python-fuse versions: + # + # * Some versions do not handle None values; attributes should not be + # specified. + # * Some versions do not handle missing attributes. + # + # Eventually, we should be able to return stat_result directly, but + # only after python-fuse versions that don't support None for attribute + # values have been phased out. In the meantime, for maximum + # compatibility we should always set a non-None value for all + # attributes. Thus, for some attributes we must guess a default using + # the same heuristics that python-fuse uses for None/missing + # attributes. + st = Stat( st_dev = stat_result.st_dev, st_ino = stat_result.st_ino, @@ -181,70 +223,95 @@ st_nlink = stat_result.st_nlink, st_uid = stat_result.st_uid, st_gid = stat_result.st_gid, + #st_rdev = stat_result.st_rdev, st_size = stat_result.st_size, + #st_blksize = stat_result.st_blksize, + #st_blocks = stat_result.st_blocks, st_atime = stat_result.st_atime, st_mtime = stat_result.st_mtime, st_ctime = stat_result.st_ctime, ) - if stat_result.st_blksize is not None: + if stat_result.st_rdev is None: + # I believe this should work with all systems. + st.st_rdev = 0 + else: + st.st_rdev = stat_result.st_rdev + + if stat_result.st_blksize is None: + # Default value used by python-fuse. + st.st_blksize = 4096 + else: st.st_blksize = stat_result.st_blksize - if stat_result.st_blocks is not None: + if stat_result.st_blocks is None: + # Default value used by python-fuse. 
+ st.st_blocks = ((stat_result.st_size + 511) / 512) + else: st.st_blocks = stat_result.st_blocks return st @profiled + @logged @fsmethod def flush(self, path, fi = None): fh = getattr(fi, 'fh', None) return self.filesystem.flush(path, fh) @profiled + @logged @fsmethod def fsync(self, path, datasync, fi = None): fh = getattr(fi, 'fh', None) return self.filesystem.fsync(path, datasync, fh) @profiled + @logged @fsmethod def fsyncdir(self, path, datasync, fi = None): fh = getattr(fi, 'fh', None) return self.filesystem.fsyncdir(path, datasync, fh) @profiled + @logged @fsmethod def ftruncate(self, path, length, fi = None): fh = getattr(fi, 'fh', None) return self.filesystem.ftruncate(path, length, fh) @profiled + @logged @fsmethod def getattr(self, path): return self._fgetattr(path, None) @profiled + @logged @fsmethod def getxattr(self, path, name, size): return self.filesystem.getxattr(path, name, size) @profiled + @logged @fsmethod def fsinit(self): self.filesystem.init() @profiled + @logged @fsmethod def link(self, source, target): return self.filesystem.link(source, target) @profiled + @logged @fsmethod def listxattr(self, path, size): return self.filesystem.listxattr(path, size) @profiled + @logged @fsmethod def lock(self, path, cmd, owner, fi = None, **kwargs): # kwargs: l_type, l_start, l_len, l_pid @@ -252,33 +319,44 @@ return self.filesystem.lock(path, cmd, owner, fh, **kwargs) @profiled + @logged @fsmethod def mkdir(self, path, mode): return self.filesystem.mkdir(path, mode) @profiled + @logged @fsmethod def mknod(self, path, mode, dev): return self.filesystem.mknod(path, mode, dev) @profiled + @logged @fsmethod def open(self, path, flags): fh = self.filesystem.open(path, flags) - return FileInfo(fh = fh) + # Note: keep_cache is only specified to avoid AttributeErrors in + # python-fuse, which doesn't handle missing attributes well. I think + # that python-fuse's FileInfo class is supposed to specify a default + # value as a class attribute but it is incorrectly named "keep" instead + # of "keep_cache". Bug not yet filed. 
+ return FileInfo(fh = fh, keep_cache = None) @profiled + @logged @fsmethod def opendir(self, path): return self.filesystem.opendir(path) @profiled + @logged_noret @fsmethod def read(self, path, size, offset, fi = None): fh = getattr(fi, 'fh', None) return self.filesystem.read(path, size, offset, fh) @profiled + @logged @fsmethod def readdir(self, path, offset): # FIXME: Our FUSE bindings don't give us fi for readdir, so we fake @@ -291,43 +369,51 @@ return entries @profiled + @logged @fsmethod def readlink(self, path): return self.filesystem.readlink(path) @profiled + @logged @fsmethod def release(self, path, flags, fi = None): fh = getattr(fi, 'fh', None) return self.filesystem.release(path, flags, fh) @profiled + @logged @fsmethod def releasedir(self, path, fi = None): fh = getattr(fi, 'fh', None) return self.filesystem.releasedir(path, fh) @profiled + @logged @fsmethod def removexattr(self, path, name): return self.filesystem.removexattr(path, name) @profiled + @logged @fsmethod def rename(self, old, new): return self.filesystem.rename(old, new) @profiled + @logged @fsmethod def rmdir(self, path): return self.filesystem.rmdir(path) @profiled + @logged @fsmethod def setxattr(self, path, name, value, size, flags): return self.filesystem.setxattr(path, name, value, size, flags) @profiled + @logged @fsmethod def statfs(self): statfs_result = self.filesystem.statfs() @@ -345,21 +431,25 @@ ) @profiled + @logged @fsmethod def symlink(self, source, target): return self.filesystem.symlink(source, target) @profiled + @logged @fsmethod def truncate(self, path, length): return self.filesystem.truncate(path, length) @profiled + @logged @fsmethod def unlink(self, path): return self.filesystem.unlink(path) @profiled + @logged @fsmethod def utimens(self, path, ts_atime, ts_mtime): atime = timespec_to_float(ts_atime) @@ -367,20 +457,64 @@ return self.filesystem.utimens(path, (atime, mtime)) @profiled + @logged_noargs @fsmethod def write(self, path, buf, offset, fi = None): fh = getattr(fi, 'fh', None) return self.filesystem.write(path, buf, offset, fh) -Fuse.filesystem_method_names = [] -Fuse.filesystem_methods = [] -for name in dir(Fuse): - if not name.startswith('_'): - value = getattr(Fuse, name) - if callable(value): - Fuse.filesystem_method_names.append(name) - Fuse.filesystem_methods.append(value) +class TokenFuse(Fuse): + def main(self): + try: + # Note: calling super with parent class to skip Fuse.main. We're + # overriding, not extending. + return super(Fuse, self).main() + finally: + token_exchange.push_token(GLOBAL) + try: + # See comment regarding fsdestroy. + self.filesystem.destroy() + finally: + token_exchange.pop_token() + + access = token_exchange.token_pushed(GLOBAL)(Fuse.access) + bmap = token_exchange.token_pushed(GLOBAL)(Fuse.bmap) + chmod = token_exchange.token_pushed(GLOBAL)(Fuse.chmod) + chown = token_exchange.token_pushed(GLOBAL)(Fuse.chown) + create = token_exchange.token_pushed(GLOBAL)(Fuse.create) + # XXX: Enable this when Fuse.fsdestroy is enabled. 
+ #fsdestroy = token_exchange.token_pushed(GLOBAL)(Fuse.fsdestroy) + fgetattr = token_exchange.token_pushed(GLOBAL)(Fuse.fgetattr) + flush = token_exchange.token_pushed(GLOBAL)(Fuse.flush) + fsync = token_exchange.token_pushed(GLOBAL)(Fuse.fsync) + fsyncdir = token_exchange.token_pushed(GLOBAL)(Fuse.fsyncdir) + ftruncate = token_exchange.token_pushed(GLOBAL)(Fuse.ftruncate) + getattr = token_exchange.token_pushed(GLOBAL)(Fuse.getattr) + getxattr = token_exchange.token_pushed(GLOBAL)(Fuse.getxattr) + fsinit = token_exchange.token_pushed(GLOBAL)(Fuse.fsinit) + link = token_exchange.token_pushed(GLOBAL)(Fuse.link) + listxattr = token_exchange.token_pushed(GLOBAL)(Fuse.listxattr) + lock = token_exchange.token_pushed(GLOBAL)(Fuse.lock) + mkdir = token_exchange.token_pushed(GLOBAL)(Fuse.mkdir) + mknod = token_exchange.token_pushed(GLOBAL)(Fuse.mknod) + open = token_exchange.token_pushed(GLOBAL)(Fuse.open) + opendir = token_exchange.token_pushed(GLOBAL)(Fuse.opendir) + read = token_exchange.token_pushed(GLOBAL)(Fuse.read) + readdir = token_exchange.token_pushed(GLOBAL)(Fuse.readdir) + readlink = token_exchange.token_pushed(GLOBAL)(Fuse.readlink) + release = token_exchange.token_pushed(GLOBAL)(Fuse.release) + releasedir = token_exchange.token_pushed(GLOBAL)(Fuse.releasedir) + removexattr = token_exchange.token_pushed(GLOBAL)(Fuse.removexattr) + rename = token_exchange.token_pushed(GLOBAL)(Fuse.rename) + rmdir = token_exchange.token_pushed(GLOBAL)(Fuse.rmdir) + setxattr = token_exchange.token_pushed(GLOBAL)(Fuse.setxattr) + statfs = token_exchange.token_pushed(GLOBAL)(Fuse.statfs) + symlink = token_exchange.token_pushed(GLOBAL)(Fuse.symlink) + truncate = token_exchange.token_pushed(GLOBAL)(Fuse.truncate) + unlink = token_exchange.token_pushed(GLOBAL)(Fuse.unlink) + utimens = token_exchange.token_pushed(GLOBAL)(Fuse.utimens) + write = token_exchange.token_pushed(GLOBAL)(Fuse.write) class FileSystem(object): diff -Nru pytagsfs-0.9.1/modules/pytagsfs/main.py pytagsfs-0.9.2/modules/pytagsfs/main.py --- pytagsfs-0.9.1/modules/pytagsfs/main.py 2009-10-19 02:00:54.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/main.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,3 +1,12 @@ +# Copyright (c) 2008 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + import sys, os.path diff -Nru pytagsfs-0.9.1/modules/pytagsfs/metastore/maildir.py pytagsfs-0.9.2/modules/pytagsfs/metastore/maildir.py --- pytagsfs-0.9.1/modules/pytagsfs/metastore/maildir.py 2009-10-19 02:00:54.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/metastore/maildir.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,3 +1,12 @@ +# Copyright (c) 2008 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. 
+ import os.path from mailbox import Maildir, ExternalClashError import email.header diff -Nru pytagsfs-0.9.1/modules/pytagsfs/metastore/mutagen_.py pytagsfs-0.9.2/modules/pytagsfs/metastore/mutagen_.py --- pytagsfs-0.9.1/modules/pytagsfs/metastore/mutagen_.py 2009-10-19 02:00:54.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/metastore/mutagen_.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,5 +1,14 @@ # coding: utf-8 +# Copyright (c) 2007-2009 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + import mutagen from mutagen.id3 import ID3FileType from mutagen.easyid3 import EasyID3 @@ -8,7 +17,6 @@ from pytagsfs.metastore import MetaStore from pytagsfs.values import Values from pytagsfs.debug import log_info -from pytagsfs.util import lazy_repr class TranslatedMP4(MP4): @@ -199,10 +207,10 @@ log_info( ( u'_BaseMutagenMetaStore.get_value_from_tag: ' - u'tag value is not a list, dropping: %s, %s' + u'tag value is not a list, dropping: %r, %r' ), field, - lazy_repr(tag), + tag, ) raise ValueError(tag) return list(tag) diff -Nru pytagsfs-0.9.1/modules/pytagsfs/metastore/path.py pytagsfs-0.9.2/modules/pytagsfs/metastore/path.py --- pytagsfs-0.9.1/modules/pytagsfs/metastore/path.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/metastore/path.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,3 +1,12 @@ +# Copyright (c) 2007-2008 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + import os.path from pytagsfs.metastore import MetaStore, UnsettableKeyError diff -Nru pytagsfs-0.9.1/modules/pytagsfs/metastore/testlines.py pytagsfs-0.9.2/modules/pytagsfs/metastore/testlines.py --- pytagsfs-0.9.1/modules/pytagsfs/metastore/testlines.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/metastore/testlines.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -7,6 +7,8 @@ # # A copy of the license has been included in the COPYING file. 
+import errno + from pytagsfs.metastore import MetaStore from pytagsfs.values import Values from pytagsfs.debug import log_loud @@ -34,20 +36,27 @@ pass else: try: - for line in f: - if line.endswith('\n'): - line = line[:-1] - lines.append(line.decode(encoding)) - num_lines = num_lines + 1 - if num_lines >= self.last_index: - break + content = f.read() finally: f.close() + index = content.rfind('\n') + header = content[:index] + + for line in header.split('\n'): + try: + line = line.decode(encoding) + except UnicodeDecodeError: + line = '' + lines.append(line) + num_lines = num_lines + 1 + if num_lines >= self.last_index: + break + keys = [self.index_to_key(index) for index in range(num_lines)] d = dict(zip(keys, lines)) - for k in d.keys(): + for k in list(iter(d)): if not d[k]: del d[k] @@ -66,10 +75,26 @@ line = d.get(self.index_to_key(index), '').encode(encoding) lines.append(line) + try: + f = open(path, 'r') + except IOError, e: + if e.errno != errno.ENOENT: + raise + content = '' + else: + try: + content = f.read() + finally: + f.close() + + index = content.rfind('\n') + 1 + data = content[index:] + f = open(path, 'w') try: f.write('\n'.join(lines)) f.write('\n') + f.write(data) finally: f.close() diff -Nru pytagsfs-0.9.1/modules/pytagsfs/multithreading.py pytagsfs-0.9.2/modules/pytagsfs/multithreading.py --- pytagsfs-0.9.1/modules/pytagsfs/multithreading.py 1970-01-01 01:00:00.000000000 +0100 +++ pytagsfs-0.9.2/modules/pytagsfs/multithreading.py 2009-12-27 18:02:09.000000000 +0100 @@ -0,0 +1,209 @@ +# Copyright (c) 2009 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + +''' +This module implements token-style locking. This prevents deadlocks by +enforcing that no thread can acquire more than one lock at a time. + +Tokens are identified by an arbitrary object. The GLOBAL object defined here +identifies a global token. + +Most code should simply assume that the global token is acquired and need not +be written to be thread safe. Code that uses blocking I/O should either wrap +such calls with bracketing release_token/reacquire_token or +push_token/pop_token calls. + +Generally, use release_token/reacquire_token when the calls are atomic (i.e. +do not use an open file descriptor) and use push_token/pop_token when the calls +are not atomic (i.e. do use an open file descriptor). + +push_token/pop_token should be used with a token identifier that uniquely +identifies the resource (usually an object associated with a file descriptor) +being protected. + +As little code as possible should sit in the critical section where the global +token is no longer held, since most code will assume that the global token has +been acquired. Do not call arbitrary functions, if possible, only I/O +functions whose thread safety is known. +''' + +from thread import get_ident, allocate_lock + +try: + from functools import wraps +except ImportError: + from sclapp.legacy_support import wraps + + +# Note: Do not call logging functions from this module. Logging functions +# assume that the global token is acquired. We cannot make that assumption +# here. 
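+
+# Illustrative usage (a minimal sketch; read_bytes is a hypothetical helper,
+# not part of pytagsfs -- see pytagsfs.file and pytagsfs.sourcetree for the
+# real call sites):
+#
+#     def read_bytes(open_file, length, offset):
+#         # Trade the global token for a token identifying this file object,
+#         # so other threads can run while we block on its descriptor.
+#         token_exchange.push_token(open_file)
+#         try:
+#             open_file.seek(offset)
+#             return open_file.read(length)
+#         finally:
+#             token_exchange.pop_token()
+#
+# For blocking calls that do not use an open file descriptor, bracket the call
+# with release_token/reacquire_token instead:
+#
+#     token_exchange.release_token()
+#     try:
+#         stat_result = os.lstat(encoded_path)
+#     finally:
+#         token_exchange.reacquire_token()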
+ + +GLOBAL = object() + + +class TokenError(Exception): + pass + + +class BaseToken(object): + _id = None + + def __init__(self, id): + self._id = id + + def acquire(self): + raise NotImplementedError() + + def release(self): + raise NotImplementedError() + + +class Token(BaseToken): + _lock = None + _owner = None + + def __init__(self, id): + super(Token, self).__init__(id) + self._lock = allocate_lock() + + def acquire(self): + owner = get_ident() + if self._owner == owner: + # Token may only be acquired once per thread. + raise TokenError('token already acquired') + self._lock.acquire() + self._owner = owner + + def release(self): + owner = self._owner + if owner != get_ident(): + raise TokenError('token not acquired') + del self._owner + self._lock.release() + + +class NullToken(BaseToken): + def acquire(self): + pass + + def release(self): + pass + + +class NullLock(object): + def acquire(self): + pass + + def release(self): + pass + + +class TokenExchange(object): + _tokens = None + _lock = None + _owner_token_queues = None + + def __init__(self): + self._tokens = {} + self._lock = allocate_lock() + self._owner_token_queues = {} + + def push_token(self, id): + owner = get_ident() + + self._lock.acquire() + try: + try: + prev_token = self._owner_token_queues[owner][-1] + except (KeyError, IndexError): + prev_token = None + try: + next_token = self._tokens[id] + except KeyError: + next_token = Token(id) + self._tokens[id] = next_token + self._owner_token_queues.setdefault(owner, []).append(next_token) + finally: + self._lock.release() + + if prev_token is not None: + prev_token.release() + next_token.acquire() + + def pop_token(self): + owner = get_ident() + self._lock.acquire() + try: + prev_token = self._owner_token_queues[owner].pop() + try: + next_token = self._owner_token_queues[owner][-1] + except (KeyError, IndexError): + next_token = None + finally: + self._lock.release() + + prev_token.release() + if next_token is not None: + next_token.acquire() + + def token_pushed(self, get_id): + def decorator(wrapped): + @wraps(wrapped) + def fn(*args, **kwargs): + if callable(get_id): + id = get_id(*args, **kwargs) + else: + id = get_id + self.push_token(id) + try: + return wrapped(*args, **kwargs) + finally: + self.pop_token() + return fn + return decorator + + def release_token(self): + owner = get_ident() + self._lock.acquire() + try: + try: + token = self._owner_token_queues[owner][-1] + except (KeyError, IndexError): + token = None + finally: + self._lock.release() + if token is not None: + token.release() + + def reacquire_token(self): + owner = get_ident() + self._lock.acquire() + try: + try: + token = self._owner_token_queues[owner][-1] + except (KeyError, IndexError): + token = None + finally: + self._lock.release() + if token is not None: + token.acquire() + + def token_released(self, wrapped): + @wraps(wrapped) + def fn(*args, **kwargs): + self.release_token() + try: + return wrapped(*args, **kwargs) + finally: + self.reacquire_token() + return fn + +token_exchange = TokenExchange() diff -Nru pytagsfs-0.9.1/modules/pytagsfs/optgroup.py pytagsfs-0.9.2/modules/pytagsfs/optgroup.py --- pytagsfs-0.9.1/modules/pytagsfs/optgroup.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/optgroup.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,3 +1,12 @@ +# Copyright (c) 2008 Forest Bond. +# This file is part of the pytagsfs software package. 
+# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + import sys from optparse import ( Option, diff -Nru pytagsfs-0.9.1/modules/pytagsfs/pathpropcache.py pytagsfs-0.9.2/modules/pytagsfs/pathpropcache.py --- pytagsfs-0.9.1/modules/pytagsfs/pathpropcache.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/pathpropcache.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -7,22 +7,12 @@ # # A copy of the license has been included in the COPYING file. -from threading import RLock - -from pytagsfs.util import ( - nonatomic, - attr_named, -) - class PathPropCache(object): d = None - lock = None def __init__(self): self.d = {} - self.lock = RLock() - @nonatomic(attr_named('lock')) def put(self, path, key, value): try: path_d = self.d[path] @@ -31,11 +21,9 @@ self.d[path] = path_d path_d[key] = value - @nonatomic(attr_named('lock')) def get(self, path, key): return self.d[path][key] - @nonatomic(attr_named('lock')) def prune(self, path = None, key = None): if key is not None: if path is None: diff -Nru pytagsfs-0.9.1/modules/pytagsfs/pathstore/pytypes.py pytagsfs-0.9.2/modules/pytagsfs/pathstore/pytypes.py --- pytagsfs-0.9.1/modules/pytagsfs/pathstore/pytypes.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/pathstore/pytypes.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. 
# # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -16,13 +16,9 @@ from sclapp.util import safe_encode -from threading import RLock - from pytagsfs.pathstore import PathStore from pytagsfs.util import ( last_unique, - nonatomic, - attr_named, unicode_path_sep, ) from pytagsfs.exceptions import ( @@ -268,14 +264,11 @@ ''' + PathStore.__doc__ - lock = None - path_mapping = None entries = None meta_data = None def __init__(self): - self.lock = RLock() super(PyTypesPathStore, self).__init__() self.path_mapping = PathMapping() self.entries = EntryStore() @@ -286,7 +279,6 @@ raise AssertionError( 'old_fake_path must not be "%s"' % unicode_path_sep) - @nonatomic(attr_named('lock')) def add_file(self, fake_path, real_path): try: if self.is_dir(fake_path): @@ -304,13 +296,11 @@ self.path_mapping.add_real_path(fake_path, real_path) self.entries.add_entries_and_directories_recursive(fake_path) - @nonatomic(attr_named('lock')) def add_directory(self, fake_path): self._must_not_exist(fake_path) self.entries.add_entries_and_directories_recursive(fake_path) self.entries.add_directory(fake_path) - @nonatomic(attr_named('lock')) def rename(self, old_fake_path, new_fake_path): if self.is_dir(old_fake_path): return self._rename_directory(old_fake_path, new_fake_path) @@ -318,7 +308,6 @@ return self._rename_file(old_fake_path, new_fake_path) raise FakePathNotFound(old_fake_path) - @nonatomic(attr_named('lock')) def _rename_file(self, old_fake_path, new_fake_path): real_path = self.get_real_path(old_fake_path) @@ -335,7 +324,6 @@ self._remove_file(old_fake_path, real_path) self.add_file(new_fake_path, real_path) - @nonatomic(attr_named('lock')) def _rename_directory(self, old_fake_path, new_fake_path): self._must_be_empty_directory(old_fake_path) @@ -367,7 +355,6 @@ if new_fake_path.endswith(unicode_path_sep): raise ValueError(new_fake_path) - @nonatomic(attr_named('lock')) def remove(self, fake_path, real_path = None): if self.is_dir(fake_path): if real_path is not None: @@ -379,19 +366,16 @@ return self._remove_file(fake_path, real_path) raise FakePathNotFound(fake_path) - @nonatomic(attr_named('lock')) def _remove_file(self, fake_path, real_path = None): self.path_mapping.remove_real_path(fake_path, real_path) self.entries.remove_entries_and_directories_recursive(fake_path) - @nonatomic(attr_named('lock')) def _remove_directory(self, fake_path): self._assert_not_root(fake_path) self._must_be_empty_directory(fake_path) self.entries.remove_directory(fake_path) self.entries.remove_entries_and_directories_recursive(fake_path) - @nonatomic(attr_named('lock')) def get_real_path(self, fake_path): try: return self.path_mapping.get_real_path(fake_path) @@ -400,14 +384,12 @@ raise IsADirectory(fake_path) raise FakePathNotFound(fake_path) - @nonatomic(attr_named('lock')) def get_fake_paths(self, real_path): try: return list(self.path_mapping.get_fake_paths(real_path)) except PathDoesNotExistInPathMapping: raise RealPathNotFound(real_path) - @nonatomic(attr_named('lock')) def get_real_subpaths(self, real_path): if real_path.endswith(unicode_path_sep): raise ValueError(u'real_path %s ends with %s' % ( @@ -416,12 +398,10 @@ real_paths = self.path_mapping.get_reverse_keys() return [p for p in real_paths if p.startswith(real_path)] - @nonatomic(attr_named('lock')) def get_entries(self, fake_path): self._must_be_dir(fake_path) return list(self.entries.get_entries(fake_path)) - @nonatomic(attr_named('lock')) def is_file(self, fake_path): try: self.path_mapping.get_real_path(fake_path) @@ 
-429,7 +409,6 @@ return False return True - @nonatomic(attr_named('lock')) def is_dir(self, fake_path): try: self.entries.get_entries(fake_path) @@ -437,14 +416,12 @@ return False return True - @nonatomic(attr_named('lock')) def is_empty_dir(self, fake_path): return ( self.is_dir(fake_path) and not self.entries.get_all_entries(fake_path) ) - @nonatomic(attr_named('lock')) def path_exists(self, fake_path): if self.is_file(fake_path): return True @@ -486,19 +463,16 @@ return raise NotAnEndPoint(fake_path) - @nonatomic(attr_named('lock')) def set_meta_data(self, fake_path, meta_data): self._must_be_end_point(fake_path) entry = self.entries.get_entry(fake_path) entry.set_meta_data(meta_data) - @nonatomic(attr_named('lock')) def get_meta_data(self, fake_path): self._must_be_end_point(fake_path) entry = self.entries.get_entry(fake_path) return entry.get_meta_data() - @nonatomic(attr_named('lock')) def unset_meta_data(self, fake_path): self._must_be_end_point(fake_path) entry = self.entries.get_entry(fake_path) diff -Nru pytagsfs-0.9.1/modules/pytagsfs/profiling.py pytagsfs-0.9.2/modules/pytagsfs/profiling.py --- pytagsfs-0.9.1/modules/pytagsfs/profiling.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/profiling.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,3 +1,12 @@ +# Copyright (c) 2008 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + import time from pytagsfs.specialfile.logfile import VirtualLogFile diff -Nru pytagsfs-0.9.1/modules/pytagsfs/sourcetree.py pytagsfs-0.9.2/modules/pytagsfs/sourcetree.py --- pytagsfs-0.9.1/modules/pytagsfs/sourcetree.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/sourcetree.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -7,7 +7,7 @@ # # A copy of the license has been included in the COPYING file. -import os +import os, errno from pytagsfs.util import unicode_path_sep from pytagsfs.debug import ( @@ -15,6 +15,7 @@ log_warning, log_traceback, ) +from pytagsfs.multithreading import token_exchange class SourceTree(object): @@ -64,7 +65,20 @@ # os.walk seems to want encoded input path = self.encode(path) - for dirpath, dirnames, filenames in os.walk(path): + token_exchange.release_token() + try: + iterator = os.walk(path) + finally: + token_exchange.reacquire_token() + while True: + token_exchange.release_token() + try: + try: + dirpath, dirnames, filenames = iterator.next() + except StopIteration: + break + finally: + token_exchange.reacquire_token() dirpath = self.decode(dirpath) dirnames = [self.decode(dirname) for dirname in dirnames] filenames = [self.decode(filename) for filename in filenames] @@ -96,11 +110,37 @@ log_debug(u'Encoded path is "%s".', encoded_path) return encoded_path + @token_exchange.token_released def isreadable(self, path): + ''' + Return True if an attempt to read a byte from the file does not cause + an IOError or OSError. 
+ ''' try: - f = open(path, 'r') - f.read(1) - f.close() + f = open(self.encode(path), 'r') + try: + f.read(1) + finally: + f.close() except (IOError, OSError): return False return True + + @token_exchange.token_released + def issymlink(self, path): + ''' + Return True if readlink succeeds. + ''' + try: + os.readlink(self.encode(path)) + except (IOError, OSError): + return False + return True + + @token_exchange.token_released + def lstat(self, path): + return os.lstat(self.encode(path)) + + @token_exchange.token_released + def utime(self, path, times): + return os.utime(self.encode(path), times) diff -Nru pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/__init__.py pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/__init__.py --- pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/__init__.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/__init__.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -7,6 +7,7 @@ # # A copy of the license has been included in the COPYING file. + SOURCE_TREE_MONITORS = ( 'pytagsfs.sourcetreemon.inotifyx_.DeferredInotifyxSourceTreeMonitor', 'pytagsfs.sourcetreemon.gamin_.DeferredGaminSourceTreeMonitor', @@ -14,11 +15,13 @@ 'pytagsfs.sourcetreemon.dummy.DummySourceTreeMonitor', ) + def get_source_tree_monitor(dotted_name): from pytagsfs.util import get_obj_by_dotted_name source_tree_mon_cls = get_obj_by_dotted_name(dotted_name) return source_tree_mon_cls() + class SourceTreeMonitor(object): add_cb = lambda *args, **kwargs: None remove_cb = lambda *args, **kwargs: None diff -Nru pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/deferred.py pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/deferred.py --- pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/deferred.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/deferred.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,25 +1,28 @@ -from threading import RLock +# Copyright (c) 2008-2009 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. from pytagsfs.sourcetreemon import SourceTreeMonitor -from pytagsfs.util import ( - nonatomic, - attr_named, -) + ADD = 'ADD' REMOVE = 'REMOVE' UPDATE = 'UPDATE' + class DeferredSourceTreeMonitor(SourceTreeMonitor): event_queue = None - dstm_lock = None orig_add_cb = None orig_remove_cb = None orig_update_cb = None def __init__(self): - self.dstm_lock = RLock() super(DeferredSourceTreeMonitor, self).__init__() self.event_queue = [] @@ -37,15 +40,12 @@ super(DeferredSourceTreeMonitor, self).set_update_cb( self.dstm_update_cb) - @nonatomic(attr_named('dstm_lock')) def dstm_add_cb(self, path, *args): self.event_queue.append((ADD, path) + args) - @nonatomic(attr_named('dstm_lock')) def dstm_remove_cb(self, path, *args): self.event_queue.append((REMOVE, path) + args) - @nonatomic(attr_named('dstm_lock')) def dstm_update_cb(self, path, *args): # Look for the last event for this path. 
Update events can be collapsed # together, since more than one update can be handled no differently @@ -62,7 +62,6 @@ break self.event_queue.append((UPDATE, path) + args) - @nonatomic(attr_named('dstm_lock')) def finish_processing(self): while self.event_queue: event = self.event_queue.pop(0) @@ -75,7 +74,6 @@ else: raise ValueError('unknown action %s' % str(event[0])) - @nonatomic(attr_named('dstm_lock')) def process_events(self): super(DeferredSourceTreeMonitor, self).process_events() self.finish_processing() diff -Nru pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/dummy.py pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/dummy.py --- pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/dummy.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/dummy.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,3 +1,12 @@ +# Copyright (c) 2008 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + from pytagsfs.sourcetreemon import SourceTreeMonitor class DummySourceTreeMonitor(SourceTreeMonitor): diff -Nru pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/gamin_.py pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/gamin_.py --- pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/gamin_.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/gamin_.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2008 Forest Bond. +# Copyright (c) 2008-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -9,18 +9,18 @@ import os -from threading import RLock - -from pytagsfs.util import nonatomic, attr_named +from pytagsfs.util import ref_self from pytagsfs.sourcetreemon import SourceTreeMonitor from pytagsfs.sourcetreemon.deferred import DeferredSourceTreeMonitor from pytagsfs.debug import log_critical from pytagsfs.exceptions import MissingDependency +from pytagsfs.multithreading import token_exchange, GLOBAL + ################################################################################ + class GaminSourceTreeMonitor(SourceTreeMonitor): - lock = None watch_monitor = None def __init__(self): @@ -43,8 +43,6 @@ gamin.GAMEndExist: 'process_unexpected', } - self.lock = RLock() - ################################################################################ # SourceTreeMonitor API @@ -53,7 +51,7 @@ self.watch_monitor = self.gamin.WatchMonitor() self.watch_monitor.no_exists() - @nonatomic(attr_named('lock')) + @token_exchange.token_pushed(ref_self) def stop(self): for real_path in self.watch_monitor.objects: self.watch_monitor.stop_watch(real_path) @@ -82,13 +80,13 @@ def fileno(self): return self.watch_monitor.get_fd() - @nonatomic(attr_named('lock')) + @token_exchange.token_pushed(ref_self) def process_events(self): return self.watch_monitor.handle_events() ################################################################################ - @nonatomic(attr_named('lock')) + @token_exchange.token_pushed(ref_self) def watch_dir(self, real_path): if real_path in self.watch_monitor.objects: # already watching @@ -103,7 +101,7 @@ except self.gamin.GaminException: pass - @nonatomic(attr_named('lock')) + @token_exchange.token_pushed(ref_self) def unwatch_dir(self, real_path): if real_path not in 
self.watch_monitor.objects: return @@ -149,14 +147,25 @@ def process_unexpected(self, real_path): # FIXME: Is log_critical necessary here? log_critical( - u'GaminSourceTreeMonitor.process_unexpected: %s', real_path) + u'GaminSourceTreeMonitor.process_unexpected: %s', + real_path, + ) + + # process_events executes with token identified by self and eventually + # calls this callback function. At this point, we no longer need that + # token, but we must reacquire the global token before calling update_cb, + # add_cb, or remove_cb. This is the best place to do that because we are + # certain of context here. + @token_exchange.token_pushed(GLOBAL) def event_callback(self, entry, kind, watch_path): real_path = os.path.join(watch_path, entry) return getattr(self, self.event_processors[kind])(real_path) + ################################################################################ + class DeferredGaminSourceTreeMonitor( DeferredSourceTreeMonitor, GaminSourceTreeMonitor): pass diff -Nru pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/inotifyx_.py pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/inotifyx_.py --- pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/inotifyx_.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/inotifyx_.py 2009-12-27 18:02:09.000000000 +0100 @@ -9,19 +9,15 @@ import os, errno -from threading import RLock - -from pytagsfs.util import nonatomic, attr_named - +from pytagsfs.util import ref_self from pytagsfs.sourcetreemon import SourceTreeMonitor from pytagsfs.sourcetreemon.deferred import DeferredSourceTreeMonitor from pytagsfs.exceptions import MissingDependency from pytagsfs.debug import log_warning, log_critical +from pytagsfs.multithreading import token_exchange class InotifyxSourceTreeMonitor(SourceTreeMonitor): - lock = None - wd_to_path = None path_to_wd = None fd = None @@ -45,12 +41,10 @@ self.wd_to_path = {} self.path_to_wd = {} - self.lock = RLock() - def start(self, debug = False): self.fd = self.inotifyx.init() - @nonatomic(attr_named('lock')) + @token_exchange.token_pushed(ref_self) def watch_dir(self, real_path): if real_path in self.path_to_wd: # already watching @@ -67,7 +61,7 @@ # watch unsuccessful, clean up self._rm_watch(wd) - @nonatomic(attr_named('lock')) + @token_exchange.token_pushed(ref_self) def unwatch_dir(self, real_path): try: wd = self.path_to_wd[real_path] @@ -88,7 +82,7 @@ ### SourceTreeMonitor API: - @nonatomic(attr_named('lock')) + @token_exchange.token_pushed(ref_self) def stop(self): os.close(self.fd) @@ -127,8 +121,11 @@ # happen for other event types, but could if paths were rapidly # added and removed. 
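# The token_pushed(...) decorators above come from pytagsfs.multithreading,
# which is not part of this diff.  Judging only from the call sites and from
# the comment on GaminSourceTreeMonitor.event_callback above, they appear to
# trade whatever token the calling thread holds for the named one (the GLOBAL
# token, or the monitor instance selected by ref_self) for the duration of the
# call.  Everything below is an assumed sketch of that idea built on ordinary
# locks, not the real implementation.
import threading

GLOBAL = object()

def ref_self(self, *args, **kwargs):
    # Same shape as the ref_self helper added to pytagsfs.util in this release.
    return self

class GuessedTokenExchange(object):
    def __init__(self):
        self._locks = {}
        self._held = threading.local()

    def _lock_for(self, key):
        return self._locks.setdefault(key, threading.RLock())

    def acquire_token(self, key):
        self._lock_for(key).acquire()
        self._held.key = key

    def release_token(self):
        key = self._held.key
        self._held.key = None
        self._lock_for(key).release()

    def token_pushed(self, token):
        # `token` is a constant key (like GLOBAL) or a callable (like ref_self)
        # that derives the key from the wrapped call's arguments.
        def decorator(fn):
            def wrapper(*args, **kwargs):
                previous = getattr(self._held, 'key', None)
                key = token(*args, **kwargs) if callable(token) else token
                if previous is not None:
                    self.release_token()
                self.acquire_token(key)
                try:
                    return fn(*args, **kwargs)
                finally:
                    self.release_token()
                    if previous is not None:
                        self.acquire_token(previous)
            return wrapper
        return decorator

token_exchange = GuessedTokenExchange()

class ExampleMonitor(object):
    @token_exchange.token_pushed(ref_self)
    def process_events(self):
        return self.event_callback()

    @token_exchange.token_pushed(GLOBAL)
    def event_callback(self):
        # Here the per-monitor token has been swapped for the global one,
        # mirroring the comment on GaminSourceTreeMonitor.event_callback.
        return 'callbacks run under the global token'

assert ExampleMonitor().process_events() == 'callbacks run under the global token'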
if not (event.mask & self.inotifyx.IN_IGNORED): - log_warning('InotifyxSourceTreeMonitor: late event: %s, %s' % ( - str(event), repr(event))) + log_warning( + 'InotifyxSourceTreeMonitor: late event: %s, %r', + event, + event, + ) return if event.name: @@ -164,9 +161,12 @@ 'failed to match event mask: %s' % event.get_mask_description() ) - @nonatomic(attr_named('lock')) + @token_exchange.token_pushed(ref_self) + def _get_events(self): + return self.inotifyx.get_events(self.fd, 0) + def process_events(self): - for event in self.inotifyx.get_events(self.fd, 0): + for event in self._get_events(): self._process_event(event) diff -Nru pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/kqueue_.py pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/kqueue_.py --- pytagsfs-0.9.1/modules/pytagsfs/sourcetreemon/kqueue_.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/sourcetreemon/kqueue_.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2008 Forest Bond. +# Copyright (c) 2008-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -9,30 +9,34 @@ import os, stat -from threading import RLock - -from pytagsfs.util import nonatomic, attr_named - +from pytagsfs.util import ref_self from pytagsfs.sourcetreemon import SourceTreeMonitor from pytagsfs.sourcetreemon.deferred import DeferredSourceTreeMonitor from pytagsfs.debug import log_debug from pytagsfs.exceptions import MissingDependency +from pytagsfs.multithreading import token_exchange + ################################################################################ + ADD = object() REMOVE = object() UPDATE = object() + class KqueueException(Exception): pass + class NotWatching(KqueueException): pass + class AlreadyWatching(KqueueException): pass + class KqueueEvent(object): path = None type = None @@ -45,6 +49,7 @@ self.is_dir = is_dir self.kevent = kevent + class KqueueManager(object): kqueue = None directory_entries = None @@ -59,6 +64,7 @@ self.paths = {} self.base_kevents = {} + @token_exchange.token_pushed(ref_self) def watch_directory(self, path): if path in self.paths.values(): raise AlreadyWatching(path) @@ -101,6 +107,7 @@ except (OSError, KqueueException): pass + @token_exchange.token_pushed(ref_self) def watch_file(self, path): if path in self.paths.values(): raise AlreadyWatching(path) @@ -138,6 +145,7 @@ self.watch_directory(path) self.watch_file(path) + @token_exchange.token_pushed(ref_self) def stop_watch(self, path): if path not in self.paths.values(): raise NotWatching(path) @@ -152,8 +160,10 @@ del self.base_kevents[fd] os.close(fd) + @token_exchange.token_pushed(ref_self) def get_events(self): - for kevent in self.kqueue.kevent(None, 1024, 0): + kevents = self.kqueue.kevent(None, 1024, 0) + for kevent in kevents: try: path = self.paths[kevent.ident] except KeyError: @@ -226,14 +236,16 @@ def is_dir(self, path): return (path in self.directory_entries) + @token_exchange.token_pushed(ref_self) def stop(self): for path in self.paths.values(): self.stop_watch(path) + ################################################################################ + class KqueueSourceTreeMonitor(SourceTreeMonitor): - lock = None kqueue_manager = None def __init__(self): @@ -243,7 +255,6 @@ raise MissingDependency('kqueue') self.kqueue = kqueue - self.lock = RLock() ################################################################################ @@ -252,7 +263,6 @@ def start(self, debug = False): self.kqueue_manager = 
KqueueManager(self.kqueue) - @nonatomic(attr_named('lock')) def stop(self): self.kqueue_manager.stop() @@ -280,9 +290,11 @@ def fileno(self): return self.kqueue_manager.kqueue.fileno() - @nonatomic(attr_named('lock')) + def _get_events(self): + return self.kqueue_manager.get_events() + def process_events(self): - for event in self.kqueue_manager.get_events(): + for event in self._get_events(): if event.type is ADD: self.add_cb(event.path, event.is_dir) elif event.type is REMOVE: @@ -292,14 +304,12 @@ ################################################################################ - @nonatomic(attr_named('lock')) def _unwatch_path(self, real_path): try: self.kqueue_manager.stop_watch(real_path) except NotWatching: pass - @nonatomic(attr_named('lock')) def watch_dir(self, real_path): try: self.kqueue_manager.watch_directory(real_path) @@ -308,7 +318,6 @@ unwatch_dir = _unwatch_path - @nonatomic(attr_named('lock')) def watch_file(self, real_path): try: self.kqueue_manager.watch_file(real_path) @@ -317,8 +326,12 @@ unwatch_file = _unwatch_path + ################################################################################ + class DeferredKqueueSourceTreeMonitor( - DeferredSourceTreeMonitor, KqueueSourceTreeMonitor): + DeferredSourceTreeMonitor, + KqueueSourceTreeMonitor, +): pass diff -Nru pytagsfs-0.9.1/modules/pytagsfs/sourcetreerep/__init__.py pytagsfs-0.9.2/modules/pytagsfs/sourcetreerep/__init__.py --- pytagsfs-0.9.1/modules/pytagsfs/sourcetreerep/__init__.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/sourcetreerep/__init__.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -7,31 +7,23 @@ # # A copy of the license has been included in the COPYING file. 
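# The kqueue monitor above dispatches on bare object() sentinels
# (ADD/REMOVE/UPDATE) compared by identity, and the representation code below
# uses the same trick for its STAT/ENTRIES cache keys.  A minimal standalone
# version of that sentinel-dispatch pattern, with made-up paths:
ADD = object()
REMOVE = object()

class Event(object):
    def __init__(self, type, path):
        self.type = type
        self.path = path

def dispatch(events, add_cb, remove_cb):
    for event in events:
        if event.type is ADD:
            add_cb(event.path)
        elif event.type is REMOVE:
            remove_cb(event.path)
        else:
            raise ValueError('unknown event type')

added, removed = [], []
dispatch([Event(ADD, '/a'), Event(REMOVE, '/b')], added.append, removed.append)
assert added == ['/a'] and removed == ['/b']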
-import sys, os, signal, re, stat +import os, re, stat from itertools import chain -from threading import RLock from pytagsfs.metastore import UnsettableKeyError from pytagsfs.exceptions import ( IsADirectory, PathNotFound, - PathExists, InvalidArgument, UnrepresentablePath, ) from pytagsfs.util import ( - nonatomic, - attr_named, split_path, join_path_abs, unicode_path_sep, - lazy_repr, ) from pytagsfs.values import Values -from pytagsfs.subspat import ( - Error as PatternError, - FillError, -) +from pytagsfs.subspat import Error as PatternError from pytagsfs.debug import ( log_debug, log_info, @@ -39,6 +31,7 @@ log_traceback, ) + def _make_filter(expr, real): if real: get_arg = lambda args: args[0] @@ -57,13 +50,12 @@ return filtr + STAT = object() ENTRIES = object() class SourceTreeRepresentation(object): - lock = None - meta_store = None substitution_patterns = None path_store = None @@ -89,8 +81,6 @@ if substitution_pattern.expression == '': raise ValueError('substitution pattern string cannot be empty') - self.lock = RLock() - self.debug = debug self.meta_store = meta_store @@ -143,10 +133,7 @@ u'path %s ends with "%s"' % (repr(path), unicode_path_sep)) def fill_path(self, substitutions): - log_debug( - u'fill_path: substitutions = %s', - lazy_repr(substitutions), - ) + log_debug(u'fill_path: substitutions = %r', substitutions) if isinstance(substitutions, Values): raise TypeError('substitutions must not be Values instance') @@ -179,7 +166,6 @@ return join_path_abs(fake_path_parts) - @nonatomic(attr_named('lock')) def add_source_dir(self, real_path): ''' Recursively add source directory ``real_path`` to the source tree @@ -187,7 +173,6 @@ * Path corresponds with a file that is not a directory. * Directory does not exist. - * os.path.basename(real_path).startswith('.pytagsfs') If a directory with the same real path already exists in the source tree representation, it will be silently removed from the @@ -209,7 +194,6 @@ real_path = os.path.join(dirpath, filename) self.add_source_file(real_path) - @nonatomic(attr_named('lock')) def remove_source_dir(self, real_path): ''' Recursively remove source directory ``real_path`` from the source tree @@ -222,7 +206,6 @@ self.monitor.remove_source_dir(self.source_tree.encode(real_path)) - @nonatomic(attr_named('lock')) def add_source_file(self, real_path): ''' Add source file ``real_path`` to the source tree representation. Do @@ -231,9 +214,25 @@ * The file does not exist. * The target file is a directory. ''' - # Note that this function checks if the source file is readable - # in a few different places. These create race conditions that - # are probably unavoidable, but have minimal consequences. + + # We want to filter out unreadable files and symlinks. These checks + # are racey, of course. A symlink could be removed and replaced with + # a real file immediately after our check. Likewise, a file that is + # unreadable because of permissions could have its mode changed. + + # However, in either of these cases a new source tree monitor event + # will be received and another attempt to add the source file will + # be made. Thus, there is no serious consequence. + + # Note that if all MetaStore implementations pulled metadata from file + # contents, the isreadable check would be unnecessary. But some + # (PathMetaStore, for instance) do not read the source file to obtain + # metadata. 
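# A standalone illustration of the isreadable/issymlink pre-filtering these
# comments describe.  It uses os.path.islink for brevity, where
# SourceTree.issymlink calls os.readlink, and the same race window applies:
# the checks are advisory, and a later monitor event simply retriggers the add.
import os

def looks_addable(path):
    if os.path.islink(path):
        return False
    try:
        f = open(path, 'rb')
        try:
            f.read(1)              # unreadable files raise IOError/OSError here
        finally:
            f.close()
    except (IOError, OSError):
        return False
    return True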
+ + # Also note that if we handled symlinks correctly everywhere (in + # getattr, populate, and in SourceTreeMonitor implementations), the + # issymlink check could be dropped. Since we don't currently handle + # them correctly, though, it is best to simply ignore them. if not self.source_tree.isreadable(real_path): log_debug( @@ -242,6 +241,13 @@ ) return + if self.source_tree.issymlink(real_path): + log_debug( + u'add_source_file: not adding symlink: %s', + real_path, + ) + return + try: self.remove_source_file(real_path) except PathNotFound: @@ -284,13 +290,6 @@ index = index + 1 for fake_path, splitters in zip(fake_paths, splitter_groups): - if not self.source_tree.isreadable(real_path): - log_debug( - u'add_source_file: not readable, not adding: %s', - real_path, - ) - return - if not self.filter_path( self.source_tree.get_relative_path(real_path), fake_path @@ -304,16 +303,11 @@ self.monitor.add_source_file(self.source_tree.encode(real_path)) - log_debug( - u'add_source_file: adding %s, %s', - lazy_repr(fake_path), - lazy_repr(real_path), - ) + log_debug(u'add_source_file: adding %r, %r', fake_path, real_path) self.path_store.add_file(fake_path, real_path) self._set_splitters(fake_path, splitters) self._cache_prune_branch_to(fake_path) - @nonatomic(attr_named('lock')) def remove_source_file(self, real_path): try: fake_paths = self.get_fake_paths(real_path) @@ -323,9 +317,9 @@ for fake_path in fake_paths: log_debug( - u'remove_source_file: removing %s, %s', - lazy_repr(fake_path), - lazy_repr(real_path), + u'remove_source_file: removing %r, %r', + fake_path, + real_path, ) try: self.path_store.remove(fake_path, real_path) @@ -336,21 +330,20 @@ self.monitor.remove_source_file(self.source_tree.encode(real_path)) - @nonatomic(attr_named('lock')) def update_source_file(self, real_path): self.add_source_file(real_path) - @nonatomic(attr_named('lock')) def add_source_path(self, real_path): self.add_source_file(real_path) self.add_source_dir(real_path) - @nonatomic(attr_named('lock')) def remove_source_path(self, real_path): self.remove_source_file(real_path) self.remove_source_dir(real_path) def update_source_path(self, real_path): + # Note: there is no update_source_dir. Nobody cares if directory + # permissions or timestamps change. We only care about files. 
self.update_source_file(real_path) ################################################################################ @@ -366,7 +359,6 @@ ################################################################################ - @nonatomic(attr_named('lock')) def rename_path(self, old_fake_path, new_fake_path): self.validate_fake_path(old_fake_path) self.validate_fake_path(new_fake_path) @@ -479,19 +471,18 @@ new_values = new_values_by_real_path[real_path] apply_values = Values.diff3(current_values, old_values, new_values) - log_debug(u'rename: real_path = %s', lazy_repr(real_path)) - log_debug(u'rename: current_values = %s', lazy_repr(current_values)) - log_debug(u'rename: old_values = %s', lazy_repr(old_values)) - log_debug(u'rename: new_values = %s', lazy_repr(new_values)) - log_debug(u'rename: apply_values = %s', lazy_repr(apply_values)) + log_debug(u'rename: real_path = %r', real_path) + log_debug(u'rename: current_values = %r', current_values) + log_debug(u'rename: old_values = %r', old_values) + log_debug(u'rename: new_values = %r', new_values) + log_debug(u'rename: apply_values = %r', apply_values) try: self.meta_store.set(real_path, apply_values) except UnsettableKeyError: - log_debug(u'rename: %s', lazy_repr(UnsettableKeyError)) + log_debug(u'rename: %r', UnsettableKeyError) raise InvalidArgument - @nonatomic(attr_named('lock')) def add_directory_with_parents(self, fake_path): parts = split_path(fake_path) path = unicode_path_sep @@ -500,7 +491,6 @@ if not self.is_dir(path): self.add_directory(path) - @nonatomic(attr_named('lock')) def add_directory(self, fake_path): self.validate_fake_path(fake_path) @@ -509,8 +499,8 @@ if len_parts >= len(self.substitution_patterns): log_error( - 'add_directory: too many directories: %s', - lazy_repr(fake_path), + 'add_directory: too many directories: %r', + fake_path, ) raise InvalidArgument @@ -527,18 +517,15 @@ self._set_splitters(fake_path, splitters) self._cache_prune_branch_to(fake_path) - @nonatomic(attr_named('lock')) def remove_directory(self, fake_path): self.validate_fake_path(fake_path) self.path_store.remove(fake_path) self._cache_prune_branch_to(fake_path) - @nonatomic(attr_named('lock')) def get_real_path(self, fake_path): self.validate_fake_path(fake_path) return self.path_store.get_real_path(fake_path) - @nonatomic(attr_named('lock')) def get_fake_paths(self, real_path): self.validate_source_path(real_path) return self.path_store.get_fake_paths(real_path) @@ -549,30 +536,22 @@ return self._cache_get(fake_path, ENTRIES) except KeyError: pass - self.lock.acquire() - try: - entries = self.path_store.get_entries(fake_path) - self.cache_put(fake_path, ENTRIES, entries) - return entries - finally: - self.lock.release() + entries = self.path_store.get_entries(fake_path) + self.cache_put(fake_path, ENTRIES, entries) + return entries - @nonatomic(attr_named('lock')) def path_exists(self, fake_path): self.validate_fake_path(fake_path) return self.path_store.path_exists(fake_path) - @nonatomic(attr_named('lock')) def is_file(self, fake_path): self.validate_fake_path(fake_path) return self.path_store.is_file(fake_path) - @nonatomic(attr_named('lock')) def is_dir(self, fake_path): self.validate_fake_path(fake_path) return self.path_store.is_dir(fake_path) - @nonatomic(attr_named('lock')) def is_empty_dir(self, fake_path): return self.is_dir(fake_path) and not self.get_entries(fake_path) @@ -593,13 +572,9 @@ return self._cache_get(fake_path, STAT) except KeyError: pass - self.lock.acquire() - try: - stat_result = self._getattr(fake_path) - 
self.cache_put(fake_path, STAT, stat_result) - return stat_result - finally: - self.lock.release() + stat_result = self._getattr(fake_path) + self.cache_put(fake_path, STAT, stat_result) + return stat_result def _getattr(self, fake_path): try: @@ -607,7 +582,7 @@ except (IsADirectory, PathNotFound): pass else: - return os.lstat(real_path) + return self.source_tree.lstat(real_path) # Files should've been handled above. Now we're just dealing with # directories and non-existent paths. @@ -616,7 +591,7 @@ # caught be our caller. subdirs = self._get_subdirectories(fake_path) - source_root_statinfo = os.lstat(self.source_tree.root) + source_root_statinfo = self.source_tree.lstat(self.source_tree.root) if not isinstance(source_root_statinfo, os.stat_result): # FIXME: I don't think this is being handled correctly. Should we @@ -627,8 +602,8 @@ # source_root_statinfo is actually an integer indicating an error. log_error( - u'SourceTreeRepresentation._getattr: source_root_statinfo = %s', - lazy_repr(source_root_statinfo), + u'SourceTreeRepresentation._getattr: source_root_statinfo = %r', + source_root_statinfo, ) return source_root_statinfo @@ -663,24 +638,22 @@ st_ctime, )) - @nonatomic(attr_named('lock')) def utime(self, fake_path, times): self.validate_fake_path(fake_path) if self.is_file(fake_path): log_debug(u'utime: updating source file') real_path = self.get_real_path(fake_path) - os.utime(real_path, times) + self.source_tree.utime(real_path, times) self._cache_prune_branch_to(fake_path, STAT) elif self.is_empty_dir(fake_path): log_debug(u'utime: updating source tree root') - os.utime(self.source_tree.root, times) + self.source_tree.utime(self.source_tree.root, times) else: log_debug(u'utime: updating all end-points') # May raise FakePathNotFound: for end_point in self.path_store.get_end_points(fake_path): self.utime(end_point, times) - @nonatomic(attr_named('lock')) def _get_subdirectories(self, fake_path): entries = self.get_entries(fake_path) subdirs = [] @@ -701,7 +674,6 @@ def _get_file_time_attr(self, fake_path, attr_name): return getattr(self.getattr(fake_path), attr_name) - @nonatomic(attr_named('lock')) def _get_directory_time_attr(self, fake_path, attr_name): entries = self.get_entries(fake_path) entry_paths = [os.path.join(fake_path, e) for e in entries] @@ -723,11 +695,11 @@ log_error( ( u'_get_directory_time_attr: ' - u'caught %s getting %s for subdir %s' + u'caught %r getting %r for subdir %r' ), - lazy_repr(e), - lazy_repr(attr_name), - lazy_repr(subdir), + e, + attr_name, + subdir, ) file_times = [] @@ -739,11 +711,11 @@ log_error( ( u'_get_directory_time_attr: ' - u'caught %s getting %s for file %s' + u'caught %r getting %r for file %r' ), - lazy_repr(e), - lazy_repr(attr_name), - lazy_repr(file), + e, + attr_name, + file, ) times = list(chain(subdir_times, file_times)) @@ -818,24 +790,20 @@ ################################################################################ - @nonatomic(attr_named('lock')) def cache_put(self, fake_path, key, value): if self.cache is not None: return self.cache.put(fake_path, key, value) - @nonatomic(attr_named('lock')) def _cache_get(self, fake_path, key): if self.cache is not None: return self.cache.get(fake_path, key) raise KeyError('no cache to get from') - @nonatomic(attr_named('lock')) def _cache_prune(self, fake_path = None, key = None): if self.cache is not None: return self.cache.prune(fake_path, key) raise KeyError('no cache to prune') - @nonatomic(attr_named('lock')) def _cache_prune_branch_to(self, fake_path, key = None): while True: 
try: diff -Nru pytagsfs-0.9.1/modules/pytagsfs/specialfile/__init__.py pytagsfs-0.9.2/modules/pytagsfs/specialfile/__init__.py --- pytagsfs-0.9.1/modules/pytagsfs/specialfile/__init__.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/specialfile/__init__.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,10 +1,18 @@ +# Copyright (c) 2008-2009 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + import os from pytagsfs.file import File from pytagsfs.util import ( return_errno, join_path_abs, - lazy_repr, ) from pytagsfs.exceptions import InvalidArgument from pytagsfs.debug import log_debug @@ -18,9 +26,9 @@ return getattr(super(SpecialFileFileSystemMixin, self), name)( fake_path, *args, **kwargs) log_debug( - u'delegated_class_method: name=%s, fake_path=%s: delegating to %s', - lazy_repr(name), - lazy_repr(fake_path), + u'delegated_class_method: name=%r, fake_path=%r: delegating to %s', + name, + fake_path, cls.__name__, ) return getattr(cls, name)(fake_path, *args, **kwargs) @@ -102,8 +110,8 @@ join_path_abs([cls.filename])] = cls cls.filesystem = self log_debug( - 'SpecialFileFileSystemMixin: _special_file_classes_by_path = %s', - lazy_repr(self._special_file_classes_by_path), + 'SpecialFileFileSystemMixin: _special_file_classes_by_path = %r', + self._special_file_classes_by_path, ) return super(SpecialFileFileSystemMixin, self).init() diff -Nru pytagsfs-0.9.1/modules/pytagsfs/specialfile/logfile.py pytagsfs-0.9.2/modules/pytagsfs/specialfile/logfile.py --- pytagsfs-0.9.1/modules/pytagsfs/specialfile/logfile.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/specialfile/logfile.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -8,7 +8,6 @@ # A copy of the license has been included in the COPYING file. import os, errno, stat, traceback, sys -from threading import RLock try: from functools import wraps @@ -17,11 +16,9 @@ from pytagsfs.util import ( now, - nonatomic, - attr_named, join_path_abs, ) -from pytagsfs.exceptions import PathNotFound, InvalidArgument +from pytagsfs.exceptions import InvalidArgument from pytagsfs.specialfile import SpecialFile @@ -38,7 +35,8 @@ def write(self, s): if not isinstance(s, str): raise AssertionError( - 'RingCharacterBuffer.write: argument should be a byte string.') + 'RingCharacterBuffer.write: argument should be a byte string.' 
+ ) self.buffer.extend(list(s)) self.len = self.len + len(s) @@ -67,8 +65,6 @@ class VirtualLogFile(SpecialFile): # class - lock = RLock() - filename = u'.log' encoding = 'utf-8' file_obj = RingCharacterBuffer(1024 * 1024) @@ -88,7 +84,6 @@ cls.file_obj.max_length = bytes @classmethod - @nonatomic(attr_named('lock')) def log_write(cls, s): if isinstance(s, unicode): s = s.encode(cls.encoding) @@ -98,13 +93,17 @@ @classmethod def ReadOnly(cls, filesystem, fake_path, flags, truncate_to): if truncate_to is not None: - raise InvalidArgument + raise InvalidArgument() return super(VirtualLogFile, cls).ReadOnly( - filesystem, fake_path, flags, truncate_to) + filesystem, + fake_path, + flags, + truncate_to, + ) @classmethod def ReadWrite(cls, filesystem, fake_path, flags, truncate_to): - raise InvalidArgument + raise InvalidArgument() ################################################################################ @@ -127,7 +126,6 @@ # ftruncate @classmethod - @nonatomic(attr_named('lock')) def getattr(cls, path): root_statinfo = cls.filesystem.getattr(os.path.sep) st_dev = root_statinfo.st_dev @@ -156,8 +154,6 @@ # open: inherited # opendir: not relevant - # file_obj.getvalue is nonatomic - @nonatomic(attr_named('lock')) def read(self, length, offset): try: return self.file_obj.getvalue()[offset:offset+length] diff -Nru pytagsfs-0.9.1/modules/pytagsfs/util.py pytagsfs-0.9.2/modules/pytagsfs/util.py --- pytagsfs-0.9.1/modules/pytagsfs/util.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/util.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -85,27 +85,8 @@ return result -def nonatomic(get_lock): - def decorator(wrapped): - @wraps(wrapped) - def fn(*args, **kwargs): - if hasattr(get_lock, '__call__'): - lock = get_lock(*args, **kwargs) - else: - lock = get_lock - lock.acquire() - try: - return wrapped(*args, **kwargs) - finally: - lock.release() - return fn - return decorator - - -def attr_named(name): - def get_attr_by_name(self, *args, **kwargs): - return getattr(self, name) - return get_attr_by_name +def ref_self(self, *args, **kwargs): + return self def rpartition(s, by): @@ -133,7 +114,8 @@ def get_obj_by_dotted_name(dotted_name): from sclapp.util import importName - modname, dot, objname = rpartition(dotted_name, '.') + # Note: both arguments to rpartition must be of same type (unicode, str). + modname, dot, objname = rpartition(dotted_name, type(dotted_name)('.')) mod = importName(modname) return getattr(mod, objname) @@ -224,18 +206,6 @@ return self.evaluator(*self.args, **self.kwargs) -def lazy_repr(obj): - return LazyByteString(repr, obj) - - -def lazy_str(obj): - return LazyByteString(str, obj) - - -def lazy_unicode(obj): - return LazyUnicodeString(unicode, obj) - - def ftruncate_path(path, length): # See unsafe_truncate to understand why this exists. f = open(path, 'r+') diff -Nru pytagsfs-0.9.1/modules/pytagsfs/values.py pytagsfs-0.9.2/modules/pytagsfs/values.py --- pytagsfs-0.9.1/modules/pytagsfs/values.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/modules/pytagsfs/values.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,3 +1,12 @@ +# Copyright (c) 2008 Forest Bond. +# This file is part of the pytagsfs software package. 
+# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + class Values(dict): def __init__(self, *args, **kwargs): super(Values, self).__init__(*args, **kwargs) diff -Nru pytagsfs-0.9.1/pytags.xml pytagsfs-0.9.2/pytags.xml --- pytagsfs-0.9.1/pytags.xml 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/pytags.xml 2009-12-27 18:02:09.000000000 +0100 @@ -12,8 +12,10 @@
forest@alittletooquiet.net
 2007
+ 2008
+ 2009
- 2007-12-11
+ 2009-12-04
diff -Nru pytagsfs-0.9.1/pytagsfs.xml pytagsfs-0.9.2/pytagsfs.xml
--- pytagsfs-0.9.1/pytagsfs.xml 2009-10-19 02:00:55.000000000 +0200
+++ pytagsfs-0.9.2/pytagsfs.xml 2009-12-27 18:02:09.000000000 +0100
@@ -12,8 +12,10 @@
forest@alittletooquiet.net
2007 + 2008 + 2009 - 2007-12-04 + 2009-12-14 @@ -225,8 +227,8 @@ filename name of the original file parent name of the original file's parent directory extension extension of the original file - number track number; concise (like 7) - NUMBER track number; two digits with leading zeros (like 07) + tracknumber track number; concise (like 7) + TRACKNUMBER track number; two digits with leading zeros (like 07) artist artist composer composer title track title @@ -432,7 +434,7 @@ 8 , - pytagfromfilename + pytags 1 diff -Nru pytagsfs-0.9.1/release pytagsfs-0.9.2/release --- pytagsfs-0.9.1/release 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/release 2009-12-27 18:02:09.000000000 +0100 @@ -1 +1 @@ -0.9.1 +0.9.2 diff -Nru pytagsfs-0.9.1/setup.py pytagsfs-0.9.2/setup.py --- pytagsfs-0.9.1/setup.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/setup.py 2009-12-27 18:02:09.000000000 +0100 @@ -3,7 +3,7 @@ # Author: Forest Bond # This file is in the public domain. -import os, sys, commands, glob, inspect +import os, sys, commands from distutils.command.build import build as _build from distutils.command.clean import clean as _clean from distutils.command.build_py import build_py @@ -19,23 +19,12 @@ sys.path.insert(0, modules_dir) sys.path.insert(0, project_dir) -from pytagsfs.fs import UMOUNT_COMMAND from tests.common import TEST_DATA_DIR ################################################################################ -def find_modules(package): - modules = [package] - for name in dir(package): - value = getattr(package, name) - if (inspect.ismodule(value)) and ( - value.__name__.rpartition('.')[0] == package.__name__): - modules.extend(find_modules(value)) - return modules - - class test(Command): description = 'run tests' user_options = [ @@ -54,23 +43,12 @@ self.tests = self.tests.split(',') def run(self): - if self.coverage: - import coverage - coverage.use_cache(0) - coverage.start() - - from tests import load, main, print_names - load() - - try: - if self.print_only: - print_names(self.tests) - else: - main(test_names = self.tests) - finally: - if self.coverage: - import pytagsfs - coverage.report(find_modules(pytagsfs)) + from tests import main + main( + test_names = self.tests, + print_only = self.print_only, + coverage = self.coverage, + ) ################################################################################ @@ -106,6 +84,8 @@ log.warn("failed to remove '%s'" % dirname) def clean_test_data(self): + from pytagsfs.fs import UMOUNT_COMMAND + try: dirs = os.listdir(TEST_DATA_DIR) except (IOError, OSError): diff -Nru pytagsfs-0.9.1/tests/__init__.py pytagsfs-0.9.2/tests/__init__.py --- pytagsfs-0.9.1/tests/__init__.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/tests/__init__.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -7,27 +7,7 @@ # # A copy of the license has been included in the COPYING file. 
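# Purely illustrative: the corrected manpage variables from the hunk above
# ("tracknumber"/"TRACKNUMBER") used in a format option.  The source and mount
# point paths are made up, and the command line is only assembled here, not
# executed.
import sys

args = [
    'pytagsfs',
    '-o', 'format=/%{artist}/%{TRACKNUMBER} - %t.%e',
    '/home/user/music',      # hypothetical source directory
    '/home/user/mnt',        # hypothetical mount point
]
sys.stdout.write(' '.join(args) + '\n')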
-import glob, os - -def get_test_modules(): - for name in glob.glob(os.path.join(os.path.dirname(__file__), '*.py')): - if name == '__init__': - continue - module = os.path.basename(name)[:-3] - yield module - -def load(): - for module in get_test_modules(): - __import__('tests', {}, {}, [module]) - -def run(**kwargs): - from tests.manager import manager - manager.run(**kwargs) def main(**kwargs): from tests.manager import manager manager.main(**kwargs) - -def print_names(test_names = None): - from tests.manager import manager - print '\n'.join(manager.get_names(test_names)) diff -Nru pytagsfs-0.9.1/tests/blackbox.py pytagsfs-0.9.2/tests/blackbox.py --- pytagsfs-0.9.1/tests/blackbox.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/tests/blackbox.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -11,13 +11,19 @@ from sclapp import locale -import sys, os, time, traceback, stat, shutil, errno +import sys, os, time, traceback, stat, shutil, errno, random, glob, string from subprocess import ( Popen, PIPE, + STDOUT, ) +from threading import Thread -from sclapp.shell import Shell, CommandFailed +from sclapp.shell import ( + Shell, + CommandFailed, + shinterp, +) from sclapp.processes import BackgroundCommand from pytagsfs.fs import UMOUNT_COMMAND @@ -35,6 +41,12 @@ from manager import manager +PACKAGE_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +MODULES_ROOT = os.path.join(PACKAGE_ROOT, 'modules') + +PYTHON_EXECUTABLE = sys.executable +PYTAGSFS_EXECUTABLE = os.path.join(PACKAGE_ROOT, 'pytagsfs') + UMOUNT_RETRY_COUNT = 5 UMOUNT_RETRY_DELAY = 5 @@ -42,6 +54,13 @@ user_encoding = locale.getpreferredencoding() +def get_returncode_output(args): + popen = Popen(args, stdout = PIPE, stderr = STDOUT) + output = popen.stdout.read() + returncode = popen.wait() + return returncode, output + + class ErrorOnError(Exception): def __init__(self, original_traceback, new_traceback): self.original_traceback = original_traceback @@ -62,8 +81,13 @@ mount_parameters = None +class FileNotTaggableError(Exception): + pass + + def add_blackbox_test_class(cls): manager.add_test_case_class(cls) + manager.add_test_case_class(mixin_singlethreaded(cls)) class AudioFormatMixin(object): @@ -80,12 +104,19 @@ return self.decode_audio_data(data) def decode_audio_data(self, data): - popen = Popen(self.decode_args, stdin = PIPE, stdout = PIPE) + popen = Popen( + self.decode_args, + stdin = PIPE, + stdout = PIPE, + stderr = PIPE, + ) stdoutdata, stderrdata = popen.communicate(data) if popen.returncode != 0: - raise AssertionError('subprocess failed') - if stderrdata is not None: - raise AssertionError() + raise AssertionError( + 'subprocess failed (%u, %r)' % (popen.returncode, stderrdata) + ) + if stderrdata: + sys.stderr.write(stderrdata) return stdoutdata def p(self, s): @@ -128,15 +159,26 @@ ] +class SingleThreadedMixin(object): + def get_extra_options(self): + return ['-s'] + + +def mixin_singlethreaded(cls): + newcls = type( + 'SingleThreaded%s' % cls.__name__, + (SingleThreadedMixin, cls), + {}, + ) + newcls.__module__ = cls.__module__ + return newcls + + class _BaseBlackboxTestCase(TestWithDir): test_dir_prefix = 'blk' sh = None - python_executable = None - pytagsfs_executable = None - pytags_executable = None - umount_cmd = None # Indicates that the 
TestCase requires that the system has non-broken mmap @@ -154,29 +196,18 @@ # test_method_name. self.test_method_name = methodName - super(_BaseBlackboxTestCase, self).__init__( - methodName = methodName) + super(_BaseBlackboxTestCase, self).__init__(methodName = methodName) def setUp(self): super(_BaseBlackboxTestCase, self).setUp() - package_root = os.path.dirname( - os.path.dirname(os.path.abspath(__file__))) - modules_root = os.path.join(package_root, 'modules') - shell = '/bin/sh' - import commands - status, output = commands.getstatusoutput('%s --version' % shell) - if (status == 0) and ('bash' in output): + returncode, output = get_returncode_output([shell, '--version']) + if (returncode == 0) and ('bash' in output): shell = '%s --posix --noediting' % shell - path_parts = [package_root] - if 'PATH' in os.environ: - path_parts.append(os.environ['PATH']) - os.environ['PATH'] = ':'.join(path_parts) - - python_path_parts = [modules_root] + python_path_parts = [MODULES_ROOT] if 'PYTHONPATH' in os.environ: python_path_parts.append(os.environ['PYTHONPATH']) os.environ['PYTHONPATH'] = ':'.join(python_path_parts) @@ -184,26 +215,45 @@ # sh inherits os.environ self.sh = Shell(shell = shell, delaybeforesend = 0) - self.python_executable = sys.executable - self.pytagsfs_executable = os.path.join(package_root, 'pytagsfs') - self.pytags_executable = os.path.join(package_root, 'pytags') - - status, output = commands.getstatusoutput( - 'test -x "$(which fusermount)"') - if status == 0: + returncode, output = get_returncode_output(['which', 'fusermount']) + if returncode == 0: self.umount_cmd = 'fusermount -u' else: - status, output = commands.getstatusoutput( - 'test -x "$(which umount)"') - if status == 0: + returncode, output = get_returncode_output(['which', 'umount']) + if returncode == 0: self.umount_cmd = 'umount' else: raise AssertionError( - 'no valid umount command could be determined') + 'no valid umount command could be determined' + ) + + # Some systems (Mac OSX, for example), have somewhat small limitations + # on the length of mount point paths. This caused problems in the past + # when the test name (which can be quite long) was used as the + # directory name. Consequently, our test directory ($PWD, here, + # created via TestWithDir) must have a reasonably short path. + + test_name = '%s.%s' % (self.__class__.__name__, self.test_method_name) + self.test_name_file = os.path.join(self.test_dir, 'test_name') + + # Store the test name in a file so that the test tree can be linked + # to the test that failed for debugging. + self.sh.execute('echo ? 
>?', test_name, self.test_name_file) + + self.wd = os.getcwd() + os.chdir(self.test_dir) + self.sh.pushd(self.test_dir) + + self.sh.execute('mkdir -p mnt source') + self.build_tree() def tearDown(self): + os.chdir(self.wd) + + self.sh.popd() self.sh.exit() del self.sh + super(_BaseBlackboxTestCase, self).tearDown() def p(self, s): @@ -212,62 +262,72 @@ def get_audio_data(self, filename): raise NotImplementedError - def get_tags(self, filename): + @classmethod + def get_tags(cls, filename): return MutagenFileMetaStore.tags_class(filename) - def get_tag(self, filename, tag): - return self.get_tags(filename)[tag] - - def set_tag(self, filename, tag, value): - tags = self.get_tags(filename) + @classmethod + def get_tag(cls, filename, tag): + return cls.get_tags(filename)[tag] + + @classmethod + def set_tag(cls, filename, tag, value): + tags = cls.get_tags(filename) + if tags is None: + raise FileNotTaggableError(filename) tags[tag] = value tags.save() def assertFilesHaveSameTags(self, filename1, filename2): self.assertEqual(self.get_tags(filename1), self.get_tags(filename2)) - def get_opts(self): - raise NotImplementedError - - def get_mount_parameters(self): - return {} + def get_options(self): + options = [ + '-o', + ','.join(self.get_mount_options()), + os.path.join(self.test_dir, 'source'), + os.path.join(self.test_dir, 'mnt'), + ] + options.extend(self.get_extra_options()) + return options - def mount(self): + def get_mount_options(self): mount_parameters = dict(self.get_mount_parameters()) - - extra_options = [] + mount_options = ['debug'] if mount_parameters.get('sourcetreerep', None): - extra_options.append( - 'sourcetreerep=%s' % mount_parameters['sourcetreerep']) + mount_options.append( + 'sourcetreerep=%s' % mount_parameters['sourcetreerep'] + ) if mount_parameters.get('sourcetreemon', None): - extra_options.append( - 'sourcetreemon=%s' % mount_parameters['sourcetreemon']) + mount_options.append( + 'sourcetreemon=%s' % mount_parameters['sourcetreemon'] + ) if mount_parameters.get('pathstore', None): - extra_options.append( - 'pathstore=%s' % mount_parameters['pathstore']) + mount_options.append( + 'pathstore=%s' % mount_parameters['pathstore'] + ) + mount_options.extend(self.get_extra_mount_options()) + return mount_options - extra_options.append(self.get_opts()) + def get_extra_options(self): + return [] - # Some systems (Mac OSX, for example), have somewhat small limitations - # on the length of mount point paths. This caused problems in the past - # when the test name (which can be quite long) was used as the - # directory name. Consequently, our test directory ($PWD, here, - # created via TestWithDir) must have a reasonably short path. + def get_extra_mount_options(self): + return [] - args = [ - self.python_executable, - self.pytagsfs_executable, - '-o', - 'debug,%s' % ','.join(extra_options), - os.path.join(self.test_dir, 'source'), - os.path.join(self.test_dir, 'mnt'), - ] + def get_mount_parameters(self): + return {} - self.sh.execute('mount_cmd="? ? ? ? ? 
?"', *args) + def mount(self): + args = [PYTHON_EXECUTABLE, PYTAGSFS_EXECUTABLE] + args.extend(self.get_options()) + + mount_cmd = shinterp.interpolate(' '.join(len(args) * '?'), *args) + self.sh.execute('mount_cmd=?', mount_cmd) self.sh.execute('echo "${mount_cmd}" >mount_cmd') self.filesystem_process = BackgroundCommand( - self.python_executable, + PYTHON_EXECUTABLE, args, stderr = 'logfile', stdout = 'logfile', @@ -284,40 +344,12 @@ os.stat('mnt') def umount(self): - e = None - try: - self.sh.execute(UMOUNT_COMMAND % '"${PWD}/mnt"') - except CommandFailed: - for i in range(UMOUNT_RETRY_COUNT): - time.sleep(UMOUNT_RETRY_DELAY) - try: - self.sh.execute(UMOUNT_COMMAND % '"${PWD}/mnt"') - except CommandFailed, e: - pass - else: - break - if e is not None: - raise - - def _init(self): - mount_parameters = self.get_mount_parameters() - - test_name = '%s.%s' % (self.__class__.__name__, self.test_method_name) - self.test_name_file = os.path.join(self.test_dir, 'test_name') - - # Store the test name in a file so that the test tree can be linked - # to the test that failed for debugging. - self.sh.execute('echo ? >?', test_name, self.test_name_file) + # FIXME: If umount cannot be completed, we should probably send SIGKILL + # to the filesystem process and make sure the umount succeeds after + # that. Otherwise, filesystem processes can persist uncleanly. - self.wd = os.getcwd() - os.chdir(self.test_dir) - self.sh.pushd(self.test_dir) - - self.sh.execute('mkdir -p mnt source') - self.build_tree() - - def _de_init(self): try: + # Filesystem process should still be running. self.assertTrue(self.filesystem_process.isRunning()) # Check that mnt is accessible: @@ -325,55 +357,35 @@ # Check for spurious tracebacks: self.assertFileDoesNotContain('logfile', 'Traceback') - except: - self._de_init_exc() - raise - - self.umount() - sleep_until(lambda: ( - not self.filesystem_process.isRunning() - )) + finally: + try: + self.sh.execute(UMOUNT_COMMAND % '"${PWD}/mnt"') + except CommandFailed: + for i in range(UMOUNT_RETRY_COUNT): + e = None + time.sleep(UMOUNT_RETRY_DELAY) + try: + self.sh.execute(UMOUNT_COMMAND % '"${PWD}/mnt"') + except CommandFailed, e: + pass + else: + break + if e is not None: + raise - self.assertFalse(self.filesystem_process.isRunning()) - self.assertEqual(self.filesystem_process.getExitStatus(), 0) - self.assertEqual(self.filesystem_process.getExitSignal(), None) + sleep_until(lambda: ( + not self.filesystem_process.isRunning() + )) + self.assertEqual(self.filesystem_process.getExitStatus(), 0) + self.assertEqual(self.filesystem_process.getExitSignal(), None) + def clean_up(self): self.sh.execute('rm -f mount_cmd') self.sh.execute('rm -f logfile') - self.sh.execute('rm -R mnt source') - self.sh.popd() + self.sh.execute('rmdir mnt') + self.sh.execute('rm -fR source') self.sh.execute('rm ?', self.test_name_file) - os.chdir(self.wd) - - def _de_init_exc(self): - try: - try: - original_traceback = traceback.format_exc() - try: - self.record_failure() - except Exception: - print >>sys.stderr, traceback.format_exc() - try: - self.umount() - except Exception: - new_traceback = traceback.format_exc() - raise ErrorOnError( - original_traceback, - new_traceback, - ) - raise - finally: - self.sh.popd() - finally: - os.chdir(self.wd) - - def record_failure(self): - try: - self.sh.execute( - '( find . 
-print0 | xargs -0 -n1 ls -lhd ) >listing 2>&1') - except CommandFailed: - print >>sys.stderr, traceback.format_exc() ################################################################################ @@ -387,25 +399,21 @@ class SimpleFilenamePatternTestCase(_BasePathPatternBlackboxTestCase): - def get_opts(self): - return 'format=/%f' + def get_extra_mount_options(self): + return ['format=/%f'] def test_read_dir(self): - self._init() self.mount() try: self.assertEqual( set(f for f in os.listdir(u'mnt') if not f.startswith('.')), set([u'c', u'y', u'•']), ) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_add_empty_file(self): - self._init() self.mount() try: self.sh.execute('touch source/a/b/d') @@ -415,61 +423,52 @@ )) self.assertFileExists('mnt/d') - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(SimpleFilenamePatternTestCase) class SourceExcludeTestCase(_BasePathPatternBlackboxTestCase): - def get_opts(self): - return 'format=/%f,srcfilter=!\/c$' + def get_extra_mount_options(self): + return ['format=/%f', 'srcfilter=!\/c$'] def test_source_exclusion(self): - self._init() self.mount() try: filenames = os.listdir(u'mnt') assert (u'c' not in filenames), repr(filenames) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(SourceExcludeTestCase) class DestExcludeTestCase(_BasePathPatternBlackboxTestCase): - def get_opts(self): - return 'format=/%f,srcfilter=!\/c$' + def get_extra_mount_options(self): + return ['format=/%f', 'srcfilter=!\/c$'] def test_dest_exclusion(self): - self._init() self.mount() try: filenames = os.listdir(u'mnt') assert (u'c' not in filenames), repr(filenames) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(DestExcludeTestCase) class _BasePathPatternUpdatesTestCase(_BasePathPatternBlackboxTestCase): - def get_opts(self): - return 'format=/%p/%f' + def get_extra_mount_options(self): + return ['format=/%p/%f'] class PathPatternSourceTreeUpdatesTestCase(_BasePathPatternUpdatesTestCase): def test_update_file_contents(self): - self._init() self.mount() try: content = 'foo\n' @@ -487,14 +486,11 @@ )) self.assertFileContent('mnt/b/c', content) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_update_mtime(self): - self._init() self.mount() try: stat_a_src = os.stat('source/a/b/c') @@ -525,14 +521,11 @@ assert stat_a_src.st_mtime < stat_b_src.st_mtime assert stat_a_dst.st_mtime < stat_b_dst.st_mtime - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_update_mode(self): - self._init() self.mount() try: stat_a_src = os.stat('source/a/b/c') @@ -561,14 +554,11 @@ self.assertEqual(mode_b_src, 0600) self.assertEqual(mode_b_dst, 0600) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_simple_rename(self): - self._init() self.mount() try: os.makedirs('source/m/n') @@ -580,11 +570,9 @@ self.assertFileDoesNotExist('mnt/b/c') self.assertFileExists('mnt/n/o') - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(PathPatternSourceTreeUpdatesTestCase) @@ -593,21 +581,17 @@ class 
PathPatternDestTreeUpdatesTestCase(_BasePathPatternUpdatesTestCase): def test_write_truncate(self): - self._init() self.mount() try: f = open('mnt/b/c', 'w') f.write('foo\n') f.close() self.assertFileContent('mnt/b/c', 'foo\n') - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_append(self): - self._init() self.mount() try: f = open('mnt/b/c', 'w') @@ -619,14 +603,11 @@ f.write('buz\n') f.close() self.assertFileContent('mnt/b/c', 'foo\nbar\nbaz\nbuz\n') - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_overwrite_section(self): - self._init() self.mount() try: f = open('mnt/b/c', 'w') @@ -639,18 +620,14 @@ os.write(fd, 'biz\n') os.close(fd) self.assertFileContent('mnt/b/c', 'foo\nbiz\nbaz\n') - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_ftruncate(self): initial = 'foo\nbar\nbaz\nbiz\nbang\nboom' final = initial[:8] - self._init() - f = open('source/a/m', 'w') f.write(initial) f.close() @@ -669,17 +646,13 @@ f.close() self.assertFileContent('mnt/a/m', final) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_seek_to_end(self): content = 'foo\nbar\nbaz\nbiz\nbang\nboom' - self._init() - f = open('source/a/m', 'w') try: f.write(content) @@ -699,19 +672,15 @@ f.close() self.assertFileContent('mnt/a/m', content) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_write_past_end_and_seek_to_end(self): content = 'foo\nbar\n' last = 'baz\n' final = content + last - self._init() - f = open('source/a/m', 'w') try: f.write(content) @@ -735,14 +704,11 @@ f.close() self.assertFileContent('mnt/a/m', final) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_update_mtime(self): - self._init() self.mount() try: stat_a_src = os.stat('source/a/b/c') @@ -768,14 +734,11 @@ assert stat_a_src.st_mtime < stat_b_src.st_mtime assert stat_a_dst.st_mtime < stat_b_dst.st_mtime - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_update_mode(self): - self._init() self.mount() try: stat_a_src = os.stat('source/a/b/c') @@ -799,11 +762,9 @@ self.assertEqual(mode_b_src, 0600) self.assertEqual(mode_b_dst, 0600) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(PathPatternDestTreeUpdatesTestCase) @@ -828,13 +789,12 @@ finally: f.close() - def get_opts(self): - return 'format=/%f' + def get_extra_mount_options(self): + return ['format=/%f'] class TruncateTestCase(_BaseOperationTestCase): def test_read_from_read_only_file(self): - self._init() self.mount() try: # We use unsafe_truncate to guarantee use of truncate, not @@ -848,14 +808,11 @@ # truncate should not affect the source file even now, because the # fake path has not been opened for writing. self.assertFileContent(self.source_file, self.content) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_read_from_read_write_file(self): - self._init() self.mount() try: # We use unsafe_truncate to guarantee use of truncate, not @@ -870,11 +827,9 @@ # truncate should affect the source file now because the fake path # was opened for writing. 
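# The two truncate tests above hinge on pytagsfs-specific behavior (truncation
# only reaches the source file once the fake path has been opened for writing),
# which needs a mounted filesystem to observe.  The snippet below only shows
# the underlying ftruncate mechanics the tests rely on, against a throwaway
# temporary file rather than a mount point.
import os, tempfile

fd, path = tempfile.mkstemp()
try:
    try:
        os.write(fd, b'foo\nbar\n')
        os.ftruncate(fd, 4)          # truncate through the writable descriptor
    finally:
        os.close(fd)
    f = open(path, 'rb')
    try:
        assert f.read() == b'foo\n'
    finally:
        f.close()
finally:
    os.remove(path)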
self.assertFileContent(self.source_file, self.content[:4]) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(TruncateTestCase) add_blackbox_test_class(mixin_unicode(TruncateTestCase)) @@ -882,7 +837,6 @@ class FtruncateTestCase(_BaseOperationTestCase): def test(self): - self._init() self.mount() try: f = open(self.dest_file.encode(user_encoding), 'rb+') @@ -892,11 +846,9 @@ self.assertFileContent(self.source_file, self.content[:4]) finally: f.close() - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(FtruncateTestCase) add_blackbox_test_class(mixin_unicode(FtruncateTestCase)) @@ -906,8 +858,8 @@ class _BaseSourceTreeUpdatesTestCase(_BaseBlackboxTestCase): - def get_opts(self): - return 'format=/%{artist} - %t.%e' + def get_extra_mount_options(self): + return ['format=/%{artist} - %t.%e'] def build_tree(self): self.data_file = self.p(os.path.join(DATA_DIR, 'silence.ext')) @@ -916,7 +868,6 @@ self.set_tag(self.p('source/foo.ext'), 'title', self.p('baz')) def test_tag_change_causing_path_change(self): - self._init() self.mount() try: self.set_tag(self.p('source/foo.ext'), 'artist', self.p('qux')) @@ -931,14 +882,11 @@ ) self.assertFilesHaveSameAudioContent( self.data_file, self.p('mnt/qux - baz.ext')) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_tag_change_causing_new_fake_path(self): - self._init() self.set_tag(self.p('source/foo.ext'), 'artist', []) self.mount() try: @@ -955,11 +903,9 @@ ) self.assertFilesHaveSameAudioContent( self.data_file, self.p('mnt/qux - baz.ext')) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() class OggSourceTreeUpdatesTestCase(OggMixin, _BaseSourceTreeUpdatesTestCase): @@ -984,8 +930,8 @@ class _BaseDestTreeUpdatesTestCase(_BaseBlackboxTestCase): - def get_opts(self): - return 'format=/%{artist} - %t.%e' + def get_extra_mount_options(self): + return ['format=/%{artist} - %t.%e'] def build_tree(self): self.data_file = self.p(os.path.join(DATA_DIR, 'silence.ext')) @@ -994,7 +940,6 @@ self.set_tag(self.p('source/foo.ext'), 'title', self.p('baz')) def test_tag_change_causing_path_change(self): - self._init() self.mount() try: self.set_tag(self.p('mnt/bar - baz.ext'), 'artist', self.p('qux')) @@ -1004,14 +949,11 @@ ) self.assertFilesHaveSameAudioContent( self.data_file, self.p('mnt/qux - baz.ext')) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_remove_file(self): - self._init() self.mount() try: os.unlink(self.p('source/foo.ext')) @@ -1021,11 +963,9 @@ )) self.assertEqual(os.listdir('mnt'), ['.log']) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() class OggDestTreeUpdatesTestCase(OggMixin, _BaseDestTreeUpdatesTestCase): @@ -1061,11 +1001,10 @@ self.set_tag(self.p('source/foo.ext'), 'artist', self.p('bar')) self.set_tag(self.p('source/foo.ext'), 'title', self.p('baz')) - def get_opts(self): - return 'format=/%a/%t.%e' + def get_extra_mount_options(self): + return ['format=/%a/%t.%e'] def test_simple_rename(self): - self._init() self.mount() try: os.rename( @@ -1079,14 +1018,11 @@ tags = self.get_tags(self.p(os.path.join('mnt', 'bar', 'qux.ext'))) self.assertEqual(tags['artist'], [self.p('bar')]) self.assertEqual(tags['title'], 
[self.p('qux')]) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_rename_across_directories(self): - self._init() self.mount() try: os.mkdir(self.p(os.path.join('mnt', 'qux'))) @@ -1101,14 +1037,11 @@ tags = self.get_tags(self.p(os.path.join('mnt', 'qux', 'baz.ext'))) self.assertEqual(tags['artist'], [self.p('qux')]) self.assertEqual(tags['title'], [self.p('baz')]) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_directory_rename(self): - self._init() self.mount() try: os.rename( @@ -1122,14 +1055,11 @@ tags = self.get_tags(self.p(os.path.join('mnt', 'qux', 'baz.ext'))) self.assertEqual(tags['artist'], [self.p('qux')]) self.assertEqual(tags['title'], [self.p('baz')]) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_invalid_rename(self): - self._init() self.mount() try: try: @@ -1139,11 +1069,9 @@ ) except OSError, e: self.assertEqual(e.errno, errno.EINVAL) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() class OggRenameTestCase(OggMixin, _BaseRenameTestCase): @@ -1171,8 +1099,8 @@ class MkdirRmdirTagsTestCase(_BaseBlackboxTestCase): - def get_opts(self): - return 'format=/%g/%{artist}/%t.%e' + def get_extra_mount_options(self): + return ['format=/%g/%{artist}/%t.%e'] def build_tree(self): data_file = os.path.join(DATA_DIR, 'silence.flac') @@ -1192,7 +1120,6 @@ self.set_tag(file, 'genre', genre) def test_mkdir_rmdir(self): - self._init() self.mount() try: dir = 'mnt/Honky Tonk' @@ -1200,14 +1127,11 @@ self.assertDirectoryExists(dir) self.sh.execute('rmdir ?', dir) self.assertDirectoryDoesNotExist(dir) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_rename_empty_dir(self): - self._init() self.mount() try: dir = 'mnt/Honky Tonk' @@ -1220,14 +1144,11 @@ self.assertRaises(CommandFailed, self.sh.execute, 'ls ?/*', dir2) self.sh.execute('rmdir ?', dir2) self.assertDirectoryDoesNotExist(dir2) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_move_files_into_new_directory(self): - self._init() self.mount() try: self.sh.execute('mkdir mnt/Rock/biz') @@ -1241,11 +1162,9 @@ for filename in filenames: tags = self.get_tags(os.path.join(u'mnt/Rock/biz', filename)) self.assertEqual(tags['artist'], [u'biz']) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(MkdirRmdirTagsTestCase) @@ -1254,40 +1173,48 @@ class StatvfsTestCase(_BasePathPatternBlackboxTestCase): - copied_attributes = ( - 'f_bsize', - 'f_frsize', - 'f_bavail', - 'f_files', - 'f_ffree', - 'f_favail', - 'f_namemax', - ) - other_attributes = ( - 'f_fsid', - 'f_flag', - ) + def get_extra_mount_options(self): + return ['format=/%f'] - def get_opts(self): - return 'format=/%f' + def assertAlmostEqualInt(self, a, b): + self.assertTrue(int(0.9 * b) <= a <= int(1.1 * b)) def test(self): - self._init() + # Some statvfs values may be slightly different because they may have + # changed between statvfs calls on source and mnt (mostly those related + # to free space). For these, we use assertAlmostEqualInt instead of + # assertEqual. 
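# assertAlmostEqualInt above accepts any value within roughly +/-10% of the
# reference, since free-space counters can drift between the two statvfs
# calls.  A standalone illustration (the numbers are made up for the example):

def almost_equal_int(a, b):
    return int(0.9 * b) <= a <= int(1.1 * b)

assert almost_equal_int(10500, 10000)        # within 10%: accepted
assert not almost_equal_int(12000, 10000)    # 20% off: rejected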
+ self.mount() try: statvfs_source = os.statvfs('source') statvfs_mnt = os.statvfs('mnt') - for name in self.copied_attributes: - self.assertEqual( - getattr(statvfs_source, name), - getattr(statvfs_mnt, name), - ) + self.assertEqual(statvfs_source.f_bsize, statvfs_mnt.f_bsize) + self.assertEqual(statvfs_source.f_frsize, statvfs_mnt.f_frsize) + self.assertEqual(statvfs_source.f_blocks, statvfs_mnt.f_blocks) + self.assertAlmostEqualInt( + statvfs_mnt.f_bfree, + statvfs_source.f_bfree, + ) + self.assertAlmostEqualInt( + statvfs_mnt.f_bavail, + statvfs_source.f_bavail, + ) + self.assertEqual(statvfs_source.f_files, statvfs_mnt.f_files) + self.assertAlmostEqualInt( + statvfs_mnt.f_ffree, + statvfs_source.f_ffree, + ) + self.assertAlmostEqualInt( + statvfs_mnt.f_favail, + statvfs_source.f_favail, + ) + #f_fsid is not supported by os.statvfs_result self.assertTrue(type(statvfs_mnt.f_flag), int) - except: - self._de_init_exc() - raise - else: - self._de_init() + self.assertEqual(statvfs_source.f_namemax, statvfs_mnt.f_namemax) + finally: + self.umount() + self.clean_up() add_blackbox_test_class(StatvfsTestCase) @@ -1296,31 +1223,25 @@ class LogFileTestCase(_BasePathPatternBlackboxTestCase): - def get_opts(self): - return 'verbosity=debug' + def get_extra_mount_options(self): + return ['verbosity=debug'] def test_log_file_exists(self): - self._init() self.mount() try: self.assertFileExists('mnt/.log') - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() def test_log_file_receives_messages(self): - self._init() self.mount() try: from pytagsfs import __version__ as version self.assertFileContains('mnt/.log', 'pytagsfs version %s' % version) - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(LogFileTestCase) @@ -1328,11 +1249,10 @@ class LogSizeTestCase(_BasePathPatternBlackboxTestCase): logsize = 8 - def get_opts(self): - return 'verbosity=debug,logsize=%u' % self.logsize + def get_extra_mount_options(self): + return ['verbosity=debug', 'logsize=%u' % self.logsize] def test_log_size(self): - self._init() self.mount() try: f = open('mnt/.log', 'r') @@ -1343,21 +1263,18 @@ self.logsize, length)) finally: f.close() - except: - self._de_init_exc() - raise - else: - self._de_init() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(LogSizeTestCase) class ProfilingTestCase(_BasePathPatternBlackboxTestCase): - def get_opts(self): - return 'profile' + def get_extra_mount_options(self): + return ['profile'] def test(self): - self._init() self.mount() try: f = open('mnt/.log', 'r') @@ -1372,12 +1289,10 @@ break parts = line.split() self.assertEqual(parts[1], 'PROF') - self.assertIn(parts[3], Fuse.filesystem_method_names) - except: - self._de_init_exc() - raise - else: - self._de_init() + self.assertTrue(hasattr(Fuse, parts[3])) + finally: + self.umount() + self.clean_up() add_blackbox_test_class(ProfilingTestCase) @@ -1390,8 +1305,8 @@ file_obj = None map = None - def get_opts(self): - return 'format=/%f' + def get_extra_mount_options(self): + return ['format=/%f'] def build_tree(self): f = open('source/foo.txt', 'w') @@ -1429,14 +1344,6 @@ self.file_obj.close() del self.file_obj - def _de_init(self): - self._de_init_mmap() - super(_BaseMmapTestCase, self)._de_init() - - def _de_init_exc(self): - self._de_init_mmap() - super(_BaseMmapTestCase, self)._de_init_exc() - class ReadOnlyMmapTestCase(_BaseMmapTestCase): def _open_file(self): @@ -1451,16 
+1358,16 @@ ) def test_read(self): - self._init() self.mount() try: self._init_mmap() - self.assertEqual(self.map[:], self.content) - except: - self._de_init_exc() - raise - else: - self._de_init() + try: + self.assertEqual(self.map[:], self.content) + finally: + self._de_init_mmap() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(ReadOnlyMmapTestCase) @@ -1470,19 +1377,19 @@ return open('mnt/foo.txt', 'rb+') def test_range_assignment(self): - self._init() self.mount() try: self._init_mmap() - content = self.map[:] - self.assertEqual(content, self.content) - self.map[0:4] = 'baz\n' - self.assertEqual(self.map[:], 'baz\nbar\n') - except: - self._de_init_exc() - raise - else: - self._de_init() + try: + content = self.map[:] + self.assertEqual(content, self.content) + self.map[0:4] = 'baz\n' + self.assertEqual(self.map[:], 'baz\nbar\n') + finally: + self._de_init_mmap() + finally: + self.umount() + self.clean_up() class CopyOnWriteReadWriteMmapTestCase(_BaseReadWriteMmapTestCase): @@ -1523,19 +1430,19 @@ ) def test_init_mmap(self): - self._init() self.mount() try: try: - self._init_mmap() - except EnvironmentError, e: - if e.errno != errno.ENODEV: - raise - except: - self._de_init_exc() - raise - else: - self._de_init() + try: + self._init_mmap() + except EnvironmentError, e: + if e.errno != errno.ENODEV: + raise + finally: + self._de_init_mmap() + finally: + self.umount() + self.clean_up() if PLATFORM in ('Linux', 'Darwin', 'BSD'): @@ -1556,49 +1463,216 @@ expected_content = 'foo\nbaz\nbar\n' - self._init() self.mount() try: self._init_mmap() - insert_bytes(self.file_obj, 4, 4) - self.file_obj.seek(4) - self.file_obj.write('baz\n') - self.file_obj.seek(0) - content = self.file_obj.read() - self.assertEqual(content, expected_content) - - self.file_obj.close() - self.file_obj = self._open_file() - content = self.file_obj.read() - self.assertEqual(content, expected_content) - except: - self._de_init_exc() - raise - else: - self._de_init() + try: + insert_bytes(self.file_obj, 4, 4) + self.file_obj.seek(4) + self.file_obj.write('baz\n') + self.file_obj.seek(0) + content = self.file_obj.read() + self.assertEqual(content, expected_content) + + self.file_obj.close() + self.file_obj = self._open_file() + content = self.file_obj.read() + self.assertEqual(content, expected_content) + finally: + self._de_init_mmap() + finally: + self.umount() + self.clean_up() def test_delete_bytes(self): from mutagen._util import delete_bytes expected_content = 'foo\n' - self._init() self.mount() try: self._init_mmap() - delete_bytes(self.file_obj, 4, 4) - self.file_obj.seek(0) - content = self.file_obj.read() - self.assertEqual(content, expected_content) - self.file_obj.close() - - self.file_obj = self._open_file() - content = self.file_obj.read() - self.assertEqual(content, expected_content) - except: - self._de_init_exc() - raise - else: - self._de_init() + try: + delete_bytes(self.file_obj, 4, 4) + self.file_obj.seek(0) + content = self.file_obj.read() + self.assertEqual(content, expected_content) + self.file_obj.close() + + self.file_obj = self._open_file() + content = self.file_obj.read() + self.assertEqual(content, expected_content) + finally: + self._de_init_mmap() + finally: + self.umount() + self.clean_up() add_blackbox_test_class(MutagenMmapTestCase) + + +class FuzzThread(Thread): + _killed = None + + def kill(self): + self._killed = True + + def sleep_random(self): + time.sleep(random.random()) + + def _select_random_path(self, root, predicate = (lambda path: True)): + root = 
root.rstrip('/') + all_files = [] + for dirpath, dirnames, filenames in os.walk(root): + paths = [os.path.join(dirpath, dirname) for dirname in dirnames] + paths.extend([ + os.path.join(dirpath, filename) for filename in filenames + ]) + for path in paths: + if predicate(path): + all_files.append(path) + return random.choice(all_files) + + def select_random_virtual_file(self): + return self._select_random_path( + 'mnt', + (lambda path: ((path != 'mnt/.log') and os.path.isfile(path))), + ) + + def select_random_real_file(self): + return self._select_random_path( + 'source', + (lambda path: os.path.isfile(path)), + ) + + def select_random_virtual_directory(self): + return self._select_random_path( + 'mnt', + (lambda path: os.path.isdir(path)), + ) + + def select_random_real_directory(self): + return self._select_random_path( + 'source', + (lambda path: os.path.isdir(path)), + ) + + def run(self): + while True: + self.sleep_random() + self.do_action() + if self._killed: + break + + +class StatFuzzThread(FuzzThread): + def do_action(self): + filename = self.select_random_virtual_file() + try: + os.stat(filename) + except (OSError, IOError): + pass + + +class ReadFuzzThread(FuzzThread): + def do_action(self): + filename = self.select_random_virtual_file() + try: + f = open(filename, 'r') + try: + f.seek(0, 2) + size = f.tell() + f.seek(random.randint(0, 2 * size)) + f.read(random.randint(0, 2 * size)) + finally: + f.close() + except (OSError, IOError): + pass + + +class UtimeFuzzThread(FuzzThread): + def do_action(self): + filename = self.select_random_virtual_file() + try: + os.utime(filename, None) + except (OSError, IOError): + pass + + +class SetTagFuzzThread(FuzzThread): + tags = ('artist', 'album', 'title') + + def do_action(self): + filename = self.get_filename() + try: + _BaseBlackboxTestCase.set_tag( + filename, + random.choice(self.tags), + ConcurrentFuzzTestCase.get_random_string(), + ) + except (OSError, IOError, FileNotTaggableError): + pass + + +class SetTagOnVirtualFileFuzzThread(SetTagFuzzThread): + def get_filename(self): + return self.select_random_virtual_file() + + +class SetTagOnRealFileFuzzThread(SetTagFuzzThread): + def get_filename(self): + return self.select_random_real_file() + + +class ConcurrentFuzzTestCase(_BaseBlackboxTestCase): + num_threads = 10 + duration = 10 + thread_classes = ( + StatFuzzThread, + ReadFuzzThread, + UtimeFuzzThread, + SetTagOnVirtualFileFuzzThread, + SetTagOnRealFileFuzzThread, + ) + + def get_opts(self): + return 'format=/%a/%l/%t.%e' + + @classmethod + def get_random_string(cls): + return ''.join([random.choice(string.letters) for i in range(16)]) + + def build_tree(self): + for source_path in glob.glob(os.path.join(DATA_DIR, '*')): + dest_path = os.path.join( + 'source', + os.path.basename(source_path), + ) + shutil.copy(source_path, dest_path) + try: + self.set_tag(dest_path, 'artist', self.get_random_string()) + self.set_tag(dest_path, 'album', self.get_random_string()) + self.set_tag(dest_path, 'title', self.get_random_string()) + except FileNotTaggableError: + # Note: file is left in place even though it won't appear in + # mount tree. 
+ pass + + def test(self): + self.mount() + try: + threads = [] + for i in range(self.num_threads): + thread_cls = random.choice(self.thread_classes) + threads.append(thread_cls()) + for thread in threads: + thread.start() + time.sleep(self.duration) + for thread in threads: + thread.kill() + thread.join() + finally: + self.umount() + self.clean_up() + +add_blackbox_test_class(ConcurrentFuzzTestCase) diff -Nru pytagsfs-0.9.1/tests/common.py pytagsfs-0.9.2/tests/common.py --- pytagsfs-0.9.1/tests/common.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/tests/common.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright (c) 2005-2008 Forest Bond. +# Copyright (c) 2005-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -168,7 +168,7 @@ finally: f.close() assert (s in content), ( - u'Expected to find: %s\nGot: %s' % (repr(s), repr(content)) + u'Expected to find %r in file %r.' % (s, filename) ) def assertFileDoesNotContain(self, filename, s, mode = 'r'): @@ -178,7 +178,7 @@ finally: f.close() assert (s not in content), ( - u'Expected to find: %s\nGot: %s' % (repr(s), repr(content)) + u'Expected not to find %r in file %r.' % (s, filename) ) def _assertFilePredicate(self, filename1, filename2, predicate, mode = 'r'): diff -Nru pytagsfs-0.9.1/tests/fs.py pytagsfs-0.9.2/tests/fs.py --- pytagsfs-0.9.1/tests/fs.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/tests/fs.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,3 +1,12 @@ +# Copyright (c) 2007-2009 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. 
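# ConcurrentFuzzTestCase above starts a random mix of fuzz threads, lets them
# hammer the mounted tree for `duration` seconds, and then stops them
# cooperatively: kill() only sets a flag, and each thread checks it between
# actions, so join() returns once the current action completes.  A trimmed-down
# sketch of that start/kill/join pattern (Worker is hypothetical, standing in
# for the FuzzThread subclasses):

import random, time
from threading import Thread

class Worker(Thread):
    _killed = False

    def kill(self):
        self._killed = True

    def run(self):
        while not self._killed:
            time.sleep(random.random())  # the real threads call do_action() here

def fuzz(thread_classes, num_threads = 10, duration = 10):
    threads = [random.choice(thread_classes)() for i in range(num_threads)]
    for thread in threads:
        thread.start()
    time.sleep(duration)
    for thread in threads:
        thread.kill()
        thread.join()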
+ import os, errno, stat, time from pytagsfs.fs import PyTagsFileSystem @@ -8,7 +17,7 @@ from common import mixin_unicode, TestWithDir -encoding = 'utf-8' +ENCODING = 'utf-8' class _BaseFileSystemTestCase(TestWithDir): @@ -69,7 +78,7 @@ 'pytagsfs', '-o', 'format=/%f', - self.source_dir.encode(encoding), + self.source_dir.encode(ENCODING), 'mnt', ] @@ -87,7 +96,7 @@ self.filename = self.p('foo') self.source_file = os.path.join(self.source_dir, self.filename) self.dest_file = join_path_abs([self.filename]) - self.dest_file_encoded = self.dest_file.encode(encoding) + self.dest_file_encoded = self.dest_file.encode(ENCODING) self.content = self.get_content() f = open(self.source_file, 'w') @@ -115,7 +124,7 @@ 'pytagsfs', '-o', 'format=/%f/%f', - self.source_dir.encode(encoding), + self.source_dir.encode(ENCODING), 'mnt', ] @@ -360,7 +369,7 @@ class MkdirTestCase(_BaseDirectoryOperationTestCase): def test(self): - path = self.p('/foo').encode(encoding) + path = self.p('/foo').encode(ENCODING) self.filesystem.mkdir(path, 0) stat_result = self.filesystem.getattr(path) assert stat.S_ISDIR(stat_result.st_mode) @@ -386,7 +395,7 @@ def test_non_existent_file(self): try: self.filesystem.open( - self.p('/qux').encode(encoding), os.O_RDONLY) + self.p('/qux').encode(ENCODING), os.O_RDONLY) except FuseError, e: self.assertEqual(e.errno, errno.ENOENT) else: @@ -442,7 +451,7 @@ self.assertEqual(type(entry), str) self.assertEqual( set(entries), - set(['.log'] + [f.encode(encoding) for f in self.files]), + set(['.log'] + [f.encode(ENCODING) for f in self.files]), ) manager.add_test_case_class(ReaddirTestCase) @@ -501,20 +510,20 @@ 'metastores=pytagsfs.metastore.testlines.TestLinesMetaStore,' 'format=/%a' ), - self.source_dir.encode(encoding), + self.source_dir.encode(ENCODING), 'mnt', ] def get_content(self): - return self.p('foo').encode(encoding) + return '%s\n' % self.p('foo').encode(ENCODING) def test(self): - qux = self.p(u'qux').encode(encoding) - path = '/%s'.encode(encoding) % qux + qux = self.p(u'qux').encode(ENCODING) + path = '/%s'.encode(ENCODING) % qux self.filesystem.rename(self.dest_file_encoded, path) self.assertVirtualFileContent( self.dest_file_encoded, - ''.join([qux, self.content[len(qux):], '\n']), + ''.join([qux, self.content[len(qux):]]), ) manager.add_test_case_class(RenameTestCase) @@ -523,7 +532,7 @@ class RmdirTestCase(_BaseDirectoryOperationTestCase): def test(self): - path = self.p('/foo').encode(encoding) + path = self.p('/foo').encode(ENCODING) self.filesystem.mkdir(path, 0) self.filesystem.rmdir(path) try: @@ -570,13 +579,13 @@ 'pytagsfs', '-o', 'format=/%f/%f', - self.source_dir.encode(encoding), + self.source_dir.encode(ENCODING), 'mnt', ] def test(self): statfs_result = self.filesystem.statfs() - statvfs_result = os.statvfs(self.source_dir.encode(encoding)) + statvfs_result = os.statvfs(self.source_dir.encode(ENCODING)) for name in self.copied_attributes: self.assertEqual( getattr(statfs_result, name), @@ -683,6 +692,40 @@ finally: self.filesystem.release(self.dest_file_encoded, flags, fh) + def test_truncate_open_rdonly_open_wronly_twice(self): + self.filesystem.truncate(self.dest_file_encoded, 4) + fh_rdonly = self.filesystem.open(self.dest_file_encoded, os.O_RDONLY) + try: + fh_wronly1 = self.filesystem.open( + self.dest_file_encoded, + os.O_WRONLY, + ) + try: + + fh_wronly2 = self.filesystem.open( + self.dest_file_encoded, + os.O_WRONLY, + ) + self.filesystem.release( + self.dest_file_encoded, + os.O_WRONLY, + fh_wronly2, + ) + + finally: + self.filesystem.release( + 
self.dest_file_encoded, + os.O_WRONLY, + fh_wronly1, + ) + + finally: + self.filesystem.release( + self.dest_file_encoded, + os.O_RDONLY, + fh_rdonly, + ) + manager.add_test_case_class(TruncateTestCase) manager.add_test_case_class(mixin_unicode(TruncateTestCase)) @@ -725,11 +768,155 @@ fh = self.filesystem.open(self.dest_file_encoded, flags) try: self.filesystem.write( - self.dest_file_encoded, content, 0, fh) - read_content = self.filesystem.read(self.dest_file_encoded, 1024, 0, fh) + self.dest_file_encoded, + content, + 0, + fh, + ) + read_content = self.filesystem.read( + self.dest_file_encoded, + 1024, + 0, + fh, + ) finally: self.filesystem.release(self.dest_file_encoded, flags, fh) self.assertEqual(content, read_content) manager.add_test_case_class(WriteTestCase) manager.add_test_case_class(mixin_unicode(WriteTestCase)) + + +### + + +class FrozenPathTestCase(_BaseSingleFileOperationTestCase): + def get_argv(self): + return [ + 'pytagsfs', + '-o', + ( + 'metastores=pytagsfs.metastore.testlines.TestLinesMetaStore,' + 'format=/%a' + ), + self.source_dir.encode(ENCODING), + 'mnt', + ] + + def get_content(self): + return '%s\n' % self.p('foo').encode(ENCODING) + + def test_open_writable_once(self): + flags = os.O_RDWR + qux = self.p(u'qux').encode(ENCODING) + + fh = self.filesystem.open(self.dest_file_encoded, flags) + try: + + # File should be reachable via old fake path. + self.filesystem.getattr(self.dest_file_encoded) + + # Write the new content -- will eventually cause the file's fake + # path to change. + self.filesystem.write( + self.dest_file_encoded, + '%s\n' % qux, + 0, + fh, + ) + + # The file can now be reached via two different fake paths. + # One is frozen, the other is the new fake path. + self.filesystem.getattr(self.dest_file_encoded) + self.filesystem.getattr('/%s' % qux) + + finally: + self.filesystem.release(self.dest_file_encoded, flags, fh) + + # Now the file can only be reached via the new fake path. + self.assertRaises( + FuseError, + self.filesystem.getattr, + self.dest_file_encoded, + ) + self.filesystem.getattr('/%s' % qux) + + def test_open_writable_twice(self): + flags = os.O_RDWR + qux = self.p(u'qux').encode(ENCODING) + + fh1 = self.filesystem.open(self.dest_file_encoded, flags) + try: + + fh2 = self.filesystem.open(self.dest_file_encoded, flags) + try: + + # File should be reachable via old fake path. + self.filesystem.getattr(self.dest_file_encoded) + + # Write the new content -- will eventually cause the file's + # fake path to change. + self.filesystem.write( + self.dest_file_encoded, + '%s\n' % qux, + 0, + fh2, + ) + + # The file can now be reached via two different fake paths. + # One is frozen, the other is the new fake path. + self.filesystem.getattr(self.dest_file_encoded) + self.filesystem.getattr('/%s' % qux) + + finally: + self.filesystem.release(self.dest_file_encoded, flags, fh2) + + # The file can now be reached via two different fake paths. + # One is frozen, the other is the new fake path. + self.filesystem.getattr(self.dest_file_encoded) + self.filesystem.getattr('/%s' % qux) + + finally: + self.filesystem.release(self.dest_file_encoded, flags, fh1) + + # Now the file can only be reached via the new fake path. 
+        self.assertRaises(
+            FuseError,
+            self.filesystem.getattr,
+            self.dest_file_encoded,
+        )
+        self.filesystem.getattr('/%s' % qux)
+
+manager.add_test_case_class(FrozenPathTestCase)
+manager.add_test_case_class(mixin_unicode(FrozenPathTestCase))
+
+
+class GetattrWithSymlinkTestCase(_BasePyTagsFileSystemTestCase):
+    filename = None
+    source_file = None
+    dest_file = None
+    dest_file_encoded = None
+
+    def build_source_tree(self):
+        super(GetattrWithSymlinkTestCase, self).build_source_tree()
+        self.filename = self.p('foo')
+        self.source_file = os.path.join(self.source_dir, self.filename)
+        self.dest_file = join_path_abs([self.filename])
+        self.dest_file_encoded = self.dest_file.encode(ENCODING)
+        os.symlink('/dev/null', self.source_file)
+
+    def remove_source_tree(self):
+        os.unlink(self.source_file)
+        super(GetattrWithSymlinkTestCase, self).remove_source_tree()
+
+    def test(self):
+        # Symlinks are rejected. This file should not appear in the virtual
+        # tree.
+        self.assertRaises(
+            FuseError,
+            self.filesystem.getattr,
+            self.dest_file_encoded,
+        )
+
+manager.add_test_case_class(GetattrWithSymlinkTestCase)
+manager.add_test_case_class(mixin_unicode(GetattrWithSymlinkTestCase))
diff -Nru pytagsfs-0.9.1/tests/manager.py pytagsfs-0.9.2/tests/manager.py
--- pytagsfs-0.9.1/tests/manager.py 2009-10-19 02:00:55.000000000 +0200
+++ pytagsfs-0.9.2/tests/manager.py 2009-12-27 18:02:09.000000000 +0100
@@ -1,10 +1,21 @@
-import sys
+# Copyright (c) 2008-2009 Forest Bond.
+# This file is part of the pytagsfs software package.
+#
+# pytagsfs is free software; you can redistribute it and/or modify it under the
+# terms of the GNU General Public License version 2 as published by the Free
+# Software Foundation.
+#
+# A copy of the license has been included in the COPYING file.
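# GetattrWithSymlinkTestCase above expects a symlinked source file to stay out
# of the virtual tree: getattr on its would-be virtual path raises FuseError.
# A minimal stand-in for that kind of filtering when scanning a source
# directory (an illustration only, not the actual pytagsfs implementation):

import os

def iter_regular_source_files(root):
    for dirpath, dirnames, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            if os.path.islink(path):
                continue  # symlinks (e.g. foo -> /dev/null) are ignored
            yield path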
+ +import os, sys, glob, inspect from doctest import DocTestCase, DocTestFinder, DocTestParser -from unittest import TestCase, TestSuite, TextTestRunner, TestLoader +from unittest import TestSuite, TextTestRunner, TestLoader + class ListTestLoader(TestLoader): suiteClass = list + class TestManager(object): tests = None loader = None @@ -13,29 +24,77 @@ self.tests = [] self.loader = ListTestLoader() - def main(self, **kwargs): - result = self.run(**kwargs) - if not result.wasSuccessful(): + def get_test_modules(self): + for name in glob.glob(os.path.join(os.path.dirname(__file__), '*.py')): + if name == '__init__': + continue + module = os.path.basename(name)[:-3] + yield module + + def load(self): + for module in self.get_test_modules(): + __import__('tests', {}, {}, [module]) + + def find_modules(self, package): + modules = [package] + for name in dir(package): + value = getattr(package, name) + if (inspect.ismodule(value)) and ( + value.__name__.rpartition('.')[0] == package.__name__): + modules.extend(self.find_modules(value)) + return modules + + def main(self, test_names = None, print_only = False, coverage = False): + result = self.run( + test_names = test_names, + print_only = print_only, + coverage = coverage, + ) + + if (not print_only) and (not result.wasSuccessful()): sys.exit(1) sys.exit(0) - def run(self, test_names = None): - suite = TestSuite() - runner = TextTestRunner(verbosity = 2) - - for test in self.tests: - if self.should_run_test(test, test_names): - suite.addTest(test) - - return runner.run(suite) - - def iter_names(self, test_names = None): - for test in self.tests: - if self.should_run_test(test, test_names): - yield test.id() + def run(self, test_names = None, print_only = False, coverage = False): + if 'pytagsfs' in sys.modules: + raise AssertionError( + 'pytagsfs already imported; ' + 'this interferes with coverage analysis' + ) + + if coverage: + import coverage as _coverage + + if int(_coverage.__version__.split('.')[0]) < 3: + print >>sys.stderr, ( + 'warning: coverage versions < 3 ' + 'are known to produce imperfect results' + ) + + _coverage.use_cache(False) + _coverage.start() + + try: + self.load() - def get_names(self, test_names = None): - return list(self.iter_names(test_names)) + suite = TestSuite() + runner = TextTestRunner(verbosity = 2) + + for test in self.tests: + if self.should_run_test(test, test_names): + if print_only: + print test.id() + else: + suite.addTest(test) + + if not print_only: + return runner.run(suite) + + finally: + if coverage: + _coverage.stop() + import pytagsfs + _coverage.report(self.find_modules(pytagsfs)) def should_run_test(self, test, test_names): if test_names is None: @@ -50,8 +109,17 @@ return False def add_test_suite(self, test_suite): - self.tests.extend(list(test_suite)) - + self.tests.extend(self.flatten_test_suite(test_suite)) + + def flatten_test_suite(self, test_suite): + tests = [] + if isinstance(test_suite, TestSuite): + for test in list(test_suite): + tests.extend(self.flatten_test_suite(test)) + else: + tests.append(test_suite) + return tests + def add_test_case_class(self, test_case_class): self.tests.extend( self.loader.loadTestsFromTestCase(test_case_class)) @@ -94,9 +162,18 @@ for test_case in self.get_doc_test_cases_from_string(*args, **kwargs): self.add_test_case_class(test_case) + def import_dotted_name(self, name): + mod = __import__(name) + components = name.split('.') + for component in components[1:]: + try: + mod = getattr(mod, component) + except AttributeError: + raise ImportError('%r has 
no attribute %s' % (mod, component)) + return mod + def get_doc_test_cases_from_module(self, name): - from sclapp.util import importName - mod = importName(name) + mod = self.import_dotted_name(name) finder = DocTestFinder() tests = finder.find(mod) @@ -125,7 +202,9 @@ def add_doc_test_cases_from_text_file(self, *args, **kwargs): for test_case in self.get_doc_test_cases_from_text_file( - *args, **kwargs): + *args, + **kwargs + ): self.add_test_case_class(test_case) manager = TestManager() diff -Nru pytagsfs-0.9.1/tests/regex.py pytagsfs-0.9.2/tests/regex.py --- pytagsfs-0.9.1/tests/regex.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/tests/regex.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,3 +1,12 @@ +# Copyright (c) 2007-2008 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + from unittest import TestCase from manager import manager diff -Nru pytagsfs-0.9.1/tests/sourcetree.py pytagsfs-0.9.2/tests/sourcetree.py --- pytagsfs-0.9.1/tests/sourcetree.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/tests/sourcetree.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,3 +1,12 @@ +# Copyright (c) 2007-2008 Forest Bond. +# This file is part of the pytagsfs software package. +# +# pytagsfs is free software; you can redistribute it and/or modify it under the +# terms of the GNU General Public License version 2 as published by the Free +# Software Foundation. +# +# A copy of the license has been included in the COPYING file. + import os from unittest import TestCase diff -Nru pytagsfs-0.9.1/tests/sourcetreemon.py pytagsfs-0.9.2/tests/sourcetreemon.py --- pytagsfs-0.9.1/tests/sourcetreemon.py 2009-10-19 02:00:55.000000000 +0200 +++ pytagsfs-0.9.2/tests/sourcetreemon.py 2009-12-27 18:02:09.000000000 +0100 @@ -1,4 +1,4 @@ -# Copyright (c) 2007-2008 Forest Bond. +# Copyright (c) 2007-2009 Forest Bond. # This file is part of the pytagsfs software package. # # pytagsfs is free software; you can redistribute it and/or modify it under the @@ -329,6 +329,9 @@ if count > len(dirs) + 1: raise AssertionError('waited too long for events') + self._check_removals(removals, dirs) + + def _check_removals(self, removals, dirs): self.assertEqual(removals, dirs) @@ -336,12 +339,13 @@ class InotifyxSourceTreeMonitorTestCase(_SourceTreeMonitorTestCase): sourcetreemoncls = InotifyxSourceTreeMonitor sets_is_dir = True + manager.add_test_case_class(InotifyxSourceTreeMonitorTestCase) - class DeferredInotifyxSourceTreeMonitorTestCase( - _SourceTreeMonitorTestCase): + class DeferredInotifyxSourceTreeMonitorTestCase(_SourceTreeMonitorTestCase): sourcetreemoncls = DeferredInotifyxSourceTreeMonitor sets_is_dir = True + manager.add_test_case_class(DeferredInotifyxSourceTreeMonitorTestCase) @@ -349,11 +353,25 @@ class GaminSourceTreeMonitorTestCase(_SourceTreeMonitorTestCase): sourcetreemoncls = GaminSourceTreeMonitor sets_is_dir = False + + def _check_removals(self, removals, dirs): + # Gamin does not return these events in a reliable order. This + # situation is handled fine, even though it is inappropriate. As + # an exception, we don't fail this test if that happens. 
+        self.assertEqual(set(removals), set(dirs))
+
 manager.add_test_case_class(GaminSourceTreeMonitorTestCase)
 
 class DeferredGaminSourceTreeMonitorTestCase(_SourceTreeMonitorTestCase):
     sourcetreemoncls = DeferredGaminSourceTreeMonitor
     sets_is_dir = False
+
+    def _check_removals(self, removals, dirs):
+        # Gamin does not return these events in a reliable order. This
+        # situation is handled fine, even though it is inappropriate. As
+        # an exception, we don't fail this test if that happens.
+        self.assertEqual(set(removals), set(dirs))
+
 manager.add_test_case_class(DeferredGaminSourceTreeMonitorTestCase)
@@ -361,9 +379,11 @@
 class KqueueSourceTreeMonitorTestCase(_SourceTreeMonitorTestCase):
     sourcetreemoncls = KqueueSourceTreeMonitor
     sets_is_dir = False
+
 manager.add_test_case_class(KqueueSourceTreeMonitorTestCase)
 
 class DeferredKqueueSourceTreeMonitorTestCase(_SourceTreeMonitorTestCase):
     sourcetreemoncls = DeferredKqueueSourceTreeMonitor
     sets_is_dir = False
+
 manager.add_test_case_class(DeferredKqueueSourceTreeMonitorTestCase)
diff -Nru pytagsfs-0.9.1/tests/values.py pytagsfs-0.9.2/tests/values.py
--- pytagsfs-0.9.1/tests/values.py 2009-10-19 02:00:55.000000000 +0200
+++ pytagsfs-0.9.2/tests/values.py 2009-12-27 18:02:09.000000000 +0100
@@ -1,3 +1,12 @@
+# Copyright (c) 2008 Forest Bond.
+# This file is part of the pytagsfs software package.
+#
+# pytagsfs is free software; you can redistribute it and/or modify it under the
+# terms of the GNU General Public License version 2 as published by the Free
+# Software Foundation.
+#
+# A copy of the license has been included in the COPYING file.
+
 from unittest import TestCase
 
 from pytagsfs.values import Values
diff -Nru pytagsfs-0.9.1/util/profile pytagsfs-0.9.2/util/profile
--- pytagsfs-0.9.1/util/profile 2009-10-19 02:00:55.000000000 +0200
+++ pytagsfs-0.9.2/util/profile 2009-12-27 18:02:09.000000000 +0100
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-# Copyright (c) 2007-2008 Forest Bond.
+# Copyright (c) 2007-2009 Forest Bond.
 # This file is part of the pytagsfs software package.
 #
 # pytagsfs is free software; you can redistribute it and/or modify it under the
@@ -9,7 +9,7 @@
 #
 # A copy of the license has been included in the COPYING file.
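# _check_removals above compares sets because gamin may deliver removal events
# in any order; plain list equality would fail on a mere reordering:

removals = ['a/b/c', 'a/b', 'a']
dirs = ['a', 'a/b', 'a/b/c']
assert removals != dirs            # order-sensitive comparison: spurious failure
assert set(removals) == set(dirs)  # order-insensitive comparison: passes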
-import os, sys, time, uuid +import os, sys, time, uuid, subprocess from optparse import OptionParser from pytagsfs.fs import UMOUNT_COMMAND @@ -93,13 +93,16 @@ yield choice_group -def create_source_file(source_dir, permutation): +def create_source_file(source_dir, permutation, blksize, nblocks): filename = os.path.join(source_dir, unique_id()) f = open(filename, 'w') try: try: f.write('\n'.join(permutation)) f.write('\n') + data = blksize * '\0' + for i in range(nblocks): + f.write(data) finally: f.close() except Exception: @@ -108,7 +111,7 @@ return filename -def build_source_tree(source_dir, depth, breadth): +def build_source_tree(source_dir, depth, breadth, blocksize, nblocks): choice_groups = list(iter_choice_groups(depth, breadth)) permutations = list(iter_permutations(choice_groups)) filenames = [] @@ -117,7 +120,12 @@ try: try: for permutation in permutations: - filenames.append(create_source_file(source_dir, permutation)) + filenames.append(create_source_file( + source_dir, + permutation, + blocksize, + nblocks, + )) except: for filename in filenames: os.unlink(filename) @@ -156,23 +164,33 @@ def umount(mount_point): - time.sleep(1) - os.system(UMOUNT_COMMAND % quote_arg(mount_point)) + for retry in range(5): + time.sleep(2) + if os.system(UMOUNT_COMMAND % quote_arg(mount_point)) == 0: + break def run_benchmark( benchmark, mount_point, source_dir, - mount_options = None, - reps = None, - depth = 2, - breadth = 10, + mount_options, + reps, + depth, + breadth, + blocksize, + nblocks, ): + # logsize must be large enough to hold all of the profiling messages. + # Otherwise, we'll get jumbled messages as a result of the log data being + # shifted back to make room for new data. If log file parsing fails, that + # is probably the cause (but it could also indicate bad concurrent writes + # to the log buffer). options = ','.join([ 'profile', 'metastores=pytagsfs.metastore.testlines.TestLinesMetaStore', 'format=%s' % make_format(depth), + 'logsize=%u' % (10 * 1024 * 1024), ]) if mount_options is not None: options = ','.join([options, mount_options]) @@ -186,7 +204,13 @@ os.mkdir(mount_point) try: - filenames = build_source_tree(source_dir, depth, breadth) + filenames = build_source_tree( + source_dir, + depth, + breadth, + blocksize, + nblocks, + ) try: mount(options, source_dir, mount_point) try: @@ -230,16 +254,19 @@ def benchmark_stat_mount_point(mount_point, source_dir): os.stat(mount_point) + benchmark_stat_mount_point.reps = 1000 def benchmark_listdir_mount_point(mount_point, source_dir): os.listdir(mount_point) + benchmark_listdir_mount_point.reps = 1000 def benchmark_find(mount_point, source_dir): os.system('find %s >/dev/null' % quote_arg(mount_point)) + benchmark_find.reps = 10 @@ -248,14 +275,39 @@ "find %s -type f ! 
-name '.log' -exec cat '{}' \; >/dev/null"
         % quote_arg(mount_point)
     )
+
 benchmark_read_all.reps = 10
 
+def benchmark_read_all_concurrent(mount_point, source_dir):
+    paths = []
+    for dirpath, dirnames, filenames in os.walk(mount_point):
+        for filename in filenames:
+            if filename != '.log':
+                paths.append(os.path.join(dirpath, filename))
+    popens = []
+    dev_null = open('/dev/null', 'w')
+    try:
+        for path in paths:
+            popen = subprocess.Popen(
+                ['cat', path],
+                stdout = dev_null,
+            )
+            popens.append(popen)
+    finally:
+        dev_null.close()
+    for popen in popens:
+        popen.wait()
+
+benchmark_read_all_concurrent.reps = 10
+
+
 BENCHMARKS = [
     benchmark_stat_mount_point,
     benchmark_listdir_mount_point,
     benchmark_find,
     benchmark_read_all,
+    benchmark_read_all_concurrent,
 ]
@@ -274,6 +326,8 @@
     parser.add_option('--reps', type = 'int', default = None)
     parser.add_option('--depth', type = 'int', default = 2)
     parser.add_option('--breadth', type = 'int', default = 10)
+    parser.add_option('--blocksize', type = 'int', default = 1024)
+    parser.add_option('--nblocks', type = 'int', default = 200)
 
     opts, args = parser.parse_args(argv[1:])
@@ -303,6 +357,8 @@
             reps = opts.reps,
             depth = opts.depth,
             breadth = opts.breadth,
+            blocksize = opts.blocksize,
+            nblocks = opts.nblocks,
         )
     elif opts.parse:
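# With the new options above at their defaults (--blocksize 1024,
# --nblocks 200), each generated source file carries a data section of
# blocksize * nblocks bytes in addition to its newline-joined permutation
# lines, so the read benchmarks operate on non-trivial files:

blocksize, nblocks = 1024, 200
data_section = blocksize * nblocks
print data_section  # 204800 bytes, i.e. 200 KiB per file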